@Namespace(value="tflite::impl") @NoOffset @Properties(inherit=tensorflowlite.class) public class SignatureRunner extends Pointer
Nested classes/interfaces inherited from class org.bytedeco.javacpp.Pointer: Pointer.CustomDeallocator, Pointer.Deallocator, Pointer.NativeDeallocator, Pointer.ReferenceCounter
Constructor and Description |
---|
SignatureRunner(Pointer p) - Pointer cast constructor. |
Modifier and Type | Method and Description |
---|---|
int | AllocateTensors() - Updates allocations for all tensors related to the given signature. |
int | Cancel() - Attempts to cancel the in-flight invocation, if any. |
PointerPointer | input_names() - Read-only access to the list of signature input names. |
long | input_size() - Returns the number of inputs. |
TfLiteTensor | input_tensor(BytePointer input_name) - Returns the input tensor identified by 'input_name' in the given signature. |
TfLiteTensor | input_tensor(String input_name) |
int | Invoke() - Invokes the signature runner (runs the graph identified by the given signature in dependency order). |
PointerPointer | output_names() - Read-only access to the list of signature output names. |
long | output_size() - Returns the number of outputs. |
TfLiteTensor | output_tensor(BytePointer output_name) - Returns the output tensor identified by 'output_name' in the given signature. |
TfLiteTensor | output_tensor(String output_name) |
int | ResizeInputTensor(BytePointer input_name, int[] new_size) |
int | ResizeInputTensor(BytePointer input_name, IntBuffer new_size) |
int | ResizeInputTensor(BytePointer input_name, IntPointer new_size) - Change the dimensionality of a given tensor. |
int | ResizeInputTensor(String input_name, int[] new_size) |
int | ResizeInputTensor(String input_name, IntBuffer new_size) |
int | ResizeInputTensor(String input_name, IntPointer new_size) |
int | ResizeInputTensorStrict(BytePointer input_name, int[] new_size) |
int | ResizeInputTensorStrict(BytePointer input_name, IntBuffer new_size) |
int | ResizeInputTensorStrict(BytePointer input_name, IntPointer new_size) - Change the dimensionality of a given tensor. |
int | ResizeInputTensorStrict(String input_name, int[] new_size) |
int | ResizeInputTensorStrict(String input_name, IntBuffer new_size) |
int | ResizeInputTensorStrict(String input_name, IntPointer new_size) |
void | SetAllowBufferHandleOutput(boolean allow_buffer_handle_output) - Sets whether buffer handle output is allowed. |
int | SetCustomAllocationForInputTensor(BytePointer input_name, TfLiteCustomAllocation allocation) |
int | SetCustomAllocationForInputTensor(BytePointer input_name, TfLiteCustomAllocation allocation, long flags) - Assigns (or reassigns) a custom memory allocation for the given tensor name. |
int | SetCustomAllocationForInputTensor(String input_name, TfLiteCustomAllocation allocation) |
int | SetCustomAllocationForInputTensor(String input_name, TfLiteCustomAllocation allocation, long flags) |
int | SetCustomAllocationForOutputTensor(BytePointer output_name, TfLiteCustomAllocation allocation) |
int | SetCustomAllocationForOutputTensor(BytePointer output_name, TfLiteCustomAllocation allocation, long flags) - Assigns (or reassigns) a custom memory allocation for the given tensor name. |
int | SetCustomAllocationForOutputTensor(String output_name, TfLiteCustomAllocation allocation) |
int | SetCustomAllocationForOutputTensor(String output_name, TfLiteCustomAllocation allocation, long flags) |
int | SetInputBufferHandle(BytePointer input_name, int buffer_handle, TfLiteDelegate delegate) |
int | SetInputBufferHandle(BytePointer input_name, int buffer_handle, TfLiteDelegate delegate, boolean release_existing_buffer_handle) - Warning: This is an experimental API and subject to change. |
int | SetInputBufferHandle(String input_name, int buffer_handle, TfLiteDelegate delegate) |
int | SetInputBufferHandle(String input_name, int buffer_handle, TfLiteDelegate delegate, boolean release_existing_buffer_handle) |
int | SetOutputBufferHandle(BytePointer output_name, int buffer_handle, TfLiteDelegate delegate) |
int | SetOutputBufferHandle(BytePointer output_name, int buffer_handle, TfLiteDelegate delegate, boolean release_existing_buffer_handle) - Warning: This is an experimental API and subject to change. |
int | SetOutputBufferHandle(String output_name, int buffer_handle, TfLiteDelegate delegate) |
int | SetOutputBufferHandle(String output_name, int buffer_handle, TfLiteDelegate delegate, boolean release_existing_buffer_handle) |
String | signature_key() - Returns the key for the corresponding signature. |
Methods inherited from class org.bytedeco.javacpp.Pointer: address, asBuffer, asByteBuffer, availablePhysicalBytes, calloc, capacity, capacity, close, deallocate, deallocate, deallocateReferences, deallocator, deallocator, equals, fill, formatBytes, free, getDirectBufferAddress, getPointer, getPointer, getPointer, getPointer, hashCode, interruptDeallocatorThread, isNull, isNull, limit, limit, malloc, maxBytes, maxPhysicalBytes, memchr, memcmp, memcpy, memmove, memset, offsetAddress, offsetof, offsetof, parseBytes, physicalBytes, physicalBytesInaccurate, position, position, put, realloc, referenceCount, releaseReference, retainReference, setNull, sizeof, sizeof, toString, totalBytes, totalCount, totalPhysicalBytes, withDeallocator, zero
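The methods above map onto a simple call sequence: resize inputs if needed, allocate tensors, fill the input buffers, invoke, and read the outputs. Below is a minimal, hedged sketch of that sequence. It assumes the runner is obtained from an enclosing Interpreter via GetSignatureRunner (as in the TFLite C++ API), that the model exposes a signature named "serving_default" with a single float input "x" and a float output "y" (hypothetical names), and that the kTfLiteOk constant and the TfLiteTensorData() helper are mapped by the generated org.bytedeco.tensorflowlite.global.tensorflowlite class.

```java
import org.bytedeco.javacpp.FloatPointer;
import org.bytedeco.tensorflowlite.Interpreter;
import org.bytedeco.tensorflowlite.SignatureRunner;
import org.bytedeco.tensorflowlite.TfLiteTensor;
import static org.bytedeco.tensorflowlite.global.tensorflowlite.*;

public class SignatureRunnerExample {
    /** Runs the hypothetical "serving_default" signature once for a scalar input. */
    static float runOnce(Interpreter interpreter, float value) {
        SignatureRunner runner = interpreter.GetSignatureRunner("serving_default");
        if (runner == null || runner.isNull()) {
            throw new IllegalStateException("signature not found");
        }
        // Resize the input, then update allocations for all tensors of this signature.
        if (runner.ResizeInputTensor("x", new int[] {1, 1}) != kTfLiteOk
                || runner.AllocateTensors() != kTfLiteOk) {
            throw new IllegalStateException("failed to prepare input tensors");
        }
        // Fill the input buffer and run the signature's subgraph in dependency order.
        TfLiteTensor input = runner.input_tensor("x");
        new FloatPointer(TfLiteTensorData(input)).put(0, value);
        if (runner.Invoke() != kTfLiteOk) {
            throw new IllegalStateException("Invoke failed");
        }
        // Read the result from the named output tensor.
        TfLiteTensor output = runner.output_tensor("y");
        return new FloatPointer(TfLiteTensorData(output)).get(0);
    }
}
```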
public SignatureRunner(Pointer p)
Pointer cast constructor. See also: Pointer(Pointer).
@StdString public String signature_key()
@Cast(value="const char**") @StdVector public PointerPointer input_names()
@Cast(value="const char**") @StdVector public PointerPointer output_names()
public TfLiteTensor input_tensor(@Cast(value="const char*") BytePointer input_name)
public TfLiteTensor input_tensor(String input_name)
@Const public TfLiteTensor output_tensor(@Cast(value="const char*") BytePointer output_name)
@Const public TfLiteTensor output_tensor(String output_name)
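The accessors above are enough to inspect a signature before moving any data. A small sketch, assuming a SignatureRunner named runner is already in hand and that PointerPointer.getString(long) can be used to read the returned const char** name arrays (an assumption about how that mapping is consumed):

```java
import org.bytedeco.javacpp.PointerPointer;
import org.bytedeco.tensorflowlite.SignatureRunner;

class SignatureInspection {
    /** Prints the signature key and its named inputs and outputs. */
    static void describe(SignatureRunner runner) {
        System.out.println("signature: " + runner.signature_key());

        PointerPointer inputNames = runner.input_names();
        for (long i = 0; i < runner.input_size(); i++) {
            System.out.println("  input:  " + inputNames.getString(i));
        }
        PointerPointer outputNames = runner.output_names();
        for (long i = 0; i < runner.output_size(); i++) {
            System.out.println("  output: " + outputNames.getString(i));
        }
    }
}
```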
@Cast(value="TfLiteStatus") public int ResizeInputTensor(@Cast(value="const char*") BytePointer input_name, @StdVector IntPointer new_size)
@Cast(value="TfLiteStatus") public int ResizeInputTensor(String input_name, @StdVector IntBuffer new_size)
@Cast(value="TfLiteStatus") public int ResizeInputTensor(@Cast(value="const char*") BytePointer input_name, @StdVector int[] new_size)
@Cast(value="TfLiteStatus") public int ResizeInputTensor(String input_name, @StdVector IntPointer new_size)
@Cast(value="TfLiteStatus") public int ResizeInputTensor(@Cast(value="const char*") BytePointer input_name, @StdVector IntBuffer new_size)
@Cast(value="TfLiteStatus") public int ResizeInputTensor(String input_name, @StdVector int[] new_size)
@Cast(value="TfLiteStatus") public int ResizeInputTensorStrict(@Cast(value="const char*") BytePointer input_name, @StdVector IntPointer new_size)
Change the dimensionality of a given tensor. Only unknown dimensions can be resized with this function; unknown dimensions are indicated as -1 in the dims_signature attribute of a TfLiteTensor.
Returns status of failure or success. Note that this doesn't actually resize any existing buffers. A call to AllocateTensors() is required to change the tensor input buffer.
@Cast(value="TfLiteStatus") public int ResizeInputTensorStrict(String input_name, @StdVector IntBuffer new_size)
@Cast(value="TfLiteStatus") public int ResizeInputTensorStrict(@Cast(value="const char*") BytePointer input_name, @StdVector int[] new_size)
@Cast(value="TfLiteStatus") public int ResizeInputTensorStrict(String input_name, @StdVector IntPointer new_size)
@Cast(value="TfLiteStatus") public int ResizeInputTensorStrict(@Cast(value="const char*") BytePointer input_name, @StdVector IntBuffer new_size)
@Cast(value="TfLiteStatus") public int ResizeInputTensorStrict(String input_name, @StdVector int[] new_size)
@Cast(value="TfLiteStatus") public int AllocateTensors()
@Cast(value="TfLiteStatus") public int Invoke()
@Cast(value="TfLiteStatus") public int Cancel()
Attempts to cancel the in-flight invocation, if any. This will not affect calls to Invoke that happened after this.
Non-blocking and thread safe. Returns kTfLiteError if cancellation is not enabled, otherwise returns kTfLiteOk.
Warning: This is an experimental API and subject to change.
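A hedged sketch of the intended usage: invoke on a worker thread and cancel from the caller. It assumes cancellation was enabled when the interpreter was built (an interpreter-level option not shown here); otherwise Cancel() returns kTfLiteError.

```java
static void invokeWithCancel(SignatureRunner runner) throws InterruptedException {
    // Run the (potentially long) invocation on a worker thread.
    Thread worker = new Thread(() -> {
        int status = runner.Invoke();
        System.out.println("Invoke finished with status " + status);
    });
    worker.start();

    // Cancel() is non-blocking and thread safe; it only affects the in-flight
    // invocation, not Invoke() calls issued after it.
    if (runner.Cancel() != kTfLiteOk) {
        System.out.println("cancellation is not enabled for this interpreter");
    }
    worker.join();
}
```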
@Cast(value="TfLiteStatus") public int SetCustomAllocationForInputTensor(@Cast(value="const char*") BytePointer input_name, @Const @ByRef TfLiteCustomAllocation allocation, @Cast(value="int64_t") long flags)
Assigns (or reassigns) a custom memory allocation for the given tensor name. flags is a bitmask, see TfLiteCustomAllocationFlags. The runtime does NOT take ownership of the underlying memory.
NOTE: The user needs to call AllocateTensors() after this. Invalid/insufficient buffers will cause an error during AllocateTensors or Invoke (in case of dynamic shapes in the graph).
Parameters should satisfy the following conditions:
1. tensor->allocation_type == kTfLiteArenaRw or kTfLiteArenaRwPersistent. In general, this is true for I/O tensors and variable tensors.
2. allocation->data has the appropriate permissions for runtime access (read-only for inputs, read-write for others), and outlives the Interpreter.
3. allocation->bytes >= tensor->bytes. This condition is checked again if any tensors are resized.
4. allocation->data should be aligned to kDefaultTensorAlignment defined in lite/util.h (currently 64 bytes). This check is skipped if kTfLiteCustomAllocationFlagsSkipAlignCheck is set through flags.
Warning: This is an experimental API and subject to change.
@Cast(value="TfLiteStatus") public int SetCustomAllocationForInputTensor(@Cast(value="const char*") BytePointer input_name, @Const @ByRef TfLiteCustomAllocation allocation)
@Cast(value="TfLiteStatus") public int SetCustomAllocationForInputTensor(String input_name, @Const @ByRef TfLiteCustomAllocation allocation, @Cast(value="int64_t") long flags)
@Cast(value="TfLiteStatus") public int SetCustomAllocationForInputTensor(String input_name, @Const @ByRef TfLiteCustomAllocation allocation)
@Cast(value="TfLiteStatus") public int SetCustomAllocationForOutputTensor(@Cast(value="const char*") BytePointer output_name, @Const @ByRef TfLiteCustomAllocation allocation, @Cast(value="int64_t") long flags)
Assigns (or reassigns) a custom memory allocation for the given tensor name. flags is a bitmask, see TfLiteCustomAllocationFlags. The runtime does NOT take ownership of the underlying memory.
NOTE: The user needs to call AllocateTensors() after this. Invalid/insufficient buffers will cause an error during AllocateTensors or Invoke (in case of dynamic shapes in the graph).
Parameters should satisfy the following conditions:
1. tensor->allocation_type == kTfLiteArenaRw or kTfLiteArenaRwPersistent. In general, this is true for I/O tensors and variable tensors.
2. allocation->data has the appropriate permissions for runtime access (read-only for inputs, read-write for others), and outlives the Interpreter.
3. allocation->bytes >= tensor->bytes. This condition is checked again if any tensors are resized.
4. allocation->data should be aligned to kDefaultTensorAlignment defined in lite/util.h (currently 64 bytes). This check is skipped if kTfLiteCustomAllocationFlagsSkipAlignCheck is set through flags.
Warning: This is an experimental API and subject to change.
@Cast(value="TfLiteStatus") public int SetCustomAllocationForOutputTensor(@Cast(value="const char*") BytePointer output_name, @Const @ByRef TfLiteCustomAllocation allocation)
@Cast(value="TfLiteStatus") public int SetCustomAllocationForOutputTensor(String output_name, @Const @ByRef TfLiteCustomAllocation allocation, @Cast(value="int64_t") long flags)
@Cast(value="TfLiteStatus") public int SetCustomAllocationForOutputTensor(String output_name, @Const @ByRef TfLiteCustomAllocation allocation)
public void SetAllowBufferHandleOutput(@Cast(value="bool") boolean allow_buffer_handle_output)
Sets whether buffer handle output is allowed. When using hardware delegation, the Interpreter makes the data of output tensors available in tensor->data by default. If the application can consume the buffer handle directly (e.g. reading output from an OpenGL texture), it can set this flag to true, so the Interpreter won't copy the data from the buffer handle to CPU memory.
Warning: This is an experimental API and subject to change.
@Cast(value="TfLiteStatus") public int SetInputBufferHandle(@Cast(value="const char*") BytePointer input_name, @Cast(value="TfLiteBufferHandle") int buffer_handle, TfLiteDelegate delegate, @Cast(value="bool") boolean release_existing_buffer_handle)
release_existing_buffer_handle: If true, the existing buffer handle will be released by TfLiteDelegate::FreeBufferHandle.
Warning: This is an experimental API and subject to change.
@Cast(value="TfLiteStatus") public int SetInputBufferHandle(@Cast(value="const char*") BytePointer input_name, @Cast(value="TfLiteBufferHandle") int buffer_handle, TfLiteDelegate delegate)
@Cast(value="TfLiteStatus") public int SetInputBufferHandle(String input_name, @Cast(value="TfLiteBufferHandle") int buffer_handle, TfLiteDelegate delegate, @Cast(value="bool") boolean release_existing_buffer_handle)
@Cast(value="TfLiteStatus") public int SetInputBufferHandle(String input_name, @Cast(value="TfLiteBufferHandle") int buffer_handle, TfLiteDelegate delegate)
@Cast(value="TfLiteStatus") public int SetOutputBufferHandle(@Cast(value="const char*") BytePointer output_name, @Cast(value="TfLiteBufferHandle") int buffer_handle, TfLiteDelegate delegate, @Cast(value="bool") boolean release_existing_buffer_handle)
release_existing_buffer_handle: If true, the existing buffer handle will be released by TfLiteDelegate::FreeBufferHandle.
Warning: This is an experimental API and subject to change.
@Cast(value="TfLiteStatus") public int SetOutputBufferHandle(@Cast(value="const char*") BytePointer output_name, @Cast(value="TfLiteBufferHandle") int buffer_handle, TfLiteDelegate delegate)
@Cast(value="TfLiteStatus") public int SetOutputBufferHandle(String output_name, @Cast(value="TfLiteBufferHandle") int buffer_handle, TfLiteDelegate delegate, @Cast(value="bool") boolean release_existing_buffer_handle)