Package | Description |
---|---|
org.bytedeco.tensorflowlite.global |
Modifier and Type | Method and Description |
---|---|
static TfLiteInterpreter |
tensorflowlite.TfLiteInterpreterCreate(TfLiteModel model,
TfLiteInterpreterOptions optional_options)
Returns a new interpreter using the provided model and options, or null on
failure.
|
static TfLiteInterpreter |
tensorflowlite.TfLiteInterpreterCreateWithSelectedOps(TfLiteModel model,
TfLiteInterpreterOptions options)
Returns a new interpreter using the provided model and options, or null on
failure, where the model uses only the operators explicitly added to the
options.
|
Modifier and Type | Method and Description |
---|---|
static int |
tensorflowlite.TfLiteInterpreterAllocateTensors(TfLiteInterpreter interpreter)
Updates allocations for all tensors, resizing dependent tensors using the
specified input tensor dimensionality.
|
static int |
tensorflowlite.TfLiteInterpreterCancel(TfLiteInterpreter interpreter)
Tries to cancel any in-flight invocation.
|
static void |
tensorflowlite.TfLiteInterpreterDelete(TfLiteInterpreter interpreter)
Destroys the interpreter.
|
static int |
tensorflowlite.TfLiteInterpreterEnsureTensorDataIsReadable(TfLiteInterpreter interpreter,
int tensor_index)
Ensures the data of the tensor at the given index is readable.
|
static int |
tensorflowlite.TfLiteInterpreterGetBufferHandle(TfLiteInterpreter interpreter,
int tensor_index,
int[] buffer_handle,
TfLiteOpaqueDelegateStruct delegate) |
static int |
tensorflowlite.TfLiteInterpreterGetBufferHandle(TfLiteInterpreter interpreter,
int tensor_index,
IntBuffer buffer_handle,
TfLiteOpaqueDelegateStruct delegate) |
static int |
tensorflowlite.TfLiteInterpreterGetBufferHandle(TfLiteInterpreter interpreter,
int tensor_index,
IntPointer buffer_handle,
PointerPointer delegate)
Gets the delegate buffer handle, and the delegate which can process
the buffer handle.
|
static int |
tensorflowlite.TfLiteInterpreterGetBufferHandle(TfLiteInterpreter interpreter,
int tensor_index,
IntPointer buffer_handle,
TfLiteOpaqueDelegateStruct delegate) |
static TfLiteTensor |
tensorflowlite.TfLiteInterpreterGetInputTensor(TfLiteInterpreter interpreter,
int input_index)
Returns the tensor associated with the input index.
|
static int |
tensorflowlite.TfLiteInterpreterGetInputTensorCount(TfLiteInterpreter interpreter)
Returns the number of input tensors associated with the model.
|
static int |
tensorflowlite.TfLiteInterpreterGetInputTensorIndex(TfLiteInterpreter interpreter,
int input_index)
Returns the tensor index corresponding to the input tensor.
WARNING: This is an experimental API and subject to change.
|
static TfLiteTensor |
tensorflowlite.TfLiteInterpreterGetOutputTensor(TfLiteInterpreter interpreter,
int output_index)
Returns the tensor associated with the output index.
|
static int |
tensorflowlite.TfLiteInterpreterGetOutputTensorCount(TfLiteInterpreter interpreter)
Returns the number of output tensors associated with the model.
|
static int |
tensorflowlite.TfLiteInterpreterGetOutputTensorIndex(TfLiteInterpreter interpreter,
int output_index)
Returns the tensor index corresponding to the output tensor.
WARNING: This is an experimental API and subject to change.
|
static int |
tensorflowlite.TfLiteInterpreterGetSignatureCount(TfLiteInterpreter interpreter)
--------------------------------------------------------------------------
SignatureRunner APIs
You can run inference by either:
(i) (recommended) using the Interpreter to initialize SignatureRunner(s) and
then only using SignatureRunner APIs.
|
static BytePointer |
tensorflowlite.TfLiteInterpreterGetSignatureKey(TfLiteInterpreter interpreter,
int signature_index)
Returns the key of the Nth signature in the model, where N is specified as
signature_index. |
static TfLiteSignatureRunner |
tensorflowlite.TfLiteInterpreterGetSignatureRunner(TfLiteInterpreter interpreter,
BytePointer signature_key)
Returns a new signature runner using the provided interpreter and signature
key, or nullptr on failure.
|
static TfLiteSignatureRunner |
tensorflowlite.TfLiteInterpreterGetSignatureRunner(TfLiteInterpreter interpreter,
String signature_key) |
static TfLiteTensor |
tensorflowlite.TfLiteInterpreterGetTensor(TfLiteInterpreter interpreter,
int index)
Returns modifiable access to the tensor that corresponds to the specified
index and is associated with the provided interpreter. |
static TfLiteTensor |
tensorflowlite.TfLiteInterpreterGetVariableTensor(TfLiteInterpreter interpreter,
int variable_index) |
static int |
tensorflowlite.TfLiteInterpreterGetVariableTensorCount(TfLiteInterpreter interpreter) |
static IntPointer |
tensorflowlite.TfLiteInterpreterInputTensorIndices(TfLiteInterpreter interpreter)
Returns a pointer to an array of input tensor indices.
|
static int |
tensorflowlite.TfLiteInterpreterInvoke(TfLiteInterpreter interpreter)
Runs inference for the loaded graph.
|
static int |
tensorflowlite.TfLiteInterpreterModifyGraphWithDelegate(TfLiteInterpreter interpreter,
TfLiteDelegate delegate)
Allows a delegate to look at the graph and modify the graph to handle
parts of the graph itself.
|
static IntPointer |
tensorflowlite.TfLiteInterpreterOutputTensorIndices(TfLiteInterpreter interpreter)
Returns a pointer to an array of output tensor indices.
|
static int |
tensorflowlite.TfLiteInterpreterResetVariableTensors(TfLiteInterpreter interpreter)
Resets all variable tensors to zero.
|
static int |
tensorflowlite.TfLiteInterpreterResizeInputTensor(TfLiteInterpreter interpreter,
int input_index,
int[] input_dims,
int input_dims_size) |
static int |
tensorflowlite.TfLiteInterpreterResizeInputTensor(TfLiteInterpreter interpreter,
int input_index,
IntBuffer input_dims,
int input_dims_size) |
static int |
tensorflowlite.TfLiteInterpreterResizeInputTensor(TfLiteInterpreter interpreter,
int input_index,
IntPointer input_dims,
int input_dims_size)
Resizes the specified input tensor.
|
static int |
tensorflowlite.TfLiteInterpreterSetBufferHandle(TfLiteInterpreter interpreter,
TfLiteTensor tensor,
int buffer_handle,
TfLiteOpaqueDelegateStruct delegate)
--------------------------------------------------------------------------
BufferHandle APIs
|
static int |
tensorflowlite.TfLiteInterpreterSetCustomAllocationForTensor(TfLiteInterpreter interpreter,
int tensor_index,
TfLiteCustomAllocation allocation,
long flags)
Assigns (or reassigns) a custom memory allocation for the given
tensor.
|
static void |
tensorflowlite.TfLiteSetAllowBufferHandleOutput(TfLiteInterpreter interpreter,
boolean allow_buffer_handle_output)
Sets whether buffer handle output is allowed.
|
Copyright © 2025. All rights reserved.