@Namespace(value="nvinfer1") @NoOffset @Properties(inherit=nvinfer.class) public class INetworkDefinition extends INoCopy
Pointer.CustomDeallocator, Pointer.Deallocator, Pointer.NativeDeallocator, Pointer.ReferenceCounter
Constructor and Description |
---|
INetworkDefinition()
Default native constructor.
|
INetworkDefinition(long size)
Native array allocator.
|
INetworkDefinition(Pointer p)
Pointer cast constructor.
|
Modifier and Type | Method and Description |
---|---|
IActivationLayer |
addActivation(ITensor input,
int type) |
IActivationLayer |
addActivation(ITensor input,
nvinfer.ActivationType type)
\brief Add an activation layer to the network.
|
IAssertionLayer |
addAssertion(ITensor condition,
BytePointer message) |
IAssertionLayer |
addAssertion(ITensor condition,
String message)
\brief Add an assertion layer to the network.
|
ICastLayer |
addCast(ITensor input,
int toType) |
ICastLayer |
addCast(ITensor input,
nvinfer.DataType toType)
\brief Add a cast layer.
|
IConcatenationLayer |
addConcatenation(ITensor inputs,
int nbInputs) |
IConcatenationLayer |
addConcatenation(PointerPointer inputs,
int nbInputs)
\brief Add a concatenation layer to the network.
|
IConstantLayer |
addConstant(Dims32 dimensions,
Weights weights)
\brief Add a constant layer to the network.
|
IConvolutionLayer |
addConvolution(ITensor input,
int nbOutputMaps,
DimsHW kernelSize,
Weights kernelWeights,
Weights biasWeights)
Deprecated.
Superseded by addConvolutionNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
|
IConvolutionLayer |
addConvolutionNd(ITensor input,
int nbOutputMaps,
Dims32 kernelSize,
Weights kernelWeights,
Weights biasWeights)
\brief Add a multi-dimension convolution layer to the network.
|
IDeconvolutionLayer |
addDeconvolution(ITensor input,
int nbOutputMaps,
DimsHW kernelSize,
Weights kernelWeights,
Weights biasWeights)
Deprecated.
Superseded by addDeconvolutionNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
|
IDeconvolutionLayer |
addDeconvolutionNd(ITensor input,
int nbOutputMaps,
Dims32 kernelSize,
Weights kernelWeights,
Weights biasWeights) |
IDequantizeLayer |
addDequantize(ITensor input,
ITensor scale)
\brief Add a dequantization layer to the network.
|
IEinsumLayer |
addEinsum(ITensor inputs,
int nbInputs,
BytePointer equation) |
IEinsumLayer |
addEinsum(ITensor inputs,
int nbInputs,
String equation) |
IEinsumLayer |
addEinsum(PointerPointer inputs,
int nbInputs,
String equation)
\brief Add an Einsum layer to the network.
|
IElementWiseLayer |
addElementWise(ITensor input1,
ITensor input2,
int op) |
IElementWiseLayer |
addElementWise(ITensor input1,
ITensor input2,
nvinfer.ElementWiseOperation op)
\brief Add an elementwise layer to the network.
|
IFillLayer |
addFill(Dims32 dimensions,
int op) |
IFillLayer |
addFill(Dims32 dimensions,
nvinfer.FillOperation op)
\brief Add a fill layer to the network.
|
IFullyConnectedLayer |
addFullyConnected(ITensor input,
int nbOutputs,
Weights kernelWeights,
Weights biasWeights)
Deprecated.
Deprecated in TensorRT 8.4. Superseded by addMatrixMultiply().
|
IGatherLayer |
addGather(ITensor data,
ITensor indices,
int axis)
\brief Add gather with mode GatherMode::kDEFAULT and specified axis and nbElementWiseDims=0.
|
IGatherLayer |
addGatherV2(ITensor data,
ITensor indices,
int mode) |
IGatherLayer |
addGatherV2(ITensor data,
ITensor indices,
nvinfer.GatherMode mode)
\brief Add gather with specified mode, axis=0 and nbElementWiseDims=0.
|
IGridSampleLayer |
addGridSample(ITensor input,
ITensor grid)
\brief Add a GridSample layer to the network.
|
IIdentityLayer |
addIdentity(ITensor input)
\brief Add an identity layer.
|
IIfConditional |
addIfConditional()
\brief Add an If-conditional layer to the network.
|
ITensor |
addInput(BytePointer name,
int type,
Dims32 dimensions) |
ITensor |
addInput(String name,
nvinfer.DataType type,
Dims32 dimensions)
\brief Add an input tensor to the network.
|
ILoop |
addLoop()
\brief Add a loop to the network.
|
ILRNLayer |
addLRN(ITensor input,
int window,
float alpha,
float beta,
float k)
\brief Add a LRN layer to the network.
|
IMatrixMultiplyLayer |
addMatrixMultiply(ITensor input0,
int op0,
ITensor input1,
int op1) |
IMatrixMultiplyLayer |
addMatrixMultiply(ITensor input0,
nvinfer.MatrixOperation op0,
ITensor input1,
nvinfer.MatrixOperation op1)
\brief Add a MatrixMultiply layer to the network.
|
INMSLayer |
addNMS(ITensor boxes,
ITensor scores,
ITensor maxOutputBoxesPerClass)
\brief Add a non-maximum suppression layer to the network.
|
INonZeroLayer |
addNonZero(ITensor input)
\brief Add a nonzero layer to the network.
|
INormalizationLayer |
addNormalization(ITensor input,
ITensor scale,
ITensor bias,
int axesMask)
\brief Add a normalization layer to the network.
|
IOneHotLayer |
addOneHot(ITensor indices,
ITensor values,
ITensor depth,
int axis)
\brief Add a OneHot layer to the network.
|
IPaddingLayer |
addPadding(ITensor input,
DimsHW prePadding,
DimsHW postPadding)
Deprecated.
Superseded by addPaddingNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
|
IPaddingLayer |
addPaddingNd(ITensor input,
Dims32 prePadding,
Dims32 postPadding)
Deprecated.
Deprecated in TensorRT 8.0. Superseded by addSlice().
|
IParametricReLULayer |
addParametricReLU(ITensor input,
ITensor slope)
\brief Add a parametric ReLU layer to the network.
|
IPluginV2Layer |
addPluginV2(ITensor inputs,
int nbInputs,
IPluginV2 plugin) |
IPluginV2Layer |
addPluginV2(PointerPointer inputs,
int nbInputs,
IPluginV2 plugin)
\brief Add a plugin layer to the network using the IPluginV2 interface.
|
IPoolingLayer |
addPooling(ITensor input,
int type,
DimsHW windowSize)
Deprecated.
|
IPoolingLayer |
addPooling(ITensor input,
nvinfer.PoolingType type,
DimsHW windowSize)
Deprecated.
Superseded by addPoolingNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
|
IPoolingLayer |
addPoolingNd(ITensor input,
int type,
Dims32 windowSize) |
IPoolingLayer |
addPoolingNd(ITensor input,
nvinfer.PoolingType type,
Dims32 windowSize)
\brief Add a multi-dimension pooling layer to the network.
|
IQuantizeLayer |
addQuantize(ITensor input,
ITensor scale)
\brief Add a quantization layer to the network.
|
IRaggedSoftMaxLayer |
addRaggedSoftMax(ITensor input,
ITensor bounds)
\brief Add a RaggedSoftMax layer to the network.
|
IReduceLayer |
addReduce(ITensor input,
int operation,
int reduceAxes,
boolean keepDimensions) |
IReduceLayer |
addReduce(ITensor input,
nvinfer.ReduceOperation operation,
int reduceAxes,
boolean keepDimensions)
\brief Add a reduce layer to the network.
|
IResizeLayer |
addResize(ITensor input)
\brief Add a resize layer to the network.
|
IReverseSequenceLayer |
addReverseSequence(ITensor input,
ITensor sequenceLens)
\brief Add a ReverseSequence layer to the network.
|
IRNNv2Layer |
addRNNv2(ITensor input,
int layerCount,
int hiddenSize,
int maxSeqLen,
int op)
Deprecated.
|
IRNNv2Layer |
addRNNv2(ITensor input,
int layerCount,
int hiddenSize,
int maxSeqLen,
nvinfer.RNNOperation op)
Deprecated.
Deprecated prior to TensorRT 8.0 and will be removed in 9.0. Superseded by
INetworkDefinition::addLoop().
\warning RNN inputs do not support wildcard dimensions or explicit batch size networks.
\warning Int32 tensors are not valid input tensors, only for sequence lengths.
|
IScaleLayer |
addScale(ITensor input,
int mode,
Weights shift,
Weights scale,
Weights power) |
IScaleLayer |
addScale(ITensor input,
nvinfer.ScaleMode mode,
Weights shift,
Weights scale,
Weights power)
\brief Add a Scale layer to the network.
|
IScaleLayer |
addScaleNd(ITensor input,
int mode,
Weights shift,
Weights scale,
Weights power,
int channelAxis) |
IScaleLayer |
addScaleNd(ITensor input,
nvinfer.ScaleMode mode,
Weights shift,
Weights scale,
Weights power,
int channelAxis)
\brief Add a multi-dimension scale layer to the network.
|
IScatterLayer |
addScatter(ITensor data,
ITensor indices,
ITensor updates,
int mode) |
IScatterLayer |
addScatter(ITensor data,
ITensor indices,
ITensor updates,
nvinfer.ScatterMode mode)
\brief Add a Scatter layer to the network with specified mode and axis=0.
|
ISelectLayer |
addSelect(ITensor condition,
ITensor thenInput,
ITensor elseInput)
\brief Add a select layer to the network.
|
IShapeLayer |
addShape(ITensor input)
\brief Add a shape layer to the network.
|
IShuffleLayer |
addShuffle(ITensor input)
\brief Add a shuffle layer to the network.
|
ISliceLayer |
addSlice(ITensor input,
Dims32 start,
Dims32 size,
Dims32 stride)
\brief Add a slice layer to the network.
|
ISoftMaxLayer |
addSoftMax(ITensor input)
\brief Add a SoftMax layer to the network.
|
ITopKLayer |
addTopK(ITensor input,
int op,
int k,
int reduceAxes) |
ITopKLayer |
addTopK(ITensor input,
nvinfer.TopKOperation op,
int k,
int reduceAxes)
\brief Add a TopK layer to the network.
|
IUnaryLayer |
addUnary(ITensor input,
int operation) |
IUnaryLayer |
addUnary(ITensor input,
nvinfer.UnaryOperation operation)
\brief Add a unary layer to the network.
|
void |
destroy()
Deprecated.
Deprecated in TensorRT 8.0. Superseded by
delete .
\warning Calling destroy on a managed pointer will result in a double-free error. |
IBuilder |
getBuilder()
\brief Return the builder from which this INetworkDefinition was created.
|
IErrorRecorder |
getErrorRecorder()
\brief get the ErrorRecorder assigned to this interface.
|
ITensor |
getInput(int index)
\brief Get the input tensor specified by the given index.
|
ILayer |
getLayer(int index)
\brief Get the layer specified by the given index.
|
String |
getName()
\brief Returns the name associated with the network.
|
int |
getNbInputs()
\brief Get the number of inputs in the network.
|
int |
getNbLayers()
\brief Get the number of layers in the network.
|
int |
getNbOutputs()
\brief Get the number of outputs in the network.
|
ITensor |
getOutput(int index)
\brief Get the output tensor specified by the given index.
|
INetworkDefinition |
getPointer(long i) |
boolean |
hasExplicitPrecision()
Deprecated.
Deprecated in TensorRT 8.0.
|
boolean |
hasImplicitBatchDimension()
\brief Query whether the network was created with an implicit batch dimension.
|
void |
markOutput(ITensor tensor)
\brief Mark a tensor as a network output.
|
boolean |
markOutputForShapes(ITensor tensor)
\brief Enable tensor's value to be computed by IExecutionContext::getShapeBinding.
|
INetworkDefinition |
position(long position) |
void |
removeTensor(ITensor tensor)
\brief remove a tensor from the network definition.
|
void |
setErrorRecorder(IErrorRecorder recorder) |
void |
setName(BytePointer name) |
void |
setName(String name)
\brief Sets the name of the network.
|
boolean |
setWeightsName(Weights weights,
BytePointer name) |
boolean |
setWeightsName(Weights weights,
String name)
\brief Associate a name with all current uses of the given weights.
|
void |
unmarkOutput(ITensor tensor)
\brief unmark a tensor as a network output.
|
boolean |
unmarkOutputForShapes(ITensor tensor)
\brief Undo markOutputForShapes.
|
address, asBuffer, asByteBuffer, availablePhysicalBytes, calloc, capacity, capacity, close, deallocate, deallocate, deallocateReferences, deallocator, deallocator, equals, fill, formatBytes, free, getDirectBufferAddress, getPointer, getPointer, getPointer, hashCode, interruptDeallocatorThread, isNull, isNull, limit, limit, malloc, maxBytes, maxPhysicalBytes, memchr, memcmp, memcpy, memmove, memset, offsetAddress, offsetof, offsetof, parseBytes, physicalBytes, physicalBytesInaccurate, position, put, realloc, referenceCount, releaseReference, retainReference, setNull, sizeof, sizeof, toString, totalBytes, totalCount, totalPhysicalBytes, withDeallocator, zero
public INetworkDefinition()
public INetworkDefinition(long size)
Pointer.position(long)
.public INetworkDefinition(Pointer p)
Pointer(Pointer)
.public INetworkDefinition position(long position)
public INetworkDefinition getPointer(long i)
getPointer
in class Pointer
@NoException(value=true) public ITensor addInput(String name, nvinfer.DataType type, @ByVal @Cast(value="nvinfer1::Dims*") Dims32 dimensions)
name
- The name of the tensor.type
- The type of the data held in the tensor.dimensions
- The dimensions of the tensor.
\warning It is an error to specify a wildcard value on a dimension that is determined by trained parameters.
\warning If run on DLA with explicit dimensions, only leading dimension can be a wildcard. And provided profile
must have same minimum, optimum, and maximum dimensions.
\warning The string name must be null-terminated, and be at most 4096 bytes including the terminator.ITensor
@NoException(value=true) public ITensor addInput(@Cast(value="const char*") BytePointer name, @Cast(value="nvinfer1::DataType") int type, @ByVal @Cast(value="nvinfer1::Dims*") Dims32 dimensions)
@NoException(value=true) public void markOutput(@ByRef ITensor tensor)
tensor
- The tensor to mark as an output tensor.
\warning It is an error to mark a network input as an output.
\warning It is an error to mark a tensor inside an ILoop or an
IIfConditional as an output.@Deprecated @NoException(value=true) public IConvolutionLayer addConvolution(@ByRef ITensor input, int nbOutputMaps, @ByVal DimsHW kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights)
input
- The input tensor to the convolution.nbOutputMaps
- The number of output feature maps for the convolution.kernelSize
- The HW-dimensions of the convolution kernel.kernelWeights
- The kernel weights for the convolution.biasWeights
- The bias weights for the convolution. Weights{} represents no bias.\warning It is an error to specify a wildcard value for the 'C' dimension of the input tensor.
\warning Int32 tensors are not valid input tensors.
@Deprecated @NoException(value=true) public IFullyConnectedLayer addFullyConnected(@ByRef ITensor input, int nbOutputs, @ByVal Weights kernelWeights, @ByVal Weights biasWeights)
input
- The input tensor to the layer.nbOutputs
- The number of outputs of the layer.kernelWeights
- The kernel weights for the fully connected layer.biasWeights
- The bias weights for the fully connected layer. Weights{} represents no bias.\warning It is an error to specify a wildcard value for the 'C' dimension of the input tensor.
\warning Int32 tensors are not valid input tensors.
@NoException(value=true) public IActivationLayer addActivation(@ByRef ITensor input, nvinfer.ActivationType type)
input
- The input tensor to the layer.type
- The type of activation function to apply.
Note that the setAlpha() and setBeta() methods must be used on the
output for activations that require these parameters.ActivationType
\warning Int32 tensors are not valid input tensors.
@NoException(value=true) public IActivationLayer addActivation(@ByRef ITensor input, @Cast(value="nvinfer1::ActivationType") int type)
@Deprecated @NoException(value=true) public IPoolingLayer addPooling(@ByRef ITensor input, nvinfer.PoolingType type, @ByVal DimsHW windowSize)
input
- The input tensor to the layer.type
- The type of pooling to apply.windowSize
- The size of the pooling window.PoolingType
\warning Int32 tensors are not valid input tensors.
@Deprecated @NoException(value=true) public IPoolingLayer addPooling(@ByRef ITensor input, @Cast(value="nvinfer1::PoolingType") int type, @ByVal DimsHW windowSize)
@NoException(value=true) public ILRNLayer addLRN(@ByRef ITensor input, int window, float alpha, float beta, float k)
input
- The input tensor to the layer.window
- The size of the window.alpha
- The alpha value for the LRN computation.beta
- The beta value for the LRN computation.k
- The k value for the LRN computation.\warning Int32 tensors are not valid input tensors.
@NoException(value=true) public IScaleLayer addScale(@ByRef ITensor input, nvinfer.ScaleMode mode, @ByVal Weights shift, @ByVal Weights scale, @ByVal Weights power)
input
- The input tensor to the layer.
This tensor is required to have a minimum of 3 dimensions in implicit batch mode
and a minimum of 4 dimensions in explicit batch mode.mode
- The scaling mode.shift
- The shift value.scale
- The scale value.power
- The power value.
If the weights are available, then the size of weights are dependent on the ScaleMode.
For ScaleMode::kUNIFORM, the number of weights equals 1.
For ScaleMode::kCHANNEL, the number of weights equals the channel dimension.
For ScaleMode::kELEMENTWISE, the number of weights equals the product of the last three dimensions of the input.addScaleNd
,
\warning Int32 tensors are not valid input tensors.
@NoException(value=true) public IScaleLayer addScale(@ByRef ITensor input, @Cast(value="nvinfer1::ScaleMode") int mode, @ByVal Weights shift, @ByVal Weights scale, @ByVal Weights power)
@NoException(value=true) public ISoftMaxLayer addSoftMax(@ByRef ITensor input)
\warning Int32 tensors are not valid input tensors.
@NoException(value=true) public IConcatenationLayer addConcatenation(@Cast(value="nvinfer1::ITensor*const*") PointerPointer inputs, int nbInputs)
inputs
- The input tensors to the layer.nbInputs
- The number of input tensors.IConcatenationLayer
@NoException(value=true) public IConcatenationLayer addConcatenation(@ByPtrPtr ITensor inputs, int nbInputs)
@Deprecated @NoException(value=true) public IDeconvolutionLayer addDeconvolution(@ByRef ITensor input, int nbOutputMaps, @ByVal DimsHW kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights)
input
- The input tensor to the layer.nbOutputMaps
- The number of output feature maps.kernelSize
- The HW-dimensions of the deconvolution kernel.kernelWeights
- The kernel weights for the deconvolution.biasWeights
- The bias weights for the deconvolution. Weights{} represents no bias.\warning It is an error to specify a wildcard value for the 'C' dimension of the input tensor.
\warning Int32 tensors are not valid input tensors.
@NoException(value=true) public IElementWiseLayer addElementWise(@ByRef ITensor input1, @ByRef ITensor input2, nvinfer.ElementWiseOperation op)
input1
- The first input tensor to the layer.input2
- The second input tensor to the layer.op
- The binary operation that the layer applies.
The input tensors must have the same rank and compatible type.
Two types are compatible if they are the same type or are both in the set {kFLOAT, kHALF}.
For each dimension, their lengths must match, or one of them must be one.
In the latter case, the tensor is broadcast along that axis.
The output tensor has the same rank as the inputs.
For each dimension, its length is the maximum of the lengths of the
corresponding input dimension.
The inputs are shape tensors if the output is a shape tensor.IElementWiseLayer
@NoException(value=true) public IElementWiseLayer addElementWise(@ByRef ITensor input1, @ByRef ITensor input2, @Cast(value="nvinfer1::ElementWiseOperation") int op)
@NoException(value=true) public IUnaryLayer addUnary(@ByRef ITensor input, nvinfer.UnaryOperation operation)
input
- The input tensor to the layer.operation
- The operation to apply. Generally the input must have a floating-point type (or kINT8 as a quantized float),
except for the following operations:
* kSIGN accepts a floating-point or Int32 tensor.
* kNOT requires a Bool tensor.
The input is a shape tensor if the output is a shape tensor.
@NoException(value=true) public IUnaryLayer addUnary(@ByRef ITensor input, @Cast(value="nvinfer1::UnaryOperation") int operation)
@Deprecated @NoException(value=true) public IPaddingLayer addPadding(@ByRef ITensor input, @ByVal DimsHW prePadding, @ByVal DimsHW postPadding)
input
- The input tensor to the layer.prePadding
- The padding to apply to the start of the tensor.postPadding
- The padding to apply to the end of the tensor.IPaddingLayer
@NoException(value=true) public IShuffleLayer addShuffle(@ByRef ITensor input)
input
- The input tensor to the layer.IShuffleLayer
@NoException(value=true) public IOneHotLayer addOneHot(@ByRef ITensor indices, @ByRef ITensor values, @ByRef ITensor depth, int axis)
indices
- - tensor containing indices where on_value should be set.values
- - a 2-element tensor, consisting of [off_value, on_value].depth
- - tensor containing the width of the added one-hot dimension.axis
- - the axis to add the one-hot encoding to.IOneHotLayer
@NoException(value=true) public int getNbLayers()
getLayer()
@NoException(value=true) public ILayer getLayer(int index)
index
- The index of the layer.getNbLayers()
@NoException(value=true) public int getNbInputs()
getInput()
@NoException(value=true) public ITensor getInput(int index)
index
- The index of the input tensor.getNbInputs()
@NoException(value=true) public int getNbOutputs()
getOutput()
@NoException(value=true) public ITensor getOutput(int index)
index
- The index of the output tensor.getNbOutputs()
@Deprecated @NoException(value=true) public void destroy()
delete
.
\warning Calling destroy on a managed pointer will result in a double-free error.@NoException(value=true) public IReduceLayer addReduce(@ByRef ITensor input, nvinfer.ReduceOperation operation, @Cast(value="uint32_t") int reduceAxes, @Cast(value="bool") boolean keepDimensions)
input
- The input tensor to the layer.operation
- The reduction operation to perform.reduceAxes
- The reduction dimensions.
The bit in position i of bitmask reduceAxes corresponds to explicit dimension i of the result.
E.g., the least significant bit corresponds to the first explicit dimension and the next to least
significant bit corresponds to the second explicit dimension.keepDimensions
- The boolean that specifies whether or not to keep the reduced dimensions in the
output of the layer.
The reduce layer works by performing an operation specified by \p operation to reduce the tensor \p input
across the axes specified by \p reduceAxes.\warning If output is an Int32 shape tensor, ReduceOperation::kAVG is unsupported.
@NoException(value=true) public IReduceLayer addReduce(@ByRef ITensor input, @Cast(value="nvinfer1::ReduceOperation") int operation, @Cast(value="uint32_t") int reduceAxes, @Cast(value="bool") boolean keepDimensions)
@NoException(value=true) public ITopKLayer addTopK(@ByRef ITensor input, nvinfer.TopKOperation op, int k, @Cast(value="uint32_t") int reduceAxes)
input
- The input tensor to the layer.op
- Operation to perform.k
- The number of elements to keep. For dynamic k, use the setInput() method to pass in k as a tensor
instead, which will override the static k value passed here in calculations.reduceAxes
- The reduction dimensions.
The bit in position i of bitmask reduceAxes corresponds to explicit dimension i of the result.
E.g., the least significant bit corresponds to the first explicit dimension and the next to least
significant bit corresponds to the second explicit dimension.
Currently reduceAxes must specify exactly one dimension, and it must be one of the last four dimensions.\warning Int32 tensors are not valid input tensors.
@NoException(value=true) public ITopKLayer addTopK(@ByRef ITensor input, @Cast(value="nvinfer1::TopKOperation") int op, int k, @Cast(value="uint32_t") int reduceAxes)
@NoException(value=true) public IGatherLayer addGather(@ByRef ITensor data, @ByRef ITensor indices, int axis)
data
- The tensor to gather values from.indices
- The tensor to get indices from to populate the output tensor.axis
- The axis in the data tensor to gather on.IGatherLayer
@NoException(value=true) public IGatherLayer addGatherV2(@ByRef ITensor data, @ByRef ITensor indices, nvinfer.GatherMode mode)
data
- The tensor to gather values from.indices
- The tensor to get indices from to populate the output tensor.mode
- The gather mode.IGatherLayer
@NoException(value=true) public IGatherLayer addGatherV2(@ByRef ITensor data, @ByRef ITensor indices, @Cast(value="nvinfer1::GatherMode") int mode)
@NoException(value=true) public IRaggedSoftMaxLayer addRaggedSoftMax(@ByRef ITensor input, @ByRef ITensor bounds)
input
- The ZxS input tensor.bounds
- The Zx1 bounds tensor.\warning The bounds tensor cannot have the last dimension be the wildcard character.
\warning Int32 tensors are not valid input tensors.
@NoException(value=true) public IMatrixMultiplyLayer addMatrixMultiply(@ByRef ITensor input0, nvinfer.MatrixOperation op0, @ByRef ITensor input1, nvinfer.MatrixOperation op1)
input0
- The first input tensor (commonly A).op0
- The operation to apply to input0.input1
- The second input tensor (commonly B).op1
- The operation to apply to input1.
The inputs are shape tensors if the output is a shape tensor.\warning Int32 tensors are not valid input tensors.
@NoException(value=true) public IMatrixMultiplyLayer addMatrixMultiply(@ByRef ITensor input0, @Cast(value="nvinfer1::MatrixOperation") int op0, @ByRef ITensor input1, @Cast(value="nvinfer1::MatrixOperation") int op1)
@NoException(value=true) public INonZeroLayer addNonZero(@ByRef ITensor input)
input
- The input tensor to the layer.INonZeroLayer
@NoException(value=true) public IConstantLayer addConstant(@ByVal @Cast(value="nvinfer1::Dims*") Dims32 dimensions, @ByVal Weights weights)
dimensions
- The dimensions of the constant.weights
- The constant value, represented as weights.IConstantLayer
@Deprecated @NoException(value=true) public IRNNv2Layer addRNNv2(@ByRef ITensor input, int layerCount, int hiddenSize, int maxSeqLen, nvinfer.RNNOperation op)
input
- The input tensor to the layer (see below).layerCount
- The number of layers in the RNN.hiddenSize
- Size of the internal hidden state for each layer.maxSeqLen
- Maximum sequence length for the input.op
- The type of RNN to execute.
By default, the layer is configured with RNNDirection::kUNIDIRECTION and RNNInputMode::kLINEAR.
To change these settings, use IRNNv2Layer::setDirection() and IRNNv2Layer::setInputMode().
%Weights and biases for the added layer should be set using
IRNNv2Layer::setWeightsForGate() and IRNNv2Layer::setBiasForGate() prior
to building an engine using this network.
The input tensors must be of the type DataType::kFLOAT or DataType::kHALF.
The layout of the weights is row major and must be the same datatype as the input tensor.
\p weights contain 8 matrices and \p bias contains 8 vectors.
See IRNNv2Layer::setWeightsForGate() and IRNNv2Layer::setBiasForGate() for details on the required input
format for \p weights and \p bias.
The \p input ITensor should contain zero or more index dimensions {N1, ..., Np}
, followed by
two dimensions, defined as follows:
- S_max
is the maximum allowed sequence length (number of RNN iterations)
- E
specifies the embedding length (unless RNNInputMode::kSKIP is set, in which case it should match
getHiddenSize()).
By default, all sequences in the input are assumed to be size \p maxSeqLen. To provide explicit sequence
lengths for each input sequence in the batch, use IRNNv2Layer::setSequenceLengths().
The RNN layer outputs up to three tensors.
The first output tensor is the output of the final RNN layer across all timesteps, with dimensions
{N1, ..., Np, S_max, H}
:
- N1..Np
are the index dimensions specified by the input tensor
- S_max
is the maximum allowed sequence length (number of RNN iterations)
- H
is an output hidden state (equal to getHiddenSize() or 2x getHiddenSize())
The second tensor is the final hidden state of the RNN across all layers, and if the RNN
is an LSTM (i.e. getOperation() is RNNOperation::kLSTM), then the third tensor is the final cell state
of the RNN across all layers. Both the second and third output tensors have dimensions
{N1, ..., Np, L, H}
:
- N1..Np
are the index dimensions specified by the input tensor
- L
is the number of layers in the RNN, equal to getLayerCount() if getDirection is
RNNDirection::kUNIDIRECTION,
and 2x getLayerCount() if getDirection is RNNDirection::kBIDIRECTION. In the bi-directional
case, layer l
's final forward hidden state is stored in L = 2*l
, and
final backward hidden state is stored in L= 2*l + 1
.
- H
is the hidden state for each layer, equal to getHiddenSize().IRNNv2Layer
@Deprecated @NoException(value=true) public IRNNv2Layer addRNNv2(@ByRef ITensor input, int layerCount, int hiddenSize, int maxSeqLen, @Cast(value="nvinfer1::RNNOperation") int op)
@NoException(value=true) public IIdentityLayer addIdentity(@ByRef ITensor input)
input
- The input tensor to the layer.IIdentityLayer
@NoException(value=true) public ICastLayer addCast(@ByRef ITensor input, nvinfer.DataType toType)
input
- The input tensor to the layer.toType
- The DataType of the output tensor.ICastLayer
@NoException(value=true) public ICastLayer addCast(@ByRef ITensor input, @Cast(value="nvinfer1::DataType") int toType)
@NoException(value=true) public void removeTensor(@ByRef ITensor tensor)
tensor
- the tensor to remove
It is illegal to remove a tensor that is the input or output of a layer.
if this method is called with such a tensor, a warning will be emitted on the log
and the call will be ignored. Its intended use is to remove detached tensors after
e.g. concatenating two networks with Layer::setInput().@NoException(value=true) public void unmarkOutput(@ByRef ITensor tensor)
tensor
- The tensor to unmark as an output tensor.
see markOutput()@NoException(value=true) public IPluginV2Layer addPluginV2(@Cast(value="nvinfer1::ITensor*const*") PointerPointer inputs, int nbInputs, @ByRef IPluginV2 plugin)
inputs
- The input tensors to the layer.nbInputs
- The number of input tensors.plugin
- The layer plugin.\warning Dimension wildcard are only supported with IPluginV2DynamicExt or IPluginV2IOExt plugins.
\warning Int32 tensors are not valid input tensors.
@NoException(value=true) public IPluginV2Layer addPluginV2(@ByPtrPtr ITensor inputs, int nbInputs, @ByRef IPluginV2 plugin)
@NoException(value=true) public ISliceLayer addSlice(@ByRef ITensor input, @ByVal @Cast(value="nvinfer1::Dims*") Dims32 start, @ByVal @Cast(value="nvinfer1::Dims*") Dims32 size, @ByVal @Cast(value="nvinfer1::Dims*") Dims32 stride)
input
- The input tensor to the layer.start
- The start offset.size
- The output dimension.stride
- The slicing stride
Positive, negative, zero stride values, and combinations of them in different dimensions are allowed.ISliceLayer
@NoException(value=true) public void setName(String name)
name
- The name to assign to this network.
Set the name of the network so that it can be associated with a built
engine. The \p name must be a null-terminated C-style string.
TensorRT makes no use of this string except storing it as part of the engine
so that it may be retrieved at runtime.
A name unique to the builder will be generated by default.
This method copies the name string.
\warning The string name must be null-terminated, and be at most 4096 bytes including the terminator.INetworkDefinition::getName(), ISafeCudaEngine::getName()
@NoException(value=true) public void setName(@Cast(value="const char*") BytePointer name)
@NoException(value=true) public String getName()
INetworkDefinition::setName()
@NoException(value=true) public IShapeLayer addShape(@ByRef ITensor input)
input
- The input tensor to the layer.\warning addShape is only supported when hasImplicitBatchDimension is false.
@Cast(value="bool") @NoException(value=true) public boolean hasImplicitBatchDimension()
createNetworkV2
@Cast(value="bool") @NoException(value=true) public boolean markOutputForShapes(@ByRef ITensor tensor)
isShapeBinding(), getShapeBinding()
@Cast(value="bool") @NoException(value=true) public boolean unmarkOutputForShapes(@ByRef ITensor tensor)
@NoException(value=true) public IParametricReLULayer addParametricReLU(@ByRef ITensor input, @ByRef ITensor slope)
input
- The input tensor to the layer.slope
- The slope tensor to the layer. This tensor should be unidirectionally broadcastable
to the input tensor.\warning Int32 tensors are not valid input tensors.
@NoException(value=true) public IConvolutionLayer addConvolutionNd(@ByRef ITensor input, int nbOutputMaps, @ByVal @Cast(value="nvinfer1::Dims*") Dims32 kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights)
input
- The input tensor to the convolution.nbOutputMaps
- The number of output feature maps for the convolution.kernelSize
- The multi-dimensions of the convolution kernel.kernelWeights
- The kernel weights for the convolution.biasWeights
- The bias weights for the convolution. Weights{} represents no bias.\warning It is an error to specify a wildcard value for the 'C' dimension of the input tensor.
\warning Int32 tensors are not valid input tensors.
\warning Only 2D or 3D convolution is supported.
@NoException(value=true) public IPoolingLayer addPoolingNd(@ByRef ITensor input, nvinfer.PoolingType type, @ByVal @Cast(value="nvinfer1::Dims*") Dims32 windowSize)
input
- The input tensor to the layer.type
- The type of pooling to apply.windowSize
- The size of the pooling window. See also: PoolingType
\warning Int32 tensors are not valid input tensors.
\warning Only 2D or 3D pooling is supported.
@NoException(value=true) public IPoolingLayer addPoolingNd(@ByRef ITensor input, @Cast(value="nvinfer1::PoolingType") int type, @ByVal @Cast(value="nvinfer1::Dims*") Dims32 windowSize)
@NoException(value=true) public IDeconvolutionLayer addDeconvolutionNd(@ByRef ITensor input, int nbOutputMaps, @ByVal @Cast(value="nvinfer1::Dims*") Dims32 kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights)
@NoException(value=true) public IScaleLayer addScaleNd(@ByRef ITensor input, nvinfer.ScaleMode mode, @ByVal Weights shift, @ByVal Weights scale, @ByVal Weights power, int channelAxis)
input
- The input tensor to the layer.mode
- The scaling mode.shift
- The shift value.scale
- The scale value.power
- The power value.channelAxis
- The channel axis.
If the weights are available, then the size of weights are dependent on the ScaleMode.
For ScaleMode::kUNIFORM, the number of weights equals 1.
For ScaleMode::kCHANNEL, the number of weights equals the channel dimension.
For ScaleMode::kELEMENTWISE, the number of weights equals the product of all input dimensions at channelAxis and
beyond.
For example, if the inputs dimensions are [A,B,C,D,E,F], and channelAxis=2:
For ScaleMode::kUNIFORM, the number of weights is equal to 1.
For ScaleMode::kCHANNEL, the number of weights is C.
For ScaleMode::kELEMENTWISE, the number of weights is C*D*E*F.
channelAxis can also be set explicitly using setChannelAxis(). See also: IScaleLayer
\warning Int32 tensors are not valid input tensors.
\warning Only 2D or 3D scale is supported.
@NoException(value=true) public IScaleLayer addScaleNd(@ByRef ITensor input, @Cast(value="nvinfer1::ScaleMode") int mode, @ByVal Weights shift, @ByVal Weights scale, @ByVal Weights power, int channelAxis)
@NoException(value=true) public IResizeLayer addResize(@ByRef ITensor input)
input
- The input tensor to the layer. \warning Int32 tensors are not valid input tensors.
@Cast(value="bool") @Deprecated @NoException(value=true) public boolean hasExplicitPrecision()
createNetworkV2
@NoException(value=true) public ILoop addLoop()
@NoException(value=true) public ISelectLayer addSelect(@ByRef ITensor condition, @ByRef ITensor thenInput, @ByRef ITensor elseInput)
condition
- The condition tensor to the layer. Must have type DataType::kBOOL.thenInput
- The "then" input tensor to the layer.elseInput
- The "else" input tensor to the layer.
All three input tensors must have the same rank, and along each axis
must have the same length or a length of one. If the length is one, the tensor
is broadcast along that axis. The output tensor has the dimensions of the inputs AFTER
the broadcast rule is applied. For example, given:
dimensions of condition: [1,1,5,9]
dimensions of thenInput: [1,1,5,9]
dimensions of elseInput: [1,3,1,9]
the output dimensions are [1,3,5,9], and the output contents are defined by:
output[0,i,j,k] = condition[0,0,j,k] ? thenInput[0,0,j,k] : elseInput[0,i,0,k]
The output dimensions are not necessarily the max of the input dimensions if any input
is an empty tensor. For example, if in the preceding example, 5 is changed to 0:
dimensions of condition: [1,1,0,9]
dimensions of thenInput: [1,1,0,9]
dimensions of elseInput: [1,3,1,9]
then the output dimensions are [1,3,0,9].
The network must not have an implicit batch dimension.
The inputs are shape tensors if the output is a shape tensor. See also: ISelectLayer
@NoException(value=true) public IAssertionLayer addAssertion(@ByRef ITensor condition, String message)
condition
- The input tensor to the layer.message
- A message to print if the assertion fails. See also: IAssertionLayer
@NoException(value=true) public IAssertionLayer addAssertion(@ByRef ITensor condition, @Cast(value="const char*") BytePointer message)
@NoException(value=true) public IFillLayer addFill(@ByVal @Cast(value="nvinfer1::Dims*") Dims32 dimensions, nvinfer.FillOperation op)
dimensions
- The output tensor dimensions.op
- The fill operation that the layer applies.
\warning For FillOperation::kLINSPACE, dimensions.nbDims must be 1.
This layer is non-deterministic across subsequent calls as the same inputs will produce different
output tensors if \p op is either FillOperation::kRANDOM_UNIFORM or FillOperation::kRANDOM_NORMAL
due to random state being shared across calls. The output tensors generated are deterministic when
starting from the same initial state.
The network must not have an implicit batch dimension. See also: IFillLayer
@NoException(value=true) public IFillLayer addFill(@ByVal @Cast(value="nvinfer1::Dims*") Dims32 dimensions, @Cast(value="nvinfer1::FillOperation") int op)
@Deprecated @NoException(value=true) public IPaddingLayer addPaddingNd(@ByRef ITensor input, @ByVal @Cast(value="nvinfer1::Dims*") Dims32 prePadding, @ByVal @Cast(value="nvinfer1::Dims*") Dims32 postPadding)
input
- The input tensor to the layer.prePadding
- The padding to apply to the start of the tensor.postPadding
- The padding to apply to the end of the tensor. See also: IPaddingLayer
@Cast(value="bool") @NoException(value=true) public boolean setWeightsName(@ByVal Weights weights, String name)
weights
- The weights to be named.name
- The name to associate with the weights.
@Cast(value="bool") @NoException(value=true) public boolean setWeightsName(@ByVal Weights weights, @Cast(value="const char*") BytePointer name)
@NoException(value=true) public void setErrorRecorder(IErrorRecorder recorder)
/**
@NoException(value=true) public IErrorRecorder getErrorRecorder()
setErrorRecorder()
@NoException(value=true) public IDequantizeLayer addDequantize(@ByRef ITensor input, @ByRef ITensor scale)
input
- The input tensor to be dequantized.scale
- A tensor with the scale value.\p input tensor data type must be DataType::kFLOAT.
\p scale tensor data type must be DataType::kFLOAT. The subgraph which terminates with the \p scale tensor must
be a build-time constant.
@NoException(value=true) public IScatterLayer addScatter(@ByRef ITensor data, @ByRef ITensor indices, @ByRef ITensor updates, nvinfer.ScatterMode mode)
data
- The input tensor to be updated with additional values.indices
- indices of the elements to be updated.updates
- values to be used for updates.mode
- scatter mode.\p indices tensor data type must be DataType::kINT32.
\p updates tensor data type must be the same as \p data
@NoException(value=true) public IScatterLayer addScatter(@ByRef ITensor data, @ByRef ITensor indices, @ByRef ITensor updates, @Cast(value="nvinfer1::ScatterMode") int mode)
@NoException(value=true) public IQuantizeLayer addQuantize(@ByRef ITensor input, @ByRef ITensor scale)
input
- The input tensor to be quantized.scale
- A tensor with the scale value.\p input tensor data type must be DataType::kFLOAT.
\p scale tensor data type must be DataType::kFLOAT. The subgraph which terminates with the \p scale tensor must
be a build-time constant.
@NoException(value=true) public IIfConditional addIfConditional()
IIfConditional
@NoException(value=true) public IEinsumLayer addEinsum(@Cast(value="nvinfer1::ITensor*const*") PointerPointer inputs, int nbInputs, String equation)
inputs
- The input tensors to the layer.nbInputs
- The number of input tensors.equation
- The equation of the layer. See also: IEinsumLayer
@NoException(value=true) public IEinsumLayer addEinsum(@ByPtrPtr ITensor inputs, int nbInputs, String equation)
@NoException(value=true) public IEinsumLayer addEinsum(@ByPtrPtr ITensor inputs, int nbInputs, @Cast(value="const char*") BytePointer equation)
@NoException(value=true) public IGridSampleLayer addGridSample(@ByRef ITensor input, @ByRef ITensor grid)
input
- The input tensor to the layer.grid
- The grid tensor to the layer. Creates a GridSample layer with an InterpolationMode::kLINEAR, unaligned corners,
and SampleMode::kFILL for 4d-shape input tensors.
@NoException(value=true) public INMSLayer addNMS(@ByRef ITensor boxes, @ByRef ITensor scores, @ByRef ITensor maxOutputBoxesPerClass)
boxes
- The input boxes tensor to the layer.scores
- The input scores tensor to the layer.maxOutputBoxesPerClass
- The input maxOutputBoxesPerClass tensor to the layer. See also: INMSLayer
@NoException(value=true) public IReverseSequenceLayer addReverseSequence(@ByRef ITensor input, @ByRef ITensor sequenceLens)
input
- The input tensor to the layer. Must have rank >= 2.sequenceLens
- 1D tensor specifying lengths of sequences to reverse in a batch. The length of the
sequenceLens tensor must be equal to the size of the dimension in input tensor specified by batchAxis. See also: IReverseSequenceLayer
@NoException(value=true) public INormalizationLayer addNormalization(@ByRef ITensor input, @ByRef ITensor scale, @ByRef ITensor bias, @Cast(value="uint32_t") int axesMask)
input
- The input tensor to the layer.scale
- The scale tensor used to scale the normalized output.bias
- The bias tensor used to scale the normalized output.axesMask
- The axes on which to perform mean calculations.
The bit in position i of bitmask axesMask corresponds to explicit dimension i of the result.
E.g., the least significant bit corresponds to the first explicit dimension and the next to least
significant bit corresponds to the second explicit dimension.
The normalization layer works by performing normalization of the tensor \p input on the specified \p axesMask.
The result is then scaled by multiplying with \p scale and adding \p bias.
The shapes of \p scale and \p bias are expected to be the same, and must have the same rank and be
unidirectionally broadcastable to the shape of \p input. See also: INormalizationLayer
@ByRef @NoException(value=true) public IBuilder getBuilder()
IBuilder::createNetworkV2
Copyright © 2024. All rights reserved.