@Namespace(value="at") @NoOffset @Properties(inherit=torch.class) public class TensorIteratorBase extends MetaBase
Pointer.CustomDeallocator, Pointer.Deallocator, Pointer.NativeDeallocator, Pointer.ReferenceCounter
Constructor and Description |
---|
TensorIteratorBase(Pointer p)
Pointer cast constructor.
|
Modifier and Type | Method and Description |
---|---|
void |
_unsafe_set_arg_data(int arg,
Pointer data) |
void |
_unsafe_set_arg_strides(int arg,
long... strides) |
void |
_unsafe_set_arg_strides(int arg,
LongArrayRef strides) |
void |
build_binary_float_op(TensorBase out,
TensorBase a,
TensorBase b) |
void |
build_binary_op(TensorBase out,
TensorBase a,
TensorBase b) |
void |
build_borrowing_binary_float_op(TensorBase out,
TensorBase a,
TensorBase b) |
void |
build_borrowing_binary_op(TensorBase out,
TensorBase a,
TensorBase b) |
void |
build_borrowing_comparison_op(TensorBase out,
TensorBase a,
TensorBase b) |
void |
build_borrowing_except_last_argument_comparison_op(TensorBase out,
TensorBase a,
TensorBase b) |
void |
build_borrowing_unary_float_op(TensorBase out,
TensorBase a) |
void |
build_borrowing_unary_force_boolean_op(TensorBase out,
TensorBase a) |
void |
build_borrowing_unary_op(TensorBase out,
TensorBase a) |
void |
build_comparison_op(TensorBase out,
TensorBase a,
TensorBase b) |
void |
build_output_borrowing_argument_owning_unary_op(TensorBase out,
TensorBase a) |
void |
build_ternary_op(TensorBase out,
TensorBase a,
TensorBase b,
TensorBase c) |
void |
build_unary_float_op(TensorBase out,
TensorBase a) |
void |
build_unary_op(TensorBase out,
TensorBase a) |
void |
build(TensorIteratorConfig arg0) |
boolean |
can_use_32bit_indexing()
true if the stride computation can use 32-bit arithmetic.
|
void |
cast_outputs() |
torch.ScalarType |
common_dtype() |
SymDimVector |
compatible_stride(int element_size)
Create a strides array for a Tensor with shape of this iterator.
|
Pointer |
data_ptr(int arg) |
torch.DeviceType |
device_type() |
torch.DeviceType |
device_type(int arg) |
Device |
device() |
Device |
device(int arg) |
torch.ScalarType |
dtype() |
torch.ScalarType |
dtype(int arg) |
long |
element_size(int arg) |
SymDimVector |
get_base_ptrs() |
SymDimVector |
get_dim_strides(int dim)
Helper functions for CPU iteration
|
int |
get_dim_to_split()
Returns the dimension with the largest extent: (size[dim]-1) * stride[dim]
|
SymDimVector |
get_inner_strides() |
SymDimVector |
get_strides() |
boolean |
has_contiguous_first_dim() |
TensorBase |
input_base() |
TensorBase |
input_base(int arg) |
torch.ScalarType |
input_dtype() |
torch.ScalarType |
input_dtype(int arg) |
Tensor |
input() |
Tensor |
input(int arg) |
DimVector |
invert_perm(long... input) |
DimVector |
invert_perm(LongArrayRef input)
Inverts the re-ordering done by reorder_dimensions.
|
boolean |
is_contiguous()
Reducible to 1-dimensional and all operands are contiguous
|
boolean |
is_cpu_scalar(int arg) |
boolean |
is_dim_reduced(int dim) |
boolean |
is_final_output()
Whether this iterator produces the actual output,
as opposed to something that will be accumulated further.
|
boolean |
is_scalar(int arg) |
boolean |
is_trivial_1d()
1-dimensional iteration and no buffering or type conversion
|
void |
narrow(int dim,
long start,
long size)
Shrinks an iterated dimension
|
int |
ndim() |
int |
ninputs() |
int |
noutputs() |
int |
ntensors() |
long |
num_output_elements()
number of elements in the output operand.
|
int |
num_reduce_dims()
number of reduced dimensions in a reduction operation
|
long |
numel() |
TensorBase |
output_base() |
TensorBase |
output_base(int arg) |
Tensor |
output() |
Tensor |
output(int arg) |
void |
remove_operand(int arg)
Removes an operand from this iterator
|
void |
select_all_keeping_dim(int start_dim,
long... starts) |
void |
select_all_keeping_dim(int start_dim,
LongArrayRef starts)
Narrows every dim after and including
start_dim to size one. |
void |
set_output_raw_strided(long output_idx,
long[] sizes,
long[] strides,
TensorOptions options,
DimnameVector names) |
void |
set_output_raw_strided(long output_idx,
LongArrayRef sizes,
LongArrayRef strides,
TensorOptions options,
DimnameArrayRef names) |
LongArrayRef |
shape() |
boolean |
should_accumulate()
If the kernel should accumulate into the output.
|
TensorIterator |
split(int dim)
Splits this TensorIterator into two iterators.
|
LongArrayRef |
strides(int arg)
Accessors for each operand
|
TensorBase |
tensor_base(int arg) |
Tensor |
tensor(int arg) |
void |
unsafe_replace_operand(int arg,
Pointer data)
Replaces the data pointer for the operand at index arg. |
LongArrayRef |
view_offsets() |
SplitUntil32Bit |
with_32bit_indexing()
An "iteratable" object that recursively splits this iterator into
sub-iterators that can use 32-bit indexing.
|
maybe_get_output, maybe_get_output, set_output_contiguous, set_output_contiguous, set_output_contiguous, set_output_contiguous, set_output_raw_strided, set_output_raw_strided, set_output_strided, set_output_strided, set_output_strided, set_output_strided
address, asBuffer, asByteBuffer, availablePhysicalBytes, calloc, capacity, capacity, close, deallocate, deallocate, deallocateReferences, deallocator, deallocator, equals, fill, formatBytes, free, getDirectBufferAddress, getPointer, getPointer, getPointer, getPointer, hashCode, interruptDeallocatorThread, isNull, isNull, limit, limit, malloc, maxBytes, maxPhysicalBytes, memchr, memcmp, memcpy, memmove, memset, offsetAddress, offsetof, offsetof, parseBytes, physicalBytes, physicalBytesInaccurate, position, position, put, realloc, referenceCount, releaseReference, retainReference, setNull, sizeof, sizeof, toString, totalBytes, totalCount, totalPhysicalBytes, withDeallocator, zero
public TensorIteratorBase(Pointer p)
Pointer(Pointer).
public void build(@ByRef TensorIteratorConfig arg0)
public int ndim()
@ByVal public LongArrayRef shape()
public int ntensors()
public int noutputs()
public int ninputs()
@ByVal public LongArrayRef view_offsets()
@Cast(value="int64_t") public long num_output_elements()
public int num_reduce_dims()
@Cast(value="bool") public boolean is_trivial_1d()
@Cast(value="bool") public boolean is_contiguous()
@ByVal public LongArrayRef strides(int arg)
public Pointer data_ptr(int arg)
public torch.ScalarType dtype(int arg)
public torch.ScalarType dtype()
public torch.ScalarType common_dtype()
public torch.ScalarType input_dtype(int arg)
public torch.ScalarType input_dtype()
public torch.DeviceType device_type(int arg)
public torch.DeviceType device_type()
@Const @ByRef public TensorBase tensor_base(int arg)
@Const @ByRef public TensorBase output_base(int arg)
@Const @ByRef public TensorBase output_base()
@Const @ByRef public TensorBase input_base(int arg)
@Const @ByRef public TensorBase input_base()
public void cast_outputs()
public void remove_operand(int arg)
public void narrow(int dim, @Cast(value="int64_t") long start, @Cast(value="int64_t") long size)
public void select_all_keeping_dim(int start_dim, @ByVal LongArrayRef starts)
Narrows every dim after and including start_dim to size one.
public void select_all_keeping_dim(int start_dim, @ByVal @Cast(value={"int64_t*","c10::ArrayRef<int64_t>","std::vector<int64_t>&"}) @StdVector(value="int64_t") long... starts)
public void unsafe_replace_operand(int arg, Pointer data)
Replaces the data pointer for the operand at index arg.
The new pointer should have the same sizes, strides and dtype as the original.
@UniquePtr @ByVal public TensorIterator split(int dim)
with_32bit_indexing().
public int get_dim_to_split()
@ByVal @Cast(value="at::TensorIteratorBase::StrideVector*") public SymDimVector compatible_stride(int element_size)
element_size specifies the size of Tensor's data type in bytes (e.g. 4 for float).
@ByVal public DimVector invert_perm(@ByVal LongArrayRef input)
@ByVal public DimVector invert_perm(@ByVal @Cast(value={"int64_t*","c10::ArrayRef<int64_t>","std::vector<int64_t>&"}) @StdVector(value="int64_t") long... input)
@ByVal @Cast(value="at::TensorIteratorBase::StrideVector*") public SymDimVector get_dim_strides(int dim)
@ByVal @Cast(value="at::TensorIteratorBase::StrideVector*") public SymDimVector get_strides()
@ByVal @Cast(value="at::TensorIteratorBase::StrideVector*") public SymDimVector get_inner_strides()
@ByVal @Cast(value="at::TensorIteratorBase::PtrVector*") public SymDimVector get_base_ptrs()
public void _unsafe_set_arg_strides(int arg, @ByVal LongArrayRef strides)
public void _unsafe_set_arg_strides(int arg, @ByVal @Cast(value={"int64_t*","c10::ArrayRef<int64_t>","std::vector<int64_t>&"}) @StdVector(value="int64_t") long... strides)
public void _unsafe_set_arg_data(int arg, Pointer data)
@Cast(value="bool") public boolean can_use_32bit_indexing()
@ByVal public SplitUntil32Bit with_32bit_indexing()
@Cast(value="bool") public boolean should_accumulate()
@Cast(value="bool") public boolean is_final_output()
public void set_output_raw_strided(@Cast(value="int64_t") long output_idx, @ByVal LongArrayRef sizes, @ByVal LongArrayRef strides, @ByVal TensorOptions options, @ByVal DimnameArrayRef names)
set_output_raw_strided
in class MetaBase
public void set_output_raw_strided(@Cast(value="int64_t") long output_idx, @ByVal @Cast(value={"int64_t*","c10::ArrayRef<int64_t>","std::vector<int64_t>&"}) @StdVector(value="int64_t") long[] sizes, @ByVal @Cast(value={"int64_t*","c10::ArrayRef<int64_t>","std::vector<int64_t>&"}) @StdVector(value="int64_t") long[] strides, @ByVal TensorOptions options, @ByVal DimnameVector names)
set_output_raw_strided
in class MetaBase
public void build_binary_float_op(@Const @ByRef TensorBase out, @Const @ByRef TensorBase a, @Const @ByRef TensorBase b)
public void build_borrowing_binary_float_op(@Const @ByRef TensorBase out, @Const @ByRef TensorBase a, @Const @ByRef TensorBase b)
public void build_binary_op(@Const @ByRef TensorBase out, @Const @ByRef TensorBase a, @Const @ByRef TensorBase b)
public void build_borrowing_binary_op(@Const @ByRef TensorBase out, @Const @ByRef TensorBase a, @Const @ByRef TensorBase b)
public void build_unary_float_op(@Const @ByRef TensorBase out, @Const @ByRef TensorBase a)
public void build_borrowing_unary_float_op(@Const @ByRef TensorBase out, @Const @ByRef TensorBase a)
public void build_unary_op(@Const @ByRef TensorBase out, @Const @ByRef TensorBase a)
public void build_output_borrowing_argument_owning_unary_op(@Const @ByRef TensorBase out, @Const @ByRef TensorBase a)
public void build_borrowing_unary_op(@Const @ByRef TensorBase out, @Const @ByRef TensorBase a)
public void build_borrowing_unary_force_boolean_op(@Const @ByRef TensorBase out, @Const @ByRef TensorBase a)
public void build_comparison_op(@Const @ByRef TensorBase out, @Const @ByRef TensorBase a, @Const @ByRef TensorBase b)
public void build_borrowing_comparison_op(@Const @ByRef TensorBase out, @Const @ByRef TensorBase a, @Const @ByRef TensorBase b)
public void build_borrowing_except_last_argument_comparison_op(@Const @ByRef TensorBase out, @Const @ByRef TensorBase a, @Const @ByRef TensorBase b)
public void build_ternary_op(@Const @ByRef TensorBase out, @Const @ByRef TensorBase a, @Const @ByRef TensorBase b, @Const @ByRef TensorBase c)
Copyright © 2024. All rights reserved.