@Namespace(value="c10") @NoOffset @Properties(inherit=torch.class) public class TensorImpl extends Pointer
Modifier and Type | Class and Description |
---|---|
static class |
TensorImpl.ImplType |
static class |
TensorImpl.LongIdentity |
static class |
TensorImpl.SizesStridesPolicy |
static class |
TensorImpl.SymIntIdentity |
Pointer.CustomDeallocator, Pointer.Deallocator, Pointer.NativeDeallocator, Pointer.ReferenceCounter
Constructor and Description |
---|
TensorImpl(DispatchKeySet arg0,
TypeMeta data_type,
DeviceOptional device_opt)
Construct a 1-dim 0 size tensor that doesn't have a storage.
|
TensorImpl(int arg0,
Storage storage,
DispatchKeySet arg2,
TypeMeta data_type) |
TensorImpl(Pointer p)
Pointer cast constructor.
|
TensorImpl(short dispatch_key,
TypeMeta data_type,
DeviceOptional device_opt) |
TensorImpl(Storage storage,
DispatchKeySet arg1,
TypeMeta data_type)
Construct a 1-dim 0-size tensor backed by the given storage.
|
TensorImpl(Storage storage,
short dispatch_key,
TypeMeta data_type) |
TensorImpl(Storage storage,
torch.DispatchKey dispatch_key,
TypeMeta data_type) |
TensorImpl(TensorImpl.ImplType arg0,
Storage storage,
DispatchKeySet arg2,
TypeMeta data_type) |
TensorImpl(torch.DispatchKey dispatch_key,
TypeMeta data_type,
DeviceOptional device_opt) |
Modifier and Type | Method and Description |
---|---|
void |
_change_backend_component_keys(Device device)
XXX: do not use, private api!
Update the backend component related keys to the backend component
corresponding to this device.
|
Tensor |
_fw_grad(long level,
TensorBase self)
Return the accumulated gradient of a tensor.
|
LongArrayRef |
_generic_sizes(TensorImpl.LongIdentity arg0) |
SymIntArrayRef |
_generic_sizes(TensorImpl.SymIntIdentity arg0) |
long |
_generic_storage_offset(TensorImpl.LongIdentity arg0) |
SymInt |
_generic_storage_offset(TensorImpl.SymIntIdentity arg0) |
LongArrayRef |
_generic_strides(TensorImpl.LongIdentity arg0) |
SymIntArrayRef |
_generic_strides(TensorImpl.SymIntIdentity arg0) |
boolean |
_is_zerotensor()
Whether or not the tensor is a zerotensor
|
void |
_set_conj(boolean value)
Set whether or not to take the conjugate of the tensor (flip the imaginary
bit).
|
void |
_set_fw_grad(TensorBase new_grad,
TensorBase self,
long level,
boolean is_inplace_op)
Sets the forward gradient for this Tensor.
|
void |
_set_neg(boolean value)
Set whether or not to take the negative of the tensor (flip the negative
bit).
|
void |
_set_zero(boolean value)
Set whether or not the tensor is a zero tensor
|
boolean |
allow_tensor_metadata_change()
True if a tensor allows changes to its metadata (e.g.
|
AutogradMetaInterface |
autograd_meta()
Return the pointer to autograd metadata.
|
void |
bump_version() |
Pointer |
data()
More efficient helper for Tensor::data_ptr().
|
torch.DeviceType |
device_type()
The device type of a Tensor, e.g., DeviceType::CPU or DeviceType::CUDA.
|
Device |
device() |
long |
dim_default() |
long |
dim()
Return the number of dimensions of this tensor.
|
boolean |
dtype_initialized()
True if a tensor is dtype initialized.
|
TypeMeta |
dtype() |
void |
empty_tensor_restride_symint(byte memory_format) |
void |
empty_tensor_restride_symint(torch.MemoryFormat memory_format) |
void |
empty_tensor_restride(byte memory_format) |
void |
empty_tensor_restride(torch.MemoryFormat memory_format)
Set the strides of the tensor to match memory_format
WARNING: This function doesn't rearrange data and assumes the tensor is
memory contiguous
|
void |
Extend(long num,
float growthPct)
\brief Extends the outer-most dimension of this tensor by num elements,
preserving the existing data.
|
void |
FreeMemory()
Release whatever memory the tensor was holding but keep size and type
information.
|
void |
generic_set_sizes_contiguous(long... sizes) |
void |
generic_set_sizes_contiguous(LongArrayRef sizes) |
void |
generic_set_sizes_contiguous(SymIntArrayRef sizes) |
BackendMetaRef |
get_backend_meta_intrusive_ptr() |
BackendMeta |
get_backend_meta() |
long |
get_device() |
Tensor |
grad()
Return the accumulated gradient of a tensor.
|
boolean |
has_compatible_shallow_copy_type(DispatchKeySet from)
One TensorImpl can be copied to another TensorImpl if they have the same
DispatchKeySet.
|
boolean |
has_named_tensor_meta() |
boolean |
has_storage()
True if this tensor has storage.
|
boolean |
has_symbolic_sizes_strides() |
boolean |
is_conj()
Whether or not the imaginary part of the tensor should be negated
|
boolean |
is_contiguous_default(torch.MemoryFormat memory_format) |
boolean |
is_contiguous() |
boolean |
is_contiguous(torch.MemoryFormat memory_format)
Whether or not a tensor is laid out in contiguous memory.
|
boolean |
is_cpu() |
boolean |
is_cuda() |
boolean |
is_empty()
True if a tensor has no elements (e.g., numel() == 0).
|
boolean |
is_hip() |
boolean |
is_hpu() |
boolean |
is_inference() |
boolean |
is_ipu() |
boolean |
is_lazy() |
boolean |
is_meta() |
boolean |
is_metal() |
boolean |
is_mkldnn() |
boolean |
is_mps() |
boolean |
is_mtia() |
boolean |
is_neg()
Whether or not the tensor should be negated
|
boolean |
is_nested() |
boolean |
is_non_overlapping_and_dense_default() |
boolean |
is_non_overlapping_and_dense() |
boolean |
is_ort() |
boolean |
is_python_dispatch() |
boolean |
is_quantized() |
boolean |
is_sparse_csr() |
boolean |
is_sparse() |
boolean |
is_strides_like_channels_last_3d() |
boolean |
is_strides_like_channels_last() |
boolean |
is_strides_like_default(torch.MemoryFormat memory_format) |
boolean |
is_strides_like(torch.MemoryFormat memory_format) |
boolean |
is_ve() |
boolean |
is_vulkan() |
boolean |
is_wrapped_number()
True if a tensor was auto-wrapped from a C++ or Python number.
|
boolean |
is_xla() |
boolean |
is_xpu() |
long |
itemsize()
Return the size of a single element of this tensor in bytes.
|
DispatchKeySet |
key_set()
Return the DispatchKeySet corresponding to this Tensor, specifying
all of the DispatchKeys that this Tensor identifies as.
|
torch.Layout |
layout() |
Pointer |
mutable_data()
Return a void* data pointer to the actual data which this tensor refers to.
|
Tensor |
mutable_grad()
Return a mutable reference to the gradient.
|
NamedTensorMetaInterface |
named_tensor_meta()
Return the pointer to named tensor metadata.
|
long |
numel_default() |
long |
numel()
The number of elements in a tensor.
|
Pointer |
pyobj_slot() |
Pointer |
raw_mutable_data(TypeMeta meta)
Returns a mutable raw pointer of the underlying storage.
|
void |
release_resources()
Release (decref) storage, and any other external allocations.
|
void |
release_storage_and_set_meta_custom_data_ptr_error_msg_(StringOptional s) |
void |
remove_autograd_key() |
boolean |
requires_grad()
True if a tensor requires gradient.
|
void |
ReserveSpace(long outer_dim)
\brief Reserve space for the underlying tensor.
|
void |
Reshape(LongVector dims)
Resizes the tensor without touching underlying storage.
|
void |
set_allow_tensor_metadata_change(boolean value)
Set whether a tensor allows changes to its metadata (e.g.
|
void |
set_autograd_meta(AutogradMetaInterface autograd_meta)
Set the pointer to autograd metadata.
|
void |
set_backend_meta(BackendMetaRef backend_meta) |
void |
set_custom_device(boolean custom_device) |
void |
set_custom_layout(boolean custom_layout) |
void |
set_custom_sizes_strides(byte policy) |
void |
set_custom_sizes_strides(TensorImpl.SizesStridesPolicy policy) |
void |
set_named_tensor_meta(NamedTensorMetaInterface named_tensor_meta)
Set the pointer to named tensor metadata.
|
void |
set_python_custom_device(boolean custom_device) |
void |
set_python_custom_layout(boolean custom_layout) |
void |
set_python_custom_sizes_strides(byte policy) |
void |
set_python_custom_sizes_strides(TensorImpl.SizesStridesPolicy policy) |
void |
set_python_dispatch(boolean k) |
void |
set_requires_grad(boolean requires_grad)
Set whether or not a tensor requires gradient.
|
void |
set_size(long dim,
long new_size)
Change the size at some dimension.
|
void |
set_sizes_and_strides(long[] new_size,
long... new_stride) |
void |
set_sizes_and_strides(long[] new_size,
long[] new_stride,
LongOptional storage_offset) |
void |
set_sizes_and_strides(LongArrayRef new_size,
LongArrayRef new_stride) |
void |
set_sizes_and_strides(LongArrayRef new_size,
LongArrayRef new_stride,
LongOptional storage_offset)
Set the sizes and strides of a tensor.
|
void |
set_sizes_and_strides(SymIntArrayRef sizes,
SymIntArrayRef strides) |
void |
set_sizes_and_strides(SymIntArrayRef sizes,
SymIntArrayRef strides,
SymIntOptional storage_offset) |
void |
set_sizes_contiguous(long... new_size) |
void |
set_sizes_contiguous(LongArrayRef new_size)
Like set_sizes_and_strides but assumes contiguous strides.
|
void |
set_storage_access_should_throw() |
void |
set_storage_and_dtype(Storage storage,
TypeMeta data_type) |
void |
set_storage_keep_dtype(Storage storage) |
void |
set_storage_offset(long storage_offset)
Set the offset into the storage of this tensor.
|
void |
set_stride(long dim,
long new_stride)
Change the stride at some dimension.
|
void |
set_version_counter(VariableVersion version_counter) |
void |
set_wrapped_number(boolean value)
Set whether or not a tensor was auto-wrapped from a C++ or Python
number.
|
TensorImplPtr |
shallow_copy_and_detach(VariableVersion version_counter,
boolean allow_tensor_metadata_change) |
void |
shallow_copy_from(TensorImplPtr impl)
Shallow-copies data from another TensorImpl into this TensorImpl.
|
void |
ShareData(TensorImpl src)
\brief Shares the data with another tensor.
|
void |
ShareExternalPointer(DataPtr data_ptr,
TypeMeta data_type,
long size_bytes) |
long |
size(long d)
Return the size of a tensor at some dimension, wrapping the dimension if
necessary.
|
LongArrayRef |
sizes_default() |
LongArrayRef |
sizes()
Return a reference to the sizes of this tensor.
|
boolean |
storage_initialized()
True if a tensor is storage initialized.
|
long |
storage_offset_default() |
long |
storage_offset()
Return the offset in number of elements into the storage that this
tensor points to.
|
Storage |
storage()
Return the underlying storage of a Tensor.
|
long |
stride(long d)
Return the stride of a tensor at some dimension, wrapping the dimension
if necessary.
|
LongArrayRef |
strides_default() |
LongArrayRef |
strides()
Return a reference to the strides of this tensor.
|
boolean |
support_as_strided()
Returns true if Tensor supports as_strided and as_strided_backward.
|
SymInt |
sym_numel_default() |
SymInt |
sym_numel() |
SymInt |
sym_size(long d) |
SymIntArrayRef |
sym_sizes_default() |
SymIntArrayRef |
sym_sizes() |
SymInt |
sym_storage_offset_default() |
SymInt |
sym_storage_offset() |
SymIntArrayRef |
sym_strides_default() |
SymIntArrayRef |
sym_strides() |
boolean |
unique_version() |
Storage |
unsafe_storage()
Return the underlying storage, unsafely assuming this is a basic strided
tensor.
|
VariableVersion |
version_counter() |
address, asBuffer, asByteBuffer, availablePhysicalBytes, calloc, capacity, capacity, close, deallocate, deallocate, deallocateReferences, deallocator, deallocator, equals, fill, formatBytes, free, getDirectBufferAddress, getPointer, getPointer, getPointer, getPointer, hashCode, interruptDeallocatorThread, isNull, isNull, limit, limit, malloc, maxBytes, maxPhysicalBytes, memchr, memcmp, memcpy, memmove, memset, offsetAddress, offsetof, offsetof, parseBytes, physicalBytes, physicalBytesInaccurate, position, position, put, realloc, referenceCount, releaseReference, retainReference, setNull, sizeof, sizeof, toString, totalBytes, totalCount, totalPhysicalBytes, withDeallocator, zero
public TensorImpl(Pointer p)
Pointer(Pointer)
public TensorImpl(@Cast(value={"","c10::Storage&&"}) @StdMove Storage storage, @ByVal DispatchKeySet arg1, @Const @ByVal TypeMeta data_type)
public TensorImpl(TensorImpl.ImplType arg0, @Cast(value={"","c10::Storage&&"}) @StdMove Storage storage, @ByVal DispatchKeySet arg2, @Const @ByVal TypeMeta data_type)
public TensorImpl(@Cast(value="c10::TensorImpl::ImplType") int arg0, @Cast(value={"","c10::Storage&&"}) @StdMove Storage storage, @ByVal DispatchKeySet arg2, @Const @ByVal TypeMeta data_type)
public TensorImpl(@ByVal DispatchKeySet arg0, @Const @ByVal TypeMeta data_type, @ByVal DeviceOptional device_opt)
public TensorImpl(@Cast(value={"","c10::Storage&&"}) @StdMove Storage storage, torch.DispatchKey dispatch_key, @Const @ByVal TypeMeta data_type)
public TensorImpl(@Cast(value={"","c10::Storage&&"}) @StdMove Storage storage, @Cast(value="c10::DispatchKey") short dispatch_key, @Const @ByVal TypeMeta data_type)
public TensorImpl(torch.DispatchKey dispatch_key, @Const @ByVal TypeMeta data_type, @ByVal DeviceOptional device_opt)
public void release_resources()
intrusive_ptr_target
and is used to implement weak
tensors.
@ByVal public DispatchKeySet key_set()
@ByVal public LongArrayRef sizes()
@ByVal public SymIntArrayRef sym_sizes()
@ByVal public LongArrayRef sizes_default()
@ByVal public SymIntArrayRef sym_sizes_default()
@ByVal public LongArrayRef _generic_sizes(@ByVal TensorImpl.LongIdentity arg0)
@ByVal public SymIntArrayRef _generic_sizes(@ByVal TensorImpl.SymIntIdentity arg0)
@ByVal public LongArrayRef _generic_strides(@ByVal TensorImpl.LongIdentity arg0)
@ByVal public SymIntArrayRef _generic_strides(@ByVal TensorImpl.SymIntIdentity arg0)
@Cast(value="int64_t") public long _generic_storage_offset(@ByVal TensorImpl.LongIdentity arg0)
@ByVal public SymInt _generic_storage_offset(@ByVal TensorImpl.SymIntIdentity arg0)
@Cast(value="int64_t") public long numel()
@Cast(value="int64_t") public long dim()
@Cast(value="int64_t") public long storage_offset()
@ByVal public LongArrayRef strides()
@ByVal public SymIntArrayRef sym_strides()
@ByVal public LongArrayRef strides_default()
@ByVal public SymIntArrayRef sym_strides_default()
@Cast(value="bool") public boolean is_contiguous(@ByVal(nullValue="at::MemoryFormat::Contiguous") torch.MemoryFormat memory_format)
@Cast(value="bool") public boolean is_contiguous_default(@ByVal torch.MemoryFormat memory_format)
@Cast(value="bool") public boolean is_strides_like_default(@ByVal torch.MemoryFormat memory_format)
@Cast(value="bool") public boolean is_non_overlapping_and_dense_default()
@Cast(value="int64_t") public long size(@Cast(value="int64_t") long d)
@Cast(value="int64_t") public long stride(@Cast(value="int64_t") long d)
@Cast(value="bool") public boolean has_storage()
@Cast(value={"","c10::Storage&&"}) @StdMove public Storage storage()
@Cast(value={"","c10::Storage&&"}) @StdMove public Storage unsafe_storage()
storage
access would throw, this returns a
default-constructed Storage.
public void remove_autograd_key()
public torch.Layout layout()
@Cast(value="bool") public boolean is_wrapped_number()
is_wrapped_number_
set to true.
Wrapped numbers do not participate in the result type computation for
mixed-type operations if there are any Tensors that are not wrapped
numbers. This is useful, because we want 't + 2' to work with
any type of tensor, not just LongTensor (which is what integers
in Python represent).
Otherwise, they behave like their non-wrapped equivalents.
See [Result type computation] in TensorIterator.h.
Why did we opt for wrapped numbers, as opposed to just having
an extra function add(Tensor, Scalar)? This helps greatly reduce
the amount of code we have to write for add, when actually
a Tensor-Scalar addition is really just a Tensor-Tensor
addition when the RHS is 0-dim (except for promotion behavior.)
public void set_wrapped_number(@Cast(value="bool") boolean value)
@Cast(value="bool") public boolean support_as_strided()
as_strided
case.
It can be expanded as needed in the future, e.g. sparse Tensor.
public void set_requires_grad(@Cast(value="bool") boolean requires_grad)
@Cast(value="bool") public boolean requires_grad()
@ByRef public Tensor mutable_grad()
t.grad() = x
to set a gradient to a completely new tensor.
@Const @ByRef public Tensor grad()
@Cast(value="bool") public boolean is_conj()
public void _set_conj(@Cast(value="bool") boolean value)
public void _change_backend_component_keys(@ByVal Device device)
@Cast(value="bool") public boolean _is_zerotensor()
public void _set_zero(@Cast(value="bool") boolean value)
public void _set_neg(@Cast(value="bool") boolean value)
@Const @ByRef public Tensor _fw_grad(@Cast(value="uint64_t") long level, @Const @ByRef TensorBase self)
public void _set_fw_grad(@Const @ByRef TensorBase new_grad, @Const @ByRef TensorBase self, @Cast(value="uint64_t") long level, @Cast(value="bool") boolean is_inplace_op)
@Const public Pointer data()
public Pointer mutable_data()
@Cast(value="size_t") public long itemsize()
public void set_backend_meta(@ByVal BackendMetaRef backend_meta)
public BackendMeta get_backend_meta()
@ByVal public BackendMetaRef get_backend_meta_intrusive_ptr()
public void release_storage_and_set_meta_custom_data_ptr_error_msg_(@ByVal StringOptional s)
@Cast(value="bool") public boolean is_empty()
public void set_sizes_and_strides(@ByVal SymIntArrayRef sizes, @ByVal SymIntArrayRef strides, @ByVal(nullValue="c10::optional<c10::SymInt>(c10::nullopt)") SymIntOptional storage_offset)
public void set_sizes_and_strides(@ByVal SymIntArrayRef sizes, @ByVal SymIntArrayRef strides)
public void generic_set_sizes_contiguous(@ByVal SymIntArrayRef sizes)
public void generic_set_sizes_contiguous(@ByVal LongArrayRef sizes)
public void generic_set_sizes_contiguous(@ByVal @Cast(value={"int64_t*","c10::ArrayRef<int64_t>","std::vector<int64_t>&"}) @StdVector(value="int64_t") long... sizes)
public void set_size(@Cast(value="int64_t") long dim, @Cast(value="int64_t") long new_size)
set_sizes_and_strides
,
which is harder to misuse.
public void set_stride(@Cast(value="int64_t") long dim, @Cast(value="int64_t") long new_stride)
set_sizes_and_strides
,
which is harder to misuse.
public void set_storage_offset(@Cast(value="int64_t") long storage_offset)
public void set_sizes_contiguous(@ByVal LongArrayRef new_size)
public void set_sizes_contiguous(@ByVal @Cast(value={"int64_t*","c10::ArrayRef<int64_t>","std::vector<int64_t>&"}) @StdVector(value="int64_t") long... new_size)
public void set_sizes_and_strides(@ByVal LongArrayRef new_size, @ByVal LongArrayRef new_stride, @ByVal(nullValue="c10::optional<int64_t>(c10::nullopt)") LongOptional storage_offset)
public void set_sizes_and_strides(@ByVal LongArrayRef new_size, @ByVal LongArrayRef new_stride)
public void set_sizes_and_strides(@ByVal @Cast(value={"int64_t*","c10::ArrayRef<int64_t>","std::vector<int64_t>&"}) @StdVector(value="int64_t") long[] new_size, @ByVal @Cast(value={"int64_t*","c10::ArrayRef<int64_t>","std::vector<int64_t>&"}) @StdVector(value="int64_t") long[] new_stride, @ByVal(nullValue="c10::optional<int64_t>(c10::nullopt)") LongOptional storage_offset)
public void set_sizes_and_strides(@ByVal @Cast(value={"int64_t*","c10::ArrayRef<int64_t>","std::vector<int64_t>&"}) @StdVector(value="int64_t") long[] new_size, @ByVal @Cast(value={"int64_t*","c10::ArrayRef<int64_t>","std::vector<int64_t>&"}) @StdVector(value="int64_t") long... new_stride)
public void set_allow_tensor_metadata_change(@Cast(value="bool") boolean value)
@Cast(value="bool") public boolean allow_tensor_metadata_change()
public void set_autograd_meta(@UniquePtr AutogradMetaInterface autograd_meta)
public AutogradMetaInterface autograd_meta()
public void set_named_tensor_meta(@UniquePtr NamedTensorMetaInterface named_tensor_meta)
public NamedTensorMetaInterface named_tensor_meta()
@Cast(value="bool") public boolean has_compatible_shallow_copy_type(@ByVal DispatchKeySet from)
@ByVal public TensorImplPtr shallow_copy_and_detach(@Const @ByRef VariableVersion version_counter, @Cast(value="bool") boolean allow_tensor_metadata_change)
public void shallow_copy_from(@Const @ByRef TensorImplPtr impl)
allow_tensor_metadata_change_
, see NOTE [ TensorImpl Shallow-Copying ].
public void set_version_counter(@Const @ByRef VariableVersion version_counter)
@Const @ByRef @NoException(value=true) public VariableVersion version_counter()
public void bump_version()
public torch.DeviceType device_type()
public void Extend(@Cast(value="int64_t") long num, float growthPct)
public void ReserveSpace(@Cast(value="int64_t") long outer_dim)
public void Reshape(@Cast(value="const std::vector<int64_t>*") @ByRef LongVector dims)
public void FreeMemory()
public void ShareData(@Const @ByRef TensorImpl src)
public void ShareExternalPointer(@Cast(value={"","c10::DataPtr&&"}) @StdMove DataPtr data_ptr, @Const @ByVal TypeMeta data_type, @Cast(value="size_t") long size_bytes)
public Pointer raw_mutable_data(@Const @ByRef TypeMeta meta)
@Cast(value="bool") public boolean storage_initialized()
@Cast(value="bool") @NoException(value=true) public boolean dtype_initialized()
public void set_storage_keep_dtype(@Cast(value={"","c10::Storage&&"}) @StdMove Storage storage)
public void set_storage_and_dtype(@Cast(value={"","c10::Storage&&"}) @StdMove Storage storage, @Const @ByVal TypeMeta data_type)
public void empty_tensor_restride_symint(torch.MemoryFormat memory_format)
public void empty_tensor_restride_symint(@Cast(value="c10::MemoryFormat") byte memory_format)
public void empty_tensor_restride(torch.MemoryFormat memory_format)
public void empty_tensor_restride(@Cast(value="c10::MemoryFormat") byte memory_format)
@Cast(value="bool") public boolean is_strides_like(@ByVal torch.MemoryFormat memory_format)
@Cast(value="bool") public boolean is_strides_like_channels_last_3d()
public void set_storage_access_should_throw()
public void set_custom_sizes_strides(TensorImpl.SizesStridesPolicy policy)
public void set_custom_sizes_strides(@Cast(value="c10::TensorImpl::SizesStridesPolicy") byte policy)
public void set_python_custom_sizes_strides(TensorImpl.SizesStridesPolicy policy)
public void set_python_custom_sizes_strides(@Cast(value="c10::TensorImpl::SizesStridesPolicy") byte policy)
public void set_python_custom_device(@Cast(value="bool") boolean custom_device)
Copyright © 2024. All rights reserved.