@Namespace(value="at") @NoOffset @Properties(inherit=torch.class) public class TensorBase extends AbstractTensor
Pointer.CustomDeallocator, Pointer.Deallocator, Pointer.NativeDeallocator, Pointer.ReferenceCounter
Constructor and Description |
---|
TensorBase() |
TensorBase(long size)
Native array allocator.
|
TensorBase(Pointer p)
Pointer cast constructor.
|
TensorBase(TensorBase arg0) |
TensorBase(TensorImplPtr tensor_impl) |
Modifier and Type | Method and Description |
---|---|
TensorBase |
_base()
Returns the
Variable that this Variable is a view of. |
boolean |
_is_zerotensor() |
void |
_set_conj(boolean conjugate) |
void |
_set_neg(boolean negative) |
void |
_set_zero(boolean _zero) |
long |
_version() |
Pointer |
const_data_ptr() |
TensorBase |
contiguous() |
TensorBase |
contiguous(byte memory_format) |
TensorBase |
contiguous(torch.MemoryFormat memory_format) |
BoolPointer |
data_ptr_bool() |
BytePointer |
data_ptr_byte() |
BytePointer |
data_ptr_char() |
DoublePointer |
data_ptr_double() |
FloatPointer |
data_ptr_float() |
IntPointer |
data_ptr_int() |
LongPointer |
data_ptr_long() |
ShortPointer |
data_ptr_short() |
Pointer |
data_ptr() |
TensorBase |
data() |
boolean |
defined() |
Device |
device()
Returns a
Tensor 's device. |
long |
dim() |
TypeMeta |
dtype()
Returns a
Tensor 's dtype (TypeMeta ). |
long |
element_size() |
TensorBase |
fill_(Scalar scalar)
Should be used if *this can reasonably be expected to be contiguous and
performance is important.
|
NamedTensorMeta |
get_named_tensor_meta()
Returns a
Tensor 's dimension names data structure |
TensorImplPtr |
getIntrusivePtr() |
TensorBase |
getPointer(long i) |
Node |
grad_fn()
Gets the gradient function of the
Variable . |
boolean |
has_names()
Returns if a
Tensor has any dimension names |
boolean |
has_storage() |
boolean |
is_alias_of(TensorBase other) |
boolean |
is_complex() |
boolean |
is_conj() |
boolean |
is_contiguous() |
boolean |
is_contiguous(torch.MemoryFormat memory_format) |
boolean |
is_cpu()
Returns if a
Tensor has CPU backend. |
boolean |
is_cuda()
Returns if a
Tensor has CUDA backend. |
boolean |
is_floating_point() |
boolean |
is_hip()
Returns if a
Tensor has HIP backend. |
boolean |
is_hpu()
Returns if a
Tensor has HPU backend. |
boolean |
is_inference()
Returns if a
Tensor is an inference tensor. |
boolean |
is_ipu()
Returns if a
Tensor has IPU backend. |
boolean |
is_lazy()
Returns if a
Tensor has Lazy backend. |
boolean |
is_leaf() |
boolean |
is_meta()
Returns if a
Tensor is a meta tensor. |
boolean |
is_metal()
Returns if a
Tensor is metal tensor. |
boolean |
is_mkldnn()
Returns if a
Tensor is mkldnn tensor. |
boolean |
is_mps()
Returns if a
Tensor is mps tensor. |
boolean |
is_mtia()
Returns if a
Tensor has MTIA backend. |
boolean |
is_neg() |
boolean |
is_nested() |
boolean |
is_non_overlapping_and_dense() |
boolean |
is_ort()
Returns if a
Tensor is ort tensor. |
boolean |
is_quantized()
Returns if a
Tensor has quantized backend. |
boolean |
is_same(TensorBase other) |
boolean |
is_signed() |
boolean |
is_sparse_csr()
Returns if a
Tensor has a sparse CSR backend. |
boolean |
is_sparse()
Returns if a
Tensor has sparse backend. |
boolean |
is_ve()
Returns if a
Tensor has VE backend. |
boolean |
is_view()
Returns true if this
Variable is a view of another Variable . |
boolean |
is_vulkan()
Returns if a
Tensor is vulkan tensor. |
boolean |
is_xla()
Returns if a
Tensor has XLA backend. |
boolean |
is_xpu()
Returns if a
Tensor has XPU backend. |
long |
itemsize() |
DispatchKeySet |
key_set() |
torch.Layout |
layout()
Returns a
Tensor 's layout. |
Pointer |
mutable_data_ptr() |
BytePointer |
name() |
DimnameArrayRef |
names() |
long |
nbytes() |
long |
ndimension() |
long |
numel() |
DimnameListOptional |
opt_names() |
TensorOptions |
options()
Returns the
TensorOptions corresponding to this Tensor . |
long |
output_nr() |
TensorBase |
position(long position) |
void |
print() |
TensorBase |
put(TensorBase x) |
QuantizerPtr |
quantizer()
If a tensor is a quantized tensor, returns its quantizer
TODO: it's not in native_functions.yaml yet as it's not exposed to python
|
int |
register_hook(TensorTensorHook hook) |
int |
register_hook(VoidTensorHook hook)
Registers a backward hook.
|
void |
remove_hook(int pos)
Remove hook at given position
|
TensorBase |
requires_grad_() |
TensorBase |
requires_grad_(boolean _requires_grad) |
boolean |
requires_grad() |
void |
reset() |
void |
retain_grad() |
boolean |
retains_grad() |
torch.ScalarType |
scalar_type() |
void |
set_data(TensorBase new_data) |
TensorBase |
set_requires_grad(boolean requires_grad)
\fn bool is_leaf() const;
All Tensors that have
requires_grad() which is false will be leaf Tensors by convention. |
void |
share_memory_() |
long |
size(long dim) |
LongArrayRef |
sizes() |
long |
storage_offset() |
Storage |
storage() |
long |
stride(long dim) |
LongArrayRef |
strides() |
torch.MemoryFormat |
suggest_memory_format() |
torch.MemoryFormat |
suggest_memory_format(boolean channels_last_strides_exact_match) |
SymInt |
sym_nbytes() |
SymInt |
sym_numel() |
SymInt |
sym_size(long dim) |
SymIntArrayRef |
sym_sizes() |
SymInt |
sym_storage_offset() |
SymInt |
sym_stride(long dim) |
SymIntArrayRef |
sym_strides() |
TensorBase |
tensor_data()
NOTE: This is similar to the legacy
.data() function on Variable , and is intended
to be used from functions that need to access the Variable 's equivalent Tensor
(i.e. |
TensorBase |
to() |
TensorBase |
to(TensorOptions options,
boolean non_blocking,
boolean copy,
MemoryFormatOptional memory_format) |
String |
toString() |
TensorImpl |
unsafeGetTensorImpl() |
TensorImplPtr |
unsafeReleaseIntrusivePtr() |
TensorImpl |
unsafeReleaseTensorImpl() |
long |
use_count() |
TensorBase |
variable_data()
NOTE:
var.variable_data() in C++ has the same semantics as tensor.data
in Python, which create a new Variable that shares the same storage and
tensor metadata with the original Variable , but with a completely new
autograd history. |
long |
weak_use_count() |
static TensorBase |
wrap_tensor_impl(TensorImplPtr tensor_impl) |
TensorBase |
zero_() |
create, create, create, create, create, create, create, create, create, create, create, create, create, create, create, create, createBuffer, createBuffer, createIndexer, createIndexer, shape
address, asBuffer, asByteBuffer, availablePhysicalBytes, calloc, capacity, capacity, close, deallocate, deallocate, deallocateReferences, deallocator, deallocator, equals, fill, formatBytes, free, getDirectBufferAddress, getPointer, getPointer, getPointer, hashCode, interruptDeallocatorThread, isNull, isNull, limit, limit, malloc, maxBytes, maxPhysicalBytes, memchr, memcmp, memcpy, memmove, memset, offsetAddress, offsetof, offsetof, parseBytes, physicalBytes, physicalBytesInaccurate, position, put, realloc, referenceCount, releaseReference, retainReference, setNull, sizeof, sizeof, totalBytes, totalCount, totalPhysicalBytes, withDeallocator, zero
public TensorBase(Pointer p)
Pointer(Pointer)
.public TensorBase(long size)
Pointer.position(long)
.public TensorBase()
public TensorBase(@ByVal TensorImplPtr tensor_impl)
public TensorBase(@Const @ByRef TensorBase arg0)
public TensorBase position(long position)
public TensorBase getPointer(long i)
getPointer
in class Pointer
@ByVal public static TensorBase wrap_tensor_impl(@ByVal TensorImplPtr tensor_impl)
@ByVal public TensorBase contiguous(torch.MemoryFormat memory_format)
@ByVal public TensorBase contiguous()
@ByVal public TensorBase contiguous(@Cast(value="c10::MemoryFormat") byte memory_format)
@Const @ByRef public TensorBase fill_(@Const @ByRef Scalar scalar)
@Const @ByRef public TensorBase zero_()
@ByVal public TensorBase to(@ByVal(nullValue="at::TensorOptions{}") TensorOptions options, @Cast(value="bool") boolean non_blocking, @Cast(value="bool") boolean copy, @ByVal(nullValue="c10::optional<at::MemoryFormat>(c10::nullopt)") MemoryFormatOptional memory_format)
@ByVal public TensorBase to()
@Cast(value="int64_t") public long size(@Cast(value="int64_t") long dim)
size
in class AbstractTensor
@Cast(value="int64_t") public long stride(@Cast(value="int64_t") long dim)
stride
in class AbstractTensor
public TensorImpl unsafeGetTensorImpl()
public TensorImpl unsafeReleaseTensorImpl()
@Const @ByRef public TensorImplPtr getIntrusivePtr()
@ByVal public TensorImplPtr unsafeReleaseIntrusivePtr()
public void reset()
@ByRef @Name(value="operator =") public TensorBase put(@Const @ByRef TensorBase x)
@Cast(value="bool") @NoException(value=true) public boolean is_same(@Const @ByRef TensorBase other)
@Cast(value="size_t") @NoException(value=true) public long use_count()
@Cast(value="size_t") @NoException(value=true) public long weak_use_count()
@StdString public String toString()
@ByVal public LongArrayRef sizes()
@ByVal public SymIntArrayRef sym_sizes()
@ByVal public SymIntArrayRef sym_strides()
@ByVal public LongArrayRef strides()
@ByVal public DimnameListOptional opt_names()
@ByVal public DimnameArrayRef names()
@Cast(value="int64_t") public long ndimension()
ndimension
in class AbstractTensor
@Cast(value="bool") public boolean is_contiguous(@ByVal(nullValue="at::MemoryFormat::Contiguous") torch.MemoryFormat memory_format)
@ByVal public torch.MemoryFormat suggest_memory_format(@Cast(value="bool") boolean channels_last_strides_exact_match)
@ByVal public torch.MemoryFormat suggest_memory_format()
@Cast(value="size_t") public long nbytes()
nbytes
in class AbstractTensor
@Cast(value="int64_t") public long numel()
numel
in class AbstractTensor
@ByVal public DispatchKeySet key_set()
public torch.ScalarType scalar_type()
scalar_type
in class AbstractTensor
@Cast(value="bool") public boolean is_alias_of(@Const @ByRef TensorBase other)
public void share_memory_()
public torch.Layout layout()
Tensor
's layout.@Cast(value="bool") public boolean is_sparse_csr()
Tensor
has a sparse CSR backend.@Cast(value="bool") public boolean is_quantized()
Tensor
has quantized backend.@Cast(value="bool") public boolean is_meta()
Tensor
is a meta tensor. Meta tensors can
also have other designations.@Cast(value="bool") public boolean is_inference()
Tensor
is an inference tensor.@ByVal public QuantizerPtr quantizer()
@Cast(value="bool") public boolean has_names()
Tensor
has any dimension names.public NamedTensorMeta get_named_tensor_meta()
Tensor
's dimension names data structure@ByVal public TensorOptions options()
TensorOptions
corresponding to this Tensor
. Defined in
TensorOptions.h.
options
in class AbstractTensor
public Pointer mutable_data_ptr()
public Pointer data_ptr()
data_ptr
in class AbstractTensor
@Cast(value="bool*") @Name(value="data_ptr<bool>") public BoolPointer data_ptr_bool()
@Name(value="data_ptr<int8_t>") public BytePointer data_ptr_char()
@Cast(value="uint8_t*") @Name(value="data_ptr<uint8_t>") public BytePointer data_ptr_byte()
@Name(value="data_ptr<int16_t>") public ShortPointer data_ptr_short()
@Name(value="data_ptr<int>") public IntPointer data_ptr_int()
@Cast(value="int64_t*") @Name(value="data_ptr<int64_t>") public LongPointer data_ptr_long()
@Name(value="data_ptr<float>") public FloatPointer data_ptr_float()
@Name(value="data_ptr<double>") public DoublePointer data_ptr_double()
public void print()
@Const @ByRef public TensorBase set_requires_grad(@Cast(value="bool") boolean requires_grad)
requires_grad()
which is false will be leaf Tensors by convention.
For Tensors that have requires_grad()
which is true, they will be leaf Tensors if they were
created by the user. This means that they are not the result of an operation and so
grad_fn()
is nullptr
.
Only leaf Tensors will have their grad()
populated during a call to backward()
.
To get grad()
populated for non-leaf Tensors, you can use retain_grad()
.
Example:
auto a = torch::rand(10, torch::requires_grad());
std::cout << a.is_leaf() << std::endl; // prints `true`
auto b = torch::rand(10, torch::requires_grad()).to(torch::kCUDA);
std::cout << b.is_leaf() << std::endl; // prints `false`
// b was created by the operation that cast a cpu Tensor into a cuda Tensor
auto c = torch::rand(10, torch::requires_grad()) + 2;
std::cout << c.is_leaf() << std::endl; // prints `false`
// c was created by the addition operation
auto d = torch::rand(10).cuda();
std::cout << d.is_leaf() << std::endl; // prints `true`
// d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
auto e = torch::rand(10).cuda().requires_grad_();
std::cout << e.is_leaf() << std::endl; // prints `true`
// e requires gradients and has no operations creating it
auto f = torch::rand(10, torch::device(torch::kCUDA).requires_grad(true));
std::cout << f.is_leaf() << std::endl; // prints `true`
// f requires grad, has no operation creating it
\fn void backward(const Tensor & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, c10::optional<TensorList> inputs=c10::nullopt) const;
gradient
- Gradient w.r.t. the
tensor. If it is a tensor, it will be automatically converted
to a Tensor that does not require grad unless create_graph is True.
None values can be specified for scalar Tensors or ones that
don't require grad. If a None value would be acceptable then
this argument is optional.retain_graph
- If false, the graph used to compute
the grads will be freed. Note that in nearly all cases setting
this option to True is not needed and often can be worked around
in a much more efficient way. Defaults to the value of
create_graph.create_graph
- If true, graph of the derivative will
be constructed, allowing to compute higher order derivative
products. Defaults to false.inputs
- Inputs w.r.t. which the gradient will be accumulated into
at::Tensor::grad. All other Tensors will be ignored. If not
provided, the gradient is accumulated into all the leaf Tensors
that were used to compute the current tensor.
When inputs are provided and a given input is not a leaf,
the current implementation will call its grad_fn (even though it is not strictly needed to get this gradients).
It is an implementation detail on which the user should not rely.
See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.
\fn Tensor detach() const; Returns a new Tensor, detached from the current graph. The result will never require gradient.
\fn Tensor & detach_() const; Detaches the Tensor from the graph that created it, making it a leaf. Views cannot be detached in-place.
\fn void retain_grad() const;
Enables this Tensor to have their :attr:`grad`
populated during
:func:`backward`
. This is a no-op for leaf tensors.
\fn bool retains_grad() const;
Is true if this Tensor is non-leaf and its :attr:`grad`
is enabled to be
populated during :func:`backward`
, false otherwise.
@ByVal public TensorBase tensor_data()
.data()
function on Variable
, and is intended
to be used from functions that need to access the Variable
's equivalent Tensor
(i.e. Tensor
that shares the same storage and tensor metadata with the Variable
).
One notable difference with the legacy .data()
function is that changes to the
returned Tensor
's tensor metadata (e.g. sizes / strides / storage / storage_offset)
will not update the original Variable
, due to the fact that this function
shallow-copies the Variable
's underlying TensorImpl.@ByVal public TensorBase variable_data()
var.variable_data()
in C++ has the same semantics as tensor.data
in Python, which create a new Variable
that shares the same storage and
tensor metadata with the original Variable
, but with a completely new
autograd history.
NOTE: If we change the tensor metadata (e.g. sizes / strides /
storage / storage_offset) of a variable created from var.variable_data()
, those
changes will not update the original variable var
. In .variable_data()
, we set
allow_tensor_metadata_change_
to false to make such changes explicitly illegal,
in order to prevent users from changing metadata of var.variable_data()
and expecting the original variable var
to also be updated.@SharedPtr public Node grad_fn()
Variable
. If this is a leaf variable,
the pointer returned will be null.
For View Variables:
Gets the up-to-date grad_fn. If the shared data or base was modified, we
re-create the grad_fn to express the up-to-date view relationship between
this and the base Variable.@Name(value="register_hook<std::function<void(at::TensorBase)> >") public int register_hook(@ByRef(value=true) VoidTensorHook hook)
hook(TensorBase grad) -> TensorBase
hook(TensorBase grad) -> void
The hook should not modify its argument, but it can optionally return a new gradient
which will be used in place of grad
.
This function returns the index of the hook in the list which can be used to remove hook.
Example:
auto v = torch::tensor({0., 0., 0.}, torch::requires_grad());
auto h = v.register_hook([](torch::Tensor grad){ return grad * 2; }); // double the gradient
v.backward(torch::tensor({1., 2., 3.}));
// This prints:
// ```
// 2
// 4
// 6
// [ CPUFloatType{3} ]
// ```
std::cout << v.grad() << std::endl;
v.remove_hook(h); // removes the hook
@Name(value="register_hook<std::function<at::TensorBase(at::TensorBase)> >") public int register_hook(@ByRef(value=true) TensorTensorHook hook)
public void set_data(@Const @ByRef TensorBase new_data)
@ByVal public TensorBase data()
public void retain_grad()
@Const @ByRef public TensorBase requires_grad_(@Cast(value="bool") boolean _requires_grad)
@Const @ByRef public TensorBase requires_grad_()
@Cast(value="bool") public boolean is_view()
Variable
is a view of another Variable
.@Const @ByRef public TensorBase _base()
Variable
that this Variable
is a view of. If this
Variable
is not a view, throw a std::runtime_error
.@StdString public BytePointer name()
Copyright © 2024. All rights reserved.