@Namespace(value="torch::autograd") @NoOffset @Properties(inherit=torch.class) public class AutogradMeta extends AutogradMetaInterface
Each Variable
has one unique AutogradMeta
struct, which stores autograd
metadata fields that are necessary for tracking the Variable's autograd
history. As an optimization, a Variable may store a nullptr, in lieu of a
default constructed AutogradMeta.
Nested classes inherited from class org.bytedeco.javacpp.Pointer: Pointer.CustomDeallocator, Pointer.Deallocator, Pointer.NativeDeallocator, Pointer.ReferenceCounter
Constructor and Description |
---|
AutogradMeta() |
AutogradMeta(Pointer p)
Pointer cast constructor.
|
AutogradMeta(TensorImpl self_impl,
boolean requires_grad,
Edge gradient_edge) |
address, asBuffer, asByteBuffer, availablePhysicalBytes, calloc, capacity, capacity, close, deallocate, deallocate, deallocateReferences, deallocator, deallocator, equals, fill, formatBytes, free, getDirectBufferAddress, getPointer, getPointer, getPointer, getPointer, hashCode, interruptDeallocatorThread, isNull, isNull, limit, limit, malloc, maxBytes, maxPhysicalBytes, memchr, memcmp, memcpy, memmove, memset, offsetAddress, offsetof, offsetof, parseBytes, physicalBytes, physicalBytesInaccurate, position, position, put, realloc, referenceCount, releaseReference, retainReference, setNull, sizeof, sizeof, toString, totalBytes, totalCount, totalPhysicalBytes, withDeallocator, zero
public AutogradMeta(Pointer p)
Pointer(Pointer)
public AutogradMeta(TensorImpl self_impl, @Cast(value="bool") boolean requires_grad, @ByVal(nullValue="torch::autograd::Edge()") Edge gradient_edge)
public AutogradMeta()
@StdString public BytePointer name_()
public AutogradMeta name_(BytePointer setter)
public AutogradMeta grad_(Tensor setter)
@SharedPtr public Node grad_fn_()
public AutogradMeta grad_fn_(Node setter)
@SharedPtr public ForwardGrad fw_grad_()
public AutogradMeta fw_grad_(ForwardGrad setter)
@UniquePtr @Cast(value={"","","std::unique_ptr<torch::autograd::PostAccumulateGradHook>&&"}) public PostAccumulateGradHook post_acc_grad_hooks_()
public AutogradMeta post_acc_grad_hooks_(PostAccumulateGradHook setter)
public AutogradMeta requires_grad_(boolean setter)
public AutogradMeta retains_grad_(boolean setter)
public AutogradMeta is_view_(boolean setter)
public AutogradMeta output_nr_(int setter)
public void set_requires_grad(@Cast(value="bool") boolean requires_grad, TensorImpl self_impl)
Sets the requires_grad
property of Variable
. This should be true for
leaf variables that want to accumulate gradients, and false for all other
variables.
Specified by: set_requires_grad
in class AutogradMetaInterface
@Cast(value="bool") public boolean requires_grad()
requires_grad
in class AutogradMetaInterface
@Cast(value="torch::autograd::Variable*") @ByRef public Tensor mutable_grad()
Variable
of this Variable
.
Specified by: mutable_grad
in class AutogradMetaInterface
@Cast(value="const torch::autograd::Variable*") @ByRef public Tensor grad()
grad
in class AutogradMetaInterface
@Cast(value="const torch::autograd::Variable*") @ByRef public Tensor fw_grad(@Cast(value="uint64_t") long level, @Const @ByRef TensorBase self)
fw_grad
in class AutogradMetaInterface
public void set_fw_grad(@Const @ByRef TensorBase new_grad, @Const @ByRef TensorBase self, @Cast(value="uint64_t") long level, @Cast(value="bool") boolean is_inplace_op)
set_fw_grad
in class AutogradMetaInterface
Copyright © 2024. All rights reserved.