@Namespace(value="torch::optim") @Properties(inherit=torch.class) public class Adagrad extends Optimizer
Pointer.CustomDeallocator, Pointer.Deallocator, Pointer.NativeDeallocator, Pointer.ReferenceCounter
Constructor and Description |
---|
Adagrad(OptimizerParamGroupVector param_groups) |
Adagrad(OptimizerParamGroupVector param_groups,
AdagradOptions defaults) |
Adagrad(Pointer p)
Pointer cast constructor.
|
Adagrad(TensorVector params) |
Adagrad(TensorVector params,
AdagradOptions defaults) |
Modifier and Type | Method and Description |
---|---|
void |
load(InputArchive archive)
Deserializes the optimizer state from the given
archive . |
void |
save(OutputArchive archive)
Serializes the optimizer state into the given
archive . |
Tensor |
step() |
Tensor |
step(LossClosure closure)
A loss function closure, which is expected to return the loss value.
|
add_param_group, add_parameters, defaults, param_groups, parameters, size, zero_grad, zero_grad
address, asBuffer, asByteBuffer, availablePhysicalBytes, calloc, capacity, capacity, close, deallocate, deallocate, deallocateReferences, deallocator, deallocator, equals, fill, formatBytes, free, getDirectBufferAddress, getPointer, getPointer, getPointer, getPointer, hashCode, interruptDeallocatorThread, isNull, isNull, limit, limit, malloc, maxBytes, maxPhysicalBytes, memchr, memcmp, memcpy, memmove, memset, offsetAddress, offsetof, offsetof, parseBytes, physicalBytes, physicalBytesInaccurate, position, position, put, realloc, referenceCount, releaseReference, retainReference, setNull, sizeof, sizeof, toString, totalBytes, totalCount, totalPhysicalBytes, withDeallocator, zero
public Adagrad(Pointer p)
See also: Pointer(Pointer)
public Adagrad(@ByVal OptimizerParamGroupVector param_groups, @ByVal(nullValue="torch::optim::AdagradOptions{}") AdagradOptions defaults)
public Adagrad(@ByVal OptimizerParamGroupVector param_groups)
public Adagrad(@Cast(value={"","std::vector<torch::Tensor>"}) @StdMove TensorVector params, @ByVal(nullValue="torch::optim::AdagradOptions{}") AdagradOptions defaults)
public Adagrad(@Cast(value={"","std::vector<torch::Tensor>"}) @StdMove TensorVector params)
@ByVal public Tensor step(@ByVal(nullValue="torch::optim::Optimizer::LossClosure(nullptr)") LossClosure closure)
Optimizer
public void save(@ByRef OutputArchive archive)
Optimizer
archive
public void load(@ByRef InputArchive archive)
Optimizer
archive
Copyright © 2024. All rights reserved.