Package | Description |
---|---|
org.bytedeco.pytorch | |
org.bytedeco.pytorch.cuda | |
org.bytedeco.pytorch.global | |
Modifier and Type | Method and Description |
---|---|
Device |
StorageImpl.device() |
Device |
TensorIteratorBase.device() |
Device |
TensorImpl.device() |
Device |
ArgumentInfo.device() |
Device |
DataPtr.device() |
Device |
TensorBase.device()
Returns a Tensor's device. |
Device |
GeneratorImpl.device() |
Device |
Generator.device() |
Device |
TensorOptions.device()
Returns the device of the TensorOptions. |
Device |
Storage.device() |
Device |
Stream.device() |
Device |
VariableInfo.device() |
Device |
TensorIteratorBase.device(int arg) |
Device |
PyInterpreterVTable.device(TensorImpl self) |
Device |
Future.devices() |
Device |
DeviceGuardImplInterface.exchangeDevice(Device arg0)
Set the current device to Device, and return the previous Device.
|
Device |
DeviceOptional.get() |
Device |
XPUHooksInterface.getATenDeviceFromDLPackDevice(DLDevice_ dl_device,
Pointer data) |
Device |
DeviceGuardImplInterface.getDevice()
Get the current device.
|
Device |
PrivateUse1HooksInterface.getDeviceFromPtr(Pointer data) |
Device |
CUDAHooksInterface.getDeviceFromPtr(Pointer arg0) |
Device |
Context.getDeviceFromPtr(Pointer data,
byte device_type) |
Device |
Context.getDeviceFromPtr(Pointer data,
torch.DeviceType device_type) |
Device |
IValue.toDevice() |
Modifier and Type | Method and Description |
---|---|
void |
TensorImpl._change_backend_component_keys(Device device)
XXX: do not use, private api!
Update the backend component related keys to the backend component
corresponding to this device.
|
static Storage |
Storage.create_legacy(Device device) |
static LegacyTensorConstructor |
LegacyTensorConstructor.create(Symbol form,
torch.ScalarType dtype,
Device device) |
static TensorType |
TensorType.createContiguous(torch.ScalarType scalar_type,
Device device,
long... sizes) |
static TensorType |
TensorType.createContiguous(torch.ScalarType scalar_type,
Device device,
LongArrayRef sizes) |
TensorIteratorConfig |
TensorIteratorConfig.declare_static_device(Device device) |
TensorIteratorConfig |
TensorIteratorConfig.declare_static_dtype_and_device(torch.ScalarType dtype,
Device device) |
Generator |
Context.defaultGenerator(Device device) |
VariableInfo |
VariableInfo.device(Device setter) |
boolean |
Device.equals(Device other)
Returns true if the type and index of this Device matches that of other. |
Device |
DeviceGuardImplInterface.exchangeDevice(Device arg0)
Set the current device to Device, and return the previous Device.
|
Stream |
DeviceGuardImplInterface.getDefaultStream(Device arg0)
Get the default stream for a given device.
|
DLDevice_ |
XPUHooksInterface.getDLPackDeviceFromATenDevice(DLDevice_ dl_device,
Device aten_device,
Pointer data) |
Stream |
DeviceGuardImplInterface.getStream(Device arg0)
Get the current stream for a given device.
|
Stream |
DeviceGuardImplInterface.getStreamFromGlobalPool(Device arg0) |
Stream |
DeviceGuardImplInterface.getStreamFromGlobalPool(Device arg0,
boolean isHighPriority)
Get a stream from the global pool for a given device.
|
static DataPtr |
PlacementDeleteContext.makeDataPtr(DataPtr data_ptr,
PlacementConsumer placement_dtor,
long size,
Device device) |
boolean |
Device.notEquals(Device other)
Returns true if the type or index of this Device differs from that of other. |
DeviceOptional |
DeviceOptional.put(Device value) |
void |
MemoryReportingInfoBase.reportMemoryUsage(Pointer ptr,
long alloc_size,
long total_allocated,
long total_reserved,
Device device)
alloc_size corresponds to the size of the ptr.
|
void |
MemoryReportingInfoBase.reportOutOfMemory(long alloc_size,
long total_allocated,
long total_reserved,
Device device) |
void |
OptionalDeviceGuard.reset_device(Device device)
Sets the device to the given one.
|
void |
OptionalDeviceGuard.reset_device(Device device,
DeviceGuardImplInterface impl)
For testing only
|
void |
DeviceGuardImplInterface.setDevice(Device arg0)
Set the current device to Device.
|
void |
JitModule.to(Device device) |
void |
RNNImplBase.to(Device device) |
void |
GRUImplBase.to(Device device) |
void |
LSTMImplBase.to(Device device) |
void |
Module.to(Device device,
boolean non_blocking)
Recursively moves all parameters to the given device.
|
void |
JitModule.to(Device device,
boolean non_blocking)
Recursively moves all parameters to the given device.
|
void |
RNNImplBase.to(Device device,
boolean non_blocking) |
void |
GRUImplBase.to(Device device,
boolean non_blocking) |
void |
LSTMImplBase.to(Device device,
boolean non_blocking) |
void |
JitModule.to(Device device,
torch.ScalarType dtype) |
Tensor |
Tensor.to(Device device,
torch.ScalarType dtype) |
void |
RNNImplBase.to(Device device,
torch.ScalarType dtype) |
void |
GRUImplBase.to(Device device,
torch.ScalarType dtype) |
void |
LSTMImplBase.to(Device device,
torch.ScalarType dtype) |
void |
Module.to(Device device,
torch.ScalarType dtype,
boolean non_blocking)
Recursively casts all parameters to the given dtype and device. |
void |
JitModule.to(Device device,
torch.ScalarType dtype,
boolean non_blocking)
Recursively casts all parameters to the given dtype and device. |
void |
RNNImplBase.to(Device device,
torch.ScalarType dtype,
boolean non_blocking)
Overrides
nn::Module::to() to call flatten_parameters() after the
original operation. |
void |
GRUImplBase.to(Device device,
torch.ScalarType dtype,
boolean non_blocking)
Overrides
nn::Module::to() to call flatten_parameters() after the
original operation. |
void |
LSTMImplBase.to(Device device,
torch.ScalarType dtype,
boolean non_blocking)
Overrides
nn::Module::to() to call flatten_parameters() after the
original operation. |
Tensor |
Tensor.to(Device device,
torch.ScalarType dtype,
boolean non_blocking,
boolean copy,
MemoryFormatOptional memory_format) |
Tensor |
Tensor.to(Device device,
TypeMeta type_meta) |
Tensor |
Tensor.to(Device device,
TypeMeta type_meta,
boolean non_blocking,
boolean copy) |
void |
DeviceGuardImplInterface.uncheckedSetDevice(Device arg0)
Set the current device to Device, without checking for errors
(so, e.g., this can be called from a destructor).
|
void |
DataPtr.unsafe_set_device(Device device) |
Constructor and Description |
---|
DataPtr(Pointer data,
Device device) |
DataPtr(Pointer data,
Pointer ctx,
PointerConsumer ctx_deleter,
Device device) |
DeviceOptional(Device value) |
IValue(Device d) |
LegacyTensorConstructor(Symbol form,
torch.ScalarType dtype,
Device device) |
OptionalDeviceGuard(Device device)
Initialize the guard, setting the current device to the passed Device.
|
OptionalDeviceGuard(Device device,
DeviceGuardImplInterface impl)
Constructor for testing only.
|
Stream(int arg0,
Device device) |
Stream(int arg0,
Device device,
long id) |
Stream(Stream.Default arg0,
Device device)
Construct the default stream of a Device.
|
Stream(Stream.Unsafe arg0,
Device device,
long id)
Unsafely construct a stream from a Device and a StreamId.
|
TensorOptions(Device device)
Constructs a
TensorOptions object with the given device. |
Modifier and Type | Method and Description |
---|---|
Device |
CUDAGuard.current_device()
Returns the last device that was set via set_device, if any, otherwise the device passed during construction. |
Device |
CUDAStreamGuard.current_device()
Returns the most recent CUDA device that was set using this device guard,
either from construction, or via set_device/reset_device/set_index.
|
Device |
CUDAStream.device()
Get the full Device that this stream is associated with.
|
Device |
CUDAGuard.original_device()
Returns the device that was set upon construction of the guard
|
Device |
CUDAStreamGuard.original_device()
Returns the CUDA device that was set at the most recent reset_stream(),
or otherwise the device at construction time.
|
Modifier and Type | Method and Description |
---|---|
void |
CUDAGuard.reset_device(Device device)
Sets the CUDA device to the given device.
|
void |
CUDAGuard.set_device(Device device)
Sets the CUDA device to the given device.
|
Constructor and Description |
---|
CUDAGuard(Device device)
Sets the current CUDA device to the passed device.
|
Modifier and Type | Method and Description |
---|---|
static Device |
torch.device_or_default(DeviceOptional device) |
Modifier and Type | Method and Description |
---|---|
static Tensor |
torch.applySelect(Tensor self,
long dim,
SymInt index,
long real_dim,
Device arg4,
SymIntArrayRefOptional self_sizes) |
static Tensor |
torch.applySlice(Tensor self,
long dim,
SymInt start,
SymInt stop,
SymInt step,
boolean disable_slice_optimization,
Device self_device,
SymIntArrayRefOptional self_sizes) |
static Tensor |
torch.applySlicing(Tensor self,
TensorIndexArrayRef indices,
TensorVector outIndices,
boolean disable_slice_optimization,
Device self_device,
SymIntArrayRefOptional self_sizes) |
static Tensor |
torch.applySlicing(Tensor self,
TensorIndexVector indices,
TensorVector outIndices,
boolean disable_slice_optimization,
Device self_device,
SymIntArrayRefOptional self_sizes) |
static Tensor |
torch.boolToIndexingTensor(Tensor self,
boolean value,
Device self_device) |
static TensorOptions |
torch.device(Device device)
Convenience function that returns a
TensorOptions object with the device
set to the given one. |
static Tensor |
torch.handleDimInMultiDimIndexing(Tensor prev_dim_result,
Tensor original_tensor,
TensorIndex index,
long[] dim_ptr,
long[] specified_dims_ptr,
long real_dim,
TensorVector outIndices,
boolean disable_slice_optimization,
Device original_tensor_device,
SymIntArrayRefOptional prev_dim_result_sizes) |
static Tensor |
torch.handleDimInMultiDimIndexing(Tensor prev_dim_result,
Tensor original_tensor,
TensorIndex index,
LongBuffer dim_ptr,
LongBuffer specified_dims_ptr,
long real_dim,
TensorVector outIndices,
boolean disable_slice_optimization,
Device original_tensor_device,
SymIntArrayRefOptional prev_dim_result_sizes) |
static Tensor |
torch.handleDimInMultiDimIndexing(Tensor prev_dim_result,
Tensor original_tensor,
TensorIndex index,
LongPointer dim_ptr,
LongPointer specified_dims_ptr,
long real_dim,
TensorVector outIndices,
boolean disable_slice_optimization,
Device original_tensor_device,
SymIntArrayRefOptional prev_dim_result_sizes) |
static void |
torch.reportMemoryUsageToProfiler(Pointer ptr,
long alloc_size,
long total_allocated,
long total_reserved,
Device device) |
static void |
torch.reportOutOfMemoryToProfiler(long alloc_size,
long total_allocated,
long total_reserved,
Device device) |
static Tensor |
torch.scalar_to_tensor(Scalar s,
Device device) |
static Tensor |
torch.scalarToTensor(Scalar v,
TensorOptions options,
Device self_device) |
static Pointer |
torch.shiftLeft(Pointer stream,
Device device)
Writes a textual representation of the given device to the output stream (maps the C++ stream operator&lt;&lt;; description to be confirmed against the native c10::Device docs).
|
static Tensor |
torch.wrapped_scalar_tensor(Scalar scalar,
Device device) |
Copyright © 2024. All rights reserved.