Mila
Deep Neural Network Library
#include <vector>
#include <iostream>
#include <sstream>
#include <iomanip>
#include <variant>
#include <memory>
#include <type_traits>
#include <cuda_fp16.h>
#include <atomic>
#include <string>
#include <stdexcept>
#include <numeric>

import Compute.CudaPinnedMemoryResource;
import Compute.CudaManagedMemoryResource;
import Compute.CudaMemoryResource;
import Compute.CpuMemoryResource;
import Compute.DeviceContext;
import Compute.DynamicMemoryResource;
import Dnn.TensorTraits;
import Dnn.TensorPtr;
import Dnn.TensorBuffer;
import Compute.MemoryResource;
import Compute.ComputeDevice;
import Dnn.TensorType;

Classes
class  Mila::Dnn::Tensor< TElementType, TMemoryResource >
class  Mila::Dnn::UniqueIdGenerator

Namespaces

namespace  Mila
namespace  Mila::Dnn
Typedefs

template<class T>
using  Mila::Dnn::DeviceTensor = Tensor< T, Compute::CudaMemoryResource >
        Tensor type that uses device (GPU) memory.

template<typename T>
using  Mila::Dnn::HostTensor = Tensor< T, Compute::HostMemoryResource >
        Tensor type that uses host (CPU) memory.

template<class T>
using  Mila::Dnn::PinnedTensor = Tensor< T, Compute::CudaPinnedMemoryResource >
        Tensor type that uses pinned (page-locked) host memory.

template<class T>
using  Mila::Dnn::UniversalTensor = Tensor< T, Compute::CudaManagedMemoryResource >
        Tensor type that uses CUDA managed memory accessible from both CPU and GPU.
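
The four aliases above differ only in the memory resource that backs the underlying TensorBuffer. The sketch below illustrates how they might be instantiated; the Dnn.Tensor module name and the shape-taking constructor are assumptions for illustration only and should be checked against the actual Tensor class documentation.

#include <cstddef>
#include <vector>

import Dnn.Tensor;            // assumed module name exposing Mila::Dnn::Tensor and the aliases above

using namespace Mila::Dnn;

int main()
{
    // Assumed constructor: Tensor( shape ). Verify against the real Tensor interface.
    std::vector<std::size_t> shape{ 2, 3 };

    HostTensor<float>      host{ shape };     // host (CPU) memory via HostMemoryResource
    DeviceTensor<float>    device{ shape };   // GPU memory; not directly dereferenceable from host code
    PinnedTensor<float>    pinned{ shape };   // page-locked host memory for faster host<->device copies
    UniversalTensor<float> managed{ shape };  // CUDA managed memory, addressable from both CPU and GPU

    return 0;
}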