Mila — Deep Neural Network Library
Implementation of the CUDA-based GELU activation function for neural networks. More...
#include <vector>
#include <memory>
#include <iostream>
#include <cuda_fp16.h>
#include "Kernels/CudaOps.h"
#include <stdexcept>
#include <type_traits>
import Compute.CudaDevice;
import Compute.CudaMemoryResource;
import Dnn.Modules.Gelu;
import Dnn.Tensor;
import Compute.OperationAttributes;
import Compute.Precision;
import Dnn.ComponentConfig;
import Compute.DeviceContext;
import Compute.UnaryOperation;
import Compute.OperationType;
import Compute.OperationBase;
import Compute.OperationRegistry;
import Dnn.TensorTraits;
import Compute.DeviceType;
import Compute.MemoryResource;
Classes | |
| struct | Mila::Dnn::Compute::Detail::cuda_gelu_impl< float > |
| struct | Mila::Dnn::Compute::Detail::cuda_gelu_impl< half > |
| class | Mila::Dnn::Compute::CudaGeluOp< TDataType > |
| CUDA implementation of the GELU activation function for neural networks. More... | |
| class | Mila::Dnn::Compute::CudaGeluOpRegistrar |
| Class responsible for registering the CudaGeluOp operation. More... | |
Namespaces | |
| namespace | Mila |
| namespace | Mila::Dnn |
| namespace | Mila::Dnn::Compute |
| namespace | Mila::Dnn::Compute::Detail |
| Namespace for CUDA GELU activation implementation details. | |
Typedefs | |
| using | Mila::Dnn::Compute::Detail::BackwardFp16Func = void(*)(half *, const half *, const half *, int, cudaStream_t) |
| using | Mila::Dnn::Compute::Detail::BackwardFp32Func = void(*)(float *, const float *, const float *, int, cudaStream_t) |
| using | Mila::Dnn::Compute::Detail::ForwardFp16Func = void(*)(half *, const half *, int, cudaStream_t) |
| using | Mila::Dnn::Compute::Detail::ForwardFp32Func = void(*)(float *, const float *, int, cudaStream_t) |
Implementation of the CUDA-based GELU activation function for neural networks.