|
 | CudaGeluOp (const GeluConfig &config) |
 | Constructs a CudaGeluOp with the given configuration.
|
 | CudaGeluOp (std::shared_ptr< DeviceContext > context, const GeluConfig &config) |
 | Constructs a CudaGeluOp with the given configuration, bound to a specific device context.
|
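A minimal construction sketch; the header path, the default-constructibility of GeluConfig, and the DeviceContext constructor are assumptions rather than documented API:

```cpp
#include <memory>
// #include "Mila/Dnn/Compute/CudaGeluOp.h"  // assumed header path

using namespace Mila::Dnn::Compute;

// Construct with an implicit (default) device context.
GeluConfig config;                    // assumed default-constructible
CudaGeluOp<float> op( config );

// Or bind the operation to an explicitly managed device context.
auto context = std::make_shared<DeviceContext>();  // assumed constructor
CudaGeluOp<float> op_on_ctx( context, config );
```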
void | backward (const Tensor< TDataType, MR > &input, const Tensor< TDataType, MR > &output, const Tensor< TDataType, MR > &output_gradient, const std::vector< std::shared_ptr< Tensor< TDataType, MR > > > &parameters, std::vector< std::shared_ptr< Tensor< TDataType, MR > > > &parameter_gradients, Tensor< TDataType, MR > &input_gradient, const OperationAttributes &properties, const std::vector< std::shared_ptr< Tensor< TDataType, MR > > > &output_state) const
| Performs the backward pass of the GELU activation function.
|
|
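A hedged sketch of a backward call matching the signature above. GELU has no learnable parameters, so both parameter vectors stay empty; the Tensor API and the memory-resource tag MR are assumptions:

```cpp
// Reuses the names from the signature above; input, output, output_gradient,
// properties, and output_state are assumed to hold the forward-pass values
// and the incoming gradient (their construction is not shown on this page).
std::vector<std::shared_ptr<Tensor<float, MR>>> parameters;          // GELU is parameter-free
std::vector<std::shared_ptr<Tensor<float, MR>>> parameter_gradients; // stays empty as well
Tensor<float, MR> input_gradient;                                    // receives dL/d(input)

op.backward( input, output, output_gradient,
             parameters, parameter_gradients,
             input_gradient, properties, output_state );
```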
void | forward (const Tensor< TDataType, MR > &input, const std::vector< std::shared_ptr< Tensor< TDataType, MR > > > &parameters, const OperationAttributes &properties, Tensor< TDataType, MR > &output, std::vector< std::shared_ptr< Tensor< TDataType, MR > > > &output_state) const override
| Performs the forward pass of the GELU activation function on CUDA.
|
|
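A sketch of a forward call using the parameter names from the signature above; tensor construction, shapes, and the concrete type behind MR are assumptions:

```cpp
// MR stands for the memory-resource template argument used throughout this
// page; the concrete type and the Tensor constructors are assumptions.
Tensor<float, MR> input;    // activations to transform (construction not shown)
Tensor<float, MR> output;   // receives GELU(input); same shape as input

std::vector<std::shared_ptr<Tensor<float, MR>>> parameters;   // unused: GELU has no weights
std::vector<std::shared_ptr<Tensor<float, MR>>> output_state; // filled by the op for the backward pass
OperationAttributes properties;

op.forward( input, parameters, properties, output, output_state );
```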
const GeluConfig & | getConfig () const |
 | Gets the configuration used by this operation.
|
std::string | getName () const override |
| Gets the name of this operation.
|
|
| UnaryOperation (OperationType operation_type) |
| Constructs a UnaryOperation with the specified operation type.
|
|
| UnaryOperation (OperationType operation_type, std::shared_ptr< DeviceContext > context) |
| Constructs a UnaryOperation with the specified operation type and device context.
|
|
virtual | ~UnaryOperation ()=default |
| Virtual destructor for proper cleanup of derived classes.
|
|
virtual void | backward (const Tensor< TDataType, MR > &grad, const std::vector< std::shared_ptr< Tensor< TDataType, MR > > > &parameters, std::vector< std::shared_ptr< Tensor< TDataType, MR > > > &output_grads) const
| Executes the backward pass of a unary operation.
|
|
virtual void | backward (const Tensor< TDataType, MR > &input, const Tensor< TDataType, MR > &output_grad, const std::vector< std::shared_ptr< Tensor< TDataType, MR > > > &parameters, std::vector< std::shared_ptr< Tensor< TDataType, MR > > > &parameter_grads, Tensor< TDataType, MR > &input_grad, const OperationAttributes &properties, const std::vector< std::shared_ptr< Tensor< TDataType, MR > > > &output_state) const
| Executes the comprehensive backward pass of a unary operation.
|
|
virtual void | forward (const Tensor< TDataType, MR > &input, const std::vector< std::shared_ptr< Tensor< TDataType, MR > > > &parameters, const OperationAttributes &properties, Tensor< TDataType, MR > &output, std::vector< std::shared_ptr< Tensor< TDataType, MR > > > &output_state) const=0
| Executes the forward pass of a unary operation.
|
|
| OperationBase (OperationType operation_type, std::shared_ptr< DeviceContext > context) |
| Constructs an OperationBase object with a specific device context and compute precision.
|
|
virtual | ~OperationBase ()=default |
| Virtual destructor for the OperationBase class.
|
|
std::shared_ptr< DeviceContext > | getDeviceContext () const |
| Gets the device context associated with this operation.
|
|
DeviceType | getDeviceType () const |
| Gets the device type for this operation.
|
|
OperationType | getOperationType () const |
| Gets the operation type enumeration value.
|
|
template<typename TDataType>
requires ValidFloatTensorType<TDataType>
class Mila::Dnn::Compute::CudaGeluOp< TDataType >
CUDA implementation of the GELU activation function for neural networks.
This class implements the Gaussian Error Linear Unit (GELU) activation, a smooth, non-linear alternative to ReLU that is widely used in transformer architectures.
The implementation runs on the GPU via CUDA, providing efficient computation for large neural network models, and supports different precision modes via the ComputePrecision policy.
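For reference, GELU scales its input by the standard normal CDF; the widely used tanh approximation is:

```latex
\mathrm{GELU}(x) = x\,\Phi(x)
\approx \tfrac{1}{2}\,x\left(1 + \tanh\!\left(\sqrt{\tfrac{2}{\pi}}\left(x + 0.044715\,x^{3}\right)\right)\right)
```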
Template Parameters
TDataType | The data type of the output tensor elements. |
TInput | The data type of the input tensor elements (defaults to TDataType). |