Configuration class for MLP block.
◆ MLPConfig() [1/2]
Mila::Dnn::MLPConfig::MLPConfig( const std::vector< size_t > & input_shape, size_t hidden_size ) [inline]

Constructor with required parameters.

- Parameters
  - input_shape: The shape of the input tensor
  - hidden_size: The size of the hidden layer (intermediate dimension)
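A minimal usage sketch for this constructor follows; the shape values, the hidden size, and the assumption that the Mila module is already visible are illustrative, not taken from this page.

    // Sketch only: assumes the Mila module is imported (e.g. `import Mila;`).
    #include <cstddef>
    #include <vector>

    void configureFromShape()
    {
        // Illustrative shape [batch, sequence_length, features].
        std::vector<std::size_t> input_shape{ 64, 128, 768 };

        // hidden_size is the intermediate dimension of the MLP block;
        // 4x the feature width is a common (but here assumed) choice.
        Mila::Dnn::MLPConfig config( input_shape, 3072 );

        config.validate();   // throws std::invalid_argument on bad parameters
    }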
◆ MLPConfig() [2/2]
Mila::Dnn::MLPConfig::MLPConfig( size_t input_features, size_t hidden_size ) [inline]

Alternative constructor with direct input features specification.

- Parameters
  - input_features: The number of input features
  - hidden_size: The size of the hidden layer (intermediate dimension)
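When only the feature count is known, the same configuration can be built through this overload; the values below are illustrative.

    // Sketch only: assumes the Mila module is visible.
    #include <cstddef>

    void configureFromFeatures()
    {
        std::size_t input_features = 768;    // illustrative
        std::size_t hidden_size    = 3072;   // illustrative

        Mila::Dnn::MLPConfig config( input_features, hidden_size );
        config.validate();
    }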
◆ getActivationType()
◆ getDropout()
float Mila::Dnn::MLPConfig::getDropout() const [inline]
◆ getHiddenSize()
size_t Mila::Dnn::MLPConfig::getHiddenSize() const [inline]
◆ getInputFeatures()
size_t Mila::Dnn::MLPConfig::getInputFeatures() const [inline]
◆ getInputShape()
const std::vector< size_t > & Mila::Dnn::MLPConfig::getInputShape() const [inline]
◆ hasBias()
bool Mila::Dnn::MLPConfig::hasBias() const [inline]
◆ useFusedOperations()
bool Mila::Dnn::MLPConfig::useFusedOperations() const [inline]
◆ useLayerNorm()
bool Mila::Dnn::MLPConfig::useLayerNorm() const [inline]
◆ useResidual()
bool Mila::Dnn::MLPConfig::useResidual() const [inline]
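The accessors above are plain getters; a consumer might read a finished configuration back as sketched here (the printing itself is purely illustrative).

    #include <iostream>

    void printConfig( const Mila::Dnn::MLPConfig& config )
    {
        std::cout << std::boolalpha
                  << "input features : " << config.getInputFeatures() << '\n'
                  << "hidden size    : " << config.getHiddenSize() << '\n'
                  << "dropout        : " << config.getDropout() << '\n'
                  << "bias           : " << config.hasBias() << '\n'
                  << "layer norm     : " << config.useLayerNorm() << '\n'
                  << "residual       : " << config.useResidual() << '\n'
                  << "fused ops      : " << config.useFusedOperations() << '\n';
    }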
◆ validate()
void Mila::Dnn::MLPConfig::validate() const [inline, virtual]

Validate configuration parameters.

- Exceptions
  - std::invalid_argument: If validation fails

Reimplemented from Mila::Dnn::ComponentConfig.
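Because validate() reports problems by throwing, callers typically wrap it in a try/catch; in the sketch below the zero hidden size is assumed to be invalid (the exact validation rules are not documented on this page).

    #include <cstddef>
    #include <iostream>
    #include <stdexcept>

    void checkConfig()
    {
        std::size_t input_features = 768;   // illustrative
        std::size_t hidden_size    = 0;     // assumed to be rejected by validate()

        Mila::Dnn::MLPConfig config( input_features, hidden_size );

        try
        {
            config.validate();
        }
        catch ( const std::invalid_argument& e )
        {
            std::cerr << "MLPConfig validation failed: " << e.what() << '\n';
        }
    }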
◆ withActivation()
Set the activation function type.
- Parameters
  - activation: The activation function to use
- Returns
  - MLPConfig& Reference to this for method chaining
◆ withBias()
MLPConfig & Mila::Dnn::MLPConfig::withBias( bool has_bias ) [inline]

Configure whether the linear layers use bias.

- Parameters
  - has_bias: Whether to include bias terms
- Returns
  - MLPConfig& Reference to this for method chaining
◆ withDropout()
MLPConfig & Mila::Dnn::MLPConfig::withDropout( float dropout ) [inline]

Set the dropout rate.

- Parameters
  - dropout: Dropout probability (0.0 to 1.0)
- Returns
  - MLPConfig& Reference to this for method chaining
◆ withFusedOperations()
MLPConfig & Mila::Dnn::MLPConfig::withFusedOperations( bool fuse_ops ) [inline]

Configure whether to fuse operations for inference.

- Parameters
  - fuse_ops: Whether to fuse operations when possible
- Returns
  - MLPConfig& Reference to this for method chaining
◆ withLayerNorm()
MLPConfig & Mila::Dnn::MLPConfig::withLayerNorm( bool use_layer_norm ) [inline]

Configure whether to use layer normalization.

- Parameters
  - use_layer_norm: Whether to use layer normalization
- Returns
  - MLPConfig& Reference to this for method chaining
◆ withResidual()
MLPConfig & Mila::Dnn::MLPConfig::withResidual( bool use_residual ) [inline]

Configure whether to use a residual connection.

- Parameters
  - use_residual: Whether to add a residual connection
- Returns
  - MLPConfig& Reference to this for method chaining
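Every withX() setter returns MLPConfig&, so the options can be chained. The sketch below uses illustrative values and omits withActivation() because the activation enum type is not shown on this page.

    #include <cstddef>
    #include <vector>

    Mila::Dnn::MLPConfig makeMlpConfig()
    {
        // Illustrative shape [batch, sequence_length, features].
        std::vector<std::size_t> input_shape{ 32, 256, 1024 };

        Mila::Dnn::MLPConfig config( input_shape, 4096 );
        config.withBias( true )
              .withDropout( 0.1f )
              .withLayerNorm( true )
              .withResidual( true )
              .withFusedOperations( false );   // e.g. keep operations unfused during training

        config.validate();
        return config;
    }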
◆ activation_type_
◆ dropout_
float Mila::Dnn::MLPConfig::dropout_ { 0.0f } [private]
◆ fuse_operations_
bool Mila::Dnn::MLPConfig::fuse_operations_ { false } [private]
◆ has_bias_
bool Mila::Dnn::MLPConfig::has_bias_ { true } [private]
◆ hidden_size_
size_t Mila::Dnn::MLPConfig::hidden_size_ { 0 } [private]
◆ input_features_
size_t Mila::Dnn::MLPConfig::input_features_ { 0 } [private]
◆ input_shape_
std::vector<size_t> Mila::Dnn::MLPConfig::input_shape_ [private]
◆ use_layer_norm_
bool Mila::Dnn::MLPConfig::use_layer_norm_ { false } [private]
◆ use_residual_
bool Mila::Dnn::MLPConfig::use_residual_ { false } [private]
The documentation for this class was generated from the following file:
- /home/runner/work/Mila/Mila/Mila/Src/Dnn/Modules/Blocks/MLPConfig.ixx