[ENH] Add and Validate n_layers, n_units, activation & dropout_rate kwargs to MLPNetwork #2338

Open
wants to merge 12 commits into
base: main
fixes
aadya940 committed Nov 18, 2024
commit d47025816157ed08c25008192d19b1a81a376b60
15 changes: 5 additions & 10 deletions aeon/classification/deep_learning/_mlp.py
@@ -87,10 +87,6 @@ class MLPClassifier(BaseDeepClassifier):
a single string metric is provided, it will be
used as the only metric. If a list of metrics are
provided, all will be used for evaluation.
- activation : string or a tf callable, default="sigmoid"
- Activation function used in the output linear layer.
- List of available activation functions:
- https://keras.io/api/layers/activations/

Notes
-----
@@ -115,11 +111,11 @@ class MLPClassifier(BaseDeepClassifier):

def __init__(
self,
- n_layers = 3,
- n_units = 200,
- activation = "relu",
- dropout_rate = None,
- dropout_last = None,
+ n_layers=3,
+ n_units=200,
+ activation="relu",
+ dropout_rate=None,
+ dropout_last=None,
use_bias=True,
n_epochs=2000,
batch_size=16,
@@ -150,7 +146,6 @@ def __init__(
self.loss = loss
self.metrics = metrics
self.use_mini_batch_size = use_mini_batch_size
- self.activation = activation
self.use_bias = use_bias
self.file_path = file_path
self.save_best_model = save_best_model
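For reviewers, a minimal usage sketch of `MLPClassifier` with the keyword arguments this commit touches. The constructor arguments are taken from the signature above; the synthetic data, the tiny `n_epochs`, and the fit/predict flow are illustrative assumptions following aeon's usual estimator API (and require the TensorFlow backend), not part of this diff.

```python
# Hypothetical smoke test for the classifier kwargs shown in this diff.
import numpy as np

from aeon.classification.deep_learning import MLPClassifier

rng = np.random.default_rng(0)
X = rng.normal(size=(20, 1, 50))  # 20 univariate series of length 50
y = np.array([0, 1] * 10)         # binary labels

clf = MLPClassifier(
    n_layers=3,          # number of hidden dense layers
    n_units=200,         # units per hidden layer
    activation="relu",   # hidden-layer activation
    dropout_rate=None,   # keep the network's default dropout behaviour
    n_epochs=5,          # kept tiny for a quick check, not a real training run
)
clf.fit(X, y)
print(clf.predict(X))
```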
16 changes: 5 additions & 11 deletions aeon/regression/deep_learning/_mlp.py
@@ -83,10 +83,6 @@ class MLPRegressor(BaseDeepRegressor):
by `np.random`.
Seeded random number generation can only be guaranteed on CPU processing,
GPU processing will be non-deterministic.
- activation : string or a tf callable, default="relu"
- Activation function used in the output linear layer.
- List of available activation functions:
- https://keras.io/api/layers/activations/
output_activation : str = "linear"
Activation for the last layer in a Regressor.
optimizer : keras.optimizer, default = tf.keras.optimizers.Adam()
@@ -111,11 +107,11 @@ class MLPRegressor(BaseDeepRegressor):

def __init__(
self,
- n_layers = 3,
- n_units = 200,
- activation = "relu",
- dropout_rate = None,
- dropout_last = None,
+ n_layers=3,
+ n_units=200,
+ activation="relu",
+ dropout_rate=None,
+ dropout_last=None,
use_bias=True,
n_epochs=2000,
batch_size=16,
@@ -131,7 +127,6 @@ def __init__(
last_file_name="last_model",
init_file_name="init_model",
random_state=None,
activation="relu",
output_activation="linear",
optimizer=None,
):
@@ -145,7 +140,6 @@ def __init__(
self.verbose = verbose
self.loss = loss
self.metrics = metrics
- self.activation = activation
self.use_bias = use_bias
self.file_path = file_path
self.save_best_model = save_best_model
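Likewise, a minimal sketch of `MLPRegressor` with the same network kwargs; only the parameter names visible in the diff are taken from this commit, while the synthetic targets, the `output_activation` value, and the short training run are illustrative assumptions.

```python
# Hypothetical smoke test for the regressor kwargs shown in this diff.
import numpy as np

from aeon.regression.deep_learning import MLPRegressor

rng = np.random.default_rng(0)
X = rng.normal(size=(20, 1, 50))  # 20 univariate series of length 50
y = rng.normal(size=20)           # continuous targets

reg = MLPRegressor(
    n_layers=3,
    n_units=200,
    activation="relu",            # hidden-layer activation
    dropout_rate=None,
    output_activation="linear",   # last-layer activation, per the docstring above
    n_epochs=5,                   # kept tiny for a quick check
)
reg.fit(X, y)
print(reg.predict(X))
```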