diff --git a/Keras/Optimizers.cs b/Keras/Optimizers.cs
index 1817acb..bfecd71 100644
--- a/Keras/Optimizers.cs
+++ b/Keras/Optimizers.cs
@@ -184,4 +184,37 @@ public Nadam(float lr = 0.002f, float beta_1 = 0.9f, float beta_2 = 0.999f)
             Init();
         }
     }
+
+
+    /// <summary>
+    /// "Follow The Regularized Leader" (FTRL) is an optimization algorithm developed at Google for click-through rate prediction in the early 2010s.
+    /// It is most suitable for shallow models with large and sparse feature spaces.
+    /// </summary>
+    /// <seealso cref="Keras.Base" />
+    public class Ftrl : Base
+    {
+        /// <summary>
+        /// Initializes a new instance of the <see cref="Ftrl"/> class.
+        /// </summary>
+        /// <param name="lr">float >= 0. Learning rate.</param>
+        /// <param name="lrp">float <= 0. Learning rate power.</param>
+        /// <param name="iav">float >= 0. Initial accumulator value.</param>
+        /// <param name="l1rs">float >= 0. L1 (lambda 1) regularization strength.</param>
+        /// <param name="l2rs">float >= 0. L2 (lambda 2) regularization strength.</param>
+        /// <param name="l2srs">float >= 0. L2 shrinkage regularization strength.</param>
+        /// <param name="beta">float. Beta value from the FTRL paper. Defaults to 0.</param>
+        public Ftrl(float lr = 0.001f, float lrp = -0.5f, float iav = 0.1f, float l1rs = 0.0f, float l2rs = 0.0f, float l2srs = 0.0f, float beta = 0.0f)
+        {
+            Parameters["learning_rate"] = lr;
+            Parameters["learning_rate_power"] = lrp;
+            Parameters["initial_accumulator_value"] = iav;
+            Parameters["l1_regularization_strength"] = l1rs;
+            Parameters["l2_regularization_strength"] = l2rs;
+            Parameters["l2_shrinkage_regularization_strength"] = l2srs;
+            Parameters["beta"] = beta;
+
+            PyInstance = Instance.keras.optimizers.Ftrl;
+            Init();
+        }
+    }
}
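
For reference, a minimal usage sketch of the new optimizer, assuming the Keras.NET-style Sequential/Dense/Compile API used elsewhere in this repository (the model, layer size, loss, and namespaces below are illustrative assumptions, not part of this change):

    // Illustrative sketch only: compile a small model with the new Ftrl optimizer.
    // Constructor argument names follow the parameters added in this diff.
    using Keras;
    using Keras.Layers;
    using Keras.Models;

    var model = new Sequential();
    // A single sigmoid unit over a 20-feature input: the kind of shallow,
    // sparse-feature model FTRL is intended for.
    model.Add(new Dense(1, activation: "sigmoid", input_shape: new Shape(20)));
    model.Compile(
        optimizer: new Ftrl(lr: 0.001f, lrp: -0.5f, l1rs: 0.01f),
        loss: "binary_crossentropy",
        metrics: new string[] { "accuracy" });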