Merge pull request argmin-rs#68 from Maher4Ever/feature/add-nalgebra-support

Add nalgebra support
stefan-k authored Aug 24, 2020
2 parents 7860158 + 884d04e commit 57327d8
Showing 25 changed files with 2,080 additions and 26 deletions.
2 changes: 2 additions & 0 deletions Cargo.toml
@@ -23,6 +23,7 @@ bincode = "1.1.4"
ctrlc = { version = "3.1.2", optional = true }
gnuplot = { version = "0.0.37", optional = true}
paste = "1.0.0"
nalgebra = { version = "0.21.1", optional = true, features = ["serde-serialize"] }
ndarray = { version = "0.13", optional = true, features = ["serde-1"] }
ndarray-linalg = { version = "0.12", optional = true }
ndarray-rand = {version = "0.11.0", optional = true }
@@ -45,6 +46,7 @@ argmin_testfunctions = "0.1.1"

[features]
default = []
nalgebral = ["nalgebra"]
ndarrayl = ["ndarray", "ndarray-linalg", "ndarray-rand"]
visualizer = ["gnuplot"]

7 changes: 4 additions & 3 deletions README.md
@@ -87,7 +87,7 @@ There are additional features which can be activated in `Cargo.toml`:

```toml
[dependencies]
argmin = { version = "0.3.1", features = ["ctrlc", "ndarrayl"] }
argmin = { version = "0.3.1", features = ["ctrlc", "ndarrayl", "nalgebral"] }
```

These may become default features in the future. Without these features compilation to
@@ -96,13 +96,14 @@
- `ctrlc`: Uses the `ctrlc` crate to properly stop the optimization (and return the current best
result) after pressing Ctrl+C.
- `ndarrayl`: Support for `ndarray`, `ndarray-linalg` and `ndarray-rand`.
- `nalgebral`: Support for [`nalgebra`](https://nalgebra.org)

### Running the tests

Running the tests requires the `ndarrayl` feature to be enabled
Running the tests requires the `ndarrayl` and `nalgebral` features to be enabled

```bash
cargo test --features "ndarrayl"
cargo test --features "ndarrayl nalgebral"
```

## Defining a problem
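A note on the feature flags added above (not itself part of the diff): if only the nalgebra backend is needed, the `nalgebral` feature can be enabled on its own, e.g. `argmin = { version = "0.3.1", features = ["nalgebral"] }` under `[dependencies]`.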
92 changes: 92 additions & 0 deletions examples/gaussnewton_nalgebra.rs
@@ -0,0 +1,92 @@
// Copyright 2018-2020 argmin developers
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use argmin::prelude::*;
use argmin::solver::gaussnewton::GaussNewton;

use nalgebra::{DMatrix, DVector};

type Rate = f64;
type S = f64;
type Measurement = (S, Rate);

// Example taken from Wikipedia: https://en.wikipedia.org/wiki/Gauss%E2%80%93Newton_algorithm
// Model used in this example:
// `rate = (V_{max} * [S]) / (K_M + [S]) `
// where `V_{max}` and `K_M` are the sought parameters and `[S]` and `rate` are the measured data.
struct Problem {
    data: Vec<Measurement>,
}

impl ArgminOp for Problem {
    type Param = DVector<f64>;
    type Output = DVector<f64>;
    type Hessian = ();
    type Jacobian = DMatrix<f64>;
    type Float = f64;

    fn apply(&self, p: &Self::Param) -> Result<Self::Output, Error> {
        Ok(DVector::from_vec(
            self.data
                .iter()
                .map(|(s, rate)| rate - (p[0] * s) / (p[1] + s))
                .collect(),
        ))
    }

    fn jacobian(&self, p: &Self::Param) -> Result<Self::Jacobian, Error> {
        // One row per measurement (7 in total), one column per parameter (V_max, K_M).
        Ok(DMatrix::from_fn(7, 2, |si, i| {
            if i == 0 {
                -self.data[si].0 / (p[1] + self.data[si].0)
            } else {
                p[0] * self.data[si].0 / (p[1] + self.data[si].0).powi(2)
            }
        }))
    }
}

fn run() -> Result<(), Error> {
    // Define cost function
    // Example taken from Wikipedia: https://en.wikipedia.org/wiki/Gauss%E2%80%93Newton_algorithm
    let cost = Problem {
        data: vec![
            (0.038, 0.050),
            (0.194, 0.127),
            (0.425, 0.094),
            (0.626, 0.2122),
            (1.253, 0.2729),
            (2.5, 0.2665),
            (3.74, 0.3317),
        ],
    };

    // Define initial parameter vector
    let init_param: DVector<f64> = DVector::from_vec(vec![0.9, 0.2]);

    // Set up solver
    let solver: GaussNewton<f64> = GaussNewton::new();

    // Run solver
    let res = Executor::new(cost, solver, init_param)
        .add_observer(ArgminSlogLogger::term(), ObserverMode::Always)
        .max_iters(10)
        .run()?;

    // Wait a second (lets the logger flush everything before printing again)
    std::thread::sleep(std::time::Duration::from_secs(1));

    // Print result
    println!("{}", res);
    Ok(())
}

fn main() {
    if let Err(ref e) = run() {
        println!("{}", e);
        std::process::exit(1);
    }
}
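For reference (not part of the commit), the expressions hard-coded in `apply` and `jacobian` above follow directly from the model `rate = (V_max * [S]) / (K_M + [S])` with `p[0] = V_max` and `p[1] = K_M`: for a measurement `(s_i, rate_i)` the residual and its partial derivatives are

```latex
r_i(p) = \mathrm{rate}_i - \frac{p_0 s_i}{p_1 + s_i}, \qquad
\frac{\partial r_i}{\partial p_0} = -\frac{s_i}{p_1 + s_i}, \qquad
\frac{\partial r_i}{\partial p_1} = \frac{p_0 s_i}{(p_1 + s_i)^2},
```

which is exactly what the 7×2 Jacobian built with `DMatrix::from_fn` holds (column 0 for `V_max`, column 1 for `K_M`). The example should be runnable with something like `cargo run --example gaussnewton_nalgebra --features nalgebral`, assuming no further features are required.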
230 changes: 230 additions & 0 deletions src/core/math/add_nalgebra.rs
@@ -0,0 +1,230 @@
// Copyright 2018-2020 argmin developers
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use crate::core::math::ArgminAdd;

use nalgebra::{
    base::{
        allocator::{Allocator, SameShapeAllocator},
        constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint},
        dimension::Dim,
        storage::Storage,
        Scalar,
    },
    ClosedAdd, DefaultAllocator, Matrix, MatrixMN, MatrixSum,
};

impl<N, R, C, S> ArgminAdd<N, MatrixMN<N, R, C>> for Matrix<N, R, C, S>
where
    N: Scalar + ClosedAdd + Copy,
    R: Dim,
    C: Dim,
    S: Storage<N, R, C>,
    DefaultAllocator: Allocator<N, R, C>,
{
    #[inline]
    fn add(&self, other: &N) -> MatrixMN<N, R, C> {
        self.add_scalar(*other)
    }
}

impl<N, R, C, S> ArgminAdd<Matrix<N, R, C, S>, MatrixMN<N, R, C>> for N
where
    N: Scalar + ClosedAdd + Copy,
    R: Dim,
    C: Dim,
    S: Storage<N, R, C>,
    DefaultAllocator: Allocator<N, R, C>,
{
    #[inline]
    fn add(&self, other: &Matrix<N, R, C, S>) -> MatrixMN<N, R, C> {
        other.add_scalar(*self)
    }
}

impl<N, R1, C1, R2, C2, SA, SB> ArgminAdd<Matrix<N, R2, C2, SB>, MatrixSum<N, R1, C1, R2, C2>>
    for Matrix<N, R1, C1, SA>
where
    N: Scalar + ClosedAdd,
    R1: Dim,
    C1: Dim,
    R2: Dim,
    C2: Dim,
    SA: Storage<N, R1, C1>,
    SB: Storage<N, R2, C2>,
    DefaultAllocator: SameShapeAllocator<N, R1, C1, R2, C2>,
    ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
{
    #[inline]
    fn add(&self, other: &Matrix<N, R2, C2, SB>) -> MatrixSum<N, R1, C1, R2, C2> {
        self + other
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use nalgebra::{DMatrix, DVector, Matrix2x3, Vector3};
    use paste::item;

    macro_rules! make_test {
        ($t:ty) => {
            item! {
                #[test]
                fn [<test_add_vec_scalar_ $t>]() {
                    let a = Vector3::new(1 as $t, 4 as $t, 8 as $t);
                    let b = 34 as $t;
                    let target = Vector3::new(35 as $t, 38 as $t, 42 as $t);
                    let res = <Vector3<$t> as ArgminAdd<$t, Vector3<$t>>>::add(&a, &b);
                    for i in 0..3 {
                        assert!(((target[i] - res[i]) as f64).abs() < std::f64::EPSILON);
                    }
                }
            }

            item! {
                #[test]
                fn [<test_add_scalar_vec_ $t>]() {
                    let a = Vector3::new(1 as $t, 4 as $t, 8 as $t);
                    let b = 34 as $t;
                    let target = Vector3::new(35 as $t, 38 as $t, 42 as $t);
                    let res = <$t as ArgminAdd<Vector3<$t>, Vector3<$t>>>::add(&b, &a);
                    for i in 0..3 {
                        assert!(((target[i] - res[i]) as f64).abs() < std::f64::EPSILON);
                    }
                }
            }

            item! {
                #[test]
                fn [<test_add_vec_vec_ $t>]() {
                    let a = Vector3::new(1 as $t, 4 as $t, 8 as $t);
                    let b = Vector3::new(41 as $t, 38 as $t, 34 as $t);
                    let target = Vector3::new(42 as $t, 42 as $t, 42 as $t);
                    let res = <Vector3<$t> as ArgminAdd<Vector3<$t>, Vector3<$t>>>::add(&a, &b);
                    for i in 0..3 {
                        assert!(((target[i] - res[i]) as f64).abs() < std::f64::EPSILON);
                    }
                }
            }

            item! {
                #[test]
                #[should_panic]
                fn [<test_add_vec_vec_panic_ $t>]() {
                    let a = DVector::from_vec(vec![1 as $t, 4 as $t]);
                    let b = DVector::from_vec(vec![41 as $t, 38 as $t, 34 as $t]);
                    <DVector<$t> as ArgminAdd<DVector<$t>, DVector<$t>>>::add(&a, &b);
                }
            }

            item! {
                #[test]
                #[should_panic]
                fn [<test_add_vec_vec_panic_2_ $t>]() {
                    let a = DVector::from_vec(vec![]);
                    let b = DVector::from_vec(vec![41 as $t, 38 as $t, 34 as $t]);
                    <DVector<$t> as ArgminAdd<DVector<$t>, DVector<$t>>>::add(&a, &b);
                }
            }

            item! {
                #[test]
                #[should_panic]
                fn [<test_add_vec_vec_panic_3_ $t>]() {
                    let a = DVector::from_vec(vec![41 as $t, 38 as $t, 34 as $t]);
                    let b = DVector::from_vec(vec![]);
                    <DVector<$t> as ArgminAdd<DVector<$t>, DVector<$t>>>::add(&a, &b);
                }
            }

            item! {
                #[test]
                fn [<test_add_mat_mat_ $t>]() {
                    let a = Matrix2x3::new(
                        1 as $t, 4 as $t, 8 as $t,
                        2 as $t, 5 as $t, 9 as $t
                    );
                    let b = Matrix2x3::new(
                        41 as $t, 38 as $t, 34 as $t,
                        40 as $t, 37 as $t, 33 as $t
                    );
                    let target = Matrix2x3::new(
                        42 as $t, 42 as $t, 42 as $t,
                        42 as $t, 42 as $t, 42 as $t
                    );
                    let res = <Matrix2x3<$t> as ArgminAdd<Matrix2x3<$t>, Matrix2x3<$t>>>::add(&a, &b);
                    for i in 0..3 {
                        for j in 0..2 {
                            assert!(((target[(j, i)] - res[(j, i)]) as f64).abs() < std::f64::EPSILON);
                        }
                    }
                }
            }

            item! {
                #[test]
                fn [<test_add_mat_scalar_ $t>]() {
                    let a = Matrix2x3::new(
                        1 as $t, 4 as $t, 8 as $t,
                        2 as $t, 5 as $t, 9 as $t
                    );
                    let b = 2 as $t;
                    let target = Matrix2x3::new(
                        3 as $t, 6 as $t, 10 as $t,
                        4 as $t, 7 as $t, 11 as $t
                    );
                    let res = <Matrix2x3<$t> as ArgminAdd<$t, Matrix2x3<$t>>>::add(&a, &b);
                    for i in 0..3 {
                        for j in 0..2 {
                            assert!(((target[(j, i)] - res[(j, i)]) as f64).abs() < std::f64::EPSILON);
                        }
                    }
                }
            }

            item! {
                #[test]
                #[should_panic]
                fn [<test_add_mat_mat_panic_2_ $t>]() {
                    let a = DMatrix::from_vec(2, 3, vec![
                        1 as $t, 4 as $t, 8 as $t,
                        2 as $t, 5 as $t, 9 as $t
                    ]);
                    let b = DMatrix::from_vec(1, 2, vec![
                        41 as $t, 38 as $t,
                    ]);
                    <DMatrix<$t> as ArgminAdd<DMatrix<$t>, DMatrix<$t>>>::add(&a, &b);
                }
            }

            item! {
                #[test]
                #[should_panic]
                fn [<test_add_mat_mat_panic_3_ $t>]() {
                    let a = DMatrix::from_vec(2, 3, vec![
                        1 as $t, 4 as $t, 8 as $t,
                        2 as $t, 5 as $t, 9 as $t
                    ]);
                    let b = DMatrix::from_vec(0, 0, vec![]);
                    <DMatrix<$t> as ArgminAdd<DMatrix<$t>, DMatrix<$t>>>::add(&a, &b);
                }
            }
        };
    }

    make_test!(i8);
    make_test!(u8);
    make_test!(i16);
    make_test!(u16);
    make_test!(i32);
    make_test!(u32);
    make_test!(i64);
    make_test!(u64);
    make_test!(f32);
    make_test!(f64);
}
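For orientation, here is a minimal sketch (not part of the commit) of the three impls above in use, written with the same fully-qualified call syntax as the tests. It assumes `ArgminAdd` is re-exported through `argmin::prelude` (inside the crate the tests reach it via `crate::core::math::ArgminAdd`); adjust the import if that assumption does not hold.

```rust
use argmin::prelude::*; // assumed to re-export ArgminAdd
use nalgebra::Vector3;

fn main() {
    let v = Vector3::new(1.0f64, 2.0, 3.0);
    let w = Vector3::new(4.0f64, 5.0, 6.0);

    // matrix + scalar (first impl): element-wise shift via nalgebra's `add_scalar`
    let a = <Vector3<f64> as ArgminAdd<f64, Vector3<f64>>>::add(&v, &10.0);

    // scalar + matrix (second impl): same result with the operands swapped
    let b = <f64 as ArgminAdd<Vector3<f64>, Vector3<f64>>>::add(&10.0, &w);

    // matrix + matrix (third impl): element-wise sum; shapes must agree or it panics
    let c = <Vector3<f64> as ArgminAdd<Vector3<f64>, Vector3<f64>>>::add(&v, &w);

    println!("{} {} {}", a, b, c);
}
```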