Skip to content

Commit

Permalink
Remove _devauto functions (tracel-ai#518) (tracel-ai#1110)
Browse files Browse the repository at this point in the history
  • Loading branch information
kpot authored Jan 6, 2024
1 parent fab344c commit 9729753
Show file tree
Hide file tree
Showing 185 changed files with 3,007 additions and 2,322 deletions.
5 changes: 3 additions & 2 deletions burn-autodiff/src/tests/abs.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,9 @@ mod tests {
let data_1 = Data::<f32, 2>::from([[0.0, -1.0], [3.0, 4.0]]);
let data_2 = Data::<f32, 2>::from([[6.0, 7.0], [9.0, -10.0]]);

let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let device = Default::default();
let tensor_1 = TestAutodiffTensor::from_data(data_1, &device).require_grad();
let tensor_2 = TestAutodiffTensor::from_data(data_2, &device).require_grad();

let tensor_3 = tensor_1.clone().matmul(tensor_2.clone().abs());
let tensor_4 = tensor_3.matmul(tensor_2.clone());
Expand Down
17 changes: 11 additions & 6 deletions burn-autodiff/src/tests/adaptive_avgpool1d.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,10 +13,13 @@ mod tests {
output_size: 3,
};

test.assert_output(TestTensor::from_floats_devauto([[
[0.5000, 0.8333, 0.3333, 0.8333, 0.5000],
[0.5000, 0.8333, 0.3333, 0.8333, 0.5000],
]]));
test.assert_output(TestTensor::from_floats(
[[
[0.5000, 0.8333, 0.3333, 0.8333, 0.5000],
[0.5000, 0.8333, 0.3333, 0.8333, 0.5000],
]],
&Default::default(),
));
}

struct AdaptiveAvgPool1dTestCase {
Expand All @@ -29,11 +32,13 @@ mod tests {
impl AdaptiveAvgPool1dTestCase {
fn assert_output(self, x_grad: TestTensor<3>) {
let shape_x = Shape::new([self.batch_size, self.channels, self.length]);
let x = TestAutodiffTensor::from_data_devauto(
TestTensorInt::arange_devauto(0..shape_x.num_elements())
let device = Default::default();
let x = TestAutodiffTensor::from_data(
TestTensorInt::arange(0..shape_x.num_elements(), &device)
.reshape(shape_x)
.into_data()
.convert(),
&device,
)
.require_grad();
let output = adaptive_avg_pool1d(x.clone(), self.output_size);
Expand Down
41 changes: 23 additions & 18 deletions burn-autodiff/src/tests/adaptive_avgpool2d.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,22 +15,25 @@ mod tests {
output_size_2: 2,
};

test.assert_output(TestTensor::from_floats_devauto([[
[
[0.2500, 0.5000, 0.2500],
[0.4167, 0.8333, 0.4167],
[0.1667, 0.3333, 0.1667],
[0.4167, 0.8333, 0.4167],
[0.2500, 0.5000, 0.2500],
],
[
[0.2500, 0.5000, 0.2500],
[0.4167, 0.8333, 0.4167],
[0.1667, 0.3333, 0.1667],
[0.4167, 0.8333, 0.4167],
[0.2500, 0.5000, 0.2500],
],
]]));
test.assert_output(TestTensor::from_floats(
[[
[
[0.2500, 0.5000, 0.2500],
[0.4167, 0.8333, 0.4167],
[0.1667, 0.3333, 0.1667],
[0.4167, 0.8333, 0.4167],
[0.2500, 0.5000, 0.2500],
],
[
[0.2500, 0.5000, 0.2500],
[0.4167, 0.8333, 0.4167],
[0.1667, 0.3333, 0.1667],
[0.4167, 0.8333, 0.4167],
[0.2500, 0.5000, 0.2500],
],
]],
&Default::default(),
));
}

struct AdaptiveAvgPool2dTestCase {
Expand All @@ -45,11 +48,13 @@ mod tests {
impl AdaptiveAvgPool2dTestCase {
fn assert_output(self, x_grad: TestTensor<4>) {
let shape_x = Shape::new([self.batch_size, self.channels, self.height, self.width]);
let x = TestAutodiffTensor::from_data_devauto(
TestTensorInt::arange_devauto(0..shape_x.num_elements())
let device = Default::default();
let x = TestAutodiffTensor::from_data(
TestTensorInt::arange(0..shape_x.num_elements(), &device)
.reshape(shape_x)
.into_data()
.convert(),
&device,
)
.require_grad();
let output = adaptive_avg_pool2d(x.clone(), [self.output_size_1, self.output_size_2]);
Expand Down
14 changes: 8 additions & 6 deletions burn-autodiff/src/tests/add.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,9 @@ mod tests {

#[test]
fn should_diff_add() {
let tensor_1 = TestAutodiffTensor::from_floats_devauto([2.0, 5.0]).require_grad();
let tensor_2 = TestAutodiffTensor::from_floats_devauto([4.0, 1.0]).require_grad();
let device = Default::default();
let tensor_1 = TestAutodiffTensor::from_floats([2.0, 5.0], &device).require_grad();
let tensor_2 = TestAutodiffTensor::from_floats([4.0, 1.0], &device).require_grad();

let tensor_3 = tensor_1.clone() + tensor_2.clone();
let grads = tensor_3.backward();
Expand All @@ -23,7 +24,7 @@ mod tests {
fn should_diff_add_scalar() {
let data = Data::from([2.0, 10.0]);

let tensor = TestAutodiffTensor::from_data_devauto(data).require_grad();
let tensor = TestAutodiffTensor::from_data(data, &Default::default()).require_grad();
let tensor_out = tensor.clone().add_scalar(5.0);
let grads = tensor_out.backward();

Expand All @@ -39,9 +40,10 @@ mod tests {
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);

let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_3 = TestAutodiffTensor::from_data_devauto(data_3).require_grad();
let device = Default::default();
let tensor_1 = TestAutodiffTensor::from_data(data_1, &device).require_grad();
let tensor_2 = TestAutodiffTensor::from_data(data_2, &device).require_grad();
let tensor_3 = TestAutodiffTensor::from_data(data_3, &device).require_grad();

let tensor_4 = tensor_1.clone().add(tensor_2.clone());
let tensor_5 = tensor_4
Expand Down
25 changes: 15 additions & 10 deletions burn-autodiff/src/tests/aggregation.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,9 @@ mod tests {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [-2.0, -3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, -7.0], [2.0, 3.0]]);

let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let device = Default::default();
let tensor_1 = TestAutodiffTensor::from_data(data_1, &device).require_grad();
let tensor_2 = TestAutodiffTensor::from_data(data_2, &device).require_grad();

let tensor_3 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_4 = tensor_1.clone().mul(tensor_3.mean().unsqueeze());
Expand All @@ -31,8 +32,9 @@ mod tests {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [-2.0, -3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, -7.0], [2.0, 3.0]]);

let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let device = Default::default();
let tensor_1 = TestAutodiffTensor::from_data(data_1, &device).require_grad();
let tensor_2 = TestAutodiffTensor::from_data(data_2, &device).require_grad();

let tensor_3 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_4 = tensor_1.clone().mul(tensor_3.sum().unsqueeze());
Expand All @@ -54,8 +56,9 @@ mod tests {
let data_1 = Data::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::from([[6.0, 7.0], [9.0, 10.0]]);

let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let device = Default::default();
let tensor_1 = TestAutodiffTensor::from_data(data_1, &device).require_grad();
let tensor_2 = TestAutodiffTensor::from_data(data_2, &device).require_grad();

let tensor_3 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_4 = tensor_3.clone().sum_dim(1);
Expand All @@ -78,8 +81,9 @@ mod tests {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [-2.0, -3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, -7.0], [2.0, 3.0]]);

let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let device = Default::default();
let tensor_1 = TestAutodiffTensor::from_data(data_1, &device).require_grad();
let tensor_2 = TestAutodiffTensor::from_data(data_2, &device).require_grad();

let tensor_3 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_4 = tensor_1.clone().mul(tensor_3.mean_dim(1).unsqueeze());
Expand All @@ -101,8 +105,9 @@ mod tests {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [-2.0, -3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, -7.0], [2.0, 3.0]]);

let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let device = Default::default();
let tensor_1 = TestAutodiffTensor::from_data(data_1, &device).require_grad();
let tensor_2 = TestAutodiffTensor::from_data(data_2, &device).require_grad();

let tensor_3 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_4 = tensor_1.clone().mul(tensor_3.sum_dim(1).unsqueeze());
Expand Down
35 changes: 22 additions & 13 deletions burn-autodiff/src/tests/avgpool1d.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,10 @@ mod tests {
count_include_pad: true,
};

test.assert_output(TestTensor::from_floats_devauto([[[
0.3333, 0.6667, 1.0000, 1.0000, 0.6667, 0.3333,
]]]));
test.assert_output(TestTensor::from_floats(
[[[0.3333, 0.6667, 1.0000, 1.0000, 0.6667, 0.3333]]],
&Default::default(),
));
}

#[test]
Expand All @@ -33,10 +34,13 @@ mod tests {
count_include_pad: true,
};

test.assert_output(TestTensor::from_floats_devauto([[
[0.3333, 0.6667, 0.3333, 0.6667, 0.3333, 0.3333],
[0.3333, 0.6667, 0.3333, 0.6667, 0.3333, 0.3333],
]]));
test.assert_output(TestTensor::from_floats(
[[
[0.3333, 0.6667, 0.3333, 0.6667, 0.3333, 0.3333],
[0.3333, 0.6667, 0.3333, 0.6667, 0.3333, 0.3333],
]],
&Default::default(),
));
}

#[test]
Expand All @@ -51,10 +55,13 @@ mod tests {
count_include_pad: false,
};

test.assert_output(TestTensor::from_floats_devauto([[
[0.5000, 0.8333, 0.3333, 0.6667, 0.3333, 0.3333],
[0.5000, 0.8333, 0.3333, 0.6667, 0.3333, 0.3333],
]]));
test.assert_output(TestTensor::from_floats(
[[
[0.5000, 0.8333, 0.3333, 0.6667, 0.3333, 0.3333],
[0.5000, 0.8333, 0.3333, 0.6667, 0.3333, 0.3333],
]],
&Default::default(),
));
}

struct AvgPool1dTestCase {
Expand All @@ -70,11 +77,13 @@ mod tests {
impl AvgPool1dTestCase {
fn assert_output(self, x_grad: TestTensor<3>) {
let shape_x = Shape::new([self.batch_size, self.channels, self.length]);
let x = TestAutodiffTensor::from_data_devauto(
TestTensorInt::arange_devauto(0..shape_x.num_elements())
let device = Default::default();
let x = TestAutodiffTensor::from_data(
TestTensorInt::arange(0..shape_x.num_elements(), &device)
.reshape(shape_x)
.into_data()
.convert(),
&device,
)
.require_grad();
let output = avg_pool1d(
Expand Down
55 changes: 33 additions & 22 deletions burn-autodiff/src/tests/avgpool2d.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,14 +20,17 @@ mod tests {
count_include_pad: true,
};

test.assert_output(TestTensor::from_floats_devauto([[[
[0.1111, 0.2222, 0.3333, 0.3333, 0.2222, 0.1111],
[0.2222, 0.4444, 0.6667, 0.6667, 0.4444, 0.2222],
[0.3333, 0.6667, 1.0000, 1.0000, 0.6667, 0.3333],
[0.3333, 0.6667, 1.0000, 1.0000, 0.6667, 0.3333],
[0.2222, 0.4444, 0.6667, 0.6667, 0.4444, 0.2222],
[0.1111, 0.2222, 0.3333, 0.3333, 0.2222, 0.1111],
]]]));
test.assert_output(TestTensor::from_floats(
[[[
[0.1111, 0.2222, 0.3333, 0.3333, 0.2222, 0.1111],
[0.2222, 0.4444, 0.6667, 0.6667, 0.4444, 0.2222],
[0.3333, 0.6667, 1.0000, 1.0000, 0.6667, 0.3333],
[0.3333, 0.6667, 1.0000, 1.0000, 0.6667, 0.3333],
[0.2222, 0.4444, 0.6667, 0.6667, 0.4444, 0.2222],
[0.1111, 0.2222, 0.3333, 0.3333, 0.2222, 0.1111],
]]],
&Default::default(),
));
}

#[test]
Expand All @@ -46,12 +49,15 @@ mod tests {
count_include_pad: true,
};

test.assert_output(TestTensor::from_floats_devauto([[[
[0.3333, 0.3333, 0.3333, 0.3333, 0.3333, 0.3333],
[0.5000, 0.5000, 0.5000, 0.5000, 0.5000, 0.5000],
[0.5000, 0.5000, 0.5000, 0.5000, 0.5000, 0.5000],
[0.3333, 0.3333, 0.3333, 0.3333, 0.3333, 0.3333],
]]]));
test.assert_output(TestTensor::from_floats(
[[[
[0.3333, 0.3333, 0.3333, 0.3333, 0.3333, 0.3333],
[0.5000, 0.5000, 0.5000, 0.5000, 0.5000, 0.5000],
[0.5000, 0.5000, 0.5000, 0.5000, 0.5000, 0.5000],
[0.3333, 0.3333, 0.3333, 0.3333, 0.3333, 0.3333],
]]],
&Default::default(),
));
}

#[test]
Expand All @@ -70,12 +76,15 @@ mod tests {
count_include_pad: false,
};

test.assert_output(TestTensor::from_floats_devauto([[[
[0.6250, 0.6250, 0.4167, 0.4167, 0.6250, 0.6250],
[0.8750, 0.8750, 0.5833, 0.5833, 0.8750, 0.8750],
[0.8750, 0.8750, 0.5833, 0.5833, 0.8750, 0.8750],
[0.6250, 0.6250, 0.4167, 0.4167, 0.6250, 0.6250],
]]]));
test.assert_output(TestTensor::from_floats(
[[[
[0.6250, 0.6250, 0.4167, 0.4167, 0.6250, 0.6250],
[0.8750, 0.8750, 0.5833, 0.5833, 0.8750, 0.8750],
[0.8750, 0.8750, 0.5833, 0.5833, 0.8750, 0.8750],
[0.6250, 0.6250, 0.4167, 0.4167, 0.6250, 0.6250],
]]],
&Default::default(),
));
}

struct AvgPool2dTestCase {
Expand All @@ -95,11 +104,13 @@ mod tests {
impl AvgPool2dTestCase {
fn assert_output(self, x_grad: TestTensor<4>) {
let shape_x = Shape::new([self.batch_size, self.channels, self.height, self.width]);
let x = TestAutodiffTensor::from_data_devauto(
TestTensorInt::arange_devauto(0..shape_x.num_elements())
let device = Default::default();
let x = TestAutodiffTensor::from_data(
TestTensorInt::arange(0..shape_x.num_elements(), &device)
.reshape(shape_x)
.into_data()
.convert(),
&device,
)
.require_grad();
let output = avg_pool2d(
Expand Down
7 changes: 4 additions & 3 deletions burn-autodiff/src/tests/backward.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,9 +11,10 @@ mod tests {
[[1.0, 2.0], [4.0, 5.0], [3.0, 4.0]],
[[4.0, 5.0], [8.0, 5.0], [1.0, 9.0]],
]);
let weights = Tensor::<TestAutodiffBackend, 2>::from_data_devauto(weights).require_grad();
let indices = Tensor::<TestAutodiffBackend, 2, Int>::from_data_devauto(indices);
let x = Tensor::<TestAutodiffBackend, 3>::from_data_devauto(x).require_grad();
let device = Default::default();
let weights = Tensor::<TestAutodiffBackend, 2>::from_data(weights, &device).require_grad();
let indices = Tensor::<TestAutodiffBackend, 2, Int>::from_data(indices, &device);
let x = Tensor::<TestAutodiffBackend, 3>::from_data(x, &device).require_grad();

let output = embedding(weights.clone(), indices);
let output = output.matmul(x);
Expand Down
5 changes: 3 additions & 2 deletions burn-autodiff/src/tests/broadcast.rs
Original file line number Diff line number Diff line change
Expand Up @@ -37,8 +37,9 @@ mod tests {
where
F: Fn(TestAutodiffTensor<3>, TestAutodiffTensor<3>) -> TestAutodiffTensor<3>,
{
let w = TestAutodiffTensor::zeros_devauto([16, 5, 5]).require_grad();
let x = TestAutodiffTensor::zeros_devauto([4, 5, 5]).require_grad();
let device = Default::default();
let w = TestAutodiffTensor::zeros([16, 5, 5], &device).require_grad();
let x = TestAutodiffTensor::zeros([4, 5, 5], &device).require_grad();

// Slice isn't a broadcastable operation, so it will fail when the previous backward pass
// of an operation that support broadcast doesn't support it during the backward pass.
Expand Down
Loading

0 comments on commit 9729753

Please sign in to comment.