
[Cleanup][B-10]Replace to_variable #61531

Merged 1 commit on Feb 3, 2024.
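The change is mechanical: every call to the legacy dygraph helper to_variable (imported from the private paddle.base.dygraph namespace) is replaced with the public paddle.to_tensor API, and the now-unused imports are dropped. A minimal before/after sketch of the pattern, assuming a NumPy float32 input as in the tests below:

    import numpy as np
    import paddle

    x_np = np.random.uniform(-1, 1, [10, 10]).astype("float32")

    # Before (removed by this PR): private helper from the base namespace.
    #   from paddle.base.dygraph.base import to_variable
    #   x = to_variable(x_np)

    # After: the public API. It copies the NumPy array into a dygraph
    # Tensor; stop_gradient=True is the documented default.
    x = paddle.to_tensor(x_np)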
12 changes: 6 additions & 6 deletions test/legacy_test/test_imperative_hook_for_layer.py
@@ -17,9 +17,9 @@
 import numpy as np
 from test_imperative_lod_tensor_to_selected_rows import SimpleNet
 
+import paddle
 from paddle import base
 from paddle.base import core
-from paddle.base.dygraph import base as imperative_base
 
 call_forward_post_hook = False
 call_forward_pre_hook = False
@@ -78,9 +78,9 @@ def test_forward_hook_return_value(self):
             )
             y_data = y_data.reshape((-1, 1))
 
-            input = imperative_base.to_variable(input_word)
-            input1 = imperative_base.to_variable(input_word1)
-            y = imperative_base.to_variable(y_data)
+            input = paddle.to_tensor(input_word)
+            input1 = paddle.to_tensor(input_word1)
+            y = paddle.to_tensor(y_data)
 
             simplenet = SimpleNet(
                 hidden_size=20,
@@ -161,8 +161,8 @@ def test_forward_hook(self):
             )
             y_data = y_data.reshape((-1, 1))
 
-            input = imperative_base.to_variable(input_word)
-            y = imperative_base.to_variable(y_data)
+            input = paddle.to_tensor(input_word)
+            y = paddle.to_tensor(y_data)
 
             simplenet = SimpleNet(
                 hidden_size=20,
3 changes: 1 addition & 2 deletions test/legacy_test/test_imperative_layer_trainable.py
@@ -18,15 +18,14 @@
 
 import paddle
 from paddle import base
-from paddle.base import dygraph
 
 
 class TestImperativeLayerTrainable(unittest.TestCase):
     def test_set_trainable(self):
         with base.dygraph.guard():
             label = np.random.uniform(-1, 1, [10, 10]).astype(np.float32)
 
-            label = dygraph.to_variable(label)
+            label = paddle.to_tensor(label)
 
             linear = paddle.nn.Linear(10, 10)
             y = linear(label)
5 changes: 2 additions & 3 deletions (file name not captured)
@@ -21,7 +21,6 @@
 import paddle
 from paddle import base
 from paddle.base import core
-from paddle.base.dygraph.base import to_variable
 
 
 class SimpleNet(paddle.nn.Layer):
@@ -133,8 +132,8 @@ def simple_net_float32(self, is_sparse, dtype):
                 x_data = x_data.reshape((-1, num_steps))
                 y_data = y_data.reshape((-1, 1))
 
-                x = to_variable(x_data)
-                y = to_variable(y_data)
+                x = paddle.to_tensor(x_data)
+                y = paddle.to_tensor(y_data)
                 outs = simple_net(x, y)
                 dy_loss = outs
                 if i == 0:
5 changes: 2 additions & 3 deletions test/legacy_test/test_imperative_mnist_sorted_gradient.py
@@ -21,7 +21,6 @@
 import paddle
 from paddle import base
 from paddle.base import core
-from paddle.base.dygraph.base import to_variable
 
 
 class TestImperativeMnistSortGradient(unittest.TestCase):
@@ -55,8 +54,8 @@ def test_mnist_sort_gradient_float32(self):
                     .reshape(128, 1)
                 )
 
-                img2 = to_variable(dy_x_data2)
-                label2 = to_variable(y_data2)
+                img2 = paddle.to_tensor(dy_x_data2)
+                label2 = paddle.to_tensor(y_data2)
                 label2.stop_gradient = True
 
                 cost2 = mnist2(img2)
2 changes: 1 addition & 1 deletion test/legacy_test/test_imperative_named_members.py
@@ -91,7 +91,7 @@ def __init__(self):
         self.linear2 = paddle.nn.Linear(5, 5)
         self.conv2d = paddle.nn.Conv2D(3, 2, 3)
         self.embedding = paddle.nn.Embedding(128, 16)
-        self.h_0 = base.dygraph.to_variable(
+        self.h_0 = paddle.to_tensor(
             np.zeros([10, 10]).astype('float32')
         )
         self.weight = self.create_parameter(
9 changes: 4 additions & 5 deletions test/legacy_test/test_imperative_ocr_attention_model.py
@@ -20,7 +20,6 @@
 import paddle
 from paddle import base
 from paddle.base import core
-from paddle.base.dygraph.base import to_variable
 from paddle.nn import BatchNorm, Linear
 
 
@@ -210,7 +209,7 @@ def __init__(
             h_0 = np.zeros(
                 (Config.batch_size, rnn_hidden_size), dtype="float32"
             )
-            h_0 = to_variable(h_0)
+            h_0 = paddle.to_tensor(h_0)
         else:
             h_0 = paddle.tensor.fill_constant(
                 shape=[Config.batch_size, rnn_hidden_size],
@@ -464,10 +463,10 @@ def run_dygraph():
                 dy_param_init_value[param.name] = param.numpy()
             for epoch in range(epoch_num):
                 for batch_id in range(batch_num):
-                    label_in = to_variable(label_in_np)
-                    label_out = to_variable(label_out_np)
+                    label_in = paddle.to_tensor(label_in_np)
+                    label_out = paddle.to_tensor(label_out_np)
                     label_out.stop_gradient = True
-                    img = to_variable(image_np)
+                    img = paddle.to_tensor(image_np)
                     dy_prediction = ocr_attention(img, label_in)
                     label_out = paddle.reshape(label_out, [-1, 1])
                     dy_prediction = paddle.reshape(
8 changes: 4 additions & 4 deletions test/legacy_test/test_imperative_optimizer.py
@@ -234,7 +234,7 @@ def test_constant_lr(self):
 
             linear = paddle.nn.Linear(10, 10)
 
-            a = base.dygraph.to_variable(a)
+            a = paddle.to_tensor(a)
 
             b = linear(a)
 
@@ -258,7 +258,7 @@ def test_lr_decay(self):
 
             linear = paddle.nn.Linear(10, 10)
 
-            a = base.dygraph.to_variable(a)
+            a = paddle.to_tensor(a)
 
             b = linear(a)
 
@@ -289,7 +289,7 @@ def test_lr_decay_natural_exp(self):
 
             linear = paddle.nn.Linear(10, 10)
 
-            a = base.dygraph.to_variable(a)
+            a = paddle.to_tensor(a)
 
             b = linear(a)
 
@@ -324,7 +324,7 @@ def test_set_lr(self):
 
             linear = paddle.nn.Linear(10, 10)
 
-            a = base.dygraph.to_variable(a)
+            a = paddle.to_tensor(a)
 
             b = linear(a)
 
12 changes: 6 additions & 6 deletions test/legacy_test/test_imperative_optimizer_v2.py
@@ -533,7 +533,7 @@ def test_constant_lr(self):
 
             linear = paddle.nn.Linear(10, 10)
 
-            a = base.dygraph.to_variable(a)
+            a = paddle.to_tensor(a)
 
             b = linear(a)
 
@@ -557,7 +557,7 @@ def test_lr_decay(self):
 
             linear = paddle.nn.Linear(10, 10)
 
-            a = base.dygraph.to_variable(a)
+            a = paddle.to_tensor(a)
 
             b = linear(a)
 
@@ -585,7 +585,7 @@ def test_lr_scheduler_natural_exp(self):
             a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
 
             linear = paddle.nn.Linear(10, 10)
-            a = base.dygraph.to_variable(a)
+            a = paddle.to_tensor(a)
             b = linear(a)
 
             loss = paddle.mean(b)
@@ -611,7 +611,7 @@ def test_set_lr(self):
 
             linear = paddle.nn.Linear(10, 10)
 
-            a = base.dygraph.to_variable(a)
+            a = paddle.to_tensor(a)
 
             b = linear(a)
 
@@ -647,7 +647,7 @@ def test_set_lr_scheduler(self):
 
             linear = paddle.nn.Linear(10, 10)
 
-            a = base.dygraph.to_variable(a)
+            a = paddle.to_tensor(a)
 
             b = linear(a)
 
@@ -879,7 +879,7 @@ def test_parameter_list(self):
             )
 
             in_np = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
-            in_data = base.dygraph.to_variable(in_np)
+            in_data = paddle.to_tensor(in_np)
 
             y = linear_1(in_data)
             y = linear_2(y)
13 changes: 8 additions & 5 deletions test/legacy_test/test_imperative_parallel_coalesce_split.py
@@ -21,7 +21,6 @@
 import paddle.nn.functional as F
 from paddle import base
 from paddle.base import core
-from paddle.base.dygraph.base import to_variable
 
 
 class MyLayer(paddle.nn.Layer):
@@ -49,10 +48,14 @@ def test_coalesce_split(self):
 
         # test variables prepare
        vars = []
-        vars.append(to_variable(np.random.random([2, 3]).astype("float32")))
-        vars.append(to_variable(np.random.random([4, 9]).astype("float32")))
         vars.append(
-            to_variable(np.random.random([10, 1]).astype("float32"))
+            paddle.to_tensor(np.random.random([2, 3]).astype("float32"))
+        )
+        vars.append(
+            paddle.to_tensor(np.random.random([4, 9]).astype("float32"))
+        )
+        vars.append(
+            paddle.to_tensor(np.random.random([10, 1]).astype("float32"))
         )
         var_groups = OrderedDict()
         var_groups.setdefault(0, vars)
@@ -81,7 +84,7 @@ def test_reshape_inplace(self):
         ori_shape = [2, 25]
         new_shape = [5, 10]
         x_data = np.random.random(ori_shape).astype("float32")
-        x = to_variable(x_data)
+        x = paddle.to_tensor(x_data)
         _reshape_inplace(x, new_shape)
         self.assertEqual(x.shape, new_shape)
 
2 changes: 1 addition & 1 deletion test/legacy_test/test_imperative_partitial_backward.py
@@ -24,7 +24,7 @@ class TestImperativePartitialBackward(unittest.TestCase):
     def test_partitial_backward(self):
         with base.dygraph.guard():
             x = np.random.randn(2, 4, 5).astype("float32")
-            x = base.dygraph.to_variable(x)
+            x = paddle.to_tensor(x)
             linear1 = paddle.nn.Linear(5, 10)
             linear2 = paddle.nn.Linear(5, 10)
 
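Not part of the diff — a quick sanity check a reviewer could run to confirm the replacement semantics (a sketch, assuming a Paddle version where paddle.to_tensor is available):

    import numpy as np
    import paddle

    a = np.random.randn(2, 4, 5).astype("float32")
    t = paddle.to_tensor(a)

    # paddle.to_tensor copies the data into a dygraph Tensor; data
    # tensors default to stop_gradient=True, matching how the tests
    # treated to_variable outputs.
    assert t.stop_gradient
    assert t.shape == [2, 4, 5]
    np.testing.assert_array_equal(t.numpy(), a)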