Polish usage of deprecated or private API in Dygraph (PaddlePaddle#4035)
zhiqiu authored Dec 9, 2019
1 parent 1fb663c commit d766869
Showing 5 changed files with 8 additions and 16 deletions.
8 changes: 2 additions & 6 deletions PaddleNLP/Research/Dialogue-PLATO/plato/modules/parallel.py
@@ -226,12 +226,8 @@ def apply_collective_grads(self):
         grad_vars = []
         for param in self._layers.parameters():
             # NOTE(zcd): The grad_ivar maybe no generated.
-            if param.trainable and param._ivar._grad_ivar():
-                g_var = framework.Variable(
-                    block=self._helper.main_program.current_block(),
-                    name=param._ivar._grad_name(),
-                    stop_gradient=True,
-                    ivar=param._ivar._grad_ivar())
+            if param.trainable and param._grad_ivar():
+                g_var = param._grad_ivar()
                 grad_vars.append(g_var)
                 assert g_var not in grad_var_set
                 grad_var_set.add(g_var)
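
For context, a minimal sketch of the simplified gradient-collection pattern the new lines rely on, assuming a 2019-era fluid dygraph Layer whose parameters expose trainable and _grad_ivar() (as in the diff above); collect_grad_vars and layer are illustrative names, not from the repository.

def collect_grad_vars(layer):
    # Gather the gradient variables of a dygraph Layer's trainable parameters.
    # A gradient may not exist yet, e.g. for a parameter not touched in backward.
    grad_var_set = set()
    grad_vars = []
    for param in layer.parameters():
        if param.trainable and param._grad_ivar():
            g_var = param._grad_ivar()
            grad_vars.append(g_var)
            assert g_var not in grad_var_set
            grad_var_set.add(g_var)
    return grad_vars
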
2 changes: 1 addition & 1 deletion PaddleSpeech/DeepVoice3/deepvoice3_paddle/modules.py
@@ -367,7 +367,7 @@ def __init__(self,

     def set_weight(self, array):
         assert self.embed._w.shape == list(array.shape), "shape does not match"
-        self.embed._w._ivar.value().get_tensor().set(
+        self.embed._w.value().get_tensor().set(
             array, fluid.framework._current_expected_place())

     def forward(self, indices, speaker_position_rate=None):
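
The same public-accessor pattern, pulled out as a standalone helper: a hedged sketch assuming a fluid dygraph Parameter (such as the Embedding weight _w above) and a numpy array of matching shape; set_param is an illustrative name, not from the repository.

import numpy as np
import paddle.fluid as fluid

def set_param(param, array):
    # Overwrite a dygraph Parameter in place from a numpy array, going through
    # the public value() accessor instead of the removed param._ivar handle.
    assert param.shape == list(array.shape), "shape does not match"
    param.value().get_tensor().set(
        np.asarray(array), fluid.framework._current_expected_place())
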
8 changes: 2 additions & 6 deletions dygraph/ocr_recognition/train.py
@@ -486,8 +486,7 @@ def eval():
         label_in = to_variable(data_dict["label_in"])
         label_out = to_variable(data_dict["label_out"])

-        label_out._stop_gradient = True
-        label_out.trainable = False
+        label_out.stop_gradient = True

         img = to_variable(data_dict["pixel"])

@@ -528,8 +527,7 @@ def eval():
         label_in = to_variable(data_dict["label_in"])
         label_out = to_variable(data_dict["label_out"])

-        label_out._stop_gradient = True
-        label_out.trainable = False
+        label_out.stop_gradient = True

         img = to_variable(data_dict["pixel"])

@@ -549,8 +547,6 @@ def eval():
         optimizer.minimize(avg_loss, grad_clip=grad_clip)
         ocr_attention.clear_gradients()

-        framework._dygraph_tracer()._clear_ops()
-
         if batch_id > 0 and batch_id % 1000 == 0:
             print("epoch: {}, batch_id: {}, loss {}".format(epoch, batch_id, total_loss / args.batch_size / 1000))

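
The train-loop edits in this file, and the identical ones in the resnet and se_resnext scripts below, follow one pattern: the public stop_gradient flag replaces the private _stop_gradient attribute plus trainable = False, and the manual framework._dygraph_tracer()._clear_ops() call between steps is no longer needed. A hedged sketch of a single training step under those conventions; model, optimizer, x_data, and y_data are placeholders, not names from this repository.

import paddle.fluid as fluid
from paddle.fluid.dygraph import to_variable

def train_step(model, optimizer, x_data, y_data):
    # One dygraph training step: the label is excluded from backward via the
    # public stop_gradient flag, and only clear_gradients() is needed after
    # the update (no manual tracer bookkeeping).
    img = to_variable(x_data)
    label = to_variable(y_data)
    label.stop_gradient = True

    out = model(img)
    loss = fluid.layers.cross_entropy(input=out, label=label)
    avg_loss = fluid.layers.mean(loss)

    avg_loss.backward()
    optimizer.minimize(avg_loss)
    model.clear_gradients()
    return avg_loss.numpy()
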
4 changes: 2 additions & 2 deletions dygraph/resnet/train.py
@@ -247,7 +247,7 @@ def eval(model, data):

         img = to_variable(dy_x_data)
         label = to_variable(y_data)
-        label._stop_gradient = True
+        label.stop_gradient = True

         out = model(img)
         #loss = fluid.layers.cross_entropy(input=out, label=label)
@@ -335,7 +335,7 @@ def train_resnet():

         img = to_variable(dy_x_data)
         label = to_variable(y_data)
-        label._stop_gradient = True
+        label.stop_gradient = True

         out = resnet(img)
         loss = fluid.layers.cross_entropy(input=out, label=label)
2 changes: 1 addition & 1 deletion dygraph/se_resnext/train.py
@@ -336,7 +336,7 @@ def eval(model, data):

         img = to_variable(dy_x_data)
         label = to_variable(y_data)
-        label._stop_gradient = True
+        label.stop_gradient = True
         out = model(img)

         softmax_out = fluid.layers.softmax(out, use_cudnn=False)
