Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Dy2St] Remove internal API to_variable #61952

Merged
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next Next commit
[Dy2St] Remove internal API to_variable
  • Loading branch information
SigureMo committed Feb 21, 2024
commit 7a6af16bdc8ad99fb54d996c7cbd1a4b28acf0f5
1 change: 0 additions & 1 deletion python/paddle/base/dygraph/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@
guard,
no_grad,
no_grad_,
to_variable,
)
from .tracer import Tracer # noqa: F401

Expand Down
121 changes: 0 additions & 121 deletions python/paddle/base/dygraph/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,12 @@
import warnings

import decorator
import numpy as np

import paddle
from paddle.base import core, framework
from paddle.base.framework import global_var
from paddle.base.multiprocess_utils import CleanupFuncRegistrar

from ..data_feeder import convert_dtype
from ..framework import _get_paddle_place
from ..wrapped_decorator import signature_safe_contextmanager, wrap_decorator
from .tracer import Tracer
Expand Down Expand Up @@ -837,122 +835,3 @@ def check_in_out(in_out_list, name):
allow_unused,
no_grad_vars,
)


@framework.dygraph_only
def to_variable(value, name=None, zero_copy=None, dtype=None):
    r"""
    :api_attr: imperative

    Create a ``Variable`` (eager ``Tensor``) from a tuple, list,
    numpy.ndarray, or an existing Variable/Tensor object.

    Parameters:
        value(tuple|list|ndarray|Variable|Tensor): Initial data. May be
            multi-dimensional; element dtype is one of numpy.{float16,
            float32, float64, int16, int32, int64, uint8, uint16,
            complex64, complex128}.
        name(str, optional): Name for the created tensor. Normally users
            do not need to set it. Refer to :ref:`api_guide_Name`.
            Default: None.
        zero_copy(bool, optional): Whether to share memory with the input
            numpy array. Only meaningful on CPUPlace, and currently always
            disabled (a warning is emitted if True is requested).
            Default: None.
        dtype(str, optional): Desired dtype of the result, e.g. 'bool',
            'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
            'int64', 'uint8'. Default: None (keep the input's dtype).

    Returns:
        Variable: a ``Tensor`` with the same data (and, unless ``dtype``
        is given, the same dtype and shape) as ``value``. If ``value`` is
        already an eager Tensor or Variable it is returned unchanged.
    """
    supported_types = (
        list,
        tuple,
        np.ndarray,
        core.eager.Tensor,
        framework.Variable,
        core.Tensor,
        core.LoDTensor,
    )
    if not isinstance(value, supported_types):
        raise TypeError(
            "The type of 'value' in base.dygraph.to_variable must be {}, but received {}.".format(
                supported_types, type(value)
            )
        )

    # Already a dygraph tensor/variable: hand it back untouched.
    if isinstance(value, (core.eager.Tensor, framework.Variable)):
        return value

    # A bare (LoD)Tensor only needs wrapping into an eager Tensor.
    if isinstance(value, (core.Tensor, core.LoDTensor)):
        return core.eager.Tensor(value)

    place = framework._current_expected_place()
    if isinstance(place, framework.core.CPUPlace):
        # TODO(zhiqiu): zero_copy is temporarily disabled on CPUPlace:
        # (1) eigen requires 16-byte alignment which numpy data may not
        #     satisfy. Details:
        #     https://eigen.tuxfamily.org/dox/group__TopicUnalignedArrayAssert.html
        # (2) when used inside the flask framework it may hang. Details:
        #     https://github.com/PaddlePaddle/Paddle/issues/26635
        if zero_copy is True:
            warnings.warn(
                "Currently, zero_copy is not supported, and it will be discarded."
            )
            zero_copy = False
    else:
        assert (
            not zero_copy
        ), "zero_copy mode can only be used with CPUPlace"

    array = value if isinstance(value, np.ndarray) else np.array(value)

    if dtype is not None:
        target_dtype = convert_dtype(dtype)
        if array.dtype != target_dtype:
            array = array.astype(target_dtype)

    return core.eager.Tensor(
        array,
        place,
        False,
        zero_copy,
        name if name else None,
        True,
    )
2 changes: 1 addition & 1 deletion python/paddle/framework/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
IPUPlace,
XPUPlace,
)
from ..base.dygraph import base, to_variable # noqa: F401
from ..base.dygraph import base # noqa: F401
from ..base.dygraph.base import ( # noqa: F401
disable_dygraph as enable_static,
enable_dygraph as disable_static,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,46 +21,6 @@
__all__ = []


class BasicApiTransformer(BaseTransformer):
    """
    Transformer that rewrites basic dygraph API usage into its static-graph
    form by chaining the tensor-call and attribute sub-transformers.
    """

    def __init__(self, root):
        self.root = root

    def transform(self):
        # Run the dedicated sub-transformers first, then this transformer's
        # own visitors over the (already rewritten) tree.
        for sub_transformer_cls in (ToTensorTransformer, AttributeJstTransformer):
            sub_transformer_cls(self.root).transform()
        self.visit(self.root)
        return self.root


class ToTensorTransformer(BaseTransformer):
    """
    Rewrite ``paddle.to_tensor`` and ``paddle.to_variable`` calls into
    ``paddle.assign`` calls.
    """

    def __init__(self, node):
        assert isinstance(
            node, gast.AST
        ), "Input non-gast.AST node for the initialization of ToTensorTransformer."
        self.root = node

    def transform(self):
        self.visit(self.root)
        return self.root

    def visit_Call(self, node):
        assert isinstance(node, gast.Call)
        # Only calls whose callee ends with ``to_variable`` are rewritten;
        # everything else passes through unchanged.
        rewritten = to_assign_node(node) if is_to_variable(node) else node
        self.generic_visit(rewritten)
        return rewritten


class NameloadJstTransformer(BaseTransformer):
"""
change name and attribute load to __jst.Ld(name) pattern.
Expand Down Expand Up @@ -168,34 +128,3 @@ def visit_Attribute(self, node):
)
self.generic_visit(node)
return node


def is_to_variable(node):
    """Return True if *node* is a call whose callee name ends in ``to_variable``."""
    assert isinstance(node, gast.Call)
    # Compare only the final attribute so both ``to_variable`` and dotted
    # forms such as ``base.dygraph.to_variable`` match.
    callee = ast_to_source_code(node.func).strip()
    return callee.rsplit(".", 1)[-1] == "to_variable"


def to_assign_node(node):
    """
    Rewrite, in place, a dygraph ``base.dygraph.to_variable`` (alias
    ``paddle.to_tensor``) call node into a static ``paddle.assign`` call
    node and return it.

    NOTE:
    1. ``to_variable`` supports dtypes {float16, float32, float64, int16,
       int32, int64, uint8, uint16}, while ``assign`` only supports
       {float32, float64, int32, int64, bool};
    2. if the input of ``assign`` is a numpy.ndarray, its size cannot
       exceed 1024 * 1024.
    """
    assert isinstance(node, gast.Call)
    node.func = gast.parse('paddle.assign').body[0].value

    if node.args:
        # Positional form: keep only the data argument, drop any keywords.
        node.args = node.args[:1]
        node.keywords = []
        return node

    # Keyword form: rename the data keyword to ``assign``'s ``x`` and keep
    # only that single keyword.
    for kw in node.keywords:
        if kw.arg in ('value', 'data'):
            kw.arg = 'x'
            node.keywords = [kw]
            node.args = []
            break
    return node
7 changes: 5 additions & 2 deletions python/paddle/jit/dy2static/transformers/transform.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@
from ..utils import ast_to_source_code
from .assert_transformer import AssertTransformer
from .base import BaseTransformer
from .basic_api_transformer import BasicApiTransformer, NameloadJstTransformer
from .break_continue_transformer import (
BreakContinueTransformer,
BreakTransformOptimizer,
Expand All @@ -36,6 +35,10 @@
from .ifelse_transformer import IfElseTransformer
from .logical_transformer import LogicalTransformer
from .loop_transformer import LoopTransformer
from .name_load_transformer import (
AttributeJstTransformer,
NameloadJstTransformer,
)
from .return_transformer import ReturnTransformer
from .tensor_shape_transformer import TensorShapeTransformer
from .tensorhook_transformer import RegisterHookTransformer
Expand Down Expand Up @@ -91,7 +94,7 @@ def transfer_from_node_type(self, node):
transformers = [
RegisterHookTransformer,
EarlyReturnTransformer,
BasicApiTransformer, # Basic Api
AttributeJstTransformer, # Tensor.size -> Tensor.size(), it's no need in PIR mode
TensorShapeTransformer, # Tensor.shape -> paddle.shape(Tensor)
BreakContinueTransformer, # break/continue in loops
ReturnTransformer, # return in functions
Expand Down