[ONNX] use consistent quoting for string literals (#57757) (#58695)
Summary:
Pull Request resolved: #58695

As PEP8 says: "Pick a rule and stick to it." [1]

[1] https://www.python.org/dev/peps/pep-0008/#string-quotes
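
A minimal before/after sketch of the rule being applied, built from two strings that appear in the diff below (the variable assignments are illustrative, not verbatim source lines):

```python
# Before: single quotes by default, with a backslash-escaped apostrophe
device = 'CPU'
message = 'Couldn\'t export operator aten::mean'

# After: double quotes throughout; the apostrophe needs no escape
device = "CPU"
message = "Couldn't export operator aten::mean"
```

PEP8 does not prefer one quote character over the other; it asks only that a project pick one, and it recommends switching quote style rather than escaping when a string contains the delimiter — which is why the escaped apostrophes in test_operators.py disappear in this change.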

Test Plan: Imported from OSS

Reviewed By: driazati

Differential Revision: D28714811

Pulled By: SplitInfinity

fbshipit-source-id: c95103aceb1725c17c034dc6fc8216627f189548

Co-authored-by: Gary Miguel <garymiguel@microsoft.com>
2 people authored and facebook-github-bot committed May 27, 2021
1 parent b27fc0f commit 0a6828a
Showing 32 changed files with 1,156 additions and 1,156 deletions.
8 changes: 4 additions & 4 deletions test/onnx/debug_embed_params.py
@@ -9,11 +9,11 @@
 from test_pytorch_common import flatten
 
 
-torch.set_default_tensor_type('torch.FloatTensor')
+torch.set_default_tensor_type("torch.FloatTensor")
 try:
     import torch
 except ImportError:
-    print('Cannot import torch, hence caffe2-torch test will not run.')
+    print("Cannot import torch, hence caffe2-torch test will not run.")
     sys.exit(0)
@@ -23,9 +23,9 @@ def run_embed_params(proto, model, input, state_dict=None, use_gpu=True):
     case as well on pytorch front
     This should likely be removed from the release version of the code
     """
-    device = 'CPU'
+    device = "CPU"
     if use_gpu:
-        device = 'CUDA'
+        device = "CUDA"
     model_def = onnx.ModelProto.FromString(proto)
     onnx.checker.check_model(model_def)
     prepared = c2.prepare(model_def, device=device)
10 changes: 5 additions & 5 deletions test/onnx/export_onnx_tests_filter.py
@@ -57,11 +57,11 @@ def collect_generated_testcases(root_dir=test_onnx_common.pytorch_converted_dir,
     print("Failed {} testcases are moved to {}.".format(total_fail, _fail_test_dir))
 
 
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description='Check and filter the failed test cases.')
-    parser.add_argument('-v', action="store_true", default=False, help="verbose")
-    parser.add_argument('--delete', action="store_true", default=False, help="delete failed test cases")
-    parser.add_argument('--no-expect', action="store_true", default=False, help="generate expect txt files")
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Check and filter the failed test cases.")
+    parser.add_argument("-v", action="store_true", default=False, help="verbose")
+    parser.add_argument("--delete", action="store_true", default=False, help="delete failed test cases")
+    parser.add_argument("--no-expect", action="store_true", default=False, help="generate expect txt files")
     args = parser.parse_args()
     verbose = args.v
     delete = args.delete
2 changes: 1 addition & 1 deletion test/onnx/export_onnx_tests_generator.py
@@ -135,6 +135,6 @@ def convert_tests(testcases, sets=1):
     print("PyTorch converted cases are stored in {}.".format(test_onnx_common.pytorch_converted_dir))
     print_stats(FunctionalModule_nums, nn_module)
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     testcases = module_tests + new_module_tests
     convert_tests(testcases)
8 changes: 4 additions & 4 deletions test/onnx/pytorch_helper.py
@@ -48,7 +48,7 @@ def PyTorchModule(helper, model, sample_arguments, caffe2_inputs, prefix_name=No
     """
     if prefix_name is None:
         global _next_idx
-        prefix_name = 'pytorch_import_' + str(_next_idx) + '/'
+        prefix_name = "pytorch_import_" + str(_next_idx) + "/"
         _next_idx += 1
 
     # TODO: handle the case where model cannot be exported
@@ -65,7 +65,7 @@ def PyTorchModule(helper, model, sample_arguments, caffe2_inputs, prefix_name=No
         onnx_model.graph.input) if x.name not in initialized}
 
     if(len(uninitialized_inputs) != len(caffe2_inputs)):
-        raise ValueError('Expected {} inputs but found {}'.format(
+        raise ValueError("Expected {} inputs but found {}".format(
             len(uninitialized_inputs), len(caffe2_inputs)))
 
     def remap_blob_name(name):
@@ -74,10 +74,10 @@ def remap_blob_name(name):
             return str(caffe2_inputs[idx])
         return prefix_name + name
 
-    predict_net = Net(predict_net).Clone('anon', _FakeDict(remap_blob_name))
+    predict_net = Net(predict_net).Clone("anon", _FakeDict(remap_blob_name))
     helper.net.AppendNet(predict_net)
 
-    init_net = Net(init_net).Clone('anon', _FakeDict(remap_blob_name))
+    init_net = Net(init_net).Clone("anon", _FakeDict(remap_blob_name))
     helper.param_init_net.AppendNet(init_net)
 
     results = tuple([BlobReference(remap_blob_name(x.name), helper.net)
4 changes: 2 additions & 2 deletions test/onnx/test_caffe2_common.py
@@ -8,7 +8,7 @@
 
 def load_tensor_as_numpy_array(f):
     tensor = onnx.TensorProto()
-    with open(f, 'rb') as file:
+    with open(f, "rb") as file:
         tensor.ParseFromString(file.read())
     return tensor
 
@@ -19,7 +19,7 @@ def assert_similar(ref, real):
         np.testing.assert_allclose(ref[i], real[i], rtol=1e-3)
 
 
-def run_generated_test(model_file, data_dir, device='CPU'):
+def run_generated_test(model_file, data_dir, device="CPU"):
     model = onnx.load(model_file)
     input_num = len(glob.glob(os.path.join(data_dir, "input_*.pb")))
     inputs = []
10 changes: 5 additions & 5 deletions test/onnx/test_custom_ops.py
@@ -37,10 +37,10 @@ def forward(self, a, b):
                 return torch.ops.custom_namespace.custom_add(a, b)
 
         def symbolic_custom_add(g, self, other):
-            return g.op('Add', self, other)
+            return g.op("Add", self, other)
 
         from torch.onnx import register_custom_op_symbolic
-        register_custom_op_symbolic('custom_namespace::custom_add', symbolic_custom_add, 9)
+        register_custom_op_symbolic("custom_namespace::custom_add", symbolic_custom_add, 9)
 
         x = torch.randn(2, 3, 4, requires_grad=False)
         y = torch.randn(2, 3, 4, requires_grad=False)
@@ -110,7 +110,7 @@ def forward(self, x):
                 return h
 
         def symbolic_pythonop(g, n, *args, **kwargs):
-            name = kwargs['name']
+            name = kwargs["name"]
             if name == "MyClip":
                 return g.op("Clip", args[0], min_f=args[1])
             elif name == "MyRelu":
@@ -119,11 +119,11 @@ def symbolic_pythonop(g, n, *args, **kwargs):
                 return _unimplemented("prim::PythonOp", "unknown node kind: " + name)
 
         from torch.onnx import register_custom_op_symbolic
-        register_custom_op_symbolic('::prim_PythonOp', symbolic_pythonop, 1)
+        register_custom_op_symbolic("::prim_PythonOp", symbolic_pythonop, 1)
 
         x = torch.randn(2, 3, 4, requires_grad=True)
         model = MyModule()
         run_model_test(self, model, input=(x, ))
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
4 changes: 2 additions & 2 deletions test/onnx/test_models.py
@@ -129,7 +129,7 @@ def test_vgg19(self):
 
     @unittest.skip("This model takes too much memory")
     def test_vgg19_bn(self):
-        # VGG 19-layer model (configuration 'E') with batch normalization
+        # VGG 19-layer model (configuration "E") with batch normalization
         x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
         self.exportTest(toC(vgg19_bn()), toC(x))
 
@@ -263,5 +263,5 @@ def test_r2plus1d_18_video(self):
         self.exportTest(toC(r2plus1d_18()), toC(x), rtol=1e-3, atol=1e-5)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     run_tests()
2 changes: 1 addition & 1 deletion test/onnx/test_models_onnxruntime.py
@@ -39,5 +39,5 @@ def exportTest(self, model, inputs, rtol=1e-2, atol=1e-7, opset_versions=None):
                        onnx_shape_inference=True))
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
14 changes: 7 additions & 7 deletions test/onnx/test_onnx_opset.py
@@ -31,9 +31,9 @@ def check_onnx_opset_operator(model, ops, opset_version=_export_onnx_opset_versi
     # specified as well
     assert len(ops) == len(graph.node)
     for i in range(0, len(ops)):
-        assert graph.node[i].op_type == ops[i]['op_name']
+        assert graph.node[i].op_type == ops[i]["op_name"]
         if "attributes" in ops[i] :
-            attributes = ops[i]['attributes']
+            attributes = ops[i]["attributes"]
             assert len(attributes) == len(graph.node[i].attribute)
             for j in range(0, len(attributes)):
                 for attribute_field in attributes[j].keys():
@@ -63,7 +63,7 @@ def forward(self, x):
 
         ops = [{"op_name" : "IsNaN"}]
         ops = {9 : ops, 10 : ops}
-        x = torch.tensor([1.0, float('nan'), 2.0])
+        x = torch.tensor([1.0, float("nan"), 2.0])
         check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10])
 
     def test_topk(self):
@@ -136,7 +136,7 @@ def __init__(self):
             def forward(self, x):
                 size = [v * 2 for v in x.size()[2:]]
                 size = [int(i) for i in size]
-                return torch.nn.functional.interpolate(x, size=size, mode='nearest')
+                return torch.nn.functional.interpolate(x, size=size, mode="nearest")
 
         module = MyModule()
         ops8 = [{"op_name" : "Upsample", "attributes" : [{"name": "mode", "s": ("nearest").encode(), "type": 3},
@@ -271,7 +271,7 @@ def forward(self, x):
                 size = [v * 2 for v in x.size()[2:]]
                 return torch.nn.functional.interpolate(x,
                                                        size=size,
-                                                       mode='nearest')
+                                                       mode="nearest")
         ops_9 = [{"op_name" : "Shape"},
                  {"op_name" : "Constant"},
                  {"op_name" : "Gather"},
@@ -358,7 +358,7 @@ def forward(self, x):
                 size = [int(i) for i in size]
                 return torch.nn.functional.interpolate(x,
                                                        size=size,
-                                                       mode='nearest')
+                                                       mode="nearest")
         ops_9 = [{"op_name" : "Constant"},
                  {"op_name" : "Upsample",
                   "attributes" :
@@ -372,5 +372,5 @@ def forward(self, x):
         check_onnx_opsets_operator(MyDynamicModel(), x, ops, opset_versions=[9, 10])
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     run_tests()
44 changes: 22 additions & 22 deletions test/onnx/test_operators.py
@@ -61,7 +61,7 @@ def assertONNX(self, f, args, params=None, **kwargs):
         m = FuncModule(f, params)
         m.eval()
         onnx_model_pbtxt = export_to_pbtxt(m, args, **kwargs)
-        subname = kwargs.pop('subname', None)
+        subname = kwargs.pop("subname", None)
         self.assertExpected(onnx_model_pbtxt, subname)
         if _onnx_dep:
             onnx_model_pb = export_to_pb(m, args, **kwargs)
@@ -80,22 +80,22 @@ def assertONNX(self, f, args, params=None, **kwargs):
            # 2) only one assertONNX in each test, otherwise will override the data.
             assert not os.path.exists(output_dir), "{} should not exist!".format(output_dir)
             os.makedirs(output_dir)
-            with open(os.path.join(output_dir, "model.onnx"), 'wb') as file:
+            with open(os.path.join(output_dir, "model.onnx"), "wb") as file:
                 file.write(model_def.SerializeToString())
             data_dir = os.path.join(output_dir, "test_data_set_0")
             os.makedirs(data_dir)
             if isinstance(args, Variable):
                 args = (args,)
             for index, var in enumerate(flatten(args)):
                 tensor = onnx.numpy_helper.from_array(var.data.numpy())
-                with open(os.path.join(data_dir, "input_{}.pb".format(index)), 'wb') as file:
+                with open(os.path.join(data_dir, "input_{}.pb".format(index)), "wb") as file:
                     file.write(tensor.SerializeToString())
             outputs = m(*args)
             if isinstance(outputs, Variable):
                 outputs = (outputs,)
             for index, var in enumerate(flatten(outputs)):
                 tensor = onnx.numpy_helper.from_array(var.data.numpy())
-                with open(os.path.join(data_dir, "output_{}.pb".format(index)), 'wb') as file:
+                with open(os.path.join(data_dir, "output_{}.pb".format(index)), "wb") as file:
                     file.write(tensor.SerializeToString())
 
     def assertONNXRaises(self, err, f, args, params=None, **kwargs):
@@ -278,8 +278,8 @@ def test_conv_variable_length(self):
         model = torch.nn.Conv2d(3, 2, 3)
         y = model(x)
 
-        dynamic_axes = {'input_1': [0, 2, 3], 'output_1': {0: 'output_1_variable_dim_0', 1: 'output_1_variable_dim_1'}}
-        model_proto_name = 'conv2d.onnx'
+        dynamic_axes = {"input_1": [0, 2, 3], "output_1": {0: "output_1_variable_dim_0", 1: "output_1_variable_dim_1"}}
+        model_proto_name = "conv2d.onnx"
         torch.onnx.export(model, x, model_proto_name, verbose=True, input_names=["input_1"], output_names=["output_1"],
                           example_outputs=y, dynamic_axes=dynamic_axes)
 
@@ -385,12 +385,12 @@ def test_reduced_mean_keepdim(self):
 
     def test_mean_dtype(self):
         x = torch.randn(1, 2, 3, 4, requires_grad=True)
-        self.assertONNXRaisesRegex(RuntimeError, 'Couldn\'t export operator aten::mean',
+        self.assertONNXRaisesRegex(RuntimeError, "Couldn't export operator aten::mean",
                                    lambda x: torch.mean(x, dtype=torch.double), x)
 
     def test_reduced_mean_dtype(self):
         x = torch.randn(1, 2, 3, 4, requires_grad=True)
-        self.assertONNXRaisesRegex(RuntimeError, 'Couldn\'t export operator aten::mean',
+        self.assertONNXRaisesRegex(RuntimeError, "Couldn't export operator aten::mean",
                                    lambda x: torch.mean(x, dim=0, dtype=torch.double), x)
 
     def test_sum(self):
@@ -399,12 +399,12 @@ def test_sum(self):
 
     def test_sum_dtype(self):
         x = torch.randn(1, 2, 3, 4, requires_grad=True)
-        self.assertONNXRaisesRegex(RuntimeError, 'Couldn\'t export operator aten::sum',
+        self.assertONNXRaisesRegex(RuntimeError, "Couldn't export operator aten::sum",
                                    lambda x: torch.sum(x, dtype=torch.double), x)
 
     def test_reduced_sum_dtype(self):
         x = torch.randn(1, 2, 3, 4, requires_grad=True)
-        self.assertONNXRaisesRegex(RuntimeError, 'Couldn\'t export operator aten::sum',
+        self.assertONNXRaisesRegex(RuntimeError, "Couldn't export operator aten::sum",
                                    lambda x: torch.sum(x, dim=0, dtype=torch.double), x)
 
     def test_reduced_sum(self):
@@ -429,12 +429,12 @@ def test_reduced_prod_keepdim(self):
 
     def test_prod_dtype(self):
         x = torch.randn(1, 2, 3, 4, requires_grad=True)
-        self.assertONNXRaisesRegex(RuntimeError, 'Couldn\'t export operator aten::prod',
+        self.assertONNXRaisesRegex(RuntimeError, "Couldn't export operator aten::prod",
                                    lambda x: torch.prod(x, dtype=torch.double), x)
 
    def test_reduced_prod_dtype(self):
         x = torch.randn(1, 2, 3, 4, requires_grad=True)
-        self.assertONNXRaisesRegex(RuntimeError, 'Couldn\'t export operator aten::prod',
+        self.assertONNXRaisesRegex(RuntimeError, "Couldn't export operator aten::prod",
                                    lambda x: torch.prod(x, dim=0, dtype=torch.double), x)
 
     def test_sqrt(self):
@@ -527,7 +527,7 @@ def test_flatten2D(self):
         self.assertONNX(lambda x: torch.flatten(x, 1), x)
 
     def test_isnan(self):
-        x = torch.tensor([1, float('nan'), 2])
+        x = torch.tensor([1, float("nan"), 2])
         self.assertONNX(lambda x: torch.isnan(x), x)
 
     def test_argmax(self):
@@ -570,16 +570,16 @@ def test_norm_p2(self):
 
     def test_upsample_nearest_scale(self):
         x = torch.randn(1, 2, 3, 4, requires_grad=True)
         self.assertONNX(lambda x: nn.functional.interpolate(x, scale_factor=2.,
-                        mode='nearest', recompute_scale_factor=False), x)
+                        mode="nearest", recompute_scale_factor=False), x)
 
     def test_upsample_nearest_scale_default_scale_factor(self):
         x = torch.randn(1, 2, 3, 4, requires_grad=True)
         self.assertONNX(lambda x: nn.functional.interpolate(x, scale_factor=2.,
-                        mode='nearest'), x)
+                        mode="nearest"), x)
 
     def test_upsample_nearest_size(self):
         x = torch.randn(1, 2, 3, 4, requires_grad=True)
-        self.assertONNX(lambda x: nn.functional.interpolate(x, size=16, mode='nearest'), x)
+        self.assertONNX(lambda x: nn.functional.interpolate(x, size=16, mode="nearest"), x)
 
     def test_unsqueeze(self):
         x = torch.randn(3, 4, requires_grad=True)
@@ -766,7 +766,7 @@ def forward(self, scores, bbox_deltas, im_info, anchors):
         im_info = torch.ones(img_count, 3, dtype=torch.float32)
         anchors = torch.ones(A, 4, dtype=torch.float32)
         inputs = (scores, bbox_deltas, im_info, anchors)
-        self.assertONNX(model, inputs, custom_opsets={'org.pytorch._caffe2': 0})
+        self.assertONNX(model, inputs, custom_opsets={"org.pytorch._caffe2": 0})
 
     def test_dict(self):
         class MyModel(torch.nn.Module):
@@ -873,7 +873,7 @@ def test_dim(self):
 
     @skipIfNoLapack
     def test_det(self):
-        x = torch.randn(2, 3, 5, 5, device=torch.device('cpu'))
+        x = torch.randn(2, 3, 5, 5, device=torch.device("cpu"))
         self.assertONNX(lambda x: torch.det(x), x, opset_version=11)
         self.assertONNX(lambda x: torch.linalg.det(x), x, opset_version=11)
 
@@ -900,19 +900,19 @@ def test_softmaxcrossentropy_3d(self):
 
     def test_softmaxcrossentropy_3d_none(self):
         x = torch.randn(3, 5, 2)
         y = torch.empty(3, 2, dtype=torch.long).random_(5)
-        self.assertONNX(torch.nn.CrossEntropyLoss(reduction='none'), (x, y), opset_version=12)
+        self.assertONNX(torch.nn.CrossEntropyLoss(reduction="none"), (x, y), opset_version=12)
 
     def test_softmaxcrossentropy_4d(self):
         x = torch.randn(3, 5, 2, 1)
         y = torch.empty(3, 2, 1, dtype=torch.long).random_(5)
         self.assertONNX(torch.nn.CrossEntropyLoss(), (x, y), opset_version=12)
 
-if __name__ == '__main__':
-    no_onnx_dep_flag = '--no-onnx'
+if __name__ == "__main__":
+    no_onnx_dep_flag = "--no-onnx"
     _onnx_dep = no_onnx_dep_flag not in common.UNITTEST_ARGS
     if no_onnx_dep_flag in common.UNITTEST_ARGS:
         common.UNITTEST_ARGS.remove(no_onnx_dep_flag)
-    onnx_test_flag = '--produce-onnx-test-data'
+    onnx_test_flag = "--produce-onnx-test-data"
     _onnx_test = onnx_test_flag in common.UNITTEST_ARGS
     if onnx_test_flag in common.UNITTEST_ARGS:
         common.UNITTEST_ARGS.remove(onnx_test_flag)
8 changes: 4 additions & 4 deletions test/onnx/test_pytorch_common.py
@@ -10,7 +10,7 @@
 
 from torch.testing._internal.common_utils import *  # noqa: F401,F403
 
-torch.set_default_tensor_type('torch.FloatTensor')
+torch.set_default_tensor_type("torch.FloatTensor")
 
 BATCH_SIZE = 2
 
@@ -32,10 +32,10 @@ def wrapper(*args, **kwargs):
 
 
 skipIfNoCuda = _skipper(lambda: not torch.cuda.is_available(),
-                        'CUDA is not available')
+                        "CUDA is not available")
 
-skipIfTravis = _skipper(lambda: os.getenv('TRAVIS'),
-                        'Skip In Travis')
+skipIfTravis = _skipper(lambda: os.getenv("TRAVIS"),
+                        "Skip In Travis")
 
 # skips tests for all versions below min_opset_version.
 # if exporting the op is only supported after a specific version,