[tests/onnx] Add onnx and model out check (#1569)
felixdittrich92 authored Apr 30, 2024
1 parent 8b9b8f6 commit 46d5974
Showing 8 changed files with 65 additions and 22 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/main.yml
@@ -32,7 +32,7 @@ jobs:
pip install -e .[testing]
- name: Run unittests
run: |
-coverage run -m pytest tests/common/
+coverage run -m pytest tests/common/ -rs
coverage xml -o coverage-common.xml
- uses: actions/upload-artifact@v4
with:
@@ -65,7 +65,7 @@ jobs:
pip install -e .[testing]
- name: Run unittests
run: |
-coverage run -m pytest tests/tensorflow/
+coverage run -m pytest tests/tensorflow/ -rs
coverage xml -o coverage-tf.xml
- uses: actions/upload-artifact@v4
with:
@@ -99,7 +99,7 @@ jobs:
- name: Run unittests
run: |
-coverage run -m pytest tests/pytorch/
+coverage run -m pytest tests/pytorch/ -rs
coverage xml -o coverage-pt.xml
- uses: actions/upload-artifact@v4
12 changes: 6 additions & 6 deletions Makefile
@@ -11,18 +11,18 @@ style:

# Run tests for the library
test:
-coverage run -m pytest tests/common/
-USE_TF='1' coverage run -m pytest tests/tensorflow/
-USE_TORCH='1' coverage run -m pytest tests/pytorch/
+coverage run -m pytest tests/common/ -rs
+USE_TF='1' coverage run -m pytest tests/tensorflow/ -rs
+USE_TORCH='1' coverage run -m pytest tests/pytorch/ -rs

test-common:
-coverage run -m pytest tests/common/
+coverage run -m pytest tests/common/ -rs

test-tf:
-USE_TF='1' coverage run -m pytest tests/tensorflow/
+USE_TF='1' coverage run -m pytest tests/tensorflow/ -rs

test-torch:
-USE_TORCH='1' coverage run -m pytest tests/pytorch/
+USE_TORCH='1' coverage run -m pytest tests/pytorch/ -rs

# Check that docs can build
docs-single-version:
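A side note on the new flag (not part of the diff): pytest's -r option controls which outcomes are listed in the short summary at the end of a run, and the s selector adds skipped tests together with their skip reason. Because the ONNX checks added below downgrade an output mismatch to pytest.skip, running with -rs is what surfaces the reported max difference in the CI logs:

coverage run -m pytest tests/common/ -rs  # the summary now lists each skipped test with its reason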
11 changes: 9 additions & 2 deletions tests/pytorch/test_models_classification_pt.py
@@ -174,6 +174,7 @@ def test_models_onnx_export(arch_name, input_shape, output_size):
batch_size = 2
model = classification.__dict__[arch_name](pretrained=True).eval()
dummy_input = torch.rand((batch_size, *input_shape), dtype=torch.float32)
+pt_logits = model(dummy_input).detach().cpu().numpy()
with tempfile.TemporaryDirectory() as tmpdir:
# Export
model_path = export_model_to_onnx(model, model_name=os.path.join(tmpdir, "model"), dummy_input=dummy_input)
@@ -184,5 +185,11 @@ def test_models_onnx_export(arch_name, input_shape, output_size):
os.path.join(tmpdir, "model.onnx"), providers=["CPUExecutionProvider"]
)
ort_outs = ort_session.run(["logits"], {"input": dummy_input.numpy()})
-assert isinstance(ort_outs, list) and len(ort_outs) == 1
-assert ort_outs[0].shape == (batch_size, *output_size)
+
+assert isinstance(ort_outs, list) and len(ort_outs) == 1
+assert ort_outs[0].shape == (batch_size, *output_size)
+# Check that the output is close to the PyTorch output - only warn if not close
+try:
+    assert np.allclose(pt_logits, ort_outs[0], atol=1e-4)
+except AssertionError:
+    pytest.skip(f"Output of {arch_name}:\nMax element-wise difference: {np.max(np.abs(pt_logits - ort_outs[0]))}")
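For readers who want to try the new check outside the test suite, here is a minimal, self-contained sketch of the same round trip. It uses a toy nn.Linear model and calls torch.onnx.export directly instead of doctr's export_model_to_onnx helper, so the model, shapes, names and tolerance below are illustrative assumptions, not doctr code:

import os
import tempfile

import numpy as np
import onnxruntime
import torch

model = torch.nn.Linear(8, 4).eval()
dummy_input = torch.rand((2, 8), dtype=torch.float32)
# Reference output from the eager PyTorch model
pt_logits = model(dummy_input).detach().cpu().numpy()

with tempfile.TemporaryDirectory() as tmpdir:
    model_path = os.path.join(tmpdir, "model.onnx")
    torch.onnx.export(
        model,
        dummy_input,
        model_path,
        input_names=["input"],
        output_names=["logits"],
        dynamic_axes={"input": {0: "batch_size"}, "logits": {0: "batch_size"}},
    )
    ort_session = onnxruntime.InferenceSession(model_path, providers=["CPUExecutionProvider"])
    ort_outs = ort_session.run(["logits"], {"input": dummy_input.numpy()})
    # The exported graph should reproduce the eager output within a small tolerance
    assert np.allclose(pt_logits, ort_outs[0], atol=1e-4)

The tests in this commit follow the same structure but wrap the comparison in try/except, so a backend-specific numerical gap is reported as a skip rather than a hard failure.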
11 changes: 9 additions & 2 deletions tests/pytorch/test_models_detection_pt.py
@@ -163,6 +163,7 @@ def test_models_onnx_export(arch_name, input_shape, output_size):
else:
model = detection.__dict__[arch_name](pretrained=True, exportable=True).eval()
dummy_input = torch.rand((batch_size, *input_shape), dtype=torch.float32)
+pt_logits = model(dummy_input)["logits"].detach().cpu().numpy()
with tempfile.TemporaryDirectory() as tmpdir:
# Export
model_path = export_model_to_onnx(model, model_name=os.path.join(tmpdir, "model"), dummy_input=dummy_input)
@@ -172,5 +173,11 @@ def test_models_onnx_export(arch_name, input_shape, output_size):
os.path.join(tmpdir, "model.onnx"), providers=["CPUExecutionProvider"]
)
ort_outs = ort_session.run(["logits"], {"input": dummy_input.numpy()})
-assert isinstance(ort_outs, list) and len(ort_outs) == 1
-assert ort_outs[0].shape == (batch_size, *output_size)
+
+assert isinstance(ort_outs, list) and len(ort_outs) == 1
+assert ort_outs[0].shape == (batch_size, *output_size)
+# Check that the output is close to the PyTorch output - only warn if not close
+try:
+    assert np.allclose(pt_logits, ort_outs[0], atol=1e-4)
+except AssertionError:
+    pytest.skip(f"Output of {arch_name}:\nMax element-wise difference: {np.max(np.abs(pt_logits - ort_outs[0]))}")
12 changes: 10 additions & 2 deletions tests/pytorch/test_models_recognition_pt.py
@@ -1,6 +1,7 @@
import os
import tempfile

+import numpy as np
import onnxruntime
import psutil
import pytest
@@ -135,6 +136,7 @@ def test_models_onnx_export(arch_name, input_shape):
batch_size = 2
model = recognition.__dict__[arch_name](pretrained=True, exportable=True).eval()
dummy_input = torch.rand((batch_size, *input_shape), dtype=torch.float32)
+pt_logits = model(dummy_input)["logits"].detach().cpu().numpy()
with tempfile.TemporaryDirectory() as tmpdir:
# Export
model_path = export_model_to_onnx(model, model_name=os.path.join(tmpdir, "model"), dummy_input=dummy_input)
@@ -144,5 +146,11 @@ def test_models_onnx_export(arch_name, input_shape):
os.path.join(tmpdir, "model.onnx"), providers=["CPUExecutionProvider"]
)
ort_outs = ort_session.run(["logits"], {"input": dummy_input.numpy()})
-assert isinstance(ort_outs, list) and len(ort_outs) == 1
-assert ort_outs[0].shape[0] == batch_size
+
+assert isinstance(ort_outs, list) and len(ort_outs) == 1
+assert ort_outs[0].shape[0] == batch_size
+# Check that the output is close to the PyTorch output - only warn if not close
+try:
+    assert np.allclose(pt_logits, ort_outs[0], atol=1e-4)
+except AssertionError:
+    pytest.skip(f"Output of {arch_name}:\nMax element-wise difference: {np.max(np.abs(pt_logits - ort_outs[0]))}")
11 changes: 9 additions & 2 deletions tests/tensorflow/test_models_classification_tf.py
@@ -205,6 +205,7 @@ def test_models_onnx_export(arch_name, input_shape, output_size):
dummy_input = [tf.TensorSpec([None, *input_shape], tf.float32, name="input")]

np_dummy_input = np.random.rand(batch_size, *input_shape).astype(np.float32)
+tf_logits = model(np_dummy_input, training=False).numpy()
with tempfile.TemporaryDirectory() as tmpdir:
# Export
model_path, output = export_model_to_onnx(
@@ -217,5 +218,11 @@ def test_models_onnx_export(arch_name, input_shape, output_size):
os.path.join(tmpdir, "model.onnx"), providers=["CPUExecutionProvider"]
)
ort_outs = ort_session.run(output, {"input": np_dummy_input})
-assert isinstance(ort_outs, list) and len(ort_outs) == 1
-assert ort_outs[0].shape == (batch_size, *output_size)
+
+assert isinstance(ort_outs, list) and len(ort_outs) == 1
+assert ort_outs[0].shape == (batch_size, *output_size)
+# Check that the output is close to the TensorFlow output - only warn if not close
+try:
+    assert np.allclose(tf_logits, ort_outs[0], atol=1e-4)
+except AssertionError:
+    pytest.skip(f"Output of {arch_name}:\nMax element-wise difference: {np.max(np.abs(tf_logits - ort_outs[0]))}")
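The TensorFlow tests apply the same idea. As a rough sketch under the assumption of a toy Keras model, the conversion step that doctr's export_model_to_onnx wraps can be reproduced with tf2onnx directly; the model, shapes and paths below are again illustrative only:

import os
import tempfile

import numpy as np
import onnxruntime
import tensorflow as tf
import tf2onnx

model = tf.keras.Sequential([tf.keras.Input(shape=(8,)), tf.keras.layers.Dense(4)])
np_dummy_input = np.random.rand(2, 8).astype(np.float32)
# Reference output from the TensorFlow model
tf_logits = model(np_dummy_input, training=False).numpy()

with tempfile.TemporaryDirectory() as tmpdir:
    model_path = os.path.join(tmpdir, "model.onnx")
    tf2onnx.convert.from_keras(
        model,
        input_signature=[tf.TensorSpec([None, 8], tf.float32, name="input")],
        output_path=model_path,
    )
    ort_session = onnxruntime.InferenceSession(model_path, providers=["CPUExecutionProvider"])
    input_name = ort_session.get_inputs()[0].name  # avoid assuming the exported input name
    ort_outs = ort_session.run(None, {input_name: np_dummy_input})
    # ONNX output should match the TensorFlow output within a small tolerance
    assert np.allclose(tf_logits, ort_outs[0], atol=1e-4)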
11 changes: 9 additions & 2 deletions tests/tensorflow/test_models_detection_tf.py
@@ -243,6 +243,7 @@ def test_models_onnx_export(arch_name, input_shape, output_size):
# batch_size = None for dynamic batch size
dummy_input = [tf.TensorSpec([None, *input_shape], tf.float32, name="input")]
np_dummy_input = np.random.rand(batch_size, *input_shape).astype(np.float32)
+tf_logits = model(np_dummy_input, training=False)["logits"].numpy()
with tempfile.TemporaryDirectory() as tmpdir:
# Export
model_path, output = export_model_to_onnx(
@@ -254,5 +255,11 @@ def test_models_onnx_export(arch_name, input_shape, output_size):
os.path.join(tmpdir, "model.onnx"), providers=["CPUExecutionProvider"]
)
ort_outs = ort_session.run(output, {"input": np_dummy_input})
-assert isinstance(ort_outs, list) and len(ort_outs) == 1
-assert ort_outs[0].shape == (batch_size, *output_size)
+
+assert isinstance(ort_outs, list) and len(ort_outs) == 1
+assert ort_outs[0].shape == (batch_size, *output_size)
+# Check that the output is close to the TensorFlow output - only warn if not close
+try:
+    assert np.allclose(ort_outs[0], tf_logits, atol=1e-4)
+except AssertionError:
+    pytest.skip(f"Output of {arch_name}:\nMax element-wise difference: {np.max(np.abs(tf_logits - ort_outs[0]))}")
13 changes: 10 additions & 3 deletions tests/tensorflow/test_models_recognition_tf.py
@@ -94,7 +94,7 @@ def test_recognitionpredictor(mock_pdf, mock_vocab):
recognition.crnn_vgg16_bn(vocab=mock_vocab, input_shape=(32, 128, 3)),
)

-pages = DocumentFile.from_pdf(mock_pdf).as_images()
+pages = DocumentFile.from_pdf(mock_pdf)
# Create bounding boxes
boxes = np.array([[0.5, 0.5, 0.75, 0.75], [0.5, 0.5, 1.0, 1.0]], dtype=np.float32)
crops = extract_crops(pages[0], boxes)
@@ -203,6 +203,7 @@ def test_models_onnx_export(arch_name, input_shape):
# batch_size = None for dynamic batch size
dummy_input = [tf.TensorSpec([None, *input_shape], tf.float32, name="input")]
np_dummy_input = np.random.rand(batch_size, *input_shape).astype(np.float32)
+tf_logits = model(np_dummy_input, training=False)["logits"].numpy()
with tempfile.TemporaryDirectory() as tmpdir:
# Export
model_path, output = export_model_to_onnx(
@@ -223,5 +224,11 @@ def test_models_onnx_export(arch_name, input_shape):
# Inference
ort_session = onnxruntime.InferenceSession(model_path, providers=["CPUExecutionProvider"])
ort_outs = ort_session.run(output, {"input": np_dummy_input})
-assert isinstance(ort_outs, list) and len(ort_outs) == 1
-assert ort_outs[0].shape[0] == batch_size
+
+assert isinstance(ort_outs, list) and len(ort_outs) == 1
+assert ort_outs[0].shape[0] == batch_size
+# Check that the output is close to the TensorFlow output - only warn if not close
+try:
+    assert np.allclose(tf_logits, ort_outs[0], atol=1e-4)
+except AssertionError:
+    pytest.skip(f"Output of {arch_name}:\nMax element-wise difference: {np.max(np.abs(tf_logits - ort_outs[0]))}")
