Skip to content

Commit

Permalink
Switched to using subprocesses always. No more single-process-in-this…
Browse files Browse the repository at this point in the history
…-process mode. We're still defaulting to running in just one subprocess, since lots of existing test suites would probably break if they were suddenly run in parallel. Each run now happens in a single worker process instead of in the main process, though. Makes things a lot simpler. Also overhauled the test_versions script.
  • Loading branch information
CleanCut committed Jun 17, 2015
1 parent 6c72401 commit 97569e1
Show file tree
Hide file tree
Showing 6 changed files with 186 additions and 49 deletions.
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -37,3 +37,6 @@ docs/_build/

# Editor files
tags

# virtualenvs
venv*
55 changes: 26 additions & 29 deletions green/runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,35 +48,32 @@ def run(suite, stream, args):

result.startTestRun()

if args.subprocesses == 1:
suite.run(result)
else:
tests = toProtoTestList(suite)
pool = LoggingDaemonlessPool(processes=args.subprocesses or None)
if tests:
async_responses = []
for index, test in enumerate(tests):
if args.run_coverage:
coverage_number = index + 1
else:
coverage_number = None
async_responses.append(pool.apply_async(
poolRunner,
(test.dotted_name, coverage_number, args.omit_patterns)))
pool.close()
for test, async_response in zip(tests, async_responses):
# Prints out the white 'processing...' version of the output
result.startTest(test)
# This blocks until the worker who is processing this
# particular test actually finishes
try:
result.addProtoTestResult(async_response.get())
except KeyboardInterrupt: # pragma: no cover
result.shouldStop = True
if result.shouldStop:
break
pool.terminate()
pool.join()
tests = toProtoTestList(suite)
pool = LoggingDaemonlessPool(processes=args.subprocesses or None)
if tests:
async_responses = []
for index, test in enumerate(tests):
if args.run_coverage:
coverage_number = index + 1
else:
coverage_number = None
async_responses.append(pool.apply_async(
poolRunner,
(test.dotted_name, coverage_number, args.omit_patterns)))
pool.close()
for test, async_response in zip(tests, async_responses):
# Prints out the white 'processing...' version of the output
result.startTest(test)
# This blocks until the worker who is processing this
# particular test actually finishes
try:
result.addProtoTestResult(async_response.get())
except KeyboardInterrupt: # pragma: no cover
result.shouldStop = True
if result.shouldStop:
break
pool.terminate()
pool.join()

result.stopTestRun()

Expand Down
21 changes: 18 additions & 3 deletions green/subprocess.py
Original file line number Diff line number Diff line change
Expand Up @@ -223,16 +223,31 @@ def poolRunner(test_name, coverage_number=None, omit_patterns=[]):
test = None
try:
test = loadTargets(test_name)
test.run(result)
except:
err = sys.exc_info()
t = ProtoTest()
t.module = 'green.runner'
t.module = 'green.loader'
t.class_name = 'N/A'
t.description = 'Green encountered an error loading the unit test itself.'
t.description = 'Green encountered an error loading the unit test.'
t.method_name = 'poolRunner'
result.addError(t, err)

try:
test.run(result)
except:
# Some frameworks like testtools record the error AND THEN let it
# through to crash things. So we only need to manufacture another error
# if the underlying framework didn't, but either way we don't want to
# crash.
if not result.errors:
err = sys.exc_info()
t = ProtoTest()
t.module = 'green.runner'
t.class_name = 'N/A'
t.description = 'Green encountered an exception not caught by the underlying test framework.'
t.method_name = 'poolRunner'
result.addError(t, err)

# Finish coverage
if coverage_number and coverage:
cov.stop()
Expand Down
84 changes: 78 additions & 6 deletions green/test/test_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,20 @@ def test_verbose3(self):
verbose=3 causes version output, and an empty test case passes.
"""
self.args.verbose = 3
run(FakeCase(), self.stream, self.args)
sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
fh = open(os.path.join(sub_tmpdir, 'test_verbose3.py'), 'w')
fh.write("""
import unittest
class Verbose3(unittest.TestCase):
def test01(self):
pass
""".format(os.getpid()))
fh.close()
os.chdir(sub_tmpdir)
tests = loadTargets('test_verbose3')
result = run(tests, self.stream, self.args)
os.chdir(self.startdir)
self.assertEqual(result.testsRun, 1)
self.assertIn('Green', self.stream.getvalue())
self.assertIn('OK', self.stream.getvalue())

Expand All @@ -105,7 +118,20 @@ def test_warnings(self):
setting warnings='always' doesn't crash
"""
self.args.warnings = 'always'
run(FakeCase(), self.stream, self.args)
sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
fh = open(os.path.join(sub_tmpdir, 'test_warnings.py'), 'w')
fh.write("""
import unittest
class Warnings(unittest.TestCase):
def test01(self):
pass
""".format(os.getpid()))
fh.close()
os.chdir(sub_tmpdir)
tests = loadTargets('test_warnings')
result = run(tests, self.stream, self.args)
os.chdir(self.startdir)
self.assertEqual(result.testsRun, 1)
self.assertIn('OK', self.stream.getvalue())

def test_noTestsFound(self):
Expand All @@ -119,10 +145,20 @@ def test_failedSaysSo(self):
"""
A failing test case causes the whole run to report 'FAILED'
"""
class FailCase(unittest.TestCase):
def runTest(self):
self.assertTrue(False)
run(FailCase(), self.stream, self.args)
sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
fh = open(os.path.join(sub_tmpdir, 'test_failed.py'), 'w')
fh.write("""
import unittest
class Failed(unittest.TestCase):
def test01(self):
self.assertTrue(False)
""".format(os.getpid()))
fh.close()
os.chdir(sub_tmpdir)
tests = loadTargets('test_failed')
result = run(tests, self.stream, self.args)
os.chdir(self.startdir)
self.assertEqual(result.testsRun, 1)
self.assertIn('FAILED', self.stream.getvalue())

def test_failfast(self):
Expand Down Expand Up @@ -356,6 +392,42 @@ def test_badTest(self):
self.assertRaises(ImportError, run, tests, self.stream, self.args)
os.chdir(TestSubprocesses.startdir)

def test_uncaughtException(self):
    """
    Exceptions that escape the test framework get caught by poolRunner and
    reported as a failure. For example, the testtools implementation of
    TestCase unwisely (but deliberately) lets SystemExit exceptions through.
    """
    try:
        import testtools
    except ImportError:
        # Narrowed from a bare ``except:`` -- only a *missing* testtools
        # should skip this test; a broken install should surface as an error.
        self.skipTest('testtools must be installed to run this test.')
    testtools  # Make the linter happy

    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    # Make the temp dir importable as a package so discovery can find it.
    fh = open(os.path.join(sub_tmpdir, '__init__.py'), 'w')
    fh.write('\n')
    fh.close()
    # Write a test module whose test raises SystemExit.  testtools records
    # the error AND re-raises it, so poolRunner must survive the escape.
    fh = open(os.path.join(sub_tmpdir, 'test_uncaught.py'), 'w')
    fh.write("""
import testtools
class Uncaught(testtools.TestCase):
    def test_uncaught(self):
        raise SystemExit(0)
""")
    fh.close()
    # Load the tests via discovery from the parent temp dir.
    os.chdir(self.tmpdir)
    tests = loadTargets('.')
    self.args.subprocesses = 2
    run(tests, self.stream, self.args)
    os.chdir(TestSubprocesses.startdir)
    # The escaped SystemExit must be reported as a failed run, not a crash.
    self.assertIn('FAILED', self.stream.getvalue())



def test_empty(self):
"""
run() does not crash with empty suite and subprocesses
Expand Down
2 changes: 2 additions & 0 deletions requirements-optional.txt
Original file line number Diff line number Diff line change
@@ -1,2 +1,4 @@
django
coverage
testtools
-r requirements.txt
70 changes: 59 additions & 11 deletions test_versions
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
#!/usr/bin/env bash

PYTHON_VERSIONS="${TRAVIS_PYTHON_VERSION:-$(find -E `echo $PATH | tr : ' '` -depth 1 -regex '.*/python(2.7|3.3|3.4)|.*/pypy' 2>/dev/null | sed -E -e 's/.*python//' -e 's/.*pypy/pypy/' )}"
PYTHON_VERSIONS="${TRAVIS_PYTHON_VERSION:-$(find -E `echo $PATH | tr : ' '` -depth 1 -regex '.*/python(2.7|3.[3-9])|.*/pypy' 2>/dev/null | sed -E -e 's/.*python//' -e 's/.*pypy/pypy/' )}"
TESTS="`find green -name test_*.py | sed -e s/.py$// | tr / .` green"


Expand All @@ -13,20 +13,68 @@ if [ "${PYTHON_VERSIONS}" == "" ] ; then
PYTHON_VERSIONS="default"
fi

echo "Identified python versions: `echo ${PYTHON_VERSIONS} | tr '\n' ' '`"

# Make sure each of the pythons has the necessary requirements installed
for PYTHON_VERSION in ${PYTHON_VERSIONS} ; do
if [ "${PYTHON_VERSION}" == "default" ] ; then
PYTHON_VERSION=""
fi
if [ "${PYTHON_VERSION}" == "pypy" ] ; then
PYTHON=pypy
shift
elif [[ -e `which python${PYTHON_VERSION}` ]] ; then
PYTHON=python${PYTHON_VERSION}
shift
else
echo "Failed to determine python binary for python version '${PYTHON_VERSION}'"
exit 4
fi

if ! ${PYTHON} -m pip > /dev/null ; then
echo "Please install pip under ${PYTHON}"
exit 5
fi

if ! ${PYTHON} -m virtualenv --version > /dev/null ; then
echo "Please install virtualenv under ${PYTHON}"
exit 6
fi

VENV_DIR="venv${PYTHON_VERSION}"
if [ ! -d ${VENV_DIR} ] ; then
${PYTHON} -m virtualenv ${VENV_DIR}
fi

echo "Ensuring dependencies are installed for ${VENV_DIR}"

if ! source ${VENV_DIR}/bin/activate ; then
echo "Failed to enter virtualenv"
exit 7
fi
hash -r
${VENV_DIR}/bin/pip install -r requirements-optional.txt | grep -Ev "Requirement already|however version|consider upgrading"
deactivate
done

# Finally, run all the tests
for TEST in ${TESTS} ; do
for PYTHON_VERSION in ${PYTHON_VERSIONS} ; do
if [ "${PYTHON_VERSION}" == "default" ] ; then
PYTHON_VERSION=""
PYTHON="python"
else
VENV_DIR="venv${PYTHON_VERSION}"
PYTHON=${VENV_DIR}/bin/python
fi

echo ""
set -x
# Actually run it!
if ! PYTHONPATH="." ${PYTHON} -m green.cmdline -s 8 ${TEST} ; then
exit 3
fi
for PROCESS_NUM in 1 8 ; do
echo ""
set -x
if ! ./g ${PYTHON_VERSION} -s ${PROCESS_NUM} ${TEST} ; then
exit 3
fi
{ set +x; } 2>/dev/null
done
{ set +x; } 2>/dev/null
done
done

echo -e "\nCompleted internal test suite for Python(s):\n${PYTHON_VERSIONS}\n"
echo -e "\nCompleted internal test suite for Python versions:\n${PYTHON_VERSIONS}\n"

0 comments on commit 97569e1

Please sign in to comment.