[Python] Fix five classes of PEP-8 violations (E101/E111/E128/E302/W191)
* E101: indentation contains mixed spaces and tabs
* E111: indentation is not a multiple of four
* E128: continuation line under-indented for visual indent
* E302: expected 2 blank lines, found 1
* W191: indentation contains tabs
practicalswift committed Mar 7, 2016
1 parent 5585687 commit 183da81
Showing 38 changed files with 1,033 additions and 701 deletions.
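For orientation, the following is a minimal, hypothetical sketch of Python code that satisfies all five re-enabled rules. It is not taken from any file in this commit; the function names are illustrative only.

import subprocess


def run_one(binary, test_name, num_iters=2):
    """Run a single benchmark binary and return its exit status."""
    # E101/W191: indentation uses spaces only, never tabs.
    # E128: the continuation line uses a consistent hanging indent.
    return subprocess.call(
        [binary, test_name, '--num-iters={0}'.format(num_iters)])


def run_all(binary, test_names):
    """Run every benchmark in `test_names` and return the exit statuses."""
    # E302: two blank lines separate this definition from the previous one.
    # E111: the body is indented by exactly four spaces.
    return [run_one(binary, name) for name in test_names]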
2 changes: 1 addition & 1 deletion .pep8
@@ -1,3 +1,3 @@
[flake8]
filename = *.py,Benchmark_Driver,Benchmark_DTrace.in,Benchmark_GuardMalloc.in,Benchmark_RuntimeLeaksRunner.in,build-script,gyb,line-directive,ns-html2rst,recursive-lipo,rth,submit-benchmark-results,update-checkout,viewcfg
ignore = D100,D101,D102,D103,D104,D105,E101,E111,E128,E302,E402,E501,W191
ignore = D100,D101,D102,D103,D104,D105,E402,E501
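(With E101, E111, E128, E302 and W191 dropped from the ignore list, flake8 will report these five codes again for the files matched by the filename pattern above. A hypothetical check from the repository root, assuming flake8 is installed and pointed at this config file, would be something like "flake8 --config=.pep8 benchmark/scripts/"; the exact command used by the project is not shown in this commit.)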
11 changes: 8 additions & 3 deletions benchmark/scripts/Benchmark_DTrace.in
@@ -27,6 +27,7 @@ import perf_test_driver
XFAIL_LIST = [
]


class DTraceResult(perf_test_driver.Result):

def __init__(self, name, status, output, csv_output):
@@ -59,11 +60,14 @@ class DTraceResult(perf_test_driver.Result):

print(DTraceResult.data_format(max_test_len).format(*result))


class DTraceBenchmarkDriver(perf_test_driver.BenchmarkDriver):

def __init__(self, binary, xfail_list, csv_output):
perf_test_driver.BenchmarkDriver.__init__(self, binary, xfail_list,
enable_parallel=False,
opt_levels=['O'])
perf_test_driver.BenchmarkDriver.__init__(
self, binary, xfail_list,
enable_parallel=False,
opt_levels=['O'])
self.csv_output = csv_output

def print_data_header(self, max_test_len):
@@ -94,6 +98,7 @@ class DTraceBenchmarkDriver(perf_test_driver.BenchmarkDriver):

SWIFT_BIN_DIR = os.path.dirname(os.path.abspath(__file__))


def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-filter', type=str, default=None,
148 changes: 93 additions & 55 deletions benchmark/scripts/Benchmark_Driver
@@ -27,6 +27,7 @@ import glob

DRIVER_DIR = os.path.dirname(os.path.realpath(__file__))


def parse_results(res, optset):
# Parse lines like this
# #,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),PEAK_MEMORY(B)
@@ -58,10 +59,12 @@ def parse_results(res, optset):
mem_test = {}
mem_test['Data'] = [mem_testresult]
mem_test['Info'] = {}
mem_test['Name'] = "nts.swift/mem_maxrss." + optset + "." + testname + ".mem"
mem_test['Name'] = "nts.swift/mem_maxrss." + \
optset + "." + testname + ".mem"
tests.append(mem_test)
return tests


def submit_to_lnt(data, url):
print "\nSubmitting results to LNT server..."
json_report = {'input_data': json.dumps(data), 'commit': '1'}
Expand All @@ -75,6 +78,7 @@ def submit_to_lnt(data, url):
print "Error:\t", response['error']
sys.exit(1)


def instrument_test(driver_path, test, num_samples):
"""Run a test and instrument its peak memory use"""
test_outputs = []
@@ -113,14 +117,18 @@ def instrument_test(driver_path, test, num_samples):

return avg_test_output


def get_tests(driver_path):
"""Return a list of available performance tests"""
return subprocess.check_output([driver_path, '--list']).split()[2:]


def get_current_git_branch(git_repo_path):
"""Return the selected branch for the repo `git_repo_path`"""
return subprocess.check_output(['git', '-C', git_repo_path, 'rev-parse',
'--abbrev-ref', 'HEAD'], stderr=subprocess.STDOUT).strip()
return subprocess.check_output(
['git', '-C', git_repo_path, 'rev-parse',
'--abbrev-ref', 'HEAD'], stderr=subprocess.STDOUT).strip()


def log_results(log_directory, driver, formatted_output, swift_repo=None):
"""Log `formatted_output` to a branch specific directory in
Expand All @@ -146,6 +154,7 @@ def log_results(log_directory, driver, formatted_output, swift_repo=None):
with open(log_file, 'w') as f:
f.write(formatted_output)


def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
log_directory=None, swift_repo=None):
"""Run perf tests individually and return results in a format that's
@@ -192,6 +201,7 @@ def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
log_results(log_directory, driver, formatted_output, swift_repo)
return formatted_output


def submit(args):
print "SVN revision:\t", args.revision
print "Machine name:\t", args.machine
Expand All @@ -208,8 +218,9 @@ def submit(args):
print "Opt level:\t", optset
file = os.path.join(args.tests, "Benchmark_" + optset)
try:
res = run_benchmarks(file, benchmarks=args.benchmark,
num_samples=args.iterations)
res = run_benchmarks(
file, benchmarks=args.benchmark,
num_samples=args.iterations)
data['Tests'].extend(parse_results(res, optset))
except subprocess.CalledProcessError as e:
print "Execution failed.. Test results are empty."
Expand All @@ -227,24 +238,29 @@ def submit(args):
submit_to_lnt(data, args.lnt_host)
return 0


def run(args):
optset = args.optimization
file = os.path.join(args.tests, "Benchmark_" + optset)
run_benchmarks(file, benchmarks=args.benchmarks,
num_samples=args.iterations, verbose=True,
log_directory=args.output_dir,
swift_repo=args.swift_repo)
run_benchmarks(
file, benchmarks=args.benchmarks,
num_samples=args.iterations, verbose=True,
log_directory=args.output_dir,
swift_repo=args.swift_repo)
return 0


def format_name(log_path):
"""Return the filename and directory for a log file"""
return '/'.join(log_path.split('/')[-2:])


def compare_logs(compare_script, new_log, old_log):
"""Return diff of log files at paths `new_log` and `old_log`"""
print 'Comparing %s %s ...' % (format_name(old_log), format_name(new_log))
subprocess.call([compare_script, old_log, new_log])


def compare(args):
log_dir = args.log_dir
swift_repo = args.swift_repo
Expand All @@ -263,7 +279,8 @@ def compare(args):
for branch_dir in [current_branch_dir, master_branch_dir]:
for opt in ['O', 'Onone']:
recent_logs[os.path.basename(branch_dir) + '_' + opt] = sorted(
glob.glob(os.path.join(branch_dir, 'Benchmark_' + opt + '-*.log')),
glob.glob(os.path.join(
branch_dir, 'Benchmark_' + opt + '-*.log')),
key=os.path.getctime, reverse=True)

if current_branch == 'master':
@@ -311,65 +328,86 @@

return 0


def positive_int(value):
ivalue = int(value)
if not (ivalue > 0):
raise ValueError
return ivalue


def main():
parser = argparse.ArgumentParser(description='Swift benchmarks driver')
subparsers = parser.add_subparsers()

submit_parser = subparsers.add_parser('submit',
help='run benchmarks and submit results to LNT')
submit_parser.add_argument('-t', '--tests',
help='directory containing Benchmark_O{,none,unchecked} ' +
'(default: DRIVER_DIR)',
default=DRIVER_DIR)
submit_parser.add_argument('-m', '--machine', required=True,
help='LNT machine name')
submit_parser.add_argument('-r', '--revision', required=True,
help='SVN revision of compiler to identify the LNT run', type=int)
submit_parser.add_argument('-l', '--lnt_host', required=True,
help='LNT host to submit results to')
submit_parser.add_argument('-i', '--iterations',
help='number of times to run each test (default: 10)',
type=positive_int, default=10)
submit_parser.add_argument('-o', '--optimization', nargs='+',
help='optimization levels to use (default: O Onone Ounchecked)',
default=['O', 'Onone', 'Ounchecked'])
submit_parser.add_argument('benchmark',
help='benchmark to run (default: all)', nargs='*')
submit_parser = subparsers.add_parser(
'submit',
help='run benchmarks and submit results to LNT')
submit_parser.add_argument(
'-t', '--tests',
help='directory containing Benchmark_O{,none,unchecked} ' +
'(default: DRIVER_DIR)',
default=DRIVER_DIR)
submit_parser.add_argument(
'-m', '--machine', required=True,
help='LNT machine name')
submit_parser.add_argument(
'-r', '--revision', required=True,
help='SVN revision of compiler to identify the LNT run', type=int)
submit_parser.add_argument(
'-l', '--lnt_host', required=True,
help='LNT host to submit results to')
submit_parser.add_argument(
'-i', '--iterations',
help='number of times to run each test (default: 10)',
type=positive_int, default=10)
submit_parser.add_argument(
'-o', '--optimization', nargs='+',
help='optimization levels to use (default: O Onone Ounchecked)',
default=['O', 'Onone', 'Ounchecked'])
submit_parser.add_argument(
'benchmark',
help='benchmark to run (default: all)', nargs='*')
submit_parser.set_defaults(func=submit)

run_parser = subparsers.add_parser('run',
help='run benchmarks and output results to stdout')
run_parser.add_argument('-t', '--tests',
help='directory containing Benchmark_O{,none,unchecked} ' +
'(default: DRIVER_DIR)',
default=DRIVER_DIR)
run_parser.add_argument('-i', '--iterations',
help='number of times to run each test (default: 1)',
type=positive_int, default=1)
run_parser.add_argument('-o', '--optimization',
help='optimization level to use (default: O)', default='O')
run_parser.add_argument('--output-dir',
help='log results to directory (default: no logging)')
run_parser.add_argument('--swift-repo',
help='absolute path to Swift source repo for branch comparison')
run_parser.add_argument('benchmarks',
help='benchmark to run (default: all)', nargs='*')
run_parser = subparsers.add_parser(
'run',
help='run benchmarks and output results to stdout')
run_parser.add_argument(
'-t', '--tests',
help='directory containing Benchmark_O{,none,unchecked} ' +
'(default: DRIVER_DIR)',
default=DRIVER_DIR)
run_parser.add_argument(
'-i', '--iterations',
help='number of times to run each test (default: 1)',
type=positive_int, default=1)
run_parser.add_argument(
'-o', '--optimization',
help='optimization level to use (default: O)', default='O')
run_parser.add_argument(
'--output-dir',
help='log results to directory (default: no logging)')
run_parser.add_argument(
'--swift-repo',
help='absolute path to Swift source repo for branch comparison')
run_parser.add_argument(
'benchmarks',
help='benchmark to run (default: all)', nargs='*')
run_parser.set_defaults(func=run)

compare_parser = subparsers.add_parser('compare',
help='compare benchmark results')
compare_parser.add_argument('--log-dir', required=True,
help='directory containing benchmark logs')
compare_parser.add_argument('--swift-repo', required=True,
help='absolute path to Swift source repo')
compare_parser.add_argument('--compare-script', required=True,
help='absolute path to compare script')
compare_parser = subparsers.add_parser(
'compare',
help='compare benchmark results')
compare_parser.add_argument(
'--log-dir', required=True,
help='directory containing benchmark logs')
compare_parser.add_argument(
'--swift-repo', required=True,
help='absolute path to Swift source repo')
compare_parser.add_argument(
'--compare-script', required=True,
help='absolute path to compare script')
compare_parser.set_defaults(func=compare)

args = parser.parse_args()
11 changes: 8 additions & 3 deletions benchmark/scripts/Benchmark_GuardMalloc.in
@@ -24,15 +24,19 @@ import perf_test_driver
XFAIL_LIST = [
]


class GuardMallocResult(perf_test_driver.Result):

def __init__(self, name, status):
perf_test_driver.Result.__init__(self, name, status, "", XFAIL_LIST)


class GuardMallocBenchmarkDriver(perf_test_driver.BenchmarkDriver):

def __init__(self, binary, xfail_list):
perf_test_driver.BenchmarkDriver.__init__(self, binary, xfail_list,
enable_parallel=True)
perf_test_driver.BenchmarkDriver.__init__(
self, binary, xfail_list,
enable_parallel=True)
self.new_env = os.environ.copy()
self.new_env['DYLD_INSERT_LIBRARIES'] = '/usr/lib/libgmalloc.dylib'

Expand All @@ -43,7 +47,8 @@ class GuardMallocBenchmarkDriver(perf_test_driver.BenchmarkDriver):
test_name = '({},{})'.format(data['opt'], data['test_name'])
print "Running {}...".format(test_name)
sys.stdout.flush()
status = subprocess.call([data['path'], data['test_name'], '--num-iters=2'],
status = subprocess.call(
[data['path'], data['test_name'], '--num-iters=2'],
env=data['env'], stderr=open('/dev/null', 'w'),
stdout=open('/dev/null', 'w'))
return GuardMallocResult(test_name, status)
7 changes: 5 additions & 2 deletions benchmark/scripts/Benchmark_RuntimeLeaksRunner.in
@@ -53,16 +53,19 @@ IGNORABLE_GLOBAL_OBJC_CLASSES = set([
'_NSJSONReader'
])


class LeaksRunnerResult(perf_test_driver.Result):

def __init__(self, name, status):
perf_test_driver.Result.__init__(self, name, status, "", XFAIL_LIST)


class LeaksRunnerBenchmarkDriver(perf_test_driver.BenchmarkDriver):

def __init__(self, binary, xfail_list):
perf_test_driver.BenchmarkDriver.__init__(self, binary, xfail_list,
enable_parallel=True)
perf_test_driver.BenchmarkDriver.__init__(
self, binary, xfail_list,
enable_parallel=True)

def prepare_input(self, name):
return {}
6 changes: 6 additions & 0 deletions benchmark/scripts/compare_perf_tests.py
@@ -35,12 +35,14 @@
ShowSpeedup = 1
PrintAllScores = 0


def parse_int(word):
try:
return int(word)
except:
raise Exception("Expected integer value, not " + word)


def get_scores(fname):
scores = {}
worstscores = {}
@@ -77,9 +79,11 @@ def get_scores(fname):
f.close()
return scores, worstscores, runs, nums


def is_max_score(newscore, maxscore, invert):
return not maxscore or (newscore > maxscore if not invert else newscore < maxscore)


def compare_scores(key, score1, worstsample1, score2, worstsample2, runs, num):
print num.rjust(3),
print key.ljust(25),
@@ -144,6 +148,7 @@ def compare_scores(key, score1, worstsample1, score2, worstsample2, runs, num):
print "(?)",
print


def print_best_scores(key, scores):
print key,
bestscore = None
Expand All @@ -153,6 +158,7 @@ def print_best_scores(key, scores):
bestscore = score
print ", %d" % bestscore


def usage():
print "repeat.sh <n> Benchmark_O[none|unchecked] > file.times"
print "compare_perf_tests.py <file.times> [<file2.times>]"
