
Commit

Merge pull request swiftlang#1598 from practicalswift/fix-80-column-violations-in-python-code

[Python] Fix 80 column violations
practicalswift committed Mar 10, 2016
2 parents a504d57 + 0796eaa commit 42b7e3f
Showing 32 changed files with 512 additions and 245 deletions.
3 changes: 2 additions & 1 deletion .pep8
@@ -1,3 +1,4 @@
[flake8]
filename = *.py,Benchmark_Driver,Benchmark_DTrace.in,Benchmark_GuardMalloc.in,Benchmark_RuntimeLeaksRunner.in,build-script,gyb,line-directive,ns-html2rst,recursive-lipo,rth,submit-benchmark-results,update-checkout,viewcfg
ignore = D100,D101,D102,D103,D104,D105,E402,E501
ignore = D100,D101,D102,D103,D104,D105,E402
max-line-length = 80
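
With E501 removed from the ignore list and max-line-length set to 80, flake8 now reports any line longer than 80 columns in the files matched by the filename pattern above. A minimal sketch of the kind of rewrite this enforces; every name below is illustrative and none of it appears in the patch:

def compute_statistics(samples_a, samples_b, confidence_level, output_format):
    # Placeholder body so the sketch runs as written.
    return (samples_a, samples_b, confidence_level, output_format)

samples_a, samples_b = [1.0], [2.0]
confidence_level, output_format = 0.95, 'csv'

# Over 80 columns -- flake8 would now flag this line as "E501 line too long":
result = compute_statistics(samples_a, samples_b, confidence_level, output_format)

# Wrapped by continuing inside the call's parentheses -- passes the check:
result = compute_statistics(
    samples_a, samples_b, confidence_level, output_format)
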
36 changes: 26 additions & 10 deletions benchmark/scripts/Benchmark_DTrace.in
@@ -31,17 +31,21 @@ XFAIL_LIST = [
class DTraceResult(perf_test_driver.Result):

def __init__(self, name, status, output, csv_output):
perf_test_driver.Result.__init__(self, name, status, output, XFAIL_LIST)
perf_test_driver.Result.__init__(
self, name, status, output, XFAIL_LIST)
self.csv_output = csv_output

@classmethod
def data_headers(cls):
return ['Name', 'Result', 'strong_retain', 'strong_retain/iter', 'strong_release', 'strong_release/iter']
return [
'Name', 'Result', 'strong_retain', 'strong_retain/iter',
'strong_release', 'strong_release/iter']

@classmethod
def data_format(cls, max_test_len):
non_name_headers = DTraceResult.data_headers()[1:]
fmt = ('{:<%d}' % (max_test_len + 5)) + ''.join(['{:<%d}' % (len(h) + 2) for h in non_name_headers])
fmt = ('{:<%d}' % (max_test_len + 5)) + \
''.join(['{:<%d}' % (len(h) + 2) for h in non_name_headers])
return fmt

@classmethod
@@ -82,10 +86,15 @@ class DTraceBenchmarkDriver(perf_test_driver.BenchmarkDriver):
sys.stdout.flush()

def get_results_with_iters(iters):
p = subprocess.Popen(['sudo', 'dtrace', '-s', DTRACE_PATH, '-c', '%s %s %s' % (data['path'], data['test_name'], '--num-iters=%d' % iters)],
stdout=subprocess.PIPE, stderr=open('/dev/null', 'w'))
p = subprocess.Popen([
'sudo', 'dtrace', '-s', DTRACE_PATH,
'-c', '%s %s %s' % (data['path'], data['test_name'],
'--num-iters=%d' % iters)
], stdout=subprocess.PIPE, stderr=open('/dev/null', 'w'))
results = [x for x in p.communicate()[0].split("\n") if len(x) > 0]
return [x.split(',')[1] for x in results[results.index('DTRACE RESULTS') + 1:]]
return [
x.split(',')[1] for x in
results[results.index('DTRACE RESULTS') + 1:]]
iter_2_results = get_results_with_iters(2)
iter_3_results = get_results_with_iters(3)

@@ -101,10 +110,17 @@ SWIFT_BIN_DIR = os.path.dirname(os.path.abspath(__file__))

def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-filter', type=str, default=None,
help='Filter out any test that does not match the given regex')
parser.add_argument('-csv', default=False, action='store_true',
help="Emit csv output", dest='csv_output')
parser.add_argument(
'-filter',
type=str,
default=None,
help='Filter out any test that does not match the given regex')
parser.add_argument(
'-csv',
default=False,
action='store_true',
help="Emit csv output",
dest='csv_output')
return parser.parse_args()

if __name__ == "__main__":
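
Most of the rewrites in this file rely on implicit line continuation inside an open bracket, which PEP 8 prefers over the trailing-backslash style (used once above, in data_format). A hedged, self-contained sketch of the idiom; the command here is just an example, not the benchmark invocation:

import subprocess

# Each argument group gets its own line inside the open bracket; no
# backslashes are needed because the parenthesis keeps the statement open.
p = subprocess.Popen(
    ['echo', 'hello'],
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE)
print(p.communicate()[0])
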
37 changes: 23 additions & 14 deletions benchmark/scripts/Benchmark_RuntimeLeaksRunner.in
@@ -26,7 +26,8 @@ import perf_test_driver
XFAIL_LIST = [
]

# Global objective-c classes created by various frameworks. We do not care about these.
# Global objective-c classes created by various frameworks. We do not care about
# these.
IGNORABLE_GLOBAL_OBJC_CLASSES = set([
'__NSPlaceholderDate',
'NSCache',
@@ -60,6 +61,7 @@ class LeaksRunnerResult(perf_test_driver.Result):
print(fmt.format(self.get_name(), self.get_result(),
self.get_count()))


class LeaksRunnerBenchmarkDriver(perf_test_driver.BenchmarkDriver):

def __init__(self, binary, xfail_list, num_samples, num_iters):
@@ -81,54 +83,61 @@ class LeaksRunnerBenchmarkDriver(perf_test_driver.BenchmarkDriver):

def run_test(self, data, num_iters):
try:
p = subprocess.Popen([data['path'], "--run-all", "--num-samples={}".format(data['num_samples']),
"--num-iters={}".format(num_iters), data['test_name']],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p = subprocess.Popen([
data['path'], "--run-all",
"--num-samples={}".format(data['num_samples']),
"--num-iters={}".format(num_iters), data['test_name']],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
error_out = p.communicate()[1].split("\n")
except OSError:
print("Child Process Failed! (%s,%s)" % (data['path'], data['test_name']))
print("Child Process Failed! (%s,%s)" % (
data['path'], data['test_name']))
return None

try:
# We grab the second line since swift globals get lazily created in the
# first iteration.
# We grab the second line since swift globals get lazily created in
# the first iteration.
d = json.loads(error_out[1])
d['objc_objects'] = [x for x in d['objc_objects'] if x not in IGNORABLE_GLOBAL_OBJC_CLASSES]
d['objc_objects'] = [x for x in d['objc_objects']
if x not in IGNORABLE_GLOBAL_OBJC_CLASSES]
d['objc_count'] = len(d['objc_objects'])

total_count = d['objc_count'] + d['swift_count']
return total_count
except (KeyError, ValueError):
print("Failed parse output! (%s,%s)" % (data['path'], data['test_name']))
print("Failed parse output! (%s,%s)" %
(data['path'], data['test_name']))
return None


def process_input(self, data):
test_name = '({},{})'.format(data['opt'], data['test_name'])
print("Running {}...".format(test_name))
sys.stdout.flush()
total_count1 = self.run_test(data, data['num_iters'])
if total_count1 is None:
return LeaksRunnerResult(test_name)
total_count2 = self.run_test(data, data['num_iters']+1)
total_count2 = self.run_test(data, data['num_iters'] + 1)
if total_count2 is None:
return LeaksRunnerResult(test_name)
return LeaksRunnerResult(test_name, total_count2 - total_count1)

SWIFT_BIN_DIR = os.path.dirname(os.path.abspath(__file__))


def parse_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-filter', type=str, default=None,
help='Filter out any test that does not match the given regex')
parser.add_argument(
'-filter', type=str, default=None,
help='Filter out any test that does not match the given regex')
parser.add_argument('-num-samples', type=int, default=2)
parser.add_argument('-num-iters', type=int, default=2)
return parser.parse_args()

if __name__ == "__main__":
args = parse_args()
l = LeaksRunnerBenchmarkDriver(SWIFT_BIN_DIR, XFAIL_LIST, args.num_samples, args.num_iters)
l = LeaksRunnerBenchmarkDriver(
SWIFT_BIN_DIR, XFAIL_LIST, args.num_samples, args.num_iters)
if l.run(args.filter):
sys.exit(0)
else:
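
A recurring pattern above is wrapping a filtered list comprehension so the if clause starts its own line. A small, hedged sketch of the same shape; the data is made up, though the two class names are borrowed from the ignore list above:

IGNORABLE_GLOBAL_OBJC_CLASSES = set(['NSCache', '__NSPlaceholderDate'])
seen_classes = ['NSCache', 'MyBenchmarkClass', '__NSPlaceholderDate']

# The comprehension stays under 80 columns by putting the filter on line two.
interesting = [x for x in seen_classes
               if x not in IGNORABLE_GLOBAL_OBJC_CLASSES]
print(interesting)  # ['MyBenchmarkClass']
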
21 changes: 14 additions & 7 deletions benchmark/scripts/compare_perf_tests.py
@@ -16,7 +16,8 @@
# e.g.
# repeat.sh 3 tot/bin/Benchmark_Driver run -o -O > tot.O.times
# repeat.sh 3 mypatch/bin/Benchmark_Driver run -o -O > mypatch.O.times
# compare_perf_tests.py tot.O.times mypatch.O.times | sort -t, -n -k 6 | column -s, -t
# compare_perf_tests.py tot.O.times mypatch.O.times | sort -t, -n -k 6 | \
# column -s, -t

from __future__ import print_function
import re
@@ -26,8 +27,10 @@
VERBOSE = 0

# #,TEST,SAMPLES,MIN(ms),MAX(ms),MEAN(ms),SD(ms),MEDIAN(ms)
SCORERE = re.compile(r"(\d+),[ \t]*(\w+),[ \t]*([\d.]+),[ \t]*([\d.]+),[ \t]*([\d.]+)")
TOTALRE = re.compile(r"()(Totals),[ \t]*([\d.]+),[ \t]*([\d.]+),[ \t]*([\d.]+)")
SCORERE = re.compile(
r"(\d+),[ \t]*(\w+),[ \t]*([\d.]+),[ \t]*([\d.]+),[ \t]*([\d.]+)")
TOTALRE = re.compile(
r"()(Totals),[ \t]*([\d.]+),[ \t]*([\d.]+),[ \t]*([\d.]+)")
NUMGROUP = 1
KEYGROUP = 2
BESTGROUP = 4
@@ -70,7 +73,8 @@ def get_scores(fname):
scores[m.group(KEYGROUP)] = []
worstscores[m.group(KEYGROUP)] = []
scores[m.group(KEYGROUP)].append(parse_int(m.group(BESTGROUP)))
worstscores[m.group(KEYGROUP)].append(parse_int(m.group(WORSTGROUP)))
worstscores[m.group(KEYGROUP)].append(
parse_int(m.group(WORSTGROUP)))
if is_total:
nums[m.group(KEYGROUP)] = ""
else:
@@ -83,7 +87,8 @@


def is_max_score(newscore, maxscore, invert):
return not maxscore or (newscore > maxscore if not invert else newscore < maxscore)
return not maxscore or \
(newscore > maxscore if not invert else newscore < maxscore)


def compare_scores(key, score1, worstsample1, score2, worstsample2, runs, num):
@@ -129,7 +134,8 @@ def compare_scores(key, score1, worstsample1, score2, worstsample2, runs, num):
print (("%+d" % (bestscore2 - bestscore1)).rjust(9), end="")

if bestscore1 != 0 and bestscore2 != 0:
print (("%+.1f%%" % (((float(bestscore2) / bestscore1) - 1) * 100)).rjust(9), end="")
print (("%+.1f%%" %
(((float(bestscore2) / bestscore1) - 1) * 100)).rjust(9), end="")
if ShowSpeedup:
Num, Den = float(bestscore2), float(bestscore1)
if IsTime:
@@ -221,4 +227,5 @@ def usage():
if key not in scores2:
print(key, "not in", file2)
continue
compare_scores(key, scores1[key], worstscores1[key], scores2[key], worstscores2[key], runs, nums[key])
compare_scores(key, scores1[key], worstscores1[key], scores2[key],
worstscores2[key], runs, nums[key])
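
The long regular expressions above are brought under 80 columns by wrapping the re.compile() call. An alternative idiom, not used in this patch, is adjacent string literal concatenation, which splits the pattern text instead of the argument list; a hedged sketch that builds the same pattern as SCORERE:

import re

# Adjacent string literals are joined at compile time, so this is the same
# regular expression as the single-line original.
SCORE_RE = re.compile(
    r"(\d+),[ \t]*(\w+),"
    r"[ \t]*([\d.]+),[ \t]*([\d.]+),[ \t]*([\d.]+)")

assert SCORE_RE.match("12, Ackermann, 1.0, 2.0, 1.5")
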
1 change: 1 addition & 0 deletions benchmark/scripts/generate_harness/generate_harness.py
@@ -42,6 +42,7 @@

# CMakeList multi-source
class MultiSourceBench(object):

def __init__(self, path):
self.name = os.path.basename(path)
self.files = [x for x in os.listdir(path)
25 changes: 18 additions & 7 deletions benchmark/scripts/perf_test_driver/perf_test_driver.py
@@ -24,7 +24,8 @@ def __init__(self, name, status, output, xfail_list):
self.name = name
self.status = status
self.output = output
self.is_xfailed = any((re.match(x, self.name) is not None for x in xfail_list))
self.is_xfailed = any(
(re.match(x, self.name) is not None for x in xfail_list))

def is_failure(self):
return self.get_result() in ['FAIL', 'XPASS']
@@ -51,7 +52,7 @@ def get_data(self):
return self.data

def merge_in_extra_data(self, d):
"""Rather than modifying the extra data dict, just return it as a no-op"""
"""Rather than modifying the extra data dict, return it as a no-op"""
return d

def print_data(self, max_test_len):
@@ -67,8 +68,10 @@ def _unwrap_self(args):

class BenchmarkDriver(object):

def __init__(self, binary_dir, xfail_list, enable_parallel=False, opt_levels=BenchmarkDriver_OptLevels):
self.targets = [(os.path.join(binary_dir, 'Benchmark_%s' % o), o) for o in opt_levels]
def __init__(self, binary_dir, xfail_list, enable_parallel=False,
opt_levels=BenchmarkDriver_OptLevels):
self.targets = [(os.path.join(binary_dir, 'Benchmark_%s' % o), o)
for o in opt_levels]
self.xfail_list = xfail_list
self.enable_parallel = enable_parallel
self.data = None
@@ -85,7 +88,8 @@ def process_input(self, data):

def run_for_opt_level(self, binary, opt_level, test_filter):
print("testing driver at path: %s" % binary)
names = [n.strip() for n in subprocess.check_output([binary, "--list"]).split()[2:]]
names = [n.strip() for n in subprocess.check_output(
[binary, "--list"]).split()[2:]]
if test_filter:
regex = re.compile(test_filter)
names = [n for n in names if regex.match(n)]
@@ -111,7 +115,12 @@ def reduce_results(acc, r):
acc['extra_data'] = r.merge_in_extra_data(acc['extra_data'])
return acc

return reduce(reduce_results, results, {'result': [], 'has_failure': False, 'max_test_len': 0, 'extra_data': {}})
return reduce(reduce_results, results, {
'result': [],
'has_failure': False,
'max_test_len': 0,
'extra_data': {}
})

def print_data(self, data, max_test_len):
print("Results:")
@@ -121,7 +130,9 @@ def print_data(self, data, max_test_len):
r.print_data(max_test_len)

def run(self, test_filter=None):
self.data = [self.run_for_opt_level(binary, opt_level, test_filter) for binary, opt_level in self.targets]
self.data = [
self.run_for_opt_level(binary, opt_level, test_filter)
for binary, opt_level in self.targets]
max_test_len = reduce(max, [d['max_test_len']for d in self.data])
has_failure = reduce(max, [d['has_failure']for d in self.data])
self.print_data(self.data, max_test_len)
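
The reduce call above is kept within 80 columns by spreading its seed dictionary over several lines. A small, hedged sketch of the same shape with made-up data; only the multi-line seed mirrors the change above:

from functools import reduce  # a builtin on Python 2, imported on Python 3

results = [{'name': 'a', 'len': 3}, {'name': 'bb', 'len': 7}]

def fold(acc, r):
    # Track the longest name length seen so far.
    acc['max_test_len'] = max(acc['max_test_len'], r['len'])
    return acc

summary = reduce(fold, results, {
    'max_test_len': 0,
})
print(summary)  # {'max_test_len': 7}
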
12 changes: 6 additions & 6 deletions docs/conf.py
@@ -18,7 +18,7 @@
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------
# -- General configuration -----------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
@@ -89,7 +89,7 @@
# modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------
# -- Options for HTML output ---------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
@@ -176,7 +176,7 @@
htmlhelp_basename = 'Swiftdoc'


# -- Options for LaTeX output --------------------------------------------------
# -- Options for LaTeX output --------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
@@ -217,7 +217,7 @@
# latex_domain_indices = True


# -- Options for manual page output --------------------------------------------
# -- Options for manual page output --------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
@@ -230,7 +230,7 @@
# man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------
# -- Options for Texinfo output ------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
@@ -255,7 +255,7 @@
intersphinx_mapping = {}


# -- Options for extensions ----------------------------------------------------
# -- Options for extensions ----------------------------------------------

# Enable this if you want TODOs to show up in the generated documentation.
todo_include_todos = True
3 changes: 2 additions & 1 deletion docs/scripts/ns-html2rst
@@ -30,7 +30,8 @@ usage: nshtml2rst < NSString.html > NSString.rst
r'<pre>\1</pre>',
html, flags=re.MULTILINE | re.DOTALL)

# Remove links from <code>...</code>, which doesn't have a rendering in ReST
# Remove links from <code>...</code>, which doesn't have a rendering in
# ReST
html = re.sub(
r'<code>(.*?)<a[^>]*?>(.*?)</a>(.*?)</code>',
r'<code>\1\2\3</code>',
15 changes: 9 additions & 6 deletions test/Driver/Inputs/filelists/check-filelist-abc.py
@@ -30,9 +30,12 @@

with open(filelistFile, 'r') as f:
lines = f.readlines()
assert lines[0].endswith("/a.swift\n") or lines[0].endswith("/a.swiftmodule\n")
assert lines[1].endswith("/b.swift\n") or lines[1].endswith("/b.swiftmodule\n")
assert lines[2].endswith("/c.swift\n") or lines[2].endswith("/c.swiftmodule\n")
assert(lines[0].endswith("/a.swift\n") or
lines[0].endswith("/a.swiftmodule\n"))
assert(lines[1].endswith("/b.swift\n") or
lines[1].endswith("/b.swiftmodule\n"))
assert(lines[2].endswith("/c.swift\n") or
lines[2].endswith("/c.swiftmodule\n"))

if primaryFile:
print("Handled", os.path.basename(primaryFile))
@@ -45,7 +48,7 @@
outputListFile = sys.argv[sys.argv.index('-output-filelist') + 1]
with open(outputListFile, 'r') as f:
lines = f.readlines()
assert lines[0].endswith("/a.o\n") or lines[0].endswith("/a.bc\n")
assert lines[1].endswith("/b.o\n") or lines[1].endswith("/b.bc\n")
assert lines[2].endswith("/c.o\n") or lines[2].endswith("/c.bc\n")
assert(lines[0].endswith("/a.o\n") or lines[0].endswith("/a.bc\n"))
assert(lines[1].endswith("/b.o\n") or lines[1].endswith("/b.bc\n"))
assert(lines[2].endswith("/c.o\n") or lines[2].endswith("/c.bc\n"))
print("...with output!")
