yapf run_tests
ncteisen committed Dec 12, 2017
1 parent a69c690 commit 888093c
Showing 9 changed files with 4,005 additions and 3,470 deletions.
5 changes: 1 addition & 4 deletions tools/distrib/yapf_code.sh
@@ -25,10 +25,7 @@ DIRS=(
'tools/distrib'
'tools/interop_matrix'
'tools/profiling'
'tools/run_tests/python_utils'
'tools/run_tests/sanity'
'tools/run_tests/performance'
'tools/run_tests/artifacts'
'tools/run_tests'
)
EXCLUSIONS=(
'grpcio/grpc_*.py'
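
The yapf_code.sh change above collapses the four tools/run_tests/* entries into the single parent directory, so the formatter now walks everything under tools/run_tests (the entries above the visible hunk are unchanged). Below is a minimal sketch of the kind of invocation the script drives, assuming standard yapf CLI flags; the real script's setup and options are not shown in this commit:

    # Sketch only: format each listed directory in place, skipping the excluded glob.
    # --in-place, --recursive and --exclude are standard yapf options; everything else
    # about the real yapf_code.sh (virtualenv, pinned yapf version, style file) is assumed.
    DIRS=('tools/distrib' 'tools/interop_matrix' 'tools/profiling' 'tools/run_tests')
    for dir in "${DIRS[@]}"; do
        yapf --in-place --recursive --exclude 'grpcio/grpc_*.py' "$dir"
    done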
302 changes: 154 additions & 148 deletions tools/run_tests/run_build_statistics.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tool to get build statistics from Jenkins and upload to BigQuery."""

from __future__ import print_function
@@ -27,39 +26,38 @@
import sys
import urllib


gcp_utils_dir = os.path.abspath(os.path.join(
os.path.dirname(__file__), '../gcp/utils'))
gcp_utils_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)
import big_query_utils


_PROJECT_ID = 'grpc-testing'
_HAS_MATRIX = True
_BUILDS = {'gRPC_interop_master': not _HAS_MATRIX,
'gRPC_master_linux': not _HAS_MATRIX,
'gRPC_master_macos': not _HAS_MATRIX,
'gRPC_master_windows': not _HAS_MATRIX,
'gRPC_performance_master': not _HAS_MATRIX,
'gRPC_portability_master_linux': not _HAS_MATRIX,
'gRPC_portability_master_windows': not _HAS_MATRIX,
'gRPC_master_asanitizer_c': not _HAS_MATRIX,
'gRPC_master_asanitizer_cpp': not _HAS_MATRIX,
'gRPC_master_msan_c': not _HAS_MATRIX,
'gRPC_master_tsanitizer_c': not _HAS_MATRIX,
'gRPC_master_tsan_cpp': not _HAS_MATRIX,
'gRPC_interop_pull_requests': not _HAS_MATRIX,
'gRPC_performance_pull_requests': not _HAS_MATRIX,
'gRPC_portability_pull_requests_linux': not _HAS_MATRIX,
'gRPC_portability_pr_win': not _HAS_MATRIX,
'gRPC_pull_requests_linux': not _HAS_MATRIX,
'gRPC_pull_requests_macos': not _HAS_MATRIX,
'gRPC_pr_win': not _HAS_MATRIX,
'gRPC_pull_requests_asan_c': not _HAS_MATRIX,
'gRPC_pull_requests_asan_cpp': not _HAS_MATRIX,
'gRPC_pull_requests_msan_c': not _HAS_MATRIX,
'gRPC_pull_requests_tsan_c': not _HAS_MATRIX,
'gRPC_pull_requests_tsan_cpp': not _HAS_MATRIX,
_BUILDS = {
'gRPC_interop_master': not _HAS_MATRIX,
'gRPC_master_linux': not _HAS_MATRIX,
'gRPC_master_macos': not _HAS_MATRIX,
'gRPC_master_windows': not _HAS_MATRIX,
'gRPC_performance_master': not _HAS_MATRIX,
'gRPC_portability_master_linux': not _HAS_MATRIX,
'gRPC_portability_master_windows': not _HAS_MATRIX,
'gRPC_master_asanitizer_c': not _HAS_MATRIX,
'gRPC_master_asanitizer_cpp': not _HAS_MATRIX,
'gRPC_master_msan_c': not _HAS_MATRIX,
'gRPC_master_tsanitizer_c': not _HAS_MATRIX,
'gRPC_master_tsan_cpp': not _HAS_MATRIX,
'gRPC_interop_pull_requests': not _HAS_MATRIX,
'gRPC_performance_pull_requests': not _HAS_MATRIX,
'gRPC_portability_pull_requests_linux': not _HAS_MATRIX,
'gRPC_portability_pr_win': not _HAS_MATRIX,
'gRPC_pull_requests_linux': not _HAS_MATRIX,
'gRPC_pull_requests_macos': not _HAS_MATRIX,
'gRPC_pr_win': not _HAS_MATRIX,
'gRPC_pull_requests_asan_c': not _HAS_MATRIX,
'gRPC_pull_requests_asan_cpp': not _HAS_MATRIX,
'gRPC_pull_requests_msan_c': not _HAS_MATRIX,
'gRPC_pull_requests_tsan_c': not _HAS_MATRIX,
'gRPC_pull_requests_tsan_cpp': not _HAS_MATRIX,
}
_URL_BASE = 'https://grpc-testing.appspot.com/job'

@@ -99,147 +97,155 @@


def _scrape_for_known_errors(html):
error_list = []
for known_error in _KNOWN_ERRORS:
errors = re.findall(known_error, html)
this_error_count = len(errors)
if this_error_count > 0:
error_list.append({'description': known_error,
'count': this_error_count})
print('====> %d failures due to %s' % (this_error_count, known_error))
return error_list
error_list = []
for known_error in _KNOWN_ERRORS:
errors = re.findall(known_error, html)
this_error_count = len(errors)
if this_error_count > 0:
error_list.append({
'description': known_error,
'count': this_error_count
})
print('====> %d failures due to %s' %
(this_error_count, known_error))
return error_list


def _no_report_files_found(html):
return _NO_REPORT_FILES_FOUND_ERROR in html
return _NO_REPORT_FILES_FOUND_ERROR in html


def _get_last_processed_buildnumber(build_name):
query = 'SELECT max(build_number) FROM [%s:%s.%s];' % (
_PROJECT_ID, _DATASET_ID, build_name)
query_job = big_query_utils.sync_query_job(bq, _PROJECT_ID, query)
page = bq.jobs().getQueryResults(
pageToken=None,
**query_job['jobReference']).execute(num_retries=3)
if page['rows'][0]['f'][0]['v']:
return int(page['rows'][0]['f'][0]['v'])
return 0
query = 'SELECT max(build_number) FROM [%s:%s.%s];' % (
_PROJECT_ID, _DATASET_ID, build_name)
query_job = big_query_utils.sync_query_job(bq, _PROJECT_ID, query)
page = bq.jobs().getQueryResults(
pageToken=None, **query_job['jobReference']).execute(num_retries=3)
if page['rows'][0]['f'][0]['v']:
return int(page['rows'][0]['f'][0]['v'])
return 0


def _process_matrix(build, url_base):
matrix_list = []
for matrix in build.get_matrix_runs():
matrix_str = re.match('.*\\xc2\\xbb ((?:[^,]+,?)+) #.*',
matrix.name).groups()[0]
matrix_tuple = matrix_str.split(',')
json_url = '%s/config=%s,language=%s,platform=%s/testReport/api/json' % (
url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
console_url = '%s/config=%s,language=%s,platform=%s/consoleFull' % (
url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
matrix_dict = {'name': matrix_str,
'duration': matrix.get_duration().total_seconds()}
matrix_dict.update(_process_build(json_url, console_url))
matrix_list.append(matrix_dict)

return matrix_list
matrix_list = []
for matrix in build.get_matrix_runs():
matrix_str = re.match('.*\\xc2\\xbb ((?:[^,]+,?)+) #.*',
matrix.name).groups()[0]
matrix_tuple = matrix_str.split(',')
json_url = '%s/config=%s,language=%s,platform=%s/testReport/api/json' % (
url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
console_url = '%s/config=%s,language=%s,platform=%s/consoleFull' % (
url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
matrix_dict = {
'name': matrix_str,
'duration': matrix.get_duration().total_seconds()
}
matrix_dict.update(_process_build(json_url, console_url))
matrix_list.append(matrix_dict)

return matrix_list


def _process_build(json_url, console_url):
build_result = {}
error_list = []
try:
html = urllib.urlopen(json_url).read()
test_result = json.loads(html)
print('====> Parsing result from %s' % json_url)
failure_count = test_result['failCount']
build_result['pass_count'] = test_result['passCount']
build_result['failure_count'] = failure_count
# This means Jenkins failure occurred.
build_result['no_report_files_found'] = _no_report_files_found(html)
# Only check errors if Jenkins failure occurred.
if build_result['no_report_files_found']:
error_list = _scrape_for_known_errors(html)
except Exception as e:
print('====> Got exception for %s: %s.' % (json_url, str(e)))
print('====> Parsing errors from %s.' % console_url)
html = urllib.urlopen(console_url).read()
build_result['pass_count'] = 0
build_result['failure_count'] = 1
# In this case, the string doesn't exist in the result html but the fact
# that we fail to parse the result html indicates Jenkins failure and hence
# no report files were generated.
build_result['no_report_files_found'] = True
error_list = _scrape_for_known_errors(html)

if error_list:
build_result['error'] = error_list
elif build_result['no_report_files_found']:
build_result['error'] = [{'description': _UNKNOWN_ERROR, 'count': 1}]
else:
build_result['error'] = [{'description': '', 'count': 0}]

return build_result
build_result = {}
error_list = []
try:
html = urllib.urlopen(json_url).read()
test_result = json.loads(html)
print('====> Parsing result from %s' % json_url)
failure_count = test_result['failCount']
build_result['pass_count'] = test_result['passCount']
build_result['failure_count'] = failure_count
# This means Jenkins failure occurred.
build_result['no_report_files_found'] = _no_report_files_found(html)
# Only check errors if Jenkins failure occurred.
if build_result['no_report_files_found']:
error_list = _scrape_for_known_errors(html)
except Exception as e:
print('====> Got exception for %s: %s.' % (json_url, str(e)))
print('====> Parsing errors from %s.' % console_url)
html = urllib.urlopen(console_url).read()
build_result['pass_count'] = 0
build_result['failure_count'] = 1
# In this case, the string doesn't exist in the result html but the fact
# that we fail to parse the result html indicates Jenkins failure and hence
# no report files were generated.
build_result['no_report_files_found'] = True
error_list = _scrape_for_known_errors(html)

if error_list:
build_result['error'] = error_list
elif build_result['no_report_files_found']:
build_result['error'] = [{'description': _UNKNOWN_ERROR, 'count': 1}]
else:
build_result['error'] = [{'description': '', 'count': 0}]

return build_result


# parse command line
argp = argparse.ArgumentParser(description='Get build statistics.')
argp.add_argument('-u', '--username', default='jenkins')
argp.add_argument('-b', '--builds',
choices=['all'] + sorted(_BUILDS.keys()),
nargs='+',
default=['all'])
argp.add_argument(
'-b',
'--builds',
choices=['all'] + sorted(_BUILDS.keys()),
nargs='+',
default=['all'])
args = argp.parse_args()

J = Jenkins('https://grpc-testing.appspot.com', args.username, 'apiToken')
bq = big_query_utils.create_big_query()

for build_name in _BUILDS.keys() if 'all' in args.builds else args.builds:
print('====> Build: %s' % build_name)
# Since get_last_completed_build() always fails due to malformatted string
# error, we use get_build_metadata() instead.
job = None
try:
job = J[build_name]
except Exception as e:
print('====> Failed to get build %s: %s.' % (build_name, str(e)))
continue
last_processed_build_number = _get_last_processed_buildnumber(build_name)
last_complete_build_number = job.get_last_completed_buildnumber()
# To avoid processing all builds for a project never looked at. In this case,
# only examine 10 latest builds.
starting_build_number = max(last_processed_build_number+1,
last_complete_build_number-9)
for build_number in xrange(starting_build_number,
last_complete_build_number+1):
print('====> Processing %s build %d.' % (build_name, build_number))
build = None
print('====> Build: %s' % build_name)
# Since get_last_completed_build() always fails due to malformatted string
# error, we use get_build_metadata() instead.
job = None
try:
build = job.get_build_metadata(build_number)
print('====> Build status: %s.' % build.get_status())
if build.get_status() == 'ABORTED':
job = J[build_name]
except Exception as e:
print('====> Failed to get build %s: %s.' % (build_name, str(e)))
continue
# If any build is still running, stop processing this job. Next time, we
# start from where it was left so that all builds are processed
# sequentially.
if build.is_running():
print('====> Build %d is still running.' % build_number)
break
except KeyError:
print('====> Build %s is missing. Skip.' % build_number)
continue
build_result = {'build_number': build_number,
'timestamp': str(build.get_timestamp())}
url_base = json_url = '%s/%s/%d' % (_URL_BASE, build_name, build_number)
if _BUILDS[build_name]: # The build has matrix, such as gRPC_master.
build_result['matrix'] = _process_matrix(build, url_base)
else:
json_url = '%s/testReport/api/json' % url_base
console_url = '%s/consoleFull' % url_base
build_result['duration'] = build.get_duration().total_seconds()
build_stat = _process_build(json_url, console_url)
build_result.update(build_stat)
rows = [big_query_utils.make_row(build_number, build_result)]
if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID, build_name,
rows):
print('====> Error uploading result to bigquery.')
sys.exit(1)
last_processed_build_number = _get_last_processed_buildnumber(build_name)
last_complete_build_number = job.get_last_completed_buildnumber()
# To avoid processing all builds for a project never looked at. In this case,
# only examine 10 latest builds.
starting_build_number = max(last_processed_build_number + 1,
last_complete_build_number - 9)
for build_number in xrange(starting_build_number,
last_complete_build_number + 1):
print('====> Processing %s build %d.' % (build_name, build_number))
build = None
try:
build = job.get_build_metadata(build_number)
print('====> Build status: %s.' % build.get_status())
if build.get_status() == 'ABORTED':
continue
# If any build is still running, stop processing this job. Next time, we
# start from where it was left so that all builds are processed
# sequentially.
if build.is_running():
print('====> Build %d is still running.' % build_number)
break
except KeyError:
print('====> Build %s is missing. Skip.' % build_number)
continue
build_result = {
'build_number': build_number,
'timestamp': str(build.get_timestamp())
}
url_base = json_url = '%s/%s/%d' % (_URL_BASE, build_name, build_number)
if _BUILDS[build_name]: # The build has matrix, such as gRPC_master.
build_result['matrix'] = _process_matrix(build, url_base)
else:
json_url = '%s/testReport/api/json' % url_base
console_url = '%s/consoleFull' % url_base
build_result['duration'] = build.get_duration().total_seconds()
build_stat = _process_build(json_url, console_url)
build_result.update(build_stat)
rows = [big_query_utils.make_row(build_number, build_result)]
if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID,
build_name, rows):
print('====> Error uploading result to bigquery.')
sys.exit(1)
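
The reformatted run_build_statistics.py above shows yapf's usual output: four-space indentation, with dict literals and argument lists split one entry per line once a call would overflow the column limit. To preview that reformatting on a single file without writing it back, a diff-only run works; the style name here is an assumption, not taken from the repository's configuration:

    # Print what yapf would change without modifying the file (hypothetical style choice).
    yapf --diff --style=google tools/run_tests/run_build_statistics.py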