diff --git a/scripts/system-tests/lava-submit.py b/scripts/system-tests/lava-submit.py
index 5d92177..a49bacd 100644
--- a/scripts/system-tests/lava-submit.py
+++ b/scripts/system-tests/lava-submit.py
@@ -18,15 +18,19 @@ import argparse
 import base64
 import json
 import os
+import random
 import sys
 import time
 import xmlrpc.client
+from urllib.parse import urljoin
+from urllib.request import urlretrieve
 from collections import OrderedDict
 from enum import Enum
 
 USERNAME = 'frdeso'
 HOSTNAME = 'lava-master.internal.efficios.com'
 SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
+OBJSTORE_URL = "https://obj.internal.efficios.com/lava/results/"
 
 class TestType(Enum):
     baremetal_benchmarks=1
@@ -70,8 +74,7 @@ def check_job_all_test_cases_state_count(server, job):
 
 # Get the benchmark results from the lava bundle
 # save them as CSV files localy
-def fetch_benchmark_results(server, job):
-    content = get_job_bundle_content(server, job)
+def fetch_benchmark_results(build_id):
     testcases = ['processed_results_close.csv',
                  'processed_results_ioctl.csv',
                  'processed_results_open_efault.csv',
@@ -79,25 +82,9 @@
                  'processed_results_dup_close.csv',
                  'processed_results_raw_syscall_getpid.csv',
                  'processed_results_lttng_test_filter.csv']
-
-    # The result bundle is a large JSON containing the results of every testcase
-    # of the LAVA job as well as the files that were attached during the run.
-    # We need to iterate over this JSON to get the base64 representation of the
-    # benchmark results produced during the run.
-    for run in content['test_runs']:
-        # We only care of the benchmark testcases
-        if 'benchmark-' in run['test_id']:
-            if 'test_results' in run:
-                for res in run['test_results']:
-                    if 'attachments' in res:
-                        for a in res['attachments']:
-                            # We only save the results file
-                            if a['pathname'] in testcases:
-                                with open(a['pathname'],'wb') as f:
-                                    # Convert the b64 representation of the
-                                    # result file and write it to a file
-                                    # in the current working directory
-                                    f.write(base64.b64decode(a['content']))
+    for testcase in testcases:
+        url = urljoin(OBJSTORE_URL, "{:s}/{:s}".format(build_id, testcase))
+        urlretrieve(url, testcase)
 
 # Parse the attachment of the testcase to fetch the stdout of the test suite
 def print_test_output(server, job):
@@ -111,17 +98,18 @@
 
                     # Decode the base64 file and split on newlines to iterate
                     # on list
-                    testoutput = str(base64.b64decode(bytes(attachment['content'], encoding='UTF-8'))).split('\n')
+                    testoutput = str(base64.b64decode(bytes(attachment['content'], encoding='UTF-8')))
+
+                    testoutput = testoutput.replace('\\n', '\n')
 
                     # Create a generator to iterate on the lines and keeping
                     # the state of the iterator across the two loops.
-                    testoutput_iter = iter(testoutput)
+                    testoutput_iter = iter(testoutput.split('\n'))
                     for line in testoutput_iter:
 
                         # Find the header of the test case and start printing
                         # from there
                         if 'LAVA_SIGNAL_STARTTC run-tests' in line:
-                            found = True
                             print('---- TEST SUITE OUTPUT BEGIN ----')
                             for line in testoutput_iter:
                                 if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
@@ -131,7 +119,6 @@ def print_test_output(server, job):
                                     # section
                                     break
 
-            if found is True:
                 print('----- TEST SUITE OUTPUT END -----')
                 break
 
@@ -185,7 +172,7 @@ def get_config_cmd(build_device):
                 ])
     return command
 
-def get_baremetal_benchmarks_cmd():
+def get_baremetal_benchmarks_cmd(build_id):
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -193,37 +180,44 @@
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/baremetal-tests/failing-close.yml'
+                    'testdef': 'lava/system-tests/failing-close.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/baremetal-tests/failing-ioctl.yml'
+                    'testdef': 'lava/system-tests/failing-ioctl.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
+                    'testdef': 'lava/system-tests/failing-open-efault.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/baremetal-tests/success-dup-close.yml'
+                    'testdef': 'lava/system-tests/success-dup-close.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/baremetal-tests/raw-syscall-getpid.yml'
+                    'testdef': 'lava/system-tests/raw-syscall-getpid.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
+                    'testdef': 'lava/system-tests/failing-open-enoent.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/baremetal-tests/lttng-test-filter.yml'
+                    'testdef': 'lava/system-tests/lttng-test-filter.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 }
             ],
             'timeout': 7200
@@ -231,7 +225,7 @@
     })
     return command
 
-def get_baremetal_tests_cmd():
+def get_baremetal_tests_cmd(build_id):
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -239,7 +233,8 @@
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/baremetal-tests/perf-tests.yml'
+                    'testdef': 'lava/system-tests/perf-tests.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 }
             ],
             'timeout': 3600
@@ -247,7 +242,7 @@
     })
     return command
 
-def get_kvm_tests_cmd():
+def get_kvm_tests_cmd(build_id):
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -255,19 +250,23 @@
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/baremetal-tests/kernel-tests.yml'
+                    'testdef': 'lava/system-tests/kernel-tests.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/baremetal-tests/destructive-tests.yml'
+                    'testdef': 'lava/system-tests/destructive-tests.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 }
             ],
             'timeout': 7200
         }
     })
     return command
-def get_kprobes_test_cmd():
+
+def get_kprobes_generate_data_cmd():
+    random_seed = random.randint(0, 1000000)
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -275,10 +274,28 @@
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/baremetal-tests/kprobe-fuzzing-tests.yml'
+                    'testdef': 'lava/system-tests/kprobe-fuzzing-generate-data.yml',
+                    'parameters': { 'RANDOM_SEED': str(random_seed) }
                 }
             ],
-            'timeout': 7200
+            'timeout': 60
+        }
+    })
+    return command
+
+def get_kprobes_test_cmd(round_nb):
+    command = OrderedDict({
+        'command': 'lava_test_shell',
+        'parameters': {
+            'testdef_repos': [
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/system-tests/kprobe-fuzzing-tests.yml',
+                    'parameters': { 'ROUND_NB': str(round_nb) }
+                }
+            ],
+            'timeout': 1000
         }
     })
     return command
@@ -293,7 +310,7 @@ def get_results_cmd(stream_name):
     command['parameters']['stream']='/anonymous/'+stream_name+'/'
     return command
 
-def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
+def get_deploy_cmd_kvm(jenkins_job, kernel_path, lttng_modules_path):
     command = OrderedDict({
         'command': 'deploy_kernel',
         'metadata': {},
@@ -307,14 +324,13 @@
         }
     })
 
-    command['parameters']['customize'][SCP_PATH+linux_modules_path]=['rootfs:/','archive']
     command['parameters']['customize'][SCP_PATH+lttng_modules_path]=['rootfs:/','archive']
     command['parameters']['kernel'] = str(SCP_PATH+kernel_path)
     command['metadata']['jenkins_jobname'] = jenkins_job
 
     return command
 
-def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
+def get_deploy_cmd_x86(jenkins_job, kernel_path, lttng_modules_path, nb_iter=None):
     command = OrderedDict({
         'command': 'deploy_kernel',
         'metadata': {},
@@ -326,7 +342,6 @@
         }
     })
 
-    command['parameters']['overlays'].append( str(SCP_PATH+linux_modules_path))
     command['parameters']['overlays'].append( str(SCP_PATH+lttng_modules_path))
     command['parameters']['kernel'] = str(SCP_PATH+kernel_path)
     command['metadata']['jenkins_jobname'] = jenkins_job
@@ -383,9 +398,9 @@
     parser.add_argument('-t', '--type', required=True)
     parser.add_argument('-j', '--jobname', required=True)
     parser.add_argument('-k', '--kernel', required=True)
-    parser.add_argument('-km', '--kmodule', required=True)
     parser.add_argument('-lm', '--lmodule', required=True)
     parser.add_argument('-tc', '--tools-commit', required=True)
+    parser.add_argument('-id', '--build-id', required=True)
     parser.add_argument('-uc', '--ust-commit', required=False)
 
     args = parser.parse_args()
@@ -410,20 +425,20 @@
 
     if test_type is TestType.baremetal_benchmarks:
         j = create_new_job(args.jobname, build_device='x86')
-        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
+        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.lmodule))
     elif test_type is TestType.baremetal_tests:
         j = create_new_job(args.jobname, build_device='x86')
-        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
+        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.lmodule))
     elif test_type is TestType.kvm_tests or test_type is TestType.kvm_fuzzing_tests:
         j = create_new_job(args.jobname, build_device='kvm')
-        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))
+        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.lmodule))
 
     j['actions'].append(get_boot_cmd())
 
     if test_type is TestType.baremetal_benchmarks:
         j['actions'].append(get_config_cmd('x86'))
         j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
-        j['actions'].append(get_baremetal_benchmarks_cmd())
+        j['actions'].append(get_baremetal_benchmarks_cmd(args.build_id))
         j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
     elif test_type is TestType.baremetal_tests:
         if args.ust_commit is None:
@@ -431,7 +446,7 @@
             return -1
         j['actions'].append(get_config_cmd('x86'))
         j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
-        j['actions'].append(get_baremetal_tests_cmd())
+        j['actions'].append(get_baremetal_tests_cmd(args.build_id))
         j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
     elif test_type is TestType.kvm_tests:
         if args.ust_commit is None:
@@ -439,15 +454,16 @@
             return -1
         j['actions'].append(get_config_cmd('kvm'))
         j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
-        j['actions'].append(get_kvm_tests_cmd())
+        j['actions'].append(get_kvm_tests_cmd(args.build_id))
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
     elif test_type is TestType.kvm_fuzzing_tests:
         if args.ust_commit is None:
             print('Tests runs need -uc/--ust-commit options. Exiting...')
             return -1
         j['actions'].append(get_config_cmd('kvm'))
-        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
-        j['actions'].append(get_kprobes_test_cmd())
+        j['actions'].append(get_kprobes_generate_data_cmd())
+        for i in range(10):
+            j['actions'].append(get_kprobes_test_cmd(round_nb=i))
         j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
     else:
         assert False, 'Unknown test type'
@@ -472,7 +488,7 @@
     if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
         print_test_output(server, jobid)
     elif test_type is TestType.baremetal_benchmarks:
-        fetch_benchmark_results(server, jobid)
+        fetch_benchmark_results(args.build_id)
 
     print('Job ended with {} status.'.format(jobstatus))
     if jobstatus not in 'Complete':
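
For reference, a minimal sketch (not part of the patch) of the object-store layout the new fetch_benchmark_results() relies on: each CSV is expected at OBJSTORE_URL/<Jenkins build id>/<testcase>. The build id value below is hypothetical.

    from urllib.parse import urljoin

    OBJSTORE_URL = "https://obj.internal.efficios.com/lava/results/"
    build_id = "1234"  # hypothetical Jenkins build id

    # Because OBJSTORE_URL ends with a trailing '/', urljoin() appends the
    # relative path instead of replacing the last path segment.
    url = urljoin(OBJSTORE_URL, "{:s}/{:s}".format(build_id, "processed_results_close.csv"))
    print(url)
    # https://obj.internal.efficios.com/lava/results/1234/processed_results_close.csv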