jjb: Add back open_enoent Lava benchmark testcase
diff --git a/scripts/lttng-baremetal-tests/lava-submit.py b/scripts/lttng-baremetal-tests/lava-submit.py
index 1a3a68f1c76edf66cdd93fef86f8ca833d07785c..8ba92514f8a239a9907c6105310324c0cb315730 100644
--- a/scripts/lttng-baremetal-tests/lava-submit.py
+++ b/scripts/lttng-baremetal-tests/lava-submit.py
@@ -20,7 +20,7 @@ import json
 import os
 import sys
 import time
-import xmlrpclib
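+# xmlrpclib was renamed to xmlrpc.client in Python 3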
+import xmlrpc.client
 from collections import OrderedDict
 from enum import Enum
 
@@ -29,12 +29,16 @@ HOSTNAME = 'lava-master.internal.efficios.com'
 SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
 
 class TestType(Enum):
-    benchmarks=1
-    tests=2
+    baremetal_benchmarks = 1
+    baremetal_tests = 2
+    kvm_tests = 3
 
 def get_job_bundle_content(server, job):
-    bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
-    bundle = server.dashboard.get(bundle_sha)
+    try:
+        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
+        bundle = server.dashboard.get(bundle_sha)
+    except xmlrpc.client.Fault as f:
+        print('Error while fetching results bundle:', f.faultString)
+        # Without a bundle there is nothing to parse below; propagate the fault
+        raise
 
     return json.loads(bundle['content'])
 
@@ -62,6 +66,35 @@ def check_job_all_test_cases_state_count(server, job):
                     failed_tests+=1
     return (passed_tests, failed_tests)
 
+# Get the benchmark results from the LAVA bundle and
+# save them locally as CSV files
+def fetch_benchmark_results(server, job):
+    content = get_job_bundle_content(server, job)
+    testcases = ['processed_results_close.csv',
+                 'processed_results_open_efault.csv',
+                 'processed_results_open_enoent.csv',
+                 'processed_results_dup_close.csv',
+                 'processed_results_lttng_test_filter.csv']
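+    # One CSV per benchmark testdef scheduled in get_baremetal_benchmarks_cmd()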
+
+    # The result bundle is a large JSON containing the results of every testcase
+    # of the LAVA job as well as the files that were attached during the run.
+    # We need to iterate over this JSON to get the base64 representation of the
+    # benchmark results produced during the run.
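+    # A sketch of the layout this loop assumes (not the full bundle schema):
+    #   {'test_runs': [{'test_id': 'benchmark-...',
+    #                   'test_results': [{'attachments': [
+    #                       {'pathname': '<one of testcases>',
+    #                        'content': '<base64 CSV data>'}]}]}]}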
+    for run in content['test_runs']:
+        # We only care about the benchmark testcases
+        if 'benchmark-' in run['test_id']:
+            if 'test_results' in run:
+                for res in run['test_results']:
+                    if 'attachments' in res:
+                        for a in res['attachments']:
+                            # We only save the known result files
+                            if a['pathname'] in testcases:
+                                with open(a['pathname'], 'wb') as f:
+                                    # Convert the b64 representation of the
+                                    # result file and write it to a file
+                                    # in the current working directory
+                                    f.write(base64.b64decode(a['content']))
+
 # Parse the attachment of the testcase to fetch the stdout of the test suite
 def print_test_output(server, job):
     content = get_job_bundle_content(server, job)
@@ -74,7 +107,7 @@ def print_test_output(server, job):
 
                     # Decode the base64 file and split on newlines to iterate
                     # on list
-                    testoutput = base64.b64decode(attachment['content']).split('\n')
+                    # b64decode() returns bytes on Python 3; decode explicitly
+                    # before splitting on newlines (str() would only yield the
+                    # bytes repr and never match '\n')
+                    testoutput = base64.b64decode(attachment['content']).decode('utf-8').split('\n')
 
                     # Create a generator to iterate on the lines and keeping
                     # the state of the iterator across the two loops.
@@ -121,17 +154,17 @@ def get_boot_cmd():
 def get_config_cmd(build_device):
     packages=['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip', \
             'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev', \
-            'libelf-dev', 'libmount-dev', 'libxml2']
+            'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev', \
+            'libnuma-dev']
     command = OrderedDict({
         'command': 'lava_command_run',
         'parameters': {
             'commands': [
-                'ifup eth0',
-                'route -n',
                 'cat /etc/resolv.conf',
                 'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                 'groupadd tracing'
-                ]
+                ],
+            'timeout': 300
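+            # timeout is expressed in seconds (300 s = 5 min)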
             }
         })
     if build_device in 'x86':
@@ -143,11 +176,12 @@ def get_config_cmd(build_device):
                     'depmod -a',
                     'locale-gen en_US.UTF-8',
                     'apt-get update',
+                    'apt-get upgrade -y',
                     'apt-get install -y {}'.format(' '.join(packages))
                 ])
     return command
 
-def get_benchmarks_cmd():
+def get_baremetal_benchmarks_cmd():
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -162,11 +196,32 @@ def get_benchmarks_cmd():
                     'revision': 'master',
                     'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
                 },
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/baremetal-tests/success-dup-close.yml'
+                },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
                     'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
                 },
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/baremetal-tests/lttng-test-filter.yml'
+                }
+                ],
+            'timeout': 18000
+            }
+        })
+    return command
+
+def get_baremetal_tests_cmd():
+    command = OrderedDict({
+        'command': 'lava_test_shell',
+        'parameters': {
+            'testdef_repos': [
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
@@ -178,7 +233,7 @@ def get_benchmarks_cmd():
         })
     return command
 
-def get_tests_cmd():
+def get_kvm_tests_cmd():
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -187,6 +242,11 @@ def get_tests_cmd():
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
                     'testdef': 'lava/baremetal-tests/kernel-tests.yml'
+                },
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/baremetal-tests/destructive-tests.yml'
                 }
                 ],
             'timeout': 18000
@@ -211,8 +271,10 @@ def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modul
             'parameters': {
                 'customize': {},
                 'kernel': None,
-                'rootfs': 'file:///var/lib/lava-server/default/media/images/trusty-grub.img.gz',
-                'target_type': 'ubuntu'
+                'target_type': 'ubuntu',
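+                # The Xenial image boots to an interactive login, so the
+                # dispatcher needs the prompt string and a user to log in as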
+                'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
+                'login_prompt': 'kvm02 login:',
+                'username': 'root'
                 }
             })
 
@@ -250,6 +312,8 @@ def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
         'command': 'lava_command_run',
         'parameters': {
             'commands': [
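+                # Upgrade pip first; 'hash -r' makes the shell drop its cached
+                # pip3 path so the upgraded binary is used by later commands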
+                'pip3 install --upgrade pip',
+                'hash -r',
                 'git clone https://github.com/frdeso/syscall-bench-it.git bm',
                 'pip3 install vlttng',
                         ],
@@ -257,7 +321,7 @@ def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
             }
         })
 
-    vlttng_cmd = 'vlttng --jobs=16 --profile urcu-master' \
+    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
                     ' --profile babeltrace-stable-1.4 ' \
                     ' --profile lttng-tools-master' \
                     ' --override projects.lttng-tools.checkout='+lttng_tools_commit + \
@@ -290,71 +354,96 @@ def main():
     parser.add_argument('-k', '--kernel', required=True)
     parser.add_argument('-km', '--kmodule', required=True)
     parser.add_argument('-lm', '--lmodule', required=True)
-    parser.add_argument('-l', '--lava-key', required=True)
     parser.add_argument('-tc', '--tools-commit', required=True)
     parser.add_argument('-uc', '--ust-commit', required=False)
     args = parser.parse_args()
 
-    if args.type in 'benchmarks':
-        test_type = TestType.benchmarks
-    elif args.type in 'tests':
-        test_type = TestType.tests
+    if args.type == 'baremetal-benchmarks':
+        test_type = TestType.baremetal_benchmarks
+    elif args.type == 'baremetal-tests':
+        test_type = TestType.baremetal_tests
+    elif args.type == 'kvm-tests':
+        test_type = TestType.kvm_tests
     else:
         print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
         return -1
 
-    if test_type is TestType.benchmarks:
+    lava_api_key = None
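+    # The LAVA API token now comes from the environment (the Jenkins job is
+    # expected to export it) instead of the removed --lava-key argument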
+    try:
+        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
+    except KeyError:
+        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...')
+        return -1
+
+    if test_type is TestType.baremetal_benchmarks:
+        j = create_new_job(args.jobname, build_device='x86')
+        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
+    elif test_type is TestType.baremetal_tests:
         j = create_new_job(args.jobname, build_device='x86')
         j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
-    elif test_type  is TestType.tests:
+    elif test_type is TestType.kvm_tests:
         j = create_new_job(args.jobname, build_device='kvm')
         j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))
 
     j['actions'].append(get_boot_cmd())
 
-    if test_type is TestType.benchmarks:
+    if test_type is TestType.baremetal_benchmarks:
         j['actions'].append(get_config_cmd('x86'))
         j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
-        j['actions'].append(get_benchmarks_cmd())
+        j['actions'].append(get_baremetal_benchmarks_cmd())
         j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
-    elif test_type  is TestType.tests:
+    elif test_type is TestType.baremetal_tests:
+        if args.ust_commit is None:
+            print('Test runs need the -uc/--ust-commit option. Exiting...')
+            return -1
+        j['actions'].append(get_config_cmd('x86'))
+        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
+        j['actions'].append(get_baremetal_tests_cmd())
+        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
+    elif test_type is TestType.kvm_tests:
         if args.ust_commit is None:
             print('Test runs need the -uc/--ust-commit option. Exiting...')
             return -1
         j['actions'].append(get_config_cmd('kvm'))
         j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
-        j['actions'].append(get_tests_cmd())
+        j['actions'].append(get_kvm_tests_cmd())
         j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
     else:
         assert False, 'Unknown test type'
 
-    server = xmlrpclib.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, args.lava_key, HOSTNAME))
+    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))
 
     jobid = server.scheduler.submit_job(json.dumps(j))
 
     print('Lava jobid:{}'.format(jobid))
+    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))
 
     # Check the status of the job every 30 seconds
     jobstatus = server.scheduler.job_status(jobid)['job_status']
+    job_running = False
     while jobstatus in 'Submitted' or jobstatus in 'Running':
+        if not job_running and jobstatus in 'Running':
+            print('Job started running')
+            job_running = True
         time.sleep(30)
         jobstatus = server.scheduler.job_status(jobid)['job_status']
 
-    passed, failed=check_job_all_test_cases_state_count(server, jobid)
-
-    if test_type is TestType.tests:
+    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
         print_test_output(server, jobid)
+    elif test_type is TestType.baremetal_benchmarks:
+        fetch_benchmark_results(server, jobid)
 
     print('Job ended with {} status.'.format(jobstatus))
     if jobstatus not in 'Complete':
         return -1
     else:
+        passed, failed = check_job_all_test_cases_state_count(server, jobid)
         print('With {} passed and {} failed Lava test cases.'.format(passed, failed))
 
-    if failed == 0:
-        return 0
-    else:
-        return -1
+        if failed == 0:
+            return 0
+        else:
+            return -1
 
 if __name__ == "__main__":
     sys.exit(main())