jjb: Add back open_enoent Lava benchmark testcase
diff --git a/scripts/lttng-baremetal-tests/lava-submit.py b/scripts/lttng-baremetal-tests/lava-submit.py
index a0e340748d50fb2b0a850ad08a00d0cd825da18b..8ba92514f8a239a9907c6105310324c0cb315730 100644
@@ -20,7 +20,7 @@ import json
 import os
 import sys
 import time
-import xmlrpclib
+import xmlrpc.client
 from collections import OrderedDict
 from enum import Enum
 
@@ -29,12 +29,16 @@ HOSTNAME = 'lava-master.internal.efficios.com'
 SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
 
 class TestType(Enum):
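+    # Distinguish runs on the physical x86 machine (baremetal) from runs
+    # inside a KVM guest.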
-    benchmarks=1
-    tests=2
+    baremetal_benchmarks=1
+    baremetal_tests=2
+    kvm_tests=3
 
 def get_job_bundle_content(server, job):
-    bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
-    bundle = server.dashboard.get(bundle_sha)
+    try:
+        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
+        bundle = server.dashboard.get(bundle_sha)
+    except xmlrpc.client.Fault as f:
+        print('Error while fetching results bundle:', f.faultString)
+        raise
 
     return json.loads(bundle['content'])
 
@@ -47,13 +51,50 @@ def check_job_all_test_cases_state_count(server, job):
     failed_tests=0
     for run in content['test_runs']:
         for result in run['test_results']:
             if 'test_case_id' in result:
                 if result['result'] in 'pass':
                     passed_tests+=1
+                elif result['test_case_id'] == 'wait_for_test_image_prompt':
+                    # FIXME: This test is part of the boot action and fails
+                    # randomly, but doesn't affect the behaviour of the tests.
+                    # No reply on the Lava IRC channel yet. We should update
+                    # our Lava installation and try to reproduce it. This error
+                    # was encountered on the KVM trusty image only, not seen
+                    # on Xenial at this point.
+                    pass
                 else:
                     failed_tests+=1
     return (passed_tests, failed_tests)
 
+# Get the benchmark results from the LAVA bundle and save them
+# locally as CSV files.
+def fetch_benchmark_results(server, job):
+    content = get_job_bundle_content(server, job)
+    testcases = ['processed_results_close.csv',
+                 'processed_results_open_efault.csv',
+                 'processed_results_open_enoent.csv',
+                 'processed_results_dup_close.csv',
+                 'processed_results_lttng_test_filter.csv']
+
+    # The result bundle is a large JSON containing the results of every testcase
+    # of the LAVA job as well as the files that were attached during the run.
+    # We need to iterate over this JSON to get the base64 representation of the
+    # benchmark results produced during the run.
+    for run in content['test_runs']:
+        # We only care about the benchmark test cases
+        if 'benchmark-' in run['test_id']:
+            if 'test_results' in run:
+                for res in run['test_results']:
+                    if 'attachments' in res:
+                        for a in res['attachments']:
+                            # Only save the expected benchmark result files
+                            if a['pathname'] in testcases:
+                                with open(a['pathname'], 'wb') as f:
+                                    # Convert the b64 representation of the
+                                    # result file and write it to a file
+                                    # in the current working directory
+                                    f.write(base64.b64decode(a['content']))
+
 # Parse the attachment of the testcase to fetch the stdout of the test suite
 def print_test_output(server, job):
     content = get_job_bundle_content(server, job)
@@ -66,7 +107,7 @@ def print_test_output(server, job):
 
                     # Decode the base64 file and split on newlines to iterate
                     # on list
-                    testoutput = base64.b64decode(attachment['content']).split('\n')
+                    testoutput = base64.b64decode(attachment['content']).decode('utf-8').split('\n')
 
                     # Create a generator to iterate on the lines and keeping
                     # the state of the iterator across the two loops.
@@ -90,15 +131,18 @@ def print_test_output(server, job):
                             print('----- TEST SUITE OUTPUT END -----')
                             break
 
-def create_new_job(name):
+def create_new_job(name, build_device):
     job = OrderedDict({
         'health_check': False,
         'job_name': name,
-        'device_type': 'x86',
-        'tags': [ 'dev-sda1' ],
+        'device_type': build_device,
+        'tags': [],
         'timeout': 18000,
         'actions': []
     })
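+    # Only the physical x86 device needs the 'dev-sda1' tag; it selects a
+    # machine with a scratch disk that get_config_cmd() later mounts on /tmp.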
+    if build_device == 'x86':
+        job['tags'].append('dev-sda1')
+
     return job
 
 def get_boot_cmd():
@@ -107,31 +151,37 @@ def get_boot_cmd():
         })
     return command
 
-def get_config_cmd():
+def get_config_cmd(build_device):
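+    # python3-pandas and python3-numpy are no longer installed on the target;
+    # libpfm4-dev and libnuma-dev are presumably build dependencies of the
+    # new perf tests.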
     packages=['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip', \
             'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev', \
-            'libelf-dev', 'libmount-dev', 'libxml2', 'python3-pandas', \
-            'python3-numpy']
+            'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev', \
+            'libnuma-dev']
     command = OrderedDict({
         'command': 'lava_command_run',
         'parameters': {
             'commands': [
-                'ifup eth0',
-                'route -n',
                 'cat /etc/resolv.conf',
                 'echo nameserver 172.18.0.12 > /etc/resolv.conf',
-                'mount /dev/sda1 /tmp',
-                'rm -rf /tmp/*',
-                'depmod -a',
-                'locale-gen en_US.UTF-8',
-                'apt-get update',
-                'apt-get install -y {}'.format(' '.join(packages)),
-                ]
+                'groupadd tracing'
+                ],
+            'timeout': 300
             }
         })
+    if build_device == 'x86':
+        command['parameters']['commands'].extend([
+                    'mount /dev/sda1 /tmp',
+                    'rm -rf /tmp/*'])
+
+    command['parameters']['commands'].extend([
+                    'depmod -a',
+                    'locale-gen en_US.UTF-8',
+                    'apt-get update',
+                    'apt-get upgrade -y',
+                    'apt-get install -y {}'.format(' '.join(packages))
+                ])
     return command
 
-def get_benchmarks_cmd():
+def get_baremetal_benchmarks_cmd():
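+    # Each benchmark testdef below is cloned from the lttng-ci repository on
+    # the target at run time.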
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -146,10 +196,20 @@ def get_benchmarks_cmd():
                     'revision': 'master',
                     'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
                 },
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/baremetal-tests/success-dup-close.yml'
+                },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
                     'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
+                },
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/baremetal-tests/lttng-test-filter.yml'
                 }
                 ],
             'timeout': 18000
@@ -157,7 +217,23 @@ def get_benchmarks_cmd():
         })
     return command
 
-def get_tests_cmd():
+def get_baremetal_tests_cmd():
+    command = OrderedDict({
+        'command': 'lava_test_shell',
+        'parameters': {
+            'testdef_repos': [
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/baremetal-tests/perf-tests.yml'
+                }
+                ],
+            'timeout': 18000
+            }
+        })
+    return command
+
+def get_kvm_tests_cmd():
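+    # destructive-tests.yml runs alongside kernel-tests.yml here, presumably
+    # because destructive tests are only safe inside a disposable KVM guest.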
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -166,6 +242,11 @@ def get_tests_cmd():
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
                     'testdef': 'lava/baremetal-tests/kernel-tests.yml'
+                },
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/baremetal-tests/destructive-tests.yml'
                 }
                 ],
             'timeout': 18000
@@ -183,7 +264,28 @@ def get_results_cmd(stream_name):
     command['parameters']['stream']='/anonymous/'+stream_name+'/'
     return command
 
-def get_deploy_cmd(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
+def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
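+    # Boot a Xenial rootfs image on the kvm02 device and push the kernel and
+    # the module tarballs into it.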
+    command = OrderedDict({
+            'command': 'deploy_kernel',
+            'metadata': {},
+            'parameters': {
+                'customize': {},
+                'kernel': None,
+                'target_type': 'ubuntu',
+                'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
+                'login_prompt': 'kvm02 login:',
+                'username': 'root'
+                }
+            })
+
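+    # Each 'customize' entry copies the given archive over SCP into the
+    # rootfs; the 'archive' action unpacks it at the destination.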
+    command['parameters']['customize'][SCP_PATH+linux_modules_path]=['rootfs:/','archive']
+    command['parameters']['customize'][SCP_PATH+lttng_modules_path]=['rootfs:/','archive']
+    command['parameters']['kernel'] = str(SCP_PATH+kernel_path)
+    command['metadata']['jenkins_jobname'] = jenkins_job
+
+    return command
+
+def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
     command = OrderedDict({
             'command': 'deploy_kernel',
             'metadata': {},
@@ -198,18 +300,20 @@ def get_deploy_cmd(jenkins_job, kernel_path, linux_modules_path, lttng_modules_p
     command['parameters']['overlays'].append( str(SCP_PATH+linux_modules_path))
     command['parameters']['overlays'].append( str(SCP_PATH+lttng_modules_path))
     command['parameters']['kernel'] = str(SCP_PATH+kernel_path)
-    command['metadata']['jenkins_jobname'] = jenkins_job    
+    command['metadata']['jenkins_jobname'] = jenkins_job
     if nb_iter is not None:
         command['metadata']['nb_iterations'] = nb_iter
 
     return command
 
 
-def get_env_setup_cmd(lttng_tools_commit, lttng_ust_commit=None):
+def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
     command = OrderedDict({
         'command': 'lava_command_run',
         'parameters': {
             'commands': [
+                'pip3 install --upgrade pip',
+                'hash -r',
                 'git clone https://github.com/frdeso/syscall-bench-it.git bm',
                 'pip3 install vlttng',
                         ],
@@ -217,7 +321,7 @@ def get_env_setup_cmd(lttng_tools_commit, lttng_ust_commit=None):
             }
         })
 
-    vlttng_cmd = 'vlttng --jobs=16 --profile urcu-master' \
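+    # Scale build parallelism to the CPU count of the target instead of
+    # hard-coding 16 jobs.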
+    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
                     ' --profile babeltrace-stable-1.4 ' \
                     ' --profile lttng-tools-master' \
                     ' --override projects.lttng-tools.checkout='+lttng_tools_commit + \
@@ -228,9 +332,18 @@ def get_env_setup_cmd(lttng_tools_commit, lttng_ust_commit=None):
                     ' --override projects.lttng-ust.checkout='+lttng_ust_commit+ \
                     ' --profile lttng-ust-no-man-pages'
 
-    vlttng_cmd += " /tmp/virtenv"
+    if build_device == 'kvm':
+        virtenv_path = '/root/virtenv'
+    else:
+        virtenv_path = '/tmp/virtenv'
+
+    vlttng_cmd += ' '+virtenv_path
 
     command['parameters']['commands'].append(vlttng_cmd)
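+    # Expose the virtualenv under a fixed path so test definitions can find
+    # it regardless of the device type, then sync to flush pending writes.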
+    command['parameters']['commands'].append('ln -s '+virtenv_path+' /root/lttngvenv')
+    command['parameters']['commands'].append('sync')
+
     return command
 
 def main():
@@ -241,64 +354,96 @@ def main():
     parser.add_argument('-k', '--kernel', required=True)
     parser.add_argument('-km', '--kmodule', required=True)
     parser.add_argument('-lm', '--lmodule', required=True)
-    parser.add_argument('-l', '--lava-key', required=True)
     parser.add_argument('-tc', '--tools-commit', required=True)
     parser.add_argument('-uc', '--ust-commit', required=False)
     args = parser.parse_args()
 
-
-    j = create_new_job(args.jobname)
-    j['actions'].append(get_deploy_cmd(args.jobname, args.kernel, args.kmodule, args.lmodule))
-    j['actions'].append(get_boot_cmd())
-    j['actions'].append(get_config_cmd())
-
-    if args.type in 'benchmarks':
-        test_type = TestType.benchmarks
-    elif args.type in 'tests':
-        test_type = TestType.tests
+    if args.type == 'baremetal-benchmarks':
+        test_type = TestType.baremetal_benchmarks
+    elif args.type == 'baremetal-tests':
+        test_type = TestType.baremetal_tests
+    elif args.type == 'kvm-tests':
+        test_type = TestType.kvm_tests
     else:
         print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
         return -1
 
-    if test_type is TestType.benchmarks:
-        j['actions'].append(get_env_setup_cmd(args.tools_commit))
-        j['actions'].append(get_benchmarks_cmd())
+    try:
+        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
+    except KeyError:
+        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...')
+        return -1
+
+    if test_type in (TestType.baremetal_benchmarks, TestType.baremetal_tests):
+        j = create_new_job(args.jobname, build_device='x86')
+        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
+    elif test_type is TestType.kvm_tests:
+        j = create_new_job(args.jobname, build_device='kvm')
+        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))
+
+    j['actions'].append(get_boot_cmd())
+
+    if test_type is TestType.baremetal_benchmarks:
+        j['actions'].append(get_config_cmd('x86'))
+        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
+        j['actions'].append(get_baremetal_benchmarks_cmd())
         j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
-    elif test_type  is TestType.tests:
+    elif test_type is TestType.baremetal_tests:
         if args.ust_commit is None:
             print('Tests runs need -uc/--ust-commit options. Exiting...')
             return -1
-        j['actions'].append(get_env_setup_cmd(args.tools_commit, args.ust_commit))
-        j['actions'].append(get_tests_cmd())
+        j['actions'].append(get_config_cmd('x86'))
+        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
+        j['actions'].append(get_baremetal_tests_cmd())
+        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
+    elif test_type is TestType.kvm_tests:
+        if args.ust_commit is None:
+            print('Test runs need the -uc/--ust-commit option. Exiting...')
+            return -1
+        j['actions'].append(get_config_cmd('kvm'))
+        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
+        j['actions'].append(get_kvm_tests_cmd())
         j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
     else:
         assert False, 'Unknown test type'
 
-    server = xmlrpclib.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, args.lava_key, HOSTNAME))
+    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))
 
     jobid = server.scheduler.submit_job(json.dumps(j))
 
+    print('Lava job id: {}'.format(jobid))
+    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))
+
     #Check the status of the job every 30 seconds
     jobstatus = server.scheduler.job_status(jobid)['job_status']
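+    # Print a single marker when the job leaves the queue so the Jenkins log
+    # shows when execution actually started.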
+    is_running = False
     while jobstatus in 'Submitted' or jobstatus in 'Running':
+        if not is_running and jobstatus in 'Running':
+            print('Job started running')
+            is_running = True
         time.sleep(30)
         jobstatus = server.scheduler.job_status(jobid)['job_status']
 
+    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
+        print_test_output(server, jobid)
+    elif test_type is TestType.baremetal_benchmarks:
+        fetch_benchmark_results(server, jobid)
+
     print('Job ended with {} status.'.format(jobstatus))
     if jobstatus not in 'Complete':
         return -1
-
-    passed, failed=check_job_all_test_cases_state_count(server, jobid)
-
-    print('With {} passed tests and {} failed tests.'.format(passed, failed))
-
-    if test_type is TestType.tests:
-        print_test_output(server, jobid)
-
-    if  failed == 0:
-        return 0
     else:
-        return -1
+        passed, failed = check_job_all_test_cases_state_count(server, jobid)
+        print('With {} passed and {} failed Lava test cases.'.format(passed, failed))
+
+        if failed == 0:
+            return 0
+        else:
+            return -1
 
 if __name__ == "__main__":
     sys.exit(main())