lava: Use safe loads for YAML
[lttng-ci.git] / scripts / system-tests / lava2-submit.py
index 47cc61080416f31f386baf0269903218b963364f..5a08abd683236caf2dca1f94f6787abdd8bb2e3c 100644 (file)
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
 # Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
 #
 # This program is free software: you can redistribute it and/or modify
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import argparse
-import base64
 import json
 import os
 import random
+import re
 import sys
 import time
-import yaml
 import xmlrpc.client
-import pprint
-
-from jinja2 import Environment, FileSystemLoader, meta
+from urllib.parse import urljoin
+from urllib.request import urlretrieve
+import yaml
+from jinja2 import Environment, FileSystemLoader
 
 USERNAME = 'lava-jenkins'
-HOSTNAME = 'lava-master-02.internal.efficios.com'
+HOSTNAME = os.environ.get('LAVA_HOST', 'lava-master-03.internal.efficios.com')
+PROTO = os.environ.get('LAVA_PROTO', 'https')
+OBJSTORE_URL = "https://obj.internal.efficios.com/lava/results/"
+
+
+def parse_stable_version(stable_version_string):
+    # Get the major and minor version numbers from the lttng version string.
+    version_match = re.search(r'stable-(\d+)\.(\d+)', stable_version_string)
+
+    if version_match is not None:
+        major_version = int(version_match.group(1))
+        minor_version = int(version_match.group(2))
+    else:
+        # Setting to zero to make the comparison below easier.
+        major_version = 0
+        minor_version = 0
+    return major_version, minor_version
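
For illustration, a minimal sketch of how the parser behaves (the assertions are hypothetical usage, not part of the script):

    # Hypothetical sanity checks for parse_stable_version():
    assert parse_stable_version('stable-2.13') == (2, 13)
    # Non-matching inputs fall back to (0, 0) so later comparisons still work.
    assert parse_stable_version('master') == (0, 0)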
 
-class TestType():
-    baremetal_benchmarks=1
-    baremetal_tests=2
-    kvm_tests=3
-    kvm_fuzzing_tests=4
+
+class TestType:
+    """ Enum like for test type """
+
+    baremetal_tests = 1
+    kvm_tests = 2
     values = {
-        'baremetal-benchmarks' : baremetal_benchmarks,
-        'baremetal-tests' : baremetal_tests,
-        'kvm-tests' : kvm_tests,
-        'kvm-fuzzin-tests' : kvm_fuzzing_tests,
+        'baremetal-tests': baremetal_tests,
+        'kvm-tests': kvm_tests,
     }
 
-class DeviceType():
+
+class DeviceType:
+    """ Enum like for device type """
+
     x86 = 'x86'
     kvm = 'qemu'
-    values = {
-        'kvm' : kvm,
-        'x86' : x86,
-    }
+    values = {'kvm': kvm, 'x86': x86}
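
Both mappings translate the script's --type argument later on; a minimal illustrative lookup:

    # Illustrative lookups of CLI arguments into the enum-like values:
    test_type = TestType.values['kvm-tests']   # -> TestType.kvm_tests
    device_type = DeviceType.values['x86']     # -> 'x86'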
 
-def get_packages():
-    return ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip', \
-            'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev', \
-            'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev', \
-            'libnuma-dev', 'python3-dev', 'swig', 'stress']
 
 def get_job_bundle_content(server, job):
     try:
         bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
         bundle = server.dashboard.get(bundle_sha)
-    except xmlrpc.client.Fault as f:
-        print('Error while fetching results bundle', f.faultString)
-        raise f
+    except xmlrpc.client.Fault as error:
+        print('Error while fetching results bundle', error.faultString)
+        raise error
 
     return json.loads(bundle['content'])
 
-# Parse the results bundle to see the run-tests testcase
-# of the lttng-kernel-tests passed successfully
+
 def check_job_all_test_cases_state_count(server, job):
-    content = get_job_bundle_content(server, job)
-
-    # FIXME:Those tests are part of the boot actions and fail randomly but
-    # doesn't affect the behaviour of the tests. We should update our Lava
-    # installation and try to reproduce it. This error was encountered on
-    # Ubuntu 16.04.
-    tests_known_to_fail=['mount', 'df', 'ls', 'ip', 'wait_for_test_image_prompt']
-
-    passed_tests=0
-    failed_tests=0
-    for run in content['test_runs']:
-        for result in run['test_results']:
-            if 'test_case_id' in result :
-                if result['result'] in 'pass':
-                    passed_tests+=1
-                elif result['test_case_id'] in tests_known_to_fail:
-                    pass
-                else:
-                    failed_tests+=1
+    """
+    Parse the results bundle to check whether the run-tests testcase
+    of the lttng-kernel-tests suite passed successfully.
+    """
+    print("Testcase result:")
+    content = server.results.get_testjob_results_yaml(str(job))
+    testcases = yaml.safe_load(content)
+
+    passed_tests = 0
+    failed_tests = 0
+    for testcase in testcases:
+        if testcase['result'] != 'pass':
+            print(
+                "\tFAILED {}\n\t\t See {}://{}{}".format(
+                    testcase['name'], PROTO, HOSTNAME, testcase['url']
+                )
+            )
+            failed_tests += 1
+        else:
+            passed_tests += 1
     return (passed_tests, failed_tests)
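
The safe loader matters here because plain yaml.load has historically defaulted to a loader that can construct arbitrary Python objects from tagged nodes. A minimal sketch of the safe equivalents (illustrative, not part of the script):

    import yaml

    doc = "- {name: run-tests, result: pass}"
    # safe_load only builds plain Python types (str, int, list, dict, ...).
    print(yaml.safe_load(doc))
    # Equivalent explicit form:
    print(yaml.load(doc, Loader=yaml.SafeLoader))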
 
-# Get the benchmark results from the lava bundle
-# save them as CSV files localy
-def fetch_benchmark_results(server, job):
-    content = get_job_bundle_content(server, job)
-    testcases = ['processed_results_close.csv',
-            'processed_results_ioctl.csv',
-            'processed_results_open_efault.csv',
-            'processed_results_open_enoent.csv',
-            'processed_results_dup_close.csv',
-            'processed_results_raw_syscall_getpid.csv',
-            'processed_results_lttng_test_filter.csv']
-
-    # The result bundle is a large JSON containing the results of every testcase
-    # of the LAVA job as well as the files that were attached during the run.
-    # We need to iterate over this JSON to get the base64 representation of the
-    # benchmark results produced during the run.
-    for run in content['test_runs']:
-        # We only care of the benchmark testcases
-        if 'benchmark-' in run['test_id']:
-            if 'test_results' in run:
-                for res in run['test_results']:
-                    if 'attachments' in res:
-                        for a in res['attachments']:
-                            # We only save the results file
-                            if a['pathname'] in testcases:
-                                with open(a['pathname'],'wb') as f:
-                                    # Convert the b64 representation of the
-                                    # result file and write it to a file
-                                    # in the current working directory
-                                    f.write(base64.b64decode(a['content']))
-
-# Parse the attachment of the testcase to fetch the stdout of the test suite
+
 def print_test_output(server, job):
-    content = get_job_bundle_content(server, job)
-    found = False
-
-    for run in content['test_runs']:
-        if run['test_id'] in 'lttng-kernel-test':
-            for attachment in run['attachments']:
-                if attachment['pathname'] in 'stdout.log':
-
-                    # Decode the base64 file and split on newlines to iterate
-                    # on list
-                    testoutput = str(base64.b64decode(bytes(attachment['content'], encoding='UTF-8')))
-
-                    testoutput = testoutput.replace('\\n', '\n')
-
-                    # Create a generator to iterate on the lines and keeping
-                    # the state of the iterator across the two loops.
-                    testoutput_iter = iter(testoutput.split('\n'))
-                    for line in testoutput_iter:
-
-                        # Find the header of the test case and start printing
-                        # from there
-                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
-                            print('---- TEST SUITE OUTPUT BEGIN ----')
-                            for line in testoutput_iter:
-                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
-                                    print(line)
-                                else:
-                                    # Print until we reach the end of the
-                                    # section
-                                    break
-
-                            print('----- TEST SUITE OUTPUT END -----')
-                            break
-
-def get_vlttng_cmd(device, lttng_tools_commit, lttng_ust_commit=None):
-
-    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
-                    ' --override projects.babeltrace.build-env.PYTHON=python3' \
-                    ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
-                    ' --profile babeltrace-stable-1.4' \
-                    ' --profile babeltrace-python' \
-                    ' --profile lttng-tools-master' \
-                    ' --override projects.lttng-tools.checkout='+lttng_tools_commit + \
-                    ' --profile lttng-tools-no-man-pages'
+    """
+    Parse the job logs to fetch the stdout of the test suite.
+    """
+    job_finished, log = server.scheduler.jobs.logs(str(job))
+    logs = yaml.safe_load(log.data.decode('ascii'))
+    print_line = False
+    for line in logs:
+        if line['lvl'] != 'target':
+            continue
+        if line['msg'] == '<LAVA_SIGNAL_STARTTC run-tests>':
+            print('---- TEST SUITE OUTPUT BEGIN ----')
+            print_line = True
+            continue
+        if line['msg'] == '<LAVA_SIGNAL_ENDTC run-tests>':
+            print('----- TEST SUITE OUTPUT END -----')
+            print_line = False
+            continue
+        if print_line:
+            print("{} {}".format(line['dt'], line['msg']))
+
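
Each entry of the decoded log is a mapping carrying a level, a timestamp, and a message; the filtering above assumes entries shaped roughly like this (illustrative values):

    # Illustrative shape of one decoded LAVA log entry:
    entry = {
        'lvl': 'target',                           # log level / source
        'dt': '2023-05-18T12:00:00',               # timestamp (assumed format)
        'msg': '<LAVA_SIGNAL_STARTTC run-tests>',  # start-of-testcase marker
    }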
+
+def get_vlttng_cmd(
+    lttng_version, lttng_tools_url, lttng_tools_commit, lttng_ust_url=None, lttng_ust_commit=None
+):
+    """
+    Return vlttng cmd to be used in the job template for setup.
+    """
+
+    major_version, minor_version = parse_stable_version(lttng_version)
+
+    if lttng_version == 'master' or (major_version, minor_version) >= (2, 11):
+        urcu_profile = "urcu-master"
+    else:
+        urcu_profile = "urcu-stable-0.12"
+
+    # Starting with 2.14, babeltrace2 is the reader for testing.
+    if lttng_version == 'master' or (major_version, minor_version) >= (2, 14):
+        babeltrace_profile = " --profile babeltrace2-stable-2.0 --profile babeltrace2-python"
+        babeltrace_overrides = " --override projects.babeltrace2.build-env.PYTHON=python3 --override projects.babeltrace2.build-env.PYTHON_CONFIG=python3-config -o projects.babeltrace2.configure+=--disable-man-pages"
+    else:
+        babeltrace_profile = " --profile babeltrace-stable-1.5 --profile babeltrace-python"
+        babeltrace_overrides = " --override projects.babeltrace.build-env.PYTHON=python3 --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config"
+
+    vlttng_cmd = (
+        'vlttng --jobs=$(nproc) --profile ' + urcu_profile
+        + babeltrace_profile
+        + babeltrace_overrides
+        + ' --profile lttng-tools-master'
+        ' --override projects.lttng-tools.source='
+        + lttng_tools_url
+        + ' --override projects.lttng-tools.checkout='
+        + lttng_tools_commit
+        + ' --profile lttng-tools-no-man-pages'
+    )
 
     if lttng_ust_commit is not None:
-        vlttng_cmd += ' --profile lttng-ust-master ' \
-                    ' --override projects.lttng-ust.checkout='+lttng_ust_commit+ \
-                    ' --profile lttng-ust-no-man-pages'
+        vlttng_cmd += (
+            ' --profile lttng-ust-master '
+            ' --override projects.lttng-ust.source='
+            + lttng_ust_url
+            + ' --override projects.lttng-ust.checkout='
+            + lttng_ust_commit
+            + ' --profile lttng-ust-no-man-pages'
+        )
 
-    if device is DeviceType.kvm:
-        vlttng_path = '/root/virtenv'
-    else:
-        vlttng_path = '/tmp/virtenv'
+    if lttng_version == 'master' or (major_version, minor_version) >= (2, 11):
+        vlttng_cmd += (
+            ' --override projects.lttng-tools.configure+=--enable-test-sdt-uprobe'
+        )
+
+    vlttng_path = '/tmp/virtenv'
 
     vlttng_cmd += ' ' + vlttng_path
 
     return vlttng_cmd
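
As a usage sketch (the URLs and commit IDs below are hypothetical placeholders):

    # Hypothetical invocation of get_vlttng_cmd():
    cmd = get_vlttng_cmd(
        'stable-2.13',
        'https://github.com/lttng/lttng-tools.git',  # hypothetical tools URL
        'a1b2c3d',                                   # hypothetical tools commit
        'https://github.com/lttng/lttng-ust.git',    # hypothetical ust URL
        'd4e5f6a',                                   # hypothetical ust commit
    )
    # The result is a single shell command string ending with /tmp/virtenv.
    print(cmd)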
 
+
 def main():
-    nfsrootfs = "https://obj.internal.efficios.com/lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz"
+    send_retry_limit = 10
     test_type = None
     parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
     parser.add_argument('-t', '--type', required=True)
+    parser.add_argument('-lv', '--lttng-version', required=True)
     parser.add_argument('-j', '--jobname', required=True)
     parser.add_argument('-k', '--kernel', required=True)
     parser.add_argument('-lm', '--lmodule', required=True)
+    parser.add_argument('-tu', '--tools-url', required=True)
     parser.add_argument('-tc', '--tools-commit', required=True)
+    parser.add_argument('-id', '--build-id', required=True)
+    parser.add_argument('-uu', '--ust-url', required=False)
     parser.add_argument('-uc', '--ust-commit', required=False)
     parser.add_argument('-d', '--debug', required=False, action='store_true')
+    parser.add_argument(
+        '-r', '--rootfs-url', required=False,
+        default="https://obj.internal.efficios.com/lava/rootfs/rootfs_amd64_jammy_2023-05-18.tar.gz"
+    )
+    parser.add_argument('--ci-repo', required=False, default='https://github.com/lttng/lttng-ci.git')
+    parser.add_argument('--ci-branch', required=False, default='master')
     args = parser.parse_args()
 
     if args.type not in TestType.values:
@@ -206,29 +215,38 @@ def main():
     if not args.debug:
         try:
             lava_api_key = os.environ['LAVA2_JENKINS_TOKEN']
-        except Exception as e:
-            print('LAVA2_JENKINS_TOKEN not found in the environment variable. Exiting...', e )
+        except KeyError as error:
+            print(
+                'LAVA2_JENKINS_TOKEN not found in the environment. Exiting...',
+                error,
+            )
             return -1
 
     jinja_loader = FileSystemLoader(os.path.dirname(os.path.realpath(__file__)))
-    jinja_env = Environment(loader=jinja_loader, trim_blocks=True,
-            lstrip_blocks= True)
+    jinja_env = Environment(loader=jinja_loader, trim_blocks=True, lstrip_blocks=True)
     jinja_template = jinja_env.get_template('template_lava_job.jinja2')
-    template_source = jinja_env.loader.get_source(jinja_env, 'template_lava_job.jinja2')
-    parsed_content = jinja_env.parse(template_source)
-    undef = meta.find_undeclared_variables(parsed_content)
 
     test_type = TestType.values[args.type]
 
-    if test_type in [TestType.baremetal_benchmarks, TestType.baremetal_tests]:
+    if test_type is TestType.baremetal_tests:
         device_type = DeviceType.x86
-        vlttng_path = '/tmp/virtenv'
-
     else:
         device_type = DeviceType.kvm
-        vlttng_path = '/root/virtenv'
 
-    vlttng_cmd = get_vlttng_cmd(device_type, args.tools_commit, args.ust_commit)
+    vlttng_path = '/tmp/virtenv'
+
+    vlttng_cmd = get_vlttng_cmd(
+        args.lttng_version, args.tools_url, args.tools_commit, args.ust_url, args.ust_commit
+    )
+
+    if args.lttng_version == "master":
+        lttng_version_string = "master"
+    elif args.lttng_version == "canary":
+        lttng_version_string = "2.13"
+    else:
+        major, minor = parse_stable_version(args.lttng_version)
+        lttng_version_string = str(major) + "." + str(minor)
 
     context = dict()
     context['DeviceType'] = DeviceType
@@ -236,23 +254,25 @@ def main():
 
     context['job_name'] = args.jobname
     context['test_type'] = test_type
-    context['packages'] = get_packages()
     context['random_seed'] = random.randint(0, 1000000)
     context['device_type'] = device_type
 
     context['vlttng_cmd'] = vlttng_cmd
     context['vlttng_path'] = vlttng_path
+    context['lttng_version_string'] = lttng_version_string
 
     context['kernel_url'] = args.kernel
-    context['nfsrootfs_url'] = nfsrootfs
+    context['nfsrootfs_url'] = args.rootfs_url
     context['lttng_modules_url'] = args.lmodule
+    context['jenkins_build_id'] = args.build_id
 
     context['kprobe_round_nb'] = 10
 
+    context['ci_repo'] = args.ci_repo
+    context['ci_branch'] = args.ci_branch
+
     render = jinja_template.render(context)
 
-    print('Current context:')
-    pprint.pprint(context, indent=4)
     print('Job to be submitted:')
 
     print(render)
@@ -260,40 +280,68 @@ def main():
     if args.debug:
         return 0
 
-    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))
+    server = xmlrpc.client.ServerProxy(
+        '%s://%s:%s@%s/RPC2' % (PROTO, USERNAME, lava_api_key, HOSTNAME)
+    )
 
-    jobid = server.scheduler.submit_job(render)
+    for attempt in range(1, send_retry_limit + 1):
+        try:
+            jobid = server.scheduler.submit_job(render)
+        except xmlrpc.client.ProtocolError:
+            print(
+                'Protocol error on submit, sleeping and retrying. Attempt #{}'.format(
+                    attempt
+                )
+            )
+            time.sleep(5)
+        else:
+            break
+    else:
+        # The else branch of the for loop runs only when every attempt
+        # failed without reaching the break.
+        print(
+            'Protocol error on submit, maximum number of retries reached ({})'.format(
+                send_retry_limit
+            )
+        )
+        return -1
 
     print('Lava jobid:{}'.format(jobid))
-    print('Lava job URL: http://lava-master-02.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))
-
-    #Check the status of the job every 30 seconds
-    jobstatus = server.scheduler.job_status(jobid)['job_status']
-    not_running = False
-    while jobstatus in 'Submitted' or jobstatus in 'Running':
-        if not_running is False and jobstatus in 'Running':
+    print(
+        'Lava job URL: {}://{}/scheduler/job/{}'.format(
+            PROTO, HOSTNAME, jobid
+        )
+    )
+
+    # Check the status of the job every 30 seconds
+    jobstatus = server.scheduler.job_state(jobid)['job_state']
+    running = False
+    while jobstatus in ['Submitted', 'Scheduling', 'Scheduled', 'Running']:
+        if not running and jobstatus == 'Running':
             print('Job started running')
-            not_running = True
+            running = True
         time.sleep(30)
-        jobstatus = server.scheduler.job_status(jobid)['job_status']
+        try:
+            jobstatus = server.scheduler.job_state(jobid)['job_state']
+        except xmlrpc.client.ProtocolError as error:
+            print('Protocol error, retrying:', error)
+            continue
+    print('Job ended with {} status.'.format(jobstatus))
 
-#    Do not fetch result for now
-#    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
-#        print_test_output(server, jobid)
-#    elif test_type is TestType.baremetal_benchmarks:
-#        fetch_benchmark_results(server, jobid)
+    if jobstatus != 'Finished':
+        return -1
 
-    print('Job ended with {} status.'.format(jobstatus))
-    if jobstatus not in 'Complete':
+    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
+        print_test_output(server, jobid)
+
+    passed, failed = check_job_all_test_cases_state_count(server, jobid)
+    print('With {} passed and {} failed Lava test cases.'.format(passed, failed))
+
+    if failed != 0:
         return -1
-    else:
-        passed, failed=check_job_all_test_cases_state_count(server, jobid)
-        print('With {} passed and {} failed Lava test cases.'.format(passed, failed))
 
-        if failed == 0:
-            return 0
-        else:
-            return -1
+    return 0
+
 
 if __name__ == "__main__":
     sys.exit(main())