lava: Use safe loads for YAML
[lttng-ci.git] / scripts / system-tests / lava2-submit.py
index 614a1a973b301748f6d89a9247f7435faad304a4..5a08abd683236caf2dca1f94f6787abdd8bb2e3c 100644 (file)
@@ -18,6 +18,7 @@ import argparse
 import json
 import os
 import random
+import re
 import sys
 import time
 import xmlrpc.client
@@ -27,22 +28,32 @@ import yaml
 from jinja2 import Environment, FileSystemLoader
 
 USERNAME = 'lava-jenkins'
-HOSTNAME = 'lava-master-02.internal.efficios.com'
+HOSTNAME = os.environ.get('LAVA_HOST', 'lava-master-03.internal.efficios.com')
+PROTO = os.environ.get('LAVA_PROTO', 'https')
 OBJSTORE_URL = "https://obj.internal.efficios.com/lava/results/"
 
+def parse_stable_version(stable_version_string):
+    # Get the major and minor version numbers from the lttng version string.
+    version_match = re.search(r'stable-(\d)\.(\d\d)', stable_version_string)
+
+    if version_match is not None:
+        major_version = int(version_match.group(1))
+        minor_version = int(version_match.group(2))
+    else:
+        # Setting to zero to make the comparison below easier.
+        major_version = 0
+        minor_version = 0
+    return major_version, minor_version
+
 
 class TestType:
     """ Enum like for test type """
 
-    baremetal_benchmarks = 1
-    baremetal_tests = 2
-    kvm_tests = 3
-    kvm_fuzzing_tests = 4
+    baremetal_tests = 1
+    kvm_tests = 2
     values = {
-        'baremetal-benchmarks': baremetal_benchmarks,
         'baremetal-tests': baremetal_tests,
         'kvm-tests': kvm_tests,
-        'kvm-fuzzing-tests': kvm_fuzzing_tests,
     }
 
 
@@ -72,15 +83,15 @@ def check_job_all_test_cases_state_count(server, job):
     """
     print("Testcase result:")
     content = server.results.get_testjob_results_yaml(str(job))
-    testcases = yaml.unsafe_load(content)
+    testcases = yaml.safe_load(content)
 
     passed_tests = 0
     failed_tests = 0
     for testcase in testcases:
         if testcase['result'] != 'pass':
             print(
-                "\tFAILED {}\n\t\t See http://{}{}".format(
-                    testcase['name'], HOSTNAME, testcase['url']
+                "\tFAILED {}\n\t\t See {}://{}{}".format(
+                    testcase['name'], PROTO, HOSTNAME, testcase['url']
                 )
             )
             failed_tests += 1
@@ -89,32 +100,12 @@ def check_job_all_test_cases_state_count(server, job):
     return (passed_tests, failed_tests)
 
 
-def fetch_benchmark_results(build_id):
-    """
-    Get the benchmark results from the objstore
-    save them as CSV files localy
-    """
-    testcases = [
-        'processed_results_close.csv',
-        'processed_results_ioctl.csv',
-        'processed_results_open_efault.csv',
-        'processed_results_open_enoent.csv',
-        'processed_results_dup_close.csv',
-        'processed_results_raw_syscall_getpid.csv',
-        'processed_results_lttng_test_filter.csv',
-    ]
-    for testcase in testcases:
-        url = urljoin(OBJSTORE_URL, "{:s}/{:s}".format(build_id, testcase))
-        print('Fetching {}'.format(url))
-        urlretrieve(url, testcase)
-
-
 def print_test_output(server, job):
     """
     Parse the attachment of the testcase to fetch the stdout of the test suite
     """
     job_finished, log = server.scheduler.jobs.logs(str(job))
-    logs = yaml.unsafe_load(log.data.decode('ascii'))
+    logs = yaml.safe_load(log.data.decode('ascii'))
     print_line = False
     for line in logs:
         if line['lvl'] != 'target':
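
The pass/fail counting above only relies on three keys of each testcase entry returned by results.get_testjob_results_yaml(); a minimal sketch of the assumed shape, with made-up values:

# Hypothetical entry from the parsed testcase list; only 'name', 'result'
# and 'url' are read by check_job_all_test_cases_state_count().
testcase = {
    'name': 'run-tests',
    'result': 'pass',   # anything other than 'pass' is counted as a failure
    'url': '/results/testcase/12345',
}
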
@@ -125,25 +116,41 @@ def print_test_output(server, job):
             continue
         if line['msg'] == '<LAVA_SIGNAL_ENDTC run-tests>':
             print('----- TEST SUITE OUTPUT END -----')
-            break
+            print_line = False
+            continue
         if print_line:
             print("{} {}".format(line['dt'], line['msg']))
 
 
 def get_vlttng_cmd(
-    lttng_tools_url, lttng_tools_commit, lttng_ust_url=None, lttng_ust_commit=None
+    lttng_version, lttng_tools_url, lttng_tools_commit, lttng_ust_url=None, lttng_ust_commit=None
 ):
     """
     Return vlttng cmd to be used in the job template for setup.
     """
 
+    major_version, minor_version = parse_stable_version(lttng_version)
+
+    urcu_profile = ""
+    if lttng_version == 'master' or (major_version >= 2 and minor_version >= 11):
+        urcu_profile = "urcu-master"
+    else:
+        urcu_profile = "urcu-stable-0.12"
+
+    # Starting with 2.14, babeltrace2 is the reader for testing.
+    if lttng_version == 'master' or (major_version >= 2 and minor_version >= 14):
+        babeltrace_profile = " --profile babeltrace2-stable-2.0 --profile babeltrace2-python"
+        babeltrace_overrides = " --override projects.babeltrace2.build-env.PYTHON=python3 --override projects.babeltrace2.build-env.PYTHON_CONFIG=python3-config -o projects.babeltrace2.configure+=--disable-man-pages"
+    else:
+        babeltrace_profile = " --profile babeltrace-stable-1.5 --profile babeltrace-python"
+        babeltrace_overrides = " --override projects.babeltrace.build-env.PYTHON=python3 --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config"
+
+
     vlttng_cmd = (
-        'vlttng --jobs=$(nproc) --profile urcu-master'
-        ' --override projects.babeltrace.build-env.PYTHON=python3'
-        ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config'
-        ' --profile babeltrace-stable-1.4'
-        ' --profile babeltrace-python'
-        ' --profile lttng-tools-master'
+        'vlttng --jobs=$(nproc) --profile ' + urcu_profile
+        + babeltrace_profile
+        + babeltrace_overrides
+        + ' --profile lttng-tools-master'
         ' --override projects.lttng-tools.source='
         + lttng_tools_url
         + ' --override projects.lttng-tools.checkout='
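
A condensed sketch of the profile selection that get_vlttng_cmd() now performs; the helper name pick_profiles is made up for illustration, but the profile names and version cut-offs come from the code above:

def pick_profiles(lttng_version):
    # Sketch: mirrors the urcu/babeltrace selection in get_vlttng_cmd().
    major, minor = parse_stable_version(lttng_version)
    is_master = lttng_version == 'master'
    urcu = 'urcu-master' if is_master or (major >= 2 and minor >= 11) \
        else 'urcu-stable-0.12'
    babeltrace = 'babeltrace2-stable-2.0' if is_master or (major >= 2 and minor >= 14) \
        else 'babeltrace-stable-1.5'
    return urcu, babeltrace
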
@@ -161,6 +168,12 @@ def get_vlttng_cmd(
             + ' --profile lttng-ust-no-man-pages'
         )
 
+
+    if lttng_version == 'master' or (major_version >= 2 and minor_version >= 11):
+        vlttng_cmd += (
+            ' --override projects.lttng-tools.configure+=--enable-test-sdt-uprobe'
+        )
+
     vlttng_path = '/tmp/virtenv'
 
     vlttng_cmd += ' ' + vlttng_path
@@ -169,10 +182,11 @@ def get_vlttng_cmd(
 
 
 def main():
-    nfsrootfs = "https://obj.internal.efficios.com/lava/rootfs/rootfs_amd64_xenial_2018-12-05.tar.gz"
+    send_retry_limit = 10
     test_type = None
     parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
     parser.add_argument('-t', '--type', required=True)
+    parser.add_argument('-lv', '--lttng-version', required=True)
     parser.add_argument('-j', '--jobname', required=True)
     parser.add_argument('-k', '--kernel', required=True)
     parser.add_argument('-lm', '--lmodule', required=True)
@@ -182,6 +196,12 @@ def main():
     parser.add_argument('-uu', '--ust-url', required=False)
     parser.add_argument('-uc', '--ust-commit', required=False)
     parser.add_argument('-d', '--debug', required=False, action='store_true')
+    parser.add_argument(
+        '-r', '--rootfs-url', required=False,
+        default="https://obj.internal.efficios.com/lava/rootfs/rootfs_amd64_jammy_2023-05-18.tar.gz"
+    )
+    parser.add_argument('--ci-repo', required=False, default='https://github.com/lttng/lttng-ci.git')
+    parser.add_argument('--ci-branch', required=False, default='master')
     args = parser.parse_args()
 
     if args.type not in TestType.values:
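
For context, a hypothetical argument set exercising the new options; every value below is a placeholder, not an artifact referenced by this change, and the other required arguments (tools URL/commit, build id, etc.) are omitted for brevity:

# Placeholder argument vector for the new options (values are examples only).
argv = [
    '--type', 'kvm-tests',
    '--lttng-version', 'stable-2.13',
    '--jobname', 'example-job',
    '--kernel', 'https://example.invalid/bzImage',
    '--lmodule', 'https://example.invalid/lttng-modules.tar.gz',
    '--rootfs-url', 'https://example.invalid/rootfs_amd64_jammy.tar.gz',
    '--ci-repo', 'https://github.com/lttng/lttng-ci.git',
    '--ci-branch', 'master',
]
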
@@ -208,7 +228,7 @@ def main():
 
     test_type = TestType.values[args.type]
 
-    if test_type in [TestType.baremetal_benchmarks, TestType.baremetal_tests]:
+    if test_type is TestType.baremetal_tests:
         device_type = DeviceType.x86
     else:
         device_type = DeviceType.kvm
@@ -216,9 +236,18 @@ def main():
     vlttng_path = '/tmp/virtenv'
 
     vlttng_cmd = get_vlttng_cmd(
-        args.tools_url, args.tools_commit, args.ust_url, args.ust_commit
+        args.lttng_version, args.tools_url, args.tools_commit, args.ust_url, args.ust_commit
     )
 
+    if args.lttng_version == "master":
+        lttng_version_string = "master"
+    elif args.lttng_version == "canary":
+        lttng_version_string = "2.13"
+    else:
+        major, minor = parse_stable_version(args.lttng_version)
+        lttng_version_string = str(major) + "." + str(minor)
+
+
     context = dict()
     context['DeviceType'] = DeviceType
     context['TestType'] = TestType
@@ -230,14 +259,18 @@ def main():
 
     context['vlttng_cmd'] = vlttng_cmd
     context['vlttng_path'] = vlttng_path
+    context['lttng_version_string'] = lttng_version_string
 
     context['kernel_url'] = args.kernel
-    context['nfsrootfs_url'] = nfsrootfs
+    context['nfsrootfs_url'] = args.rootfs_url
     context['lttng_modules_url'] = args.lmodule
     context['jenkins_build_id'] = args.build_id
 
     context['kprobe_round_nb'] = 10
 
+    context['ci_repo'] = args.ci_repo
+    context['ci_branch'] = args.ci_branch
+
     render = jinja_template.render(context)
 
     print('Job to be submitted:')
@@ -248,10 +281,10 @@ def main():
         return 0
 
     server = xmlrpc.client.ServerProxy(
-        'http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME)
+        '%s://%s:%s@%s/RPC2' % (PROTO, USERNAME, lava_api_key, HOSTNAME)
     )
 
-    for attempt in range(10):
+    for attempt in range(1, send_retry_limit + 1):
         try:
             jobid = server.scheduler.submit_job(render)
         except xmlrpc.client.ProtocolError as error:
@@ -264,11 +297,19 @@ def main():
             continue
         else:
             break
+    # Early exit when the maximum number of retries is reached.
+    if attempt == send_retry_limit:
+        print(
+            'Protocol error on submit, maximum number of retries reached ({})'.format(
+                attempt
+            )
+        )
+        return -1
 
     print('Lava jobid:{}'.format(jobid))
     print(
-        'Lava job URL: http://lava-master-02.internal.efficios.com/scheduler/job/{}'.format(
-            jobid
+        'Lava job URL: {}://{}/scheduler/job/{}'.format(
+            PROTO, HOSTNAME, jobid
         )
     )
 
@@ -292,8 +333,6 @@ def main():
 
     if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
         print_test_output(server, jobid)
-    elif test_type is TestType.baremetal_benchmarks:
-        fetch_benchmark_results(args.build_id)
 
     passed, failed = check_job_all_test_cases_state_count(server, jobid)
     print('With {} passed and {} failed Lava test cases.'.format(passed, failed))