Cutoff for lttng-modules 2.7 is 4.8.
[lttng-ci.git] / scripts/system-tests/lava-submit.py
index 682e2249e11cc7fe5229baf081330a6e564059fe..a49bacdc2c731618e889afe8545e803220dd29c9 100644
@@ -18,15 +18,19 @@ import argparse
 import base64
 import json
 import os
+import random
 import sys
 import time
 import xmlrpc.client
+from urllib.parse import urljoin
+from urllib.request import urlretrieve
 from collections import OrderedDict
 from enum import Enum
 
 USERNAME = 'frdeso'
 HOSTNAME = 'lava-master.internal.efficios.com'
 SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
+OBJSTORE_URL = "https://obj.internal.efficios.com/lava/results/"
 
 class TestType(Enum):
     baremetal_benchmarks=1
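A note on the additions above: the new OBJSTORE_URL constant ends with a trailing slash on purpose. urllib.parse.urljoin() keeps the whole base path only in that case. A quick illustration (the build id 1234 is made up):

    from urllib.parse import urljoin

    base = 'https://obj.internal.efficios.com/lava/results/'
    # Trailing slash kept: the relative part is appended under .../results/.
    print(urljoin(base, '1234/processed_results_close.csv'))
    # https://obj.internal.efficios.com/lava/results/1234/processed_results_close.csv

    # Without the trailing slash, urljoin() replaces the last path segment.
    print(urljoin(base.rstrip('/'), '1234/processed_results_close.csv'))
    # https://obj.internal.efficios.com/lava/1234/processed_results_close.csv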
@@ -70,8 +74,7 @@ def check_job_all_test_cases_state_count(server, job):
 
-# Get the benchmark results from the lava bundle
-# save them as CSV files localy
+# Get the benchmark results from the object storage
+# save them as CSV files locally
-def fetch_benchmark_results(server, job):
-    content = get_job_bundle_content(server, job)
+def fetch_benchmark_results(build_id):
     testcases = ['processed_results_close.csv',
             'processed_results_ioctl.csv',
             'processed_results_open_efault.csv',
@@ -79,25 +82,9 @@ def fetch_benchmark_results(server, job):
             'processed_results_dup_close.csv',
             'processed_results_raw_syscall_getpid.csv',
             'processed_results_lttng_test_filter.csv']
-
-    # The result bundle is a large JSON containing the results of every testcase
-    # of the LAVA job as well as the files that were attached during the run.
-    # We need to iterate over this JSON to get the base64 representation of the
-    # benchmark results produced during the run.
-    for run in content['test_runs']:
-        # We only care of the benchmark testcases
-        if 'benchmark-' in run['test_id']:
-            if 'test_results' in run:
-                for res in run['test_results']:
-                    if 'attachments' in res:
-                        for a in res['attachments']:
-                            # We only save the results file
-                            if a['pathname'] in testcases:
-                                with open(a['pathname'],'wb') as f:
-                                    # Convert the b64 representation of the
-                                    # result file and write it to a file
-                                    # in the current working directory
-                                    f.write(base64.b64decode(a['content']))
+    for testcase in testcases:
+        url = urljoin(OBJSTORE_URL, "{:s}/{:s}".format(build_id, testcase))
+        urlretrieve(url, testcase)
 
 # Parse the attachment of the testcase to fetch the stdout of the test suite
 def print_test_output(server, job):
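Returning to fetch_benchmark_results() above: the rewrite drops the bundle parsing entirely and lets urlretrieve() write each CSV into the current working directory. urlretrieve() raises on HTTP errors (HTTPError is a subclass of URLError), so a variant with explicit reporting could look like the following sketch. fetch_benchmark_results_checked is a hypothetical name, not part of the patch, and it assumes the module-level OBJSTORE_URL:

    import sys
    from urllib.error import URLError
    from urllib.parse import urljoin
    from urllib.request import urlretrieve

    def fetch_benchmark_results_checked(build_id, testcases):
        # Same URL layout as the patch: <OBJSTORE_URL><build_id>/<testcase>.
        for testcase in testcases:
            url = urljoin(OBJSTORE_URL, '{:s}/{:s}'.format(build_id, testcase))
            try:
                # urlretrieve() saves the file under its testcase name.
                urlretrieve(url, testcase)
            except URLError as error:
                # Covers 404s and connection failures alike.
                print('Failed to fetch {}: {}'.format(url, error), file=sys.stderr)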
@@ -185,7 +172,7 @@ def get_config_cmd(build_device):
                 ])
     return command
 
-def get_baremetal_benchmarks_cmd():
+def get_baremetal_benchmarks_cmd(build_id):
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -193,37 +180,44 @@ def get_baremetal_benchmarks_cmd():
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/failing-close.yml'
+                    'testdef': 'lava/system-tests/failing-close.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/failing-ioctl.yml'
+                    'testdef': 'lava/system-tests/failing-ioctl.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/failing-open-efault.yml'
+                    'testdef': 'lava/system-tests/failing-open-efault.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/success-dup-close.yml'
+                    'testdef': 'lava/system-tests/success-dup-close.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/raw-syscall-getpid.yml'
+                    'testdef': 'lava/system-tests/raw-syscall-getpid.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/failing-open-enoent.yml'
+                    'testdef': 'lava/system-tests/failing-open-enoent.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/lttng-test-filter.yml'
+                    'testdef': 'lava/system-tests/lttng-test-filter.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 }
                 ],
             'timeout': 7200
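The same three-line change is repeated for all seven testdefs here, and again below in get_baremetal_tests_cmd() and get_kvm_tests_cmd(). A hypothetical refactor (BENCHMARK_TESTDEFS and make_testdef_repos are made-up names, not in the patch) would keep the list in one place:

    BENCHMARK_TESTDEFS = [
        'lava/system-tests/failing-close.yml',
        'lava/system-tests/failing-ioctl.yml',
        'lava/system-tests/failing-open-efault.yml',
        'lava/system-tests/success-dup-close.yml',
        'lava/system-tests/raw-syscall-getpid.yml',
        'lava/system-tests/failing-open-enoent.yml',
        'lava/system-tests/lttng-test-filter.yml',
    ]

    def make_testdef_repos(build_id):
        # One dict per testdef, each carrying the Jenkins build id.
        return [{
            'git-repo': 'https://github.com/lttng/lttng-ci.git',
            'revision': 'master',
            'testdef': testdef,
            'parameters': {'JENKINS_BUILD_ID': build_id},
        } for testdef in BENCHMARK_TESTDEFS]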
@@ -231,7 +225,7 @@ def get_baremetal_benchmarks_cmd():
         })
     return command
 
-def get_baremetal_tests_cmd():
+def get_baremetal_tests_cmd(build_id):
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -239,7 +233,8 @@ def get_baremetal_tests_cmd():
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/perf-tests.yml'
+                    'testdef': 'lava/system-tests/perf-tests.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 }
                 ],
             'timeout': 3600
@@ -247,7 +242,7 @@ def get_baremetal_tests_cmd():
         })
     return command
 
-def get_kvm_tests_cmd():
+def get_kvm_tests_cmd(build_id):
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -255,12 +250,14 @@ def get_kvm_tests_cmd():
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/kernel-tests.yml'
+                    'testdef': 'lava/system-tests/kernel-tests.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/destructive-tests.yml'
+                    'testdef': 'lava/system-tests/destructive-tests.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 }
                 ],
             'timeout': 7200
@@ -269,6 +266,7 @@ def get_kvm_tests_cmd():
     return command
 
 def get_kprobes_generate_data_cmd():
+    random_seed = random.randint(0, 1000000)
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -276,7 +274,8 @@ def get_kprobes_generate_data_cmd():
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/kprobe-fuzzing-generate-data.yml'
+                    'testdef': 'lava/system-tests/kprobe-fuzzing-generate-data.yml',
+                    'parameters': { 'RANDOM_SEED': str(random_seed) }
                 }
                 ],
             'timeout': 60
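get_kprobes_generate_data_cmd() now draws the random seed on the submitter and forwards it to the test definition, presumably so the generated fuzzing data can be regenerated from the same seed. The str() conversion suggests lava_test_shell parameters are passed as strings. A small sketch; the print below is an assumption, not part of the patch:

    import random

    # Drawn on the submitter so the same data set can be regenerated later.
    random_seed = random.randint(0, 1000000)
    # Passed as a string, matching how the patch hands it to lava_test_shell.
    parameters = {'RANDOM_SEED': str(random_seed)}
    # Hypothetical: record the seed in the submitter's log for replay.
    print('kprobe fuzzing seed: {}'.format(random_seed))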
@@ -311,7 +310,7 @@ def get_results_cmd(stream_name):
     command['parameters']['stream']='/anonymous/'+stream_name+'/'
     return command
 
-def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
+def get_deploy_cmd_kvm(jenkins_job, kernel_path, lttng_modules_path):
     command = OrderedDict({
             'command': 'deploy_kernel',
             'metadata': {},
@@ -325,14 +324,13 @@ def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modul
                 }
             })
 
-    command['parameters']['customize'][SCP_PATH+linux_modules_path]=['rootfs:/','archive']
     command['parameters']['customize'][SCP_PATH+lttng_modules_path]=['rootfs:/','archive']
     command['parameters']['kernel'] = str(SCP_PATH+kernel_path)
     command['metadata']['jenkins_jobname'] = jenkins_job
 
     return command
 
-def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
+def get_deploy_cmd_x86(jenkins_job, kernel_path, lttng_modules_path, nb_iter=None):
     command = OrderedDict({
             'command': 'deploy_kernel',
             'metadata': {},
@@ -344,7 +342,6 @@ def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modul
                 }
             })
 
-    command['parameters']['overlays'].append( str(SCP_PATH+linux_modules_path))
     command['parameters']['overlays'].append( str(SCP_PATH+lttng_modules_path))
     command['parameters']['kernel'] = str(SCP_PATH+kernel_path)
     command['metadata']['jenkins_jobname'] = jenkins_job
@@ -401,9 +398,9 @@ def main():
     parser.add_argument('-t', '--type', required=True)
     parser.add_argument('-j', '--jobname', required=True)
     parser.add_argument('-k', '--kernel', required=True)
-    parser.add_argument('-km', '--kmodule', required=True)
     parser.add_argument('-lm', '--lmodule', required=True)
     parser.add_argument('-tc', '--tools-commit', required=True)
+    parser.add_argument('-id', '--build-id', required=True)
     parser.add_argument('-uc', '--ust-commit', required=False)
     args = parser.parse_args()
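With -km/--kmodule gone and -id/--build-id added, a submission could look roughly like this (every path and value below is illustrative, not taken from a real job):

    python3 lava-submit.py \
        --type kvm_tests \
        --jobname lttng-kvm-tests \
        --kernel /storage/kernels/vmlinuz \
        --lmodule /storage/modules/lttng-modules.tar.gz \
        --tools-commit 1a2b3c4d \
        --build-id 1234 \
        --ust-commit 5e6f7a8b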
 
@@ -428,20 +425,20 @@ def main():
 
     if test_type is TestType.baremetal_benchmarks:
         j = create_new_job(args.jobname, build_device='x86')
-        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
+        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.lmodule))
     elif test_type is TestType.baremetal_tests:
         j = create_new_job(args.jobname, build_device='x86')
-        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
+        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.lmodule))
     elif test_type  is TestType.kvm_tests or test_type is TestType.kvm_fuzzing_tests:
         j = create_new_job(args.jobname, build_device='kvm')
-        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))
+        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.lmodule))
 
     j['actions'].append(get_boot_cmd())
 
     if test_type is TestType.baremetal_benchmarks:
         j['actions'].append(get_config_cmd('x86'))
         j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
-        j['actions'].append(get_baremetal_benchmarks_cmd())
+        j['actions'].append(get_baremetal_benchmarks_cmd(args.build_id))
         j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
     elif test_type is TestType.baremetal_tests:
         if args.ust_commit is None:
@@ -449,7 +446,7 @@ def main():
             return -1
         j['actions'].append(get_config_cmd('x86'))
         j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
-        j['actions'].append(get_baremetal_tests_cmd())
+        j['actions'].append(get_baremetal_tests_cmd(args.build_id))
         j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
     elif test_type  is TestType.kvm_tests:
         if args.ust_commit is None:
@@ -457,7 +454,7 @@ def main():
             return -1
         j['actions'].append(get_config_cmd('kvm'))
         j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
-        j['actions'].append(get_kvm_tests_cmd())
+        j['actions'].append(get_kvm_tests_cmd(args.build_id))
         j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
     elif test_type is TestType.kvm_fuzzing_tests:
         if args.ust_commit is None:
@@ -491,7 +488,7 @@ def main():
     if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
         print_test_output(server, jobid)
     elif test_type is TestType.baremetal_benchmarks:
-        fetch_benchmark_results(server, jobid)
+        fetch_benchmark_results(args.build_id)
 
     print('Job ended with {} status.'.format(jobstatus))
     if jobstatus not in 'Complete':
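One pre-existing quirk near the new fetch_benchmark_results() call: `jobstatus not in 'Complete'` is a substring test against the string 'Complete', not an equality check. It happens to classify 'Incomplete' as a failure, but any status that is a substring of 'Complete' would slip through. Stating the intent directly (sketch only; the failure handling itself is unchanged):

    if jobstatus != 'Complete':
        ...  # same failure path as the original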