jjb: Add back open_enoent Lava benchmark testcase
[lttng-ci.git] / scripts / lttng-baremetal-tests / lava-submit.py
index e4fbc7feed262f30cfd2257add51f9f650e00756..8ba92514f8a239a9907c6105310324c0cb315730 100644 (file)
@@ -70,7 +70,11 @@ def check_job_all_test_cases_state_count(server, job):
 # save them as CSV files locally
 def fetch_benchmark_results(server, job):
     content = get_job_bundle_content(server, job)
-    testcases = ['processed_results_close.csv', 'processed_results_open_enoent.csv', 'processed_results_open_efault.csv']
+    testcases = ['processed_results_close.csv',
+            'processed_results_open_efault.csv',
+            'processed_results_open_enoent.csv',
+            'processed_results_dup_close.csv',
+            'processed_results_lttng_test_filter.csv']
 
     # The result bundle is a large JSON containing the results of every testcase
     # of the LAVA job as well as the files that were attached during the run.
@@ -78,7 +82,7 @@ def fetch_benchmark_results(server, job):
     # benchmark results produced during the run.
     for run in content['test_runs']:
     # We only care about the benchmark testcases
-        if 'benchmark-syscall-' in run['test_id']:
+        if 'benchmark-' in run['test_id']:
             if 'test_results' in run:
                 for res in run['test_results']:
                     if 'attachments' in res:
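
Aside: a minimal sketch of the fetch-and-decode path this hunk filters, assuming the LAVA v1 XML-RPC API (scheduler.job_status exposing 'bundle_sha1', dashboard.get returning the serialized bundle) and base64-encoded attachments carrying 'pathname'/'content' fields; those method and field names are assumptions, not taken from this diff.

    import base64
    import json

    # Fetch the result bundle for a job and parse it into a dict
    # (assumed v1 API method names).
    def get_job_bundle_content(server, job):
        bundle_sha = server.scheduler.job_status(job)['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
        return json.loads(bundle['content'])

    # Write out any attachment whose file name matches one of the expected
    # benchmark CSV names ('pathname'/'content' keys are assumed).
    def save_benchmark_attachments(run, testcases):
        for res in run.get('test_results', []):
            for attachment in res.get('attachments', []):
                name = attachment.get('pathname', '')
                if name in testcases:
                    with open(name, 'wb') as f:
                        f.write(base64.b64decode(attachment['content']))
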
@@ -150,7 +154,8 @@ def get_boot_cmd():
 def get_config_cmd(build_device):
     packages=['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip', \
             'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev', \
-            'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev']
+            'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev', \
+            'libnuma-dev']
     command = OrderedDict({
         'command': 'lava_command_run',
         'parameters': {
@@ -158,7 +163,8 @@ def get_config_cmd(build_device):
                 'cat /etc/resolv.conf',
                 'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                 'groupadd tracing'
-                ]
+                ],
+                'timeout':300
             }
         })
     if build_device in 'x86':
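
Aside: a hypothetical sketch of how the extended package list could be folded into the same lava_command_run block as an extra shell command; the apt-get invocation and the add_package_install helper are illustrative assumptions, not code from this script.

    from collections import OrderedDict

    # Append an install command for the requested packages to the
    # lava_command_run parameters (the apt-get line is an assumption).
    def add_package_install(command, packages):
        command['parameters']['commands'].append(
            'apt-get install -y {}'.format(' '.join(packages)))

    packages = ['bsdtar', 'psmisc', 'libpfm4-dev', 'libnuma-dev']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': ['groupadd tracing'],
            'timeout': 300
        }
    })
    add_package_install(command, packages)
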
@@ -190,10 +196,20 @@ def get_baremetal_benchmarks_cmd():
                     'revision': 'master',
                     'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
                 },
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/baremetal-tests/success-dup-close.yml'
+                },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
                     'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
+                },
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/baremetal-tests/lttng-test-filter.yml'
                 }
                 ],
             'timeout': 18000
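
Aside: a hypothetical helper that builds one testdef entry, factoring out the repeated git-repo/revision pair seen above; the surrounding 'testdef_repos' key is an assumption about the lava_test_shell parameters, not taken from this diff.

    # Build one testdef entry pointing at a YAML test definition in lttng-ci.
    def testdef(path, repo='https://github.com/lttng/lttng-ci.git',
                revision='master'):
        return {'git-repo': repo, 'revision': revision, 'testdef': path}

    testdef_repos = [
        testdef('lava/baremetal-tests/success-dup-close.yml'),
        testdef('lava/baremetal-tests/failing-open-enoent.yml'),
        testdef('lava/baremetal-tests/lttng-test-filter.yml'),
    ]
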
@@ -354,9 +370,9 @@ def main():
 
     lava_api_key = None
     try:
-        lava_api_key = os.environ['LAVA_FRDESO_TOKEN']
+        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
     except Exception as e:
-        print('LAVA_FRDESO_TOKEN not found in the environment variable. Exiting...', e )
+        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...', e)
         return -1
 
     if test_type is TestType.baremetal_benchmarks:
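
Aside: the same lookup can be written with os.environ.get() instead of catching the KeyError; a minimal sketch, not the script's code.

    import os

    # Returns None instead of raising when the token is absent.
    lava_api_key = os.environ.get('LAVA_JENKINS_TOKEN')
    if lava_api_key is None:
        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...')
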
@@ -400,6 +416,7 @@ def main():
     jobid = server.scheduler.submit_job(json.dumps(j))
 
     print('Lava jobid:{}'.format(jobid))
+    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))
 
     #Check the status of the job every 30 seconds
     jobstatus = server.scheduler.job_status(jobid)['job_status']
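
Aside: a minimal sketch of the 30-second polling loop described above, reusing the scheduler.job_status call shown in the diff; the non-terminal state names ('Submitted', 'Running') are assumptions about the LAVA v1 scheduler.

    import time

    # Poll the scheduler until the job leaves a non-terminal state.
    def wait_for_job(server, jobid):
        jobstatus = server.scheduler.job_status(jobid)['job_status']
        while jobstatus in ('Submitted', 'Running'):
            time.sleep(30)
            jobstatus = server.scheduler.job_status(jobid)['job_status']
        return jobstatus
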