LAVA: get linux modules and embed them in lttng-modules tarball
[lttng-ci.git] / scripts / system-tests / lava2-submit.py
#!/usr/bin/python3
# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import argparse
import base64
import json
import os
import random
import sys
import time
import yaml
import xmlrpc.client
import pprint

# urljoin and urlretrieve are needed by fetch_benchmark_results() below.
from urllib.parse import urljoin
from urllib.request import urlretrieve

from jinja2 import Environment, FileSystemLoader, meta

USERNAME = 'lava-jenkins'
HOSTNAME = 'lava-master-02.internal.efficios.com'
OBJSTORE_URL = "https://obj.internal.efficios.com/lava/results/"

class TestType():
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3
    kvm_fuzzing_tests = 4
    values = {
        'baremetal-benchmarks': baremetal_benchmarks,
        'baremetal-tests': baremetal_tests,
        'kvm-tests': kvm_tests,
        'kvm-fuzzing-tests': kvm_fuzzing_tests,
    }

class DeviceType():
    x86 = 'x86'
    kvm = 'qemu'
    values = {
        'kvm': kvm,
        'x86': x86,
    }

def get_job_bundle_content(server, job):
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)
        raise f

    return json.loads(bundle['content'])

# Parse the results bundle to check whether the run-tests testcases
# of the lttng-kernel-tests suite passed successfully.
def check_job_all_test_cases_state_count(server, job):
    print("Testcase result:")
    content = server.results.get_testjob_results_yaml(str(job))
    testcases = yaml.load(content)

    passed_tests = 0
    failed_tests = 0
    for testcase in testcases:
        if testcase['result'] != 'pass':
            print("\tFAILED {}\n\t\t See http://{}{}".format(
                testcase['name'],
                HOSTNAME,
                testcase['url']
            ))
            failed_tests += 1
        else:
            passed_tests += 1
    return (passed_tests, failed_tests)
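
# Illustration only: get_testjob_results_yaml() returns YAML that parses into a
# list of dicts. The exact schema is defined by the LAVA server; the keys used
# above would look roughly like this (hypothetical values):
#   [{'name': 'run-tests', 'result': 'pass', 'url': '/results/testcase/1234'}, ...]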

# Get the benchmark results from the object store and
# save them as CSV files locally.
def fetch_benchmark_results(build_id):
    testcases = ['processed_results_close.csv',
                 'processed_results_ioctl.csv',
                 'processed_results_open_efault.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_dup_close.csv',
                 'processed_results_raw_syscall_getpid.csv',
                 'processed_results_lttng_test_filter.csv']
    for testcase in testcases:
        url = urljoin(OBJSTORE_URL, "{:s}/{:s}".format(build_id, testcase))
        urlretrieve(url, testcase)
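
# Illustration only: with a hypothetical build id '1234', the first URL fetched
# above would be:
#   https://obj.internal.efficios.com/lava/results/1234/processed_results_close.csv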

# Parse the job log to extract the stdout of the test suite,
# delimited by the run-tests LAVA signals.
def print_test_output(server, job):
    job_finished, log = server.scheduler.jobs.logs(str(job))
    logs = yaml.load(log.data.decode('ascii'))
    print_line = False
    for line in logs:
        if line['lvl'] != 'target':
            continue
        if line['msg'] == '<LAVA_SIGNAL_STARTTC run-tests>':
            print('---- TEST SUITE OUTPUT BEGIN ----')
            print_line = True
            continue
        if line['msg'] == '<LAVA_SIGNAL_ENDTC run-tests>':
            print('----- TEST SUITE OUTPUT END -----')
            break
        if print_line:
            print("{} {}".format(line['dt'], line['msg']))
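
# Illustration only: each entry of `logs` is expected to carry at least the
# keys read above ('lvl', 'msg', 'dt'); a 'target' log line would look roughly
# like this (hypothetical values):
#   {'dt': '2018-11-29T20:00:00', 'lvl': 'target', 'msg': 'ok 1 - Some test'}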

def get_vlttng_cmd(device, lttng_tools_commit, lttng_ust_commit=None):
    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
        ' --override projects.babeltrace.build-env.PYTHON=python3' \
        ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
        ' --profile babeltrace-stable-1.4' \
        ' --profile babeltrace-python' \
        ' --profile lttng-tools-master' \
        ' --override projects.lttng-tools.checkout=' + lttng_tools_commit + \
        ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master' \
            ' --override projects.lttng-ust.checkout=' + lttng_ust_commit + \
            ' --profile lttng-ust-no-man-pages'

    if device == DeviceType.kvm:
        vlttng_path = '/root/virtenv'
    else:
        vlttng_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + vlttng_path

    return vlttng_cmd
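
# Illustration only: for a KVM job without an lttng-ust commit, the command
# built above would look like this (wrapped here for readability; <tools-sha1>
# is a placeholder):
#   vlttng --jobs=$(nproc) --profile urcu-master \
#       --override projects.babeltrace.build-env.PYTHON=python3 \
#       --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config \
#       --profile babeltrace-stable-1.4 --profile babeltrace-python \
#       --profile lttng-tools-master \
#       --override projects.lttng-tools.checkout=<tools-sha1> \
#       --profile lttng-tools-no-man-pages /root/virtenv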

def main():
    nfsrootfs = "https://obj.internal.efficios.com/lava/rootfs/rootfs_amd64_bionic_2018-11-29.tar.gz"
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-id', '--build-id', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    parser.add_argument('-d', '--debug', required=False, action='store_true')
    args = parser.parse_args()

    if args.type not in TestType.values:
        print('argument -t/--type {} unrecognized.'.format(args.type))
        print('Possible values are:')
        for k in TestType.values:
            print('\t {}'.format(k))
        return -1

    lava_api_key = None
    if not args.debug:
        try:
            lava_api_key = os.environ['LAVA2_JENKINS_TOKEN']
        except KeyError as e:
            print('LAVA2_JENKINS_TOKEN not found in the environment. Exiting...', e)
            return -1

    jinja_loader = FileSystemLoader(os.path.dirname(os.path.realpath(__file__)))
    jinja_env = Environment(loader=jinja_loader, trim_blocks=True,
                            lstrip_blocks=True)
    jinja_template = jinja_env.get_template('template_lava_job.jinja2')
    template_source = jinja_env.loader.get_source(jinja_env, 'template_lava_job.jinja2')
    parsed_content = jinja_env.parse(template_source)
    undef = meta.find_undeclared_variables(parsed_content)

    test_type = TestType.values[args.type]

    if test_type in [TestType.baremetal_benchmarks, TestType.baremetal_tests]:
        device_type = DeviceType.x86
    else:
        device_type = DeviceType.kvm

    vlttng_path = '/tmp/virtenv'

    vlttng_cmd = get_vlttng_cmd(device_type, args.tools_commit, args.ust_commit)

    context = dict()
    context['DeviceType'] = DeviceType
    context['TestType'] = TestType

    context['job_name'] = args.jobname
    context['test_type'] = test_type
    context['random_seed'] = random.randint(0, 1000000)
    context['device_type'] = device_type

    context['vlttng_cmd'] = vlttng_cmd
    context['vlttng_path'] = vlttng_path

    context['kernel_url'] = args.kernel
    context['nfsrootfs_url'] = nfsrootfs
    context['lttng_modules_url'] = args.lmodule
    context['jenkins_build_id'] = args.build_id

    context['kprobe_round_nb'] = 10

    render = jinja_template.render(context)

    print('Job to be submitted:')

    print(render)

    if args.debug:
        return 0

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(render)

    print('Lava jobid: {}'.format(jobid))
    print('Lava job URL: http://{}/scheduler/job/{}'.format(HOSTNAME, jobid))

    # Check the status of the job every 30 seconds
    jobstatus = server.scheduler.job_state(jobid)['job_state']
    running = False
    while jobstatus in ['Submitted', 'Scheduling', 'Scheduled', 'Running']:
        if not running and jobstatus == 'Running':
            print('Job started running')
            running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_state(jobid)['job_state']
    print('Job ended with {} status.'.format(jobstatus))

    if jobstatus != 'Finished':
        return -1

    if test_type == TestType.kvm_tests or test_type == TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type == TestType.baremetal_benchmarks:
        fetch_benchmark_results(args.build_id)

    passed, failed = check_job_all_test_cases_state_count(server, jobid)
    print('LAVA test cases: {} passed, {} failed.'.format(passed, failed))

    if failed == 0:
        return 0
    else:
        return -1

if __name__ == "__main__":
    sys.exit(main())
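
# Example invocation (illustrative values only; URLs, commit and build ids are
# hypothetical). With --debug the job is only rendered, not submitted;
# otherwise LAVA2_JENKINS_TOKEN must be set in the environment:
#   ./lava2-submit.py \
#       --type kvm-tests \
#       --jobname my-lttng-job \
#       --kernel https://obj.internal.efficios.com/.../vmlinuz \
#       --lmodule https://obj.internal.efficios.com/.../lttng-modules.tar.gz \
#       --tools-commit <lttng-tools-sha1> \
#       --build-id 1234 \
#       --debug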