jjb: lava: remove useless and periodically failing git clone
[lttng-ci.git] / scripts / lttng-baremetal-tests / lava-submit.py
CommitLineData
b3d73c46
FD
1#!/usr/bin/python
2# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
3#
4# This program is free software: you can redistribute it and/or modify
5# it under the terms of the GNU General Public License as published by
6# the Free Software Foundation, either version 3 of the License, or
7# (at your option) any later version.
8#
9# This program is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU General Public License for more details.
13#
14# You should have received a copy of the GNU General Public License
15# along with this program. If not, see <http://www.gnu.org/licenses/>.
16
17import argparse
18import base64
19import json
20import os
21import sys
22import time
f42b2b7e 23import xmlrpc.client
b3d73c46
FD
24from collections import OrderedDict
25from enum import Enum
26
# Credentials and host of the LAVA master used to submit and poll jobs.
USERNAME = 'frdeso'
HOSTNAME = 'lava-master.internal.efficios.com'
# scp:// prefix prepended to artifact paths (kernel, modules) so the LAVA
# dispatcher can fetch them from the storage server.
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
30
class TestType(Enum):
    """Kinds of LAVA test campaigns this script knows how to submit."""

    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3
    kvm_fuzzing_tests = 4
36
def get_job_bundle_content(server, job):
    """Fetch and deserialize the results bundle of a finished LAVA job.

    :param server: xmlrpc.client.ServerProxy connected to the LAVA master.
    :param job: LAVA job id (converted to str for the scheduler API).
    :return: the bundle 'content' field parsed from JSON (a dict).
    :raises xmlrpc.client.Fault: when the scheduler/dashboard call fails.
    """
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)
        # BUGFIX: the original fell through to the return statement with
        # 'bundle' unbound, turning the fault into a confusing
        # UnboundLocalError. Re-raise so the caller sees the real error.
        raise

    return json.loads(bundle['content'])
45
def check_job_all_test_cases_state_count(server, job):
    """Count passed and failed test cases of a LAVA job's results bundle.

    Parses the results bundle to see whether the test cases of the
    lttng-kernel-tests run passed successfully.

    :param server: xmlrpc.client.ServerProxy connected to the LAVA master.
    :param job: LAVA job id.
    :return: (passed_tests, failed_tests) tuple of counters.
    """
    content = get_job_bundle_content(server, job)

    # FIXME: Those tests are part of the boot actions and fail randomly but
    # don't affect the behaviour of the tests. We should update our LAVA
    # installation and try to reproduce it. This error was encountered on
    # Ubuntu 16.04.
    tests_known_to_fail = ['mount', 'df', 'ls', 'ip', 'wait_for_test_image_prompt']

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            # Entries without a test_case_id are not test cases; skip them.
            if 'test_case_id' not in result:
                continue
            # BUGFIX: use strict equality. The original substring test
            # (result['result'] in 'pass') would also count results such
            # as 'a' or 'as' as passed.
            if result['result'] == 'pass':
                passed_tests += 1
            elif result['test_case_id'] not in tests_known_to_fail:
                failed_tests += 1
    return (passed_tests, failed_tests)
69
104ed94b
FD
def fetch_benchmark_results(server, job):
    """Extract the benchmark CSV files from the LAVA results bundle and
    save them in the current working directory."""
    wanted_files = ['processed_results_close.csv',
                    'processed_results_ioctl.csv',
                    'processed_results_open_efault.csv',
                    'processed_results_open_enoent.csv',
                    'processed_results_dup_close.csv',
                    'processed_results_raw_syscall_getpid.csv',
                    'processed_results_lttng_test_filter.csv']

    # The result bundle is a large JSON containing the results of every
    # testcase of the LAVA job as well as the files that were attached
    # during the run. Walk it and decode the base64 payload of each
    # benchmark result file.
    bundle = get_job_bundle_content(server, job)
    for run in bundle['test_runs']:
        # Only the benchmark testcases carry the CSV attachments we want.
        if 'benchmark-' not in run['test_id']:
            continue
        for result in run.get('test_results', []):
            for attachment in result.get('attachments', []):
                # Keep only the known result files.
                if attachment['pathname'] not in wanted_files:
                    continue
                # Convert the b64 representation of the result file and
                # write it to a file in the current working directory.
                with open(attachment['pathname'], 'wb') as out:
                    out.write(base64.b64decode(attachment['content']))
100
b3d73c46
FD
def print_test_output(server, job):
    """Print the stdout of the run-tests test case to the console.

    Parses the stdout.log attachment of the lttng-kernel-test run and
    prints the section delimited by the LAVA_SIGNAL_STARTTC/ENDTC
    run-tests markers.
    """
    content = get_job_bundle_content(server, job)
    found = False

    for run in content['test_runs']:
        if run['test_id'] in 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] in 'stdout.log':

                    # BUGFIX: decode the base64 payload to text before
                    # splitting. The previous str(base64.b64decode(...))
                    # produced a bytes repr ("b'...\\n...'") in which the
                    # newlines are two-character escape sequences, so
                    # split('\n') never matched and the whole log came out
                    # as a single line.
                    testoutput = base64.b64decode(
                        bytes(attachment['content'], encoding='UTF-8')
                    ).decode('utf-8', errors='replace').split('\n')

                    # Create a generator to iterate on the lines, keeping
                    # the state of the iterator across the two loops.
                    testoutput_iter = iter(testoutput)
                    for line in testoutput_iter:

                        # Find the header of the test case and start
                        # printing from there.
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            found = True
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Reached the end of the section.
                                    break

                        if found is True:
                            print('----- TEST SUITE OUTPUT END -----')
                            break
136
dc9700c9 137def create_new_job(name, build_device):
b3d73c46
FD
138 job = OrderedDict({
139 'health_check': False,
140 'job_name': name,
10ca6e3a 141 'device_type': build_device,
dc9700c9 142 'tags': [ ],
10ca6e3a 143 'timeout': 7200,
b3d73c46
FD
144 'actions': []
145 })
dc9700c9
FD
146 if build_device in 'x86':
147 job['tags'].append('dev-sda1')
148
b3d73c46
FD
149 return job
150
def get_boot_cmd():
    """Return the LAVA action that boots the deployed image."""
    return OrderedDict({'command': 'boot_image'})
156
dc9700c9 157def get_config_cmd(build_device):
b3d73c46
FD
158 packages=['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip', \
159 'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev', \
e1504b01 160 'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev', \
30b89c1f 161 'libnuma-dev', 'python3-dev', 'swig', 'stress']
b3d73c46
FD
162 command = OrderedDict({
163 'command': 'lava_command_run',
164 'parameters': {
165 'commands': [
b3d73c46
FD
166 'cat /etc/resolv.conf',
167 'echo nameserver 172.18.0.12 > /etc/resolv.conf',
dc9700c9 168 'groupadd tracing'
38cfd62c
FD
169 ],
170 'timeout':300
b3d73c46
FD
171 }
172 })
dc9700c9
FD
173 if build_device in 'x86':
174 command['parameters']['commands'].extend([
175 'mount /dev/sda1 /tmp',
176 'rm -rf /tmp/*'])
177
178 command['parameters']['commands'].extend([
179 'depmod -a',
180 'locale-gen en_US.UTF-8',
181 'apt-get update',
819f0b86 182 'apt-get upgrade',
dc9700c9
FD
183 'apt-get install -y {}'.format(' '.join(packages))
184 ])
b3d73c46
FD
185 return command
186
1ac7fa2c 187def get_baremetal_benchmarks_cmd():
b3d73c46
FD
188 command = OrderedDict({
189 'command': 'lava_test_shell',
190 'parameters': {
191 'testdef_repos': [
192 {
193 'git-repo': 'https://github.com/lttng/lttng-ci.git',
194 'revision': 'master',
195 'testdef': 'lava/baremetal-tests/failing-close.yml'
196 },
c863d7ca
FD
197 {
198 'git-repo': 'https://github.com/lttng/lttng-ci.git',
199 'revision': 'master',
200 'testdef': 'lava/baremetal-tests/failing-ioctl.yml'
201 },
b3d73c46
FD
202 {
203 'git-repo': 'https://github.com/lttng/lttng-ci.git',
204 'revision': 'master',
205 'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
206 },
207 {
208 'git-repo': 'https://github.com/lttng/lttng-ci.git',
209 'revision': 'master',
70e85c98 210 'testdef': 'lava/baremetal-tests/success-dup-close.yml'
dff1609b 211 },
0ca122b6
FD
212 {
213 'git-repo': 'https://github.com/lttng/lttng-ci.git',
214 'revision': 'master',
215 'testdef': 'lava/baremetal-tests/raw-syscall-getpid.yml'
216 },
6ebb7306
FD
217 {
218 'git-repo': 'https://github.com/lttng/lttng-ci.git',
219 'revision': 'master',
220 'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
221 },
dff1609b
FD
222 {
223 'git-repo': 'https://github.com/lttng/lttng-ci.git',
224 'revision': 'master',
225 'testdef': 'lava/baremetal-tests/lttng-test-filter.yml'
1ac7fa2c
FD
226 }
227 ],
10ca6e3a 228 'timeout': 7200
1ac7fa2c
FD
229 }
230 })
231 return command
232
def get_baremetal_tests_cmd():
    """Return the lava_test_shell action running the perf regression
    tests on the bare-metal target."""
    perf_testdef = {
        'git-repo': 'https://github.com/lttng/lttng-ci.git',
        'revision': 'master',
        'testdef': 'lava/baremetal-tests/perf-tests.yml'
    }
    return OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [perf_testdef],
            'timeout': 3600
        }
    })
248
1ac7fa2c 249def get_kvm_tests_cmd():
b3d73c46
FD
250 command = OrderedDict({
251 'command': 'lava_test_shell',
252 'parameters': {
253 'testdef_repos': [
254 {
255 'git-repo': 'https://github.com/lttng/lttng-ci.git',
256 'revision': 'master',
257 'testdef': 'lava/baremetal-tests/kernel-tests.yml'
ee02050f
FD
258 },
259 {
260 'git-repo': 'https://github.com/lttng/lttng-ci.git',
261 'revision': 'master',
262 'testdef': 'lava/baremetal-tests/destructive-tests.yml'
9d9c173f
FD
263 }
264 ],
958065a8 265 'timeout': 7200
9d9c173f
FD
266 }
267 })
268 return command
def get_kprobes_test_cmd():
    """Return the lava_test_shell action running the kprobe fuzzing
    test definition."""
    fuzzing_testdef = {
        'git-repo': 'https://github.com/lttng/lttng-ci.git',
        'revision': 'master',
        'testdef': 'lava/baremetal-tests/kprobe-fuzzing-tests.yml'
    }
    return OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [fuzzing_testdef],
            'timeout': 7200
        }
    })
284
def get_results_cmd(stream_name):
    """Return the submit_results action posting the bundle to the given
    anonymous stream on the LAVA master."""
    return OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/',
            'stream': '/anonymous/{}/'.format(stream_name),
        }
    })
294
dc9700c9
FD
def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
    """Return the deploy_kernel action for the KVM target.

    Deploys the kernel plus the Linux and LTTng module archives into the
    xenial guest rootfs, fetched over scp from the storage server.
    """
    # Each module archive is unpacked at the root of the guest rootfs.
    customize = {
        SCP_PATH + linux_modules_path: ['rootfs:/', 'archive'],
        SCP_PATH + lttng_modules_path: ['rootfs:/', 'archive'],
    }
    return OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {'jenkins_jobname': jenkins_job},
        'parameters': {
            'customize': customize,
            'kernel': str(SCP_PATH + kernel_path),
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:',
            'username': 'root'
        }
    })
315
def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
    """Return the deploy_kernel action for the physical x86 target.

    Deploys the kernel over an NFS rootfs with the Linux and LTTng module
    archives as overlays. ``nb_iter`` is recorded in the job metadata for
    benchmark runs.
    """
    metadata = {'jenkins_jobname': jenkins_job}
    if nb_iter is not None:
        metadata['nb_iterations'] = nb_iter

    return OrderedDict({
        'command': 'deploy_kernel',
        'metadata': metadata,
        'parameters': {
            'overlays': [
                str(SCP_PATH + linux_modules_path),
                str(SCP_PATH + lttng_modules_path),
            ],
            'kernel': str(SCP_PATH + kernel_path),
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })
336
337
dc9700c9 338def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
b3d73c46
FD
339 command = OrderedDict({
340 'command': 'lava_command_run',
341 'parameters': {
342 'commands': [
819f0b86 343 'pip3 install --upgrade pip',
46fb8afa 344 'hash -r',
b3d73c46
FD
345 'pip3 install vlttng',
346 ],
10ca6e3a 347 'timeout': 3600
b3d73c46
FD
348 }
349 })
350
4418be37 351 vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
c6204172
FD
352 ' --override projects.babeltrace.build-env.PYTHON=python3' \
353 ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
354 ' --profile babeltrace-stable-1.4' \
355 ' --profile babeltrace-python' \
b3d73c46
FD
356 ' --profile lttng-tools-master' \
357 ' --override projects.lttng-tools.checkout='+lttng_tools_commit + \
358 ' --profile lttng-tools-no-man-pages'
359
360 if lttng_ust_commit is not None:
361 vlttng_cmd += ' --profile lttng-ust-master ' \
362 ' --override projects.lttng-ust.checkout='+lttng_ust_commit+ \
363 ' --profile lttng-ust-no-man-pages'
364
dc9700c9
FD
365 virtenv_path = None
366 if build_device in 'kvm':
367 virtenv_path = '/root/virtenv'
368 else:
369 virtenv_path = '/tmp/virtenv'
370
371 vlttng_cmd += ' '+virtenv_path
b3d73c46
FD
372
373 command['parameters']['commands'].append(vlttng_cmd)
dc9700c9
FD
374 command['parameters']['commands'].append('ln -s '+virtenv_path+' /root/lttngvenv')
375 command['parameters']['commands'].append('sync')
376
b3d73c46
FD
377 return command
378
def main():
    """CLI entry point: build, submit and monitor a LAVA job.

    Parses the command-line arguments, assembles the job definition for
    the requested test type, submits it over XML-RPC, polls its status
    every 30 seconds, then collects output/results.

    Returns 0 when the job completes with no failed test cases, -1 on
    any error (bad arguments, missing token, job failure).
    """
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()

    # Map the textual --type option to the TestType enum.
    # NOTE(review): 'in' performs a substring test here ('kvm-tests' also
    # matches inside 'kvm-fuzzing-tests'); order of the branches matters.
    if args.type in 'baremetal-benchmarks':
        test_type = TestType.baremetal_benchmarks
    elif args.type in 'baremetal-tests':
        test_type = TestType.baremetal_tests
    elif args.type in 'kvm-tests':
        test_type = TestType.kvm_tests
    elif args.type in 'kvm-fuzzing-tests':
        test_type = TestType.kvm_fuzzing_tests
    else:
        print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1

    # The LAVA API token is injected by Jenkins through the environment.
    lava_api_key = None
    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except Exception as e:
        print('LAVA_JENKINS_TOKEN not found in the environment variable. Exiting...', e )
        return -1

    # Create the job skeleton and the deploy action matching the target
    # device (physical x86 vs KVM guest).
    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.kvm_tests or test_type is TestType.kvm_fuzzing_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))

    j['actions'].append(get_boot_cmd())

    # Append the configuration, environment-setup, test-shell and
    # result-submission actions specific to each test type.
    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Tests runs need -uc/--ust-commit options. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Tests runs need -uc/--ust-commit options. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_fuzzing_tests:
        if args.ust_commit is None:
            print('Tests runs need -uc/--ust-commit options. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kprobes_test_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    # Submit the job to the LAVA master over authenticated XML-RPC.
    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('Lava jobid:{}'.format(jobid))
    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Poll the status of the job every 30 seconds until it leaves the
    # Submitted/Running states.
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    not_running = False
    while jobstatus in 'Submitted' or jobstatus in 'Running':
        if not_running is False and jobstatus in 'Running':
            print('Job started running')
            not_running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    # Collect the test-suite stdout or the benchmark CSVs, depending on
    # what kind of job this was.
    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus not in 'Complete':
        return -1
    else:
        # The job completed: the overall verdict is the per-test-case count.
        passed, failed=check_job_all_test_cases_state_count(server, jobid)
        print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

        if failed == 0:
            return 0
        else:
            return -1
b3d73c46
FD
487
# Script entry point: propagate main()'s return code to the shell so the
# Jenkins build step fails when the LAVA job fails.
if __name__ == "__main__":
    sys.exit(main())
This page took 0.045895 seconds and 4 git commands to generate.