[lttng-ci.git] / scripts/system-tests/lava-v2-submit.py
#!/usr/bin/python3
# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

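"""Submit a system-test job to the LAVA master over XML-RPC, poll its
status until it finishes, then retrieve its results: the test suite
output for test jobs, or the benchmark CSV files for benchmark jobs."""
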
import argparse
import base64
import json
import os
import random
import sys
import time
import xmlrpc.client
from collections import OrderedDict

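# USERNAME and HOSTNAME form the XML-RPC endpoint used to submit and poll
# jobs (the token itself comes from LAVA_JENKINS_TOKEN at runtime), while
# SCP_PATH is prepended to the kernel and module paths given on the command
# line so the LAVA dispatcher can fetch those artifacts from storage.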
USERNAME = 'frdeso'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'

# Test types supported by this script, keyed by the -t/--type CLI argument.
class TestType():
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3
    kvm_fuzzing_tests = 4
    values = {
        'baremetal-benchmarks': baremetal_benchmarks,
        'baremetal-tests': baremetal_tests,
        'kvm-tests': kvm_tests,
        'kvm-fuzzing-tests': kvm_fuzzing_tests,
    }

# Fetch the results bundle of a LAVA job: a JSON document that aggregates
# every test run of the job along with their attached files.
def get_job_bundle_content(server, job):
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)
        raise

    return json.loads(bundle['content'])

# Parse the results bundle to check that the run-tests testcase
# of the lttng-kernel-tests suite passed successfully
def check_job_all_test_cases_state_count(server, job):
    content = get_job_bundle_content(server, job)

    # FIXME: These tests are part of the boot actions and fail randomly, but
    # this does not affect the behaviour of the tests. We should update our
    # LAVA installation and try to reproduce it. This error was encountered
    # on Ubuntu 16.04.
    tests_known_to_fail = ['mount', 'df', 'ls', 'ip', 'wait_for_test_image_prompt']

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                if result['result'] == 'pass':
                    passed_tests += 1
                elif result['test_case_id'] in tests_known_to_fail:
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)

# Get the benchmark results from the LAVA bundle and
# save them as CSV files locally
def fetch_benchmark_results(server, job):
    content = get_job_bundle_content(server, job)
    testcases = ['processed_results_close.csv',
                 'processed_results_ioctl.csv',
                 'processed_results_open_efault.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_dup_close.csv',
                 'processed_results_raw_syscall_getpid.csv',
                 'processed_results_lttng_test_filter.csv']

    # The result bundle is a large JSON document containing the results of
    # every testcase of the LAVA job as well as the files that were attached
    # during the run. We need to iterate over this JSON to get the base64
    # representation of the benchmark results produced during the run.
    for run in content['test_runs']:
        # We only care about the benchmark testcases
        if 'benchmark-' in run['test_id']:
            if 'test_results' in run:
                for res in run['test_results']:
                    if 'attachments' in res:
                        for a in res['attachments']:
                            # We only save the result files
                            if a['pathname'] in testcases:
                                with open(a['pathname'], 'wb') as f:
                                    # Decode the base64 representation of the
                                    # result file and write it to a file
                                    # in the current working directory
                                    f.write(base64.b64decode(a['content']))

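# Note: the decoded CSV files land in the current working directory,
# presumably so the calling CI job can archive them as build artifacts.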
# Parse the attachments of the testcase to fetch the stdout of the test suite
def print_test_output(server, job):
    content = get_job_bundle_content(server, job)

    for run in content['test_runs']:
        if run['test_id'] == 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] == 'stdout.log':

                    # Decode the base64-encoded attachment and split it on
                    # newlines to iterate over the lines
                    testoutput = base64.b64decode(attachment['content']).decode('utf-8')

                    # Create an iterator over the lines so that the position
                    # is kept across the two loops below.
                    testoutput_iter = iter(testoutput.split('\n'))
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Stop printing once we reach the end of
                                    # the section
                                    break

                            print('----- TEST SUITE OUTPUT END -----')
                            break

def create_new_job(name, build_device):
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type': build_device,
        'tags': [],
        'timeout': 7200,
        'actions': []
    })
    if build_device == 'x86':
        job['tags'].append('dev-sda1')

    return job

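# Note: the 'dev-sda1' tag presumably selects x86 boards that expose a
# /dev/sda1 scratch partition; get_config_cmd() below mounts it on /tmp.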
def get_boot_cmd():
    command = OrderedDict({
        'command': 'boot_image'
    })
    return command

def get_config_cmd(build_device):
    packages = ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
                'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
                'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev',
                'libnuma-dev', 'python3-dev', 'swig', 'stress']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                'groupadd tracing'
            ],
            'timeout': 300
        }
    })
    if build_device == 'x86':
        command['parameters']['commands'].extend([
            'mount /dev/sda1 /tmp',
            'rm -rf /tmp/*'])

    command['parameters']['commands'].extend([
        'depmod -a',
        'locale-gen en_US.UTF-8',
        'apt-get update',
        'apt-get upgrade -y',
        'apt-get install -y {}'.format(' '.join(packages))
    ])
    return command

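# Each of the following get_*_cmd() helpers returns a single
# 'lava_test_shell' action that runs test definitions fetched from the
# master branch of the lttng-ci repository.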
def get_baremetal_benchmarks_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/failing-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/failing-ioctl.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/failing-open-efault.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/success-dup-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/raw-syscall-getpid.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/failing-open-enoent.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/lttng-test-filter.yml'
                }
            ],
            'timeout': 7200
        }
    })
    return command

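# The seven test definitions above produce the processed_results_*.csv
# files that fetch_benchmark_results() downloads once the job completes.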
def get_baremetal_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/perf-tests.yml'
                }
            ],
            'timeout': 3600
        }
    })
    return command

def get_kvm_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/kernel-tests.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/destructive-tests.yml'
                }
            ],
            'timeout': 7200
        }
    })
    return command

def get_kprobes_generate_data_cmd():
    random_seed = random.randint(0, 1000000)
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/kprobe-fuzzing-generate-data.yml',
                    'parameters': { 'RANDOM_SEED': str(random_seed) }
                }
            ],
            'timeout': 60
        }
    })
    return command

def get_kprobes_test_cmd(round_nb):
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/kprobe-fuzzing-tests.yml',
                    'parameters': { 'ROUND_NB': str(round_nb) }
                }
            ],
            'timeout': 1000
        }
    })
    return command

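# The RANDOM_SEED and ROUND_NB values are passed through to the fuzzing
# test definitions: the seed makes a fuzzing run reproducible, while the
# round number distinguishes the ten successive rounds in the results.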
def get_results_cmd(stream_name):
    command = OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/'
        }
    })
    command['parameters']['stream'] = '/anonymous/' + stream_name + '/'
    return command

def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'customize': {},
            'kernel': None,
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:',
            'username': 'root'
        }
    })

    command['parameters']['customize'][SCP_PATH + linux_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['customize'][SCP_PATH + lttng_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job

    return command

def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'overlays': [],
            'kernel': None,
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })

    command['parameters']['overlays'].append(str(SCP_PATH + linux_modules_path))
    command['parameters']['overlays'].append(str(SCP_PATH + lttng_modules_path))
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job
    if nb_iter is not None:
        command['metadata']['nb_iterations'] = nb_iter

    return command

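# Both deploy helpers point the dispatcher at artifacts on the storage
# server: the kernel image directly, and the Linux and LTTng module
# archives as overlays (x86) or customize entries (KVM).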
def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'pip3 install --upgrade pip',
                'hash -r',
                'pip3 install vlttng',
            ],
            'timeout': 3600
        }
    })

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
        ' --override projects.babeltrace.build-env.PYTHON=python3' \
        ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
        ' --profile babeltrace-stable-1.4' \
        ' --profile babeltrace-python' \
        ' --profile lttng-tools-master' \
        ' --override projects.lttng-tools.checkout=' + lttng_tools_commit + \
        ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master' \
            ' --override projects.lttng-ust.checkout=' + lttng_ust_commit + \
            ' --profile lttng-ust-no-man-pages'

    virtenv_path = None
    if build_device == 'kvm':
        virtenv_path = '/root/virtenv'
    else:
        virtenv_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + virtenv_path

    command['parameters']['commands'].append(vlttng_cmd)
    command['parameters']['commands'].append('ln -s ' + virtenv_path + ' /root/lttngvenv')
    command['parameters']['commands'].append('sync')

    return command

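# vlttng builds the requested LTTng stack (liburcu, babeltrace, lttng-tools
# and optionally lttng-ust at the given commits) into the virtualenv, which
# is then symlinked to /root/lttngvenv for the test definitions to use.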
def main():
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal or KVM tests using LAVA')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    parser.add_argument('-d', '--debug', required=False, action='store_true')
    args = parser.parse_args()

    if args.type not in TestType.values:
        print('argument -t/--type {} unrecognized.'.format(args.type))
        print('Possible values are:')
        for k in TestType.values:
            print('\t {}'.format(k))
        return -1
    test_type = TestType.values[args.type]

    lava_api_key = None
    if not args.debug:
        try:
            lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
        except KeyError as e:
            print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...', e)
            return -1

    if test_type == TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type == TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type == TestType.kvm_tests or test_type == TestType.kvm_fuzzing_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))

    j['actions'].append(get_boot_cmd())

    if test_type == TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type == TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type == TestType.kvm_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type == TestType.kvm_fuzzing_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_kprobes_generate_data_cmd())
        for i in range(10):
            j['actions'].append(get_kprobes_test_cmd(round_nb=i))
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    if args.debug:
        print(json.dumps(j, indent=4, separators=(',', ': ')))
        return 0

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('Lava jobid: {}'.format(jobid))
    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Check the status of the job every 30 seconds
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    running = False
    while jobstatus in ('Submitted', 'Running'):
        if not running and jobstatus == 'Running':
            print('Job started running')
            running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    if test_type == TestType.kvm_tests or test_type == TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type == TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1
    else:
        passed, failed = check_job_all_test_cases_state_count(server, jobid)
        print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

        if failed == 0:
            return 0
        else:
            return -1

if __name__ == "__main__":
    sys.exit(main())
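
# Example invocation (all paths and commit ids are illustrative; the paths
# must be reachable through SCP_PATH on the storage server):
#
#   LAVA_JENKINS_TOKEN=<token> ./lava-v2-submit.py \
#       --type kvm-tests --jobname my-lttng-job \
#       --kernel /kernel/vmlinuz --kmodule /modules/linux-modules.tar.gz \
#       --lmodule /modules/lttng-modules.tar.gz \
#       --tools-commit <lttng-tools sha1> --ust-commit <lttng-ust sha1>
#
# Pass -d/--debug to print the generated job definition as JSON and exit
# without submitting it.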