jjb: Lava: Add kretprobe fuzzing
[lttng-ci.git] / scripts / lttng-baremetal-tests / lava-submit.py
#!/usr/bin/python
# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import argparse
import base64
import json
import os
import sys
import time
import xmlrpc.client

from collections import OrderedDict
from enum import Enum

USERNAME = 'frdeso'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'

class TestType(Enum):
    baremetal_benchmarks=1
    baremetal_tests=2
    kvm_tests=3

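# Fetch the results bundle attached to a LAVA job over XML-RPC:
# scheduler.job_status() gives the bundle sha1 and dashboard.get() returns
# its JSON content.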
def get_job_bundle_content(server, job):
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)

    return json.loads(bundle['content'])

# Parse the results bundle to check that the run-tests testcase
# of the lttng-kernel-tests passed successfully
def check_job_all_test_cases_state_count(server, job):
    content = get_job_bundle_content(server, job)

    # FIXME: Those tests are part of the boot actions and fail randomly but
    # don't affect the behaviour of the tests. We should update our Lava
    # installation and try to reproduce it. This error was encountered on
    # Ubuntu 16.04.
    tests_known_to_fail=['mount', 'df', 'ls', 'ip', 'wait_for_test_image_prompt']

    passed_tests=0
    failed_tests=0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                if result['result'] in 'pass':
                    passed_tests+=1
                elif result['test_case_id'] in tests_known_to_fail:
                    pass
                else:
                    failed_tests+=1
    return (passed_tests, failed_tests)

# Get the benchmark results from the LAVA bundle and
# save them as CSV files locally
def fetch_benchmark_results(server, job):
    content = get_job_bundle_content(server, job)
    testcases = ['processed_results_close.csv',
                 'processed_results_ioctl.csv',
                 'processed_results_open_efault.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_dup_close.csv',
                 'processed_results_raw_syscall_getpid.csv',
                 'processed_results_lttng_test_filter.csv']

    # The result bundle is a large JSON containing the results of every testcase
    # of the LAVA job as well as the files that were attached during the run.
    # We need to iterate over this JSON to get the base64 representation of the
    # benchmark results produced during the run.
    for run in content['test_runs']:
        # We only care about the benchmark testcases
        if 'benchmark-' in run['test_id']:
            if 'test_results' in run:
                for res in run['test_results']:
                    if 'attachments' in res:
                        for a in res['attachments']:
                            # We only save the results files
                            if a['pathname'] in testcases:
                                with open(a['pathname'],'wb') as f:
                                    # Convert the b64 representation of the
                                    # result file and write it to a file
                                    # in the current working directory
                                    f.write(base64.b64decode(a['content']))

# Parse the attachments of the testcase to fetch the stdout of the test suite
def print_test_output(server, job):
    content = get_job_bundle_content(server, job)
    found = False

    for run in content['test_runs']:
        if run['test_id'] in 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] in 'stdout.log':

                    # Decode the base64 file and split on newlines to iterate
                    # on the list
                    testoutput = str(base64.b64decode(bytes(attachment['content'], encoding='UTF-8'))).split('\n')

                    # Create a generator to iterate on the lines while keeping
                    # the state of the iterator across the two loops.
                    testoutput_iter = iter(testoutput)
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            found = True
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Print until we reach the end of the
                                    # section
                                    break

                        if found is True:
                            print('----- TEST SUITE OUTPUT END -----')
                            break

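# Build the base LAVA (v1 JSON) job definition: job name, device type,
# global timeout and an empty list of actions. x86 jobs get the 'dev-sda1'
# device tag.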
def create_new_job(name, build_device):
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type':build_device,
        'tags': [ ],
        'timeout': 18000,
        'actions': []
    })
    if build_device in 'x86':
        job['tags'].append('dev-sda1')

    return job

def get_boot_cmd():
    command = OrderedDict({
        'command': 'boot_image'
    })
    return command

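# Build the lava_command_run action that prepares the target: set up DNS,
# create the 'tracing' group, wipe the scratch partition on x86 and install
# the build dependencies with apt-get.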
def get_config_cmd(build_device):
    packages=['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip', \
              'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev', \
              'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev', \
              'libnuma-dev', 'python3-dev', 'swig', 'stress']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                'groupadd tracing'
            ],
            'timeout':300
        }
    })
    if build_device in 'x86':
        command['parameters']['commands'].extend([
            'mount /dev/sda1 /tmp',
            'rm -rf /tmp/*'])

    command['parameters']['commands'].extend([
        'depmod -a',
        'locale-gen en_US.UTF-8',
        'apt-get update',
        'apt-get upgrade',
        'apt-get install -y {}'.format(' '.join(packages))
    ])
    return command

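# lava_test_shell action running the syscall benchmark test definitions
# (close, ioctl, open, dup/close, getpid, lttng-test-filter) from the
# lttng-ci repository.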
def get_baremetal_benchmarks_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-ioctl.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/success-dup-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/raw-syscall-getpid.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/lttng-test-filter.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

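# lava_test_shell action running the perf test definition on the bare-metal
# target.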
def get_baremetal_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/perf-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

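# lava_test_shell action running the kernel, destructive and kprobe fuzzing
# test definitions inside the KVM guest.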
def get_kvm_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/kernel-tests.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/destructive-tests.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/kprobe-fuzzing-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

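# submit_results action pushing the job results to the given anonymous
# bundle stream on the LAVA master.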
def get_results_cmd(stream_name):
    command = OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/'
        }
    })
    command['parameters']['stream']='/anonymous/'+stream_name+'/'
    return command

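# deploy_kernel action for the KVM device: the kernel image is fetched over
# SCP and the linux and lttng-modules archives are extracted into the root
# filesystem.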
def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'customize': {},
            'kernel': None,
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:',
            'username': 'root'
        }
    })

    command['parameters']['customize'][SCP_PATH+linux_modules_path]=['rootfs:/','archive']
    command['parameters']['customize'][SCP_PATH+lttng_modules_path]=['rootfs:/','archive']
    command['parameters']['kernel'] = str(SCP_PATH+kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job

    return command

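# deploy_kernel action for the x86 board: the kernel image is fetched over
# SCP and the modules are applied as overlays on top of the NFS root
# filesystem.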
def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'overlays': [],
            'kernel': None,
            'nfsrootfs': str(SCP_PATH+'/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })

    command['parameters']['overlays'].append( str(SCP_PATH+linux_modules_path))
    command['parameters']['overlays'].append( str(SCP_PATH+lttng_modules_path))
    command['parameters']['kernel'] = str(SCP_PATH+kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job
    if nb_iter is not None:
        command['metadata']['nb_iterations'] = nb_iter

    return command

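# lava_command_run action that clones the benchmark suite and builds the
# LTTng stack at the requested commits in a virtualenv using vlttng.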
def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'pip3 install --upgrade pip',
                'hash -r',
                'git clone https://github.com/frdeso/syscall-bench-it.git bm',
                'pip3 install vlttng',
            ],
            'timeout': 18000
        }
    })

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
                 ' --override projects.babeltrace.build-env.PYTHON=python3' \
                 ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
                 ' --profile babeltrace-stable-1.4' \
                 ' --profile babeltrace-python' \
                 ' --profile lttng-tools-master' \
                 ' --override projects.lttng-tools.checkout='+lttng_tools_commit + \
                 ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master ' \
                      ' --override projects.lttng-ust.checkout='+lttng_ust_commit+ \
                      ' --profile lttng-ust-no-man-pages'

    virtenv_path = None
    if build_device in 'kvm':
        virtenv_path = '/root/virtenv'
    else:
        virtenv_path = '/tmp/virtenv'

    vlttng_cmd += ' '+virtenv_path

    command['parameters']['commands'].append(vlttng_cmd)
    command['parameters']['commands'].append('ln -s '+virtenv_path+' /root/lttngvenv')
    command['parameters']['commands'].append('sync')

    return command

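# Parse the command line, build the LAVA job for the requested test type,
# submit it over XML-RPC and poll its status until completion.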
def main():
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()

    if args.type in 'baremetal-benchmarks':
        test_type = TestType.baremetal_benchmarks
    elif args.type in 'baremetal-tests':
        test_type = TestType.baremetal_tests
    elif args.type in 'kvm-tests':
        test_type = TestType.kvm_tests
    else:
        print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1

    lava_api_key = None
    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except Exception as e:
        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...', e)
        return -1

    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.kvm_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))

    j['actions'].append(get_boot_cmd())

    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('Lava jobid:{}'.format(jobid))
    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Check the status of the job every 30 seconds
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    not_running = False
    while jobstatus in 'Submitted' or jobstatus in 'Running':
        if not_running is False and jobstatus in 'Running':
            print('Job started running')
            not_running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus not in 'Complete':
        return -1
    else:
        passed, failed = check_job_all_test_cases_state_count(server, jobid)
        print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

        if failed == 0:
            return 0
        else:
            return -1

if __name__ == "__main__":
    sys.exit(main())
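
# Example invocation (illustrative only; the paths and commit ids below are
# placeholders, not actual artifacts):
#
#   LAVA_JENKINS_TOKEN=<token> ./lava-submit.py \
#       -t kvm-tests \
#       -j lttng-kernel-tests \
#       -k /kernel/vmlinuz \
#       -km /kernel/linux-modules.tar.gz \
#       -lm /kernel/lttng-modules.tar.gz \
#       -tc <lttng-tools commit id> \
#       -uc <lttng-ust commit id>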