jjb: lava: remove useless and periodically failing git clone
lttng-ci.git: scripts/lttng-baremetal-tests/lava-submit.py
#!/usr/bin/python3
# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import argparse
import base64
import json
import os
import sys
import time
import xmlrpc.client
from collections import OrderedDict
from enum import Enum

USERNAME = 'frdeso'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'

class TestType(Enum):
    baremetal_benchmarks=1
    baremetal_tests=2
    kvm_tests=3
    kvm_fuzzing_tests=4

def get_job_bundle_content(server, job):
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        # Without the bundle there is nothing to parse; re-raise instead of
        # falling through with 'bundle' unbound.
        print('Error while fetching results bundle', f.faultString)
        raise

    return json.loads(bundle['content'])

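# For reference, the decoded bundle is expected to look roughly like the
# sketch below. This is illustrative only (the field values are made up);
# the exact schema is defined by the LAVA server's dashboard, not by this
# script.
#
#   {
#       "test_runs": [
#           {
#               "test_id": "lttng-kernel-test",
#               "test_results": [
#                   {"test_case_id": "run-tests", "result": "pass"},
#                   ...
#               ],
#               "attachments": [
#                   {"pathname": "stdout.log", "content": "<base64 data>"},
#                   ...
#               ]
#           }
#       ]
#   }
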
# Parse the results bundle to check whether the run-tests testcase
# of the lttng-kernel-tests passed successfully
def check_job_all_test_cases_state_count(server, job):
    content = get_job_bundle_content(server, job)

    # FIXME: Those tests are part of the boot actions and fail randomly but
    # don't affect the behaviour of the tests. We should update our Lava
    # installation and try to reproduce it. This error was encountered on
    # Ubuntu 16.04.
    tests_known_to_fail=['mount', 'df', 'ls', 'ip', 'wait_for_test_image_prompt']

    passed_tests=0
    failed_tests=0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                if result['result'] == 'pass':
                    passed_tests+=1
                elif result['test_case_id'] in tests_known_to_fail:
                    pass
                else:
                    failed_tests+=1
    return (passed_tests, failed_tests)

# Get the benchmark results from the lava bundle and
# save them as CSV files locally
def fetch_benchmark_results(server, job):
    content = get_job_bundle_content(server, job)
    testcases = ['processed_results_close.csv',
                 'processed_results_ioctl.csv',
                 'processed_results_open_efault.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_dup_close.csv',
                 'processed_results_raw_syscall_getpid.csv',
                 'processed_results_lttng_test_filter.csv']

    # The result bundle is a large JSON containing the results of every
    # testcase of the LAVA job as well as the files that were attached during
    # the run. We need to iterate over this JSON to get the base64
    # representation of the benchmark results produced during the run.
    for run in content['test_runs']:
        # We only care about the benchmark testcases
        if 'benchmark-' in run['test_id']:
            if 'test_results' in run:
                for res in run['test_results']:
                    if 'attachments' in res:
                        for a in res['attachments']:
                            # We only save the results files
                            if a['pathname'] in testcases:
                                with open(a['pathname'], 'wb') as f:
                                    # Decode the base64 representation of the
                                    # result file and write it to a file
                                    # in the current working directory
                                    f.write(base64.b64decode(a['content']))

# Parse the attachments of the testcase to fetch the stdout of the test suite
def print_test_output(server, job):
    content = get_job_bundle_content(server, job)
    found = False

    for run in content['test_runs']:
        if run['test_id'] == 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] == 'stdout.log':

                    # Decode the base64 attachment and split it on newlines
                    # to get a list of lines. Note that decode() (not str())
                    # is needed to turn the decoded bytes into text.
                    raw_output = base64.b64decode(attachment['content'])
                    testoutput = raw_output.decode('utf-8', errors='replace').split('\n')

                    # Create an iterator over the lines so that the position
                    # in the output is kept across the two loops.
                    testoutput_iter = iter(testoutput)
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            found = True
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Stop printing once we reach the end of
                                    # the section
                                    break

                    if found is True:
                        print('----- TEST SUITE OUTPUT END -----')
                        break

def create_new_job(name, build_device):
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type': build_device,
        'tags': [],
        'timeout': 7200,
        'actions': []
    })
    if build_device == 'x86':
        job['tags'].append('dev-sda1')

    return job

def get_boot_cmd():
    command = OrderedDict({
        'command': 'boot_image'
    })
    return command

def get_config_cmd(build_device):
    packages=['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
              'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
              'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev',
              'libnuma-dev', 'python3-dev', 'swig', 'stress']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                'groupadd tracing'
            ],
            'timeout': 300
        }
    })
    if build_device == 'x86':
        command['parameters']['commands'].extend([
            'mount /dev/sda1 /tmp',
            'rm -rf /tmp/*'])

    command['parameters']['commands'].extend([
        'depmod -a',
        'locale-gen en_US.UTF-8',
        'apt-get update',
        'apt-get upgrade',
        'apt-get install -y {}'.format(' '.join(packages))
    ])
    return command

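# Note: each of the following benchmark test definitions presumably produces
# one of the processed_results_*.csv files that fetch_benchmark_results()
# later pulls out of the results bundle; the YAML files themselves live in
# the lttng-ci repository under lava/baremetal-tests/.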
def get_baremetal_benchmarks_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-ioctl.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/success-dup-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/raw-syscall-getpid.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/lttng-test-filter.yml'
                }
            ],
            'timeout': 7200
        }
    })
    return command

def get_baremetal_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/perf-tests.yml'
                }
            ],
            'timeout': 3600
        }
    })
    return command

def get_kvm_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/kernel-tests.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/destructive-tests.yml'
                }
            ],
            'timeout': 7200
        }
    })
    return command

def get_kprobes_test_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/kprobe-fuzzing-tests.yml'
                }
            ],
            'timeout': 7200
        }
    })
    return command

def get_results_cmd(stream_name):
    command = OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/'
        }
    })
    command['parameters']['stream'] = '/anonymous/'+stream_name+'/'
    return command

def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'customize': {},
            'kernel': None,
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:',
            'username': 'root'
        }
    })

    command['parameters']['customize'][SCP_PATH+linux_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['customize'][SCP_PATH+lttng_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['kernel'] = str(SCP_PATH+kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job

    return command
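
# Note on the deploy parameters: judging from how they are built above and in
# get_deploy_cmd_x86() below, 'customize' appears to map an scp:// source
# artifact to a [destination, 'archive'] pair that the dispatcher unpacks
# into the rootfs, while the x86 flavour ships the same artifacts as
# 'overlays'. This is an observation about our LAVA setup, not a documented
# contract.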

def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'overlays': [],
            'kernel': None,
            'nfsrootfs': str(SCP_PATH+'/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })

    command['parameters']['overlays'].append(str(SCP_PATH+linux_modules_path))
    command['parameters']['overlays'].append(str(SCP_PATH+lttng_modules_path))
    command['parameters']['kernel'] = str(SCP_PATH+kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job
    if nb_iter is not None:
        command['metadata']['nb_iterations'] = nb_iter

    return command

def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'pip3 install --upgrade pip',
                'hash -r',
                'pip3 install vlttng',
            ],
            'timeout': 3600
        }
    })

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
        ' --override projects.babeltrace.build-env.PYTHON=python3' \
        ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
        ' --profile babeltrace-stable-1.4' \
        ' --profile babeltrace-python' \
        ' --profile lttng-tools-master' \
        ' --override projects.lttng-tools.checkout='+lttng_tools_commit + \
        ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master' \
            ' --override projects.lttng-ust.checkout='+lttng_ust_commit + \
            ' --profile lttng-ust-no-man-pages'

    virtenv_path = None
    if build_device == 'kvm':
        virtenv_path = '/root/virtenv'
    else:
        virtenv_path = '/tmp/virtenv'

    # The virtualenv path is passed as the last positional argument of the
    # vlttng command.
    vlttng_cmd += ' '+virtenv_path
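
    # For illustration, with a made-up commit id and the kvm virtenv path,
    # the assembled command looks roughly like (wrapped here for readability):
    #
    #   vlttng --jobs=$(nproc) --profile urcu-master \
    #       --override projects.babeltrace.build-env.PYTHON=python3 \
    #       --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config \
    #       --profile babeltrace-stable-1.4 --profile babeltrace-python \
    #       --profile lttng-tools-master \
    #       --override projects.lttng-tools.checkout=1234abcd \
    #       --profile lttng-tools-no-man-pages /root/virtenv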

    command['parameters']['commands'].append(vlttng_cmd)
    command['parameters']['commands'].append('ln -s '+virtenv_path+' /root/lttngvenv')
    command['parameters']['commands'].append('sync')

    return command

def main():
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal and KVM tests using LAVA')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()
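
    # Example invocation (illustrative values only; the kernel and module
    # paths are appended to SCP_PATH, so they are paths on the storage
    # server):
    #
    #   LAVA_JENKINS_TOKEN=<token> ./lava-submit.py \
    #       -t kvm-tests -j my-job-name \
    #       -k /kernel/vmlinuz -km /modules/linux-modules.tar.gz \
    #       -lm /modules/lttng-modules.tar.gz \
    #       -tc <lttng-tools commit id> -uc <lttng-ust commit id>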

    if args.type == 'baremetal-benchmarks':
        test_type = TestType.baremetal_benchmarks
    elif args.type == 'baremetal-tests':
        test_type = TestType.baremetal_tests
    elif args.type == 'kvm-tests':
        test_type = TestType.kvm_tests
    elif args.type == 'kvm-fuzzing-tests':
        test_type = TestType.kvm_fuzzing_tests
    else:
        print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1

    lava_api_key = None
    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except KeyError as e:
        print('LAVA_JENKINS_TOKEN not found in the environment variables. Exiting...', e)
        return -1

    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.kvm_tests or test_type is TestType.kvm_fuzzing_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))

    j['actions'].append(get_boot_cmd())

    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_fuzzing_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kprobes_test_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

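    # The LAVA XML-RPC endpoint authenticates through the URL itself
    # (http://<user>:<token>@<host>/RPC2), so the API token ends up embedded
    # in the URL; be careful with anything that logs it.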
    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('Lava jobid:{}'.format(jobid))
    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Check the status of the job every 30 seconds
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    job_started = False
    while jobstatus == 'Submitted' or jobstatus == 'Running':
        if job_started is False and jobstatus == 'Running':
            print('Job started running')
            job_started = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1
    else:
        passed, failed = check_job_all_test_cases_state_count(server, jobid)
        print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

    if failed == 0:
        return 0
    else:
        return -1

if __name__ == "__main__":
    sys.exit(main())