LAVA: Upload results to obj.internal.efficios.com
[lttng-ci.git] / scripts / system-tests / lava-submit.py
1 #!/usr/bin/python
2 # Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
3 #
4 # This program is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU General Public License as published by
6 # the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16
17 import argparse
18 import base64
19 import json
20 import os
21 import random
22 import sys
23 import time
24 import xmlrpc.client
25 from urllib.parse import urljoin
26 from urllib.request import urlretrieve
27 from collections import OrderedDict
28 from enum import Enum
29
# LAVA user on whose behalf jobs are submitted over XML-RPC.
USERNAME = 'frdeso'
# LAVA master host that receives job submissions.
HOSTNAME = 'lava-master.internal.efficios.com'
# SCP base URL from which the LAVA dispatcher fetches kernel/module artifacts.
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
# Object store holding the per-build benchmark result CSV files.
OBJSTORE_URL = "https://obj.internal.efficios.com/lava/results/"
34
class TestType(Enum):
    """Flavours of system-test jobs this script can submit to LAVA."""

    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3
    kvm_fuzzing_tests = 4
40
def get_job_bundle_content(server, job):
    """Fetch and deserialize the results bundle of a LAVA job.

    server -- authenticated xmlrpc.client.ServerProxy to the LAVA master
    job -- LAVA job id; converted to str for the scheduler API

    Returns the bundle content decoded from its JSON representation.
    Raises xmlrpc.client.Fault if either LAVA API call fails.
    """
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)
        # Bare `raise` re-raises the active exception and keeps the
        # original traceback intact (idiomatic, unlike `raise f`).
        raise

    return json.loads(bundle['content'])
50
# Parse the results bundle to see the run-tests testcase
# of the lttng-kernel-tests passed successfully
def check_job_all_test_cases_state_count(server, job):
    """Count passed and failed test cases in a job's results bundle.

    server -- authenticated xmlrpc.client.ServerProxy to the LAVA master
    job -- LAVA job id

    Returns a (passed_tests, failed_tests) tuple. Failures of the
    known-flaky boot-action tests are ignored.
    """
    content = get_job_bundle_content(server, job)

    # FIXME: Those tests are part of the boot actions and fail randomly but
    # don't affect the behaviour of the tests. We should update our Lava
    # installation and try to reproduce it. This error was encountered on
    # Ubuntu 16.04.
    tests_known_to_fail = ['mount', 'df', 'ls', 'ip', 'wait_for_test_image_prompt']

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            # Entries without a test_case_id are not test cases; skip them.
            if 'test_case_id' in result:
                # Bug fix: the original used `result['result'] in 'pass'`,
                # which is substring matching — '' (and e.g. 'as') would be
                # counted as a pass. Compare for equality instead.
                if result['result'] == 'pass':
                    passed_tests += 1
                elif result['test_case_id'] in tests_known_to_fail:
                    # Known flaky boot-action test: ignore its failure.
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)
74
# Get the benchmark results from the lava bundle
# save them as CSV files localy
def fetch_benchmark_results(build_id):
    """Download the benchmark CSV files of a build from the object store.

    build_id -- Jenkins build id used as the directory key in the store.

    Each CSV is saved in the current working directory under its own name.
    """
    testcases = ['processed_results_close.csv',
                 'processed_results_ioctl.csv',
                 'processed_results_open_efault.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_dup_close.csv',
                 'processed_results_raw_syscall_getpid.csv',
                 'processed_results_lttng_test_filter.csv']
    for testcase in testcases:
        url = urljoin(OBJSTORE_URL, "{:s}/{:s}".format(build_id, testcase))
        # Bug fix: the original called urlretrieve(url, case) with the
        # undefined name `case`, raising NameError on the first iteration.
        urlretrieve(url, testcase)
88
# Parse the attachment of the testcase to fetch the stdout of the test suite
def print_test_output(server, job):
    """Print the test-suite stdout captured in a job's results bundle.

    Scans the bundle for the lttng-kernel-test run's stdout.log attachment
    and prints only the section between the LAVA start/end test-case
    signals for 'run-tests'.
    """
    content = get_job_bundle_content(server, job)
    found = False  # NOTE(review): assigned but never read; kept as-is.

    for run in content['test_runs']:
        # NOTE(review): `in` is substring matching here (not equality);
        # any test_id that is a substring of 'lttng-kernel-test' matches.
        if run['test_id'] in 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] in 'stdout.log':

                    # Decode the base64 file and split on newlines to iterate
                    # on list
                    testoutput = str(base64.b64decode(bytes(attachment['content'], encoding='UTF-8')))

                    # str() of the decoded bytes keeps newlines as the
                    # two-character escape '\n'; convert them back to
                    # real newlines before splitting.
                    testoutput = testoutput.replace('\\n', '\n')

                    # Create a generator to iterate on the lines and keeping
                    # the state of the iterator across the two loops.
                    testoutput_iter = iter(testoutput.split('\n'))
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            # The inner loop shares testoutput_iter, so it
                            # resumes right after the header line.
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Print until we reach the end of the
                                    # section
                                    break

                            print('----- TEST SUITE OUTPUT END -----')
                            break
124
def create_new_job(name, build_device):
    """Return a skeleton LAVA job definition for the given device type.

    name -- human-readable job name shown in the LAVA scheduler
    build_device -- device type string ('x86' or 'kvm')
    """
    # Physical x86 boards need the tag selecting machines with a
    # /dev/sda1 scratch partition.
    tags = ['dev-sda1'] if build_device in 'x86' else []
    return OrderedDict([
        ('health_check', False),
        ('job_name', name),
        ('device_type', build_device),
        ('tags', tags),
        ('timeout', 7200),
        ('actions', []),
    ])
138
def get_boot_cmd():
    """Return the LAVA action that boots the previously deployed image."""
    return OrderedDict([('command', 'boot_image')])
144
def get_config_cmd(build_device):
    """Return the lava_command_run action that configures the target.

    Sets up DNS, a 'tracing' group, module dependencies, the locale and
    the build dependencies; on x86 it also mounts and wipes the scratch
    partition.
    """
    packages = ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
                'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
                'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev',
                'libnuma-dev', 'python3-dev', 'swig', 'stress']

    shell_cmds = [
        'cat /etc/resolv.conf',
        'echo nameserver 172.18.0.12 > /etc/resolv.conf',
        'groupadd tracing',
    ]
    if build_device in 'x86':
        # Reuse /dev/sda1 as scratch space and clear any stale state.
        shell_cmds += ['mount /dev/sda1 /tmp',
                       'rm -rf /tmp/*']
    shell_cmds += [
        'depmod -a',
        'locale-gen en_US.UTF-8',
        'apt-get update',
        'apt-get upgrade',
        'apt-get install -y {}'.format(' '.join(packages)),
    ]

    return OrderedDict([
        ('command', 'lava_command_run'),
        ('parameters', {'commands': shell_cmds, 'timeout': 300}),
    ])
174
def get_baremetal_benchmarks_cmd(build_id):
    """Return the lava_test_shell action running all benchmark testdefs.

    build_id -- Jenkins build id forwarded to each testdef so results are
    uploaded under the right key.
    """
    testdef_names = [
        'failing-close.yml',
        'failing-ioctl.yml',
        'failing-open-efault.yml',
        'success-dup-close.yml',
        'raw-syscall-getpid.yml',
        'failing-open-enoent.yml',
        'lttng-test-filter.yml',
    ]
    # Every benchmark uses the same repository/revision; only the testdef
    # path differs.
    repos = [
        {
            'git-repo': 'https://github.com/lttng/lttng-ci.git',
            'revision': 'master',
            'testdef': 'lava/system-tests/' + name,
            'parameters': {'JENKINS_BUILD_ID': build_id},
        }
        for name in testdef_names
    ]
    return OrderedDict([
        ('command', 'lava_test_shell'),
        ('parameters', {'testdef_repos': repos, 'timeout': 7200}),
    ])
227
def get_baremetal_tests_cmd(build_id):
    """Return the lava_test_shell action running the perf test suite.

    build_id -- Jenkins build id forwarded to the testdef.
    """
    perf_testdef = {
        'git-repo': 'https://github.com/lttng/lttng-ci.git',
        'revision': 'master',
        'testdef': 'lava/system-tests/perf-tests.yml',
        'parameters': {'JENKINS_BUILD_ID': build_id},
    }
    return OrderedDict([
        ('command', 'lava_test_shell'),
        ('parameters', {'testdef_repos': [perf_testdef], 'timeout': 3600}),
    ])
244
def get_kvm_tests_cmd(build_id):
    """Return the lava_test_shell action running the KVM test suites.

    build_id -- Jenkins build id forwarded to each testdef.
    """
    # Kernel tests first, then the destructive tests.
    repos = [
        {
            'git-repo': 'https://github.com/lttng/lttng-ci.git',
            'revision': 'master',
            'testdef': 'lava/system-tests/' + name,
            'parameters': {'JENKINS_BUILD_ID': build_id},
        }
        for name in ('kernel-tests.yml', 'destructive-tests.yml')
    ]
    return OrderedDict([
        ('command', 'lava_test_shell'),
        ('parameters', {'testdef_repos': repos, 'timeout': 7200}),
    ])
267
def get_kprobes_generate_data_cmd():
    """Return the action generating the kprobe fuzzing input data.

    A fresh random seed in [0, 1000000] is passed to the testdef so each
    submission fuzzes a different set of instrumentation points.
    """
    seed = random.randint(0, 1000000)
    generate_testdef = {
        'git-repo': 'https://github.com/lttng/lttng-ci.git',
        'revision': 'master',
        'testdef': 'lava/system-tests/kprobe-fuzzing-generate-data.yml',
        'parameters': {'RANDOM_SEED': str(seed)},
    }
    return OrderedDict([
        ('command', 'lava_test_shell'),
        ('parameters', {'testdef_repos': [generate_testdef], 'timeout': 60}),
    ])
285
def get_kprobes_test_cmd(round_nb):
    """Return the action running one round of kprobe fuzzing tests.

    round_nb -- round index, forwarded to the testdef as ROUND_NB.
    """
    fuzzing_testdef = {
        'git-repo': 'https://github.com/lttng/lttng-ci.git',
        'revision': 'master',
        'testdef': 'lava/system-tests/kprobe-fuzzing-tests.yml',
        'parameters': {'ROUND_NB': str(round_nb)},
    }
    return OrderedDict([
        ('command', 'lava_test_shell'),
        ('parameters', {'testdef_repos': [fuzzing_testdef], 'timeout': 1000}),
    ])
302
def get_results_cmd(stream_name):
    """Return the submit_results action for the given anonymous stream.

    stream_name -- bundle stream name (e.g. 'tests-kernel').
    """
    return OrderedDict([
        ('command', 'submit_results'),
        ('parameters', {
            'server': 'http://lava-master.internal.efficios.com/RPC2/',
            'stream': '/anonymous/' + stream_name + '/',
        }),
    ])
312
def get_deploy_cmd_kvm(jenkins_job, kernel_path, lttng_modules_path):
    """Return the deploy_kernel action for a KVM target.

    jenkins_job -- Jenkins job name, recorded in the action metadata
    kernel_path -- storage path of the kernel image (appended to SCP_PATH)
    lttng_modules_path -- storage path of the lttng-modules archive,
    unpacked into the rootfs via the 'customize' mechanism
    """
    modules_src = SCP_PATH + lttng_modules_path
    return OrderedDict([
        ('command', 'deploy_kernel'),
        ('metadata', {'jenkins_jobname': jenkins_job}),
        ('parameters', {
            'customize': {modules_src: ['rootfs:/', 'archive']},
            'kernel': str(SCP_PATH + kernel_path),
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:',
            'username': 'root',
        }),
    ])
332
def get_deploy_cmd_x86(jenkins_job, kernel_path, lttng_modules_path, nb_iter=None):
    """Return the deploy_kernel action for a physical x86 target.

    jenkins_job -- Jenkins job name, recorded in the action metadata
    kernel_path -- storage path of the kernel image (appended to SCP_PATH)
    lttng_modules_path -- storage path of the lttng-modules overlay
    nb_iter -- optional iteration count stored in the metadata
    """
    metadata = {'jenkins_jobname': jenkins_job}
    if nb_iter is not None:
        metadata['nb_iterations'] = nb_iter

    return OrderedDict([
        ('command', 'deploy_kernel'),
        ('metadata', metadata),
        ('parameters', {
            'overlays': [str(SCP_PATH + lttng_modules_path)],
            'kernel': str(SCP_PATH + kernel_path),
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu',
        }),
    ])
352
353
def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
    """Return the lava_command_run action that builds the test environment.

    Installs vlttng via pip and uses it to build lttng-tools (and
    optionally lttng-ust) at the requested commits inside a virtual
    environment, then symlinks it to /root/lttngvenv.

    build_device -- 'kvm' or 'x86'; selects where the virtenv lives
    lttng_tools_commit -- lttng-tools commit to check out
    lttng_ust_commit -- optional lttng-ust commit to check out
    """
    # KVM guests build under /root; physical machines use the /tmp scratch.
    virtenv_path = '/root/virtenv' if build_device in 'kvm' else '/tmp/virtenv'

    vlttng_cmd = (
        'vlttng --jobs=$(nproc) --profile urcu-master'
        ' --override projects.babeltrace.build-env.PYTHON=python3'
        ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config'
        ' --profile babeltrace-stable-1.4'
        ' --profile babeltrace-python'
        ' --profile lttng-tools-master'
        ' --override projects.lttng-tools.checkout=' + lttng_tools_commit +
        ' --profile lttng-tools-no-man-pages'
    )

    if lttng_ust_commit is not None:
        # NOTE: the two adjacent literals intentionally produce a double
        # space after 'lttng-ust-master', matching the original command.
        vlttng_cmd += (
            ' --profile lttng-ust-master '
            ' --override projects.lttng-ust.checkout=' + lttng_ust_commit +
            ' --profile lttng-ust-no-man-pages'
        )

    vlttng_cmd += ' ' + virtenv_path

    setup_commands = [
        'pip3 install --upgrade pip',
        'hash -r',
        'pip3 install vlttng',
        vlttng_cmd,
        'ln -s ' + virtenv_path + ' /root/lttngvenv',
        'sync',
    ]

    return OrderedDict([
        ('command', 'lava_command_run'),
        ('parameters', {'commands': setup_commands, 'timeout': 3600}),
    ])
394
def main():
    """Build, submit and monitor a LAVA job; return a shell exit status.

    Returns 0 when the job completes with no failed test case, -1 on bad
    arguments, missing environment, incomplete job or test failures.
    """
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-id', '--build-id', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()

    # NOTE(review): `in` performs substring matching, not equality — any
    # substring of these literals would also be accepted as a valid type.
    if args.type in 'baremetal-benchmarks':
        test_type = TestType.baremetal_benchmarks
    elif args.type in 'baremetal-tests':
        test_type = TestType.baremetal_tests
    elif args.type in 'kvm-tests':
        test_type = TestType.kvm_tests
    elif args.type in 'kvm-fuzzing-tests':
        test_type = TestType.kvm_fuzzing_tests
    else:
        print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1

    # The LAVA API token is injected by Jenkins through the environment.
    lava_api_key = None
    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except Exception as e:
        print('LAVA_JENKINS_TOKEN not found in the environment variable. Exiting...', e )
        return -1

    # Create the job skeleton and the deploy action matching the device.
    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.lmodule))
    elif test_type is TestType.kvm_tests or test_type is TestType.kvm_fuzzing_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.lmodule))

    j['actions'].append(get_boot_cmd())

    # Append the per-type configuration, test and result-submission actions.
    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd(args.build_id))
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Tests runs need -uc/--ust-commit options. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd(args.build_id))
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Tests runs need -uc/--ust-commit options. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd(args.build_id))
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_fuzzing_tests:
        # NOTE(review): --ust-commit is required here but never used below.
        if args.ust_commit is None:
            print('Tests runs need -uc/--ust-commit options. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_kprobes_generate_data_cmd())
        # Run ten fuzzing rounds against the generated data set.
        for i in range(10):
            j['actions'].append(get_kprobes_test_cmd(round_nb=i))
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    # Submit the job over authenticated XML-RPC.
    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('Lava jobid:{}'.format(jobid))
    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    #Check the status of the job every 30 seconds
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    not_running = False
    # NOTE(review): `in` is substring matching on the status strings.
    while jobstatus in 'Submitted' or jobstatus in 'Running':
        if not_running is False and jobstatus in 'Running':
            print('Job started running')
            not_running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    # Job is finished: fetch the output relevant to this test type.
    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(args.build_id)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus not in 'Complete':
        return -1
    else:
        # Completed: derive the exit status from the per-test-case counts.
        passed, failed=check_job_all_test_cases_state_count(server, jobid)
        print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

        if failed == 0:
            return 0
        else:
            return -1
504
# Script entry point: propagate main()'s return code as the process exit
# status (0 on success, -1 on failure).
if __name__ == "__main__":
    sys.exit(main())
This page took 0.040948 seconds and 4 git commands to generate.