jjb: Add ioctl testcase to Lava benchmarks
[lttng-ci.git] / scripts / lttng-baremetal-tests / lava-submit.py
1 #!/usr/bin/python
2 # Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
3 #
4 # This program is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU General Public License as published by
6 # the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16
17 import argparse
18 import base64
19 import json
20 import os
21 import sys
22 import time
23 import xmlrpc.client
24 from collections import OrderedDict
25 from enum import Enum
26
# Credentials and endpoints used to talk to the LAVA master (XML-RPC) and to
# the storage host from which kernels/modules are deployed over SCP.
USERNAME = 'frdeso'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
30
class TestType(Enum):
    """Kinds of LAVA jobs this script knows how to submit."""
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3
35
def get_job_bundle_content(server, job):
    """Fetch and decode the results bundle of a LAVA job.

    server: an xmlrpc.client.ServerProxy connected to the LAVA master.
    job: the job id (converted to str for the scheduler API).

    Returns the bundle content parsed from JSON.
    Raises xmlrpc.client.Fault if the bundle cannot be fetched.
    """
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)
        # Re-raise: the original code fell through and crashed with a
        # confusing NameError on 'bundle' being unassigned.
        raise

    return json.loads(bundle['content'])
44
# Parse the results bundle to count how many testcases of the
# lttng-kernel-tests passed and failed.
def check_job_all_test_cases_state_count(server, job):
    """Return a (passed, failed) tuple of testcase counts for the job.

    Testcase results are compared with equality instead of the previous
    substring test ('x in "pass"'), which wrongly matched the empty
    string or any substring such as 'as'.
    """
    content = get_job_bundle_content(server, job)

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                if result['result'] == 'pass':
                    passed_tests += 1
                elif result['test_case_id'] == 'wait_for_test_image_prompt':
                    # FIXME:This test is part of the boot action and fails
                    # randomly but doesn't affect the behaviour of the tests.
                    # No reply on the Lava IRC channel yet. We should update
                    # our Lava installation and try to reproduce it. This error
                    # was encountered ont the KVM trusty image only. Not seen
                    # on Xenial at this point.
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)
68
# Get the benchmark results from the lava bundle
# and save them as CSV files locally.
def fetch_benchmark_results(server, job):
    """Save the benchmark CSV files attached to the job's result bundle
    into the current working directory."""
    content = get_job_bundle_content(server, job)
    wanted_files = {
        'processed_results_close.csv',
        'processed_results_ioctl.csv',
        'processed_results_open_efault.csv',
        'processed_results_open_enoent.csv',
        'processed_results_dup_close.csv',
        'processed_results_lttng_test_filter.csv',
    }

    # The result bundle is a large JSON containing the results of every
    # testcase of the LAVA job as well as the files that were attached
    # during the run. Walk it to find the base64-encoded benchmark result
    # files produced during the run.
    for run in content['test_runs']:
        # We only care about the benchmark testcases.
        if 'benchmark-' not in run['test_id']:
            continue
        for res in run.get('test_results', []):
            for attachment in res.get('attachments', []):
                # Only save the known result files.
                if attachment['pathname'] not in wanted_files:
                    continue
                # Decode the base64 payload and write it to a file of the
                # same name in the current working directory.
                with open(attachment['pathname'], 'wb') as out:
                    out.write(base64.b64decode(attachment['content']))
98
# Parse the attachment of the testcase to fetch the stdout of the test suite
def print_test_output(server, job):
    """Print the run-tests suite output extracted from the stdout.log
    attachment of the job's result bundle."""
    content = get_job_bundle_content(server, job)
    found = False

    for run in content['test_runs']:
        # NOTE(review): substring matching kept from the original; test_ids
        # are expected to be (substrings of) these literals.
        if run['test_id'] in 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] in 'stdout.log':

                    # Decode the base64 attachment to text BEFORE splitting.
                    # The previous str(base64.b64decode(...)) produced the
                    # "b'...'" bytes repr, in which newlines are the two
                    # characters '\' 'n', so split('\n') never split anything.
                    testoutput = base64.b64decode(
                        bytes(attachment['content'], encoding='UTF-8')
                    ).decode('utf-8').split('\n')

                    # Create a generator to iterate on the lines, keeping
                    # the state of the iterator across the two loops.
                    testoutput_iter = iter(testoutput)
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there.
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            found = True
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Print until we reach the end of the
                                    # section.
                                    break

                        if found is True:
                            print('----- TEST SUITE OUTPUT END -----')
                            break
134
def create_new_job(name, build_device):
    """Return the skeleton of a LAVA job description.

    name: job name shown in the LAVA scheduler.
    build_device: device type, 'x86' or 'kvm'.
    """
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type': build_device,
        'tags': [ ],
        'timeout': 18000,
        'actions': []
    })
    # Equality, not substring membership: the previous "in 'x86'" test
    # would also match build_device values like 'x' or '86'.
    if build_device == 'x86':
        # Bare-metal x86 machines need the scratch disk tag.
        job['tags'].append('dev-sda1')

    return job
148
def get_boot_cmd():
    """Return the LAVA action that boots the deployed image."""
    return OrderedDict(command='boot_image')
154
def get_config_cmd(build_device):
    """Return the lava_command_run action preparing the target system:
    DNS fixup, tracing group, scratch-disk mount (x86 only) and package
    installation.

    build_device: 'x86' or 'kvm'.
    """
    packages = ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip', \
                'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev', \
                'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev', \
                'libnuma-dev']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                'groupadd tracing'
            ],
            'timeout':300
        }
    })
    # Equality, not substring membership: the previous "in 'x86'" test
    # would also match build_device values like 'x' or '86'.
    if build_device == 'x86':
        # Mount the scratch disk and wipe any leftovers from previous runs.
        command['parameters']['commands'].extend([
            'mount /dev/sda1 /tmp',
            'rm -rf /tmp/*'])

    command['parameters']['commands'].extend([
        'depmod -a',
        'locale-gen en_US.UTF-8',
        'apt-get update',
        'apt-get upgrade',
        'apt-get install -y {}'.format(' '.join(packages))
        ])
    return command
184
def get_baremetal_benchmarks_cmd():
    """Return the lava_test_shell action running the syscall benchmark
    test definitions on the bare-metal device."""
    testdef_paths = [
        'lava/baremetal-tests/failing-close.yml',
        'lava/baremetal-tests/failing-ioctl.yml',
        'lava/baremetal-tests/failing-open-efault.yml',
        'lava/baremetal-tests/success-dup-close.yml',
        'lava/baremetal-tests/failing-open-enoent.yml',
        'lava/baremetal-tests/lttng-test-filter.yml',
    ]
    # Every testdef comes from the master branch of the lttng-ci repo.
    testdef_repos = [
        {
            'git-repo': 'https://github.com/lttng/lttng-ci.git',
            'revision': 'master',
            'testdef': path
        }
        for path in testdef_paths
    ]
    return OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': testdef_repos,
            'timeout': 18000
        }
    })
225
def get_baremetal_tests_cmd():
    """Return the lava_test_shell action running the perf regression
    tests on the bare-metal device."""
    testdef_repos = [{
        'git-repo': 'https://github.com/lttng/lttng-ci.git',
        'revision': 'master',
        'testdef': 'lava/baremetal-tests/perf-tests.yml'
    }]
    return OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': testdef_repos,
            'timeout': 18000
        }
    })
241
def get_kvm_tests_cmd():
    """Return the lava_test_shell action running the kernel and
    destructive test suites inside the KVM guest."""
    ci_repo = 'https://github.com/lttng/lttng-ci.git'
    testdef_repos = [
        {'git-repo': ci_repo,
         'revision': 'master',
         'testdef': 'lava/baremetal-tests/kernel-tests.yml'},
        {'git-repo': ci_repo,
         'revision': 'master',
         'testdef': 'lava/baremetal-tests/destructive-tests.yml'},
    ]
    return OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': testdef_repos,
            'timeout': 18000
        }
    })
262
def get_results_cmd(stream_name):
    """Return the submit_results action pushing the run's results to the
    anonymous dashboard stream named *stream_name*."""
    return OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/',
            'stream': '/anonymous/{}/'.format(stream_name)
        }
    })
272
def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
    """Return the deploy_kernel action for the KVM device: kernel plus the
    linux and lttng module archives, layered on the Xenial rootfs image.

    All *_path arguments are paths on the storage host, reached via SCP.
    """
    # Module archives are unpacked into the rootfs of the guest image.
    customize = {
        SCP_PATH + linux_modules_path: ['rootfs:/', 'archive'],
        SCP_PATH + lttng_modules_path: ['rootfs:/', 'archive'],
    }
    return OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {'jenkins_jobname': jenkins_job},
        'parameters': {
            'customize': customize,
            'kernel': str(SCP_PATH + kernel_path),
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:',
            'username': 'root'
        }
    })
293
def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
    """Return the deploy_kernel action for the bare-metal x86 device:
    kernel plus the module overlays on top of the NFS rootfs.

    All *_path arguments are paths on the storage host, reached via SCP.
    nb_iter, when given, is recorded in the job metadata as the number of
    benchmark iterations.
    """
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {'jenkins_jobname': jenkins_job},
        'parameters': {
            'overlays': [
                str(SCP_PATH + linux_modules_path),
                str(SCP_PATH + lttng_modules_path),
            ],
            'kernel': str(SCP_PATH + kernel_path),
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })
    if nb_iter is not None:
        command['metadata']['nb_iterations'] = nb_iter

    return command
314
315
def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
    """Return the lava_command_run action installing the tracing stack on
    the target with vlttng.

    build_device: 'kvm' or 'x86'; selects where the virtualenv is created.
    lttng_tools_commit: lttng-tools commit id to check out.
    lttng_ust_commit: optional lttng-ust commit id; when None, UST is not
    installed (kernel-only benchmark runs).
    """
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'pip3 install --upgrade pip',
                'hash -r',
                'git clone https://github.com/frdeso/syscall-bench-it.git bm',
                'pip3 install vlttng',
            ],
            'timeout': 18000
        }
    })

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
                 ' --profile babeltrace-stable-1.4 ' \
                 ' --profile lttng-tools-master' \
                 ' --override projects.lttng-tools.checkout='+lttng_tools_commit + \
                 ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master ' \
                      ' --override projects.lttng-ust.checkout='+lttng_ust_commit+ \
                      ' --profile lttng-ust-no-man-pages'

    # Equality, not substring membership: the previous "in 'kvm'" test
    # would also match build_device values like 'k' or 'vm'.
    if build_device == 'kvm':
        virtenv_path = '/root/virtenv'
    else:
        # Bare metal: /tmp is the mounted scratch disk.
        virtenv_path = '/tmp/virtenv'

    vlttng_cmd += ' '+virtenv_path

    command['parameters']['commands'].append(vlttng_cmd)
    # Stable symlink so test definitions find the venv at a fixed path.
    command['parameters']['commands'].append('ln -s '+virtenv_path+' /root/lttngvenv')
    command['parameters']['commands'].append('sync')

    return command
354
def main():
    """Build a LAVA job for the requested test type, submit it, poll until
    completion, then fetch the artefacts and report the outcome.

    Returns 0 when the job completes with no failed testcase, -1 on any
    error (bad arguments, missing token, incomplete job, failed tests).
    """
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()

    # Equality, not substring membership: the previous "in 'literal'" tests
    # would accept partial values such as 'tests' or the empty string.
    if args.type == 'baremetal-benchmarks':
        test_type = TestType.baremetal_benchmarks
    elif args.type == 'baremetal-tests':
        test_type = TestType.baremetal_tests
    elif args.type == 'kvm-tests':
        test_type = TestType.kvm_tests
    else:
        print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1

    # The LAVA API token is handed down by Jenkins through the environment.
    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except KeyError as e:
        print('LAVA_JENKINS_TOKEN not found in the environment variable. Exiting...', e )
        return -1

    # First action: deploy the kernel and modules on the matching device.
    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.kvm_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))

    j['actions'].append(get_boot_cmd())

    # Then configure the target, set up the tracing environment, run the
    # appropriate test suite and submit the results to the dashboard.
    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Tests runs need -uc/--ust-commit options. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Tests runs need -uc/--ust-commit options. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('Lava jobid:{}'.format(jobid))
    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Check the status of the job every 30 seconds
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    not_running = False
    while jobstatus in ('Submitted', 'Running'):
        if not_running is False and jobstatus == 'Running':
            print('Job started running')
            not_running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    # Fetch the artefacts relevant to this test type.
    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1

    # Job completed: the testcase counts decide the overall exit status.
    passed, failed = check_job_all_test_cases_state_count(server, jobid)
    print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

    if failed == 0:
        return 0
    else:
        return -1
453
# Propagate main()'s return code to the shell so Jenkins detects failures.
if __name__ == "__main__":
    sys.exit(main())
This page took 0.043209 seconds and 5 git commands to generate.