jjb: Add back open_enoent Lava benchmark testcase
scripts/lttng-baremetal-tests/lava-submit.py (lttng-ci.git)
#!/usr/bin/python3
# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

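# A minimal overview and usage sketch; the option values below are
# illustrative placeholders, not real artifacts.
"""Submit an LTTng test or benchmark job to the LAVA master over XML-RPC
and poll it until completion.

Example invocation:

    LAVA_JENKINS_TOKEN=<api-token> ./lava-submit.py \
        -t kvm-tests \
        -j my-jenkins-job \
        -k /path/to/vmlinuz \
        -km /path/to/linux-modules.tar.gz \
        -lm /path/to/lttng-modules.tar.gz \
        -tc <lttng-tools commit sha1> \
        -uc <lttng-ust commit sha1>

The kernel and module paths must be reachable through SCP_PATH.
"""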
import argparse
import base64
import json
import os
import sys
import time
import xmlrpc.client
from collections import OrderedDict
from enum import Enum

USERNAME = 'frdeso'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'

class TestType(Enum):
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3

def get_job_bundle_content(server, job):
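    """Fetch the results bundle of a LAVA job and return it as a dict.

    Raises the underlying xmlrpc.client.Fault if the scheduler or
    dashboard call fails.
    """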
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        # Propagate the fault: without a bundle there is nothing to parse.
        print('Error while fetching results bundle', f.faultString)
        raise

    return json.loads(bundle['content'])

# Parse the results bundle to check that the run-tests testcase
# of the lttng-kernel-tests passed successfully.
def check_job_all_test_cases_state_count(server, job):
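    """Count the passed and failed testcases of a job's results bundle.

    Returns a (passed, failed) tuple.
    """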
    content = get_job_bundle_content(server, job)

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                if result['result'] == 'pass':
                    passed_tests += 1
                elif result['test_case_id'] == 'wait_for_test_image_prompt':
                    # FIXME: This test is part of the boot action and fails
                    # randomly but doesn't affect the behaviour of the tests.
                    # No reply on the LAVA IRC channel yet. We should update
                    # our LAVA installation and try to reproduce it. This
                    # error was encountered on the KVM trusty image only.
                    # Not seen on Xenial at this point.
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)

# Get the benchmark results from the LAVA bundle and
# save them as CSV files locally.
def fetch_benchmark_results(server, job):
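    """Download the benchmark CSV files attached to a job's results
    bundle and write them to the current working directory.
    """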
    content = get_job_bundle_content(server, job)
    testcases = ['processed_results_close.csv',
                 'processed_results_open_efault.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_dup_close.csv',
                 'processed_results_lttng_test_filter.csv']

    # The result bundle is a large JSON document containing the results of
    # every testcase of the LAVA job as well as the files that were
    # attached during the run. We need to iterate over this JSON to get the
    # base64 representation of the benchmark results produced during the
    # run.
    for run in content['test_runs']:
        # We only care about the benchmark testcases.
        if 'benchmark-' in run['test_id']:
            if 'test_results' in run:
                for res in run['test_results']:
                    if 'attachments' in res:
                        for a in res['attachments']:
                            # We only save the results files.
                            if a['pathname'] in testcases:
                                with open(a['pathname'], 'wb') as f:
                                    # Decode the base64 representation of
                                    # the result file and write it to a
                                    # file in the current working
                                    # directory.
                                    f.write(base64.b64decode(a['content']))

# Parse the attachments of the testcase to fetch the stdout of the test suite.
def print_test_output(server, job):
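    """Print the section of the test suite's stdout.log that lies between
    the LAVA start and end markers of the run-tests testcase.
    """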
    content = get_job_bundle_content(server, job)
    found = False

    for run in content['test_runs']:
        if run['test_id'] == 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] == 'stdout.log':

                    # Decode the base64 file and split on newlines to
                    # iterate over the lines.
                    testoutput = base64.b64decode(attachment['content']).decode('utf-8').split('\n')

                    # Create an iterator over the lines so that its
                    # position is kept across the two loops.
                    testoutput_iter = iter(testoutput)
                    for line in testoutput_iter:

                        # Find the header of the test case and start
                        # printing from there.
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            found = True
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # We reached the end of the section;
                                    # stop printing.
                                    break

                        if found:
                            print('----- TEST SUITE OUTPUT END -----')
                            break

def create_new_job(name, build_device):
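    """Build the skeleton of a LAVA job dictionary for the given device
    type ('x86' or 'kvm').
    """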
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type': build_device,
        'tags': [],
        'timeout': 18000,
        'actions': []
    })
    if build_device == 'x86':
        job['tags'].append('dev-sda1')

    return job

def get_boot_cmd():
    command = OrderedDict({
        'command': 'boot_image'
    })
    return command

def get_config_cmd(build_device):
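    """Build the lava_command_run action that configures the freshly
    booted system: DNS resolver, tracing group, scratch mount on x86, and
    package installation.
    """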
    packages = ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
                'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
                'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev',
                'libnuma-dev']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                'groupadd tracing'
            ],
            'timeout': 300
        }
    })
    if build_device == 'x86':
        command['parameters']['commands'].extend([
            'mount /dev/sda1 /tmp',
            'rm -rf /tmp/*'])

    command['parameters']['commands'].extend([
        'depmod -a',
        'locale-gen en_US.UTF-8',
        'apt-get update',
        'apt-get upgrade',
        'apt-get install -y {}'.format(' '.join(packages))
    ])
    return command

def get_baremetal_benchmarks_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/success-dup-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/lttng-test-filter.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

def get_baremetal_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/perf-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

def get_kvm_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/kernel-tests.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/destructive-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

def get_results_cmd(stream_name):
    command = OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/'
        }
    })
    command['parameters']['stream'] = '/anonymous/' + stream_name + '/'
    return command

def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
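    """Build the deploy_kernel action for the KVM device: kernel image
    plus the Linux and LTTng modules archives, fetched through SCP_PATH.
    """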
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'customize': {},
            'kernel': None,
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:',
            'username': 'root'
        }
    })

    command['parameters']['customize'][SCP_PATH + linux_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['customize'][SCP_PATH + lttng_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job

    return command

def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
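    """Build the deploy_kernel action for the x86 baremetal device:
    kernel, NFS root filesystem and modules overlays, fetched through
    SCP_PATH.
    """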
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'overlays': [],
            'kernel': None,
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })

    command['parameters']['overlays'].append(str(SCP_PATH + linux_modules_path))
    command['parameters']['overlays'].append(str(SCP_PATH + lttng_modules_path))
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job
    if nb_iter is not None:
        command['metadata']['nb_iterations'] = nb_iter

    return command

def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
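    """Build the lava_command_run action that clones the benchmark
    repository and sets up the LTTng virtual environment with vlttng at
    the requested lttng-tools (and optionally lttng-ust) commits.
    """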
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'pip3 install --upgrade pip',
                'hash -r',
                'git clone https://github.com/frdeso/syscall-bench-it.git bm',
                'pip3 install vlttng',
            ],
            'timeout': 18000
        }
    })

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
                 ' --profile babeltrace-stable-1.4' \
                 ' --profile lttng-tools-master' \
                 ' --override projects.lttng-tools.checkout=' + lttng_tools_commit + \
                 ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master' \
                      ' --override projects.lttng-ust.checkout=' + lttng_ust_commit + \
                      ' --profile lttng-ust-no-man-pages'

    if build_device == 'kvm':
        virtenv_path = '/root/virtenv'
    else:
        virtenv_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + virtenv_path

    command['parameters']['commands'].append(vlttng_cmd)
    command['parameters']['commands'].append('ln -s ' + virtenv_path + ' /root/lttngvenv')
    command['parameters']['commands'].append('sync')

    return command

def main():
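    """Parse the command line, build the LAVA job, submit it and poll its
    status until completion; the exit status reflects the job's outcome.
    """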
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal test using LAVA')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()

    if args.type == 'baremetal-benchmarks':
        test_type = TestType.baremetal_benchmarks
    elif args.type == 'baremetal-tests':
        test_type = TestType.baremetal_tests
    elif args.type == 'kvm-tests':
        test_type = TestType.kvm_tests
    else:
        print('Argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1

    lava_api_key = None
    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except KeyError as e:
        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...', e)
        return -1

    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.kvm_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))

    j['actions'].append(get_boot_cmd())

    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('LAVA jobid: {}'.format(jobid))
    print('LAVA job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Check the status of the job every 30 seconds.
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    running = False
    while jobstatus in ('Submitted', 'Running'):
        if not running and jobstatus == 'Running':
            print('Job started running')
            running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1
    else:
        passed, failed = check_job_all_test_cases_state_count(server, jobid)
        print('With {} passed and {} failed LAVA test cases.'.format(passed, failed))

        if failed == 0:
            return 0
        else:
            return -1

if __name__ == "__main__":
    sys.exit(main())