Lava: Use Python3 for lava job submission
[lttng-ci.git] / scripts / lttng-baremetal-tests / lava-submit.py
#!/usr/bin/python3
# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import argparse
import base64
import json
import os
import sys
import time
import xmlrpc.client
from collections import OrderedDict
from enum import Enum

USERNAME = 'frdeso'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'

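# The three test flavours this script can submit. The -t/--type command
# line argument is mapped to one of these values.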
class TestType(Enum):
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3

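# Fetch the results bundle of a LAVA job over XML-RPC and return it as a
# parsed JSON document.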
def get_job_bundle_content(server, job):
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)
        raise

    return json.loads(bundle['content'])

# Parse the results bundle to check whether the run-tests testcase
# of the lttng-kernel-tests suite passed successfully.
def check_job_all_test_cases_state_count(server, job):
    content = get_job_bundle_content(server, job)

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                if result['result'] == 'pass':
                    passed_tests += 1
                elif result['test_case_id'] == 'wait_for_test_image_prompt':
                    # FIXME: This test is part of the boot action and fails
                    # randomly but doesn't affect the behaviour of the tests.
                    # No reply on the LAVA IRC channel yet. We should update
                    # our LAVA installation and try to reproduce it. This error
                    # was encountered on the KVM trusty image only. Not seen
                    # on Xenial at this point.
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)

# Get the benchmark results from the LAVA bundle and
# save them as CSV files locally.
def fetch_benchmark_results(server, job):
    content = get_job_bundle_content(server, job)
    testcases = ['processed_results_close.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_open_efault.csv']

    # The result bundle is a large JSON document containing the results of
    # every testcase of the LAVA job as well as the files that were attached
    # during the run. We need to iterate over this JSON to get the base64
    # representation of the benchmark results produced during the run.
    for run in content['test_runs']:
        # We only care about the benchmark testcases.
        if 'benchmark-syscall-' in run['test_id']:
            if 'test_results' in run:
                for res in run['test_results']:
                    if 'attachments' in res:
                        for a in res['attachments']:
                            # We only save the results files.
                            if a['pathname'] in testcases:
                                with open(a['pathname'], 'wb') as f:
                                    # Decode the base64 representation of the
                                    # result file and write it to a file in
                                    # the current working directory.
                                    f.write(base64.b64decode(a['content']))

# Parse the attachments of the testcase to fetch the stdout of the test suite.
def print_test_output(server, job):
    content = get_job_bundle_content(server, job)
    found = False

    for run in content['test_runs']:
        if run['test_id'] == 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] == 'stdout.log':

                    # Decode the base64 file to a str and split on newlines
                    # to get a list of lines. Decoding the bytes (rather than
                    # calling str() on them) is required for the newline
                    # split to work.
                    testoutput = base64.b64decode(attachment['content']).decode('utf-8').split('\n')

                    # Create a generator to iterate on the lines while keeping
                    # the state of the iterator across the two loops.
                    testoutput_iter = iter(testoutput)
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there.
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            found = True
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Stop printing when we reach the end of
                                    # the section.
                                    break

                    if found is True:
                        print('----- TEST SUITE OUTPUT END -----')
                        break

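# Build the skeleton of a LAVA v1 JSON job definition; the caller fills in
# the 'actions' list with deploy/boot/command/test/submit commands. As a
# rough illustration, a kvm-tests job ends up serialized as:
#
#   {
#       "health_check": false,
#       "job_name": "<jenkins job name>",
#       "device_type": "kvm",
#       "tags": [],
#       "timeout": 18000,
#       "actions": [<deploy_kernel>, <boot_image>, <lava_command_run>,
#                   <lava_test_shell>, <submit_results>]
#   }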
def create_new_job(name, build_device):
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type': build_device,
        'tags': [],
        'timeout': 18000,
        'actions': []
    })
    if build_device == 'x86':
        job['tags'].append('dev-sda1')

    return job

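# The boot_image action simply boots the kernel that the preceding
# deploy_kernel action installed on the target.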
def get_boot_cmd():
    command = OrderedDict({
        'command': 'boot_image'
    })
    return command

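# Shell commands run on the freshly booted target: point it to the right
# DNS server, create the tracing group and install the build dependencies
# of the test suites. On x86 targets, /dev/sda1 is also mounted on /tmp as
# scratch space.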
def get_config_cmd(build_device):
    packages = ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
                'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
                'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                'groupadd tracing'
            ]
        }
    })
    if build_device == 'x86':
        command['parameters']['commands'].extend([
            'mount /dev/sda1 /tmp',
            'rm -rf /tmp/*'])

    command['parameters']['commands'].extend([
        'depmod -a',
        'locale-gen en_US.UTF-8',
        'apt-get update',
        'apt-get upgrade -y',
        'apt-get install -y {}'.format(' '.join(packages))
    ])
    return command

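# The lava_test_shell actions below fetch their test definitions from the
# lttng-ci repository; each YAML file describes one testcase to run on the
# target.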
def get_baremetal_benchmarks_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

def get_baremetal_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/perf-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

def get_kvm_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/kernel-tests.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/destructive-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

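# submit_results pushes the results bundle of the job to the given anonymous
# bundle stream on the LAVA dashboard, where the fetch/print helpers above
# can retrieve it afterwards.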
def get_results_cmd(stream_name):
    command = OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/'
        }
    })
    command['parameters']['stream'] = '/anonymous/' + stream_name + '/'
    return command

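# Deploy the kernel built by Jenkins on the KVM target. The kernel image and
# both modules archives are fetched over scp from the storage server; the
# modules archives are extracted into the root filesystem.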
def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'customize': {},
            'kernel': None,
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:',
            'username': 'root'
        }
    })

    command['parameters']['customize'][SCP_PATH + linux_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['customize'][SCP_PATH + lttng_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job

    return command

def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'overlays': [],
            'kernel': None,
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })

    command['parameters']['overlays'].append(str(SCP_PATH + linux_modules_path))
    command['parameters']['overlays'].append(str(SCP_PATH + lttng_modules_path))
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job
    if nb_iter is not None:
        command['metadata']['nb_iterations'] = nb_iter

    return command


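# Build the lava_command_run action that sets up the test environment on the
# target: clone the benchmark suite, install vlttng and use it to build an
# LTTng virtual environment pinned to the commits under test. For example,
# with hypothetical commit ids, the generated vlttng command looks roughly
# like:
#
#   vlttng --jobs=$(nproc) --profile urcu-master \
#       --profile babeltrace-stable-1.4 --profile lttng-tools-master \
#       --override projects.lttng-tools.checkout=1a2b3c4d \
#       --profile lttng-tools-no-man-pages /root/virtenv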
def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'pip3 install --upgrade pip',
                'hash -r',
                'git clone https://github.com/frdeso/syscall-bench-it.git bm',
                'pip3 install vlttng',
            ],
            'timeout': 18000
        }
    })

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
                 ' --profile babeltrace-stable-1.4 ' \
                 ' --profile lttng-tools-master' \
                 ' --override projects.lttng-tools.checkout=' + lttng_tools_commit + \
                 ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master ' \
                      ' --override projects.lttng-ust.checkout=' + lttng_ust_commit + \
                      ' --profile lttng-ust-no-man-pages'

    virtenv_path = None
    if build_device == 'kvm':
        virtenv_path = '/root/virtenv'
    else:
        virtenv_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + virtenv_path

    command['parameters']['commands'].append(vlttng_cmd)
    command['parameters']['commands'].append('ln -s ' + virtenv_path + ' /root/lttngvenv')
    command['parameters']['commands'].append('sync')

    return command

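# Parse the command line, build the job definition matching the requested
# test type, submit it to the LAVA scheduler over XML-RPC, then poll the
# job status and report the results.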
def main():
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal test using LAVA')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()

    if args.type == 'baremetal-benchmarks':
        test_type = TestType.baremetal_benchmarks
    elif args.type == 'baremetal-tests':
        test_type = TestType.baremetal_tests
    elif args.type == 'kvm-tests':
        test_type = TestType.kvm_tests
    else:
        print('Argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1

    lava_api_key = None
    try:
        lava_api_key = os.environ['LAVA_FRDESO_TOKEN']
    except KeyError as e:
        print('LAVA_FRDESO_TOKEN not found in the environment. Exiting...', e)
        return -1

    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.kvm_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))

    j['actions'].append(get_boot_cmd())

    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('Lava jobid: {}'.format(jobid))

    # Check the status of the job every 30 seconds.
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    running = False
    while jobstatus == 'Submitted' or jobstatus == 'Running':
        if not running and jobstatus == 'Running':
            print('Job started running')
            running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    passed, failed = check_job_all_test_cases_state_count(server, jobid)

    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1
    else:
        print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

    if failed == 0:
        return 0
    else:
        return -1

if __name__ == "__main__":
    sys.exit(main())
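# Example invocation from a Jenkins shell step (paths and commit ids are
# illustrative):
#
#   LAVA_FRDESO_TOKEN=<api-token> ./lava-submit.py -t kvm-tests \
#       -j lttng-kvm-tests-master \
#       -k /storage/kernel/vmlinuz \
#       -km /storage/kernel/linux-modules.tar.gz \
#       -lm /storage/kernel/lttng-modules.tar.gz \
#       -tc 1a2b3c4d -uc 5e6f7a8b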