#!/usr/bin/python3
# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import argparse
import base64
import json
import os
import sys
import time
import xmlrpc.client
from collections import OrderedDict
from enum import Enum

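# LAVA user and master instance used to submit jobs, and the scp:// prefix
# from which the LAVA dispatcher fetches the kernel and module artifacts.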
USERNAME = 'frdeso'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'

class TestType(Enum):
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3

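# Fetch the results bundle of a LAVA job and return it as a parsed JSON object.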
def get_job_bundle_content(server, job):
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)
        raise f

    return json.loads(bundle['content'])

# Parse the results bundle to check whether the run-tests testcase
# of the lttng-kernel-tests suite passed successfully
def check_job_all_test_cases_state_count(server, job):
    content = get_job_bundle_content(server, job)

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                if result['result'] == 'pass':
                    passed_tests += 1
                elif result['test_case_id'] == 'wait_for_test_image_prompt':
                    # FIXME: This test is part of the boot action and fails
                    # randomly, but doesn't affect the behaviour of the tests.
                    # No reply on the LAVA IRC channel yet. We should update
                    # our LAVA installation and try to reproduce it. This error
                    # was encountered on the KVM trusty image only. Not seen
                    # on Xenial at this point.
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)

# Get the benchmark results from the LAVA bundle
# and save them as CSV files locally
def fetch_benchmark_results(server, job):
    content = get_job_bundle_content(server, job)
    testcases = ['processed_results_close.csv',
            'processed_results_open_efault.csv',
            'processed_results_dup_close.csv',
            'processed_results_lttng_test_filter.csv']

    # The result bundle is a large JSON containing the results of every testcase
    # of the LAVA job as well as the files that were attached during the run.
    # We need to iterate over this JSON to get the base64 representation of the
    # benchmark results produced during the run.
    for run in content['test_runs']:
        # We only care about the benchmark testcases
        if 'benchmark-' in run['test_id']:
            if 'test_results' in run:
                for res in run['test_results']:
                    if 'attachments' in res:
                        for a in res['attachments']:
                            # We only save the result files
                            if a['pathname'] in testcases:
                                with open(a['pathname'], 'wb') as f:
                                    # Convert the base64 representation of the
                                    # result file and write it to a file
                                    # in the current working directory
                                    f.write(base64.b64decode(a['content']))

# Parse the attachments of the testcase to fetch the stdout of the test suite
def print_test_output(server, job):
    content = get_job_bundle_content(server, job)
    found = False

    for run in content['test_runs']:
        if run['test_id'] == 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] == 'stdout.log':

                    # Decode the base64-encoded log and split it on newlines
                    # so we can iterate over the lines
                    testoutput = base64.b64decode(attachment['content']).decode('utf-8').split('\n')

                    # Create an iterator over the lines so the position is
                    # kept across the two loops.
                    testoutput_iter = iter(testoutput)
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            found = True
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Stop printing when we reach the end of
                                    # the section
                                    break

                    if found:
                        print('----- TEST SUITE OUTPUT END -----')
                        break

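# Create the base skeleton of a LAVA job for the given device type. On x86,
# request a device carrying the 'dev-sda1' tag, since the configuration step
# later mounts /dev/sda1 as scratch space.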
def create_new_job(name, build_device):
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type': build_device,
        'tags': [ ],
        'timeout': 18000,
        'actions': []
    })
    if build_device == 'x86':
        job['tags'].append('dev-sda1')

    return job

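# Return the LAVA action that boots the deployed image.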
def get_boot_cmd():
    command = OrderedDict({
        'command': 'boot_image'
    })
    return command

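# Return the lava_command_run action that configures the freshly booted
# target: fix DNS resolution, create the 'tracing' group, clear the scratch
# partition on x86 and install the packages required by the tests.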
def get_config_cmd(build_device):
    packages = ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
            'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
            'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev',
            'libnuma-dev']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                'groupadd tracing'
            ],
            'timeout': 300
        }
    })
    if build_device == 'x86':
        command['parameters']['commands'].extend([
            'mount /dev/sda1 /tmp',
            'rm -rf /tmp/*'])

    command['parameters']['commands'].extend([
        'depmod -a',
        'locale-gen en_US.UTF-8',
        'apt-get update',
        'apt-get upgrade -y',
        'apt-get install -y {}'.format(' '.join(packages))
    ])
    return command

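# Return the lava_test_shell action running the benchmark test definitions
# from the lttng-ci repository.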
def get_baremetal_benchmarks_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/success-dup-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/lttng-test-filter.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

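# Return the lava_test_shell action running the baremetal perf test
# definition from the lttng-ci repository.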
def get_baremetal_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/perf-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

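# Return the lava_test_shell action running the kernel and destructive test
# definitions from the lttng-ci repository.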
def get_kvm_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/kernel-tests.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/destructive-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

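# Return the submit_results action pointing at the given anonymous bundle
# stream on the LAVA master.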
def get_results_cmd(stream_name):
    command = OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/'
        }
    })
    command['parameters']['stream'] = '/anonymous/' + stream_name + '/'
    return command

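# Return the deploy_kernel action for the KVM device: fetch the kernel over
# scp and unpack the Linux and LTTng module archives into the rootfs image.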
def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'customize': {},
            'kernel': None,
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:',
            'username': 'root'
        }
    })

    command['parameters']['customize'][SCP_PATH + linux_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['customize'][SCP_PATH + lttng_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job

    return command

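# Return the deploy_kernel action for the x86 device: fetch the kernel over
# scp, boot on an NFS rootfs and apply the module archives as overlays.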
def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'overlays': [],
            'kernel': None,
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })

    command['parameters']['overlays'].append(str(SCP_PATH + linux_modules_path))
    command['parameters']['overlays'].append(str(SCP_PATH + lttng_modules_path))
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job
    if nb_iter is not None:
        command['metadata']['nb_iterations'] = nb_iter

    return command

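# Return the lava_command_run action that sets up the test environment on the
# target: clone the syscall benchmark repository and use vlttng to build the
# requested lttng-tools (and optionally lttng-ust) commits into a virtualenv.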
def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'pip3 install --upgrade pip',
                'hash -r',
                'git clone https://github.com/frdeso/syscall-bench-it.git bm',
                'pip3 install vlttng',
            ],
            'timeout': 18000
        }
    })

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
            ' --profile babeltrace-stable-1.4' \
            ' --profile lttng-tools-master' \
            ' --override projects.lttng-tools.checkout=' + lttng_tools_commit + \
            ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master' \
                ' --override projects.lttng-ust.checkout=' + lttng_ust_commit + \
                ' --profile lttng-ust-no-man-pages'

    if build_device == 'kvm':
        virtenv_path = '/root/virtenv'
    else:
        virtenv_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + virtenv_path

    command['parameters']['commands'].append(vlttng_cmd)
    command['parameters']['commands'].append('ln -s ' + virtenv_path + ' /root/lttngvenv')
    command['parameters']['commands'].append('sync')

    return command

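# Example invocation (all paths and commit hashes below are illustrative,
# not real artifacts):
#
#   LAVA_JENKINS_TOKEN=<token> ./lava-submit.py \
#       -t kvm-tests -j my-kvm-job \
#       -k /kernel/vmlinuz.bin \
#       -km /modules/linux-modules.tar.gz \
#       -lm /modules/lttng-modules.tar.gz \
#       -tc 1a2b3c4 -uc 5d6e7f8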
def main():
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal test using LAVA')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()

    if args.type == 'baremetal-benchmarks':
        test_type = TestType.baremetal_benchmarks
    elif args.type == 'baremetal-tests':
        test_type = TestType.baremetal_tests
    elif args.type == 'kvm-tests':
        test_type = TestType.kvm_tests
    else:
        print('Argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1

    lava_api_key = None
    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except KeyError as e:
        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...', e)
        return -1

    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.kvm_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))

    j['actions'].append(get_boot_cmd())

    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('Lava jobid: {}'.format(jobid))
    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Check the status of the job every 30 seconds
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    running = False
    while jobstatus == 'Submitted' or jobstatus == 'Running':
        if not running and jobstatus == 'Running':
            print('Job started running')
            running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1
    else:
        passed, failed = check_job_all_test_cases_state_count(server, jobid)
        print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

    if failed == 0:
        return 0
    else:
        return -1

if __name__ == "__main__":
    sys.exit(main())