# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import argparse
import base64
import json
import os
import sys
import time
import xmlrpc.client

from collections import OrderedDict
from enum import Enum

# LAVA account used for submission; the original value was elided from the
# source, so this one is assumed.
USERNAME = 'frdeso'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'

class TestType(Enum):
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3
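
# Overview: this script assembles a LAVA (dispatcher v1, JSON) job definition
# from OrderedDict command fragments, submits it to the LAVA master over
# XML-RPC, polls the scheduler until the job finishes, then fetches the
# results bundle and reports the outcome.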

def get_job_bundle_content(server, job):
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)
        return None

    return json.loads(bundle['content'])

# Parse the results bundle to check whether the run-tests testcase
# of the lttng-kernel-tests passed successfully.
def check_job_all_test_cases_state_count(server, job):
    content = get_job_bundle_content(server, job)

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                if result['result'] == 'pass':
                    passed_tests += 1
                elif result['test_case_id'] == 'wait_for_test_image_prompt':
                    # FIXME: This test is part of the boot action and fails
                    # randomly but doesn't affect the behaviour of the tests.
                    # No reply on the Lava IRC channel yet. We should update
                    # our Lava installation and try to reproduce it. This error
                    # was encountered on the KVM trusty image only. Not seen
                    # on Xenial at this point.
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)

# Get the benchmark results from the lava bundle and
# save them as CSV files locally.
def fetch_benchmark_results(server, job):
    content = get_job_bundle_content(server, job)
    testcases = ['processed_results_close.csv',
                 'processed_results_open_efault.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_dup_close.csv',
                 'processed_results_lttng_test_filter.csv']

    # The result bundle is a large JSON containing the results of every testcase
    # of the LAVA job as well as the files that were attached during the run.
    # We need to iterate over this JSON to get the base64 representation of the
    # benchmark results produced during the run.
    for run in content['test_runs']:
        # We only care about the benchmark testcases.
        if 'benchmark-' in run['test_id']:
            if 'test_results' in run:
                for res in run['test_results']:
                    if 'attachments' in res:
                        for a in res['attachments']:
                            # We only save the results files.
                            if a['pathname'] in testcases:
                                with open(a['pathname'], 'wb') as f:
                                    # Convert the base64 representation of the
                                    # result file and write it to a file
                                    # in the current working directory.
                                    f.write(base64.b64decode(a['content']))
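
# For reference, the slice of the results bundle walked above looks roughly
# like this (field names are the ones used in the code; the values are
# illustrative only):
#   {"test_runs": [{"test_id": "benchmark-...",
#                   "test_results": [{"attachments": [
#                       {"pathname": "processed_results_close.csv",
#                        "content": "<base64-encoded CSV>"}]}]}]}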

# Parse the attachments of the testcase to fetch the stdout of the test suite.
def print_test_output(server, job):
    content = get_job_bundle_content(server, job)

    for run in content['test_runs']:
        if run['test_id'] == 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] == 'stdout.log':

                    # Decode the base64 file and split on newlines to iterate
                    # on the lines.
                    testoutput = base64.b64decode(attachment['content']) \
                        .decode('utf-8', errors='replace').split('\n')

                    # Create a generator to iterate on the lines while keeping
                    # the state of the iterator across the two loops.
                    testoutput_iter = iter(testoutput)
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there.
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Print until we reach the end of the
                                    # testcase, then stop.
                                    break
                    print('----- TEST SUITE OUTPUT END -----')

def create_new_job(name, build_device):
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type': build_device,
        'tags': [],
        'actions': []
    })
    if build_device == 'x86':
        job['tags'].append('dev-sda1')
    return job
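
# The job skeleton above is filled in by appending more OrderedDict commands
# to job['actions']; the 'dev-sda1' tag steers x86 jobs onto boards that
# expose that disk, which get_config_cmd() later mounts on /tmp.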

def get_boot_cmd():
    command = OrderedDict({
        'command': 'boot_image'
    })
    return command

def get_config_cmd(build_device):
    packages = ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
                'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
                'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf'
            ]
        }
    })

    if build_device == 'x86':
        command['parameters']['commands'].extend([
            'mount /dev/sda1 /tmp'])

    command['parameters']['commands'].extend([
        'locale-gen en_US.UTF-8',
        'apt-get update',
        'apt-get install -y {}'.format(' '.join(packages))
    ])
    return command

def get_baremetal_benchmarks_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/success-dup-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/lttng-test-filter.yml'
                }
            ]
        }
    })
    return command
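
# Each 'testdef_repos' entry tells the LAVA dispatcher to clone the given git
# repository at 'revision' and run the named test definition YAML, so every
# benchmark testcase above comes from the lttng-ci repository.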

def get_baremetal_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/perf-tests.yml'
                }
            ]
        }
    })
    return command

def get_kvm_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/kernel-tests.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/destructive-tests.yml'
                }
            ]
        }
    })
    return command

def get_results_cmd(stream_name):
    command = OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/'
        }
    })
    command['parameters']['stream'] = '/anonymous/' + stream_name + '/'
    return command

def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'customize': {},
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:'
        }
    })

    command['parameters']['customize'][SCP_PATH + linux_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['customize'][SCP_PATH + lttng_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job

    return command
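
# The 'customize' map pairs each scp:// module tarball with its destination
# ('rootfs:/') and the 'archive' flag, which the deploy_kernel action uses to
# unpack the freshly built kernel and LTTng modules into the KVM root
# filesystem.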

def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'overlays': [],
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })

    command['parameters']['overlays'].append(str(SCP_PATH + linux_modules_path))
    command['parameters']['overlays'].append(str(SCP_PATH + lttng_modules_path))
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job
    if nb_iter is not None:
        command['metadata']['nb_iterations'] = nb_iter

    return command

def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'pip3 install --upgrade pip',
                'git clone https://github.com/frdeso/syscall-bench-it.git bm',
                'pip3 install vlttng'
            ]
        }
    })

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
                 ' --profile babeltrace-stable-1.4 ' \
                 ' --profile lttng-tools-master' \
                 ' --override projects.lttng-tools.checkout=' + lttng_tools_commit + \
                 ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master ' \
                      ' --override projects.lttng-ust.checkout=' + lttng_ust_commit + \
                      ' --profile lttng-ust-no-man-pages'

    if build_device == 'kvm':
        virtenv_path = '/root/virtenv'
    else:
        virtenv_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + virtenv_path

    command['parameters']['commands'].append(vlttng_cmd)
    command['parameters']['commands'].append('ln -s ' + virtenv_path + ' /root/lttngvenv')
    command['parameters']['commands'].append('sync')

    return command
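
# With build_device == 'kvm' and both commits set, the assembled command looks
# like the following (commit hashes illustrative):
#   vlttng --jobs=$(nproc) --profile urcu-master --profile babeltrace-stable-1.4
#       --profile lttng-tools-master --override projects.lttng-tools.checkout=<sha>
#       --profile lttng-tools-no-man-pages --profile lttng-ust-master
#       --override projects.lttng-ust.checkout=<sha> --profile lttng-ust-no-man-pages
#       /root/virtenv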

def main():
    parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()
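
    # Example invocation (script name, paths and hashes illustrative only):
    #   LAVA_JENKINS_TOKEN=<token> python3 lava-submit.py -t kvm-tests \
    #       -j my-job -k kernels/vmlinuz -km modules/linux.tar.gz \
    #       -lm modules/lttng.tar.gz -tc <tools-sha> -uc <ust-sha>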

    if args.type == 'baremetal-benchmarks':
        test_type = TestType.baremetal_benchmarks
    elif args.type == 'baremetal-tests':
        test_type = TestType.baremetal_tests
    elif args.type == 'kvm-tests':
        test_type = TestType.kvm_tests
    else:
        print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1

    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except KeyError as e:
        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...', e)
        return -1

    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel,
                                               args.kmodule, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel,
                                               args.kmodule, args.lmodule))
    elif test_type is TestType.kvm_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel,
                                               args.kmodule, args.lmodule))

    j['actions'].append(get_boot_cmd())

    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('Lava jobid:{}'.format(jobid))
    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Check the status of the job every 30 seconds.
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    not_running = False
    while jobstatus in ('Submitted', 'Running'):
        if not_running is False and jobstatus == 'Running':
            print('Job started running')
            not_running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1
    else:
        passed, failed = check_job_all_test_cases_state_count(server, jobid)
        print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

    return 0

if __name__ == "__main__":
    sys.exit(main())