f9c7f0614be02e3d1dc5648645369d996c0f7faa
2 # Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
4 # This program is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU General Public License as published by
6 # the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
14 # You should have received a copy of the GNU General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
24 from collections
import OrderedDict
# NOTE(review): reconstructed from a line-wrapped gitweb scrape; the fused
# original line numbers ("28", "29") were stripped. Values are verbatim.

# LAVA master host used for the XML-RPC job submission endpoint.
HOSTNAME = 'lava-master.internal.efficios.com'
# Base scp:// URL under which Jenkins publishes kernel/module artifacts.
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
def get_job_bundle_content(server, job):
    """Return the decoded JSON content of a LAVA job's result bundle.

    NOTE(review): reconstructed from a line-wrapped gitweb scrape; all of the
    original statements were visible, only formatting was repaired.

    server -- XML-RPC proxy exposing the LAVA `scheduler` and `dashboard` APIs
    job    -- LAVA job id; converted to str for the scheduler call
    """
    # The scheduler's job status carries the sha1 of the result bundle.
    bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
    # Fetch the bundle itself from the dashboard API.
    bundle = server.dashboard.get(bundle_sha)
    # 'content' is the serialized bundle; decode it into Python objects.
    return json.loads(bundle['content'])
# NOTE(review): gitweb scrape artifact -- the original line numbers are fused
# into the text below and original lines 45-47 and 52-54 (the passed/failed
# counter initialisation and increments) are missing, so this body is
# incomplete and cannot run as-is. Restore from the upstream lttng-ci repo.
# Purpose (from the visible code): walk every test_result of every test_run in
# the job's bundle and return a (passed_tests, failed_tests) tuple.
41 # Parse the results bundle to see the run-tests testcase
42 # of the lttng-kernel-tests passed successfully
43 def check_job_all_test_cases_state_count(server
, job
):
44 content
= get_job_bundle_content(server
, job
)
48 for run
in content
['test_runs']:
49 for result
in run
['test_results']:
50 if 'test_case_id' in result
:
51 if result
['result'] in 'pass':
55 return (passed_tests
, failed_tests
)
# NOTE(review): gitweb scrape artifact -- several original lines are missing
# (the embedded line numbers jump: 59→62, 65→67→69, 74→76→78→80, 82→85→90),
# so this body is incomplete and cannot run as-is. Restore from upstream.
# Purpose (from the visible code): find the 'stdout.log' attachment of the
# 'lttng-kernel-test' run, base64-decode it, and print the slice of lines
# between the LAVA_SIGNAL_STARTTC/ENDTC run-tests markers.
57 # Parse the attachment of the testcase to fetch the stdout of the test suite
58 def print_test_output(server
, job
):
59 content
= get_job_bundle_content(server
, job
)
62 for run
in content
['test_runs']:
63 if run
['test_id'] in 'lttng-kernel-test':
64 for attachment
in run
['attachments']:
65 if attachment
['pathname'] in 'stdout.log':
67 # Decode the base64 file and split on newlines to iterate
69 testoutput
= base64
.b64decode(attachment
['content']).split('\n')
71 # Create a generator to iterate on the lines and keeping
72 # the state of the iterator across the two loops.
73 testoutput_iter
= iter(testoutput
)
74 for line
in testoutput_iter
:
76 # Find the header of the test case and start printing
78 if 'LAVA_SIGNAL_STARTTC run-tests' in line
:
80 print('---- TEST SUITE OUTPUT BEGIN ----')
81 for line
in testoutput_iter
:
82 if 'LAVA_SIGNAL_ENDTC run-tests' not in line
:
85 # Print until we reach the end of the
90 print('----- TEST SUITE OUTPUT END -----')
# NOTE(review): gitweb scrape artifact -- most of this function is missing
# (original lines 94, 96, 98-101 and 104-107, including the `job` dict literal
# and the return statement). Do not edit without restoring from upstream.
# Purpose (from the visible code): build a LAVA job description dict; x86
# build devices additionally get the 'dev-sda1' tag appended.
93 def create_new_job(name
, build_device
):
95 'health_check': False,
97 'device_type':build_device
,
102 if build_device
in 'x86':
103 job
['tags'].append('dev-sda1')
# NOTE(review): orphaned fragment -- the enclosing `def` line (original line
# ~107, presumably get_boot_cmd, which the script body calls) is missing from
# this scrape, as is the rest of the OrderedDict literal and the return.
108 command
= OrderedDict({
109 'command': 'boot_image'
# NOTE(review): gitweb scrape artifact -- many original lines are missing
# (117, 120-123, 126-129, 133-134, 136, 138, 140+ incl. the return), so this
# body is incomplete. Restore from upstream before editing.
# Purpose (from the visible code): build a 'lava_command_run' command that
# configures the target (DNS, x86 mounts /dev/sda1 on /tmp, locale) and
# apt-installs the build/test package list.
113 def get_config_cmd(build_device
):
114 packages
=['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip', \
115 'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev', \
116 'libelf-dev', 'libmount-dev', 'libxml2', 'python3-pandas', \
118 command
= OrderedDict({
119 'command': 'lava_command_run',
124 'cat /etc/resolv.conf',
125 'echo nameserver 172.18.0.12 > /etc/resolv.conf',
130 if build_device
in 'x86':
131 command
['parameters']['commands'].extend([
132 'mount /dev/sda1 /tmp',
135 command
['parameters']['commands'].extend([
137 'locale-gen en_US.UTF-8',
139 'apt-get install -y {}'.format(' '.join(packages
))
# NOTE(review): gitweb scrape artifact -- the dict structure around these
# entries (original lines 146-148, 152-153, 157-158, 162-163, 167+ incl. the
# return) is missing. Restore from upstream before editing.
# Purpose (from the visible code): build a 'lava_test_shell' command running
# the failing-close / failing-open-efault / failing-open-enoent / perf-tests
# testdefs from the lttng-ci repository (master revision).
143 def get_benchmarks_cmd():
144 command
= OrderedDict({
145 'command': 'lava_test_shell',
149 'git-repo': 'https://github.com/lttng/lttng-ci.git',
150 'revision': 'master',
151 'testdef': 'lava/baremetal-tests/failing-close.yml'
154 'git-repo': 'https://github.com/lttng/lttng-ci.git',
155 'revision': 'master',
156 'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
159 'git-repo': 'https://github.com/lttng/lttng-ci.git',
160 'revision': 'master',
161 'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
164 'git-repo': 'https://github.com/lttng/lttng-ci.git',
165 'revision': 'master',
166 'testdef': 'lava/baremetal-tests/perf-tests.yml'
# NOTE(review): orphaned fragment -- the enclosing `def` line (original line
# ~174; presumably get_tests_cmd, which the script body calls) is missing from
# this scrape, along with the surrounding dict structure and return.
# Visible intent: a 'lava_test_shell' command running the kernel-tests testdef.
175 command
= OrderedDict({
176 'command': 'lava_test_shell',
180 'git-repo': 'https://github.com/lttng/lttng-ci.git',
181 'revision': 'master',
182 'testdef': 'lava/baremetal-tests/kernel-tests.yml'
# NOTE(review): gitweb scrape artifact -- original lines 193, 195-196 and
# 198+ (rest of the dict and the return) are missing. Restore from upstream.
# Purpose (from the visible code): build a 'submit_results' command pointing
# at the LAVA master RPC endpoint, with stream '/anonymous/<stream_name>/'.
190 def get_results_cmd(stream_name
):
191 command
= OrderedDict({
192 'command': 'submit_results',
194 'server': 'http://lava-master.internal.efficios.com/RPC2/'
197 command
['parameters']['stream']='/anonymous/'+stream_name
+'/'
# NOTE(review): gitweb scrape artifact -- original lines 203-206, 209-211 and
# 216 (dict structure and the return) are missing. Restore from upstream.
# Purpose (from the visible code): build a 'deploy_kernel' command for the KVM
# device: trusty-grub rootfs image, linux/lttng modules archived into the
# rootfs via scp paths, kernel fetched from SCP_PATH+kernel_path, and the
# Jenkins job name recorded in the command metadata.
200 def get_deploy_cmd_kvm(jenkins_job
, kernel_path
, linux_modules_path
, lttng_modules_path
):
201 command
= OrderedDict({
202 'command': 'deploy_kernel',
207 'rootfs': 'file:///var/lib/lava-server/default/media/images/trusty-grub.img.gz',
208 'target_type': 'ubuntu'
212 command
['parameters']['customize'][SCP_PATH
+linux_modules_path
]=['rootfs:/','archive']
213 command
['parameters']['customize'][SCP_PATH
+lttng_modules_path
]=['rootfs:/','archive']
214 command
['parameters']['kernel'] = str(SCP_PATH
+kernel_path
)
215 command
['metadata']['jenkins_jobname'] = jenkins_job
# NOTE(review): gitweb scrape artifact -- original lines 222-225, 228-230 and
# 237+ (dict structure and the return) are missing. Restore from upstream.
# Purpose (from the visible code): build a 'deploy_kernel' command for the x86
# device: NFS rootfs tarball, linux/lttng modules appended as overlays, kernel
# from SCP_PATH+kernel_path, Jenkins job name in metadata, and an optional
# nb_iter recorded as 'nb_iterations' (default None leaves it unset).
219 def get_deploy_cmd_x86(jenkins_job
, kernel_path
, linux_modules_path
, lttng_modules_path
, nb_iter
=None):
220 command
= OrderedDict({
221 'command': 'deploy_kernel',
226 'nfsrootfs': str(SCP_PATH
+'/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
227 'target_type': 'ubuntu'
231 command
['parameters']['overlays'].append( str(SCP_PATH
+linux_modules_path
))
232 command
['parameters']['overlays'].append( str(SCP_PATH
+lttng_modules_path
))
233 command
['parameters']['kernel'] = str(SCP_PATH
+kernel_path
)
234 command
['metadata']['jenkins_jobname'] = jenkins_job
235 if nb_iter
is not None:
236 command
['metadata']['nb_iterations'] = nb_iter
# NOTE(review): gitweb scrape artifact -- original lines 244-245, 248-252,
# 258, 263-264, 267, 269, 271 and 275+ (dict structure, the kvm/else branch
# layout, and the return) are missing. Restore from upstream before editing.
# Purpose (from the visible code): build a 'lava_command_run' command that
# clones the syscall benchmark repo, installs vlttng, composes a vlttng
# command pinned to the given lttng-tools commit (and, when lttng_ust_commit
# is given, the given lttng-ust commit), chooses the virtenv path by device
# type (kvm vs x86/tmp), then appends the vlttng invocation, a
# /root/lttngvenv symlink, and a sync to the command list.
241 def get_env_setup_cmd(build_device
, lttng_tools_commit
, lttng_ust_commit
=None):
242 command
= OrderedDict({
243 'command': 'lava_command_run',
246 'git clone https://github.com/frdeso/syscall-bench-it.git bm',
247 'pip3 install vlttng',
253 vlttng_cmd
= 'vlttng --jobs=16 --profile urcu-master' \
254 ' --profile babeltrace-stable-1.4 ' \
255 ' --profile lttng-tools-master' \
256 ' --override projects.lttng-tools.checkout='+lttng_tools_commit
+ \
257 ' --profile lttng-tools-no-man-pages'
259 if lttng_ust_commit
is not None:
260 vlttng_cmd
+= ' --profile lttng-ust-master ' \
261 ' --override projects.lttng-ust.checkout='+lttng_ust_commit
+ \
262 ' --profile lttng-ust-no-man-pages'
265 if build_device
in 'kvm':
266 virtenv_path
= '/root/virtenv'
268 virtenv_path
= '/tmp/virtenv'
270 vlttng_cmd
+= ' '+virtenv_path
272 command
['parameters']['commands'].append(vlttng_cmd
)
273 command
['parameters']['commands'].append('ln -s '+virtenv_path
+' /root/lttngvenv')
274 command
['parameters']['commands'].append('sync')
# NOTE(review): top-level script section from a mangled gitweb scrape.
# Builds the CLI: test type (-t), Jenkins job name (-j), kernel image path
# (-k), linux modules path (-km), lttng modules path (-lm), LAVA API key
# (-l), lttng-tools commit (-tc, required) and lttng-ust commit (-uc,
# optional), then parses argv into `args`.
280 parser
= argparse
.ArgumentParser(description
='Launch baremetal test using Lava')
281 parser
.add_argument('-t', '--type', required
=True)
282 parser
.add_argument('-j', '--jobname', required
=True)
283 parser
.add_argument('-k', '--kernel', required
=True)
284 parser
.add_argument('-km', '--kmodule', required
=True)
285 parser
.add_argument('-lm', '--lmodule', required
=True)
286 parser
.add_argument('-l', '--lava-key', required
=True)
287 parser
.add_argument('-tc', '--tools-commit', required
=True)
288 parser
.add_argument('-uc', '--ust-commit', required
=False)
289 args
= parser
.parse_args()
# NOTE(review): maps --type to a TestType enum member (TestType itself is not
# visible in this scrape -- presumably defined in the missing top-of-file
# lines; verify upstream). Original lines 295 (`else:`) and 297+ (the exit
# after the unrecognized-type message) are missing from the scrape.
291 if args
.type in 'benchmarks':
292 test_type
= TestType
.benchmarks
293 elif args
.type in 'tests':
294 test_type
= TestType
.tests
296 print('argument -t/--type {} unrecognized. Exiting...'.format(args
.type))
# NOTE(review): creates the LAVA job `j` per test type: benchmarks run on the
# 'x86' device with an x86 deploy command, tests run on the 'kvm' device with
# a kvm deploy command; both then get the boot command appended. Original
# line 305 (likely the blank/else between the branches) is missing.
299 if test_type
is TestType
.benchmarks
:
300 j
= create_new_job(args
.jobname
, build_device
='x86')
301 j
['actions'].append(get_deploy_cmd_x86(args
.jobname
, args
.kernel
, args
.kmodule
, args
.lmodule
))
302 elif test_type
is TestType
.tests
:
303 j
= create_new_job(args
.jobname
, build_device
='kvm')
304 j
['actions'].append(get_deploy_cmd_kvm(args
.jobname
, args
.kernel
, args
.kmodule
, args
.lmodule
))
306 j
['actions'].append(get_boot_cmd())
# NOTE(review): appends the per-type action pipeline (config, env setup,
# test/benchmark run, result submission). Original lines 316 (the exit after
# the missing --ust-commit message) and 321 (likely `else:` before the
# 'Unknown test type' assert) are missing from this scrape, so the assert
# below appears dedented from its real context.
308 if test_type
is TestType
.benchmarks
:
309 j
['actions'].append(get_config_cmd('x86'))
310 j
['actions'].append(get_env_setup_cmd('x86', args
.tools_commit
))
311 j
['actions'].append(get_benchmarks_cmd())
312 j
['actions'].append(get_results_cmd(stream_name
='benchmark-kernel'))
313 elif test_type
is TestType
.tests
:
314 if args
.ust_commit
is None:
315 print('Tests runs need -uc/--ust-commit options. Exiting...')
317 j
['actions'].append(get_config_cmd('kvm'))
318 j
['actions'].append(get_env_setup_cmd('kvm', args
.tools_commit
, args
.ust_commit
))
319 j
['actions'].append(get_tests_cmd())
320 j
['actions'].append(get_results_cmd(stream_name
='tests-kernel'))
322 assert False, 'Unknown test type'
# NOTE(review): submits the job over XML-RPC and polls its status. USERNAME
# and xmlrpclib are referenced but their definitions/imports are in the
# missing top-of-file lines (xmlrpclib implies Python 2 -- verify upstream).
# Original lines 333 (loop body start, presumably the 30s sleep referenced by
# the comment), 335, and 338-339 (the non-Complete exit path) are missing.
324 server
= xmlrpclib
.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME
, args
.lava_key
, HOSTNAME
))
326 jobid
= server
.scheduler
.submit_job(json
.dumps(j
))
328 print('Lava jobid:{}'.format(jobid
))
330 #Check the status of the job every 30 seconds
331 jobstatus
= server
.scheduler
.job_status(jobid
)['job_status']
332 while jobstatus
in 'Submitted' or jobstatus
in 'Running':
334 jobstatus
= server
.scheduler
.job_status(jobid
)['job_status']
336 print('Job ended with {} status.'.format(jobstatus
))
337 if jobstatus
not in 'Complete':
340 passed
, failed
=check_job_all_test_cases_state_count(server
, jobid
)
342 print('With {} passed and {} failed Lava test cases.'.format(passed
, failed
))
344 if test_type
is TestType
.tests
:
345 print_test_output(server
, jobid
)
# NOTE(review): script entry-point guard; its body (original lines 353+,
# presumably a call into the code above refactored as main()) is missing from
# this scrape -- the page footer follows immediately. Restore from upstream.
352 if __name__
== "__main__":
This page took 0.037764 seconds and 3 git commands to generate.