LAVA: Upload results to obj.internal.efficios.com
[lttng-ci.git] / scripts / system-tests / lava2-submit.py
1 #!/usr/bin/python
2 # Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
3 #
4 # This program is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU General Public License as published by
6 # the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16
17 import argparse
18 import base64
19 import json
20 import os
21 import random
22 import sys
23 import time
24 import yaml
25 import xmlrpc.client
26 import pprint
27
28 from jinja2 import Environment, FileSystemLoader, meta
29
# Credentials/endpoint used for the LAVA master XML-RPC API.
# The matching API token is read from LAVA2_JENKINS_TOKEN at runtime.
USERNAME = 'lava-jenkins'
HOSTNAME = 'lava-master-02.internal.efficios.com'
32
class TestType():
    """Enumeration of the supported system-test job types.

    The ``values`` mapping translates the ``-t/--type`` command-line
    argument into its numeric identifier.
    """
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3
    kvm_fuzzing_tests = 4
    values = {
        'baremetal-benchmarks': baremetal_benchmarks,
        'baremetal-tests': baremetal_tests,
        'kvm-tests': kvm_tests,
        # Historical typo ('fuzzin') kept so existing Jenkins jobs keep
        # working; the correctly-spelled key is accepted as well.
        'kvm-fuzzin-tests': kvm_fuzzing_tests,
        'kvm-fuzzing-tests': kvm_fuzzing_tests,
    }
44
class DeviceType():
    """Map symbolic device names to LAVA device-type identifiers."""
    x86 = 'x86'
    kvm = 'qemu'
    # Lookup table from the symbolic name to the LAVA device-type string.
    values = {'x86': x86, 'kvm': kvm}
52
def get_packages():
    """Return the Debian packages to install on the test image."""
    base_tools = ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip']
    build_deps = ['libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
                  'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev',
                  'libnuma-dev', 'python3-dev', 'swig', 'stress']
    return base_tools + build_deps
58
def get_job_bundle_content(server, job):
    """Fetch and deserialize the results bundle attached to a LAVA job.

    Looks up the bundle SHA-1 via the scheduler API, downloads the bundle
    from the dashboard and returns its JSON content as a Python object.
    Re-raises any XML-RPC fault after logging it.
    """
    try:
        sha1 = server.scheduler.job_status(str(job))['bundle_sha1']
        raw_bundle = server.dashboard.get(sha1)
    except xmlrpc.client.Fault as fault:
        print('Error while fetching results bundle', fault.faultString)
        raise
    return json.loads(raw_bundle['content'])
68
69 # Parse the results bundle to see the run-tests testcase
70 # of the lttng-kernel-tests passed successfully
def check_job_all_test_cases_state_count(server, job):
    """Tally the pass/fail counts of the test cases in a job's bundle.

    :param server: XML-RPC proxy to the LAVA master
    :param job: LAVA job id
    :return: (passed_tests, failed_tests) tuple; failures from the
        known-flaky boot-action tests are not counted
    """
    content = get_job_bundle_content(server, job)

    # FIXME: Those tests are part of the boot actions and fail randomly but
    # don't affect the behaviour of the tests. We should update our LAVA
    # installation and try to reproduce it. This error was encountered on
    # Ubuntu 16.04.
    tests_known_to_fail = ['mount', 'df', 'ls', 'ip',
                           'wait_for_test_image_prompt']

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            # Entries without a test_case_id are not test cases.
            if 'test_case_id' not in result:
                continue
            # Exact comparison: the previous substring test
            # (result['result'] in 'pass') would also match values such
            # as 'as' or 'ss'.
            if result['result'] == 'pass':
                passed_tests += 1
            elif result['test_case_id'] in tests_known_to_fail:
                # Known flaky boot-action test case; ignore its failure.
                pass
            else:
                failed_tests += 1
    return (passed_tests, failed_tests)
92
93 # Get the benchmark results from the lava bundle
94 # save them as CSV files localy
def fetch_benchmark_results(server, job):
    """Extract the benchmark CSV result files from a job's bundle and
    save them, under their original names, in the current working
    directory."""
    content = get_job_bundle_content(server, job)
    result_files = {
        'processed_results_close.csv',
        'processed_results_ioctl.csv',
        'processed_results_open_efault.csv',
        'processed_results_open_enoent.csv',
        'processed_results_dup_close.csv',
        'processed_results_raw_syscall_getpid.csv',
        'processed_results_lttng_test_filter.csv',
    }

    # The result bundle is a large JSON document containing the results of
    # every testcase of the LAVA job as well as the files attached during
    # the run.  Walk it to find the base64 payloads of the benchmark
    # result files produced during the run.
    for run in content['test_runs']:
        # Only benchmark testcases carry the CSV attachments we want.
        if 'benchmark-' not in run['test_id']:
            continue
        for res in run.get('test_results', []):
            for attachment in res.get('attachments', []):
                if attachment['pathname'] not in result_files:
                    continue
                # Decode the base64 payload and write it to a file of
                # the same name in the current working directory.
                with open(attachment['pathname'], 'wb') as out:
                    out.write(base64.b64decode(attachment['content']))
123
124 # Parse the attachment of the testcase to fetch the stdout of the test suite
# Parse the attachment of the testcase to fetch the stdout of the test suite
def print_test_output(server, job):
    """Print the run-tests section of the job's stdout.log attachment.

    Scans the bundle for the 'lttng-kernel-test' run, decodes its
    stdout.log attachment and prints everything between the
    LAVA_SIGNAL_STARTTC and LAVA_SIGNAL_ENDTC markers of the
    'run-tests' test case.
    """
    content = get_job_bundle_content(server, job)
    # NOTE(review): 'found' is never read or updated below.
    found = False

    for run in content['test_runs']:
        # NOTE(review): substring membership ('x in y') rather than
        # equality — matches any test_id that is a substring of
        # 'lttng-kernel-test'.
        if run['test_id'] in 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] in 'stdout.log':

                    # Decode the base64 file and split on newlines to iterate
                    # on list.  str() of the bytes object yields a "b'...'"
                    # repr in which newlines appear as the two characters
                    # '\' 'n' — hence the literal '\\n' replacement below.
                    testoutput = str(base64.b64decode(bytes(attachment['content'], encoding='UTF-8')))

                    testoutput = testoutput.replace('\\n', '\n')

                    # Create a generator to iterate on the lines and keeping
                    # the state of the iterator across the two loops.
                    testoutput_iter = iter(testoutput.split('\n'))
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            # Inner loop consumes the SAME iterator, so it
                            # resumes right after the start marker.
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Print until we reach the end of the
                                    # section
                                    break

                            print('----- TEST SUITE OUTPUT END -----')
                            break
159
def get_vlttng_cmd(device, lttng_tools_commit, lttng_ust_commit=None):
    """Build the vlttng command that creates the LTTng virtualenv on the
    target device.

    :param device: a DeviceType value ('x86' or 'qemu')
    :param lttng_tools_commit: commit id to check out for lttng-tools
    :param lttng_ust_commit: optional commit id for lttng-ust; when None,
        the lttng-ust profiles are omitted from the command
    :return: full vlttng command string ending with the virtualenv path
        appropriate for the device type
    """

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
        ' --override projects.babeltrace.build-env.PYTHON=python3' \
        ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
        ' --profile babeltrace-stable-1.4' \
        ' --profile babeltrace-python' \
        ' --profile lttng-tools-master' \
        ' --override projects.lttng-tools.checkout=' + lttng_tools_commit + \
        ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master ' \
            ' --override projects.lttng-ust.checkout=' + lttng_ust_commit + \
            ' --profile lttng-ust-no-man-pages'

    # Use '==' rather than 'is': DeviceType members are plain strings and
    # identity comparison only works when the interpreter happens to
    # intern them.
    if device == DeviceType.kvm:
        # KVM guests run as root, so the virtualenv lives in /root.
        vlttng_path = '/root/virtenv'
    else:
        vlttng_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + vlttng_path

    return vlttng_cmd
184
def main():
    """Render a LAVA job from the Jinja2 template, submit it and wait
    for completion.

    Returns 0 on success (debug dry-run, or job Complete with no failed
    test cases) and -1 on any error; suitable for sys.exit().
    """
    nfsrootfs = "https://obj.internal.efficios.com/lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz"
    parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-id', '--build-id', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    parser.add_argument('-d', '--debug', required=False, action='store_true')
    args = parser.parse_args()

    if args.type not in TestType.values:
        print('argument -t/--type {} unrecognized.'.format(args.type))
        print('Possible values are:')
        for k in TestType.values:
            print('\t {}'.format(k))
        return -1

    lava_api_key = None
    if not args.debug:
        # The API token is only needed for an actual submission; debug
        # mode stops after printing the rendered job.
        try:
            lava_api_key = os.environ['LAVA2_JENKINS_TOKEN']
        except KeyError as e:
            print('LAVA2_JENKINS_TOKEN not found in the environment variable. Exiting...', e)
            return -1

    # Load the Jinja2 job template located next to this script.
    jinja_loader = FileSystemLoader(os.path.dirname(os.path.realpath(__file__)))
    jinja_env = Environment(loader=jinja_loader, trim_blocks=True,
                            lstrip_blocks=True)
    jinja_template = jinja_env.get_template('template_lava_job.jinja2')

    test_type = TestType.values[args.type]

    # Baremetal jobs run on physical x86 machines; everything else runs
    # inside a KVM guest.
    if test_type in [TestType.baremetal_benchmarks, TestType.baremetal_tests]:
        device_type = DeviceType.x86
        vlttng_path = '/tmp/virtenv'
    else:
        device_type = DeviceType.kvm
        vlttng_path = '/root/virtenv'

    vlttng_cmd = get_vlttng_cmd(device_type, args.tools_commit, args.ust_commit)

    # Rendering context handed to the Jinja2 template.
    context = dict()
    context['DeviceType'] = DeviceType
    context['TestType'] = TestType

    context['job_name'] = args.jobname
    context['test_type'] = test_type
    context['packages'] = get_packages()
    context['random_seed'] = random.randint(0, 1000000)
    context['device_type'] = device_type

    context['vlttng_cmd'] = vlttng_cmd
    context['vlttng_path'] = vlttng_path

    context['kernel_url'] = args.kernel
    context['nfsrootfs_url'] = nfsrootfs
    context['lttng_modules_url'] = args.lmodule
    context['jenkins_build_id'] = args.build_id

    context['kprobe_round_nb'] = 10

    render = jinja_template.render(context)

    print('Current context:')
    pprint.pprint(context, indent=4)
    print('Job to be submitted:')

    print(render)

    if args.debug:
        # Dry run: the job definition was printed, nothing is submitted.
        return 0

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(render)

    print('Lava jobid:{}'.format(jobid))
    print('Lava job URL: http://{}/scheduler/job/{}/log_file'.format(HOSTNAME, jobid))

    # Poll the status of the job every 30 seconds until it leaves the
    # Submitted/Running states.  Use exact comparisons: the previous
    # substring tests ('in') would also match partial status strings.
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    started_running = False
    while jobstatus in ('Submitted', 'Running'):
        if not started_running and jobstatus == 'Running':
            print('Job started running')
            started_running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    # Do not fetch result for now
    # if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
    #     print_test_output(server, jobid)
    # elif test_type is TestType.baremetal_benchmarks:
    #     fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1

    passed, failed = check_job_all_test_cases_state_count(server, jobid)
    print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

    # Fail the build if any (non-ignored) test case failed.
    return 0 if failed == 0 else -1
299
# Script entry point: propagate main()'s return code to the shell.
if __name__ == "__main__":
    sys.exit(main())
This page took 0.037275 seconds and 5 git commands to generate.