triggers:
- pollscm:
- cron: "@hourly"
+ cron: "H * * * 1-5"
logrotate:
numToKeep: 10
- 'kvm-tests'
triggers:
- pollscm:
- cron: "@hourly"
+ cron: "H * * * 1-5"
logrotate:
numToKeep: 10
recipients: 'francis.deslauriers@efficios.com'
- ircbot:
strategy: new-failure-and-fixed
+ message-type: summary
channels:
- name: '#lttng'
- workspace-cleanup
- 'baremetal-tests'
triggers:
- pollscm:
- cron: "@hourly"
+ cron: "H * * * 1-5"
logrotate:
numToKeep: 10
recipients: 'francis.deslauriers@efficios.com'
- ircbot:
strategy: new-failure-and-fixed
+ message-type: summary
channels:
- name: '#lttng'
- workspace-cleanup
name: baremetal_tests_k{kversion}_l{lttngversion}_canary
defaults: baremetal_tests
triggers:
- - timed: "@daily"
+ - timed: "0 1 * * 1-5"
- job-template:
name: vm_tests_k{kversion}_l{lttngversion}_canary
defaults: vm_tests
triggers:
- - timed: "@daily"
+ - timed: "0 1 * * 1-5"
- job-template:
name: baremetal_benchmarks_k{kversion}_l{lttngversion}_canary
defaults: baremetal_benchmarks
triggers:
- - timed: "@daily"
+ - timed: "0 1 * * 1-5"
- job-template:
name: vm_tests_kmainline_t{toolsversion}_m{modulesversion}_u{ustversion}
defaults: vm_tests
triggers:
- pollscm:
- cron: "@daily"
+ cron: "0 1 * * 1-5"
wrappers:
- workspace-cleanup
- timestamps
defaults: baremetal_tests
triggers:
- pollscm:
- cron: "@daily"
+ cron: "0 1 * * 1-5"
wrappers:
- workspace-cleanup
- timestamps
defaults: baremetal_benchmarks
triggers:
- pollscm:
- cron: "@daily"
+ cron: "0 1 * * 1-5"
wrappers:
- workspace-cleanup
- timestamps
concurrent: true
logrotate:
- numToKeep: 50
+ numToKeep: 100
node: 'x86-64'
wrappers:
--- /dev/null
+metadata:
+ format: Lava-Test Test Definition 1.0
+ name: benchmark-syscall-failing-ioctl
+ description: "Perform syscall tracing benchmark of failing ioctl"
+params:
+ JENKINS_JOBNAME: "default jobname"
+
+install:
+ deps:
+ - python3-pandas
+ - python3-numpy
+ git-repos:
+ - url: https://github.com/frdeso/syscall-bench-it.git
+ destination: benchmarks
+ branch: master
+  - url: https://github.com/lttng/lttng-ci
+    destination: ci
+    branch: master
+ steps:
+ - export TMPDIR="/tmp"
+ - ulimit -c unlimited
+ - mkdir -p coredump
+ - echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
+run:
+ steps:
+ - source /root/lttngvenv/activate
+ - cd benchmarks
+ - lava-test-case build-benchmarks --shell "make"
+ - lava-test-case run-benchmarks --shell "./run.sh failing-ioctl sys_ioctl"
+ - lava-test-case-attach run-benchmarks "./results.csv"
+ - cd -
+ - cd ci
+ - python3 ./scripts/lttng-baremetal-tests/parse-results.py ../benchmarks/results.csv
+ - mv ./processed_results.csv ../processed_results_ioctl.csv
+ - cd -
+ - tar czf coredump.tar.gz coredump
+ - lava-test-case-attach run-benchmarks coredump.tar.gz
+ - lava-test-case-attach run-benchmarks "./processed_results_ioctl.csv"
--- /dev/null
+metadata:
+ format: Lava-Test Test Definition 1.0
+ name: benchmark-syscall-failing-open-enoent
+ description: "Perform syscall tracing benchmark of failing open-enoent"
+params:
+ JENKINS_JOBNAME: "default jobname"
+
+install:
+ deps:
+ - python3-pandas
+ - python3-numpy
+ git-repos:
+ - url: https://github.com/frdeso/syscall-bench-it.git
+ destination: benchmarks
+ branch: master
+ - url: https://github.com/lttng/lttng-ci
+ destination: ci
+ branch: master
+ steps:
+ - export TMPDIR="/tmp"
+ - ulimit -c unlimited
+ - mkdir -p coredump
+ - echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
+run:
+ steps:
+ - source /root/lttngvenv/activate
+ - cd benchmarks
+ - lava-test-case build-benchmarks --shell "make"
+ - lava-test-case run-benchmarks --shell "./run.sh failing-open-enoent sys_open"
+ - lava-test-case-attach run-benchmarks "./results.csv"
+ - cd -
+ - cd ci
+ - python3 ./scripts/lttng-baremetal-tests/parse-results.py ../benchmarks/results.csv
+ - mv ./processed_results.csv ../processed_results_open_enoent.csv
+ - cd -
+ - tar czf coredump.tar.gz coredump
+ - lava-test-case-attach run-benchmarks coredump.tar.gz
+ - lava-test-case-attach run-benchmarks "./processed_results_open_enoent.csv"
+
--- /dev/null
+metadata:
+ format: Lava-Test Test Definition 1.0
+ name: benchmark-raw-syscall-getpid
+ description: "Perform syscall tracing benchmark of the raw syscall getpid"
+params:
+ JENKINS_JOBNAME: "default jobname"
+
+install:
+ deps:
+ - python3-pandas
+ - python3-numpy
+ git-repos:
+ - url: https://github.com/frdeso/syscall-bench-it.git
+ destination: benchmarks
+ branch: master
+ - url: https://github.com/lttng/lttng-ci
+ destination: ci
+ branch: master
+ steps:
+ - export TMPDIR="/tmp"
+ - ulimit -c unlimited
+ - mkdir -p coredump
+ - echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
+run:
+ steps:
+ - source /root/lttngvenv/activate
+ - cd benchmarks
+ - lava-test-case build-benchmarks --shell "make"
+ - lava-test-case run-benchmarks --shell "./run.sh raw-syscall-getpid sys_getpid"
+ - lava-test-case-attach run-benchmarks "./results.csv"
+ - cd -
+ - cd ci
+ - python3 ./scripts/lttng-baremetal-tests/parse-results.py ../benchmarks/results.csv
+ - mv ./processed_results.csv ../processed_results_raw_syscall_getpid.csv
+ - cd -
+ - tar czf coredump.tar.gz coredump
+ - lava-test-case-attach run-benchmarks coredump.tar.gz
+ - lava-test-case-attach run-benchmarks "./processed_results_raw_syscall_getpid.csv"
plots = [baseline, lttng, one_thr, two_thr, four_thr, eight_thr, sixteen_thr]
- title='Meantime per syscalls for {} testcase'.format(graph_type)
+ title='Meantime per event for {} testcase'.format(graph_type)
# Create a axe object for each sub-plots
f, arrax = plt.subplots(len(plots), sharex=True, figsize=(16, 25))
ax.set_ylim(0)
ax.grid()
ax.set_xlabel('Jenkins Build ID')
- ax.set_ylabel('Meantime per syscall [us]')
+ ax.set_ylabel('Meantime per event [us]')
ax.xaxis.set_major_locator(MaxNLocator(integer=True, nbins=30))
def check_job_all_test_cases_state_count(server, job):
content = get_job_bundle_content(server, job)
+    # FIXME: Those tests are part of the boot actions and fail randomly but
+    # don't affect the behaviour of the tests. We should update our Lava
+ # installation and try to reproduce it. This error was encountered on
+ # Ubuntu 16.04.
+ tests_known_to_fail=['mount', 'df', 'ls', 'ip', 'wait_for_test_image_prompt']
+
passed_tests=0
failed_tests=0
for run in content['test_runs']:
if 'test_case_id' in result :
if result['result'] in 'pass':
passed_tests+=1
- elif result['test_case_id'] in 'wait_for_test_image_prompt':
- # FIXME:This test is part of the boot action and fails
- # randomly but doesn't affect the behaviour of the tests.
- # No reply on the Lava IRC channel yet. We should update
- # our Lava installation and try to reproduce it. This error
- # was encountered ont the KVM trusty image only. Not seen
- # on Xenial at this point.
+ elif result['test_case_id'] in tests_known_to_fail:
pass
else:
failed_tests+=1
def fetch_benchmark_results(server, job):
content = get_job_bundle_content(server, job)
testcases = ['processed_results_close.csv',
+ 'processed_results_ioctl.csv',
'processed_results_open_efault.csv',
+ 'processed_results_open_enoent.csv',
'processed_results_dup_close.csv',
+ 'processed_results_raw_syscall_getpid.csv',
'processed_results_lttng_test_filter.csv']
# The result bundle is a large JSON containing the results of every testcase
packages=['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip', \
'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev', \
'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev', \
- 'libnuma-dev']
+ 'libnuma-dev', 'python3-dev']
command = OrderedDict({
'command': 'lava_command_run',
'parameters': {
'revision': 'master',
'testdef': 'lava/baremetal-tests/failing-close.yml'
},
+ {
+ 'git-repo': 'https://github.com/lttng/lttng-ci.git',
+ 'revision': 'master',
+ 'testdef': 'lava/baremetal-tests/failing-ioctl.yml'
+ },
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
'revision': 'master',
'testdef': 'lava/baremetal-tests/success-dup-close.yml'
},
+ {
+ 'git-repo': 'https://github.com/lttng/lttng-ci.git',
+ 'revision': 'master',
+ 'testdef': 'lava/baremetal-tests/raw-syscall-getpid.yml'
+ },
+ {
+ 'git-repo': 'https://github.com/lttng/lttng-ci.git',
+ 'revision': 'master',
+ 'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
+ },
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
})
vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
- ' --profile babeltrace-stable-1.4 ' \
+ ' --override projects.babeltrace.build-env.PYTHON=python3' \
+ ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
+ ' --profile babeltrace-stable-1.4' \
+ ' --profile babeltrace-python' \
' --profile lttng-tools-master' \
' --override projects.lttng-tools.checkout='+lttng_tools_commit + \
' --profile lttng-tools-no-man-pages'
# Copy the result files for each benchmark and metadata on storage server
$SCP_COMMAND ./processed_results_close.csv "$STORAGE_USER@$STORAGE_HOST:$RESULT_STORAGE_FOLDER/close.csv"
+$SCP_COMMAND ./processed_results_ioctl.csv "$STORAGE_USER@$STORAGE_HOST:$RESULT_STORAGE_FOLDER/ioctl.csv"
$SCP_COMMAND ./processed_results_open_efault.csv "$STORAGE_USER@$STORAGE_HOST:$RESULT_STORAGE_FOLDER/open-efault.csv"
+$SCP_COMMAND ./processed_results_open_enoent.csv "$STORAGE_USER@$STORAGE_HOST:$RESULT_STORAGE_FOLDER/open-enoent.csv"
$SCP_COMMAND ./processed_results_dup_close.csv "$STORAGE_USER@$STORAGE_HOST:$RESULT_STORAGE_FOLDER/dup-close.csv"
$SCP_COMMAND ./processed_results_lttng_test_filter.csv "$STORAGE_USER@$STORAGE_HOST:$RESULT_STORAGE_FOLDER/lttng-test-filter.csv"
+$SCP_COMMAND ./processed_results_raw_syscall_getpid.csv "$STORAGE_USER@$STORAGE_HOST:$RESULT_STORAGE_FOLDER/raw_syscall_getpid.csv"
$SCP_COMMAND ./metadata.csv "$STORAGE_USER@$STORAGE_HOST:$RESULT_STORAGE_FOLDER/metadata.csv"