jjb: lttng-modules: Add riscv64 to crossbuilds
[lttng-ci.git] / scripts / system-tests / system-trigger.groovy
index 394227cb9645d51337b88aea9ad32a9bc980b967..6b1028d2a4cd07b1189c04a73f16ed252e5fbc31 100644 (file)
@@ -162,52 +162,37 @@ class VanillaKVersion implements Comparable<VanillaKVersion> {
   }
 }
 
-class RunConfiguration {
-  def linuxBranch
-  def linuxTagId
-  def lttngBranch
-  def lttngModulesCommitId
-  def lttngToolsCommitId
-  def lttngUstCommitId
-  RunConfiguration(linuxBranch, linuxTagId, lttngBranch, lttngToolsCommitId,
-                  lttngModulesCommitId, lttngUstCommitId) {
-    this.linuxBranch = linuxBranch
-    this.linuxTagId = linuxTagId
-    this.lttngBranch = lttngBranch
-    this.lttngModulesCommitId = lttngModulesCommitId
-    this.lttngToolsCommitId = lttngToolsCommitId
-    this.lttngUstCommitId = lttngUstCommitId
-  }
-
-  String toString() {
-    return "${this.linuxBranch}:{${this.linuxTagId}}, ${this.lttngBranch}:{${this.lttngModulesCommitId}, ${this.lttngToolsCommitId}, ${this.lttngUstCommitId}}"
+// Save the hashmap containing all the jobs and their status to disk. We can do
+// that because this job is configured to always run on the master node on
+// Jenkins.
+def SaveCurrentJobsToWorkspace = { currentJobs, ondiskpath->
+  try {
+    File myFile = new File(ondiskpath);
+    myFile.createNewFile();
+    def out = new ObjectOutputStream(new FileOutputStream(ondiskpath))
+    out.writeObject(currentJobs)
+    out.close()
+  } catch (e) {
+    println("Failed to save previous Git object IDs to disk." + e);
   }
 }
 
-def LoadPreviousIdsFromWorkspace = { ondiskpath ->
-  def previousIds = []
+// Load the hashmap containing all the jobs and their last status from disk.
+// This is possible because this job is configured to always run on the master
+// node on Jenkins.
+def LoadPreviousJobsFromWorkspace = { ondiskpath ->
+  def previousJobs = [:]
   try {
     File myFile = new File(ondiskpath);
     def input = new ObjectInputStream(new FileInputStream(ondiskpath))
-    previousIds = input.readObject()
+    previousJobs = input.readObject()
     input.close()
-  } catch (all) {
-    println("Failed to load previous ids from disk.")
+  } catch (e) {
+    println("Failed to load previous runs from disk." + e);
   }
-  return previousIds
+  return previousJobs
 }
 
-def saveCurrentIdsToWorkspace = { currentIds, ondiskpath ->
-  try {
-    File myFile = new File(ondiskpath);
-    myFile.createNewFile();
-    def out = new ObjectOutputStream(new FileOutputStream(ondiskpath))
-    out.writeObject(currentIds)
-    out.close()
-  } catch (all) {
-    println("Failed to save previous ids from disk.")
-  }
-}
 
 def GetHeadCommits = { remoteRepo, branchesOfInterest ->
   def remoteHeads = [:]
@@ -278,43 +263,70 @@ def GetLastTagIds = { remoteRepo, branchesOfInterest ->
   return remoteLastTagCommit
 }
 
-def CraftJobName = { jobType, runConfig ->
-  return "${jobType}_k${runConfig.linuxBranch}_l${runConfig.lttngBranch}"
+def CraftJobName = { jobType, linuxBranch, lttngBranch ->
+  return "${jobType}_k${linuxBranch}_l${lttngBranch}"
 }
 
-def LaunchJob = { jobName, runConfig ->
+def LaunchJob = { jobName, jobInfo ->
   def job = Hudson.instance.getJob(jobName)
+  if (job == null) {
+    println(String.format("Failed to find job by name '%s'", jobName))
+    return null;
+  }
   def params = []
   for (paramdef in job.getProperty(ParametersDefinitionProperty.class).getParameterDefinitions()) {
-    params += paramdef.getDefaultParameterValue();
+    // If there is a default value for this parameter, use it. Don't use empty
+    // default value parameters.
+    if (paramdef.getDefaultParameterValue() != null) {
+      params += paramdef.getDefaultParameterValue();
+    }
+  }
+
+  params.add(new StringParameterValue('LTTNG_TOOLS_COMMIT_ID', jobInfo['config']['toolsCommit']))
+  params.add(new StringParameterValue('LTTNG_MODULES_COMMIT_ID', jobInfo['config']['modulesCommit']))
+  params.add(new StringParameterValue('LTTNG_UST_COMMIT_ID', jobInfo['config']['ustCommit']))
+  params.add(new StringParameterValue('KERNEL_TAG_ID', jobInfo['config']['linuxTagID']))
+  def currBuild = job.scheduleBuild2(0, new Cause.UpstreamCause(build), new ParametersAction(params))
+
+  if (currBuild != null ) {
+    println("Launching job: ${HyperlinkNote.encodeTo('/' + job.url, job.fullDisplayName)}");
+  } else {
+    println("Job ${jobName} not found or deactivated.");
   }
 
-  params.add(new StringParameterValue('tools_commit_id', runConfig.lttngToolsCommitId))
-  params.add(new StringParameterValue('modules_commit_id', runConfig.lttngModulesCommitId))
-  params.add(new StringParameterValue('ust_commit_id', runConfig.lttngUstCommitId))
-  params.add(new StringParameterValue('kernel_tag_id', runConfig.linuxTagId))
-  job.scheduleBuild2(0, new Cause.UpstreamCause(build), new ParametersAction(params))
-  println "Launching job: ${HyperlinkNote.encodeTo('/' + job.url, job.fullDisplayName)}"
+  return currBuild
 }
 
-def jobTypes = ['baremetal_tests', 'vm_tests', 'baremetal_benchmarks']
 final String toolsRepo = "https://github.com/lttng/lttng-tools.git"
 final String modulesRepo = "https://github.com/lttng/lttng-modules.git"
 final String ustRepo = "https://github.com/lttng/lttng-ust.git"
 final String linuxRepo = "git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git"
 
-final String toolsOnDiskPath = build.getEnvironment(listener).get('WORKSPACE') + "/on-disk-tools-ref"
-final String modulesOnDiskPath = build.getEnvironment(listener).get('WORKSPACE') + "/on-disk-modules-ref"
-final String ustOnDiskPath = build.getEnvironment(listener).get('WORKSPACE') + "/on-disk-ust-ref"
-final String linuxOnDiskPath = build.getEnvironment(listener).get('WORKSPACE') + "/on-disk-linux-ref"
-
-def recentLttngBranchesOfInterest = ['master', 'stable-2.10', 'stable-2.9']
-def recentLinuxBranchesOfInterest = ['master', 'linux-4.9.y', 'linux-4.4.y']
-
-def legacyLttngBranchesOfInterest = ['stable-2.7']
-def legacyLinuxBranchesOfInterest = ['linux-3.18.y', 'linux-4.4.y']
-
-// Generate configurations of interest
+final String pastJobsPath = build.getEnvironment(listener).get('WORKSPACE') + "/pastjobs";
+
+def recentLttngBranchesOfInterest = ['master',
+  'stable-2.13',
+  'stable-2.12']
+def recentLinuxBranchesOfInterest = ['master',
+  'linux-6.1.y',
+  'linux-5.15.y',
+  'linux-5.10.y',
+  'linux-5.4.y',
+  'linux-4.19.y',
+  'linux-4.14.y',
+]
+
+def legacyLttngBranchesOfInterest = []
+def legacyLinuxBranchesOfInterest = [
+    'linux-5.14.y',
+    'linux-4.18.y',
+    'linux-4.12.y',
+    'linux-4.9.y',
+]
+
+def vmLinuxBranchesOfInterest = []
+
+// Generate configurations of interest.
 def configurationOfInterest = [] as Set
 
 recentLttngBranchesOfInterest.each { lttngBranch ->
@@ -330,133 +342,210 @@ legacyLttngBranchesOfInterest.each { lttngBranch ->
 }
 
 def lttngBranchesOfInterest = recentLttngBranchesOfInterest + legacyLttngBranchesOfInterest
-def linuxBranchesOfInterest = recentLinuxBranchesOfInterest + legacyLinuxBranchesOfInterest
+def linuxBranchesOfInterest = recentLinuxBranchesOfInterest + legacyLinuxBranchesOfInterest + vmLinuxBranchesOfInterest
 
-// For Linux branches, we look for new non-RC tags
+// For LTTng branches, we look for new commits.
 def toolsHeadCommits = GetHeadCommits(toolsRepo, lttngBranchesOfInterest)
 def modulesHeadCommits = GetHeadCommits(modulesRepo, lttngBranchesOfInterest)
 def ustHeadCommits = GetHeadCommits(ustRepo, lttngBranchesOfInterest)
 
-// For LTTng branches, we look for new commits
+// For Linux branches, we look for new non-RC tags.
 def linuxLastTagIds = GetLastTagIds(linuxRepo, linuxBranchesOfInterest)
 
-// Load previously build Linux tag ids
-def oldLinuxTags = LoadPreviousIdsFromWorkspace(linuxOnDiskPath) as Set
-
-// Load previously built LTTng commit ids
-def oldToolsHeadCommits = LoadPreviousIdsFromWorkspace(toolsOnDiskPath) as Set
-def oldModulesHeadCommits = LoadPreviousIdsFromWorkspace(modulesOnDiskPath) as Set
-def oldUstHeadCommits = LoadPreviousIdsFromWorkspace(ustOnDiskPath) as Set
-
-def newOldLinuxTags = oldLinuxTags
-def newOldToolsHeadCommits = oldToolsHeadCommits
-def newOldModulesHeadCommits = oldModulesHeadCommits
-def newOldUstHeadCommits = oldUstHeadCommits
-
-def canaryRunConfigs = [] as Set
-canaryRunConfigs.add(
-    ['v4.4.9', '1a1a512b983108015ced1e7a7c7775cfeec42d8c', 'v2.8.1','d11e0db', '7fd9215', '514a87f'] as RunConfiguration)
-
-def runConfigs = [] as Set
-
-// For each top of branch kernel tags that were not seen before, schedule one
-// job for each lttng/linux tracked configurations
-linuxLastTagIds.each { linuxTag ->
-  if (!oldLinuxTags.contains(linuxTag.value)) {
-    lttngBranchesOfInterest.each { lttngBranch ->
-      if (configurationOfInterest.contains([lttngBranch, linuxTag.key])) {
-        runConfigs.add([linuxTag.key, linuxTag.value,
-                    lttngBranch, toolsHeadCommits[lttngBranch],
-                    modulesHeadCommits[lttngBranch], ustHeadCommits[lttngBranch]]
-                    as RunConfiguration)
-
-        newOldLinuxTags.add(linuxTag.value)
-      }
+def CraftConfig = { linuxBr, lttngBr ->
+  def job = [:];
+  job['config'] = [:];
+  job['config']['linuxBranch'] = linuxBr;
+  job['config']['lttngBranch'] = lttngBr;
+  job['config']['linuxTagID'] = linuxLastTagIds[linuxBr];
+  job['config']['toolsCommit'] = toolsHeadCommits[lttngBr];
+  job['config']['modulesCommit'] = modulesHeadCommits[lttngBr];
+  job['config']['ustCommit'] = ustHeadCommits[lttngBr];
+  job['status'] = 'NOT_SET';
+  job['build'] = null;
+  return job;
+}
+
+// Check what type of jobs should be triggered.
+triggerJobName = build.project.getFullDisplayName();
+if (triggerJobName.contains("vm_tests")) {
+  jobType = 'vm_tests';
+  recentLttngBranchesOfInterest.each { lttngBranch ->
+    vmLinuxBranchesOfInterest.each { linuxBranch ->
+      configurationOfInterest.add([lttngBranch, linuxBranch])
     }
   }
+} else if (triggerJobName.contains("baremetal_tests")) {
+  jobType = 'baremetal_tests';
+}
+
+// Hashmap containing all the jobs, their configuration (commit id, etc.) and
+// their status (SUCCEEDED, FAILED, etc.). This Hashmap is made of basic strings
+// rather than objects and enums because strings are easily serializable.
+def currentJobs = [:];
+
+// Get an up to date view of all the branches of interest.
+configurationOfInterest.each { lttngBr, linuxBr  ->
+  def jobName = CraftJobName(jobType, linuxBr, lttngBr);
+  currentJobs[jobName] = CraftConfig(linuxBr, lttngBr);
 }
 
-// For each top of branch commits that were not seen before, schedule one job
-// for each lttng/linux tracked configurations
-toolsHeadCommits.each { toolsHead ->
-  if (!oldToolsHeadCommits.contains(toolsHead.value)) {
-    linuxLastTagIds.each { linuxTag ->
-      def lttngBranch = toolsHead.key
-      if (configurationOfInterest.contains([lttngBranch, linuxTag.key])) {
-        runConfigs.add([linuxTag.key, linuxTag.value,
-                    lttngBranch, toolsHeadCommits[lttngBranch],
-                    modulesHeadCommits[lttngBranch], ustHeadCommits[lttngBranch]]
-                    as RunConfiguration)
-
-        newOldToolsHeadCommits.add(toolsHead.value)
+// Add the canary job.
+def jobNameCanary = jobType + "_kcanary_lcanary";
+currentJobs[jobNameCanary] = [:];
+currentJobs[jobNameCanary]['config'] = [:];
+currentJobs[jobNameCanary]['config']['linuxBranch'] = 'v5.15.112';
+currentJobs[jobNameCanary]['config']['lttngBranch'] = 'v2.13.9';
+currentJobs[jobNameCanary]['config']['linuxTagID'] ='9d6bde853685609a631871d7c12be94fdf8d912e'; // v5.15.112
+currentJobs[jobNameCanary]['config']['toolsCommit'] = '2ff0385718ff894b3d0e06f3961334c20c5436f8' // v2.13.9
+currentJobs[jobNameCanary]['config']['modulesCommit'] = 'da1f5a264fff33fc5a9518e519fb0084bf1074af' // v2.13.9
+currentJobs[jobNameCanary]['config']['ustCommit'] = 'de624c20694f69702b42c5d47b5bcf692293a238' // v2.13.5
+currentJobs[jobNameCanary]['status'] = 'NOT_SET';
+currentJobs[jobNameCanary]['build'] = null;
+
+def pastJobs = LoadPreviousJobsFromWorkspace(pastJobsPath);
+
+def failedRuns = []
+def abortedRuns = []
+def isFailed = false
+def isAborted = false
+def ongoingJobs = 0;
+
+currentJobs.each { jobName, jobInfo ->
+  // If the job ran in the past, we check if the IDs changed since.
+  // Fetch past results only if the job is not of type canary.
+  if (!jobName.contains('_kcanary_lcanary') && pastJobs.containsKey(jobName) &&
+         build.getBuildVariables().get('FORCE_JOB_RUN') == 'false') {
+    pastJob = pastJobs[jobName];
+
+    // If the code has not changed, report the previous status.
+    if (pastJob['config'] == jobInfo['config']) {
+      // if the config has not changed, we keep it.
+      // if it's failed, we don't launch a new job and keep it failed.
+      jobInfo['status'] = pastJob['status'];
+      if (pastJob['status'] == 'FAILED' &&
+            build.getBuildVariables().get('FORCE_FAILED_JOB_RUN') == 'false') {
+        println("${jobName} as not changed since the last failed run. Don't run it again.");
+        // Mark the umbrella job for failure, but still run the jobs that have
+        // changed since the last run.
+        isFailed = true;
+        return;
+      } else if (pastJob['status'] == 'ABORTED') {
+        println("${jobName} as not changed since last aborted run. Run it again.");
+      } else if (pastJob['status'] == 'SUCCEEDED') {
+        println("${jobName} as not changed since the last successful run. Don't run it again.");
+        return;
       }
     }
   }
+
+  jobInfo['status'] = 'PENDING';
+  jobInfo['build'] = LaunchJob(jobName, jobInfo);
+  if (jobInfo['build'] != null) {
+    ongoingJobs += 1;
+  }
 }
 
-// For each top of branch commits that were not seen before, schedule one job
-// for each lttng/linux tracked configurations
-modulesHeadCommits.each { modulesHead ->
-  if (!oldModulesHeadCommits.contains(modulesHead.value)) {
-    linuxLastTagIds.each { linuxTag ->
-      def lttngBranch = modulesHead.key
-      if (configurationOfInterest.contains([lttngBranch, linuxTag.key])) {
-        runConfigs.add([linuxTag.key, linuxTag.value,
-                    lttngBranch, toolsHeadCommits[lttngBranch],
-                    modulesHeadCommits[lttngBranch], ustHeadCommits[lttngBranch]]
-                    as RunConfiguration)
-
-        newOldModulesHeadCommits.add(modulesHead.value)
-      }
-    }
+// Some jobs may have a null build immediately if LaunchJob
+// failed for some reason, those jobs can immediately be removed.
+def jobKeys = currentJobs.collect { jobName, jobInfo ->
+    return jobName;
+}
+jobKeys.each { k ->
+  if (currentJobs.get(k)['build'] == null) {
+    println(String.format("Removing job '%s' since build is null", k));
+    currentJobs.remove(k);
   }
 }
 
-// For each top of branch commits that were not seen before, schedule one job
-// for each lttng/linux tracked configurations
-ustHeadCommits.each { ustHead ->
-  if (!oldUstHeadCommits.contains(ustHead.value)) {
-    linuxLastTagIds.each { linuxTag ->
-      def lttngBranch = ustHead.key
-      if (configurationOfInterest.contains([lttngBranch, linuxTag.key])) {
-        runConfigs.add([linuxTag.key, linuxTag.value,
-                    lttngBranch, toolsHeadCommits[lttngBranch],
-                    modulesHeadCommits[lttngBranch], ustHeadCommits[lttngBranch]]
-                    as RunConfiguration)
-
-        newOldUstHeadCommits.add(ustHead.value)
+while (ongoingJobs > 0) {
+  currentJobs.each { jobName, jobInfo ->
+
+    if (jobInfo['status'] != 'PENDING') {
+      return;
+    }
+
+    jobBuild = jobInfo['build']
+
+    // The isCancelled() method checks if the run was cancelled before
+    // execution. We consider such a run as being aborted.
+    if (jobBuild.isCancelled()) {
+      println("${jobName} was cancelled before launch.")
+      isAborted = true;
+      abortedRuns.add(jobName);
+      ongoingJobs -= 1;
+      jobInfo['status'] = 'ABORTED'
+      // Invalidate the build field, as it's not serializable and we don't need
+      // it anymore.
+      jobInfo['build'] = null;
+    } else if (jobBuild.isDone()) {
+
+      jobExitStatus = jobBuild.get();
+
+      // Invalidate the build field, as it's not serializable and we don't need
+      // it anymore.
+      jobInfo['build'] = null;
+      println("${jobExitStatus.fullDisplayName} completed with status ${jobExitStatus.result}.");
+
+      // If the job didn't succeed, add its name to the right list so it can
+      // be printed at the end of the execution.
+      ongoingJobs -= 1;
+      switch (jobExitStatus.result) {
+      case Result.ABORTED:
+        isAborted = true;
+        abortedRuns.add(jobName);
+        jobInfo['status'] = 'ABORTED'
+        break;
+      case Result.FAILURE:
+        isFailed = true;
+        failedRuns.add(jobName);
+        jobInfo['status'] = 'FAILED'
+        break;
+      case Result.SUCCESS:
+        jobInfo['status'] = 'SUCCEEDED'
+        break;
+      default:
+        break;
       }
     }
   }
-}
 
-// Save the tag and commit IDs scheduled in the past and during this run to the workspace
-saveCurrentIdsToWorkspace(newOldLinuxTags, linuxOnDiskPath)
-saveCurrentIdsToWorkspace(newOldToolsHeadCommits, toolsOnDiskPath)
-saveCurrentIdsToWorkspace(newOldModulesHeadCommits, modulesOnDiskPath)
-saveCurrentIdsToWorkspace(newOldUstHeadCommits, ustOnDiskPath)
-
-// Launch jobs
-println("Schedule canary jobs once a day")
-canaryRunConfigs.each { config ->
-  jobTypes.each { type ->
-    LaunchJob(type + '_canary', config)
+  // Sleep before the next iteration.
+  try {
+    Thread.sleep(30000)
+  } catch(e) {
+    if (e in InterruptedException) {
+      build.setResult(hudson.model.Result.ABORTED)
+      throw new InterruptedException()
+    } else {
+      throw(e)
+    }
   }
 }
 
-if (runConfigs.size() > 0) {
-  println("Schedule jobs because of code changes.")
-  runConfigs.each { config ->
-    jobTypes.each { type ->
-      LaunchJob(CraftJobName(type, config), config);
-    }
+// All jobs are done running. Save their exit status to disk.
+SaveCurrentJobsToWorkspace(currentJobs, pastJobsPath);
 
-    // Jobs to run only on master branchs of both linux and lttng
-    if (config.linuxBranch.contains('master') &&
-        config.lttngBranch.contains('master')) {
-      LaunchJob(CraftJobName('vm_tests_fuzzing', config), config)
-    }
+// Get log of failed runs.
+if (failedRuns.size() > 0) {
+  println("Failed job(s):");
+  for (failedRun in failedRuns) {
+    println("\t" + failedRun)
+  }
+}
+
+// Get log of aborted runs.
+if (abortedRuns.size() > 0) {
+  println("Cancelled job(s):");
+  for (cancelledRun in abortedRuns) {
+    println("\t" + cancelledRun)
   }
-} else {
-  println("No new commit or tags, nothing more to do.")
+}
+
+// Mark this build as Failed if at least one child build has failed, and mark
+// it as aborted if there was no failure but at least one job was aborted.
+if (isFailed) {
+  build.setResult(hudson.model.Result.FAILURE)
+} else if (isAborted) {
+  build.setResult(hudson.model.Result.ABORTED)
 }
This page took 0.041779 seconds and 4 git commands to generate.