diff --git a/analysis/framework.py b/analysis/framework.py
index 7538f34fcf27522cfab826918561fd6b13c94479..02de96ee6df33f5ad0c091a798b0ccda25767297 100644
--- a/analysis/framework.py
+++ b/analysis/framework.py
@@ -175,7 +175,7 @@ class HTCondorWorkflow(law.contrib.htcondor.HTCondorWorkflow, HTCondorRemoteStor
     htcondor_user_proxy = luigi.Parameter()
     htcondor_remote_job = luigi.BoolParameter(default=True)
     htcondor_request_cpus = luigi.Parameter()
-    htcondor_use_cpus = luigi.Parameter()
+    htcondor_use_cpus = SpaceSeparatedListParameter(cls=luigi.Parameter)
     htcondor_universe = luigi.Parameter()
     htcondor_walltime = luigi.Parameter()
     # Set default for new luigi parameter htcondor_request_disk
@@ -212,6 +212,7 @@ class HTCondorWorkflow(law.contrib.htcondor.HTCondorWorkflow, HTCondorRemoteStor
         config.custom_content.append(("x509userproxy", self.htcondor_user_proxy))
         config.custom_content.append(("+RemoteJob", self.htcondor_remote_job))
         config.custom_content.append(("request_cpus", self.htcondor_request_cpus))
+        # config.custom_content.append(("request_cpus", self.htcondor_request_cpus[self.branch_data['index']]))
         config.custom_content.append(("universe", self.htcondor_universe))
         config.custom_content.append(("+RequestWalltime", self.htcondor_walltime))
         config.custom_content.append(("RequestDisk", self.htcondor_request_disk))
diff --git a/analysis/tasks/Warmup.py b/analysis/tasks/Warmup.py
index 07c800efea9aa64d53392019807815040ce745e1..fd246da14dcd79a5efc36445f4141beba3646d76 100644
--- a/analysis/tasks/Warmup.py
+++ b/analysis/tasks/Warmup.py
@@ -34,6 +34,7 @@ class Warmup(Task, TarballExtractionMixin, HTCondorWorkflow, law.LocalWorkflow):
     starting_seed = luigi.IntParameter()
     warmup_events = SpaceSeparatedListParameter(cls=luigi.IntParameter)
     warmup_iterations = SpaceSeparatedListParameter(cls=luigi.IntParameter)
+    multi_channel = SpaceSeparatedListParameter(cls=luigi.Parameter)

     NNLOJET_RUNMODE = 'warmup'
     NNLOJET_UNITPHASE = '! UNIT_PHASE'
@@ -55,6 +56,7 @@ class Warmup(Task, TarballExtractionMixin, HTCondorWorkflow, law.LocalWorkflow):
                 'seed': i + self.starting_seed,
                 'events': self.warmup_events[i],
                 'iterations': self.warmup_iterations[i],
+                'multi_channel': self.multi_channel[i],
             }
         return branchmap

@@ -134,6 +136,7 @@ class Warmup(Task, TarballExtractionMixin, HTCondorWorkflow, law.LocalWorkflow):
             'seed': self.branch_data['seed'],
             'iterations': self.branch_data['iterations'],
             'events': self.branch_data['events'],
+            'multi_channel': self.branch_data['multi_channel'],
             'runmode': self.NNLOJET_RUNMODE,
             'unit_phase': self.NNLOJET_UNITPHASE
         }
@@ -143,7 +146,8 @@ class Warmup(Task, TarballExtractionMixin, HTCondorWorkflow, law.LocalWorkflow):
         # KR: With hyperthreading it might be necessary to request more cpus on a node than one wants to use for the calculation!
         # --> Differentiate between request and use CPUs!
         # os.environ['OMP_NUM_THREADS'] = self.htcondor_request_cpus
-        os.environ['OMP_NUM_THREADS'] = self.htcondor_use_cpus
+        logger.debug("Setting no. of threads to use: self.htcondor_use_cpus[index] = {}".format(self.htcondor_use_cpus[self.branch_data['index']]))
+        os.environ['OMP_NUM_THREADS'] = self.htcondor_use_cpus[self.branch_data['index']]

         logger.debug("Starting NNLOJET with runcard: %s", runfile)
diff --git a/analysis/util.py b/analysis/util.py
index 70e9cfd2442aaf17888670a578ad6b6322d6eaec..8583779d311a9763389917068bc86bb13fa5272a 100644
--- a/analysis/util.py
+++ b/analysis/util.py
@@ -57,7 +57,7 @@ def createRuncard(baseRuncard, params):
         # '@RESET_VEGAS_GRID@' : params['reset_vegas_grid'],
         # '@LIPS_REDUCE@' : params['lips_reduce'],
         # '@ANGULAR_AVERAGE@' : params['angular_average'],
-        # '@MULTI_CHANNEL@' : params['multi_channel'],
+        '@MULTI_CHANNEL@' : params['multi_channel'],
         '@EVENTS@' : params['events'],
         '@ITERATIONS@' : params['iterations']
     }
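
Note on the parameter handling above: SpaceSeparatedListParameter (already used for warmup_events and warmup_iterations, and now for htcondor_use_cpus and multi_channel) is defined elsewhere in the repository and is not part of this diff. The sketch below is only an illustration of the behaviour the diff assumes, namely that the parameter parses a space-separated string into a per-branch list; names and details are illustrative, not the project's actual implementation.

import luigi

class SpaceSeparatedListParameter(luigi.Parameter):
    """Sketch: parse a space-separated string into a list of values,
    converting each token with the element parameter class given via `cls`
    (e.g. "100000 200000" with cls=luigi.IntParameter -> [100000, 200000])."""

    def __init__(self, cls=luigi.Parameter, *args, **kwargs):
        super(SpaceSeparatedListParameter, self).__init__(*args, **kwargs)
        self._element_param = cls()  # used only for its parse()/serialize()

    def parse(self, s):
        # split the command-line/config value on whitespace, parse per element
        return [self._element_param.parse(token) for token in s.split()]

    def serialize(self, values):
        # join back into the space-separated string representation
        return " ".join(self._element_param.serialize(v) for v in values)

With such a parameter, the per-branch lookups in the diff (self.multi_channel[i] in the branch map, self.htcondor_use_cpus[self.branch_data['index']] in run) simply index into the parsed list, one entry per workflow branch.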