# luigi.cfg.template
[core]
no_lock = True
# Set local scheduler
local_scheduler = True
#default-scheduler-host = condorcentral.etp.kit.edu
#default-scheduler-port = 8082
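# For illustration only: to use a central luigi scheduler instead, uncomment the host/port lines above and set
#local_scheduler = False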
[scheduler]
retry_count = 0
retry_delay = 86400
[worker]
keep_alive = False
ping_interval = 20
wait_interval = 20
max_reschedules = 1
[DEFAULT]
# Switch my debugging on/off (not yet implemented in most tasks)
#my_debug = True
# Name of your analysis
name = fnl2412eff-fc-v2
# If known, set proper Rivet_Id for data comparison plots
rivetid = CMS_2013_I1208923
# NNLOJET process and job name (the jobname is $channel-$jobnameext), and technical cutoff with leading 'y'
process = 2jetfc
jobnameext = CMS7-ak07
cutoff = y1.00E-08
# NNLOJET channels
# Complete LIST to be used in production; serves to create a dictionary with settings per channel; all per-channel settings lists must have the same length!
allchannels = ["LO", "R", "V", "RRa", "RRb", "RV", "VV"]
# Default space-separated list of channels to be used in specific tasks; can also be given as a command-line option (see the illustration below).
# With coloured particles in final state (jets): 7 channels
channels = LO R V RRa RRb RV VV
# No coloured particle in final state (W,Z,H): 6 channels
# channels = LO R V RR RV VV
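# For illustration only (the exact invocation depends on the local law/luigi setup): as a command-line option this could look like
#   law run FastProd --channels "LO R V"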
# Grid merging setup as defined in combine.ini; check compatibility
final_tables = {
"NLO": ["LO", "R", "V"],
"NLO_only": ["R", "V"],
# Use the following lines when NNLO is ready to be merged in as well (don't forget to add/remove a comma above)
"NNLO_only": ["RRa", "RRb", "RV", "VV"],
"NNLO": ["LO", "R", "V", "RRa", "RRb", "RV", "VV"]
}
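# For illustration only: an NLO-only merging setup would keep just the first two entries (and drop the trailing comma);
# in any case the keys must stay compatible with the setup in combine.ini
#final_tables = {
#    "NLO": ["LO", "R", "V"],
#    "NLO_only": ["R", "V"]
#    }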
# List of all observables (APPLfast grid names for NNLOJET histograms)
observables = [
"fnl2412eff-fc-v2_ym0_m12",
"fnl2412eff-fc-v2_ym1_m12",
"fnl2412eff-fc-v2_ym2_m12",
"fnl2412eff-fc-v2_ym3_m12",
"fnl2412eff-fc-v2_ym4_m12"
]
# Default htcondor job submission configuration (modifiable for each task)
htcondor_accounting_group = cms.jet
htcondor_requirements = (TARGET.ProvidesCPU==true)
htcondor_remote_job = True
#htcondor_user_proxy = /tmp/x509up_u12010
htcondor_user_proxy = /home/rabbertz/.globus/x509_proxy
# Time in seconds
htcondor_walltime = 84000
htcondor_request_cpus = 1
htcondor_use_cpus = 1
# For all cores in total
htcondor_request_memory = 4096
htcondor_universe = docker
htcondor_docker_image = mschnepf/slc7-condocker
# Disk usage on WN (def = 1000000)
htcondor_request_disk = 2000000
# Create log files in htcondor jobs
transfer_logs = True
# Set tolerance for workflow success with failed branches
tolerance = 0
# Submit only missing htcondor workflow branches (should always be true)
only_missing = True
# Do not resubmit automatically (default is 5); important for debugging!
#retries = 3
retries = 0
# Bootstrap file to be sourced at beginning of htcondor jobs
bootstrap_file = bootstrap_NNLOJET_modules2_rev6591_bridge-0.0.46-WiP_202307.sh
# Local directories, defaults are 'grids', 'plots', 'tags', 'warmups'
# - unpacking and merging of grids in local directories $merge_dir/$name/[$channel|Combined]
#merge_dir = grids
# - production of plots in local directories $plots_dir/$name
#plots_dir = plots
# - storing tags to mark tasks as finished in separate local directories $tags_dir/[Combine|FnloCppread]
#tags_dir = tags
# - unpacking of NNLOJET and fastNLO warmups into local directories $warm_dir/[warmup|rewarmup|fastwarm|mergefastwarm]/$name
#warm_dir = warmups
# fastNLO cppread options for grid/table comparison to NNLOJET original
# Confer the NNLOJET run file
# PDF choice for production
pdf = NNPDF31_nnlo_as_0118
# Central scale choice for grid closure, cf. NNLOJET run file & nnlobridge setup [kScale1, kScale2, scale12, scale21, kProd = scale1*scale2]
# Either one scale for all or one per observable
scale = kScale1
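# For illustration only (hypothetical choice, presumably space-separated as for 'scalecombs' below): one scale per observable
#scale = kScale1 kScale1 kScale1 kScale1 kScale2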
# Central scale choice(s) for flex-table evaluation, cf. fastNLO enumerator EScaleFunctionalForm: "0"=kScale1, "1"=kScale2, "2"=kQuadraticSum, ..., "9"=kProd, ...
# For now one number for both, mur and muf; fixed-scale tables must have 0 here
scaleindex = 0
# Central plus scale variations, cf. NNLOJET run file & nnlobridge setup
# None: --> 1; fixed-scale: --> 7; flex-scale low pT: --> -6; flex-scale high pT: --> -12
# Either one number for all or one per observable
scalecombs = 1 1 1 1 1
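# For illustration only: a flex-scale low-pT production would instead use one '-6' per observable
#scalecombs = -6 -6 -6 -6 -6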
ascode = LHAPDF
norm = no
# Included in analysis.tar.gz
# Path to base runcard file
base_runcard = 2jetfc.fnl2412eff-fc-v2.run
# Local directory with all steering files
steer_dir = steeringfiles
#
# START of the ACTION
#
[Warmup]
# produced @ grid storage under Warmup
# Override some defaults for this task
# bootstrap_file =
# htcondor config
# If BWFORCLUSTER is too full
#htcondor_requirements = (TARGET.ProvidesCPU==true)
#htcondor_requirements = (TARGET.CloudSite=="blade")
htcondor_request_cpus = 1
htcondor_use_cpus = 1
#htcondor_requirements = (TARGET.CloudSite=="topas")
#htcondor_request_cpus = 42
#htcondor_use_cpus = 28
htcondor_requirements = (TARGET.CloudSite=="BWFORCLUSTER")
#htcondor_request_cpus = 20
#htcondor_use_cpus = 20
# for all cores in total
#htcondor_request_memory = 8000
# Uncomment the following line to use only the channels that change with jet size R, complementing another analysis
#channels = R RRa RRb RV
# NNLOJET event count and integration steps for every channel
# MUST be of the same length as channels!
# 1st test setup, <~ 5 minutes
# (main integration may have run separately ...;
#  see also the luigi_warmup_nemo and luigi_warmup_topas configs, if they exist, for the settings)
# First seed for the first branch, counting upwards
starting_seed = 0
warmup_events = 10000 10000 10000 1000 1000 1000 1000
warmup_iterations = 3 3 3 3 3 3 3
# time in seconds
htcondor_walltime = 3600
# Make warmup unusable without explicit editing to avoid reproducing 'lost' warmups on-the-fly!
# Such jobs would either run for a long time or be of very low quality.
# With seeds starting below zero, NNLOJET reports an error, but unfortunately with return code=0.
# This is captured in the Warmup task.
#starting_seed = -9
# Real warmup is from Joao Pires at git@gitlab.com:ayh/dijet_fits.git: results/CMS-dijet-7TeV-mu-mjj/NNLOJET_v1
[CopyWarmup]
# untar'ed to local storage under $warm_dir/warmup/$name
# Uncomment the following line to use only the channels that change with jet size R, complementing another analysis
#channels = R RRa RRb RV
# Up to NLO only
#channels = LO R V
[PlotVegasGrids]
# produced @ local storage under $plots_dir
# gnuplotted into local storage $warm_dir/warmup/$name
# Needs gnuplot version > 4.6 --> does not work on CentOS 7 portal machines
# Uncomment the following line to use only the channels that change with jet size R, complementing another analysis
#channels = R RRa RRb RV
# Up to NLO only
#channels = LO R V
[Rewarmup]
# produced @ grid storage under Rewarmup
# Override some defaults for this task
# bootstrap_file =
# htcondor config
#htcondor_requirements = (TARGET.CloudSite=="blade")
htcondor_request_cpus = 1
htcondor_use_cpus = 1
#htcondor_requirements = (TARGET.CloudSite=="topas")
#htcondor_request_cpus = 42
#htcondor_use_cpus = 28
htcondor_requirements = (TARGET.CloudSite=="BWFORCLUSTER")
#htcondor_request_cpus = 20
#htcondor_use_cpus = 20
# for all cores in total
htcondor_request_memory = 8000
# Uncomment the following line to use only the channels that change with jet size R, complementing another analysis
#channels = R RRa RRb RV
# NNLOJET event count and integration steps for every channel
# MUST be of the same length as channels!
# Test setup
# First seed for the first branch, counting upwards
starting_seed = 10
warmup_events = 1000 1000 1000 1000 1000 1000 1000
warmup_iterations = 3 3 3 3 3 3 3
# time in seconds
htcondor_walltime = 1800
# No rewarmup was run!
[CopyRewarmup]
# untar'ed to local storage under $warm_dir/rewarmup/$name
# Uncomment the following line to use only the channels that change with jet size R, complementing another analysis
#channels = R RRa RRb RV
[FastWarm]
# produced @ grid storage under FastWarm
# override some defaults for this task
# accept as complete at 95%
acceptance = 0.95
# htcondor config
htcondor_request_memory = 2000
# If BWFORCLUSTER is too full
#htcondor_requirements = (TARGET.ProvidesCPU==true)
# If ETP blades are too slow
htcondor_requirements = (TARGET.CloudSite=="BWFORCLUSTER")
# Everything fast
#htcondor_requirements = ((TARGET.CloudSite=="BWFORCLUSTER")||(TARGET.CloudSite=="schnepf")||(TARGET.CloudSite=="topas"))
# Up to 6h
#htcondor_requirements = ((TARGET.CloudSite=="schnepf")||(TARGET.CloudSite=="topas")||(TARGET.CloudSite=="forHLR2"))
# Uncomment the following line to use only the channels that change with jet size R, complementing another analysis
#channels = R RRa RRb RV
# NNLOJET event count and number of jobs for each channel
starting_seeds = 1000 2000 3000 4000 5000 6000 7000
# 1st test setup, <~ 5 minutes
fastwarm_events = 800 200 400 50 800 200 400
#fastwarm_events = 800000 200000 400000 50000 800000 200000 400000
#fastwarm_jobs = 1 1 1 1 1 1 1
fastwarm_jobs = 2 2 2 2 2 2 2
# time in seconds
htcondor_walltime = 180
# 2nd test setup, <~ 20-30 minutes (11.4, 14.5, 10.5, 11.7, 12.0, 14.1, 8.5 min)
#fastwarm_events = 5000000 2000000 4000000 500000 5000000 2000000 4000000
#fastwarm_jobs = 2 2 2 2 2 2 2
# time in seconds
#htcondor_walltime = 1800
# Final setup, ~22 hours (or 11 x 2)
#fastwarm_events = 250000000 90000000 250000000 25000000 250000000 100000000 300000000
#fastwarm_jobs = 20 20 20 20 20 20 20
# time in seconds
#htcondor_walltime = 84000
# Only test setup run; reuse merged fast warmup from fnl2412e
[CopyFastWarm]
# untar'ed to local storage under $warm_dir/fastwarm/$name
# Uncomment the following line to use only the channels that change with jet size R, complementing another analysis
#channels = R RRa RRb RV
[MergeFastWarm]
# produced @ grid storage under MergeFastWarm
# Reuse merged fast warmup from fnl2412e for fix- and flex-scale tables
[CopyMergeFastWarm]
# untar'ed to local storage under $warm_dir/mergefastwarm/$name
[FastProd]
# produced @ grid storage under FastProd
# Uncomment to define a subdirectory per channel in 'allchannels' to delimit the file numbers; default '.'
# fastprod_subdirs = LO R V ...
# override some defaults for this task
# accept as complete at 95%
acceptance = 0.95
# If BWFORCLUSTER is too full
#htcondor_requirements = (TARGET.ProvidesCPU==true)
#htcondor_requirements = (TARGET.CloudSite=="blade")
#htcondor_requirements = (TARGET.CloudSite=="topas")
# If ETP blades are too slow
htcondor_requirements = (TARGET.CloudSite=="BWFORCLUSTER")
# Everything fast
#htcondor_requirements = ((TARGET.CloudSite=="BWFORCLUSTER")||(TARGET.CloudSite=="schnepf")||(TARGET.CloudSite=="topas"))
#htcondor_request_memory = 7500
htcondor_request_memory = 4000
# Uncomment the following line to use only the channels that change with jet size R, complementing another analysis
#channels = R RRa RRb RV
#fastprod_subdirs = LO R . . . . .
# NNLOJET seeds, event count, and number of jobs for each channel
starting_seeds = 10000 20000 30000 40000 50000 60000 70000
# 1st test setup, ~20 min (14.2, 14.0, 12.0, 18.5, 21.8, 14.8, 14.1 min)
fastprod_events = 4000000 220000 500000 3300 4200 3100 100000
#fastprod_jobs = 1 1 1 1 1 1 1
fastprod_jobs = 2 2 2 2 2 2 2
# time in seconds
htcondor_walltime = 1800
# 2nd test setup, ~3-5 h (4.9, 3.7, 4.0, 3.9, 3.5, 4.2, 3.7 h)
#fastprod_events = 80000000 3500000 10000000 40000 400000 50000 1600000
#fastprod_jobs = 3 3 3 3 3 3 3
# time in seconds
#htcondor_walltime = 40000
# Final setup, ~22 h (21.8, 21.0, 21.0, 21.3, 20.9, 21.2, 20.9 h)
#fastprod_events = 360000000 20000000 55000000 225000 2500000 260000 9500000
#fastprod_jobs = 100 100 100 100 100 100 500
#fastprod_jobs = 100 100 100 200 100 200 500
#fastprod_events = 360000000 20000000 55000000 245000 2500000 280000 9500000
#fastprod_jobs = 100 100 100 1200 100 1200 500
#htcondor_walltime = 86000
[CopyTables]
# copied to local storage under $merge_dir/$name/CHANNEL, see above
# Uncomment the following line to use only the channels that change with jet size R, complementing another analysis
#channels = R RRa RRb RV
# Up to NLO only
#channels = LO R V
# accept as complete at 95%
acceptance = 0.95
# Uncomment the following line to have this task ignore the level
# of completeness of the previous required task (FastProd).
# In that case this task works on whatever is ready to be treated,
# but interrupts the chain of requirements in the workflow.
force = True
[FnloCppread]
# produced @ local storage under $merge_dir/$name/CHANNEL
# ATTENTION: Combine now requires FnloCppread instead of CopyTables so that all grids have been checked for readability beforehand!
# ATTENTION: Incomplete table availability with unlimited branches and multiple workers leads to a never-ending task
# --> Needs to be stopped/killed manually
# Uncomment the following line to use only the channels that change with jet size R, complementing another analysis
#channels = R RRa RRb RV
# Accept as complete at 95%
acceptance = 0.95
# Uncomment the following line to have this task ignore the level
# of completeness of the previous required task (CopyTables).
# In that case this task works on whatever is ready to be treated,
# but interrupts the chain of requirements in the workflow.
force = True
[Combine]
# produced @ local storage under $merge_dir/$name/Combined
# Always consumes only one worker, but the NNLOJET combine script uses multiple threads internally, as set below via the number of cores
# Always needs all channels up to some order LO, NLO, or NNLO
channels = LO R V RRa RRb RV VV
# Path to combine.ini config; check compatibility to desired order
combine_ini = combine.ini
# Update of only part of the contributions
#combine_ini = combine-Real.ini
#combine_ini = combine-RRaRV.ini
# Number of cores for NNLOJET combine script
cores = 20
[Recombine]
# produced @ local storage under $merge_dir/$name/Combined
# Produces the files for adding stat. uncertainties to the grids; benefits from multiple workers
# Needs all input channels up to some order plus combined channels as defined in combine_ini
channels = LO R V RRa RRb RV VV NLO NLO_only NNLO NNLO_only
# Path to combine.ini config; check compatibility to desired order
combine_ini = combine.ini
# Update of only part of the contributions
#combine_ini = combine-Real.ini
#combine_ini = combine-RRaRV.ini
# Number of cores for NNLOJET combine script
cores = 20
[MergeFastProd]
# produced @ local storage under $merge_dir/$name/Combined/Final
htcondor_requirements = (TARGET.ProvidesEkpResources==true)
# Always needs all channels up to some order LO, NLO, or NNLO
channels = LO R V RRa RRb RV VV
# Up to LO only
#channels = LO
# Up to NLO only
#channels = LO R V
# NNLOJET weight file to use for merging (LO, NLO, or NNLO)
weightorder = NNLO
# Accept as complete at 95%
acceptance = 0.95
# Execute the workflow as a local workflow instead of an htcondor workflow (useful for merging a small number of grids; to be removed later)
workflow = local
[MergeFinal]
# produced @ local storage under $merge_dir/$name/Combined/Final
# Channels are set via final_tables dictionary
[AddStatunc]
# produced @ local storage under $merge_dir/$name/Combined/Final
# Requires Recombine and MergeFinal to have run
# ATTENTION: Requires fastNLO v2.5!
# Should be added for each order
orders = [ "LO", "NLO", "NNLO" ]
# Rivet_Id must have been defined in the DEFAULT section.
# Make sure SteerModify.str contains a scenario description with RIVET_ID=
# Then provide the histogram identifiers for the data comparison plots here, if possible
histoids = ["d02-x01-y01", "d02-x01-y02", "d02-x01-y03", "d02-x01-y04", "d02-x01-y05", "d02-x01-y01", "d02-x01-y02", "d02-x01-y03", "d02-x01-y04", "d02-x01-y05" ]
[FnloCppreadFinal]
# produced @ local storage under $merge_dir/$name/Combined/Final
# Needs all input channels up to some order plus combined channels as defined in combine_ini
channels = LO R V RRa RRb RV VV NLO NLO_only NNLO NNLO_only
#######################
# New plotting setup! #
#######################
[PlotRuntime]
# produced @ local storage under $plots_dir
# Only requires CopyTables to have run
# Uncomment the following line to use only the channels that change with jet size R, complementing another analysis
#channels = R RRa RRb RV
# Up to NLO only
#channels = LO R V
# Uncomment the following line to have this task ignore the level
# of completeness of the previous required task (CopyTables).
# In that case this task works on whatever is ready to be treated,
# but interrupts the chain of requirements in the workflow.
force = True
[SingleGridClosure]
# produced @ local storage under $plots_dir
# Requires FnloCppread to have run
# Uncomment the following line to use only the channels that change with jet size R, complementing another analysis
#channels = R RRa RRb RV
# Up to NLO only
#channels = LO R V
# Uncomment the following line to have this task ignore the level
# of completeness of the previous required task (FnloCppread).
# In that case this task works on whatever is ready to be treated,
# but interrupts the chain of requirements in the workflow.
force = True
[MultiGridClosure]
# produced @ local storage under $plots_dir
# Requires FnloCppread to have run
# Uncomment the following line to use only the channels that change with jet size R, complementing another analysis
#channels = R RRa RRb RV
# Up to NLO only
#channels = LO R V
# Needs correct fastNLO 'scalecombs' parameter to be set, see above
# Uncomment the following line to have this task ignore the level
# of completeness of the previous required task (FnloCppread).
# In that case this task works on whatever is ready to be treated,
# but interrupts the chain of requirements in the workflow.
force = True
[MergedGridClosure]
# produced @ local storage under $plots_dir
# Requires FnloCppreadFinal to have run
# Needs all input channels up to some order plus combined channels as defined in combine_ini
channels = LO R V RRa RRb RV VV NLO NLO_only NNLO NNLO_only
# Up to NLO only
#channels = LO R V
# Only subset available
#channels = NNLO
[StatUncertainty]
# produced @ local storage under $plots_dir
# Highest available order (if NNLO too imprecise, maybe just up to NLO as well)
orders = [ "NNLO" ]
# Only needs one grid to read scenario info; numbers are from NNLOJET dat files
# Uncomment to show statistical uncertainties only for summed up channels per order (LO, NLO_only, NNLO_only)
#channellist = sum
[Absolute]
# produced @ local storage under $plots_dir
# Needs all input channels up to some order plus combined channels as defined in combine_ini
channels = LO R V RRa RRb RV VV NLO NLO_only NNLO NNLO_only
# Up to NLO only
#channels = LO R V
# Only subset available
#channels = NNLO
[KfacComparison]
# produced @ local storage under $plots_dir
# Highest available order
orders = ["NNLO"]
[ScaleUncertainty]
# produced @ local storage under $plots_dir
# Highest available order (if NNLO too imprecise, maybe just up to NLO as well)
orders = [ "NNLO" ]
# Needs correct fastNLO 'scaleindex' parameter to be set
# Only one scale choice in this production
scaleindex = 0
# Show statistical uncertainties from corresponding NNLOJET dat files directly, "", or re-evaluated from fastNLO grid, "0"
# Note: '0' needs '_v25' tables (output of task AddStatunc)
#datfiles = [ "", "0" ]
datfiles = [ "" ]
[PdfUncertainty]
# produced @ local storage under $plots_dir
# Might be interesting for each order (Attention: ME and PDF orders are not matched!)
orders = [ "NNLO" ]
pdfsets = CT14nnlo,MMHT2014nnlo68cl,NNPDF31_nnlo_as_0118,ABMP16als118_5_nnlo
# Needs correct fastNLO 'scaleindex' parameter to be set
# One scale choice fully sufficient here
scaleindex = 0
[DataComparison]
# produced @ local storage under $plots_dir
# Requires AddStatunc to have run
# Produces Rivet-like comparison plots to data, if available.
# Rivet_Id must have been defined in the DEFAULT section.
# ATTENTION: Requires a fastNLO + YODA + Rivet setup with Python2 because of a malfunction in Rivet with Python3
# Show comparison to all orders
orders = [ "LO", "NLO", "NNLO" ]
# Needs the correct fastNLO 'scalenames' parameter to be set
scalenames = [ "kScale1" ]
# Select theory uncertainty to show, default is 6P scale uncertainty, alternatives are:
# NN, 2P, 6P, HS, HA, HP, HC, MC, L6, AS, ST (see `fnlo-tk-yodaout -h` for details)
#uncertainty = ST
# Rescale uncertainties to be shown by factor, default=1
#dxsrescale = 10