Skip to content
Snippets Groups Projects
Commit 4fde5999 authored by Klaus Rabbertz's avatar Klaus Rabbertz
Browse files

Clean up master setup

parent d8a8f30c
No related branches found
No related tags found
No related merge requests found
[core]
no_lock = True
# Set local scheduler
#local_scheduler = True
default-scheduler-host = condorcentral.etp.kit.edu
default-scheduler-port = 8082
[worker]
keep_alive = False
ping_interval = 20
wait_interval = 20
max_reschedules = 1
[DEFAULT]
# Switch my debugging on/off (not yet implemented in most tasks)
my_debug = True
# Name of your analysis
name = fnl5872
# NNLOJET process and job name (the jobname is $channel-$jobnameext), and technical cutoff with leading 'y'
process = 2jet
jobnameext = CMS13-ak08
cutoff = y1d-8
# NNLOJET channels (append "a", "b" for RR region flag)
#channels = LO R V RRa RRb RV VV
#kinematics = vBa vRa vBa RRa RRb vRa vBa
channels = R RRa RRb RV
kinematics = vRa RRa RRb vRa
# Grid merging setup as defined in combine.ini; check compatibility
final_tables = {
"NLO": ["LO", "R", "V"],
"NLO_only": ["R", "V"],
"NNLO_only": ["RRa", "RRb", "RV", "VV"],
"NNLO": ["LO", "R", "V", "RRa", "RRb", "RV", "VV"]
}
# List of all observables (APPLfast grid names for NNLOJET histograms)
# TODO: fix vs. flex need different parameters (scale, scalecombs) in FnloCppread!!!
observables = [
"fnl5872_yb0_ys0_ptavgj12",
"fnl5872_yb1_ys0_ptavgj12",
"fnl5872_yb2_ys0_ptavgj12",
"fnl5872_yb3_ys0_ptavgj12",
"fnl5872_yb4_ys0_ptavgj12",
"fnl5872_yb0_ys1_ptavgj12",
"fnl5872_yb1_ys1_ptavgj12",
"fnl5872_yb2_ys1_ptavgj12",
"fnl5872_yb3_ys1_ptavgj12",
"fnl5872_yb0_ys2_ptavgj12",
"fnl5872_yb1_ys2_ptavgj12",
"fnl5872_yb2_ys2_ptavgj12",
"fnl5872_yb0_ys3_ptavgj12",
"fnl5872_yb1_ys3_ptavgj12",
"fnl5872_yb0_ys4_ptavgj12"
]
# Grid storage protocol and path usable from submitting machine and worker nodes of cluster
# Job in- and output will be stored in $wlcg_path under subdirectory of analysis $name
wlcg_path = srm://cmssrm-kit.gridka.de:8443/srm/managerv2?SFN=/pnfs/gridka.de/cms/disk-only/store/user/krabbert/law-analysis
gsi_path = gsiftp://cmssrm-kit.gridka.de:2811//pnfs/gridka.de/cms/disk-only/store/user/krabbert/law-analysis
# Default htcondor job submission configuration (modifiable for each task)
htcondor_accounting_group = cms.jet
htcondor_requirements = (TARGET.ProvidesCPU==true)
htcondor_remote_job = True
htcondor_user_proxy = /tmp/x509up_u12010
# Time in seconds
htcondor_walltime = 84000
htcondor_request_cpus = 1
htcondor_use_cpus = 1
# For all cores in total
htcondor_request_memory = 4096
htcondor_universe = docker
htcondor_docker_image = mschnepf/slc6-condocker
# Create log files in htcondor jobs
transfer_logs = True
# Set tolerance for workflow success with failed branches
tolerance = 0
# Submit only missing htcondor workflow branches (should always be true)
only_missing = True
# Bootstrap file to be sourced at beginning of htcondor jobs
bootstrap_file = bootstrap_NNLOJET_rev5419.sh
# Local directories
# - unpacking of NNLOJET and fastNLO warmups into local directories $warm_dir/[warmup|rewarmup|fastwarm|mergefastwarm]/$name
warm_dir = warmups
# - unpacking and merging of grids in local directories $merge_dir/$name/[$channel|Combined]
merge_dir = grids
# - production of plots in local directories $plots_dir/$name
plots_dir = plots
# fastNLO cppread options for grid/table comparison to NNLOJET original
# TODO: fix vs. flex need different parameters (scale, scalecombs) in FnloCppread!!!
# Confer the NNLOJET run file
# PDF choice for production
pdf = CT14nnlo
# Central scale choice in production, cf. NNLOJET run file & nnlobridge setup [scale1, scale2, scale12, scale21]
scale = scale1
# Central scale choice for flex-table evaluation, cf. fastNLO enumerator "0"=kScale1, "1"=kScale2, "2"=kQuadraticSum, ...
scaleindex = 0
# Central plus scale variations, cf. NNLOJET run file & nnlobridge setup
# Fixed-scale: --> 7; flex-scale low pT: --> -6; flex-scale high pT: --> -12
scalecombs = -12
ascode = LHAPDF
norm = no
#
# START of the ACTION
#
[BaseRuncard]
# copied to grid storage into BaseRuncard
# Path to base runcard file
source_path = 2jet.fnl5872.run
[Steeringfiles]
# copied to grid storage into Steeringfiles
# Local directory with all steering files
source_path = steeringfiles
[Warmup]
# produced @ grid storage under Warmup
# Override some defaults for this task
bootstrap_file = bootstrap_NNLOJET_rev5419_multicore.sh
# htcondor config
#htcondor_requirements = (TARGET.CloudSite=="blade")
#htcondor_request_cpus = 2
#htcondor_use_cpus = 2
htcondor_requirements = (TARGET.CloudSite=="topas")
htcondor_request_cpus = 42
htcondor_use_cpus = 28
#htcondor_requirements = (TARGET.CloudSite=="BWFORCLUSTER")
#htcondor_request_cpus = 20
#htcondor_use_cpus = 20
# for all cores in total
htcondor_request_memory = 8000
# NNLOJET event count and integration steps for every channel
# MUST be of the same length as channels!
# Test setup
# (here test settings only since main integration may run separately;
# see also luigi_warmup_nemo and luigi_warmup_topas, if existing, configs for settings)
#htcondor_request_cpus = 4
#htcondor_use_cpus = 4
# first seed for first branch, counting upwards
#starting_seed = 0
#channels = RRa RRb
#warmup_events = 10000 10000
#warmup_iterations = 3 3
# time in seconds
#htcondor_walltime = 3600
# Real setup
# first seed for first branch, counting upwards
starting_seed = 0
channels = R RRa RRb RV
warmup_events = 16000000 1000000 5000000 1000000
warmup_iterations = 10 10 10 10
# --> 41.3h, 18.9h, ?, ? for 28 cores @ topas
# time in seconds
htcondor_walltime = 250000
[CopyWarmup]
# untar'ed to local storage under $warm_dir/warmup/$name
[PlotVegasGrids]
# gnuplotted into local storage $warm_dir/warmup/$name
# Needs gnuplot version > 4.6 --> does not work on CentOS 7 portal machines
[Rewarmup]
# produced @ grid storage under Rewarmup
# Override some defaults for this task
bootstrap_file = bootstrap_NNLOJET_rev5419_multicore.sh
# htcondor config
#htcondor_requirements = (TARGET.CloudSite=="blade")
#htcondor_request_cpus = 2
#htcondor_use_cpus = 2
htcondor_requirements = (TARGET.CloudSite=="topas")
htcondor_request_cpus = 42
htcondor_use_cpus = 28
#htcondor_requirements = (TARGET.CloudSite=="BWFORCLUSTER")
#htcondor_request_cpus = 20
#htcondor_use_cpus = 20
# for all cores in total
htcondor_request_memory = 8000
# NNLOJET event count and integration steps for every channel
# MUST be of the same length as channels!
# Test setup
# first seed for first branch, counting upwards
starting_seed = 10
channels = R RRa RRb RV
warmup_events = 1000 1000 1000 1000
warmup_iterations = 3 3 3 3
# time in seconds
htcondor_walltime = 3600
[CopyRewarmup]
# untar'ed to local storage under $warm_dir/rewarmup/$name
[FastWarm]
# produced @ grid storage under FastWarm
# override some defaults for this task
# accept as complete at 95%
acceptance = 0.95
# htcondor config
htcondor_request_memory = 2000
# If BWFORCLUSTER is too full
#htcondor_requirements = (TARGET.ProvidesCPU==true)
# If ETP blades are too slow
htcondor_requirements = (TARGET.CloudSite=="BWFORCLUSTER")
# NNLOJET event count and number of jobs for each channel
starting_seeds = 1000 2000 3000 4000 5000 6000 7000
# 1st test setup, <~ 5 minutes
fastwarm_events = 800000 200000 400000 50000 800000 200000 400000
fastwarm_jobs = 2 2 2 2 2 2 2
# time in seconds
htcondor_walltime = 1800
# 2nd test setup, <~ 20-30 minutes (11.4, 14.5, 10.5, 11.7, 12.0, 14.1, 8.5 min)
#fastwarm_events = 5000000 2000000 4000000 500000 5000000 2000000 4000000
#fastwarm_jobs = 2 2 2 2 2 2 2
# time in seconds
#htcondor_walltime = 1800
# Final setup, ~22 hours (or 11 x 2)
#fastwarm_events = 250000000 90000000 250000000 25000000 250000000 100000000 300000000
#fastwarm_jobs = 20 20 20 20 20 20 20
# time in seconds
#htcondor_walltime = 84000
[CopyFastWarm]
# untar'ed to local storage under $warm_dir/fastwarm/$name
[MergeFastWarm]
# produced @ grid storage under MergeFastWarm
[CopyMergeFastWarm]
# untar'ed to local storage under $warm_dir/mergefastwarm/$name
[FastProd]
# produced @ grid storage under FastProd
# override some defaults for this task
# accept as complete at 95%
acceptance = 0.95
# If BWFORCLUSTER is too full
#htcondor_requirements = (TARGET.ProvidesCPU==true)
#htcondor_requirements = (TARGET.Cloudsite=="blade")
# If ETP blades are too slow
htcondor_requirements = (TARGET.CloudSite=="BWFORCLUSTER")
#htcondor_request_memory = 7500
htcondor_request_memory = 2000
# NNLOJET seeds, event count, and number of jobs for each channel
starting_seeds = 10000 20000 30000 40000 50000 60000 70000
# 1st test setup, ~20 min (14.2, 14.0, 12.0, 18.5, 21.8, 14.8, 14.1 min)
#fastprod_events = 4000000 220000 500000 3300 42000 3100 100000
#fastprod_jobs = 1 1 1 1 1 1 1
# time in seconds
#htcondor_walltime = 1800
# 2nd test setup, ~3-5 h (4.9, 3.7, 4.0, 3.9, 3.5, 4.2, 3.7 h)
#fastprod_events = 80000000 3500000 10000000 40000 400000 50000 1600000
#fastprod_jobs = 3 3 3 3 3 3 3
# time in seconds
#htcondor_walltime = 40000
# Final setup, ~22 h (21.8, 21.0, 21.0, 21.3, 20.9, 21.2, 20.9)
fastprod_events = 360000000 20000000 55000000 225000 2500000 260000 9500000
fastprod_jobs = 100 100 100 100 100 100 100
# v3: Switch back to 24h
#fastprod_jobs = 100 500 500 2000 1000 2000 500
htcondor_walltime = 84000
# Final setup 44h
#fastprod_events = 700000000 35000000 100000000 400000 4000000 500000 15000000
# v1:
#fastprod_jobs = 100 100 100 100 100 100 100
#fastprod_jobs = 100 100 100 2000 1000 2000 500
# v2:
#fastprod_jobs = 100 500 500 1000 100 100 100
###fastprod_jobs = 150 500 500 5000 2000 5000 1000
#htcondor_walltime = 172000
[CopyTables]
# copied to local storage under $merge_dir/$name/CHANNEL, see above
# Up to NLO only
#channels = LO R V
# accept as complete at 95%
acceptance = 0.95
# Uncomment the following line to have this task ignore the level
# of completeness of the previous required task (FastProd).
# In that case this task works on whatever is ready to be treated,
# but interrupts the chain of requirements in the workflow.
ignore_incompleteness = True
[Combine]
# produced @ local storage under $merge_dir/$name/Combined
# Path to combine.ini config
combine_ini = combine.ini
# Number of cores for NNLOJET combine script
cores = 20
[MergeFastProd]
# produced @ local storage under $merge_dir/$name/Combined/Final
htcondor_requirements = (TARGET.ProvidesEkpResources==true)
# Up to NLO only
#channels = LO R V
# NNLOJET weight file to use for merging (LO, NLO, or NNLO)
weightorder = NNLO
# Accept as complete at 95%
acceptance = 0.95
# Execute workflow as local workflow instead of htcondor workflow (useful for merging small amount of grids, to be removed later)
workflow = local
[MergeFinal]
# produced @ local storage under $merge_dir/$name/Combined/Final
[FnloCppread]
# produced @ local storage under $merge_dir/$name/CHANNEL
# Accept as complete at 95%
acceptance = 0.95
# Uncomment the following line to have this task ignore the level
# of completeness of the previous required task (CopyTables).
# In that case this task works on whatever is ready to be treated,
# but interrupts the chain of requirements in the workflow.
ignore_incompleteness = True
[FnloCppreadFinal]
# produced @ local storage under $merge_dir/$name/Combined/Final
# Up to NLO only
#channels = LO R V
#######################
# New plotting setup! #
#######################
[SingleGridClosure]
# produced @ local storage under $plots_dir
# Up to NLO only
#channels = LO R V
[MultiGridClosure]
# produced @ local storage under $plots_dir
# Up to NLO only
#channels = LO R V
# Needs correct fastNLO 'scalecombs' parameter to be set, see above
[MergedGridClosure]
# produced @ local storage under $plots_dir
# Up to NLO only
#channels = LO R V
[Absolute]
# produced @ local storage under $plots_dir
# Up to NLO only
#channels = LO R V
[KfacComparison]
# produced @ local storage under $plots_dir
# Highest available order
orders = ["NNLO"]
[ScaleUncertainty]
# produced @ local storage under $plots_dir
# Highest available order (if NNLO too imprecise, maybe just up to NLO as well)
orders = ["NLO", "NNLO" ]
# Needs correct fastNLO 'scaleindex' parameter to be set, see above
[PdfUncertainty]
# produced @ local storage under $plots_dir
# Might be interesting for each order (Attention: ME and PDF orders are not matched!)
orders = ["LO", "NLO", "NNLO" ]
pdfsets = CT14nnlo,MMHT2014nnlo68cl,NNPDF31_nnlo_as_0118,ABMP16als118_5_nnlo
# Needs correct fastNLO 'scaleindex' parameter to be set, see above
###############
# DEPRECATED! #
###############
#[SingleScalecheck]
# produced @ local storage under $plots_dir
# --> SingleGridClosure
#channels = LO R V
#[Approxtest]
# produced @ local storage under $plots_dir
# --> MultiGridClosure
#channels = LO R V
#fscl = 7
#[AbsoluteAll]
# produced @ local storage under $plots_dir
# --> KfacComparison
#channels = LO R V
[core]
no_lock = True
# Set local scheduler
#local_scheduler = True
default-scheduler-host = condorcentral.etp.kit.edu
default-scheduler-port = 8082
[worker]
keep_alive = False
ping_interval = 20
wait_interval = 20
max_reschedules = 1
[DEFAULT]
# Switch my debugging on/off (not yet implemented in most tasks)
my_debug = True
# Name of your analysis
name = fnlzj5862z
# NNLOJET process
process = ZJ
# NNLOJET channels (append "a", "b" for RR region flag)
channels = LO R V RRa RRb RV VV
# Grid merging setup as defined in combine.ini; check compatibility
final_tables = {
"NLO": ["LO", "R", "V"],
"NLO_only": ["R", "V"]
# Add the following lines when NNLO is ready to be merged in as well (don't forget to add a comma above)
# "NNLO_only": ["RRa", "RRb", "RV", "VV"],
# "NNLO": ["LO", "R", "V", "RRa", "RRb", "RV", "VV"]
}
# List of all observables (APPLfast grid names for NNLOJET histograms)
# TODO: fix vs. flex need different parameters (scale, scalecombs) in FnloCppread!!!
observables = [
"fnlzj5862z_yb0_ys0_ptavgZj",
"fnlzj5862z_yb1_ys0_ptavgZj",
"fnlzj5862z_yb2_ys0_ptavgZj",
"fnlzj5862z_yb0_ys1_ptavgZj",
"fnlzj5862z_yb1_ys1_ptavgZj",
"fnlzj5862z_yb0_ys2_ptavgZj"
]
# Grid storage protocol and path usable from submitting machine and worker nodes of cluster
# Job in- and output will be stored in $wlcg_path under subdirectory of analysis $name
wlcg_path = srm://cmssrm-kit.gridka.de:8443/srm/managerv2?SFN=/pnfs/gridka.de/cms/disk-only/store/user/krabbert/law-analysis
gsi_path = gsiftp://cmssrm-kit.gridka.de:2811//pnfs/gridka.de/cms/disk-only/store/user/krabbert/law-analysis
# Default htcondor job submission configuration (modifiable for each task)
htcondor_accounting_group = cms.jet
htcondor_requirements = (TARGET.ProvidesCPU==true)
htcondor_remote_job = True
htcondor_user_proxy = /tmp/x509up_u12010
# Time in seconds
htcondor_walltime = 84000
htcondor_request_cpus = 1
htcondor_use_cpus = 1
# For all cores in total
htcondor_request_memory = 4096
htcondor_universe = docker
htcondor_docker_image = mschnepf/slc6-condocker
# Create log files in htcondor jobs
transfer_logs = True
# Set tolerance for workflow success with failed branches
tolerance = 0
# Submit only missing htcondor workflow branches (should always be true)
only_missing = True
# Bootstrap file to be sourced at beginning of htcondor jobs
bootstrap_file = bootstrap_NNLOJET_rev5419.sh
# Local directory for merging of grids inside $merge_dir/$name
merge_dir = mergedgrids
# fastNLO cppread options for grid/table comparison to NNLOJET original
# TODO: fix vs. flex need different parameters (scale, scalecombs) in FnloCppread!!!
# Confer the NNLOJET run file
# PDF choice for production
pdf = CT14nnlo
# Central scale choice in production, cf. NNLOJET run file & nnlobridge setup [scale1, scale2, scale12, scale21]
scale = scale1
# Central scale choice for flex-table evaluation, cf. fastNLO enumerator "0"=kScale1, "1"=kScale2, "2"=kQuadraticSum, ...
scaleindex = 0
# Central plus scale variations, cf. NNLOJET run file & nnlobridge setup
# Fixed-scale: --> 7; flex-scale low pT: --> -6; flex-scale high pT: --> -12
scalecombs = -12
ascode = LHAPDF
norm = no
# local directory for plots
plots_dir = plots
#
# START of the ACTION
#
[BaseRuncard]
# copied to grid storage into BaseRuncard
# Path to base runcard file
source_path = ZJ.fnlzj5862z.run
[Steeringfiles]
# copied to grid storage into Steeringfiles
# Local directory with all steering files
source_path = steeringfiles
[Warmup]
# produced @ grid storage under Warmup
# Override some defaults for this task
# htcondor config
#htcondor_request_cpus = 42
#htcondor_use_cpus = 28
#htcondor_request_cpus = 20
#htcondor_use_cpus = 20
bootstrap_file = bootstrap_NNLOJET_rev5419_multicore.sh
# for all cores in total
htcondor_request_memory = 8000
#htcondor_requirements = (TARGET.CloudSite=="BWFORCLUSTER")
htcondor_requirements = (TARGET.CloudSite=="topas")
# NNLOJET event count and integration steps for every channel
# MUST be of the same length as channels!
# MUST have exactly ONE space between numbers
# Test setup
# (here test settings only since main integration may run separately;
# see also luigi_warmup_nemo and luigi_warmup_topas, if existing, configs for settings)
#htcondor_request_cpus = 4
#htcondor_use_cpus = 4
# first seed for first branch, counting upwards
#starting_seed = 0
#channels = LO R V RRa RRb RV VV
#warmup_events = 10000 10000 10000 1000 1000 1000 1000
#warmup_iterations = 3 3 3 3 3 3 3
# time in seconds
#htcondor_walltime = 3600
# Real setup
# (here test settings only since main integration run separately;
# see luigi_warmup_nemo and luigi_warmup_topas configs for settings)
htcondor_request_cpus = 42
htcondor_use_cpus = 28
# first seed for first branch, counting upwards
starting_seed = 0
#channels = LO R V RRa RRb RV VV
# NNLO not ok and VV didn't finish with 7M; produce VV fake warmup to run at least some reasonable LO, NLO jobs
#warmup_events = 100000000 16000000 60000000 1000000 1000000 1000000 100000
#warmup_iterations = 10 10 10 10 10 10 5
# time in seconds
#htcondor_walltime = 250000
# LO, NLO only setup just to fulfill reqs for later FastProd step
channels = LO R V
# NNLO not ok and VV didn't finish with 7M; produce VV fake warmup to run at least some reasonable LO, NLO jobs
warmup_events = 100000000 16000000 60000000
warmup_iterations = 10 10 10
# time in seconds
htcondor_walltime = 250000
# Reproduce NNLO warmup with improved settings
#starting_seed = 3
#channels = RRa RRb RV VV
# NNLO not ok and VV didn't finish with 7M; produce VV fake warmup to run at least some reasonable LO, NLO jobs
#warmup_events = 10000000 10000000 5000000 1000000
#warmup_iterations = 10 10 10 10
# time in seconds
#htcondor_walltime = 250000
[FastWarm]
# produced @ grid storage under FastWarm
# override some defaults for this task
# accept as complete at 95%
acceptance = 0.95
# htcondor config
htcondor_request_memory = 2000
# If BWFORCLUSTER is too full
#htcondor_requirements = (TARGET.ProvidesCPU==true)
# If ETP blades are too slow
htcondor_requirements = (TARGET.CloudSite=="BWFORCLUSTER")
# NNLOJET event count and number of jobs for each channel
starting_seeds = 1000 2000 3000 4000 5000 6000 7000
# 1st test setup, <~ 5 minutes
#fastwarm_events = 800000 200000 400000 50000 800000 200000 400000
#fastwarm_jobs = 2 2 2 2 2 2 2
# time in seconds
#htcondor_walltime = 1800
# 2nd test setup, <~ 20-30 minutes (26.2, 12.5, 23.0, 4.8, 51., 13.8, 22.4 min)
#fastwarm_events = 5000000 2000000 4000000 500000 5000000 2000000 4000000
#fastwarm_jobs = 2 2 2 2 2 2 2
# time in seconds
#htcondor_walltime = 1800
# Final setup, ~22 hours (or 11 x 2)
fastwarm_events = 250000000 200000000 220000000 130000000 120000000 180000000 230000000
fastwarm_jobs = 20 20 20 1 1 1 1
# time in seconds
htcondor_walltime = 84000
[MergeFastWarm]
# produced @ grid storage under MergeFastWarm
[FastProd]
# produced @ grid storage under FastProd
# override some defaults for this task
# accept as complete at 95%
acceptance = 0.95
# If BWFORCLUSTER is too full
#htcondor_requirements = (TARGET.ProvidesCPU==true)
#htcondor_requirements = (TARGET.Cloudsite=="blade")
# If ETP blades are too slow
htcondor_requirements = (TARGET.CloudSite=="BWFORCLUSTER")
htcondor_request_memory = 7500
#htcondor_request_memory = 2000
# NNLOJET seeds, event count, and number of jobs for each channel
#starting_seeds = 10000 20000 30000 40000 50000 60000 70000
# 1st test setup with LO+NLO only, ~20 min (23, 11, 43 min)
#channels = LO R V
#starting_seeds = 10000 20000 30000
#fastprod_events = 4000000 200000 500000
#fastprod_jobs = 1 1 1
#htcondor_walltime = 3600
# 2nd test setup with LO+NLO only, ~3-5 h ( 1.0, 2.6, 3.5 h)
#channels = LO R V
#starting_seeds = 10000 20000 30000
#fastprod_events = 10000000 4000000 2500000
#fastprod_jobs = 3 3 3
#htcondor_walltime = 40000
# Final setup, ~22 h (21.8, 21.0, 21.0, 21.3, 20.9, 21.2, 20.9)
channels = LO R V
starting_seeds = 10000 20000 30000
fastprod_events = 220000000 32000000 15000000
fastprod_jobs = 300 500 500
htcondor_walltime = 84000
# 1st test setup, ~20 min (19.3, 9.2, 7.2, 12.6, 16.6, 10.5, 6.8 min)
#fastprod_events = 4000000 220000 500000 3300 42000 3100 100000
#fastprod_jobs = 1 1 1 1 1 1 1
# time in seconds
#htcondor_walltime = 1800
# 2nd test setup, ~3-5 h (3.6, 3.7, 3.6, 3.2, 3.5, 4.0, 4.0 h)
#fastprod_events = 45000000 5500000 15000000 50000 550000 70000 3500000
#fastprod_jobs = 3 3 3 3 3 3 3
# time in seconds
#htcondor_walltime = 40000
# Final setup, ~22 h (21.8, 21.0, 21.0, 21.3, 20.9, 21.2, 20.9)
#fastprod_events = 270000000 30000000 90000000 340000 3200000 380000 18000000
#fastprod_jobs = 100 100 100 100 100 100 100
#fastprod_jobs = 100 500 500 2000 1000 2000 500
#htcondor_walltime = 84000
# Final setup 44h
#fastprod_events = 700000000 35000000 100000000 400000 4000000 500000 15000000
# v1:
#fastprod_jobs = 100 100 100 100 100 100 100
#fastprod_jobs = 100 100 100 2000 1000 2000 500
# v2:
#fastprod_jobs = 100 500 500 1000 100 100 100
###fastprod_jobs = 150 500 500 5000 2000 5000 1000
#htcondor_walltime = 172000
[CopyTables]
# copied to local storage under $merge_dir/$name/CHANNEL, see above
# Up to NLO only
channels = LO R V
# accept as complete at 99%
acceptance = 0.99
# Uncomment the following line to have this task ignore the level
# of completeness of the previous required task (FastProd).
# In that case this task works on whatever is ready to be treated,
# but interrupts the chain of requirements in the workflow.
ignore_incompleteness = True
[Combine]
# produced @ local storage under $merge_dir/$name/Combined
# Path to combine.ini config
combine_ini = combine.ini
# Number of cores for NNLOJET combine script
cores = 20
[MergeFastProd]
# produced @ local storage under $merge_dir/$name/Combined/Final
htcondor_requirements = (TARGET.ProvidesEkpResources==true)
# Up to NLO only
channels = LO R V
# NNLOJET weight file to use for merging (LO, NLO, or NNLO)
weightorder = NLO
# Accept as complete at 95%
acceptance = 0.95
# Execute workflow as local workflow instead of htcondor workflow (useful for merging small amount of grids, to be removed later)
workflow = local
[MergeFinal]
# produced @ local storage under $merge_dir/$name/Combined/Final
[FnloCppread]
# produced @ local storage under $merge_dir/$name/CHANNEL
# Accept as complete at 95%
acceptance = 0.95
# Uncomment the following line to have this task ignore the level
# of completeness of the previous required task (CopyTables).
# In that case this task works on whatever is ready to be treated,
# but interrupts the chain of requirements in the workflow.
ignore_incompleteness = True
[FnloCppreadFinal]
# produced @ local storage under $merge_dir/$name/Combined/Final
# Up to NLO only
channels = LO R V
#######################
# New plotting setup! #
#######################
[SingleGridClosure]
# produced @ local storage under $plots_dir
# Up to NLO only
channels = LO R V
[MultiGridClosure]
# produced @ local storage under $plots_dir
# Up to NLO only
channels = LO R V
# Needs correct fastNLO 'scalecombs' parameter to be set, see above
[MergedGridClosure]
# produced @ local storage under $plots_dir
# Up to NLO only
channels = LO R V
[Absolute]
# produced @ local storage under $plots_dir
# Up to NLO only
channels = LO R V
[KfacComparison]
# produced @ local storage under $plots_dir
# Up to NLO only
orders = ["NLO"]
[ScaleUncertainty]
# produced @ local storage under $plots_dir
# Up to NLO only
orders = ["LO", "NLO" ]
# Needs correct fastNLO 'scaleindex' parameter to be set, see above
[PdfUncertainty]
# produced @ local storage under $plots_dir
# Up to NLO only
orders = ["LO", "NLO" ]
pdfsets = CT14nnlo,MMHT2014nnlo68cl,NNPDF31_nnlo_as_0118,ABMP16als118_5_nnlo
# Needs correct fastNLO 'scaleindex' parameter to be set, see above
###############
# DEPRECATED! #
###############
[SingleScalecheck]
# produced @ local storage under $plots_dir
# --> SingleGridClosure
channels = LO R V
[Approxtest]
# produced @ local storage under $plots_dir
# --> MultiGridClosure
channels = LO R V
fscl = 7
[AbsoluteAll]
# produced @ local storage under $plots_dir
# --> KfacComparison
channels = LO R V
[core]
no_lock = True
[worker]
keep_alive = False
ping_interval = 20
wait_interval = 20
max_reschedules = 0
[DEFAULT]
# name of your analysis
name = ZJtriple_ptz
# NNLOJET process
process = ZJ
# NNLOJET channels (append "a", "b" for RR region flag)
channels = LO R V RRa RRb RV VV
# merged grids (make sure it's compatible with your combine.ini config)
final_tables = {
"NLO": ["LO", "R", "V"],
"NLO_only": ["R", "V"],
"NNLO_only": ["RRa", "RRb", "RV", "VV"],
"NNLO": ["LO", "R", "V", "RRa", "RRb", "RV", "VV"]
}
# list of all observables (APPLfast grid names for NNLOJET histograms)
observables = [
"ZJtriple_yb0_ystar0_ptz",
"ZJtriple_yb0_ystar1_ptz",
"ZJtriple_yb0_ystar2_ptz",
"ZJtriple_yb0_ystar3_ptz",
"ZJtriple_yb0_ystar4_ptz",
"ZJtriple_yb1_ystar0_ptz",
"ZJtriple_yb1_ystar1_ptz",
"ZJtriple_yb1_ystar2_ptz",
"ZJtriple_yb1_ystar3_ptz",
"ZJtriple_yb2_ystar0_ptz",
"ZJtriple_yb2_ystar1_ptz",
"ZJtriple_yb2_ystar2_ptz",
"ZJtriple_yb3_ystar0_ptz",
"ZJtriple_yb3_ystar1_ptz",
"ZJtriple_yb4_ystar0_ptz"
]
# grid storage protocol and path usable from submitting machine and worker nodes of cluster
# job in- and output will be stored in $wlcg_path under subdirectory of analysis $name
wlcg_path = srm://cmssrm-kit.gridka.de:8443/srm/managerv2?SFN=/pnfs/gridka.de/cms/disk-only/store/user/aheidelb/law-analysis
# default htcondor job submission configuration (modifiable for each task)
htcondor_accounting_group = cms.jet
htcondor_requirements = (TARGET.ProvidesCPU==true)
htcondor_remote_job = True
htcondor_user_proxy = /tmp/x509up_u12265
# time in seconds
htcondor_walltime = 84000
htcondor_request_cpus = 1
# for all cores in total
htcondor_request_memory = 4096
htcondor_universe = docker
htcondor_docker_image = mschnepf/slc6-condocker
# create log files in htcondor jobs
transfer_logs = True
# set local scheduler
local_scheduler = True
# set tolerance for workflow success with failed branches
tolerance = 0
# submit only missing htcondor workflow branches (should always be true)
only_missing = True
# bootstrap file to be sourced at beginning of htcondor jobs
bootstrap_file = bootstrap.sh
# local directory for merging of grids inside $merge_dir/$name
merge_dir = mergedgrids
# fastNLO cppread options for grid/table evaluation
pdf = CT14nnlo
scalecombs = -6
ascode = LHAPDF
norm = no
scale = scale12
# local directory for plots
plots_dir = plots
#
# START of the ACTION
#
[BaseRuncard]
# copied to grid storage into BaseRuncard
# path to base runcard file
source_path = ZJ.ZJtriple.run
[Steeringfiles]
# copied to grid storage into Steeringfiles
# local directory with all steering files
source_path = steeringfiles
[Warmup]
# produced @ grid storage under Warmup
# override some defaults for this task
# htcondor config
htcondor_request_cpus = 24
bootstrap_file = multicore_bootstrap.sh
# for all cores in total
htcondor_request_memory = 35000
# time in seconds
htcondor_walltime = 180000
# NNLOJET event count and integration steps for every channel
# MUST be of the same length as channels!
# MUST have exactly ONE space between numbers
# (here test settings only since main integration run separately)
warmup_events = 50000000 25000000 25000000 4000000 4000000 4000000 4000000
warmup_iterations = 10 10 10 10 10 10 10
# first seed for first branch, counting upwards
starting_seed = 0
[FastWarm]
# produced @ grid storage under FastWarm
# override tolerance to 5% failed jobs
tolerance = 0.05
# override some defaults for this task
# htcondor config
htcondor_requirements = ((TARGET.CloudSite=="BWFORCLUSTER")||(TARGET.ProvidesEkpResources==true))
# If ETP blades are too slow
#htcondor_requirements = (TARGET.CloudSite=="BWFORCLUSTER")
# NNLOJET event count and number of jobs for each channel
#fastwarm_events = 200000000 200000000 200000000 100000000 100000000 100000000 100000000
#fastwarm_jobs = 10 10 10 10 10 10 10
# Test setup
fastwarm_events = 200000 200000 200000 100000 100000 100000 100000
fastwarm_jobs = 1 1 1 1 1 1 1
starting_seeds = 1000 2000 3000 4000 5000 6000 7000
# time in seconds
htcondor_walltime = 1800
[MergeFastWarm]
# produced @ grid storage under MergeFastWarm
[FastProd]
# produced @ grid storage under FastProd
# override some defaults for this task
# htcondor config
#htcondor_requirements = ((TARGET.CLOUDSITE=="BWFORCLUSTER")||(TARGET.ProvidesEkpResources==true))
htcondor_requirements = (TARGET.CloudSite=="BWFORCLUSTER")
htcondor_request_memory = 7500
# NNLOJET event count and number of jobs for each channel
fastprod_events = 250000000 45000000 22000000 800000 700000 250000 800000
fastprod_jobs = 100 100 100 100 100 100 100
starting_seeds = 10000 20000 30000 40000 50000 60000 70000
# Test setup
#fastprod_events = 250000 45000 22000 800 700 250 800
#fastprod_jobs = 1 1 1 1 1 1 1
#starting_seeds = 10000 20000 30000 40000 50000 60000 70000
# time in seconds
#htcondor_walltime = 1800
[CopyTables]
# copied to local storage under $merge_dir/$name/CHANNEL, see above
[Combine]
# produced @ local storage under $merge_dir/$name/Combined
# path to combine.ini config
combine_ini = combine.ini
# number of cores for NNLOJET combine script
cores = 20
[MergeFastProd]
# produced @ local storage under $merge_dir/$name/Combined/Final
htcondor_requirements = (TARGET.ProvidesEkpResources==true)
# execute workflow as local workflow instead of htcondor workflow (useful for merging small amount of grids, to be removed later)
workflow = local
[MergeFinal]
# produced @ local storage under $merge_dir/$name/Combined/Final
[FnloCppread]
# produced @ local storage under $merge_dir/$name/CHANNEL
[FnloCppreadFinal]
# produced @ local storage under $merge_dir/$name/Combined/Final
[SingleScalecheck]
# produced @ local storage under $plots_dir
[Approxtest]
# produced @ local storage under $plots_dir
fscl = 7
[Absolute]
# produced @ local storage under $plots_dir
[AbsoluteAll]
# produced @ local storage under $plots_dir
File moved
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment