Skip to content
Snippets Groups Projects
Commit f7618c0d authored by Christoph Heidecker's avatar Christoph Heidecker
Browse files

* Added time conversion factor to processing time vs. data throughput plot

parent 771e674a
Branches
No related tags found
No related merge requests found
......@@ -33,37 +33,6 @@ class InputData:
color='red')
exit(-1)
def higgs60sg(self):
    """Pick the monitoring log for the Higgs skimming tests on the ekpsg nodes.

    Provenance of earlier, rejected candidates:
      * run 3 (not-tuned SSD RAID0): a bug in the update hook shifted the
        percentages (60 jobs, 10 files/job), so the NaviX-0%..NaviX-80% logs
        under data/skimming/ekpsg/03_not-tuned-SSDs-Raid0/ are not used.
      * run 4 (tuned SSD RAID0): same update-hook bug; NaviX.mon / NaviX2.mon
        under data/skimming/ekpsg/04-tuned-SSDs-Raid0/ are not used either.
      * run 5: the first monitoring file was split because it contained
        unknown manual tests (60 jobs, 10 files/job).  Danger: some files
        were cached already, so several test runs are corrupted.

    Only the first (successful) run of the last run-period file is selected.
    """
    self.navix_monitor_file_list = [
        r'data/skimming/ekpsg/05-tuned-SSDs-Raid0/NaviX.mon.Skimming.180827.run1',
    ]
def copy60sg(self):
# Copy tests with tuned SSD Raid0:
# -------------------------------------
......@@ -81,6 +50,25 @@ class InputData:
# self.navix_monitor_file_list = [r'data/copy/ekpsg/05-tuned-SSDs-Raid0-new-log/NaviX.mon.debug.copy.small.180823']
self.navix_monitor_file_list = [r'data/copy/ekpsg/05-tuned-SSDs-Raid0-new-log/NaviX.mon.Copy.180824']
def copy_topas(self):
    """Select the monitoring logs of the TOPAS copy tests (runs v0 and v1).

    Run v0 alone started too many jobs for all worker nodes together, so
    both runs are analysed jointly.  Other candidate logs (the small-file
    test, the oversized test, and the 80-jobs-on-hold run of 2019-01-11)
    are deliberately not selected here.
    """
    selected_runs = [
        r'data/copy/topas/NaviX_TOPAS_COPY_v0.mon',
        r'data/copy/topas/NaviX_TOPAS_COPY_v1.mon',
    ]
    self.navix_monitor_file_list = selected_runs
def copy_nemo(self):
    """Select the monitoring log of the NEMO copy tests (run 2).

    Run 1 is not used: its monitoring log contained additional, unrelated
    jobs (even after cleaning), which may have caused the weird behaviour
    seen in those benchmarks.
    """
    self.navix_monitor_file_list = [r'data/copy/nemo/NaviX.run2.mon']
def copy_tsy_v1(self):
# -> run 1:
# First test-run for bug-fixing, cache volume was too small
......@@ -95,34 +83,43 @@ class InputData:
# Successful run with high remote and low cache transfer rate
self.navix_monitor_file_list = [r'data/copy/tsy/NaviX-5.mon.tsy']
def copy_nemo(self):
    """Use run 2 of the NEMO copy tests.

    NOTE: run 1 (data/copy/NEMO/NaviX.run1.cleaned.mon) had extra jobs in
    its monitoring log -- possibly the reason for the weird benchmark
    results -- and was therefore discarded.
    """
    monitor_log = r'data/copy/nemo/NaviX.run2.mon'
    self.navix_monitor_file_list = [monitor_log]
def higgs60sg(self):
# Higgs skimming tests with not-tuned SSD Raid0:
# ----------------------------------------------
# -> run 3:
# bug in update hook leads to percentage shift (60 jobs, 10 files/job)
# self.navix_monitor_file_list = [r'data/skimming/ekpsg/03_not-tuned-SSDs-Raid0/NaviX-0%.mon',
# r'data/skimming/ekpsg/03_not-tuned-SSDs-Raid0/NaviX-10%.mon',
# r'data/skimming/ekpsg/03_not-tuned-SSDs-Raid0/NaviX-20%.mon',
# r'data/skimming/ekpsg/03_not-tuned-SSDs-Raid0/NaviX-30%.mon',
# r'data/skimming/ekpsg/03_not-tuned-SSDs-Raid0/NaviX-40%.mon',
# r'data/skimming/ekpsg/03_not-tuned-SSDs-Raid0/NaviX-50%.mon',
# r'data/skimming/ekpsg/03_not-tuned-SSDs-Raid0/NaviX-60%.mon',
# r'data/skimming/ekpsg/03_not-tuned-SSDs-Raid0/NaviX-70%.mon',
# r'data/skimming/ekpsg/03_not-tuned-SSDs-Raid0/NaviX-80%.mon']
def copy_topas(self):
    """Analyse TOPAS copy runs v0 and v1 together.

    Run 0 on its own placed too many jobs for all worker nodes combined,
    so it is only evaluated jointly with run v1.
    """
    self.navix_monitor_file_list = [
        r'data/copy/topas/NaviX_TOPAS_COPY_v0.mon',
        r'data/copy/topas/NaviX_TOPAS_COPY_v1.mon',
    ]
# Higgs skimming tests with tuned SSD Raid0:
# ------------------------------------------
# -> run 4:
# bug in update hook leads to percentage shift (60 jobs, 10 files/job)
# self.navix_monitor_file_list = [r'data/skimming/ekpsg/04-tuned-SSDs-Raid0/NaviX.mon',
# r'data/skimming/ekpsg/04-tuned-SSDs-Raid0/NaviX2.mon']
# -> run 5:
# first monitoring file was split since it contained unknown manual tests (60 jobs, 10 files/job)
# self.navix_monitor_file_list = [r'data/skimming/ekpsg/05-tuned-SSDs-Raid0/NaviX.mon.Skimming.180817.2.newlog',
# r'data/skimming/ekpsg/05-tuned-SSDs-Raid0/NaviX.mon.Skimming.180818.newlog',
# r'data/skimming/ekpsg/05-tuned-SSDs-Raid0/NaviX.mon.Skimming.180827']
# Danger: Some files were cached already. Hence some of the test runs are corrupted.
# First run of the last run period file (successful test run):
self.navix_monitor_file_list = [r'data/skimming/ekpsg/05-tuned-SSDs-Raid0/NaviX.mon.Skimming.180827.run1']
# self.navix_monitor_file_list = [r'data/skimming/ekpsg/05-tuned-SSDs-Raid0/NaviX.mon.Skimming.180827']
def jec_sg(self):
    """Select the JEC monitoring log recorded on the ekpsg nodes."""
    self.navix_monitor_file_list = [r'data/jec/sg/NaviX.mon']
def jec_topas(self):
    """Select the JEC monitoring logs of the TOPAS runs v1 and v2."""
    runs = (
        r'data/jec/topas/NaviX_TOPAS_JEC_v1.mon',
        r'data/jec/topas/NaviX_TOPAS_JEC_v2.mon',
    )
    self.navix_monitor_file_list = list(runs)
def jec_sg(self):
    """Use the single JEC monitoring file from the sg cluster."""
    monitor_path = r'data/jec/sg/NaviX.mon'
    self.navix_monitor_file_list = [monitor_path]
def jec_nemo(self):
    """Select the JEC monitoring log recorded on NEMO."""
    self.navix_monitor_file_list = [r'data/jec/nemo/NaviX.mon']
def copy_topas(self):
    """Select the TOPAS copy-test log with 80 jobs on hold (2019-01-11).

    The small-file test (NaviX_TOPAS_small.mon) and the oversized test
    (NaviX_to_big.mon) were considered but are not used.
    """
    self.navix_monitor_file_list = [r'data/copy/topas/NaviX.mon.copyJob80Hold.20190111']
......@@ -268,13 +268,40 @@ def plot_errorbar_file_duplicity_vs_time(df, export_as):
def plot_scatter_data_rate_vs_runtime(df, export_as):
title = "Runtime over data throughput"
x_label = 'Data throughput (MB/s)'
y_label = 'Processing time (s)'
y_label = 'Processing time'
x = 'data_rate'
y = 'Runtime'
mode = plot_mode()
(data_frame_list, label_list, weights_list) = prepare_lists(df)
y_minis = []
y_maxis = []
for data_frame in data_frame_list:
y_minis.append(np.float(data_frame[y].min()))
y_maxis.append(np.float(data_frame[y].max()))
y_min = np.nanmin(y_minis)
y_max = np.nanmax(y_maxis)
if y_max - y_min < 10 * 60:
convertion_factor = 1.
y_label += ' (s)'
elif y_max - y_min < 10 * 60 * 60:
convertion_factor = 60
y_label += ' (min)'
elif y_max - y_min < 10 * 60 * 60 * 24:
convertion_factor = (60 * 60)
y_label += ' (h)'
else:
convertion_factor = (60 * 60 * 24)
y_label += ' (d)'
for data_frame in data_frame_list:
data_frame[y] = data_frame[y].apply(lambda time: time/convertion_factor)
try:
plot_2d_scatter(data_frame_list, label_list, x=x, y=y, title=title, x_label=x_label, y_label=y_label,
x_log=False, y_log=False, show_plot=False, save_plot=True, plot_mode=mode, export_as=export_as)
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment