Moved DEBUG option to config files

saundersp
2023-07-14 23:57:58 +02:00
parent e6194ac485
commit 399024da7a
12 changed files with 280 additions and 268 deletions

View File

@@ -1,12 +1,7 @@
from numba import float64, uint32, cuda, int32, uint16
from config import COMPILE_WITH_C
from numba import float64, uint32, cuda, int32
from config import COMPILE_WITH_C, NB_THREADS, NB_THREADS_2D, NB_THREADS_3D, M
import numpy as np
NB_THREADS = 1024
NB_THREADS_2D = (32, 32)
NB_THREADS_3D = (16, 16, 4)
M = int(np.log2(NB_THREADS_2D[1]))
if COMPILE_WITH_C:
from numba import njit
else:
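
The hunk above drops the locally defined NB_THREADS, NB_THREADS_2D, NB_THREADS_3D and M constants in favour of importing them from config, and keeps njit behind the COMPILE_WITH_C switch. The else branch is cut off by the diff context; a minimal sketch of the pattern it suggests, assuming the fallback is a pass-through decorator so @njit-decorated functions still run as plain Python:

# Hypothetical sketch only: the real else-branch is outside the shown context.
from config import COMPILE_WITH_C

if COMPILE_WITH_C:
    from numba import njit
else:
    def njit(*args, **kwargs):
        # No-op replacement accepting both @njit and @njit(...) forms.
        if len(args) == 1 and callable(args[0]) and not kwargs:
            return args[0]
        def decorator(fnc):
            return fnc
        return decorator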

View File

@@ -2,6 +2,7 @@ from toolbox import picke_multi_loader, format_time_ns, unit_test_argsort_2d
from typing import List, Tuple
from time import perf_counter_ns
import numpy as np
from config import OUT_DIR, DATA_DIR
def unit_test(TS: List[int], labels: List[str] = ["CPU", "GPU"], tol: float = 1e-8) -> None:
"""Test if the each result is equals to other devices.
@@ -20,32 +21,32 @@ def unit_test(TS: List[int], labels: List[str] = ["CPU", "GPU"], tol: float = 1e
fnc_s = perf_counter_ns()
n_total= 0
n_success = 0
print(f"\n| {'Unit testing':<37} | {'Test state':<10} | {'Time spent (ns)':<17} | {'Formatted time spent':<29} |")
print(f"|{'-'*39}|{'-'*12}|{'-'*19}|{'-'*31}|")
print(f"\n| {'Unit testing':<37} | {'Test state':<10} | {'Time spent (ns)':<18} | {'Formatted time spent':<29} |")
print(f"|{'-'*39}|{'-'*12}|{'-'*20}|{'-'*31}|")
for filename in ["X_train_feat", "X_test_feat", "X_train_ii", "X_test_ii"]:
print(f"{filename}...", end = "\r")
bs = picke_multi_loader([f"{filename}_{label}" for label in labels], "./out")
bs = picke_multi_loader([f"{filename}_{label}" for label in labels], OUT_DIR)
for i, (b1, l1) in enumerate(zip(bs, labels)):
if b1 is None:
#print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>17} | {'None':<29} |")
#print(f"| {filename:<22} - {l1:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
continue
for j, (b2, l2) in enumerate(zip(bs, labels)):
if i >= j:
continue
if b2 is None:
#print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>17} | {'None':<29} |")
#print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
continue
n_total += 1
s = perf_counter_ns()
state = np.abs(b1 - b2).mean() < tol
e = perf_counter_ns() - s
if state:
print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Passed':>10} | {e:>17,} | {format_time_ns(e):<29} |")
print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Passed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
n_success += 1
else:
print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Failed':>10} | {e:>17,} | {format_time_ns(e):<29} |")
print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Failed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
for filename, featname in zip(["X_train_feat_argsort", "X_test_feat_argsort"], ["X_train_feat", "X_test_feat"]):
print(f"Loading {filename}...", end = "\r")
@@ -53,14 +54,14 @@ def unit_test(TS: List[int], labels: List[str] = ["CPU", "GPU"], tol: float = 1e
bs = []
for label in labels:
if feat is None:
feat_tmp = picke_multi_loader([f"{featname}_{label}"], "./out")[0]
feat_tmp = picke_multi_loader([f"{featname}_{label}"], OUT_DIR)[0]
if feat_tmp is not None:
feat = feat_tmp
bs.append(picke_multi_loader([f"{filename}_{label}"], "./out")[0])
bs.append(picke_multi_loader([f"{filename}_{label}"], OUT_DIR)[0])
for i, (b1, l1) in enumerate(zip(bs, labels)):
if b1 is None:
#print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>17} | {'None':<29} |")
#print(f"| {filename:<22} - {l1:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
continue
if feat is not None:
n_total += 1
@@ -68,26 +69,26 @@ def unit_test(TS: List[int], labels: List[str] = ["CPU", "GPU"], tol: float = 1e
state = unit_test_argsort_2d(feat, b1)
e = perf_counter_ns() - s
if state:
print(f"| {filename:<22} - {l1:<4} argsort | {'Passed':>10} | {e:>17,} | {format_time_ns(e):<29} |")
print(f"| {filename:<22} - {l1:<4} argsort | {'Passed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
n_success += 1
else:
print(f"| {filename:<22} - {l1:<4} argsort | {'Failed':>10} | {e:>17,} | {format_time_ns(e):<29} |")
print(f"| {filename:<22} - {l1:<4} argsort | {'Failed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
for j, (b2, l2) in enumerate(zip(bs, labels)):
if i >= j:
continue
if b2 is None:
#print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>17} | {'None':<29} |")
#print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
continue
n_total += 1
s = perf_counter_ns()
state = np.abs(b1 - b2).mean() < tol
e = perf_counter_ns() - s
if state:
print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Passed':>10} | {e:>17,} | {format_time_ns(e):<29} |")
print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Passed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
n_success += 1
else:
print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Failed':>10} | {e:>17,} | {format_time_ns(e):<29} |")
print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Failed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
for T in TS:
for filename in ["alphas", "final_classifiers"]:
@@ -96,32 +97,33 @@ def unit_test(TS: List[int], labels: List[str] = ["CPU", "GPU"], tol: float = 1e
for i, (b1, l1) in enumerate(zip(bs, labels)):
if b1 is None:
#print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>17} | {'None':<29} |")
#print(f"| {filename + '_' + str(T):<22} - {l1:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
continue
for j, (b2, l2) in enumerate(zip(bs, labels)):
if i >= j:
continue
if b2 is None:
#print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>17} | {'None':<29} |")
#print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
continue
n_total += 1
s = perf_counter_ns()
state = np.abs(b1 - b2).mean() < tol
e = perf_counter_ns() - s
if state:
print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Passed':>10} | {e:>17,} | {format_time_ns(e):<29} |")
print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Passed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
n_success += 1
else:
print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Failed':>10} | {e:>17,} | {format_time_ns(e):<29} |")
print(f"|{'-'*39}|{'-'*12}|{'-'*19}|{'-'*31}|")
e = perf_counter_ns() - fnc_s
print(f"| {'Unit testing summary':<37} | {str(n_success) + '/' + str(n_total):>10} | {e:>17,} | {format_time_ns(e):<29} |")
print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Failed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
def load_datasets(data_dir: str = "../data") -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
print(f"|{'-'*39}|{'-'*12}|{'-'*20}|{'-'*31}|")
e = perf_counter_ns() - fnc_s
print(f"| {'Unit testing summary':<37} | {str(n_success) + '/' + str(n_total):>10} | {e:>18,} | {format_time_ns(e):<29} |")
def load_datasets(data_dir: str = DATA_DIR) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Load the datasets.
Args:
data_dir (str, optional): [description]. Defaults to "../data".
data_dir (str, optional): [description]. Defaults to DATA_DIR (see config.py).
Returns:
Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: [description]
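
Every pairwise check in unit_test follows the same pattern: load the per-device pickles, skip missing ones (None), then compare with a mean-absolute-difference tolerance while timing the check with perf_counter_ns. A minimal sketch of that comparison step in isolation; the helper name is hypothetical:

from time import perf_counter_ns
from typing import Tuple
import numpy as np

def compare_devices(b1: np.ndarray, b2: np.ndarray, tol: float = 1e-8) -> Tuple[bool, int]:
    # Mirrors the check used above: results agree when the mean absolute
    # difference stays below tol; the elapsed time is reported in nanoseconds.
    s = perf_counter_ns()
    state = np.abs(b1 - b2).mean() < tol
    e = perf_counter_ns() - s
    return bool(state), e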

View File

@@ -1,3 +1,14 @@
import numpy as np
DATA_DIR = "../data"
OUT_DIR = "./out"
MODEL_DIR = "./models"
NB_THREADS = 1024
NB_THREADS_2D = (32, 32)
NB_THREADS_3D = (16, 16, 4)
M = int(np.log2(NB_THREADS_2D[1]))
# Save state to avoid recalculation on restart
SAVE_STATE = True
# Redo the state even if it's already saved
@@ -5,7 +16,7 @@ FORCE_REDO = False
# Use NJIT to greatly accelerate runtime
COMPILE_WITH_C = False
# Use GPU to greatly accelerate runtime (as priority over NJIT)
GPU_BOOSTED = False
GPU_BOOSTED = True
# Number of weak classifiers
# TS = [1]
# TS = [1, 5, 10]
@@ -13,3 +24,12 @@ GPU_BOOSTED = False
# TS = [1, 5, 10, 25, 50, 100, 200]
# TS = [1, 5, 10, 25, 50, 100, 200, 300]
TS = [1, 5, 10, 25, 50, 100, 200, 300, 400, 500, 1000]
# Enable verbose output (for debugging purposes)
__DEBUG = False
# Debugging options
if __DEBUG:
IDX_INSPECT = 4548
IDX_INSPECT_OFFSET = 100
np.seterr(all = 'raise')
# Debug option (image width * log_10(length) + extra characters)
np.set_printoptions(linewidth = 19 * 6 + 3)
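
Assembled from the visible hunks, the post-commit config.py plausibly reads roughly as follows. Lines outside the shown diff context may differ, and the diff view drops indentation, so grouping the last four statements under if __DEBUG is an assumption:

import numpy as np

# Directories (new in this commit)
DATA_DIR = "../data"
OUT_DIR = "./out"
MODEL_DIR = "./models"
# CUDA launch configuration (moved here from the GPU module)
NB_THREADS = 1024
NB_THREADS_2D = (32, 32)
NB_THREADS_3D = (16, 16, 4)
M = int(np.log2(NB_THREADS_2D[1]))
# Save state to avoid recalculation on restart
SAVE_STATE = True
# Redo the state even if it's already saved
FORCE_REDO = False
# Use NJIT to greatly accelerate runtime
COMPILE_WITH_C = False
# Use GPU to greatly accelerate runtime (as priority over NJIT)
GPU_BOOSTED = True
# Number of weak classifiers
TS = [1, 5, 10, 25, 50, 100, 200, 300, 400, 500, 1000]
# Enable verbose output (for debugging purposes)
__DEBUG = False
# Debugging options
if __DEBUG:
    IDX_INSPECT = 4548
    IDX_INSPECT_OFFSET = 100
    np.seterr(all = 'raise')
    # Debug option (image width * log_10(length) + extra characters)
    np.set_printoptions(linewidth = 19 * 6 + 3)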

View File

@@ -12,9 +12,9 @@ from time import perf_counter_ns
from os import makedirs
import numpy as np
#np.seterr(all = 'raise')
from config import FORCE_REDO, COMPILE_WITH_C, GPU_BOOSTED, TS, SAVE_STATE
from config import FORCE_REDO, COMPILE_WITH_C, GPU_BOOSTED, TS, SAVE_STATE, MODEL_DIR, __DEBUG
if __DEBUG:
from config import IDX_INSPECT, IDX_INSPECT_OFFSET
if GPU_BOOSTED:
from ViolaJonesGPU import apply_features, set_integral_image, argsort
@@ -26,12 +26,6 @@ else:
from ViolaJonesCPU import apply_features, set_integral_image, argsort
label = 'CPU' if COMPILE_WITH_C else 'PY'
# FIXME Debug code
# IDX_INSPECT = 0
# IDX_INSPECT = 2
IDX_INSPECT = 4548
IDX_INSPECT_OFFSET = 100
def bench_train(X_train: np.ndarray, X_test: np.ndarray, y_train: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Train the weak classifiers.
@@ -45,25 +39,23 @@ def bench_train(X_train: np.ndarray, X_test: np.ndarray, y_train: np.ndarray) ->
"""
feats = state_saver("Building features", "feats", lambda: build_features(X_train.shape[1], X_train.shape[2]), FORCE_REDO, SAVE_STATE)
# FIXME Debug code
# print("feats")
# print(feats.shape)
# print(feats[IDX_INSPECT].ravel())
# return 0, 0
if __DEBUG:
print("feats")
print(feats.shape)
print(feats[IDX_INSPECT].ravel())
X_train_ii = state_saver(f"Converting training set to integral images ({label})", f"X_train_ii_{label}",
lambda: set_integral_image(X_train), FORCE_REDO, SAVE_STATE)
X_test_ii = state_saver(f"Converting testing set to integral images ({label})", f"X_test_ii_{label}",
lambda: set_integral_image(X_test), FORCE_REDO, SAVE_STATE)
# FIXME Debug code
# print("X_train_ii")
# print(X_train_ii.shape)
# print(X_train_ii[IDX_INSPECT])
# print("X_test_ii")
# print(X_test_ii.shape)
# print(X_test_ii[IDX_INSPECT])
# return 0, 0
if __DEBUG:
print("X_train_ii")
print(X_train_ii.shape)
print(X_train_ii[IDX_INSPECT])
print("X_test_ii")
print(X_test_ii.shape)
print(X_test_ii[IDX_INSPECT])
X_train_feat = state_saver(f"Applying features to training set ({label})", f"X_train_feat_{label}",
lambda: apply_features(feats, X_train_ii), FORCE_REDO, SAVE_STATE)
@@ -71,14 +63,13 @@ def bench_train(X_train: np.ndarray, X_test: np.ndarray, y_train: np.ndarray) ->
lambda: apply_features(feats, X_test_ii), FORCE_REDO, SAVE_STATE)
del X_train_ii, X_test_ii, feats
# FIXME Debug code
# print("X_train_feat")
# print(X_train_feat.shape)
# print(X_train_feat[IDX_INSPECT, : IDX_INSPECT_OFFSET])
# print("X_test_feat")
# print(X_test_feat.shape)
# print(X_test_feat[IDX_INSPECT, : IDX_INSPECT_OFFSET])
# return 0, 0
if __DEBUG:
print("X_train_feat")
print(X_train_feat.shape)
print(X_train_feat[IDX_INSPECT, : IDX_INSPECT_OFFSET])
print("X_test_feat")
print(X_test_feat.shape)
print(X_test_feat[IDX_INSPECT, : IDX_INSPECT_OFFSET])
#indices = state_saver("Selecting best features training set", "indices", force_redo = True, save_state = SAVE_STATE,
# fnc = lambda: SelectPercentile(f_classif, percentile = 10).fit(X_train_feat.T, y_train).get_support(indices = True))
@@ -96,40 +87,35 @@ def bench_train(X_train: np.ndarray, X_test: np.ndarray, y_train: np.ndarray) ->
# X_train_feat, X_test_feat = X_train_feat[indices], X_test_feat[indices]
#return 0, 0
X_train_feat_argsort = state_saver(f"Precalculating training set argsort ({label})", f"X_train_feat_argsort_{label}",
lambda: argsort(X_train_feat), FORCE_REDO, SAVE_STATE)
# FIXME Debug code
# print("X_train_feat_argsort")
# print(X_train_feat_argsort.shape)
# print(X_train_feat_argsort[IDX_INSPECT, : IDX_INSPECT_OFFSET])
# benchmark_function("Arg unit test", lambda: unit_test_argsort_2d(X_train_feat, X_train_feat_argsort))
# return 0, 0
if __DEBUG:
print("X_train_feat_argsort")
print(X_train_feat_argsort.shape)
print(X_train_feat_argsort[IDX_INSPECT, : IDX_INSPECT_OFFSET])
benchmark_function("Arg unit test", lambda: unit_test_argsort_2d(X_train_feat, X_train_feat_argsort))
# X_test_feat_argsort = state_saver(f"Precalculating testing set argsort ({label})", f"X_test_feat_argsort_{label}",
# lambda: argsort(X_test_feat), True, False)
X_test_feat_argsort = state_saver(f"Precalculating testing set argsort ({label})", f"X_test_feat_argsort_{label}",
lambda: argsort(X_test_feat), FORCE_REDO, SAVE_STATE)
# FIXME Debug code
# print("X_test_feat_argsort")
# print(X_test_feat_argsort.shape)
# print(X_test_feat_argsort[IDX_INSPECT, : IDX_INSPECT_OFFSET])
# benchmark_function("Arg unit test", lambda: unit_test_argsort_2d(X_test_feat, X_test_feat_argsort))
# return 0, 0
# del X_test_feat_argsort
if __DEBUG:
print("X_test_feat_argsort")
print(X_test_feat_argsort.shape)
print(X_test_feat_argsort[IDX_INSPECT, : IDX_INSPECT_OFFSET])
benchmark_function("Arg unit test", lambda: unit_test_argsort_2d(X_test_feat, X_test_feat_argsort))
del X_test_feat_argsort
print(f"\n| {'Training':<49} | {'Time spent (ns)':<17} | {'Formatted time spent':<29} |\n|{'-'*51}|{'-'*19}|{'-'*31}|")
print(f"\n| {'Training':<49} | {'Time spent (ns)':<18} | {'Formatted time spent':<29} |\n|{'-'*51}|{'-'*20}|{'-'*31}|")
for T in TS:
# alphas, final_classifiers = state_saver(f"ViolaJones T = {T:<3} ({label})", [f"alphas_{T}_{label}", f"final_classifiers_{T}_{label}"],
state_saver(f"ViolaJones T = {T:<4} ({label})", [f"alphas_{T}_{label}", f"final_classifiers_{T}_{label}"],
lambda: train_viola_jones(T, X_train_feat, X_train_feat_argsort, y_train), FORCE_REDO, SAVE_STATE, "./models")
# FIXME Debug code
# print("alphas")
# print(alphas)
# print("final_classifiers")
# print(final_classifiers)
alphas, final_classifiers = state_saver(f"ViolaJones T = {T:<3} ({label})", [f"alphas_{T}_{label}", f"final_classifiers_{T}_{label}"],
lambda: train_viola_jones(T, X_train_feat, X_train_feat_argsort, y_train), FORCE_REDO, SAVE_STATE, MODEL_DIR)
if __DEBUG:
print("alphas")
print(alphas)
print("final_classifiers")
print(final_classifiers)
return X_train_feat, X_test_feat
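
The commented-out FIXME blocks in bench_train are replaced by if __DEBUG: blocks that all print the same three things: a name, a shape and a slice around IDX_INSPECT. A hypothetical helper (not part of the commit) that would capture the repetition:

import numpy as np
from config import __DEBUG
if __DEBUG:
    from config import IDX_INSPECT, IDX_INSPECT_OFFSET

def debug_dump(name: str, a: np.ndarray, truncate: bool = False) -> None:
    # Prints nothing unless the config-level debug flag is enabled.
    if not __DEBUG:
        return
    print(name)
    print(a.shape)
    # Full row for images and integral images, truncated row for feature matrices.
    print(a[IDX_INSPECT, : IDX_INSPECT_OFFSET] if truncate else a[IDX_INSPECT])

With such a helper, the blocks above would shrink to calls like debug_dump("X_train_feat", X_train_feat, truncate = True).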
@@ -183,43 +169,37 @@ def _main_() -> None:
for folder_name in ["models", "out"]:
makedirs(folder_name, exist_ok = True)
print(f"| {'Preprocessing':<49} | {'Time spent (ns)':<17} | {'Formatted time spent':<29} |\n|{'-'*51}|{'-'*19}|{'-'*31}|")
print(f"| {'Preprocessing':<49} | {'Time spent (ns)':<18} | {'Formatted time spent':<29} |\n|{'-'*51}|{'-'*20}|{'-'*31}|")
X_train, y_train, X_test, y_test = state_saver("Loading sets", ["X_train", "y_train", "X_test", "y_test"],
load_datasets, FORCE_REDO, SAVE_STATE)
# FIXME Debug option (image width * log_10(length) + extra characters)
# np.set_printoptions(linewidth = 19 * 6 + 3)
# FIXME Debug code
# print("X_train")
# print(X_train.shape)
# print(X_train[IDX_INSPECT])
# print("X_test")
# print(X_test.shape)
# print(X_test[IDX_INSPECT])
# print("y_train")
# print(y_train.shape)
# print(y_train[IDX_INSPECT: IDX_INSPECT + IDX_INSPECT_OFFSET])
# print("y_test")
# print(y_test.shape)
# print(y_test[IDX_INSPECT: IDX_INSPECT + IDX_INSPECT_OFFSET])
# return
if __DEBUG:
print("X_train")
print(X_train.shape)
print(X_train[IDX_INSPECT])
print("X_test")
print(X_test.shape)
print(X_test[IDX_INSPECT])
print("y_train")
print(y_train.shape)
print(y_train[IDX_INSPECT: IDX_INSPECT + IDX_INSPECT_OFFSET])
print("y_test")
print(y_test.shape)
print(y_test[IDX_INSPECT: IDX_INSPECT + IDX_INSPECT_OFFSET])
X_train_feat, X_test_feat = bench_train(X_train, X_test, y_train)
# FIXME Debug code
# return
# X_train_feat, X_test_feat = picke_multi_loader([f"X_train_feat_{label}", f"X_test_feat_{label}"], "./out")
# indices = picke_multi_loader(["indices"], "./out")[0]
# X_train_feat, X_test_feat = picke_multi_loader([f"X_train_feat_{label}", f"X_test_feat_{label}"], OUT_DIR)
# indices = picke_multi_loader(["indices"], OUT_DIR)[0]
# X_train_feat, X_test_feat = X_train_feat[indices], X_test_feat[indices]
bench_accuracy(label, X_train_feat, X_test_feat, y_train, y_test)
if __name__ == "__main__":
#toolbox_unit_test()
_main_()
if __DEBUG:
toolbox_unit_test()
# Only execute unit test after having trained the specified labels
unit_test(TS, ["GPU", "CPU", "PY", "PGPU"])
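
__main__ now runs toolbox_unit_test only when __DEBUG is set, then unit_test over every label, and bench_train adds an argsort check through unit_test_argsort_2d under the same flag. The body of unit_test_argsort_2d is not part of this commit; a hedged sketch of what it plausibly verifies, namely that each row of the argsort output actually sorts the matching row of the input:

import numpy as np

def unit_test_argsort_2d_sketch(a: np.ndarray, indices: np.ndarray) -> bool:
    # Assumed semantics: indices[i] is the argsort of a[i]; gathering by it
    # must therefore produce a non-decreasing sequence for every row.
    for row, idx in zip(a, indices):
        gathered = row[idx]
        if np.any(gathered[1:] < gathered[:-1]):
            return False
    return True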

View File

@@ -4,6 +4,7 @@ from numba import njit
import numpy as np
import pickle
import os
from config import MODEL_DIR, OUT_DIR
formats = ["ns", "µs", "ms", "s", "m", "h", "j", "w", "M", "y"]
nb = np.array([1, 1000, 1000, 1000, 60, 60, 24, 7, 4, 12], dtype = np.uint16)
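
The formats and nb tables drive format_time_ns, whose body lies outside this diff's context. A hedged reconstruction of how it plausibly works, repeated divmod through the unit multipliers while skipping zero components, which reproduces the 2**64 assertion shown in the next hunk:

def format_time_ns_sketch(t: int) -> str:
    # Unit table: ns, µs, ms, s, minutes, hours, days (j), weeks, months, years.
    formats = ["ns", "µs", "ms", "s", "m", "h", "j", "w", "M", "y"]
    nb = [1, 1000, 1000, 1000, 60, 60, 24, 7, 4, 12]
    parts = []
    for fmt, div in zip(formats, nb[1:] + [0]):
        if div:
            t, value = divmod(t, div)
        else:
            value, t = t, 0          # last unit (years) keeps whatever remains
        if value:
            parts.append(f"{value}{fmt}")
    return ' '.join(reversed(parts)) if parts else '0ns'

assert format_time_ns_sketch(2**64) == "635y 5M 3j 23h 34m 33s 709ms 551µs 616ns"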
@@ -48,12 +49,12 @@ def toolbox_unit_test() -> None:
# 2^64 == 18446744073709551616 == UINT64_MAX + 1
assert "635y 5M 3j 23h 34m 33s 709ms 551µs 616ns" == format_time_ns(2**64)
def picke_multi_loader(filenames: List[str], save_dir: str = "./models") -> List[Any]:
def picke_multi_loader(filenames: List[str], save_dir: str = MODEL_DIR) -> List[Any]:
"""Load multiple pickle data files.
Args:
filenames (List[str]): List of all the filename to load.
save_dir (str, optional): Path of the files to load. Defaults to "./models".
save_dir (str, optional): Path of the files to load. Defaults to MODEL_DIR (see config.py).
Returns:
List[Any]. List of loaded pickle data files.
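
picke_multi_loader's body is not shown in this hunk; from its use in the unit tests above (missing pickles come back as None and are skipped), a plausible sketch is:

import os
import pickle
from typing import Any, List
from config import MODEL_DIR

def picke_multi_loader_sketch(filenames: List[str], save_dir: str = MODEL_DIR) -> List[Any]:
    b = []
    for filename in filenames:
        path = f"{save_dir}/{filename}.pkl"
        if os.path.exists(path):
            with open(path, "rb") as f:
                b.append(pickle.load(f))     # loaded result for this device/label
        else:
            b.append(None)                   # caller skips missing results
    return b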
@@ -82,10 +83,10 @@ def benchmark_function(step_name: str, fnc: Callable) -> Any:
s = perf_counter_ns()
b = fnc()
e = perf_counter_ns() - s
print(f"| {step_name:<49} | {e:>17,} | {format_time_ns(e):<29} |")
print(f"| {step_name:<49} | {e:>18,} | {format_time_ns(e):<29} |")
return b
def state_saver(step_name: str, filename: Union[str, List[str]], fnc, force_redo: bool = False, save_state: bool = True, save_dir: str = "./out") -> Any:
def state_saver(step_name: str, filename: Union[str, List[str]], fnc, force_redo: bool = False, save_state: bool = True, save_dir: str = OUT_DIR) -> Any:
"""Either execute a function then saves the result or load the already existing result.
Args:
@@ -93,7 +94,7 @@ def state_saver(step_name: str, filename: Union[str, List[str]], fnc, force_redo
filename (Union[str, List[str]]): Name or list of names of the filenames where the result(s) are saved.
fnc ([type]): Function to call.
force_redo (bool, optional): Recall the function even if the result(s) is already saved. Defaults to False.
save_dir (str, optional): Path of the directory to save the result(s). Defaults to "./out".
save_dir (str, optional): Path of the directory to save the result(s). Defaults to OUT_DIR (see config.py).
Returns:
Any: The result(s) of the called function
@@ -111,7 +112,7 @@ def state_saver(step_name: str, filename: Union[str, List[str]], fnc, force_redo
print(f"Loading results of {step_name}", end = '\r')
with open(f"{save_dir}/{filename}.pkl", "rb") as f:
res = pickle.load(f)
print(f"| {step_name:<49} | {'None':>17} | {'loaded saved state':<29} |")
print(f"| {step_name:<49} | {'None':>18} | {'loaded saved state':<29} |")
return res
elif isinstance(filename, list):
abs = False
@@ -129,7 +130,7 @@ def state_saver(step_name: str, filename: Union[str, List[str]], fnc, force_redo
print(' ' * 100, end = '\r')
return b
print(f"| {step_name:<49} | {'None':>17} | {'loaded saved state':<29} |")
print(f"| {step_name:<49} | {'None':>18} | {'loaded saved state':<29} |")
b = []
print(f"Loading results of {step_name}", end = '\r')
for fn in filename:
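
state_saver is only partially visible here: the fragments show that an existing pickle is loaded and reported as "loaded saved state", and that lists of filenames are handled in a separate branch. A hedged sketch of the single-filename path, assuming the compute path goes through benchmark_function from the same module; the multi-filename branch and the exact messages may differ:

import os
import pickle
from typing import Any, Callable
from config import OUT_DIR
from toolbox import benchmark_function

def state_saver_sketch(step_name: str, filename: str, fnc: Callable, force_redo: bool = False,
                       save_state: bool = True, save_dir: str = OUT_DIR) -> Any:
    path = f"{save_dir}/{filename}.pkl"
    if force_redo or not os.path.exists(path):
        b = benchmark_function(step_name, fnc)    # run and time the step
        if save_state:
            with open(path, "wb") as f:
                pickle.dump(b, f)                 # cache the result for later runs
        return b
    print(f"Loading results of {step_name}", end = '\r')
    with open(path, "rb") as f:
        res = pickle.load(f)
    print(f"| {step_name:<49} | {'None':>18} | {'loaded saved state':<29} |")
    return res

Typical usage mirrors the calls in the training script, e.g. feats = state_saver("Building features", "feats", lambda: build_features(X_train.shape[1], X_train.shape[2]), FORCE_REDO, SAVE_STATE).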