Moved DEBUG option to config files
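The hunks below swap hard-coded "./out" and "../data" paths for OUT_DIR and DATA_DIR imported from config.py. That file is not shown in this commit view; what follows is a minimal hypothetical sketch of it, inferred only from the two names imported below and the DEBUG option named in the commit title:

# config.py -- hypothetical sketch, not part of this diff.
# Only OUT_DIR and DATA_DIR are confirmed by the import added below;
# DEBUG is inferred from the commit message.
DEBUG: bool = False        # debug toggle moved here per the commit title
OUT_DIR: str = "./out"     # where pickled CPU/GPU results are written
DATA_DIR: str = "../data"  # where the datasets live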
@@ -2,6 +2,7 @@ from toolbox import picke_multi_loader, format_time_ns, unit_test_argsort_2d
 from typing import List, Tuple
 from time import perf_counter_ns
 import numpy as np
+from config import OUT_DIR, DATA_DIR

 def unit_test(TS: List[int], labels: List[str] = ["CPU", "GPU"], tol: float = 1e-8) -> None:
     """Test if each result is equal to the results from the other devices.
@@ -20,32 +21,32 @@ def unit_test(TS: List[int], labels: List[str] = ["CPU", "GPU"], tol: float = 1e
     fnc_s = perf_counter_ns()
     n_total = 0
     n_success = 0
-    print(f"\n| {'Unit testing':<37} | {'Test state':<10} | {'Time spent (ns)':<17} | {'Formatted time spent':<29} |")
-    print(f"|{'-'*39}|{'-'*12}|{'-'*19}|{'-'*31}|")
+    print(f"\n| {'Unit testing':<37} | {'Test state':<10} | {'Time spent (ns)':<18} | {'Formatted time spent':<29} |")
+    print(f"|{'-'*39}|{'-'*12}|{'-'*20}|{'-'*31}|")

     for filename in ["X_train_feat", "X_test_feat", "X_train_ii", "X_test_ii"]:
         print(f"{filename}...", end = "\r")
-        bs = picke_multi_loader([f"{filename}_{label}" for label in labels], "./out")
+        bs = picke_multi_loader([f"{filename}_{label}" for label in labels], OUT_DIR)

         for i, (b1, l1) in enumerate(zip(bs, labels)):
             if b1 is None:
-                #print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>17} | {'None':<29} |")
+                #print(f"| {filename:<22} - {l1:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
                 continue
             for j, (b2, l2) in enumerate(zip(bs, labels)):
                 if i >= j:
                     continue
                 if b2 is None:
-                    #print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>17} | {'None':<29} |")
+                    #print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
                     continue
                 n_total += 1
                 s = perf_counter_ns()
                 state = np.abs(b1 - b2).mean() < tol
                 e = perf_counter_ns() - s
                 if state:
-                    print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Passed':>10} | {e:>17,} | {format_time_ns(e):<29} |")
+                    print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Passed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
                     n_success += 1
                 else:
-                    print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Failed':>10} | {e:>17,} | {format_time_ns(e):<29} |")
+                    print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Failed':>10} | {e:>18,} | {format_time_ns(e):<29} |")

     for filename, featname in zip(["X_train_feat_argsort", "X_test_feat_argsort"], ["X_train_feat", "X_test_feat"]):
         print(f"Loading {filename}...", end = "\r")
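Each table cell is printed as "| " plus the field plus " |", so widening the elapsed-time field from 17 to 18 characters forces the matching dash run in the separator row from 19 to 20. A standalone sanity check of that arithmetic (illustrative, not part of the commit):

# The dash run between pipes must equal the field width plus the two padding spaces.
e = 123_456_789
cell = f" {e:>18,} "    # right-aligned 18-char field, one space on each side
assert len(cell) == 20  # matches the '-' * 20 segment in the new separator row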
@@ -53,14 +54,14 @@ def unit_test(TS: List[int], labels: List[str] = ["CPU", "GPU"], tol: float = 1e
         bs = []
         for label in labels:
             if feat is None:
-                feat_tmp = picke_multi_loader([f"{featname}_{label}"], "./out")[0]
+                feat_tmp = picke_multi_loader([f"{featname}_{label}"], OUT_DIR)[0]
                 if feat_tmp is not None:
                     feat = feat_tmp
-            bs.append(picke_multi_loader([f"{filename}_{label}"], "./out")[0])
+            bs.append(picke_multi_loader([f"{filename}_{label}"], OUT_DIR)[0])

         for i, (b1, l1) in enumerate(zip(bs, labels)):
             if b1 is None:
-                #print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>17} | {'None':<29} |")
+                #print(f"| {filename:<22} - {l1:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
                 continue
             if feat is not None:
                 n_total += 1
@@ -68,26 +69,26 @@ def unit_test(TS: List[int], labels: List[str] = ["CPU", "GPU"], tol: float = 1e
                 state = unit_test_argsort_2d(feat, b1)
                 e = perf_counter_ns() - s
                 if state:
-                    print(f"| {filename:<22} - {l1:<4} argsort | {'Passed':>10} | {e:>17,} | {format_time_ns(e):<29} |")
+                    print(f"| {filename:<22} - {l1:<4} argsort | {'Passed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
                     n_success += 1
                 else:
-                    print(f"| {filename:<22} - {l1:<4} argsort | {'Failed':>10} | {e:>17,} | {format_time_ns(e):<29} |")
+                    print(f"| {filename:<22} - {l1:<4} argsort | {'Failed':>10} | {e:>18,} | {format_time_ns(e):<29} |")

             for j, (b2, l2) in enumerate(zip(bs, labels)):
                 if i >= j:
                     continue
                 if b2 is None:
-                    #print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>17} | {'None':<29} |")
+                    #print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
                     continue
                 n_total += 1
                 s = perf_counter_ns()
                 state = np.abs(b1 - b2).mean() < tol
                 e = perf_counter_ns() - s
                 if state:
-                    print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Passed':>10} | {e:>17,} | {format_time_ns(e):<29} |")
+                    print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Passed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
                     n_success += 1
                 else:
-                    print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Failed':>10} | {e:>17,} | {format_time_ns(e):<29} |")
+                    print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Failed':>10} | {e:>18,} | {format_time_ns(e):<29} |")

     for T in TS:
         for filename in ["alphas", "final_classifiers"]:
@@ -96,32 +97,33 @@ def unit_test(TS: List[int], labels: List[str] = ["CPU", "GPU"], tol: float = 1e

             for i, (b1, l1) in enumerate(zip(bs, labels)):
                 if b1 is None:
-                    #print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>17} | {'None':<29} |")
+                    #print(f"| {filename + '_' + str(T):<22} - {l1:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
                     continue
                 for j, (b2, l2) in enumerate(zip(bs, labels)):
                     if i >= j:
                         continue
                     if b2 is None:
-                        #print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>17} | {'None':<29} |")
+                        #print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
                         continue
                     n_total += 1
                     s = perf_counter_ns()
                     state = np.abs(b1 - b2).mean() < tol
                     e = perf_counter_ns() - s
                     if state:
-                        print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Passed':>10} | {e:>17,} | {format_time_ns(e):<29} |")
+                        print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Passed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
                         n_success += 1
                     else:
-                        print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Failed':>10} | {e:>17,} | {format_time_ns(e):<29} |")
-    print(f"|{'-'*39}|{'-'*12}|{'-'*19}|{'-'*31}|")
+                        print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Failed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
+
+    print(f"|{'-'*39}|{'-'*12}|{'-'*20}|{'-'*31}|")
     e = perf_counter_ns() - fnc_s
-    print(f"| {'Unit testing summary':<37} | {str(n_success) + '/' + str(n_total):>10} | {e:>17,} | {format_time_ns(e):<29} |")
+    print(f"| {'Unit testing summary':<37} | {str(n_success) + '/' + str(n_total):>10} | {e:>18,} | {format_time_ns(e):<29} |")

-def load_datasets(data_dir: str = "../data") -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
+def load_datasets(data_dir: str = DATA_DIR) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
     """Load the datasets.

     Args:
-        data_dir (str, optional): [description]. Defaults to "../data".
+        data_dir (str, optional): [description]. Defaults to DATA_DIR (see config.py).

     Returns:
         Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: [description]
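For reference, after this change the tester needs no paths at all, only the list of T values; a hypothetical driver (the values below are illustrative, only the signature comes from this diff):

# Hypothetical usage sketch: paths now come from config.py, not from arguments.
if __name__ == "__main__":
    unit_test([1, 10, 100], labels = ["CPU", "GPU"], tol = 1e-8)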