python: clearer main algorithm progression && revamp final test display

Author: saundersp
Date: 2023-07-25 12:41:03 +02:00
Parent: b507e1b0fd
Commit: 5371c6f201
2 changed files with 107 additions and 117 deletions
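The "revamp" named in the commit title is the local test_fnc closure introduced in the second hunk below: the repeated time-measure-print-tally blocks of the old version collapse into one helper that updates shared counters via nonlocal. A minimal standalone sketch of that pattern (table columns simplified; the real code also prints format_time_ns(e)):

    from time import perf_counter_ns
    from typing import Callable

    def run_tests() -> None:
        n_total = 0
        n_success = 0

        def test_fnc(title: str, fnc: Callable[[], bool]) -> None:
            # Count the test, time the predicate, and print one table row.
            nonlocal n_total, n_success
            n_total += 1
            s = perf_counter_ns()
            state = fnc()
            e = perf_counter_ns() - s
            if state:
                n_success += 1
            print(f"| {title:<37} | {'Passed' if state else 'Failed':>10} | {e:>18,} |")

        test_fnc("sanity - arithmetic", lambda: 1 + 1 == 2)
        print(f"{n_success}/{n_total} tests passed")

    run_tests()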


@@ -2,9 +2,9 @@ from toolbox import picke_multi_loader, format_time_ns, unit_test_argsort_2d
from typing import List, Tuple
from time import perf_counter_ns
import numpy as np
from config import OUT_DIR, DATA_DIR
from config import OUT_DIR, DATA_DIR, __DEBUG
def unit_test(TS: List[int], labels: List[str] = ["CPU", "GPU"], tol: float = 1e-8) -> None:
def unit_test(TS: List[int], labels: List[str] = ["CPU", "GPU", "PY", "PGPU"], tol: float = 1e-8) -> None:
"""Test if the each result is equals to other devices.
Given ViolaJones is a deterministic algorithm, the results no matter the device should be the same
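The determinism stated in the docstring is what licenses the element-wise check used by every comparison in this file. A self-contained illustration with hypothetical arrays (tol matches the function's default):

    import numpy as np

    tol = 1e-8
    b1 = np.arange(9, dtype=np.float64).reshape(3, 3)  # hypothetical CPU result
    b2 = b1 + 1e-12                                    # hypothetical GPU result with float drift
    # The check used throughout: mean absolute difference under the tolerance.
    print(np.abs(b1 - b2).mean() < tol)  # True: the two devices agree within tol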
@@ -12,83 +12,78 @@ def unit_test(TS: List[int], labels: List[str] = ["CPU", "GPU"], tol: float = 1e
Args:
TS (List[int]): Number of trained weak classifiers.
labels (List[str], optional): List of the trained device names. Defaults to ["CPU", "GPU"].
labels (List[str], optional): List of the trained device names. Defaults to ["CPU", "GPU", "PY", "PGPU"] (see config.py for more info).
tol (float, optional): Float difference tolerance. Defaults to 1e-8.
"""
if len(labels) < 2:
return print("Not enough devices to test")
fnc_s = perf_counter_ns()
n_total = 0
n_success = 0
print(f"\n| {'Unit testing':<37} | {'Test state':<10} | {'Time spent (ns)':<18} | {'Formatted time spent':<29} |")
print(f"|{'-'*39}|{'-'*12}|{'-'*20}|{'-'*31}|")
for filename in ["X_train_feat", "X_test_feat", "X_train_ii", "X_test_ii"]:
print(f"{filename}...", end = "\r")
bs = picke_multi_loader([f"{filename}_{label}" for label in labels], OUT_DIR)
fnc_s = perf_counter_ns()
n_total = 0
n_success = 0
for i, (b1, l1) in enumerate(zip(bs, labels)):
if b1 is None:
#print(f"| {filename:<22} - {l1:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
continue
for j, (b2, l2) in enumerate(zip(bs, labels)):
if i >= j:
continue
if b2 is None:
#print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
continue
n_total += 1
s = perf_counter_ns()
state = np.abs(b1 - b2).mean() < tol
e = perf_counter_ns() - s
if state:
print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Passed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
n_success += 1
else:
print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Failed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
def test_fnc(title, fnc):
nonlocal n_total, n_success
n_total += 1
s = perf_counter_ns()
state = fnc()
e = perf_counter_ns() - s
if state:
print(f"| {title:<37} | {'Passed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
n_success += 1
else:
print(f"| {title:<37} | {'Failed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
for filename, featname in zip(["X_train_feat_argsort", "X_test_feat_argsort"], ["X_train_feat", "X_test_feat"]):
print(f"Loading {filename}...", end = "\r")
for set_name in ["train", "test"]:
for filename in ["ii", "feat"]:
title = f"X_{set_name}_{filename}"
print(f"{filename}...", end = "\r")
bs = picke_multi_loader([f"{title}_{label}" for label in labels], OUT_DIR)
for i, (b1, l1) in enumerate(zip(bs, labels)):
if b1 is None:
if __DEBUG:
print(f"| {title:<22} - {l1:<12} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
continue
for j, (b2, l2) in enumerate(zip(bs, labels)):
if i >= j:
continue
if b2 is None:
if __DEBUG:
print(f"| {title:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
continue
test_fnc(f"{title:<22} - {l1:<4} vs {l2:<4}", lambda: np.abs(b1 - b2).mean() < tol)
title = f"X_{set_name}_feat_argsort"
print(f"Loading {title}...", end = "\r")
feat = None
bs = []
for label in labels:
if feat is None:
feat_tmp = picke_multi_loader([f"{featname}_{label}"], OUT_DIR)[0]
feat_tmp = picke_multi_loader([f"X_{set_name}_feat_{label}"], OUT_DIR)[0]
if feat_tmp is not None:
feat = feat_tmp
bs.append(picke_multi_loader([f"{filename}_{label}"], OUT_DIR)[0])
bs.append(picke_multi_loader([f"{title}_{label}"], OUT_DIR)[0])
for i, (b1, l1) in enumerate(zip(bs, labels)):
if b1 is None:
#print(f"| {filename:<22} - {l1:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
if __DEBUG:
print(f"| {title:<22} - {l1:<12} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
continue
if feat is not None:
n_total += 1
s = perf_counter_ns()
state = unit_test_argsort_2d(feat, b1)
e = perf_counter_ns() - s
if state:
print(f"| {filename:<22} - {l1:<4} argsort | {'Passed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
n_success += 1
else:
print(f"| {filename:<22} - {l1:<4} argsort | {'Failed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
test_fnc(f"{title:<22} - {l1:<4} argsort", lambda: unit_test_argsort_2d(feat, b1))
for j, (b2, l2) in enumerate(zip(bs, labels)):
if i >= j:
continue
if b2 is None:
#print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
if __DEBUG:
print(f"| {title:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
continue
n_total += 1
s = perf_counter_ns()
state = np.abs(b1 - b2).mean() < tol
e = perf_counter_ns() - s
if state:
print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Passed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
n_success += 1
else:
print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Failed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
test_fnc(f"{title:<22} - {l1:<4} vs {l2:<4}", lambda: np.abs(b1 - b2).mean() < tol)
for T in TS:
for filename in ["alphas", "final_classifiers"]:
@@ -97,23 +92,17 @@ def unit_test(TS: List[int], labels: List[str] = ["CPU", "GPU"], tol: float = 1e
for i, (b1, l1) in enumerate(zip(bs, labels)):
if b1 is None:
#print(f"| {filename + '_' + str(T):<22} - {l1:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
if __DEBUG:
print(f"| {filename + '_' + str(T):<22} - {l1:<12} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
continue
for j, (b2, l2) in enumerate(zip(bs, labels)):
if i >= j:
continue
if b2 is None:
#print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
if __DEBUG:
print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
continue
n_total += 1
s = perf_counter_ns()
state = np.abs(b1 - b2).mean() < tol
e = perf_counter_ns() - s
if state:
print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Passed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
n_success += 1
else:
print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Failed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
test_fnc(f"{filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4}", lambda: np.abs(b1 - b2).mean() < tol)
print(f"|{'-'*39}|{'-'*12}|{'-'*20}|{'-'*31}|")
e = perf_counter_ns() - fnc_s
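The hunk ends with the total elapsed time, which presumably feeds a summary row in the unchanged remainder of the file. For completeness, a hypothetical invocation of the revamped harness (the TS values here are illustrative, not taken from this commit):

    if __name__ == "__main__":
        # Hypothetical weak-classifier counts; labels match the new default.
        unit_test([1, 5, 10], labels=["CPU", "GPU", "PY", "PGPU"], tol=1e-8)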