Compare commits
No commits in common. "5371c6f201d63196ebfb56fc5a42dc0c91eca432" and "d0493890f113c3d7ca683ad21c9a591e5a98d49f" have entirely different histories.
5371c6f201 ... d0493890f1

cpp/projet.cpp (176 changed lines)
@@ -191,95 +191,102 @@ void testing_and_evaluating(const np::Array<int32_t>& X_train_feat, const np::Ar
}
}

- void unit_test(void) {
- printf("\n| %-37s | %-10s | %-18s | %-29s |\n", "Unit testing", "Test state", "Time spent (ns)", "Formatted time spent");
- printf("|%s|%s|%s|%s|\n", S(39), S(12), S(20), S(31));
+ void final_unit_test() {
+ printf("\n| %-49s | %-10s | %-17s | %-29s |\n", "Unit testing", "Test state", "Time spent (ns)", "Formatted time spent");
+ printf("|%s|%s|%s|%s|\n", S(51), S(12), S(19), S(31));

+ if(fs::exists(OUT_DIR "/X_train_ii_CPU.bin") && fs::exists(OUT_DIR "/X_train_ii_GPU.bin")){
+ const np::Array<uint32_t> X_train_ii_cpu = load<uint32_t>(OUT_DIR "/X_train_ii_CPU.bin");
+ const np::Array<uint32_t> X_train_ii_gpu = load<uint32_t>(OUT_DIR "/X_train_ii_GPU.bin");
+ benchmark_function_void("X_train_ii CPU vs GPU", unit_test_cpu_vs_gpu<uint32_t>, X_train_ii_cpu, X_train_ii_gpu);
+ }
+
+ if(fs::exists(OUT_DIR "/X_test_ii_CPU.bin") && fs::exists(OUT_DIR "/X_test_ii_GPU.bin")){
+ const np::Array<uint32_t> X_test_ii_cpu = load<uint32_t>(OUT_DIR "/X_test_ii_CPU.bin");
+ const np::Array<uint32_t> X_test_ii_gpu = load<uint32_t>(OUT_DIR "/X_test_ii_GPU.bin");
+ benchmark_function_void("X_test_ii CPU vs GPU", unit_test_cpu_vs_gpu<uint32_t>, X_test_ii_cpu, X_test_ii_gpu);
+ }
+
+ if(fs::exists(OUT_DIR "/X_train_feat_CPU.bin")){
+ const np::Array<int32_t> X_train_feat = load<int32_t>(OUT_DIR "/X_train_feat_CPU.bin");
+
+ if(fs::exists(OUT_DIR "/X_train_feat_GPU.bin")){
+ const np::Array<int32_t> X_train_feat_gpu = load<int32_t>(OUT_DIR "/X_train_feat_CPU.bin");
+ benchmark_function_void("X_train_feat CPU vs GPU", unit_test_cpu_vs_gpu<int32_t>, X_train_feat, X_train_feat_gpu);
+ }
+
+ np::Array<uint16_t> X_train_feat_argsort_cpu;
+ uint8_t loaded = 0;
+ if(fs::exists(OUT_DIR "/X_train_feat_argsort_CPU.bin")){
+ X_train_feat_argsort_cpu = std::move(load<uint16_t>(OUT_DIR "/X_train_feat_argsort_CPU.bin"));
+ ++loaded;
+ benchmark_function_void("argsort_2D training set (CPU)", unit_test_argsort_2d<int32_t>, X_train_feat, X_train_feat_argsort_cpu);
+ }
+
+ np::Array<uint16_t> X_train_feat_argsort_gpu;
+ if(fs::exists(OUT_DIR "/X_train_feat_argsort_GPU.bin")){
+ X_train_feat_argsort_gpu = std::move(load<uint16_t>(OUT_DIR "/X_train_feat_argsort_GPU.bin"));
+ ++loaded;
+ benchmark_function_void("argsort_2D training set (GPU)", unit_test_argsort_2d<int32_t>, X_train_feat, X_train_feat_argsort_gpu);
+ }
+
+ if (loaded == 2)
+ benchmark_function_void("X_train_feat_argsort CPU vs GPU", unit_test_cpu_vs_gpu<uint16_t>, X_train_feat_argsort_cpu, X_train_feat_argsort_gpu);
+ }
+
+ if(fs::exists(OUT_DIR "/X_test_feat_CPU.bin")){
+ const np::Array<int32_t> X_test_feat = load<int32_t>(OUT_DIR "/X_test_feat_CPU.bin");
+
+ if(fs::exists(OUT_DIR "/X_test_feat_GPU.bin")){
+ const np::Array<int32_t> X_test_feat_gpu = load<int32_t>(OUT_DIR "/X_test_feat_GPU.bin");
+ benchmark_function_void("X_test_feat CPU vs GPU", unit_test_cpu_vs_gpu<int32_t>, X_test_feat, X_test_feat_gpu);
+ }
+
+ np::Array<uint16_t> X_test_feat_argsort_cpu;
+ uint8_t loaded = 0;
+ if(fs::exists(OUT_DIR "/X_test_feat_argsort_CPU.bin")){
+ X_test_feat_argsort_cpu = std::move(load<uint16_t>(OUT_DIR "/X_test_feat_argsort_CPU.bin"));
+ ++loaded;
+ benchmark_function_void("argsort_2D testing set (CPU)", unit_test_argsort_2d<int32_t>, X_test_feat, X_test_feat_argsort_cpu);
+ }
+
+ np::Array<uint16_t> X_test_feat_argsort_gpu;
+ if(fs::exists(OUT_DIR "/X_test_feat_argsort_GPU.bin")){
+ X_test_feat_argsort_gpu = std::move(load<uint16_t>(OUT_DIR "/X_test_feat_argsort_GPU.bin"));
+ ++loaded;
+ benchmark_function_void("argsort_2D testing set (GPU)", unit_test_argsort_2d<int32_t>, X_test_feat, X_test_feat_argsort_gpu);
+ }
+
+ if (loaded == 2)
+ benchmark_function_void("X_test_feat_argsort CPU vs GPU", unit_test_cpu_vs_gpu<uint16_t>, X_test_feat_argsort_cpu, X_test_feat_argsort_gpu);
+ }
+
char title[BUFFER_SIZE] = { 0 };
- char tmp_title[BUFFER_SIZE / 2] = { 0 };
- char file_cpu[BUFFER_SIZE] = { 0 };
- char file_gpu[BUFFER_SIZE] = { 0 };
- const std::chrono::system_clock::time_point fnc_s = perf_counter_ns();
- uint64_t n_total = 0, n_success = 0;
+ char alphas_title[BUFFER_SIZE] = { 0 };
+ char final_classifiers_title[BUFFER_SIZE] = { 0 };

- auto test_fnc = [&n_total, &n_success](const char* title, const auto& fnc) {
- ++n_total;
- const std::chrono::system_clock::time_point start = perf_counter_ns();
- const bool state = fnc();
- const long long time_spent = duration_ns(perf_counter_ns() - start);
- if(state){
- printf("| %-37s | %10s | %18s | %-29s |\n", title, "Passed", thousand_sep(time_spent).c_str(), format_time_ns(time_spent).c_str());
- ++n_success;
- } else
- printf("| %-37s | %10s | %18s | %-29s |\n", title, "Failed", thousand_sep(time_spent).c_str(), format_time_ns(time_spent).c_str());
- };
+ for (const size_t T : TS) {
+ sprintf(alphas_title, MODEL_DIR "/alphas_%lu_CPU.bin", T);
+ if(!fs::exists(alphas_title)) continue;
+ const np::Array<float64_t> alphas = load<float64_t>(alphas_title);

- for (const char* label : { "train", "test" }) {
- sprintf(file_cpu, OUT_DIR "/X_%s_ii_CPU.bin", label);
- sprintf(file_gpu, OUT_DIR "/X_%s_ii_GPU.bin", label);
- if (fs::exists(file_cpu) && fs::exists(file_gpu)) {
- const np::Array<uint32_t> X_train_ii_cpu = load<uint32_t>(file_cpu);
- const np::Array<uint32_t> X_train_ii_gpu = load<uint32_t>(file_gpu);
- sprintf(tmp_title, "X_%s_ii", label);
- sprintf(title, "%-22s - CPU vs GPU", tmp_title);
- test_fnc(title, [&X_train_ii_cpu, &X_train_ii_gpu]{ return unit_test_cpu_vs_gpu<uint32_t>(X_train_ii_cpu, X_train_ii_gpu); });
- }
- char file_feat[BUFFER_SIZE] = { 0 };
- sprintf(file_feat, OUT_DIR "/X_%s_feat_CPU.bin", label);
- if (fs::exists(file_feat)) {
- const np::Array<int32_t> X_feat = load<int32_t>(file_feat);
- sprintf(file_gpu, OUT_DIR "/X_%s_feat_GPU.bin", label);
- if (fs::exists(file_gpu)) {
- const np::Array<int32_t> X_feat_gpu = load<int32_t>(file_gpu);
- sprintf(tmp_title, "X_%s_feat", label);
- sprintf(title, "%-22s - CPU vs GPU", tmp_title);
- test_fnc(title, [&X_feat, &X_feat_gpu]{ return unit_test_cpu_vs_gpu<int32_t>(X_feat, X_feat_gpu); });
- }
- sprintf(file_cpu, OUT_DIR "/X_%s_feat_argsort_CPU.bin", label);
- np::Array<uint16_t> X_feat_argsort_cpu;
- uint8_t loaded = 0;
- if (fs::exists(file_cpu)) {
- X_feat_argsort_cpu = std::move(load<uint16_t>(file_cpu));
- ++loaded;
- sprintf(tmp_title, "X_%s_feat_argsort", label);
- sprintf(title, "%-22s - CPU argsort", tmp_title);
- test_fnc(title, [&X_feat, &X_feat_argsort_cpu]{ return unit_test_argsort_2d<int32_t>(X_feat, X_feat_argsort_cpu); });
- }
- sprintf(file_gpu, OUT_DIR "/X_%s_feat_argsort_GPU.bin", label);
- np::Array<uint16_t> X_feat_argsort_gpu;
- if (fs::exists(file_gpu)) {
- X_feat_argsort_gpu = std::move(load<uint16_t>(file_gpu));
- ++loaded;
- sprintf(tmp_title, "X_%s_feat_argsort", label);
- sprintf(title, "%-22s - GPU argsort", tmp_title);
- test_fnc(title, [&X_feat, &X_feat_argsort_gpu]{ return unit_test_argsort_2d<int32_t>(X_feat, X_feat_argsort_gpu); });
- }
- if (loaded == 2){
- sprintf(tmp_title, "X_%s_feat_argsort", label);
- sprintf(title, "%-22s - CPU vs GPU", tmp_title);
- test_fnc(title, [&X_feat_argsort_cpu, &X_feat_argsort_gpu]{ return unit_test_cpu_vs_gpu<uint16_t>(X_feat_argsort_cpu, X_feat_argsort_gpu); });
- }
- }
- }
+ sprintf(final_classifiers_title, MODEL_DIR "/final_classifiers_%lu_CPU.bin", T);
+ if(!fs::exists(final_classifiers_title)) continue;
+ const np::Array<float64_t> final_classifiers = load<float64_t>(final_classifiers_title);

- for (const size_t T : TS)
- for (const char* label : { "alphas", "final_classifiers" }) {
- sprintf(file_cpu, MODEL_DIR "/%s_%lu_CPU.bin", label, T);
- sprintf(file_gpu, MODEL_DIR "/%s_%lu_GPU.bin", label, T);
- if (fs::exists(file_cpu) && fs::exists(file_gpu)){
- const np::Array<float64_t> cpu = load<float64_t>(file_cpu);
- const np::Array<float64_t> gpu = load<float64_t>(file_gpu);
- sprintf(tmp_title, "%s_%ld", label, T);
- sprintf(title, "%-22s - CPU vs GPU", tmp_title);
- test_fnc(title, [&cpu, &gpu]{ return unit_test_cpu_vs_gpu<float64_t>(cpu, gpu); });
- }
- }
+ sprintf(alphas_title, MODEL_DIR "/alphas_%lu_GPU.bin", T);
+ if(!fs::exists(alphas_title)) continue;
+ const np::Array<float64_t> alphas_gpu = load<float64_t>(alphas_title);

- const long long time_spent = duration_ns(perf_counter_ns() - fnc_s);
- sprintf(title, "%ld/%ld", n_success, n_total);
+ sprintf(final_classifiers_title, MODEL_DIR "/final_classifiers_%lu_GPU.bin", T);
+ if(!fs::exists(final_classifiers_title)) continue;
+ const np::Array<float64_t> final_classifiers_gpu = load<float64_t>(final_classifiers_title);

- printf("|%s|%s|%s|%s|\n", S(39), S(12), S(20), S(31));
- printf("| %-37s | %10s | %18s | %-29s |\n", "Unit testing summary", title, thousand_sep(time_spent).c_str(), format_time_ns(time_spent).c_str());
+ sprintf(title, "alphas %ld CPU vs GPU", T);
+ benchmark_function_void(title, unit_test_cpu_vs_gpu<float64_t>, alphas, alphas_gpu);
+ sprintf(title, "final_classifiers %ld CPU vs GPU", T);
+ benchmark_function_void(title, unit_test_cpu_vs_gpu<float64_t>, final_classifiers, final_classifiers_gpu);
+ }
}

int main(){
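Note: neither side of this diff shows the body of unit_test_cpu_vs_gpu, which both versions call (through benchmark_function_void on one side and test_fnc on the other). As a rough illustration only, the check both versions rely on amounts to an element-wise comparison of a CPU result against its GPU counterpart; the sketch below uses NumPy as a stand-in for the project's np::Array type, and the function name and exact semantics are assumptions rather than the repository's implementation.

import numpy as np

def unit_test_cpu_vs_gpu(cpu: np.ndarray, gpu: np.ndarray) -> bool:
    # Assumed semantics: same shape and identical contents on both devices.
    if cpu.shape != gpu.shape:
        return False
    return bool(np.array_equal(cpu, gpu))

# Example: an integral image computed on the CPU should match the GPU copy exactly.
a = np.arange(12, dtype = np.uint32).reshape(3, 4)
print(unit_test_cpu_vs_gpu(a, a.copy()))  # True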
@@ -301,6 +308,9 @@ int main(){
const auto [ X_train_feat, X_train_feat_argsort, y_train, X_test_feat, y_test ] = preprocessing();
train(X_train_feat, X_train_feat_argsort, y_train);
testing_and_evaluating(X_train_feat, y_train, X_test_feat, y_test);
- unit_test();
+ final_unit_test();
+ #if __DEBUG
+ printf("\nAFTER CLEANUP\n");
+ #endif
return EXIT_SUCCESS;
}
@@ -2,9 +2,9 @@ from toolbox import picke_multi_loader, format_time_ns, unit_test_argsort_2d
from typing import List, Tuple
from time import perf_counter_ns
import numpy as np
- from config import OUT_DIR, DATA_DIR, __DEBUG
+ from config import OUT_DIR, DATA_DIR

- def unit_test(TS: List[int], labels: List[str] = ["CPU", "GPU", "PY", "PGPU"], tol: float = 1e-8) -> None:
+ def unit_test(TS: List[int], labels: List[str] = ["CPU", "GPU"], tol: float = 1e-8) -> None:
"""Test if the each result is equals to other devices.

Given ViolaJones is a deterministic algorithm, the results no matter the device should be the same
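The docstring above is why every cross-device check in this file reduces to a mean absolute difference under a tolerance (tol defaults to 1e-8 in the signature shown). A self-contained illustration of that criterion, with made-up arrays standing in for two devices' results:

import numpy as np

tol = 1e-8  # default from the unit_test signature above

b1 = np.array([0.125, 0.25, 0.5])    # e.g. alphas produced on one device
b2 = b1 + 1e-12                      # the same result from another device, up to rounding
print(np.abs(b1 - b2).mean() < tol)  # True: the devices agree within tolerance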
@@ -12,78 +12,83 @@ def unit_test(TS: List[int], labels: List[str] = ["CPU", "GPU", "PY", "PGPU"], t

Args:
TS (List[int]): Number of trained weak classifiers.
- labels (List[str], optional): List of the trained device names. Defaults to ["CPU", "GPU", "PY", "PGPU"] (see config.py for more info).
+ labels (List[str], optional): List of the trained device names. Defaults to ["CPU", "GPU"].
tol (float, optional): Float difference tolerance. Defaults to 1e-8.
"""
if len(labels) < 2:
return print("Not enough devices to test")

+ fnc_s = perf_counter_ns()
+ n_total= 0
+ n_success = 0
print(f"\n| {'Unit testing':<37} | {'Test state':<10} | {'Time spent (ns)':<18} | {'Formatted time spent':<29} |")
print(f"|{'-'*39}|{'-'*12}|{'-'*20}|{'-'*31}|")

- fnc_s = perf_counter_ns()
- n_total = 0
- n_success = 0
+ for filename in ["X_train_feat", "X_test_feat", "X_train_ii", "X_test_ii"]:

- def test_fnc(title, fnc):
- nonlocal n_total, n_success
- n_total += 1
- s = perf_counter_ns()
- state = fnc()
- e = perf_counter_ns() - s
- if state:
- print(f"| {title:<37} | {'Passed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
- n_success += 1
- else:
- print(f"| {title:<37} | {'Failed':>10} | {e:>18,} | {format_time_ns(e):<29} |")

- for set_name in ["train", "test"]:
- for filename in ["ii", "feat"]:
- title = f"X_{set_name}_{filename}"
print(f"{filename}...", end = "\r")
- bs = picke_multi_loader([f"{title}_{label}" for label in labels], OUT_DIR)
+ bs = picke_multi_loader([f"{filename}_{label}" for label in labels], OUT_DIR)

for i, (b1, l1) in enumerate(zip(bs, labels)):
if b1 is None:
- if __DEBUG:
- print(f"| {title:<22} - {l1:<12} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
+ #print(f"| {filename:<22} - {l1:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
continue
for j, (b2, l2) in enumerate(zip(bs, labels)):
if i >= j:
continue
if b2 is None:
- if __DEBUG:
- print(f"| {title:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
+ #print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
continue
- test_fnc(f"{title:<22} - {l1:<4} vs {l2:<4}", lambda: np.abs(b1 - b2).mean() < tol)
+ n_total += 1
+ s = perf_counter_ns()
+ state = np.abs(b1 - b2).mean() < tol
+ e = perf_counter_ns() - s
+ if state:
+ print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Passed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
+ n_success += 1
+ else:
+ print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Failed':>10} | {e:>18,} | {format_time_ns(e):<29} |")

title = f"X_{set_name}_feat_argsort"
|
for filename, featname in zip(["X_train_feat_argsort", "X_test_feat_argsort"], ["X_train_feat", "X_test_feat"]):
|
||||||
print(f"Loading {title}...", end = "\r")
|
print(f"Loading {filename}...", end = "\r")
|
||||||
feat = None
|
feat = None
|
||||||
bs = []
|
bs = []
|
||||||
for label in labels:
|
for label in labels:
|
||||||
if feat is None:
|
if feat is None:
|
||||||
feat_tmp = picke_multi_loader([f"X_{set_name}_feat_{label}"], OUT_DIR)[0]
|
feat_tmp = picke_multi_loader([f"{featname}_{label}"], OUT_DIR)[0]
|
||||||
if feat_tmp is not None:
|
if feat_tmp is not None:
|
||||||
feat = feat_tmp
|
feat = feat_tmp
|
||||||
bs.append(picke_multi_loader([f"{title}_{label}"], OUT_DIR)[0])
|
bs.append(picke_multi_loader([f"{filename}_{label}"], OUT_DIR)[0])
|
||||||
|
|
||||||
for i, (b1, l1) in enumerate(zip(bs, labels)):
|
for i, (b1, l1) in enumerate(zip(bs, labels)):
|
||||||
if b1 is None:
|
if b1 is None:
|
||||||
if __DEBUG:
|
#print(f"| {filename:<22} - {l1:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
|
||||||
print(f"| {title:<22} - {l1:<12} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
|
|
||||||
continue
|
continue
|
||||||
if feat is not None:
|
if feat is not None:
|
||||||
test_fnc(f"{title:<22} - {l1:<4} argsort", lambda: unit_test_argsort_2d(feat, b1))
|
n_total += 1
|
||||||
|
s = perf_counter_ns()
|
||||||
|
state = unit_test_argsort_2d(feat, b1)
|
||||||
|
e = perf_counter_ns() - s
|
||||||
|
if state:
|
||||||
|
print(f"| {filename:<22} - {l1:<4} argsort | {'Passed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
|
||||||
|
n_success += 1
|
||||||
|
else:
|
||||||
|
print(f"| {filename:<22} - {l1:<4} argsort | {'Failed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
|
||||||
|
|
||||||
for j, (b2, l2) in enumerate(zip(bs, labels)):
|
for j, (b2, l2) in enumerate(zip(bs, labels)):
|
||||||
if i >= j:
|
if i >= j:
|
||||||
continue
|
continue
|
||||||
if b2 is None:
|
if b2 is None:
|
||||||
if __DEBUG:
|
#print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
|
||||||
print(f"| {title:<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
|
|
||||||
continue
|
continue
|
||||||
test_fnc(f"{title:<22} - {l1:<4} vs {l2:<4}", lambda: np.abs(b1 - b2).mean() < tol)
|
n_total += 1
|
||||||
|
s = perf_counter_ns()
|
||||||
|
state = np.abs(b1 - b2).mean() < tol
|
||||||
|
e = perf_counter_ns() - s
|
||||||
|
if state:
|
||||||
|
print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Passed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
|
||||||
|
n_success += 1
|
||||||
|
else:
|
||||||
|
print(f"| {filename:<22} - {l1:<4} vs {l2:<4} | {'Failed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
|
||||||
|
|
||||||
for T in TS:
|
for T in TS:
|
||||||
for filename in ["alphas", "final_classifiers"]:
|
for filename in ["alphas", "final_classifiers"]:
|
||||||
@@ -92,17 +97,23 @@ def unit_test(TS: List[int], labels: List[str] = ["CPU", "GPU", "PY", "PGPU"], t

for i, (b1, l1) in enumerate(zip(bs, labels)):
if b1 is None:
- if __DEBUG:
- print(f"| {filename + '_' + str(T):<22} - {l1:<12} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
+ #print(f"| {filename + '_' + str(T):<22} - {l1:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
continue
for j, (b2, l2) in enumerate(zip(bs, labels)):
if i >= j:
continue
if b2 is None:
- if __DEBUG:
- print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
+ #print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Skipped':>10} | {'None':>18} | {'None':<29} |")
continue
- test_fnc(f"{filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4}", lambda: np.abs(b1 - b2).mean() < tol)
+ n_total += 1
+ s = perf_counter_ns()
+ state = np.abs(b1 - b2).mean() < tol
+ e = perf_counter_ns() - s
+ if state:
+ print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Passed':>10} | {e:>18,} | {format_time_ns(e):<29} |")
+ n_success += 1
+ else:
+ print(f"| {filename + '_' + str(T):<22} - {l1:<4} vs {l2:<4} | {'Failed':>10} | {e:>18,} | {format_time_ns(e):<29} |")

print(f"|{'-'*39}|{'-'*12}|{'-'*20}|{'-'*31}|")
e = perf_counter_ns() - fnc_s
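unit_test_argsort_2d is imported from toolbox, which is not part of this comparison. A plausible sketch of the property it verifies, assuming it checks that each row of the argsort output really orders the corresponding row of the feature matrix:

import numpy as np

def unit_test_argsort_2d(values: np.ndarray, indices: np.ndarray) -> bool:
    # Assumed semantics: indices[i] must sort values[i] in non-decreasing order.
    for row, order in zip(values, indices):
        sorted_row = row[order]
        if np.any(sorted_row[1:] < sorted_row[:-1]):
            return False
    return True

X = np.array([[3, 1, 2], [9, 7, 8]])
print(unit_test_argsort_2d(X, np.argsort(X, axis = 1)))  # True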
@@ -26,36 +26,17 @@ else:
from ViolaJonesCPU import apply_features, set_integral_image, argsort
label = 'CPU' if COMPILE_WITH_C else 'PY'

- def preprocessing() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
- """Load the dataset, calculate features and integral images, apply features to images and calculate argsort of the featured images.
+ def bench_train(X_train: np.ndarray, X_test: np.ndarray, y_train: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
+ """Train the weak classifiers.

+ Args:
+ X_train (np.ndarray): Training images.
+ X_test (np.ndarray): Testing Images.
+ y_train (np.ndarray): Training labels.

Returns:
- Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: X_train_feat, X_train_feat_argsort, y_train, X_test_feat, y_test
+ Tuple[np.ndarray, np.ndarray]: Training and testing features.
"""
- # Creating state saver folders if they don't exist already
- if SAVE_STATE:
- for folder_name in ["models", "out"]:
- makedirs(folder_name, exist_ok = True)
-
- print(f"| {'Preprocessing':<49} | {'Time spent (ns)':<18} | {'Formatted time spent':<29} |\n|{'-'*51}|{'-'*20}|{'-'*31}|")
-
- X_train, y_train, X_test, y_test = state_saver("Loading sets", ["X_train", "y_train", "X_test", "y_test"],
- load_datasets, FORCE_REDO, SAVE_STATE)
-
- if __DEBUG:
- print("X_train")
- print(X_train.shape)
- print(X_train[IDX_INSPECT])
- print("X_test")
- print(X_test.shape)
- print(X_test[IDX_INSPECT])
- print("y_train")
- print(y_train.shape)
- print(y_train[IDX_INSPECT: IDX_INSPECT + IDX_INSPECT_OFFSET])
- print("y_test")
- print(y_test.shape)
- print(y_test[IDX_INSPECT: IDX_INSPECT + IDX_INSPECT_OFFSET])

feats = state_saver("Building features", "feats", lambda: build_features(X_train.shape[1], X_train.shape[2]), FORCE_REDO, SAVE_STATE)

if __DEBUG:
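state_saver is defined elsewhere in the repository; from the call sites above it behaves like a timed compute-or-reload cache keyed by name. The following is a hypothetical sketch of that pattern only (function name, argument order, and the pickle-based storage are assumptions, and the real helper also accepts a list of names and unpacks several arrays at once):

import pickle
from os.path import exists
from time import perf_counter_ns

def state_saver(title, name, fnc, force_redo = False, save_state = True, out_dir = "out"):
    # Hypothetical sketch: recompute (or reload) a cached value and time the operation.
    path = f"{out_dir}/{name}.pkl"
    s = perf_counter_ns()
    if force_redo or not exists(path):
        value = fnc()
        if save_state:
            with open(path, "wb") as f:
                pickle.dump(value, f)
    else:
        with open(path, "rb") as f:
            value = pickle.load(f)
    print(f"| {title:<49} | {perf_counter_ns() - s:>18,} |")
    return value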
@@ -96,12 +77,13 @@ def preprocessing() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
# fnc = lambda: get_best_anova_features(X_train_feat, y_train))
#indices = benchmark_function("Selecting best features (manual)", lambda: get_best_anova_features(X_train_feat, y_train))

- #if __DEBUG:
+ # FIXME Debug code
# print("indices")
# print(indices.shape)
# print(indices[IDX_INSPECT: IDX_INSPECT + IDX_INSPECT_OFFSET])
# assert indices.shape[0] == indices_new.shape[0], f"Indices length not equal : {indices.shape} != {indices_new.shape}"
# assert (eq := indices == indices_new).all(), f"Indices not equal : {eq.sum() / indices.shape[0]}"
+ # return 0, 0

# X_train_feat, X_test_feat = X_train_feat[indices], X_test_feat[indices]

@@ -122,17 +104,8 @@ def preprocessing() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
print(X_test_feat_argsort.shape)
print(X_test_feat_argsort[IDX_INSPECT, : IDX_INSPECT_OFFSET])
benchmark_function("Arg unit test", lambda: unit_test_argsort_2d(X_test_feat, X_test_feat_argsort))
+ del X_test_feat_argsort

- return X_train_feat, X_train_feat_argsort, y_train, X_test_feat, y_test
-
- def train(X_train_feat: np.ndarray, X_train_feat_argsort: np.ndarray, y_train: np.ndarray) -> None:
- """Train the weak classifiers.
-
- Args:
- X_train (np.ndarray): Training images.
- X_test (np.ndarray): Testing Images.
- y_train (np.ndarray): Training labels.
- """
print(f"\n| {'Training':<49} | {'Time spent (ns)':<18} | {'Formatted time spent':<29} |\n|{'-'*51}|{'-'*20}|{'-'*31}|")

for T in TS:
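benchmark_function is also defined outside this diff; judging from its uses above it times a callable in nanoseconds and prints one row of the timing table. A minimal hypothetical sketch of that wrapper (the real one also formats the elapsed time with format_time_ns):

from time import perf_counter_ns

def benchmark_function(title, fnc):
    # Hypothetical sketch: run fnc once, report elapsed nanoseconds, return its result.
    s = perf_counter_ns()
    result = fnc()
    e = perf_counter_ns() - s
    print(f"| {title:<49} | {e:>18,} | {result} |")
    return result

benchmark_function("Arg unit test", lambda: sum(range(1_000)))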
@@ -144,13 +117,15 @@ def train(X_train_feat: np.ndarray, X_train_feat_argsort: np.ndarray, y_train: n
print("final_classifiers")
print(final_classifiers)

- def testing_and_evaluating(X_train_feat: np.ndarray, y_train: np.ndarray, X_test_feat: np.ndarray, y_test: np.ndarray) -> None:
+ return X_train_feat, X_test_feat

+ def bench_accuracy(label, X_train_feat: np.ndarray, X_test_feat: np.ndarray, y_train: np.ndarray, y_test: np.ndarray) -> None:
"""Benchmark the trained classifiers on the training and testing sets.

Args:
X_train_feat (np.ndarray): Training features.
- y_train (np.ndarray): Training labels.
X_test_feat (np.ndarray): Testing features.
+ y_train (np.ndarray): Training labels.
y_test (np.ndarray): Testing labels.
"""
print(f"\n| {'Testing':<26} | Time spent (ns) (E) | {'Formatted time spent (E)':<29}", end = " | ")
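The evaluation table printed by bench_accuracy / testing_and_evaluating reports accuracy, F1, false negatives, and false positives for each T. For reference, a small self-contained example (not repository code) showing how those four numbers relate:

import numpy as np

y_true = np.array([1, 1, 0, 0, 1, 0])
y_pred = np.array([1, 0, 0, 1, 1, 0])

TP = int(np.sum((y_pred == 1) & (y_true == 1)))
FP = int(np.sum((y_pred == 1) & (y_true == 0)))   # false positives
FN = int(np.sum((y_pred == 0) & (y_true == 1)))   # false negatives
TN = int(np.sum((y_pred == 0) & (y_true == 0)))

acc = (TP + TN) / y_true.shape[0]
f1 = 2 * TP / (2 * TP + FP + FN)
print(f"acc = {acc:>7.2%} | F1 = {f1:>6.2f} | FN = {FN} | FP = {FP}")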
@@ -187,21 +162,45 @@ def testing_and_evaluating(X_train_feat: np.ndarray, y_train: np.ndarray, X_test
print(f"| {'ViolaJones T = ' + str(T):<19} | {e_acc:>7.2%} | {e_f1:>6.2f} | {e_FN:>6,} | {e_FP:>6,}", end = " | ")
print(f"{t_acc:>7.2%} | {t_f1:>6.2f} | {t_FN:>6,} | {t_FP:>6,} |")

- def main() -> None:
- print(f"| {'Unit testing':<49} | {'Time spent (ns)':<18} | {'Formatted time spent':<29} |")
- print(f"|{'-'*51}|{'-'*20}|{'-'*31}|")
- benchmark_function("Testing format_time_ns", format_time_ns_test)
- print()
+ def _main_() -> None:

- X_train_feat, X_train_feat_argsort, y_train, X_test_feat, y_test = preprocessing()
- train(X_train_feat, X_train_feat_argsort, y_train)
+ # Creating state saver folders if they don't exist already
+ if SAVE_STATE:
+ for folder_name in ["models", "out"]:
+ makedirs(folder_name, exist_ok = True)
+
+ print(f"| {'Preprocessing':<49} | {'Time spent (ns)':<18} | {'Formatted time spent':<29} |\n|{'-'*51}|{'-'*20}|{'-'*31}|")
+
+ X_train, y_train, X_test, y_test = state_saver("Loading sets", ["X_train", "y_train", "X_test", "y_test"],
+ load_datasets, FORCE_REDO, SAVE_STATE)
+
+ if __DEBUG:
+ print("X_train")
+ print(X_train.shape)
+ print(X_train[IDX_INSPECT])
+ print("X_test")
+ print(X_test.shape)
+ print(X_test[IDX_INSPECT])
+ print("y_train")
+ print(y_train.shape)
+ print(y_train[IDX_INSPECT: IDX_INSPECT + IDX_INSPECT_OFFSET])
+ print("y_test")
+ print(y_test.shape)
+ print(y_test[IDX_INSPECT: IDX_INSPECT + IDX_INSPECT_OFFSET])
+
+ X_train_feat, X_test_feat = bench_train(X_train, X_test, y_train)

# X_train_feat, X_test_feat = picke_multi_loader([f"X_train_feat_{label}", f"X_test_feat_{label}"], OUT_DIR)
# indices = picke_multi_loader(["indices"], OUT_DIR)[0]
# X_train_feat, X_test_feat = X_train_feat[indices], X_test_feat[indices]

- testing_and_evaluating(X_train_feat, y_train, X_test_feat, y_test)
- unit_test(TS)
+ bench_accuracy(label, X_train_feat, X_test_feat, y_train, y_test)

if __name__ == "__main__":
- main()
+ _main_()
+ if __DEBUG:
+ toolbox_unit_test()
+
+ # Only execute unit test after having trained the specified labels
+ unit_test(TS, ["GPU", "CPU", "PY", "PGPU"])
+ pass