python : clearer main algorithm progression && revamp final test display
python/projet.py (109 changed lines)
@@ -26,17 +26,36 @@ else:
from ViolaJonesCPU import apply_features, set_integral_image, argsort
label = 'CPU' if COMPILE_WITH_C else 'PY'

def bench_train(X_train: np.ndarray, X_test: np.ndarray, y_train: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Train the weak classifiers.

Args:
X_train (np.ndarray): Training images.
X_test (np.ndarray): Testing Images.
y_train (np.ndarray): Training labels.
def preprocessing() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Load the dataset, calculate features and integral images, apply features to images and calculate argsort of the featured images.

Returns:
Tuple[np.ndarray, np.ndarray]: Training and testing features.
Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: X_train_feat, X_train_feat_argsort, y_train, X_test_feat, y_test
"""
# Creating state saver folders if they don't exist already
if SAVE_STATE:
for folder_name in ["models", "out"]:
makedirs(folder_name, exist_ok = True)

print(f"| {'Preprocessing':<49} | {'Time spent (ns)':<18} | {'Formatted time spent':<29} |\n|{'-'*51}|{'-'*20}|{'-'*31}|")

X_train, y_train, X_test, y_test = state_saver("Loading sets", ["X_train", "y_train", "X_test", "y_test"],
load_datasets, FORCE_REDO, SAVE_STATE)

if __DEBUG:
print("X_train")
print(X_train.shape)
print(X_train[IDX_INSPECT])
print("X_test")
print(X_test.shape)
print(X_test[IDX_INSPECT])
print("y_train")
print(y_train.shape)
print(y_train[IDX_INSPECT: IDX_INSPECT + IDX_INSPECT_OFFSET])
print("y_test")
print(y_test.shape)
print(y_test[IDX_INSPECT: IDX_INSPECT + IDX_INSPECT_OFFSET])

feats = state_saver("Building features", "feats", lambda: build_features(X_train.shape[1], X_train.shape[2]), FORCE_REDO, SAVE_STATE)

if __DEBUG:
@@ -77,13 +96,12 @@ def bench_train(X_train: np.ndarray, X_test: np.ndarray, y_train: np.ndarray) ->
# fnc = lambda: get_best_anova_features(X_train_feat, y_train))
#indices = benchmark_function("Selecting best features (manual)", lambda: get_best_anova_features(X_train_feat, y_train))

# FIXME Debug code
# print("indices")
# print(indices.shape)
# print(indices[IDX_INSPECT: IDX_INSPECT + IDX_INSPECT_OFFSET])
# assert indices.shape[0] == indices_new.shape[0], f"Indices length not equal : {indices.shape} != {indices_new.shape}"
# assert (eq := indices == indices_new).all(), f"Indices not equal : {eq.sum() / indices.shape[0]}"
# return 0, 0
#if __DEBUG:
# print("indices")
# print(indices.shape)
# print(indices[IDX_INSPECT: IDX_INSPECT + IDX_INSPECT_OFFSET])
# assert indices.shape[0] == indices_new.shape[0], f"Indices length not equal : {indices.shape} != {indices_new.shape}"
# assert (eq := indices == indices_new).all(), f"Indices not equal : {eq.sum() / indices.shape[0]}"

# X_train_feat, X_test_feat = X_train_feat[indices], X_test_feat[indices]
@@ -104,8 +122,17 @@ def bench_train(X_train: np.ndarray, X_test: np.ndarray, y_train: np.ndarray) ->
print(X_test_feat_argsort.shape)
print(X_test_feat_argsort[IDX_INSPECT, : IDX_INSPECT_OFFSET])
benchmark_function("Arg unit test", lambda: unit_test_argsort_2d(X_test_feat, X_test_feat_argsort))
del X_test_feat_argsort

return X_train_feat, X_train_feat_argsort, y_train, X_test_feat, y_test

def train(X_train_feat: np.ndarray, X_train_feat_argsort: np.ndarray, y_train: np.ndarray) -> None:
"""Train the weak classifiers.

Args:
X_train (np.ndarray): Training images.
X_test (np.ndarray): Testing Images.
y_train (np.ndarray): Training labels.
"""
print(f"\n| {'Training':<49} | {'Time spent (ns)':<18} | {'Formatted time spent':<29} |\n|{'-'*51}|{'-'*20}|{'-'*31}|")

for T in TS:
@@ -117,15 +144,13 @@ def bench_train(X_train: np.ndarray, X_test: np.ndarray, y_train: np.ndarray) ->
print("final_classifiers")
print(final_classifiers)

return X_train_feat, X_test_feat

def bench_accuracy(label, X_train_feat: np.ndarray, X_test_feat: np.ndarray, y_train: np.ndarray, y_test: np.ndarray) -> None:
def testing_and_evaluating(X_train_feat: np.ndarray, y_train: np.ndarray, X_test_feat: np.ndarray, y_test: np.ndarray) -> None:
"""Benchmark the trained classifiers on the training and testing sets.

Args:
X_train_feat (np.ndarray): Training features.
X_test_feat (np.ndarray): Testing features.
y_train (np.ndarray): Training labels.
X_test_feat (np.ndarray): Testing features.
y_test (np.ndarray): Testing labels.
"""
print(f"\n| {'Testing':<26} | Time spent (ns) (E) | {'Formatted time spent (E)':<29}", end = " | ")
@@ -162,45 +187,21 @@ def bench_accuracy(label, X_train_feat: np.ndarray, X_test_feat: np.ndarray, y_t
print(f"| {'ViolaJones T = ' + str(T):<19} | {e_acc:>7.2%} | {e_f1:>6.2f} | {e_FN:>6,} | {e_FP:>6,}", end = " | ")
print(f"{t_acc:>7.2%} | {t_f1:>6.2f} | {t_FN:>6,} | {t_FP:>6,} |")

def _main_() -> None:
def main() -> None:
print(f"| {'Unit testing':<49} | {'Time spent (ns)':<18} | {'Formatted time spent':<29} |")
print(f"|{'-'*51}|{'-'*20}|{'-'*31}|")
benchmark_function("Testing format_time_ns", format_time_ns_test)
print()

# Creating state saver folders if they don't exist already
if SAVE_STATE:
for folder_name in ["models", "out"]:
makedirs(folder_name, exist_ok = True)

print(f"| {'Preprocessing':<49} | {'Time spent (ns)':<18} | {'Formatted time spent':<29} |\n|{'-'*51}|{'-'*20}|{'-'*31}|")

X_train, y_train, X_test, y_test = state_saver("Loading sets", ["X_train", "y_train", "X_test", "y_test"],
load_datasets, FORCE_REDO, SAVE_STATE)

if __DEBUG:
print("X_train")
print(X_train.shape)
print(X_train[IDX_INSPECT])
print("X_test")
print(X_test.shape)
print(X_test[IDX_INSPECT])
print("y_train")
print(y_train.shape)
print(y_train[IDX_INSPECT: IDX_INSPECT + IDX_INSPECT_OFFSET])
print("y_test")
print(y_test.shape)
print(y_test[IDX_INSPECT: IDX_INSPECT + IDX_INSPECT_OFFSET])

X_train_feat, X_test_feat = bench_train(X_train, X_test, y_train)
X_train_feat, X_train_feat_argsort, y_train, X_test_feat, y_test = preprocessing()
train(X_train_feat, X_train_feat_argsort, y_train)

# X_train_feat, X_test_feat = picke_multi_loader([f"X_train_feat_{label}", f"X_test_feat_{label}"], OUT_DIR)
# indices = picke_multi_loader(["indices"], OUT_DIR)[0]
# X_train_feat, X_test_feat = X_train_feat[indices], X_test_feat[indices]

bench_accuracy(label, X_train_feat, X_test_feat, y_train, y_test)
testing_and_evaluating(X_train_feat, y_train, X_test_feat, y_test)
unit_test(TS)

if __name__ == "__main__":
_main_()
if __DEBUG:
toolbox_unit_test()

# Only execute unit test after having trained the specified labels
unit_test(TS, ["GPU", "CPU", "PY", "PGPU"])
pass
main()
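
Taken together, the renamed helpers give the entry point a three-stage progression. The sketch below is a minimal reading of that flow, using only the signatures visible in the hunks above; the __main__ wiring, the unit-test calls and the surrounding constants are assumed to stay as defined elsewhere in projet.py:

def main() -> None:
    # Stage 1: load the sets, build and apply the features, precompute the argsorts
    X_train_feat, X_train_feat_argsort, y_train, X_test_feat, y_test = preprocessing()
    # Stage 2: train the weak classifiers for every T in TS
    train(X_train_feat, X_train_feat_argsort, y_train)
    # Stage 3: benchmark the trained classifiers and print the results table
    testing_and_evaluating(X_train_feat, y_train, X_test_feat, y_test)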
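
The revamped test display relies on Python format specifications for column alignment: '<' left-aligns within a fixed width, '>' right-aligns, '.2%' renders a ratio as a percentage and ',' adds a thousands separator. A small standalone illustration of the same technique, with placeholder column labels and made-up metric values rather than the real benchmark output:

# Illustrative values only; the widths mirror the ones used in testing_and_evaluating.
T, e_acc, e_f1, e_FN, e_FP = 64, 0.9312, 0.89, 123, 4567
print(f"| {'Classifier':<19} | {'Acc':>7} | {'F1':>6} | {'FN':>6} | {'FP':>6} |")
print(f"| {'ViolaJones T = ' + str(T):<19} | {e_acc:>7.2%} | {e_f1:>6.2f} | {e_FN:>6,} | {e_FP:>6,} |")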