python : Updated code with better display, documentation and format_time

Author: saundersp
Date: 2024-04-28 00:25:13 +02:00
parent c7d21e1014
commit 718724b63b

11 changed files with 591 additions and 566 deletions


@@ -2,14 +2,15 @@
 # Author: @saundersp
 from ViolaJones import train_viola_jones, classify_viola_jones
-from toolbox import state_saver, picke_multi_loader, format_time_ns, benchmark_function, unit_test_argsort_2d
-from toolbox_unit_test import format_time_ns_test
+#from toolbox import state_saver, pickle_multi_loader, format_time_ns, benchmark_function, unit_test_argsort_2d
+from toolbox import state_saver, format_time_ns, benchmark_function, unit_test_argsort_2d
+from toolbox import header, footer, formatted_row, formatted_line
+from toolbox_unit_test import format_time_test, format_time_ns_test
 from sklearn.metrics import accuracy_score, f1_score, confusion_matrix
-from sklearn.feature_selection import SelectPercentile, f_classif
+#from sklearn.feature_selection import SelectPercentile, f_classif
 from common import load_datasets, unit_test
-from ViolaJones import build_features, get_best_anova_features
-from typing import Tuple
+from ViolaJones import build_features # , get_best_anova_features
+from typing import Tuple, List
 from time import perf_counter_ns
 from os import makedirs
 import numpy as np
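
Note: the recurring change in this file is the argument order of the display helpers, which now take the column-width list first (header(gaps, titles) instead of header(titles, gaps)), plus the new formatted_row, formatted_line and footer imports. toolbox.py itself is not part of this diff, so the following is only a plausible sketch of those helpers reconstructed from the call sites visible here; in particular, the convention that a negative gap right-aligns its column is an assumption.

from typing import Any, List

def formatted_row(gaps: List[int], columns: List[Any], separator: str = '│') -> None:
	# Assumption: a negative gap right-aligns the column, a positive one left-aligns it
	row = separator
	for gap, column in zip(gaps, columns):
		align = '>' if gap < 0 else '<'
		row += f' {str(column):{align}{abs(gap)}} {separator}'
	print(row)

def formatted_line(gaps: List[int], left: str, middle: str, separator: str, right: str) -> None:
	# Horizontal rule matching the column widths, e.g. ├────┼────┤
	line = left
	for i, gap in enumerate(gaps):
		line += middle * (abs(gap) + 2) + (separator if i < len(gaps) - 1 else right)
	print(line)

def header(gaps: List[int], titles: List[str]) -> None:
	formatted_line(gaps, '┌', '─', '┬', '┐')
	formatted_row(gaps, titles)
	formatted_line(gaps, '├', '─', '┼', '┤')

def footer(gaps: List[int]) -> None:
	formatted_line(gaps, '└', '─', '┴', '┘')

Under that reading, header([26, -18, 29], ['Training', 'Time spent (ns)', 'Formatted time spent']) draws the box top and title row of the training table below, and footer closes it.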
@@ -28,35 +29,42 @@ else:
 	from ViolaJonesCPU import apply_features, set_integral_image, argsort
 label = 'CPU' if COMPILE_WITH_C else 'PY'

-def preprocessing() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
-	"""Load the dataset, calculate features and integral images, apply features to images and calculate argsort of the featured images.
+def preprocessing() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
+	"""Execute the preprocessing phase.
+
+	The preprocessing phase consists of the following steps:
+	- Load the dataset
+	- Calculate features
+	- Calculate integral images
+	- Apply features to images
+	- Calculate argsort of the featured images

 	Returns:
-		Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: X_train_feat, X_train_feat_argsort, y_train, X_test_feat, y_test
+		Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]: Tuple containing, in order: training features, training features sorted indexes, training labels, testing features, testing labels
 	"""
 	# Creating state saver folders if they don't exist already
 	if SAVE_STATE:
-		for folder_name in ["models", "out"]:
+		for folder_name in ['models', 'out']:
 			makedirs(folder_name, exist_ok = True)

 	preproc_timestamp = perf_counter_ns()
 	preproc_gaps = [49, -18, 29]
-	header(['Preprocessing', 'Time spent (ns)', 'Formatted time spent'], preproc_gaps)
+	header(preproc_gaps, ['Preprocessing', 'Time spent (ns)', 'Formatted time spent'])

 	X_train, y_train, X_test, y_test = state_saver('Loading sets', preproc_gaps[0], ['X_train', 'y_train', 'X_test', 'y_test'],
-			load_datasets, FORCE_REDO, SAVE_STATE)
+					load_datasets, FORCE_REDO, SAVE_STATE)

 	if __DEBUG:
-		print("X_train")
+		print('X_train')
 		print(X_train.shape)
 		print(X_train[IDX_INSPECT])
-		print("X_test")
+		print('X_test')
 		print(X_test.shape)
 		print(X_test[IDX_INSPECT])
-		print("y_train")
+		print('y_train')
 		print(y_train.shape)
 		print(y_train[IDX_INSPECT: IDX_INSPECT + IDX_INSPECT_OFFSET])
-		print("y_test")
+		print('y_test')
 		print(y_test.shape)
 		print(y_test[IDX_INSPECT: IDX_INSPECT + IDX_INSPECT_OFFSET])
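
state_saver is called throughout with (title, gap, name(s), function, FORCE_REDO, SAVE_STATE) but is defined in toolbox.py, outside this diff. A hypothetical sketch of that caching pattern, consistent with the call sites above (the file naming and the printed row format are guesses):

import pickle
from os import path
from time import perf_counter_ns
from typing import Any, Callable, List, Union

def state_saver(title: str, gap: int, names: Union[str, List[str]], fnc: Callable[[], Any],
		force_redo: bool = False, save_state: bool = True, save_dir: str = 'out') -> Any:
	"""Time fnc, print a formatted row, and cache its result(s) on disk.

	If every pickle named in 'names' already exists and force_redo is False,
	the cached arrays are loaded instead of being recomputed.
	"""
	if isinstance(names, str):
		names = [names]
	filepaths = [path.join(save_dir, f'{name}.pkl') for name in names]
	if not force_redo and all(path.exists(fp) for fp in filepaths):
		results = []
		for fp in filepaths:
			with open(fp, 'rb') as f:
				results.append(pickle.load(f))
		return results[0] if len(results) == 1 else tuple(results)
	timestamp = perf_counter_ns()
	result = fnc()
	time_spent = perf_counter_ns() - timestamp
	print(f'│ {title:<{gap}} │ {time_spent:>18,} │')
	if save_state:
		values = result if len(names) > 1 else [result]
		for fp, value in zip(filepaths, values):
			with open(fp, 'wb') as f:
				pickle.dump(value, f)
	return result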
@@ -64,7 +72,7 @@ def preprocessing() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
 			FORCE_REDO, SAVE_STATE)

 	if __DEBUG:
-		print("feats")
+		print('feats')
 		print(feats.shape)
 		print(feats[IDX_INSPECT].ravel())
@@ -74,10 +82,10 @@ def preprocessing() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
 			lambda: set_integral_image(X_test), FORCE_REDO, SAVE_STATE)

 	if __DEBUG:
-		print("X_train_ii")
+		print('X_train_ii')
 		print(X_train_ii.shape)
 		print(X_train_ii[IDX_INSPECT])
-		print("X_test_ii")
+		print('X_test_ii')
 		print(X_test_ii.shape)
 		print(X_test_ii[IDX_INSPECT])
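
The benchmarked set_integral_image lives in ViolaJonesCPU.py / ViolaJonesGPU.py, which this diff does not show. For reference, the standard integral-image (summed-area table) computation can be expressed in NumPy as a double cumulative sum; this is a sketch of the technique, not the project's actual implementation:

import numpy as np

def set_integral_image(X: np.ndarray) -> np.ndarray:
	"""Compute the summed-area table of every image in a (N, H, W) batch.

	ii[n, y, x] equals the sum of X[n, :y + 1, :x + 1] (inclusive convention).
	"""
	return X.cumsum(axis = 1).cumsum(axis = 2)

Once the table exists, the sum of any rectangle can be read back with four lookups, which is what makes apply_features cheap to evaluate for every Haar-like feature.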
@@ -88,25 +96,25 @@ def preprocessing() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
 	del X_train_ii, X_test_ii, feats

 	if __DEBUG:
-		print("X_train_feat")
+		print('X_train_feat')
 		print(X_train_feat.shape)
 		print(X_train_feat[IDX_INSPECT, : IDX_INSPECT_OFFSET])
-		print("X_test_feat")
+		print('X_test_feat')
 		print(X_test_feat.shape)
 		print(X_test_feat[IDX_INSPECT, : IDX_INSPECT_OFFSET])

-	#indices = state_saver("Selecting best features training set", "indices", force_redo = True, save_state = SAVE_STATE,
+	#indices = state_saver('Selecting best features training set', 'indices', force_redo = FORCE_REDO, save_state = SAVE_STATE,
 	#			fnc = lambda: SelectPercentile(f_classif, percentile = 10).fit(X_train_feat.T, y_train).get_support(indices = True))
-	#indices = state_saver("Selecting best features training set", "indices", force_redo = FORCE_REDO, save_state = SAVE_STATE,
+	#indices = state_saver('Selecting best features training set', 'indices', force_redo = FORCE_REDO, save_state = SAVE_STATE,
 	#			fnc = lambda: get_best_anova_features(X_train_feat, y_train))
-	#indices = benchmark_function("Selecting best features (manual)", lambda: get_best_anova_features(X_train_feat, y_train))
+	#indices = benchmark_function('Selecting best features (manual)', lambda: get_best_anova_features(X_train_feat, y_train))

 	#if __DEBUG:
-	#	print("indices")
+	#	print('indices')
 	#	print(indices.shape)
 	#	print(indices[IDX_INSPECT: IDX_INSPECT + IDX_INSPECT_OFFSET])
-	#	assert indices.shape[0] == indices_new.shape[0], f"Indices length not equal : {indices.shape} != {indices_new.shape}"
-	#	assert (eq := indices == indices_new).all(), f"Indices not equal : {eq.sum() / indices.shape[0]}"
+	#	assert indices.shape[0] == indices_new.shape[0], f'Indices length not equal : {indices.shape} != {indices_new.shape}'
+	#	assert (eq := indices == indices_new).all(), f'Indices not equal : {eq.sum() / indices.shape[0]}'

 	# X_train_feat, X_test_feat = X_train_feat[indices], X_test_feat[indices]
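
The commented-out block above selects the top 10% of features by one-way ANOVA F-score; note it fits X_train_feat.T because this project stores features as (n_features, n_samples), whereas sklearn expects (n_samples, n_features). A self-contained toy example of the same SelectPercentile call:

import numpy as np
from sklearn.feature_selection import SelectPercentile, f_classif

rng = np.random.default_rng(42)
X = rng.normal(size = (200, 50))      # 200 samples, 50 features
y = rng.integers(0, 2, size = 200)    # binary labels
X[:, 3] += 2.0 * y                    # make feature 3 informative

# Keep the 10% of features with the highest ANOVA F-statistic
indices = SelectPercentile(f_classif, percentile = 10).fit(X, y).get_support(indices = True)
print(indices)  # 5 feature indices; feature 3 should be among them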
@@ -114,19 +122,20 @@ def preprocessing() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
 			lambda: argsort(X_train_feat), FORCE_REDO, SAVE_STATE)

 	if __DEBUG:
-		print("X_train_feat_argsort")
+		print('X_train_feat_argsort')
 		print(X_train_feat_argsort.shape)
 		print(X_train_feat_argsort[IDX_INSPECT, : IDX_INSPECT_OFFSET])

 	benchmark_function('Arg unit test', preproc_gaps[0], lambda: unit_test_argsort_2d(X_train_feat, X_train_feat_argsort))

-	X_test_feat_argsort = state_saver(f"Precalculating testing set argsort ({label})", f"X_test_feat_argsort_{label}",
+	X_test_feat_argsort = state_saver(f'Precalculating testing set argsort ({label})', preproc_gaps[0], f'X_test_feat_argsort_{label}',
 			lambda: argsort(X_test_feat), FORCE_REDO, SAVE_STATE)

 	if __DEBUG:
-		print("X_test_feat_argsort")
+		print('X_test_feat_argsort')
 		print(X_test_feat_argsort.shape)
 		print(X_test_feat_argsort[IDX_INSPECT, : IDX_INSPECT_OFFSET])

-	benchmark_function("Arg unit test", lambda: unit_test_argsort_2d(X_test_feat, X_test_feat_argsort))
+	benchmark_function('Arg unit test', lambda: unit_test_argsort_2d(X_test_feat, X_test_feat_argsort))

 	time_spent = perf_counter_ns() - preproc_timestamp
+	formatted_line(preproc_gaps, '', '', '', '')
+	formatted_row(preproc_gaps, ['Preprocessing summary', f'{time_spent:,}', format_time_ns(time_spent)])
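
The summary row reports the elapsed time twice: raw nanoseconds via {time_spent:,} and human-readable via format_time_ns. The real format_time_ns, together with the format_time_test / format_time_ns_test unit tests exercised in main, lives in the toolbox modules outside this diff; a minimal sketch of such a formatter, with the unit breakdown being an assumption:

def format_time_ns(time: int) -> str:
	"""Render a nanosecond duration as a human-readable string."""
	units = [('d', 24 * 60 * 60 * 10**9), ('h', 60 * 60 * 10**9), ('m', 60 * 10**9),
		('s', 10**9), ('ms', 10**6), ('us', 10**3), ('ns', 1)]
	if time == 0:
		return '0ns'
	parts = []
	for suffix, size in units:
		count, time = divmod(time, size)
		if count > 0:
			parts.append(f'{count}{suffix}')
	return ' '.join(parts)

assert format_time_ns(90061 * 10**9) == '1d 1h 1m 1s'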
@@ -138,16 +147,17 @@ def train(X_train_feat: np.ndarray, X_train_feat_argsort: np.ndarray, y_train: n
 	"""Train the weak classifiers.

 	Args:
-		X_train (np.ndarray): Training images.
-		X_train_feat_argsort (np.ndarray): Sorted indexes of the training images features.
-		y_train (np.ndarray): Training labels.
+		X_train_feat (np.ndarray): Training features
+		X_train_feat_argsort (np.ndarray): Sorted indexes of the training images features
+		y_train (np.ndarray): Training labels

-	Returns: List of trained models
+	Returns:
+		List[np.ndarray]: List of trained models
 	"""
 	training_timestamp = perf_counter_ns()
 	training_gaps = [26, -18, 29]
-	header(['Training', 'Time spent (ns)', 'Formatted time spent'], training_gaps)
+	header(training_gaps, ['Training', 'Time spent (ns)', 'Formatted time spent'])
 	models = []

 	for T in TS:
@@ -157,9 +167,9 @@ def train(X_train_feat: np.ndarray, X_train_feat_argsort: np.ndarray, y_train: n
 		models.append([alphas, final_classifiers])

 		if __DEBUG:
-			print("alphas")
+			print('alphas')
 			print(alphas)
-			print("final_classifiers")
+			print('final_classifiers')
 			print(final_classifiers)

 	time_spent = perf_counter_ns() - training_timestamp
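
Each trained model is stored as the pair [alphas, final_classifiers], i.e. the AdaBoost weights and the selected weak classifiers. classify_viola_jones is not shown in this diff; for orientation, here is a sketch of the usual Viola-Jones strong-classifier vote, assuming each weak classifier is a (feature index, threshold, polarity) triple and features are stored as (n_features, n_samples):

import numpy as np

def classify_viola_jones(alphas: np.ndarray, classifiers: np.ndarray, X_feat: np.ndarray) -> np.ndarray:
	"""Weighted majority vote of the weak classifiers (hypothetical layout)."""
	total = np.zeros(X_feat.shape[1])
	for alpha, (j, threshold, polarity) in zip(alphas, classifiers):
		# Weak hypothesis: predict 1 when polarity * feature < polarity * threshold
		votes = (polarity * X_feat[int(j)] < polarity * threshold).astype(int)
		total += alpha * votes
	# A face is predicted when the weighted vote reaches half the total weight
	return (total >= 0.5 * np.sum(alphas)).astype(int)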
@@ -173,15 +183,15 @@ def testing_and_evaluating(models: List[np.ndarray], X_train_feat: np.ndarray, y
 	"""Benchmark the trained classifiers on the training and testing sets.

 	Args:
-		models (List[np.ndarray]): List of trained models.
-		X_train_feat (np.ndarray): Training features.
-		y_train (np.ndarray): Training labels.
-		X_test_feat (np.ndarray): Testing features.
-		y_test (np.ndarray): Testing labels.
+		models (List[np.ndarray]): List of trained models
+		X_train_feat (np.ndarray): Training features
+		y_train (np.ndarray): Training labels
+		X_test_feat (np.ndarray): Testing features
+		y_test (np.ndarray): Testing labels
 	"""
 	testing_gaps = [26, -19, 24, -19, 24]
-	header(['Testing', 'Time spent (ns) (E)', 'Formatted time spent (E)', 'Time spent (ns) (T)', 'Formatted time spent (T)'], testing_gaps)
+	header(testing_gaps, ['Testing', 'Time spent (ns) (E)', 'Formatted time spent (E)', 'Time spent (ns) (T)', 'Formatted time spent (T)'])

 	performances = []
 	total_train_timestamp = 0
@@ -213,7 +223,7 @@ def testing_and_evaluating(models: List[np.ndarray], X_train_feat: np.ndarray, y
 	footer(testing_gaps)

 	evaluating_gaps = [19, 7, 6, 6, 6, 7, 6, 6, 6]
-	header(['Evaluating', 'ACC (E)', 'F1 (E)', 'FN (E)', 'FP (E)', 'ACC (T)', 'F1 (T)', 'FN (T)', 'FP (T)'], evaluating_gaps)
+	header(evaluating_gaps, ['Evaluating', 'ACC (E)', 'F1 (E)', 'FN (E)', 'FP (E)', 'ACC (T)', 'F1 (T)', 'FN (T)', 'FP (T)'])

 	for T, (e_acc, e_f1, e_FN, e_FP, t_acc, t_f1, t_FN, t_FP) in zip(TS, performances):
 		print(f'│ ViolaJones T = {T:<4}│ {e_acc:>7.2%}│ {e_f1:>6.2f}│ {e_FN:>6,}│ {e_FP:>6,}│', end = '')
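
The computation of the table's ACC / F1 / FN / FP columns, for both the evaluation (E) and testing (T) sets, happens outside these hunks; given the sklearn imports at the top of the file, each half-row presumably comes from something like the following sketch, which uses only documented sklearn calls:

import numpy as np
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix

def evaluate(y_true: np.ndarray, y_pred: np.ndarray):
	# Returns (ACC, F1, FN, FP) to fill one half of a table row
	acc = accuracy_score(y_true, y_pred)
	f1 = f1_score(y_true, y_pred)
	# For binary labels, confusion_matrix returns [[TN, FP], [FN, TP]]
	(tn, FP), (FN, tp) = confusion_matrix(y_true, y_pred)
	return acc, f1, FN, FP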
@@ -224,7 +234,7 @@ def testing_and_evaluating(models: List[np.ndarray], X_train_feat: np.ndarray, y
 def main() -> None:
 	unit_timestamp = perf_counter_ns()
 	unit_gaps = [27, -18, 29]
-	header(['Unit testing', 'Time spent (ns)', 'Formatted time spent'], unit_gaps)
+	header(unit_gaps, ['Unit testing', 'Time spent (ns)', 'Formatted time spent'])
+	benchmark_function('testing format_time', unit_gaps[0], format_time_test)
 	benchmark_function('testing format_time_ns', unit_gaps[0], format_time_ns_test)
 	time_spent = perf_counter_ns() - unit_timestamp
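
benchmark_function times a callable once and prints a table row; like the other toolbox helpers it is not part of this diff, so the shape below is a guess based on the (title, gap, callable) call sites, reusing the format_time_ns sketch shown earlier (or the real one from toolbox):

from time import perf_counter_ns
from typing import Any, Callable

def benchmark_function(title: str, gap: int, fnc: Callable[[], Any]) -> Any:
	# Time the callable once, print one table row, and pass its result through
	timestamp = perf_counter_ns()
	result = fnc()
	time_spent = perf_counter_ns() - timestamp
	print(f'│ {title:<{gap}} │ {time_spent:>18,} │ {format_time_ns(time_spent):<29} │')
	return result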
@@ -235,12 +245,12 @@ def main() -> None:
 	X_train_feat, X_train_feat_argsort, y_train, X_test_feat, y_test = preprocessing()
 	models = train(X_train_feat, X_train_feat_argsort, y_train)
-	# X_train_feat, X_test_feat = picke_multi_loader([f"X_train_feat_{label}", f"X_test_feat_{label}"], OUT_DIR)
-	# indices = picke_multi_loader(["indices"], OUT_DIR)[0]
+	# X_train_feat, X_test_feat = pickle_multi_loader([f'X_train_feat_{label}', f'X_test_feat_{label}'], OUT_DIR)
+	# indices = pickle_multi_loader(['indices'], OUT_DIR)[0]
 	# X_train_feat, X_test_feat = X_train_feat[indices], X_test_feat[indices]
 	testing_and_evaluating(models, X_train_feat, y_train, X_test_feat, y_test)
 	unit_test(TS)

-if __name__ == "__main__":
+if __name__ == '__main__':
 	main()