python : improved documentation

This commit is contained in:
saundersp
2024-04-28 22:35:42 +02:00
parent c71b04f00d
commit 4a42747837
5 changed files with 78 additions and 77 deletions


@@ -20,13 +20,13 @@ if __DEBUG:
from config import IDX_INSPECT, IDX_INSPECT_OFFSET
if GPU_BOOSTED:
-from ViolaJonesGPU import apply_features, set_integral_image, argsort
+from ViolaJonesGPU import apply_features, set_integral_image, argsort_2d
label = 'GPU' if COMPILE_WITH_C else 'PGPU'
# The parallel prefix sum doesn't use the whole GPU, so numba outputs some annoying warnings; this disables them
from numba import config
config.CUDA_LOW_OCCUPANCY_WARNINGS = 0
else:
-from ViolaJonesCPU import apply_features, set_integral_image, argsort
+from ViolaJonesCPU import apply_features, set_integral_image, argsort_2d
label = 'CPU' if COMPILE_WITH_C else 'PY'
def preprocessing() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
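For reference, the helper renamed above from argsort to argsort_2d sorts each row of a 2-D feature matrix independently. A minimal NumPy sketch of what such a row-wise argsort could look like (the name argsort_2d_sketch and the use of np.argsort are assumptions; the actual ViolaJonesCPU/ViolaJonesGPU implementations are not part of this diff):

import numpy as np

def argsort_2d_sketch(a: np.ndarray) -> np.ndarray:
    # Return, for every row of a 2-D matrix, the indices that would sort that row.
    return np.argsort(a, axis=1)

print(argsort_2d_sketch(np.array([[3, 1, 2], [9, 7, 8]])))  # [1 2 0] for each row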
@@ -37,7 +37,7 @@ def preprocessing() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.
- Calculate features
- Calculate integral images
- Apply features to images
-	- Calculate argsort of the featured images.
+	- Calculate argsort of the featured images
Returns:
Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]: Tuple containing in order: training features, training features sorted indexes, training labels, testing features, testing labels
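Given that documented return order, a caller would typically unpack the tuple positionally; a short illustrative sketch (the variable names below mirror the docstring and are not taken verbatim from the repository):

# Documented order: training features, training features sorted indexes,
# training labels, testing features, testing labels.
X_train_feat, X_train_feat_argsort, y_train, X_test_feat, y_test = preprocessing()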
@@ -119,7 +119,7 @@ def preprocessing() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.
# X_train_feat, X_test_feat = X_train_feat[indices], X_test_feat[indices]
X_train_feat_argsort = state_saver(f'Precalculating training set argsort ({label})', preproc_gaps[0], f'X_train_feat_argsort_{label}',
-lambda: argsort(X_train_feat), FORCE_REDO, SAVE_STATE)
+lambda: argsort_2d(X_train_feat), FORCE_REDO, SAVE_STATE)
if __DEBUG:
print('X_train_feat_argsort')
@@ -128,7 +128,7 @@ def preprocessing() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.
benchmark_function('Arg unit test', preproc_gaps[0], lambda: unit_test_argsort_2d(X_train_feat, X_train_feat_argsort))
X_test_feat_argsort = state_saver(f'Precalculating testing set argsort ({label})', preproc_gaps[0], f'X_test_feat_argsort_{label}',
-lambda: argsort(X_test_feat), FORCE_REDO, SAVE_STATE)
+lambda: argsort_2d(X_test_feat), FORCE_REDO, SAVE_STATE)
if __DEBUG:
print('X_test_feat_argsort')
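The precomputed argsort results above are validated by unit_test_argsort_2d, whose body is outside this diff; a hedged sketch of the kind of check it presumably performs (the signature and logic below are assumptions, not the repository's implementation):

import numpy as np

def unit_test_argsort_2d_sketch(a: np.ndarray, indices: np.ndarray) -> bool:
    # The test passes if, for every row, indexing the row with its
    # precomputed argsort indices yields a non-decreasing sequence.
    for row, idx in zip(a, indices):
        if np.any(np.diff(row[idx]) < 0):
            return False
    return True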