cpp : Added documentation
@@ -1,55 +1,61 @@
 #include <cmath>
 #include "data.hpp"
 #include "config.hpp"
 #include "ViolaJonesGPU.hpp"
 #include "ViolaJones_device.hpp"

-static inline void add_empty_feature(const np::Array<uint8_t>& feats, size_t& n) noexcept {
+constexpr static inline void add_empty_feature(const np::Array<uint8_t>& feats, size_t& n) noexcept {
 	memset(&feats[n], 0, 4 * sizeof(uint8_t));
 	n += 4;
 }

-static inline void add_right_feature(const np::Array<uint8_t>& feats, size_t& n, const uint16_t& i, const uint16_t& j, const uint16_t& w, const uint16_t& h) noexcept {
+constexpr static inline void add_right_feature(const np::Array<uint8_t>& feats, size_t& n, const uint16_t& i, const uint16_t& j, const uint16_t& w, const uint16_t& h) noexcept {
 	feats[n++] = i + w;
 	feats[n++] = j;
 	feats[n++] = w;
 	feats[n++] = h;
 }

-static inline void add_immediate_feature(const np::Array<uint8_t>& feats, size_t& n, const uint16_t& i, const uint16_t& j, const uint16_t& w, const uint16_t& h) noexcept {
+constexpr static inline void add_immediate_feature(const np::Array<uint8_t>& feats, size_t& n, const uint16_t& i, const uint16_t& j, const uint16_t& w, const uint16_t& h) noexcept {
 	feats[n++] = i;
 	feats[n++] = j;
 	feats[n++] = w;
 	feats[n++] = h;
 }

-static inline void add_bottom_feature(const np::Array<uint8_t>& feats, size_t& n, const uint16_t& i, const uint16_t& j, const uint16_t& w, const uint16_t& h) noexcept {
+constexpr static inline void add_bottom_feature(const np::Array<uint8_t>& feats, size_t& n, const uint16_t& i, const uint16_t& j, const uint16_t& w, const uint16_t& h) noexcept {
 	feats[n++] = i;
 	feats[n++] = j + h;
 	feats[n++] = w;
 	feats[n++] = h;
 }

-static inline void add_right2_feature(const np::Array<uint8_t>& feats, size_t& n, const uint16_t& i, const uint16_t& j, const uint16_t& w, const uint16_t& h) noexcept {
+constexpr static inline void add_right2_feature(const np::Array<uint8_t>& feats, size_t& n, const uint16_t& i, const uint16_t& j, const uint16_t& w, const uint16_t& h) noexcept {
 	feats[n++] = i + 2 * w;
 	feats[n++] = j;
 	feats[n++] = w;
 	feats[n++] = h;
 }

-static inline void add_bottom2_feature(const np::Array<uint8_t>& feats, size_t& n, const uint16_t& i, const uint16_t& j, const uint16_t& w, const uint16_t& h) noexcept {
+constexpr static inline void add_bottom2_feature(const np::Array<uint8_t>& feats, size_t& n, const uint16_t& i, const uint16_t& j, const uint16_t& w, const uint16_t& h) noexcept {
 	feats[n++] = i;
 	feats[n++] = j + 2 * h;
 	feats[n++] = w;
 	feats[n++] = h;
 }

-static inline void add_bottom_right_feature(const np::Array<uint8_t>& feats, size_t& n, const uint16_t& i, const uint16_t& j, const uint16_t& w, const uint16_t& h) noexcept {
+constexpr static inline void add_bottom_right_feature(const np::Array<uint8_t>& feats, size_t& n, const uint16_t& i, const uint16_t& j, const uint16_t& w, const uint16_t& h) noexcept {
 	feats[n++] = i + w;
 	feats[n++] = j + h;
 	feats[n++] = w;
 	feats[n++] = h;
 }

+/**
+ * @brief Initialize the features based on the input shape.
+ *
+ * @param width Width of the image
+ * @param height Height of the image
+ * @return The initialized features
+ */
 np::Array<uint8_t> build_features(const uint16_t& width, const uint16_t& height) noexcept {
 	size_t n = 0;
 	uint16_t w, h, i, j;
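Illustrative note (not part of the diff): the add_*_feature helpers above each append one rectangle of a Haar-like feature as 4 bytes (x, y, w, h). A minimal standalone sketch of that encoding, using std::vector instead of the project's np::Array:

#include <cstdint>
#include <cstdio>
#include <vector>

// Append one rectangle (x, y, w, h) as 4 bytes, mirroring the feats[n++] = ... pattern above.
static void add_rect(std::vector<uint8_t>& feats, const uint16_t x, const uint16_t y, const uint16_t w, const uint16_t h) {
	feats.push_back(static_cast<uint8_t>(x));
	feats.push_back(static_cast<uint8_t>(y));
	feats.push_back(static_cast<uint8_t>(w));
	feats.push_back(static_cast<uint8_t>(h));
}

int main() {
	std::vector<uint8_t> feats;
	const uint16_t i = 3, j = 5, w = 4, h = 2;   // hypothetical window coordinates
	add_rect(feats, i, j, w, h);                 // "immediate" rectangle at (i, j)
	add_rect(feats, i + w, j, w, h);             // "right" rectangle at (i + w, j)
	for (size_t k = 0; k < feats.size(); k += 4)
		std::printf("rect: x=%d y=%d w=%d h=%d\n", feats[k], feats[k + 1], feats[k + 2], feats[k + 3]);
	return 0;
}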
@@ -162,6 +168,12 @@ np::Array<uint8_t> build_features(const uint16_t& width, const uint16_t& height)
 // return res;
 //}

+/**
+ * @brief Initialize the weights of the weak classifiers based on the training labels.
+ *
+ * @param y_train Training labels
+ * @return The initialized weights
+ */
 np::Array<float64_t> init_weights(const np::Array<uint8_t>& y_train) noexcept {
 	np::Array<float64_t> weights = np::empty<float64_t>(y_train.shape);
 	const uint16_t t = np::sum(np::astype<uint16_t>(y_train));
@@ -171,13 +183,30 @@ np::Array<float64_t> init_weights(const np::Array<uint8_t>& y_train) noexcept {
 	}));
 }

-np::Array<uint8_t> classify_weak_clf(const np::Array<int32_t>& X_feat_i, const size_t& j, const float64_t& threshold, const float64_t& polarity) noexcept {
+/**
+ * @brief Classify the integrated features based on polarity and threshold.
+ *
+ * @param X_feat_i Integrated features
+ * @param j Index of the classifier
+ * @param threshold Trained threshold
+ * @param polarity Trained polarity
+ * @return Classified features
+ */
+static np::Array<uint8_t> classify_weak_clf(const np::Array<int32_t>& X_feat_i, const size_t& j, const float64_t& threshold, const float64_t& polarity) noexcept {
 	np::Array<uint8_t> res = np::empty<uint8_t>({ X_feat_i.shape[1] });
 	for(size_t i = 0; i < res.shape[0]; ++i)
 		res[i] = polarity * X_feat_i[j * X_feat_i.shape[1] + i] < polarity * threshold ? 1 : 0;
 	return res;
 }

+/**
+ * @brief Classify the given features with the trained classifiers.
+ *
+ * @param alphas Trained alphas
+ * @param classifiers Trained classifiers
+ * @param X_feat Integrated features
+ * @return Classification results
+ */
 np::Array<uint8_t> classify_viola_jones(const np::Array<float64_t>& alphas, const np::Array<float64_t>& classifiers, const np::Array<int32_t>& X_feat) noexcept {
 	np::Array<float64_t> total = np::zeros<float64_t>({ X_feat.shape[1] });

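Illustrative note (not part of the diff): classify_weak_clf above labels a sample 1 when polarity * feature < polarity * threshold. A minimal standalone sketch of that rule with hypothetical trained values, using plain std types rather than np::Array:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
	const double threshold = 42.0, polarity = -1.0;              // hypothetical trained values
	const std::vector<int32_t> feature_column = { 10, 42, 77 };  // one feature over three samples
	for (const int32_t f : feature_column) {
		// Same comparison as in classify_weak_clf: the polarity flips the direction of the threshold test.
		const uint8_t label = polarity * f < polarity * threshold ? 1 : 0;
		std::printf("feature=%d -> %d\n", static_cast<int>(f), label);
	}
	return 0;
}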
@@ -198,6 +227,15 @@ np::Array<uint8_t> classify_viola_jones(const np::Array<float64_t>& alphas, cons
 	return y_pred;
 }

+/**
+ * @brief Select the best classifier given their predictions.
+ *
+ * @param classifiers The weak classifiers
+ * @param weights Trained weights of each classifier
+ * @param X_feat Integrated features
+ * @param y Feature labels
+ * @return Index of the best classifier, the best error and the best accuracy
+ */
 std::tuple<int32_t, float64_t, np::Array<float64_t>> select_best(const np::Array<float64_t>& classifiers, const np::Array<float64_t>& weights, const np::Array<int32_t>& X_feat, const np::Array<uint8_t>& y) noexcept {
 	std::tuple<int32_t, float64_t, np::Array<float64_t>> res = { -1, np::inf, np::empty<float64_t>({ X_feat.shape[0] }) };

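Illustrative note (not part of the diff): select_best starts from { -1, np::inf, ... } above, which suggests a running-minimum search over the weak classifiers' weighted errors (an assumption; the loop body falls outside this hunk). A standalone sketch of that pattern:

#include <cstdio>
#include <limits>
#include <vector>

int main() {
	const std::vector<double> weighted_errors = { 0.41, 0.18, 0.27 };  // hypothetical, one per weak classifier
	int best_idx = -1;
	double best_error = std::numeric_limits<double>::infinity();
	for (size_t j = 0; j < weighted_errors.size(); ++j)
		if (weighted_errors[j] < best_error) {  // keep the classifier with the smallest weighted error
			best_error = weighted_errors[j];
			best_idx = static_cast<int>(j);
		}
	std::printf("best classifier: %d (error %.2f)\n", best_idx, best_error);
	return 0;
}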
@@ -215,6 +253,15 @@ std::tuple<int32_t, float64_t, np::Array<float64_t>> select_best(const np::Array
 	return res;
 }

+/**
+ * @brief Train the weak classifiers.
+ *
+ * @param T Number of weak classifiers
+ * @param X_feat Integrated features
+ * @param X_feat_argsort Sorted indexes of the integrated features
+ * @param y Feature labels
+ * @return List of trained alphas and the list of the final classifiers
+ */
 std::array<np::Array<float64_t>, 2> train_viola_jones(const size_t& T, const np::Array<int32_t>& X_feat, const np::Array<uint16_t>& X_feat_argsort, const np::Array<uint8_t>& y) noexcept {
 	np::Array<float64_t> weights = init_weights(y);
 	np::Array<float64_t> alphas = np::empty<float64_t>({ T });
@@ -222,11 +269,7 @@ std::array<np::Array<float64_t>, 2> train_viola_jones(const size_t& T, const np:

 	for(size_t t = 0; t < T; ++t ){
 		weights /= np::sum(weights);
-#if GPU_BOOSTED
-		const np::Array<float64_t> classifiers = train_weak_clf_gpu(X_feat, X_feat_argsort, y, weights);
-#else
-		const np::Array<float64_t> classifiers = train_weak_clf_cpu(X_feat, X_feat_argsort, y, weights);
-#endif
+		const np::Array<float64_t> classifiers = train_weak_clf(X_feat, X_feat_argsort, y, weights);
 		const auto [ clf, error, accuracy ] = select_best(classifiers, weights, X_feat, y);
 		float64_t beta = error / (1.0 - error);
 		weights *= np::pow(beta, (1.0 - accuracy));
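Illustrative note (not part of the diff): the update above computes beta = error / (1 - error) and rescales each sample weight by beta^(1 - a), where a comes from the accuracy array returned by select_best. A standalone arithmetic sketch of those two expressions with a hypothetical error value:

#include <cmath>
#include <cstdio>

int main() {
	const double error = 0.2;                   // hypothetical weighted error of the selected weak classifier
	const double beta = error / (1.0 - error);  // 0.25
	std::printf("beta = %.4f\n", beta);
	// Per-sample multiplier beta^(1 - a) for the two extreme values of a:
	std::printf("a = 1 -> weight multiplier %.4f\n", std::pow(beta, 1.0 - 1.0));  // 1.0000
	std::printf("a = 0 -> weight multiplier %.4f\n", std::pow(beta, 1.0 - 0.0));  // 0.2500
	// The weights /= np::sum(weights) step at the top of the loop then renormalises the distribution.
	return 0;
}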
@@ -238,6 +281,13 @@ std::array<np::Array<float64_t>, 2> train_viola_jones(const size_t& T, const np:
 	return { alphas, final_classifier };
 }

+/**
+ * @brief Compute the accuracy score, i.e. how close a given set of measurements is to its true value.
+ *
+ * @param y Ground truth labels
+ * @param y_pred Predicted labels
+ * @return Computed accuracy score
+ */
 float64_t accuracy_score(const np::Array<uint8_t>& y, const np::Array<uint8_t>& y_pred) noexcept {
 	float64_t res = 0.0;
 	for(size_t i = 0; i < y.shape[0]; ++i)
@@ -246,6 +296,13 @@ float64_t accuracy_score(const np::Array<uint8_t>& y, const np::Array<uint8_t>&
 	return res / y.shape[0];
 }

+/**
+ * @brief Compute the precision score, i.e. the ratio (TP / (TP + FP)) where TP is the number of true positives and FP the number of false positives.
+ *
+ * @param y Ground truth labels
+ * @param y_pred Predicted labels
+ * @return Computed precision score
+ */
 float64_t precision_score(const np::Array<uint8_t>& y, const np::Array<uint8_t>& y_pred) noexcept {
 	uint16_t true_positive = 0, false_positive = 0;
 	for(size_t i = 0; i < y.shape[0]; ++i)
@@ -258,6 +315,13 @@ float64_t precision_score(const np::Array<uint8_t>& y, const np::Array<uint8_t>&
 	return static_cast<float64_t>(true_positive) / (true_positive + false_positive);
 }

+/**
+ * @brief Compute the recall score, i.e. the ratio (TP / (TP + FN)) where TP is the number of true positives and FN the number of false negatives.
+ *
+ * @param y Ground truth labels
+ * @param y_pred Predicted labels
+ * @return Computed recall score
+ */
 float64_t recall_score(const np::Array<uint8_t>& y, const np::Array<uint8_t>& y_pred) noexcept {
 	uint16_t true_positive = 0, false_negative = 0;
 	for(size_t i = 0; i < y.shape[0]; ++i)
@@ -271,12 +335,35 @@ float64_t recall_score(const np::Array<uint8_t>& y, const np::Array<uint8_t>& y_
 	return static_cast<float64_t>(true_positive) / (true_positive + false_negative);
 }

+/**
+ * @brief Compute the F1 score, also known as the balanced F-score or F-measure.
+ *
+ * F1 = (2 * TP) / (2 * TP + FP + FN)
+ * where TP is the true positives,
+ * FP is the false positives,
+ * and FN is the false negatives
+ *
+ * @param y Ground truth labels
+ * @param y_pred Predicted labels
+ * @return Computed F1 score
+ */
 float64_t f1_score(const np::Array<uint8_t>& y, const np::Array<uint8_t>& y_pred) noexcept {
 	const float64_t precision = precision_score(y, y_pred);
 	const float64_t recall = recall_score(y, y_pred);
 	return 2 * (precision * recall) / (precision + recall);
 }

+/**
+ * @brief Compute the confusion matrix to evaluate a given classification.
+ *
+ * A confusion matrix of a binary classification consists of a 2x2 matrix containing
+ * | True negatives  | False positives |
+ * | False negatives | True positives  |
+ *
+ * @param y Ground truth labels
+ * @param y_pred Predicted labels
+ * @return Computed confusion matrix
+ */
 std::tuple<uint16_t, uint16_t, uint16_t, uint16_t> confusion_matrix(const np::Array<uint8_t>& y, const np::Array<uint8_t>& y_pred) noexcept {
 	uint16_t true_positive = 0, false_positive = 0, true_negative = 0, false_negative = 0;
 	for(size_t i = 0; i < y.shape[0]; ++i)
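Illustrative note (not part of the diff): a standalone check of the metric formulas documented above, precision = TP / (TP + FP), recall = TP / (TP + FN), F1 = (2 * TP) / (2 * TP + FP + FN), on a small made-up label set:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
	const std::vector<uint8_t> y      = { 1, 1, 0, 0, 1, 0 };  // made-up ground truth labels
	const std::vector<uint8_t> y_pred = { 1, 0, 0, 1, 1, 0 };  // made-up predictions
	uint16_t tp = 0, fp = 0, tn = 0, fn = 0;
	for (size_t i = 0; i < y.size(); ++i) {
		if (y[i] == 1 && y_pred[i] == 1) ++tp;
		else if (y[i] == 1)              ++fn;
		else if (y_pred[i] == 1)         ++fp;
		else                             ++tn;
	}
	const double precision = static_cast<double>(tp) / (tp + fp);
	const double recall    = static_cast<double>(tp) / (tp + fn);
	const double f1        = (2.0 * tp) / (2.0 * tp + fp + fn);
	// Confusion matrix layout as documented above: | TN FP | over | FN TP |
	std::printf("TN=%d FP=%d\nFN=%d TP=%d\n", tn, fp, fn, tp);
	std::printf("precision=%.3f recall=%.3f f1=%.3f\n", precision, recall, f1);
	return 0;
}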