Added files

This commit is contained in:
saundersp 2023-05-07 19:46:28 +02:00
parent c647e2bb5c
commit 4ef410ccd0
26 changed files with 2625 additions and 0 deletions

1
.gitignore vendored Normal file
View File

@ -0,0 +1 @@
out

40
Makefile Normal file
View File

@ -0,0 +1,40 @@
OUT_DIR := out
$(shell mkdir -p $(OUT_DIR))
GRAPHS_DIR := graphs
CONTENTS_DIR := contents
CONTENTS := $(wildcard $(CONTENTS_DIR)/*.tex)
GRAPHS := $(wildcard $(GRAPHS_DIR)/*.gv)
GRAPHS_IMG := $(GRAPHS:$(GRAPHS_DIR)/%.gv=$(OUT_DIR)/%.gv.png)

.PHONY: all preview clean references pdf index document

all: pdf

$(OUT_DIR)/%.gv.png: $(GRAPHS_DIR)/%.gv
	dot -Tpng $< -o $@

index: $(OUT_DIR)/main.idx
	makeindex $^

references: annexes.bib references.bib
	bibtex $(OUT_DIR)/annexes
	bibtex $(OUT_DIR)/references

#pdf: $(OUT_DIR)/main.pdf
#$(OUT_DIR)/main.pdf: main.tex $(GRAPHS_IMG) $(CONTENTS)
pdf: main.tex $(GRAPHS_IMG) $(CONTENTS)
	pdflatex -output-directory $(OUT_DIR) $<

preview: $(OUT_DIR)/main.pdf
	zathura $^

document:
	make
	make index
	make references
	make

clean:
	rm $(OUT_DIR)/*

303
annexes.bib Normal file
View File

@ -0,0 +1,303 @@
@online{project_vae,
author = {Pierre Saunders},
title = {Auto encodeur variationnel},
url = {https://nbviewer.org/github/saundersp/VariationalAutoEncoder/blob/main/Variational\%20Auto\%20Encoder.ipynb}
}
@online{project_reglin,
author = {Pierre Saunders},
title = {Régression linéaire},
url = {https://nbviewer.org/github/saundersp/SimpleGradientDescent/blob/master/Linear\%20Regression.ipynb}
}
@online{project_wgan-gp,
author = {Pierre Saunders},
title = {Wasserstein GAN avec pénalité de gradient},
url = {https://github.com/saundersp/wgan-gp}
}
@online{miage_website,
author = {MIAGE},
title = {Méthodes Informatiques Appliquées à la Gestion des Entreprises},
url = {https://univ-cotedazur.fr/miage}
}
@online{ia2_website,
author = {MIAGE},
title = {Intelligence Artificielle Appliquée},
url = {https://univ-cotedazur.fr/formation/offre-de-formation/parcours-intelligence-artificielle-appliquee-ia2-1},
}
@online{personnal_portfolio,
author = {Pierre Saunders},
title = {Pierre Saunders's portfolio},
url = {saundersp.com},
}
@online{inria_website,
author = {Inria},
title = {Institut National de Recherche en Informatique et en Automatique},
url = {https://www.inria.fr/fr}
}
@online{inria_sophia_website,
author = {Inria},
title = {Inria Sophia Antipolis official website},
url = {https://www.inria.fr/en/centre-inria-sophia-antipolis-mediterranee}
}
@online{wimmics_website,
author = {Wimmics},
title = {Wimmics Bridging social semantics and formal semantics on the web},
url = {https://team.inria.fr/wimmics}
}
@online{tyrex_website,
author = {Tyrex},
title = {Tyrex's official website},
url = {https://tyrex.inria.fr/}
}
@manual{sparql,
author = {W3C},
title = {SPARQL Standard},
url = {https://www.w3.org/TR/sparql11-overview}
}
@manual{owl,
author = {W3C},
title = {OWL Standard},
url = {https://www.w3.org/TR/owl-features}
}
@manual{rdfa,
author = {W3C},
title = {RDFa Standard},
url = {https://www.w3.org/2001/sw/wiki/RDFa}
}
@manual{sparql_query,
author = {W3C},
title = {SPARQL Query Language for RDF},
url = {https://www.w3.org/TR/rdf-sparql-query}
}
@manual{sparql_graph_patterns,
author = {W3C},
title = {SPARQL - Graph Patterns},
url = {https://www.w3.org/2004/Talks/17Dec-sparql/QueryLang1/all.html}
}
@manual{sparql_inference,
author = {W3C Member Submission},
title = {SPARQL Inferencing Notation},
url = {https://spinrdf.org}
}
@manual{rdf_primer,
author = {W3C},
title = {RDF Primer},
url = {https://www.w3.org/TR/rdf11-primer}
}
@manual{rdf_schema,
author = {W3C},
title = {RDF Schema},
url = {https://www.w3.org/TR/rdf-schema}
}
@manual{rdf_survey,
title = {RDF Data Storage and Query Processing Schemes : A Survey},
url = {https://dl.acm.org/doi/pdf/10.1145/3177850}
}
@online{kgram_website,
author = {Inria},
title = {KGRAM's GitHub repository},
url = {https://gforge.inria.fr/projects/kgram/}
}
@online{corese_github,
author = {Inria},
title = {Corese's GitHub repository},
url = {https://github.com/Wimmics/corese}
}
@online{corese_website,
author = {Inria},
title = {Corese's official website},
url = {https://project.inria.fr/corese}
}
@online{sparqlgx_website,
author = {Tyrex},
title = {SPARQLGX's official website},
url = {https://tyrex.inria.fr/sparqlgx/home.html}
}
@online{spark_website,
author = {Apache},
title = {Apache SPARK's official website},
url = {https://spark.apache.org}
}
@online{hadoop_website,
author = {Apache},
title = {Apache Hadoop's official website},
url = {https://hadoop.apache.org}
}
@online{mulgara_website,
author = {Mulgara},
title = {Mulgara's official website},
url = {http://docs.mulgara.org/system/index.html}
}
@online{mulgara_github,
author = {Mulgara},
title = {Mulgara's GitHub repository},
url = {https://github.com/quoll/mulgara}
}
@online{w3c_website,
author = {W3C},
title = {W3C's official website},
url = {https://www.w3.org}
}
@online{cumulusrdf_website,
author = {Karlsruher Institut für Technologie},
title = {CumulusRDF's official website},
url = {https://www.aifb.kit.edu/web/CumulusRDF/en}
}
@online{cumulusrdf_github,
author = {Karlsruher Institut für Technologie},
title = {CumulusRDF's GitHub repository},
url = {https://github.com/cumulusrdf/cumulusrdf}
}
@online{cassandra_website,
author = {Apache},
title = {Apache Cassandra's official website},
url = {https://cassandra.apache.org/_/index.html}
}
@online{tomcat_website,
author = {Apache},
title = {Apache Tomcat's official website},
url = {https://tomcat.apache.org}
}
@online{latex_website,
author = {LaTeX},
title = {LaTeX's official website},
url = {https://www.latex-project.org}
}
@online{alpine_website,
author = {Alpine},
title = {Alpine's official website},
url = {https://alpinelinux.org/about}
}
@online{musl_website,
author = {musl},
title = {musl's official website},
url = {https://musl.libc.org}
}
@online{glibc_website,
author = {GNU},
title = {GNU C library's official website},
url = {https://www.gnu.org/software/libc}
}
@online{apache_website,
author = {Apache},
title = {Apache's official website},
url = {https://apache.org}
}
@online{hp_labs_website,
author = {HP},
title = {HP Labs official website},
url = {https://www.hp.com/us-en/hp-labs/index.html}
}
@online{i3s_website,
author = {I3S},
title = {I3S official website},
url = {https://www.i3s.unice.fr}
}
@online{rdf2rdf_website,
title = {RDF2RDF's official website},
url = {http://www.l3s.de/~minack/rdf2rdf}
}
@online{team_github,
author = {Damien Graux and Pierre Saunders},
year = {2021},
title = {Team's GitHub},
url = {https://github.com/SemanticWebBenchmarker}
}
@manual{unix_standard,
title = {The UNIX® Standard},
url = {https://www.opengroup.org/membership/forums/platform/unix}
}
@online{virtuoso_website,
title = {Virtuoso's official website},
url = {https://virtuoso.openlinksw.com}
}
@online{dbpedia_website,
title = {DBpedia's official website},
url = {https://www.dbpedia.org}
}
@online{lod-cloud_website,
title = {The Linked Open Data Cloud's official website},
url = {https://www.lod-cloud.net}
}
@online{fuseki_website,
author = {Apache},
title = {Apache Jena Fuseki's official website},
url = {https://jena.apache.org/documentation/fuseki2}
}
@manual{sparql_standard,
title = {SPARQL 1.1 Graph Store HTTP Protocol Standard},
url = {https://www.w3.org/TR/sparql11-http-rdf-update}
}
@online{tdb2_website,
author = {Apache},
title = {TDB2's official website},
url = {https://jena.apache.org/documentation/tdb2}
}
@online{watdiv_website,
title = {WatDiv's official website},
url = {https://dsg.uwaterloo.ca/watdiv}
}
@online{sp2bench_website,
title = {SP2Bench's official website},
url = {http://dbis.informatik.uni-freiburg.de/index.php?project=SP2B}
}
@online{tf_website,
title = {TensorFlow's official website},
url = {https://www.tensorflow.org/}
}
@online{keras_website,
title = {Keras's official website},
url = {https://keras.io/}
}
@online{pytorch_website,
title = {PyTorch's official website},
url = {https://pytorch.org/}
}
@online{numpy_website,
title = {Numpy's official website},
url = {https://numpy.org/}
}
@online{rdf4j_website,
title = {RDF4J's official website},
url = {https://rdf4j.org}
}
@online{jena_website,
author = {Apache},
title = {Apache Jena's official website},
url = {https://jena.apache.org/index.html}
}
@online{lubm_website,
title = {LUBM's official website},
url = {http://sc.cse.lehigh.edu/projects/lubm}
}
@online{rdf3x_github,
title = {RDF3X's GitHub repository},
url = {https://gitlab.db.in.tum.de/dbtools/rdf3x}
}
@online{docker_website,
title = {Docker official website},
url = {https://www.docker.com}
}
@online{4store_github,
title = {4store's GitHub repository},
url = {https://github.com/SemanticWebBenchmarker/4store}
}
@online{wikipedia_cayley_dickson,
title = {Cayley-Dickson construction},
url = {https://en.wikipedia.org/wiki/Cayley-Dickson\_construction}
}
@online{wikipedia_complex_number,
title = {Complex number},
url = {https://en.wikipedia.org/wiki/Complex_number}
}
@online{wikipedia_quaternion,
title = {Quaternion},
url = {https://en.wikipedia.org/wiki/Quaternion}
}
@online{wikipedia_octonion,
title = {Octonion},
url = {https://en.wikipedia.org/wiki/Octonion}
}
@online{wikipedia_sedenion,
title = {Sedenion},
url = {https://en.wikipedia.org/wiki/Sedenion}
}

View File

@ -0,0 +1,59 @@
\chapter{Sujet thèse : Le Paradigme Gaussien}
%TODO Complete chapter
%\section{A propos de moi}
%
%J'ai conclu septembre dernier ma seconde année de master à la MIAGE \citeannexes{miage_website} spécialité IA2 \citeannexes{ia2_website} et je suis actuellement en recherche d'un poste de doctorant en intelligence artificielle sur les modèles génératifs (GAN \citereferences{generative_adversarial_nets}, auto-encodeur variationnel \citereferences{vae_paper}, etc.).
%Malheureusement, il n'y que très peu sujets dans ce domaine en France, c'est pourquoi, je suis dans l'optique de proposer mon propre sujet.
%
%Le sujet que je propose est mi-mathématiques théoriques (théorie de la mesure, théorie de l'information, etc.) et mi-informatique (modèles génératifs, processus gaussien, etc.) et vise à unifier les différents paradigmes de l'apprentissage profond (i.e. deep learning).
\section{Abstract}
La mode actuelle dans l'apprentissage profond, en termes de classification, est d'établir un hyperplan qui sépare le mieux possible les points d'un set de données de façon déterministe.
Cette méthodologie, héritée des machines à vecteurs de support (i.e. SVM \citereferences{Weston1999SupportVM}), maximise la marge (e.g. hinge loss) ; pourtant, cette approche s'éloigne énormément de l'anthropomorphisme recherché par les réseaux neuronaux.
En effet, cette approche vise à différencier chaque classe de toutes les autres (duel $1$ vs $N-1$ classes), ce qui résulte en un hyperplan dont on ne peut que difficilement interpréter les résultats.
De plus, si on rajoute des classes, on doit entraîner à nouveau le modèle ou, a minima, entraîner à nouveau la dernière couche avec l'apprentissage par transfert \citereferences{transfer_learning_survey}.
Une approche plus anthropomorphiste serait d'entraîner un modèle qui se base non sur les différences, mais sur les similitudes. Cela permettrait également d'unifier plusieurs paradigmes de l'apprentissage automatique tels que la classification, la détection d'anomalie, la génération d'échantillons ainsi que l'apprentissage semi-supervisé.
Plusieurs tentatives d'unification des paradigmes ont déjà eu lieu, comme le fait d'utiliser un modèle génératif de type GAN \citereferences{generative_adversarial_nets} pour faire de la classification \citereferences{semi-supervised_learning_with_deep_generative_models}. Pourtant, le fait que tous les modèles entraînés par descente de gradient soient des approximations de machines à noyau \citereferences{every_model_learned_by_gradient_descent_is_approximately_a_kernel_machine} montre que le problème est intrinsèque au paradigme et donc qu'il peut être intéressant de changer d'approche.
\section{Sujet}
Le but du sujet est de créer un modèle caractérisé comme un réseau de neurones probabiliste qui, sur un set de données défini tel que :
$D = \{ (x_1, y_1),\dots, (x_n, y_n)\} \subseteq \mathcal{R}^d \times \mathcal{C}$
\begin{itemize}
\item $\mathcal{C}$ est l'espace vectoriel des labels
\item $x_i$ est le i-ème vecteur du set
\item $y_i$ est le i-ème label du set
\item $\mathcal{R}^d$ est l'espace vectoriel à $d$ dimensions
\end{itemize}
Le modèle maximisera une approximation de la distribution $P(X)$, sachant que, grâce au théorème central limite, nous pouvons raisonnablement prédire que la distribution sera gaussienne, ce qui est essentiel pour ce qui suit.
Et à partir de cette distribution, nous pouvons assigner un point $x_i$ du set à son label $y_i$ et donc estimer de manière fractale (comme le permet le théorème central limite) chaque sous-distribution $P(X|Y)$. Cette approche permet, si on dispose de nouvelles données, d'utiliser uniquement celles-ci et non le set entier, ce qui réduit considérablement le temps d'entraînement.
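À titre d'illustration (formulation esquissée, non imposée par le sujet), cette décomposition peut s'écrire comme un mélange de gaussiennes :
$P(X) = \sum_{c \in \mathcal{C}} P(Y = c)\,P(X|Y = c), \quad P(X|Y = c) \approx \mathcal{N}(\mu_c, \Sigma_c)$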
\section{Applications}
Avec ce changement intrinsèque dans la manière d'entraîner les modèles, ces distributions ouvrent plusieurs possibilités (un croquis illustratif suit la liste), comme :
\begin{itemize}
\item Classification : on peut inférer le label en calculant l'argmin de chaque divergence de Kullback-Leibler \citereferences{kl_divergence} pour toutes les distributions
\item Détection d'anomalie : chaque sous distribution est gaussienne, donc un calcul du z-score ($Z=\frac{x-\mu}{\sigma}$) permet de détecter une potentielle anomalie
\item Génération d'échantillons : avec les paramètres estimés de chaque distribution, nous pouvons utiliser un vecteur $\mathcal{N}(\mu',\sigma')$ pour générer de nouveaux vecteurs dans l'espace vectoriel estimé
\item Apprentissage semi-supervisé : l'entraînement du modèle ne dépend pas de $Y$, donc nous pouvons entraîner le modèle avec le maximum de données non labellisées. Ensuite, en ne labellisant que certains points $X$, le modèle pourra déduire quels points sont les plus similaires et, en conséquence, les plus susceptibles d'être du même label.
\end{itemize}
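Le croquis Python ci-dessous (purement illustratif et hors du document original ; il suppose des gaussiennes à covariance diagonale estimées par classe avec NumPy, et les noms de fonctions sont hypothétiques) résume trois de ces usages : classification, détection d'anomalie et génération.
\begin{lstlisting}[language=Python]
import numpy as np

def fit_gaussians(X, y):
    # Estime (mu, sigma) par classe -- gaussiennes a covariance diagonale
    return {c: (X[y == c].mean(axis=0), X[y == c].std(axis=0) + 1e-8)
            for c in np.unique(y)}

def kl_diag(mu1, s1, mu2, s2):
    # KL(N1 || N2) en forme fermee pour des gaussiennes diagonales
    return np.sum(np.log(s2 / s1) + (s1**2 + (mu1 - mu2)**2) / (2 * s2**2) - 0.5)

def classify(x, params):
    # Classification : argmin de la divergence KL, x vu comme une gaussienne etroite
    s0 = np.full(x.shape, 1e-3)
    return min(params, key=lambda c: kl_diag(x, s0, *params[c]))

def z_score(x, mu, sigma):
    # Detection d'anomalie : z-score par dimension
    return np.abs((x - mu) / sigma)

def sample(mu, sigma, n=1):
    # Generation d'echantillons depuis la distribution estimee
    return mu + sigma * np.random.randn(n, mu.shape[0])
\end{lstlisting}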
\section{Travaux}
J'ai déjà réalisé plusieurs projets en intelligence artificielle, dont notamment :
\begin{itemize}
\item Auto encodeur variationnel (i.e. VAE \citereferences{variational_lossy_autoencoder}) avec PyTorch \citeannexes{pytorch_website} \citeannexes{project_vae}
\item Wasserstein GAN avec pénalité de gradient (i.e. WGAN-GP \citereferences{wgan-gp_paper}) avec TensorFlow \citeannexes{tf_website} et Keras \citeannexes{keras_website} \citeannexes{project_wgan-gp}
\item Régression linéaire en utilisant plusieurs algorithmes d'optimisation comme le momentum \citereferences{momentum_paper}, le gradient accéléré de Nesterov \citereferences{nesterov_gradient_paper}, Adagrad \citereferences{adagrad_paper}, Adadelta \citereferences{adadelta_paper}, RMSprop \citereferences{rmsprop_lecture} et Adam \citereferences{adam_paper} avec Numpy \citeannexes{numpy_website} \citeannexes{project_reglin}
\end{itemize}
Vous pouvez également trouver mes autres travaux directement sur mon portfolio \citeannexes{personnal_portfolio}.

185
contents/algebra.tex Normal file
View File

@ -0,0 +1,185 @@
\langchapter{Algèbre}{Algebra}
%TODO Complete chapter
\section{Structures}
%TODO Complete section
\subsection{Monoïde}
%TODO Complete subsection
\langsubsection{Corps}{Field}
%TODO Complete subsection
\langsubsection{Anneau}{Ring}
%TODO Complete subsection
\section{Matrices}
%TODO Complete section
Une matrice est une structure qui permet de regrouper plusieurs éléments d'un corps $\K$ en un tableau de $n$ lignes et $m$ colonnes (ou plus), et est notée $\mathcal{M}_{n,m}(\K)$. Dans le cas d'une matrice carrée, on peut simplifier la notation en $\mathcal{M}_{n}(\K)$.
\begin{definition_sq} \label{definition:square_matrix}
Une matrice carrée (notée $\mathcal{M}_n(\K)$) est une matrice $\mathcal{M}_{n,m}(\K)$ d'un corps $\K$ où $n = m$.
\end{definition_sq}
\begin{definition_sq} \label{definition:identity_matrix}
La matrice identité (notée $I_n$) est une matrice carrée \ref{definition:square_matrix} tel que $\forall (i,j) \in \{1, \cdots, n\}^2, M_{i,j} = \begin{cases} i = j & 1 \\ \otherwise & 0 \end{cases}$
\end{definition_sq}
\subsection{Trace}
%TODO Complete subsection
$\forall A \in \mathcal{M}_{n}, tr(A)=\sum_{k=1}^na_{kk}$
$tr\in\mathcal{L}(\mathcal{M}_n(\K),\K)$
$\forall(A,B)\in\mathcal{M}_{n,p}(\K)\times\mathcal{M}_{p,n}(\K), tr(AB) = tr(BA)$
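En développant les coefficients, la dernière identité se vérifie directement :
$tr(AB) = \sum_{i=1}^n\sum_{k=1}^p a_{ik}b_{ki} = \sum_{k=1}^p\sum_{i=1}^n b_{ki}a_{ik} = tr(BA)$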
\langsubsection{Valeurs propres}{Eigenvalues}
%TODO Complete subsection
\subsubsection{Astuces pour le cas 2x2}
Avec $m := \frac{Tr(A)}{2}$
$\lambda_\pm = m \pm \sqrt{m^2 - det(A)}$
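Par exemple, pour $A = \begin{pmatrix}2 & 1 \\ 1 & 2\end{pmatrix}$ : $m = 2$, $det(A) = 3$, d'où $\lambda_\pm = 2 \pm 1$, soit les valeurs propres $1$ et $3$.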
\langsubsection{Vecteurs propres}{Eigenvectors}
%TODO Complete subsection
\langsubsubsection{Polynôme caractéristique}{Characteristic polynomial}
%%TODO Complete subsubsection
\langsubsection{Déterminant}{Determinant}
%%TODO Complete subsection
$\function{D}{\mathcal{M}_{m\times n}(\R)}{\R}$
\langsubsubsection{Axiomes}{Axioms}
%%TODO Complete subsubsection
$\forall M \in \mathcal{M}_{m\times n}$
\begin{itemize}
\item{$M' = \begin{pmatrix}1 & 0 & \cdots & 0 \\ 0 & 1 & \cdots & 0 \end{pmatrix}M$}
\item{$\forall \lambda \in K, D(\lambda M) = \lambda D(M)$}
\item{}
\end{itemize}
\langsubsubsection{Cas 2x2}{2x2 case}
%TODO Complete subsubsection
$det\left(\begin{bmatrix}a & b\\c & d\end{bmatrix}\right) = ad - bc$
\langsubsubsection{Cas 3x3}{3x3 case}
%TODO Complete subsubsection
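Une complétion possible (règle de Sarrus) :
$det\left(\begin{bmatrix}a & b & c\\d & e & f\\g & h & i\end{bmatrix}\right) = aei + bfg + cdh - ceg - bdi - afh$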
\subsection{Inverse}
%TODO Complete subsection
$det(M) \neq 0$
$(AB)^{-1} = B^{-1}A^{-1}$
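Par exemple, dans le cas 2x2, si $ad - bc \neq 0$ :
$\begin{bmatrix}a & b\\c & d\end{bmatrix}^{-1} = \frac{1}{ad - bc}\begin{bmatrix}d & -b\\-c & a\end{bmatrix}$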
\langsubsection{Diagonalisation}{Diagonalization}
%TODO Complete subsection
\langsubsection{Orthogonalité}{Orthogonality}
%TODO Complete subsection
$det(M) \in \{-1,1\}$
\subsection{Triangulation}
%TODO Complete subsection
$a \in Tr_n$
\langsection{Formes quadratiques}{Quadratic forms}
%TODO Complete section
\langsubsection{Forme linéaire}{Linear form}
%TODO Complete subsubsection
\langsubsubsection{Cas 2x2}{2x2 case}
%TODO Complete subsection
$a_1x_1^2 + a_2x_1x_2 + a_3x_2^2 = b$
\langsubsubsection{Cas 3x3}{3x3 case}
%TODO Complete subsection
$a_1x_1^2 + a_2x_2^2 + a_3x_3^2 + a_4x_1x_2 + a_5x_1x_3 + a_6x_2x_3$
\langsubsection{Forme matricielle}{Matrix form}
%TODO Complete subsubsection
\langsubsubsection{Cas 2x2}{2x2 case}
%TODO Complete subsection
$\begin{bmatrix}x_1 & x_2\end{bmatrix}
\begin{bmatrix}a_1 & \frac{a_2}{2} \\\frac{a_2}{2} & a_3\end{bmatrix}
\begin{bmatrix}x_1\\x_2\end{bmatrix}
= b \Leftrightarrow X^TAX$
\langsubsubsection{Cas 3x3}{3x3 case}
%TODO Complete subsection
$\begin{bmatrix}x_1 & x_2 & x_3 \end{bmatrix}
\begin{bmatrix}a_1 & \frac{a_4}{2} & \frac{a_5}{2} \\\frac{a_4}{2} & a_2 & \frac{a_6}{2} \\\frac{a_5}{2} & \frac{a_6}{2} & a_3\end{bmatrix}
\begin{bmatrix}x_1\\x_2\\x_3\end{bmatrix}
= b \Leftrightarrow X^TAX$
\langsubsection{Cas général}{General case}
%TODO Complete subsection
\langsubsubsection{Forme linéaire}{Linear form}
%TODO Complete subsubsection
$a_1x_1^2 + a_2x_2^2 + a_3x_3^2 + a_4x_1x_2 + a_5x_1x_3 + a_6x_2x_3$
\langsubsubsection{Forme matricielle}{Matrix form}
%TODO Complete subsubsection
$X \in \mathcal{M}_{1,n}$
$X = \begin{bmatrix}x_1, \cdots, x_n\end{bmatrix}$
$A \in \mathcal{T}^+_{n,n}$
$A = \begin{bmatrix}x_1, \cdots, x_n\end{bmatrix}$
\langsection{Espaces vectoriels}{Vector spaces}
%TODO Complete section
Soit $(E,+)$ un groupe abélien (i.e. commutatif) sur un corps $\mathbb{K}$
\begin{itemize}
\item{muni d'une loi de composition interne notée $+$}
\item{muni d'une loi de composition externe $\mathbb{K}*E \rightarrow E$ vérifiant $(\alpha,x) \rightarrow \alpha x$}
\end{itemize}
\bigskip
Et vérifiant $\forall(\alpha,\beta) \in \mathbb{K}, \forall(a,b,c) \in E$
\begin{itemize}
\item{Commutativité $a + b = b + a$}
\item{Associativité $(a + b) + c = a + (b + c)$}
\item{Élément neutre de $+ \Leftrightarrow \exists 0_E \in E : a + 0_E = a$}
\item{Élément neutre de $* \Leftrightarrow \exists 1_K \in K : a \cdot 1_K = a$}
\item{Élément opposé $\forall a \in E, \exists b \in E : a + b = b + a = 0_E$}
\item{Stabilité par $+ \Leftrightarrow a + b \in E$}
\item{Distributivité $+$ de $\mathbb{K} \Leftrightarrow (\alpha+\beta)a=\alpha a + \beta a$}
\item{Distributivité $*$ de $\mathbb{K} \Leftrightarrow (\alpha*\beta)a=\alpha(\beta a)$}
\end{itemize}
\langsubsection{Sous-espaces vectoriels}{Vector subspaces}
%TODO Complete subsection
Soit $E$ un $\mathbb{K}$-espace vectoriel et $F \subset E$, alors $F$ est un sous-espace vectoriel de $E$ si, et seulement si (exemple après la liste) :
\begin{itemize}
\item{$F \ne \emptyset$}
\item{$0_E \in F$}
\item{$\forall(\alpha,\beta)\in\mathbb{K}, \forall(x,y)\in F, \alpha x+\beta y\in F$}
\end{itemize}
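Exemple (illustratif) : dans $E = \R^2$, l'ensemble $F = \{(x,y) \in \R^2 : x + y = 0\}$ contient $0_E$ et est stable par combinaison linéaire ($\alpha(x_1,y_1) + \beta(x_2,y_2) \in F$), c'est donc un sous-espace vectoriel de $\R^2$.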

View File

@ -0,0 +1,15 @@
\langchapter{Théorie des Catégories}{Category theory}
%TODO Complete chapter
\section{Morphismes}
%TODO Complete section
\section{Functors}
%TODO Complete section
\subsection{Monads}
%TODO Complete subsection
\langsection{Argument diagonal}{Diagonal argument}
%TODO Complete section

View File

@ -0,0 +1,56 @@
\langchapter{Science informatique}{Computer Science}
%TODO Complete chapter
\langsection{Algorithmes}{Algorithms}
%TODO Complete section
\index{Algorithms}
\begin{algorithm}[H]
\KwIn{This is some input}
\KwOut{This is some output}
\SetAlgoLined
\SetNoFillComment
\tcc{This is a comment}
\vspace{3mm}
some code here\;
$x \leftarrow 0$\;
$y \leftarrow 0$\;
\uIf{$ x > 5$} {
x is greater than 5 \tcp*{This is also a comment}
}
\Else {
x is less than or equal to 5\;
}
\ForEach{y in 0..5} {
$y \leftarrow y + 1$\;
}
\For{$y$ in $0..5$} {
$y \leftarrow y - 1$\;
}
\While{$x > 5$} {
$x \leftarrow x - 1$\;
}
\Return Return something here\;
\caption{what}
\end{algorithm}
\langsection{Exemple en Python}{Python example}
\begin{lstlisting}[language=Python]
def fnc(a, b):
return a + b
\end{lstlisting}
\langsection{Exemple en C}{C example}
\begin{lstlisting}[language=C]
int fnc(int a, int b){
return a + b;
}
\end{lstlisting}
\langsection{Intelligence artificielle}{Artificial Intelligence}
%TODO Complete section
\langsubsection{Thèse orthogonale (et stupidité)}{Orthogonality thesis (and stupidity)}
% TODO Complete subsection

View File

@ -0,0 +1,9 @@
\langchapter{Différentiabilité}{Differentiability}
%TODO Complete chapter
\langsection{Axiomes}{Axioms}
%TODO Complete section
\section{Extremums}
%TODO Complete section

View File

@ -0,0 +1,9 @@
\langchapter{Équations Différentiel}{Differential Equations}
%TODO Complete chapter
\section{Linéaire homogène}
%TODO Complete section
\section{Non-linéaire homogène}
%TODO Complete section

198
contents/latex.tex Normal file
View File

@ -0,0 +1,198 @@
\chapter{LaTeX}
%TODO Complete chapter
\langsection{Méta-données}{Metadata}
%TODO Complete section
\begin{verbatim}
\maketitle
\end{verbatim}
\langsubsection{Titre}{Title}
\begin{verbatim}
\title{Sections and Chapters}
\end{verbatim}
\langsubsection{Auteur(es)}{Author(s)}
\begin{verbatim}
\author{Pierre Saunders}
\end{verbatim}
\subsection{Date}
\begin{verbatim}
\date{\today}
\date{\tomorrow}
\end{verbatim}
\langsubsection{Table des matières}{Contents}
\begin{verbatim}
\tableofcontents
\end{verbatim}
\langsection{Segmentation du document}{Document sectioning}
%TODO Complete section
\lang{Ces parties seront répertoriées dans la table des matières}{These parts will be listed in the table of contents}.
\begin{itemize}
\item{-1 part}
\item{0 chapter}
\item{1 section}
\item{2 subsection}
\item{3 subsubsection}
\item{4 paragraph}
\item{5 subparagraph}
\end{itemize}
\langsubsection{Ajouter une partie numérotée}{Add a numbered part}
% TODO Find a way to localize verbatim
\begin{verbatim}
\part{Nom de la partie}
\chapter{Nom du chapitre}
etc.
\end{verbatim}
\langsubsection{Ajouter une partie non numérotée}{Add an unnumbered part}
\begin{verbatim}
\part*{Nom de la partie}
\chapter*{Nom du chapitre}
etc.
\end{verbatim}
\langsection{Listes}{Lists}
%TODO Complete section
\begin{verbatim}
\begin{enumerate}
\item{Item 1}
\item{Item 2}
\item{Item 3}
\end{enumerate}
\end{verbatim}
\begin{mdframed}
\begin{enumerate}
\item{Item 1}
\item{Item 2}
\item{Item 3}
\end{enumerate}
\end{mdframed}
\begin{verbatim}
\begin{itemize}
\item{Item 1}
\item{Item 2}
\item{Item 3}
\end{itemize}
\end{verbatim}
\begin{mdframed}
\begin{itemize}
\item{Item 1}
\item{Item 2}
\item{Item 3}
\end{itemize}
\end{mdframed}
\langsection{Paquets additionnels}{Additional packages}
%TODO Complete section
\langsection{Mathématiques}{Mathematics}
%TODO Complete section
\subsection{Matrices}
%TODO Complete subsection
\begin{verbatim}
\begin{matrix} a & b \\ c & d \end{matrix}
\begin{pmatrix} a & b \\ c & d \end{pmatrix}
\begin{bmatrix} a & b \\ c & d \end{bmatrix}
\begin{Bmatrix} a & b \\ c & d \end{Bmatrix}
\begin{vmatrix} a & b \\ c & d \end{vmatrix}
\begin{Vmatrix} a & b \\ c & d \end{Vmatrix}
\end{verbatim}
$\begin{matrix} a & b \\ c & d \end{matrix}$
$\begin{pmatrix} a & b \\ c & d \end{pmatrix}$
$\begin{bmatrix} a & b \\ c & d \end{bmatrix}$
$\begin{Bmatrix} a & b \\ c & d \end{Bmatrix}$
$\begin{vmatrix} a & b \\ c & d \end{vmatrix}$
$\begin{Vmatrix} a & b \\ c & d \end{Vmatrix}$
\langsection{Informatique}{Computer science}
%TODO Complete section
\subsection{LaTeX}
\begin{verbatim}
\begin{verbatim}
\title{Sections and Chapters}
\end{verbatim }
\end{verbatim}
\langsubsection{Algorithmes}{Algorithms}
%TODO Complete subsection
\begin{verbatim}
\begin{algorithm}[H]
\KwIn{This is some input}
\KwOut{This is some output}
\SetAlgoLined
\SetNoFillComment
\tcc{This is a comment}
\vspace{3mm}
some code here\;
$x \leftarrow 0$\;
$y \leftarrow 0$\;
\uIf{$ x > 5$} {
x is greater than 5 \tcp*{This is also a comment}
}
\Else {
x is less than or equal to 5\;
}
\ForEach{y in 0..5} {
$y \leftarrow y + 1$\;
}
\For{$y$ in $0..5$} {
$y \leftarrow y - 1$\;
}
\While{$x > 5$} {
$x \leftarrow x - 1$\;
}
\Return Return something here\;
\caption{what}
\end{algorithm}
\end{verbatim}
\begin{algorithm}[H]
\KwIn{This is some input}
\KwOut{This is some output}
\SetAlgoLined
\SetNoFillComment
\tcc{This is a comment}
\vspace{3mm}
some code here\;
$x \leftarrow 0$\;
$y \leftarrow 0$\;
\uIf{$ x > 5$} {
x is greater than 5 \tcp*{This is also a comment}
}
\Else {
x is less than or equal to 5\;
}
\ForEach{y in 0..5} {
$y \leftarrow y + 1$\;
}
\For{$y$ in $0..5$} {
$y \leftarrow y - 1$\;
}
\While{$x > 5$} {
$x \leftarrow x - 1$\;
}
\Return Return something here\;
\caption{what}
\end{algorithm}

76
contents/linguistic.tex Normal file
View File

@ -0,0 +1,76 @@
\langchapter{Linguistique}{Linguistics}
%TODO Complete chapter
\langsection{Grecque}{Greek}
%TODO Complete section
\subsection{Alphabet}
%TODO Complete subsection
\begin{tabular}{|c|c|c|}
\hline
A & $\alpha$ & Alpha \\
\hline
B & $\beta$ & Beta \\
\hline
$\Gamma$ & $\gamma$ & Gamma \\
\hline
$\Delta$ & $\delta$ & Delta \\
\hline
\end{tabular}
Upper case Greek letters
Lower case Greek letters
Misc Greek letters
$\Gamma$
$\Delta$
$\Lambda$
$\Phi$
$\Pi$
$\Psi$
$\Sigma$
$\Theta$
$\Upsilon$
$\Xi$
$\Omega$
$\alpha $
$\beta $
$\gamma $
$\delta $
$\epsilon $
$\zeta $
$\eta $
$\theta $
$\iota $
$\kappa $
$\lambda $
$\mu $
$\nu $
$\xi $
$\pi $
$\rho $
$\sigma $
$\tau $
$\upsilon $
$\phi $
$\chi $
$\psi $
$\omega $
%$\digamma $
%$\varepsil$on
%$\varkappa$
%$\varphi $
%$\varpi $
%$\varrho $
%$\varsigma$
%$\vartheta$

145
contents/logic.tex Normal file
View File

@ -0,0 +1,145 @@
\langchapter{Logique}{Logic}
%TODO Complete chapter
La logique consiste en des opérations effectuées uniquement sur des variables (notées $P,Q,R$) n'ayant pour valeur soit Vrai (noté \true), soit Faux (noté \false).
%Logic consists of operations done on sole values : True $T$ and False $F$.
\langsection{Relations binaires}{Binary relations}
%TODO Complete section
\langsubsection{Réflexivité}{Reflexivity}
% TODO Complete subsection
Une relation $\Rel$ sur $E$ est dite \textbf{réflexive} si et seulement si $\forall a \in E, a \Rel a$.
\langsubsection{Transitivité}{Transitivity}
% TODO Complete subsection
Une relation $\Rel$ sur $E$ est dite \textbf{transitive} si et seulement si $\forall (a,b,c) \in E, a \Rel b \land b \Rel c \implies a \Rel c$.
\langsubsection{Associativité}{Associativity}
% TODO Complete subsection
Une relation $\Rel$ sur $E$ est dite \textbf{associative} si et seulement si $\forall (a,b,c) \in E, (a \Rel b) \Rel c \equivalance a \Rel (b \Rel c) \Leftrightarrow a \Rel b \Rel c$.
\langsubsection{Commutativité}{Commutativity}
% TODO Complete subsection
Une relation $\Rel$ sur $E$ est dite \textbf{commutative} si et seulement si $\forall (a,b) \in E, a \Rel b = b \Rel a$.
\langsection{Opérateurs}{Operators}
%TODO Complete section
\langsubsection{NON}{NOT}
% TODO Complete subsection
$P \Leftrightarrow \lnot \lnot P$
\langsubsubsection{Table de vérité}{Truth table}
\begin{tabular}{|c|c|}
\hline
P & $\lnot P$ \\
\hline
\false & \true \\
\hline
\true & \false \\
\hline
\end{tabular}
\langsubsection{ET}{AND}
%TODO Complete subsection
$\lnot(P \land Q) \equivalance \lnot P \lor \lnot Q$
\begin{tabular}{|c|c||c|}
\hline
P & Q & P $\land$ Q \\
\hline
\false & \false & \false \\
\hline
\true & \false & \false \\
\hline
\false & \true & \false \\
\hline
\true & \true & \true \\
\hline
\end{tabular}
\langsubsection{OU}{OR}
% TODO Complete subsection
$\lnot(P \lor Q) \equivalance \lnot P \land \lnot Q$
\medskip
\begin{tabular}{|c|c||c|}
\hline
P & Q & P $\lor$ Q \\
\hline
\false & \false & \false \\
\hline
\true & \false & \true \\
\hline
\false & \true & \true \\
\hline
\true & \true & \true \\
\hline
\end{tabular}
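À titre d'illustration (exemple ajouté, hors du texte original), les deux identités de De Morgan ci-dessus se vérifient par énumération des quatre cas :
\begin{lstlisting}[language=Python]
from itertools import product

# Verification exhaustive des lois de De Morgan
for P, Q in product([False, True], repeat=2):
    assert (not (P and Q)) == ((not P) or (not Q))
    assert (not (P or Q)) == ((not P) and (not Q))
print("Lois de De Morgan verifiees sur les 4 cas")
\end{lstlisting}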
\subsection{Implication}
%TODO Complete subsection
\begin{tabular}{|c|c||c|}
\hline
P & Q & P $\Rightarrow$ Q \\
\hline
\false & \false & \true \\
\hline
\true & \false & \false \\
\hline
\false & \true & \true \\
\hline
\true & \true & \true \\
\hline
\end{tabular}
\lang{Contraposée}{Contrapositive} :
$(P \implies Q) \equivalance (\lnot Q \implies \lnot P)$
\langsubsection{Équivalence}{Equivalence}
% TODO Complete subsection
\begin{tabular}{|c|c||c|}
\hline
P & Q & P $\equivalance$ Q \\
\hline
\false & \false & \true \\
\hline
\true & \false & \false \\
\hline
\false & \true & \false \\
\hline
\true & \true & \true \\
\hline
\end{tabular}
\langsubsection{OU exclusif / XOR}{Exclusive OR / XOR}
%TODO Complete subsection
$P \oplus Q \equivalance (P \lor Q) \land \lnot (P \land Q)$
\begin{tabular}{|c|c||c|}
\hline
P & Q & $P \oplus Q$ \\
\hline
\false & \false & \false \\
\hline
\true & \false & \true \\
\hline
\false & \true & \true \\
\hline
\true & \true & \false \\
\hline
\end{tabular}

View File

@ -0,0 +1,12 @@
\langchapter{Théorie de la mesure}{Measure theory}
%TODO Complete chapter
\section{Mesure de Lebesgue}
%TODO Complete section
\subsection{Tribu de Lebesgue}
%TODO Complete subsection
\subsection{Tribu Borélienne}
%TODO Complete subsection

View File

@ -0,0 +1,9 @@
\langchapter{Théorie Musicale}{Music Theory}
%TODO Complete chapter
\section{Notes}
%TODO Complete section
\langsection{Accords}{Chords}
%TODO Complete section

334
contents/number_theory.tex Normal file
View File

@ -0,0 +1,334 @@
\langchapter{Théorie des nombres}{Number theory}
%TODO Complete chapter
\langsection{Construction des entiers naturels $(\N)$}{Construction of natural numbers $(\N)$}
%TODO Complete section
\langsubsection{Axiomes de Peano}{Peano's Axioms}
%TODO Complete subsection
\langsubsection{Construction de Von Neumann}{Von Neumann's construction}
%TODO Complete subsection
Using set theory [\ref{set_theory}], we know there is an empty set, which we will label '0'
$0 := \emptyset$
$1 := \{0\} = \{\emptyset\}$
$2 := \{0, 1\} = \{\emptyset, \{\emptyset\}\}$
\subsection{Construction de Zermelo}
%TODO Complete subsection
Using set theory [\ref{set_theory}], we know there is an empty set, which we will label '0'
$0 := \emptyset$
Using recursion, we can define all the following integers.
$1 := \{\emptyset\}$
$2 := \{\{\emptyset\}\}$
$\N := \{0,1,2,3,\dots\}$
Note : whether to include 0 or not is an unsettled debate, some authors use $\N$ as $\N^*$ implicitly.
\subsection{Relations binaires}
%TODO Complete subsection
\subsection{Opérateurs}
%TODO Complete subsection
\subsection{Dénombrabilité}
%\subsection{Countability}
\begin{definition_sq} \label{definition:countability}
Un ensemble $E$ est dit dénombrable si, et seulement si, il existe une application injective de $E$ dans $\N$.
\end{definition_sq}
\langsubsection{Infini}{Infinity}
\begin{theorem_sq} \label{theorem:smallest_infity}
L'ensemble $\N$ est le plus petit infini possible.
\end{theorem_sq}
De manière intuitive, on pourrait croire que prendre une sous-partie infinie de $\N$ produirait un infini plus petit, pourtant on peut toujours créer une application injective entre $\N$ et cette sous-partie.
\subsubsection{Démonstration}
La sous-partie des nombres pairs est définie par les nombres de $\N$ qui sont dits pairs, autrement dit qui sont de la forme
\medskip
$\N_{2} = \{2n | n \in \N\}$
Ou
$\function{g_2}{\N}{\N_{2}}$
$\functiondef{n}{2n}$
\medskip
On peut voir que cette application est un cas particulier de l'ensemble des applications générées par l'application suivante :
$\function{g}{\N,\N}{\N_c}$
$\functiondef{n,c}{cn}$
\medskip
Chaque application $g_c$ générée avec $c \in \N^*$ est une bijection entre $\N$ et $\N_c$, par \ref{definition:countability} ils sont donc de même "taille".
\langsubsection{Propriétés}{Proprieties}
%TODO Complete subsection
\begin{itemize} \label{theorem:totally_ordered_natural_numbers}
\item{L'ensemble est totalement ordonnée : $\forall n \in \N, \exists k \suchas k = n + 1 \land n < k$}
\item{On peut diviser l'ensemble en deux ensembles distincts : $\forall n \in \N, \exists! k \in \N \suchas n := \begin{cases} 2k & \text{pair} \\ 2k+1 & \text{Impair} \end{cases}$}
\end{itemize}
\begin{theorem_sq}
Il existe toujours un élément minimum pour n'importe quel sous-ensemble de $\N$.
\end{theorem_sq}
\langsection{Construction des entiers relatifs $(\Z)$}{Construction of relative numbers}
%TODO Complete section
$\Z := \{\dots,-3,-2,-1,0,1,2,3,\dots\}$
\subsection{Relations binaires}
%TODO Complete subsection
\subsection{Opérateurs}
%TODO Complete subsection
\subsection{Dénombrabilité}
De manière intuitive, on pourrait croire que cet ensemble est "deux fois la taille" de $\N$, mais on peut démontrer que cela n'est pas le cas.
\begin{theorem_sq} \label{theorem:countable_integers}
L'ensemble $\Z$ est dénombrable.
\end{theorem_sq}
\subsubsection{Démonstration}
\begin{center}
\includegraphics[width=\textwidth]{out/countable_integers.gv.png}
\end{center}
$\function{f}{\Z}{\N}$
$\functiondef{n}{\begin{cases}n \le 0 & -2n \\ \otherwise & 2n-1 \end{cases}}$
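À titre de vérification : $f(0) = 0$, $f(1) = 1$, $f(-1) = 2$, $f(2) = 3$, $f(-2) = 4$, $\dots$, chaque entier naturel est ainsi atteint exactement une fois.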
\medskip
\langsection{Construction des rationnels $(\Q)$}{Construction of rational numbers}
%TODO Complete section
$p \in \Z, q \in \N, \frac{p}{q}$
$PGCD(p,q) := 1$
\subsection{Relations binaires}
%TODO Complete subsection
$\forall (p,q) \in \Q, \forall n \in \N^*, \frac{p}{q} \Leftrightarrow \frac{p \cdot n}{q \cdot n}$
\subsection{Opérateurs}
%TODO Complete subsection
$\forall ((p,q), (a,b)) \in \Q^2, \frac{p}{q} + \frac{a}{b} = \frac{pb + aq}{qb}$
$\forall ((p,q), (a,b)) \in \Q^2, \frac{p}{q} \cdot \frac{a}{b} = \frac{pa}{qb}$
$\forall (p,q) \in \Q, \forall k \in \Z, (\frac{p}{q})^k = \frac{p^k}{q^k}$
\subsection{Dénombrabilité}
De manière intuitive, on pourrait croire que cet ensemble n'est pas dénombrable du fait de sa nature visiblement différente, pourtant il l'est.
\begin{theorem_sq} \label{theorem:countable_rationals}
L'ensemble $\Q$ est dénombrable.
\end{theorem_sq}
\subsubsection{Démonstration}
\begin{center}
\includegraphics[width=30em]{out/countable_rationals.gv.png}
\end{center}
Les $P_i$ sont des nombres premiers distincts.
$\function{f}{\Q}{\N}$
$\functiondef{(p,q)}{P_1^{\frac{p}{|p|} + 1}P_2^{|p|}P_3^q}$
\medskip
\langsection{Construction des réels $(\R)$}{Construction of real numbers}
%TODO Complete section
\langsubsection{Construction de Cayley-Dickson}{Cayley-Dickson's construction}
\citeannexes{wikipedia_cayley_dickson}
\subsection{Coupes de Dedekind}
%TODO Complete subsection
\langsection{Construction des complexes $(\C)$}{Construction of complex numbers}
%TODO Complete section
\citeannexes{wikipedia_complex_number}
$\C = \{a + ib \mid (a,b) \in \R^2\} \cong \R^2$
$i^2 = -1$
\langsubsection{Table de Cayley}{Multiplication table}
%TODO Complete subsection
\begin{tabular}{|c||c|c|}
\hline
& 1 & i \\
\hline
\hline
1 & 1 & i \\
\hline
i & i & -1 \\
\hline
\end{tabular}
\subsection{Relations binaires}
%TODO Complete subsection
$\forall ((a,b), (c,d)) \in \C^2, a = c \land b = d \Leftrightarrow a + ib = c + id$
\subsection{Opérateurs}
%TODO Complete subsection
Il est impossible de munir le corps des complexes d'une relation d'ordre total compatible avec sa structure de corps, mais on peut construire un ordre lexicographique.
\subsubsection{Ordre lexicographique}
$\forall((a,b),(c,d)) \in \C^2, (a + ib) \Rel_L (c + id) \equivalance \begin{cases}
a \neq c & a < c \\
\otherwise & b \le d
\end{cases}$
\section{Construction des quaternions $(\Hq)$}
\citeannexes{wikipedia_quaternion}
\langsubsection{Table de Cayley}{Multiplication table}
%TODO Complete subsection
\begin{tabular}{|c||c|c|c|c|}
\hline
& 1 & i & j & k \\
\hline
\hline
1 & 1 & i & j & k \\
\hline
i & i & -1 & k & -j \\
\hline
j & j & -k & -1 & i \\
\hline
k & k & j & -i & -1 \\
\hline
\end{tabular}
\section{Construction des octonions $(\Ot)$}
\citeannexes{wikipedia_octonion}
\langsubsection{Table de multiplication}{Multiplication table}
%TODO Complete subsection
\begin{tabular}{|c||c|c|c|c|c|c|c|c|}
\hline
$e_i/e_j $ & $e_0$ & $e_1$ & $e_2$ & $e_3$ & $e_4$ & $e_5$ & $e_6$ & $e_7$ \\
\hline
\hline
$e_0$ & $e_0$ & $e_1$ & $e_2$ & $e_3$ & $e_4$ & $e_5$ & $e_6$ & $e_7$ \\
\hline
$e_1$ & $e_1$ & $-e_0$ & $e_3$ & $-e_2$ & $e_5$ & $-e_4$ & $-e_7$ & $e_6$ \\
\hline
$e_2$ & $e_2$ & $-e_3$ & $-e_0$ & $e_1$ & $e_6$ & $e_7$ & $-e_4$ & $-e_5$ \\
\hline
$e_3$ & $e_3$ & $e_2$ & $-e_1$ & $-e_0$ & $e_7$ & $-e_6$ & $e_5$ & $-e_4$ \\
\hline
$e_4$ & $e_4$ & $-e_5$ & $-e_6$ & $-e_7$ & $-e_0$ & $e_1$ & $e_2$ & $e_3$ \\
\hline
$e_5$ & $e_5$ & $e_4$ & $-e_7$ & $e_6$ & $-e_1$ & $-e_0$ & $-e_3$ & $e_2$ \\
\hline
$e_6$ & $e_6$ & $e_7$ & $e_4$ & $-e_5$ & $-e_2$ & $e_3$ & $-e_0$ & $-e_1$ \\
\hline
$e_7$ & $e_7$ & $-e_6$ & $e_5$ & $e_4$ & $-e_3$ & $-e_2$ & $e_1$ & $-e_0$ \\
\hline
\end{tabular}
\smallskip
$e_ie_j = \begin{cases} e_j, & \text{if i = 0} \\ e_i, & \text{if j = 0} \\ -\delta_{ij}e_0 + \epsilon_{ijk}e_k, & \text{otherwise}\end{cases}$
\smallskip
$\delta_{ij}$ est le symbole de Kronecker et $\epsilon_{ijk}$ est un tenseur complètement anti-symétrique.
\section{Construction des sedenions $(\Se)$}
\citeannexes{wikipedia_sedenion}
\langsubsection{Table de multiplication}{Multiplication table}
%TODO Complete subsection
\begin{tabular}{|c|c|c|c|}
\hline
& i & j & k \\
\hline
i & -1 & k & -j \\
\hline
j & -k & -1 & i \\
\hline
k & j & -i & -1 \\
\hline
\end{tabular}
\langsection{Nombres premiers}{Prime numbers}
%TODO Complete section
\begin{definition_sq} \label{definition:prime_number}
Un nombre $n \in \N^*$ est dit premier si, et seulement si, ses seuls diviseurs sont 1 et lui-même. Sinon ce nombre est dit composé.
\end{definition_sq}
Par convention, le nombre 1 n'est pas un nombre premier, mais cela n'a pas toujours été le cas.
\langsubsection{Infinité}{Infinity}
\begin{theorem_sq} \label{theorem:prime_infinity}
Il existe une infinité de nombres premiers.
\end{theorem_sq}
\langsubsubsection{Démonstration}{Demonstration}
Par preuve par contradiction, supposons qu'il existe un nombre fini de nombres premiers.
$\Pn = \{p \in \N^* \suchas p \text{ est premier}\} = \{p_0, p_1, \dots, p_n\}$
$\omega = \left(\prod_{p\in \Pn} p\right) + 1$
$\forall p \in \Pn, \omega \equiv 1 \pmod p$
$\implies \omega \notin \Pn \land \omega$ est premier (ou admet un facteur premier $\notin \Pn$)
$\rightarrow\leftarrow$
$\implies$ Il existe une infinité de nombres premiers.

33
contents/philosophy.tex Normal file
View File

@ -0,0 +1,33 @@
\chapter{Philosophy}
%TODO Complete chapter
\section{Aphorisms}
%TODO Complete section
\subsection{Ludwig Wittgenstein}
\subsubsection{Über Gewißheit (On Certainty)}
\begin{quote}
Der Zweifel setzt die Gewißheit voraus
\end{quote}
(Doubt presupposes certainty)
\subsubsection{Tractatus Logico-Philosophicus}
\begin{quote}
Wovon man nicht sprechen kann, darüber muß man schweigen
\end{quote}
(Whereof one cannot speak, thereof one must be silent)
\begin{quote}
Die Grenzen meiner Sprache bedeuten die Grenzen meiner Welt
\end{quote}
(The limits of my language means the limits of my world)
\subsection{Friedrich Nietzsche}
%TODO Complete subsection
\subsection{Immanuel Kant}
%TODO Complete subsection

78
contents/set_theory.tex Normal file
View File

@ -0,0 +1,78 @@
\langchapter{Théorie des ensembles}{Set theory} \label{set_theory}
%TODO Complete chapter
Un ensemble est une construction mathématique qui réunit plusieurs objets en une même instance.
%A set is a mathematical construct to assemble multiple objects in a single instance.
$S = \{a,b,c\}$
\langsection{Axiomes}{Axioms}
%TODO Complete section
\langsubsection{Extensionnalité}{Extensionality}
$\forall A\forall B(\forall X(X \in A \Leftrightarrow X \in B) \Rightarrow A = B)$
\langsubsection{Spécification}{Specification}
%TODO Complete subsection
\langsubsection{Paire}{Pairing}
%TODO Complete subsection
\langsubsection{Réunion}{Union}
%TODO Complete section
Unite all elements of two given sets into one.
$n,m \in \N^+$
$A = \{a_1, \cdots, a_n\}$
$B = \{b_1, \cdots, b_m\}$
$A \cup B = \{a_1, \cdots, a_n, b_1, \cdots, b_m\}$
\langsubsection{Schéma de remplacement}{Scheme of replacement}
%TODO Complete subsection
\langsubsection{Infini}{Infinity}
%TODO Complete subsection
\subsection{Power set}
%TODO Complete subsection
\langsubsection{Choix}{Choice}
%TODO Complete subsection
\section{Intersection}
%TODO Complete subsection
\langsection{Différence des sets}{Set difference}
%TODO Complete section
\langsection{Fonction}{Function}
%TODO Complete section
Une fonction $f$ est une opération qui permet de transformer un ou plusieurs éléments d'un set $A$ en d'autres éléments d'un set $B$.
\subsection{Notation}
%TODO Complete subsection
$\function{f}{A}{B}$
$\functiondef{x}{f(x)}$
\langsubsection{Injectivité}{Injectivity}
%TODO Complete subsection
Une fonction $f$ de $E$ dans $F$ est dite \textbf{injective} si, et seulement si, $\forall (a,b) \in E, f(a) = f(b) \Rightarrow a = b$.
\langsubsection{Surjectivité}{Surjectivity}
%TODO Complete subsection
Une fonction $f$ de $E$ dans $F$ est dite \textbf{surjective} si, et seulement si, $\forall y \in F, \exists x \in E : y = f(x)$.
\langsubsection{Bijectivité}{Bijectivity}
%TODO Complete subsection
Une fonction $f$ de $E$ dans $F$ est dite \textbf{bijective} si, et seulement si, elle est à la fois injective et surjective ou $\forall y \in F, \exists! x \in E : y = f(x)$.
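Exemples (illustratifs) : $f : \R \rightarrow \R$, $x \longmapsto 2x + 1$ est bijective, tandis que $x \longmapsto x^2$ n'est ni injective ($(-1)^2 = 1^2$) ni surjective ($-1$ n'a pas d'antécédent).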

248
contents/topology.tex Normal file
View File

@ -0,0 +1,248 @@
\langchapter{Topologie}{Topology}
%TODO Complete chapter
La topologie traite de l'étude des applications continues.
\langsection{Espaces vectoriels normés en dimension finie}{Normed vector spaces in finite dimension}
Dans cette section, $E$ sera un $\R$-espace vectoriel.
\langsubsection{Normes}{Norms}
Une norme sur $E$ est une application continue qui vérifie certaines propriétés.
\smallskip
$\function{\norm{.}}{E}{\R}$
\langsubsubsection{Axiomes}{Axioms}
\begin{itemize}
\item{$\forall x \in E, \norm{x} \ge 0$}
\item{$\norm{x} = 0 \equivalance x = 0$}
\item{$\forall \lambda \in \R, \norm{\lambda x} = |\lambda|\norm{x}$}
\item{$\forall(x,y) \in E, \norm{x + y} \le \norm{x} + \norm{y}$} (inégalité triangulaire)
\end{itemize}
\smallskip
On appellera $(E,\norm{.})$ un \textbf{espace vectoriel normé}.
\langsubsubsection{Exemples}{Examples}
$n \in \N^*, E = \R^n$
\begin{itemize}
\item{$\norm{x}_1 = \sum_{i=0}^n |x_i|$}
\item{$\norm{x}_2 = \sqrt{\sum_{i=0}^n x^2_i}$}
\item{$\norm{x}_\infty = \max\{|x_0|, \dots, |x_n|\}$}
\item{$E = R_n[X], \norm{P} = \int_0^1 |P(x)|dx$}
\item{$m \in \N^*, E = \mathcal{L}(R^n, R^m), \norm{\phi} = \max\{\norm{\phi(e_i)}_\infty, i \subseteq N^*\}$} ($e_i :=$ base canonique de $\R^n$)
\item{Avec $(E,\norm{.}_E)$ et $(F,\norm{.}_F)$, on définit la \textbf{norme produit} $\norm{E \times F}$ sur $E \times F$ par $u \in E, v \in F, \norm{(u,v)}_{E \times F} = \norm{u}_E + \norm{v}_F$}
\end{itemize}
\subsubsection{Équivalence des normes}
Deux normes $\norm{.}_1$ et $\norm{.}_2$ sont dites \textbf{équivalentes} si $\exists \alpha, \beta \in \R^*_+ \suchas \forall x \in E, \alpha\norm{x}_1 \le \norm{x}_2 \le \beta\norm{x}_1$
\smallskip
Note : On remarque que la relation \textit{être équivalentes} est bien une relation d'équivalence sur l'ensemble des normes sur $E$.
\langsubsection{Boules}{Balls}
Soit $x \in E$ et $r \in \R^*_+$
\subsubsection{Ouverte}
La \textbf{boule ouverte} de centre $x$ et de rayon $r$ est définie par $B(x,r) = \{ y \in E, \norm{x - y} < r\}$.
\smallskip
Note : la seule différence avec une boule fermée est la non inclusion des éléments dont la norme est égale au rayon.
\subsubsection{Fermée}
La \textbf{boule fermée} de centre $x$ et de rayon $r$ est définie par $\bar{B}(x,r) = \{ y \in E, \norm{x - y} \le r\}$.
\smallskip
Note : la seule différence avec une boule ouverte est l'inclusion des éléments dont la norme est égale au rayon.
\subsubsection{Voisinage}
On appelle \textbf{voisinage} de $x$ tout ensemble $U \subset E$ contenant $B(x,\epsilon)$ pour un certain $\epsilon \in \R^*_+$
\langsection{Limite}{Limit}
Une norme sur un espace vectoriel permet de définir la notion de limite. Elle est cependant légèrement différente selon si on l'applique à une suite ou a une application.
\subsection{Suite}
Soit \suite{x} une suite d'éléments d'un espaces vectoriel normé $(E, \norm{.})$.
On dit que \suite{x} \textit{converge} vers une limite $l \in E$, et l'on note $\lim(x_n) = l$ ou $x_n \rightarrow l$ si $\forall \epsilon \in \R_+^*, \exists n_0 \in \N, \suchas n > n_0 \implies x_n \in B(l,\epsilon)$
\subsection{Application}
Soit $(E, \norm{.}_E)$, $(F, \norm{.}_F)$, $A \subset E$, $\function{f}{A}{F}$, $t,x \in A$ et $l \in F$.
On dit que \textit{$f(t)$ tend vers $l$ quand $t$ tend vers $x$}, et l'on note $\lim_{t\rightarrow x}f(t) = l$ si $\forall \epsilon \in \R_+^*, \exists \delta \in \R_+^*, \suchas t \in B_E(x, \delta) \implies f(t) \in B_F(l, \epsilon)$
\section{Devoir Maison 1 : Topologie des espaces vectoriels normés}
\subsection{Exercice 1}
Soit $(E, \norm{.})$ un espace vectoriel normé et \suite{x} une suite d'éléments de $E$ qui converge vers $l \in E$.
\subsubsection{1.a} \label{sec:ex1a}
Montrer que toute sous-suite de $(x_n)_{n \in \N}$ converge vers $l$.
\\
Soit $\epsilon > 0$, comme $\lim_{n \to +\infty} x_n = l$
$\Rightarrow \exists n_0 \in \N$ tel que $\forall n \ge n_0$, $x_n \in \mathbb{B}(l, \epsilon)$
\\
Soit la fonction extractrice $\phi$ telle que
$\phi : \N \rightarrow \N$ strictement croissante, d'où $\forall n \in \N$, $\phi(n) \ge n$
\\
Et soit la sous-suite \suite{u} telle que $u_n = x_{\phi(n)}$
$\Rightarrow \forall n \ge n_0$, $\phi(n) \ge n \ge n_0$
$\Rightarrow \forall n \ge n_0$, $u_n = x_{\phi(n)} \in \mathbb{B}(l, \epsilon)$
$\Rightarrow (u_n)$, sous-suite de $(x_n)$, vérifie $\lim_{n \to +\infty} u_n = l$.
Par unicité de la limite nous pouvons conclure.
\begin{theorem_sq} \label{theorem_1}
Toute sous-suite (ou suite extraite) d'une suite convergente vers $l \in E$ converge vers $l$.
\end{theorem_sq}
\subsubsection{1.b} \label{sec:ex1b}
Montrer que l'ensemble $\{x_n, n \in \N\}$ est borné.
\\
Sachant que \suite{x} converge vers $l \in E$, pour $\epsilon > 0$ fixé, seul un nombre fini de termes est hors de $\bar{\mathbb{B}}(l, \epsilon)$.
$\Rightarrow \exists r > 0$ tel que $\{x_n, n \in \N\} \cup \{l\} \subset \bar{\mathbb{B}}(l, r)$.
$\Rightarrow \{x_n, n \in \N\}$ est borné.
\begin{theorem_sq} \label{theorem_2}
Toute suite \suite{x} d'éléments de $(E, \norm{.})$ qui converge vers $l \in E$ est bornée.
\end{theorem_sq}
\subsection{Exercice 2}
Soit $(E, \|.\|)$ un espace vectoriel normé et $K \subset E$ un sous-ensemble.
Montrer que $K$ est compact si et seulement si tout sous-ensemble infini $Z \subset K$ possède un point d'accumulation dans $K$.
\begin{definition_sq}[cf Cours 1.4.1]
Un sous-ensemble $K$ d'un espace vectoriel normé $(E, \|.\|)$ est dit compact si toute suite d'éléments de $K$ admet une sous-suite qui converge dans $K$.
\end{definition_sq}
\begin{lemme_sq}
$K$ est compact $\Rightarrow K$ possède un point d'accumulation.
\end{lemme_sq}
$K$ est compact
\\
Soit $\epsilon > 0$ \&\& $X = \{x_n, \forall n \in \N \}$ \&\& $X \subset K$
$\Rightarrow \exists l \in K$ tel que $\lim_{n \to +\infty} x_n = l \in \mathbb{B}(l, \epsilon) \subset K$
$\Rightarrow \exists y \in K$ tel que $\forall x_n \in \mathbb{B}(y, \epsilon)$
$\Rightarrow l$ est un point d'accumulation de $(u_n) \in K$
$\Rightarrow K$ possède un point d'accumulation
\begin{lemme_sq}
$K$ possède un point d'accumulation. $\Rightarrow K$ est compact.
\end{lemme_sq}
Soit $X = \{x_n, \forall n \in \N \}$ \&\& $X \subset K$
\paragraph{Si $X$ est fini}
$\Rightarrow \exists l \in X$ tel que $ \forall n \in \N, x_n = l$ une infinité de solution ayant la même valeur.
$\Rightarrow X$ possède un point d'accumulation et $X \subset K$
$\Rightarrow K$ possède un point d'accumulation
\paragraph{Si $X$ est infini}
$\Rightarrow \exists l \in X$ tel que $ \forall n \in \N, x_n = l_n$
En fixant $l \in X$,
$\Rightarrow$ $X$ possède un point d'accumulation tel que $l \in X \subset K$
$\Rightarrow K$ possède un point d'accumulation
\begin{theorem_sq}
$K \subset (E, \|.\|)$, $Z \subset K$ pour $Z$ tout sous-ensemble infini possède un point d'accumulation dans $K \Leftrightarrow K$ est compact.
\end{theorem_sq}
\subsection{Exercice 3}
Soit $K \subset R$ un compact non-vide. Montrer que $K$ possède un maximum et un minimum.
Soit \suite{x} des éléments de $K$ qui converge vers $l \in K$
Selon le \textbf{Théorème \ref{theorem_1}} et \textbf{\ref{theorem_2}}, toute suite d'éléments qui converge dans $K$ est bornée
$\Rightarrow$ $K$ possède au moins un majorant et au moins un minorant et ils sont inclus dans $K$
$\Rightarrow$ $K$ possède un maximum défini comme le plus petit des majorants et un minimum comme le plus grand des minorants.
\begin{theorem_sq}
Si $K \subset R$ un compact non vide, alors $K$ possède un maximum et un minimum.
\end{theorem_sq}
\subsection{Exercice 4}
Soit $E$ un espace vectoriel normé et $(x_n)_{n \in \N}$ une suite d'éléments de $E$. On dit que $(x_n)_{n \in \N}$ est \textit{une suite de Cauchy} si
$$\forall \epsilon > 0 , \exists N \in \N , \forall n_1, n_2 \ge N , \|x_{n_1} - x_{n_2} \| \le \epsilon$$
Montrer qu'une suite est de Cauchy si et seulement si elle est convergente (on dit que $E$ est \textit{complet}).
\\
\begin{lemme_sq}
Si une suite est de Cauchy $\Rightarrow$ la suite est convergente.
\end{lemme_sq}
En démontrant par contraposé, soit \suite{x} $\in E$ qui ne converge pas.
$\Rightarrow \forall l \in E$, $\exists \epsilon > 0$ tel que $\forall N \in \N$,$\exists n \in \N$, $n \ge N$, $x_n \notin \mathbb{B}(l, \epsilon)$
$\Rightarrow \forall \epsilon > 0$, $\exists N \in \N$, $\forall i,j \in \N$, $i \le N$ \&\& $j \le N$, $\|x_i - x_j\| > \epsilon$
$\Rightarrow$ La suite $(x_n)$ n'est pas de Cauchy.
\begin{lemme_sq}
Si une suite est convergente $\Rightarrow$ la suite est de Cauchy.
\end{lemme_sq}
Soit \suite{x} $\lim_{n \to +\infty} x_n = l$
$\Rightarrow \forall \epsilon > 0$, $\exists N \in \N$ tel que $\forall n \ge N$, $x_n \in \mathbb{B}(l, \frac{\epsilon}{2})$
$\Rightarrow \forall i,j \ge N$, $x_i \in \mathbb{B}(l, \frac{\epsilon}{2})$ \&\& $x_j \in \mathbb{B}(l, \frac{\epsilon}{2})$
$\Rightarrow \|x_i - x_j\| < \epsilon$
$\Rightarrow (x_n)$ est une suite de Cauchy.
\begin{theorem_sq}
Pour une suite \suite{x} donnée : $(x_n)$ est de Cauchy $\Leftrightarrow$ $(x_n)$ est convergente.
\end{theorem_sq}

83
contents/trigonometry.tex Normal file
View File

@ -0,0 +1,83 @@
\langchapter{Trigonométrie}{Trigonometry}
%TODO Complete chapter
\langsection{Cercle unitaire}{Unit circle}
%TODO Complete section
Le cercle unitaire est un cercle de centre $(0,0)$ et de rayon 1.
\subsection{cos}
%TODO Complete subsection
$\cos 0 = 1$
$\cos \frac{\pi}{2} = 0$
$\cos \pi = -1$
$\cos(-\frac{\pi}{2}) = 0$
$\cos(\pi + t) = -\cos(t)$
$\cos\frac{\pi}{6} = \frac{\sqrt{3}}{2}$
$\cos\frac{\pi}{3} = \frac{1}{2}$
$\forall (a,b) \in \R^2$
$\cos(a + b) = \cos a \cos b - \sin a \sin b$
$\cos(a - b) = \cos a \cos b + \sin a \sin b$
$\cos a + \cos b = 2 \cos(\frac{a + b}{2}) \cos(\frac{a - b}{2} )$
\subsection{sin}
%TODO Complete subsection
$\sin 0 = 0$
$\sin(\pi - t) = \sin(t)$
$\sin(\frac{\pi}{2} - t) = \cos(t)$
$\sin \frac{\pi}{6} = \frac{1}{2}$
$\sin \frac{\pi}{2} = 1$
%$\sin(\frac{\pi}{2} + t) = -\cos(t)$
$\forall (a,b) \in \R^2$
$\sin(a + b) = \sin a \cos b + \sin b \cos a$
$\sin(a - b) = \sin a \cos b - \sin b \cos a$
$\sin a - \sin b = 2 \cos (\frac{a+b}{2}) \sin (\frac{a-b}{2})$
$\sin a\sin b = \frac{\cos(a - b) - \cos(a + b)}{2}$
\subsection{tan}
%TODO Complete subsection
$\tan 0 = 0$
$\tan \frac{\pi}{6} = \frac{1}{\sqrt{3}}$
$\tan \frac{\pi}{4} = 1$
$\tan(\frac{\pi}{2} - x) = \frac{1}{\tan x}$
$\tan(\frac{\pi}{2} + x) = -\frac{1}{\tan x}$
$\tan(a + b) = \frac{\tan(a) + \tan(b)}{1- \tan(a)\tan(b)}$
$\tan(a - b) = \frac{\tan(a) - \tan(b)}{1 + \tan(a)\tan(b)}$
\subsection{Combinaisons}
%TODO Complete subsection
$\forall (a,b) \in \R^2$
$\sin a \cos b = \frac{\sin(a + b) + \sin(a - b)}{2}$

View File

@ -0,0 +1,29 @@
digraph {
node [shape = plaintext, fontcolor = White, fontsize = 30];
rankdir = LR;
bgcolor = None;
Edge [fontcolor = White, color = White, fontsize = 25];
subgraph pos {
Edge [constraint = False];
0 -> 1 [taillabel = 0];
-1 -> 2 [taillabel = 2];
-2 -> 3 [taillabel = 4];
-3 -> 4 [taillabel = 6];
-4 -> 5 [taillabel = 8];
}
subgraph neg {
Edge [constraint = False];
1 -> -1 [taillabel = 1];
2 -> -2 [taillabel = 3];
3 -> -3 [taillabel = 5];
4 -> -4 [taillabel = 7];
5 -> -5 [taillabel = 9];
}
subgraph dots {
node [label = "..."];
d; md;
}
md -> -5 -> -4 -> -3 -> -2 -> -1 -> 0 -> 1 -> 2 -> 3 -> 4 -> 5 -> d [color = None];
}

View File

@ -0,0 +1,48 @@
digraph {
node [shape = plaintext, fontcolor = White, fontsize = 30];
rankdir = LR;
bgcolor = None;
Edge [fontcolor = White, color = White, fontsize = 25];
subgraph dots {
node [label = "..."];
d; d2; d3; d4; d5;
vd; vd2; vd3; vd4; vd5; vd6;
}
subgraph pos {
Edge [constraint = False];
"1/1" -> "1/2" [taillabel = 0];
"1/2" -> "2/1" [taillabel = 1];
"2/1" -> "3/1" [taillabel = 2];
"3/1" -> "2/2" [taillabel = 3];
"2/2" -> "1/3" [taillabel = 4];
"1/3" -> "2/3" [taillabel = 5];
"2/3" -> "3/2" [taillabel = 6];
"3/2" -> "4/1" [taillabel = 7];
"4/1" -> "5/1" [taillabel = 8];
"5/1" -> "4/2" [taillabel = 9];
"4/2" -> "3/3" [taillabel = 10];
"3/3" -> "2/4" [taillabel = 11];
"2/4" -> "1/4" [taillabel = 12];
"1/4" -> "1/5" [taillabel = 13];
"1/5" -> "2/5" [taillabel = 14];
//"2/5" -> "3/4" [taillabel = 15];
//"3/4" -> "4/3" [taillabel = 16];
"4/3" -> "5/2" [taillabel = 17];
"1/5" -> vd [color = None];
"2/5" -> vd2 [color = None];
"3/5" -> vd3 [color = None];
"4/5" -> vd4 [color = None];
"5/5" -> vd5 [color = None];
d5 -> vd6 [color = None];
}
"1/1" -> "2/1" -> "3/1" -> "4/1" -> "5/1" -> d [color = None];
"1/2" -> "2/2" -> "3/2" -> "4/2" -> "5/2" -> d2 [color = None];
"1/3" -> "2/3" -> "3/3" -> "4/3" -> "5/3" -> d3 [color = None];
"1/4" -> "2/4" -> "3/4" -> "4/4" -> "5/4" -> d4 [color = None];
"1/5" -> "2/5" -> "3/5" -> "4/5" -> "5/5" -> d5 [color = None];
vd -> vd2 -> vd3 -> vd4 -> vd5 -> vd6 [color = None];
}

30
language_selector.sty Normal file
View File

@ -0,0 +1,30 @@
\ProvidesPackage{language_selector}
\DeclareOption{french}{
\def\langoption{french}
\newcommand{\lang}[2]{#1}
\newcommand{\langtitle}[2]{\title{#1}}
\newcommand{\langchapter}[2]{\chapter{#1}}
\newcommand{\langsection}[2]{\section{#1}}
\newcommand{\langsubsection}[2]{\subsection{#1}}
\newcommand{\langsubsubsection}[2]{\subsubsection{#1}}
\newcommand{\langsubsubsubsection}[2]{\subsubsubsection{#1}}
\newcommand{\langnewcites}[3]{\newcites{#1}{#2}}
}
\DeclareOption{english}{
\def\langoption{english}
\newcommand{\lang}[2]{#2}
\newcommand{\langtitle}[2]{\title{#2}}
\newcommand{\langchapter}[2]{\chapter{#2}}
\newcommand{\langsection}[2]{\section{#2}}
\newcommand{\langsubsection}[2]{\subsection{#2}}
\newcommand{\langsubsubsection}[2]{\subsubsection{#2}}
\newcommand{\langsubsubsubsection}[2]{\subsubsubsection{#2}}
\newcommand{\langnewcites}[3]{\newcites{#1}{#3}}
}
\ProcessOptions\relax
% Format
\RequirePackage[\langoption]{babel}

48
macros.sty Normal file
View File

@ -0,0 +1,48 @@
\ProvidesPackage{macros}
\RequirePackage{amsfonts} % Include missing symbols s.a "Natural Numbers"
% Snippet to add dots to TOC
% Thanks to "user11232" at https://tex.stackexchange.com/questions/53898/how-to-get-lines-with-dots-in-the-table-of-contents-for-sections
%\usepackage{tocloft}
%\renewcommand{\cftpartleader}{\cftdotfill{\cftdotsep}} % for parts
%\renewcommand{\cftsecleader}{\cftdotfill{\cftdotsep}} % for sections
\newcommand{\N}{\mathbb{N}} % Natural numbers symbol
\newcommand{\Z}{\mathbb{Z}} % Integer symbol
\newcommand{\Q}{\mathbb{Q}} % Rational numbers symbol
\newcommand{\R}{\mathbb{R}} % Real numbers symbol
\newcommand{\C}{\mathbb{C}} % Complex numbers symbol
\newcommand{\K}{\mathbb{K}} % Field (corps) symbol
\newcommand{\Hq}{\mathbb{H}} % Quaternions numbers symbol
\newcommand{\Ot}{\mathbb{O}} % Octonions numbers symbol
\newcommand{\Se}{\mathbb{S}} % Sedenions numbers symbol
\newcommand{\Pn}{\mathbb{P}} % Sets of all the prime numbers
\newcommand{\false}{{\color{th_colour_red}F}} % New symbol for false value
\newcommand{\true}{{\color{th_colour_green}V}} % New symbol for true value
%\newcommand{\false}{F} % New symbol for false value
%\newcommand{\true}{V} % New symbol for true value
\DeclareMathOperator{\Rel}{\mathcal{R}} % New symbol for binary relations
\newtheorem{definition}{Définition}
%\newtheorem{definition}{Definition}
\newtheorem{theorem}{Théorème}
%\newtheorem{theorem}{Theorem}
\newtheorem{lemme}{Lemme}
%\newtheorem{lemme}{Lemma}
\newcommandx{\suite}[3][1=n,2=n]{$(#3_{#1})_{#2 \in \N}$}
\newenvironment{definition_sq}{\begin{mdframed}\begin{definition}}{\end{definition}\end{mdframed}}
\newenvironment{theorem_sq}{\begin{mdframed}\begin{theorem}}{\end{theorem}\end{mdframed}}
\newenvironment{lemme_sq}{\begin{mdframed}\begin{lemme}}{\end{lemme}\end{mdframed}}
\newcommand{\norm}[1]{\|#1\|}
\newcommand{\equivalance}{\Leftrightarrow}
\renewcommand{\implies}{\Rightarrow}
\DeclareMathOperator{\suchas}{\text{tel que}}
%\DeclareMathOperator{\suchas}{\text{such as}}
\newcommand{\function}[3]{#1 : #2 \longrightarrow #3} % Function signature notation (\function is not predefined, hence \newcommand)
\newcommand{\functiondef}[2]{\hspace{15pt}#1 \longmapsto #2}
\newcommand{\otherwise}{\text{Sinon}}
%\newcommand{\otherwise}{\text{Otherwise}}
\renewcommand{\smallskip}{\vspace{3pt}}
\renewcommand{\medskip}{\vspace{6pt}}
\renewcommand{\bigskip}{\vspace{12pt}}

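A short usage sketch of the framed theorem environments and the sequence macro defined above (illustrative only; it assumes the same preamble as main.tex, where mdframed, xargs, amsmath and the themes colours are already loaded):

% Hypothetical body text exercising macros.sty
\begin{definition_sq}
	Une suite \suite{u} est bornée si $\exists M \in \R, \forall n \in \N, \norm{u_n} \leq M$. % framed, numbered definition
\end{definition_sq}
\begin{theorem_sq}
	$\forall n \in \N, n \geq 0$. % framed, numbered theorem
\end{theorem_sq}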
95
main.tex Normal file
View File

@ -0,0 +1,95 @@
\documentclass{report}
\usepackage[margin=1.5cm]{geometry} % Defines the margins for the whole document.
\usepackage[utf8]{inputenc} % Sets the input encoding
%\usepackage{helvet} % Add the Helvet font
\renewcommand{\familydefault}{\rmdefault} % Change default font to serif font family (default)
%\renewcommand{\familydefault}{\ttdefault} % Change default font to monospace font family
%\renewcommand{\familydefault}{\sfdefault} % Change default font to sans serif font family
\usepackage[T1]{fontenc} % Set the font (output) encoding
\usepackage[french]{language_selector}
%\usepackage[autolanguage]{numprint} % for the \nombre command
\usepackage{hyphenat} % Hyphenation rules
\hyphenation{mate-mática recu-perar}
\usepackage{setspace} % Sets the line spacing.
\setstretch{1.0}
\usepackage{multibib} % Allow multiple separate bibliographies with their own citations
\langnewcites{annexes}{Annexes}{Annexes}
\langnewcites{references}{Références}{References}
%\usepackage{lipsum} % Command to generate temporary dummy text
\usepackage[ruled,vlined,linesnumbered]{algorithm2e} % Add the algorithm environment
\usepackage[codedark]{themes} % Include several colour themes (default, codedark or dracula)
\pagecolor{th_colour_bg}
\color{th_colour_fg}
\usepackage{amsmath} % Provides command to typeset matrices with different delimiters
\usepackage{listings} % Add an environment to highlight code
\usepackage{xargs} % Allow parsing of multiple optional parameters
\usepackage{mdframed} % Fancy rectangles
\mdfsetup{linecolor = th_colour_fg, innerlinecolor = th_colour_fg,%
middlelinecolor = th_colour_fg, outerlinecolor = th_colour_fg,%
backgroundcolor = th_colour_bg, fontcolor = th_colour_fg}
\usepackage{macros} % Custom macros
\usepackage{graphicx}
\usepackage{makeidx} % Make a word index
\makeindex
\langtitle{Notebook ultime}{Ultimate Notebook}
\author{Pierre Saunders}
%\date{}
\begin{document}
\maketitle
%\renewcommand{\contentsname}{Sommaire}
\tableofcontents
\langchapter{Préambule}{Preamble}
%TODO Complete chapter
\langsection{Motivations}{Motivations}
%TODO Complete section
\lang{Ce notebook est destiné à accueillir mes maigres connaissances de manière digeste, mais il reste intrinsèquement incomplet, imprécis, voire erroné. À vous, lecteur qui découvrez ce notebook : accueillez-le davantage comme une liste de connaissances que comme un manuel scolaire.}{This notebook is meant to gather my modest knowledge in a digestible form, yet it remains intrinsically incomplete, imprecise or even erroneous. To you, reader discovering this notebook: treat it more as a list of knowledge than as a textbook.}
\langsection{Remerciements}{Acknowledgements}
%TODO Complete section
\lang{Je remercie Adel Medjhoub pour les nombreuses conversations qui ont mûri mes visions du monde.}{I thank Adel Medjhoub for the many conversations that matured my views of the world.}
\lang{Je remercie Damien Graux de m'avoir fait découvrir le monde de la recherche ainsi que le langage LaTeX dans lequel ce notebook est rédigé.}{I thank Damien Graux for introducing me to the world of research as well as to the LaTeX language in which this notebook is written.}
\lang{De manière honteusement démagogique, je remercie également tout lecteur de ce notebook.}{In a shamelessly demagogic way, I also thank every reader of this notebook.}
\input{contents/latex}
\input{contents/computer_science}
\input{contents/logic}
\input{contents/set_theory}
\input{contents/number_theory}
\input{contents/algebra}
\input{contents/trigonometry}
\input{contents/differentiability}
\input{contents/differential_equations}
\input{contents/measure_theory}
\input{contents/topology}
\input{contents/category_theory}
\input{contents/GaussianParadigm}
\input{contents/music_theory}
\input{contents/philosophy}
\input{contents/linguistic}
\addcontentsline{toc}{chapter}{Références}
\begingroup
\bibliographystylereferences{IEEEtran}
\bibliographyreferences{references}
\endgroup
\addcontentsline{toc}{chapter}{Annexes}
\begingroup
\bibliographystyleannexes{IEEEtran}
\bibliographyannexes{annexes}
\endgroup
\printindex
\end{document}

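Since multibib creates one \cite variant per \newcites suffix, the content files can route citations to either list; a short sketch (illustrative only, using keys from the .bib files of this commit):

% Hypothetical citations inside a contents/*.tex file
\citereferences{adam_paper} % ends up in the "Références" chapter
\citeannexes{project_vae}   % ends up in the "Annexes" chapter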
435
references.bib Normal file
View File

@ -0,0 +1,435 @@
@inproceedings{wgan-gp_paper,
author = {Gulrajani, Ishaan and Ahmed, Faruk and Arjovsky, Martin and Dumoulin, Vincent and Courville, Aaron},
title = {Improved Training of Wasserstein GANs},
year = {2017},
isbn = {9781510860964},
publisher = {Curran Associates Inc.},
address = {Red Hook, NY, USA},
abstract = {Generative Adversarial Networks (GANs) are powerful generative models, but suffer from training instability. The recently proposed Wasserstein GAN (WGAN) makes progress toward stable training of GANs, but sometimes can still generate only poor samples or fail to converge. We find that these problems are often due to the use of weight clipping in WGAN to enforce a Lipschitz constraint on the critic, which can lead to undesired behavior. We propose an alternative to clipping weights: penalize the norm of gradient of the critic with respect to its input. Our proposed method performs better than standard WGAN and enables stable training of a wide variety of GAN architectures with almost no hyperparameter tuning, including 101-layer ResNets and language models with continuous generators. We also achieve high quality generations on CIFAR-10 and LSUN bedrooms.},
booktitle = {Proceedings of the 31st International Conference on Neural Information Processing Systems},
pages = {5769--5779},
numpages = {11},
location = {Long Beach, California, USA},
series = {NIPS'17},
url = {https://arxiv.org/abs/1704.00028}
}
@article{momentum_paper,
title = {On the momentum term in gradient descent learning algorithms},
journal = {Neural Networks},
volume = {12},
number = {1},
pages = {145-151},
year = {1999},
issn = {0893-6080},
doi = {https://doi.org/10.1016/S0893-6080(98)00116-6},
url = {https://www.sciencedirect.com/science/article/pii/S0893608098001166},
author = {Ning Qian},
keywords = {Momentum, Gradient descent learning algorithm, Damped harmonic oscillator, Critical damping, Learning rate, Speed of convergence},
abstract = {A momentum term is usually included in the simulations of connectionist learning algorithms. Although it is well known that such a term greatly improves the speed of learning, there have been few rigorous studies of its mechanisms. In this paper, I show that in the limit of continuous time, the momentum parameter is analogous to the mass of Newtonian particles that move through a viscous medium in a conservative force field. The behavior of the system near a local minimum is equivalent to a set of coupled and damped harmonic oscillators. The momentum term improves the speed of convergence by bringing some eigen components of the system closer to critical damping. Similar results can be obtained for the discrete time case used in computer simulations. In particular, I derive the bounds for convergence on learning-rate and momentum parameters, and demonstrate that the momentum term can increase the range of learning rate over which the system converges. The optimal condition for convergence is also analyzed.}
}
@article{nesterov_gradient_paper,
title = {A method for unconstrained convex minimization problem with the rate of convergence {$O(1/k^2)$}},
author = {Yurii Nesterov},
year = {1983},
volume = {269},
pages = {543-547},
journal = {Doklady AN SSSR (translated as Soviet Math. Dokl.)}
}
@article{adagrad_paper,
author = {John Duchi and Elad Hazan and Yoram Singer},
title = {Adaptive Subgradient Methods for Online Learning and Stochastic Optimization},
journal = {Journal of Machine Learning Research},
year = {2011},
volume = {12},
number = {61},
pages = {2121-2159},
url = {http://jmlr.org/papers/v12/duchi11a.html}
}
@article{adadelta_paper,
author = {Matthew D. Zeiler},
title = {{ADADELTA:} An Adaptive Learning Rate Method},
journal = {CoRR},
volume = {abs/1212.5701},
year = {2012},
url = {http://arxiv.org/abs/1212.5701},
eprinttype = {arXiv},
eprint = {1212.5701},
timestamp = {Mon, 13 Aug 2018 16:45:57 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-1212-5701.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{rmsprop_lecture,
author = {Geoffrey Hinton and Nitish Srivastava and Kevin Swersky},
title = {Neural Networks for Machine Learning: Lecture 6a, Overview of mini-batch gradient descent},
pages = {26-30},
numpages = {5},
year = {2014},
journal = {CSC321 Toronto Winter 2014},
url = {https://www.cs.toronto.edu/~tijmen/csc321/slides/lecture\_slides\_lec6.pdf}
}
@misc{adam_paper,
doi = {10.48550/ARXIV.1412.6980},
url = {https://arxiv.org/abs/1412.6980},
author = {Kingma, Diederik P. and Ba, Jimmy},
keywords = {Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences},
title = {Adam: A Method for Stochastic Optimization},
publisher = {arXiv},
year = {2014},
copyright = {arXiv.org perpetual, non-exclusive license}
}
@inproceedings{sparql_ranking,
title = {{A Multi-Criteria Experimental Ranking of Distributed SPARQL Evaluators}},
author = {Graux, Damien and Jachiet, Louis and Genev{\`e}s, Pierre and Laya{\"i}da, Nabil},
url = {https://hal.inria.fr/hal-01381781},
booktitle = {{Big Data 2018 - IEEE International Conference on Big Data}},
address = {Seattle, United States},
publisher = {{IEEE}},
pages = {1-10},
year = {2018},
month = Dec,
keywords = {SPARQL ; Distributed Evaluation ; Benchmarking},
pdf = {https://hal.inria.fr/hal-01381781v2/file/experiment-analysis.pdf},
hal_id = {hal-01381781},
hal_version = {v2}
}
@inproceedings{sparql_representative,
author = {Saleem, Muhammad and Sz\'{a}rnyas, G\'{a}bor and Conrads, Felix and Bukhari, Syed Ahmad Chan and Mehmood, Qaiser and Ngonga Ngomo, Axel-Cyrille},
title = {How Representative Is a SPARQL Benchmark? An Analysis of RDF Triplestore Benchmarks},
year = {2019},
isbn = {9781450366748},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3308558.3313556},
doi = {10.1145/3308558.3313556},
abstract = {Triplestores are data management systems for storing and querying RDF data. Over recent
years, various benchmarks have been proposed to assess the performance of triplestores
across different performance measures. However, choosing the most suitable benchmark
for evaluating triplestores in practical settings is not a trivial task. This is because
triplestores experience varying workloads when deployed in real applications. We address
the problem of determining an appropriate benchmark for a given real-life workload
by providing a fine-grained comparative analysis of existing triplestore benchmarks.
In particular, we analyze the data and queries provided with the existing triplestore
benchmarks in addition to several real-world datasets. Furthermore, we measure the
correlation between the query execution time and various SPARQL query features and
rank those features based on their significance levels. Our experiments reveal several
interesting insights about the design of such benchmarks. With this fine-grained evaluation,
we aim to support the design and implementation of more diverse benchmarks. Application
developers can use our result to analyze their data and queries and choose a data
management system.},
booktitle = {The World Wide Web Conference},
pages = {1623--1633},
numpages = {11},
location = {San Francisco, CA, USA},
series = {WWW '19}
}
@inproceedings{4store_article,
author = {Steve Harris and Nick Lamb and Nigel Shadbolt},
title = {4store: The Design and Implementation of a Clustered RDF Store},
booktitle = {IN: SCALABLE SEMANTIC WEB KNOWLEDGE BASE SYSTEMS - SSWS2009},
year = {2009},
pages = {94--109},
publisher = {},
url = {https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.368.5588}
}
@article{rdf3x_article,
author = {Neumann, Thomas and Weikum, Gerhard},
title = {RDF-3X: A RISC-Style Engine for RDF},
year = {2008},
issue_date = {August 2008},
publisher = {VLDB Endowment},
volume = {1},
number = {1},
issn = {2150-8097},
url = {https://doi.org/10.14778/1453856.1453927},
doi = {10.14778/1453856.1453927},
abstract = {RDF is a data representation format for schema-free structured information that is
gaining momentum in the context of Semantic-Web corpora, life sciences, and also Web
2.0 platforms. The "pay-as-you-go" nature of RDF and the flexible pattern-matching
capabilities of its query language SPARQL entail efficiency and scalability challenges
for complex queries including long join paths. This paper presents the RDF-3X engine,
an implementation of SPARQL that achieves excellent performance by pursuing a RISC-style
architecture with a streamlined architecture and carefully designed, puristic data
structures and operations. The salient points of RDF-3X are: 1) a generic solution
for storing and indexing RDF triples that completely eliminates the need for physical-design
tuning, 2) a powerful yet simple query processor that leverages fast merge joins to
the largest possible extent, and 3) a query optimizer for choosing optimal join orders
using a cost model based on statistical synopses for entire join paths. The performance
of RDF-3X, in comparison to the previously best state-of-the-art systems, has been
measured on several large-scale datasets with more than 50 million RDF triples and
benchmark queries that include pattern matching and long join paths in the underlying
data graphs.},
journal = {Proc. VLDB Endow.},
month = aug,
pages = {647--659},
numpages = {13}
}
@article{lubm_article,
title = {LUBM: A benchmark for OWL knowledge base systems},
journal = {Journal of Web Semantics},
volume = {3},
number = {2},
pages = {158-182},
year = {2005},
note = {Selected Papers from the International Semantic Web Conference, 2004},
issn = {1570-8268},
doi = {https://doi.org/10.1016/j.websem.2005.06.005},
url = {https://www.sciencedirect.com/science/article/pii/S1570826805000132},
author = {Yuanbo Guo and Zhengxiang Pan and Jeff Heflin},
keywords = {Semantic Web, Knowledge base system, Lehigh University Benchmark, Evaluation},
abstract = {We describe our method for benchmarking Semantic Web knowledge base systems with
respect to use in large OWL applications. We present the Lehigh University Benchmark (LUBM) as
an example of how to design such benchmarks. The LUBM features an ontology for the university
domain, synthetic OWL data scalable to an arbitrary size, 14 extensional queries representing
a variety of properties, and several performance metrics. The LUBM can be used to evaluate
systems with different reasoning capabilities and storage mechanisms. We demonstrate this with
an evaluation of two memory-based systems and two systems with persistent storage.}
}
@inproceedings{owl_article,
author = {Guo, Yuanbo and Pan, Zhengxiang and Heflin, Jeff},
year = {2004},
month = {11},
pages = {274-288},
title = {An Evaluation of Knowledge Base Systems for Large OWL Datasets},
volume = {3298},
isbn = {978-3-540-23798-3},
doi = {10.1007/978-3-540-30475-3_20},
pdf = {http://swat.cse.lehigh.edu/pubs/guo04c.pdf},
editor = {McIlraith, Sheila A. and Plexousakis, Dimitris and van Harmelen, Frank},
booktitle = {The Semantic Web -- ISWC 2004},
publisher = {Springer Berlin Heidelberg},
address = {Berlin, Heidelberg},
abstract = {In this paper, we present an evaluation of four knowledge base systems (KBS) with
respect to use in large OWL applications. To our knowledge, no experiment has been done with the
scale of data used here. The smallest dataset used consists of 15 OWL files totaling 8MB, while
the largest dataset consists of 999 files totaling 583MB. We evaluated two memory-based systems
(OWLJessKB and memory-based Sesame) and two systems with persistent storage (database-based
Sesame and DLDB-OWL). We describe how we have performed the evaluation and what factors we have
considered in it. We show the results of the experiment and discuss the performance of each system.
In particular, we have concluded that existing systems need to place a greater emphasis on scalability.}
}
@article{sp2bench_benchmark,
author = {Michael Schmidt and Thomas Hornung and Georg Lausen and Christoph Pinkel},
title = {SP2Bench: {A} {SPARQL} Performance Benchmark},
journal = {CoRR},
volume = {abs/0806.4627},
year = {2008},
url = {http://arxiv.org/abs/0806.4627},
archiveprefix = {arXiv},
eprint = {0806.4627},
timestamp = {Mon, 13 Aug 2018 16:48:37 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-0806-4627.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{rdf_comparison,
author = {Duan, Songyun and Kementsietsidis, Anastasios and Srinivas, Kavitha and Udrea, Octavian},
title = {Apples and Oranges: A Comparison of RDF Benchmarks and Real RDF Datasets},
year = {2011},
isbn = {9781450306614},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/1989323.1989340},
doi = {10.1145/1989323.1989340},
abstract = {The widespread adoption of the Resource Description Framework (RDF) for the representation
of both open web and enterprise data is the driving force behind the increasing research
interest in RDF data management. As RDF data management systems proliferate, so are
benchmarks to test the scalability and performance of these systems under data and
workloads with various characteristics.In this paper, we compare data generated with
existing RDF benchmarks and data found in widely used real RDF datasets. The results
of our comparison illustrate that existing benchmark data have little in common with
real data. Therefore any conclusions drawn from existing benchmark tests might not
actually translate to expected behaviours in real settings. In terms of the comparison
itself, we show that simple primitive data metrics are inadequate to flesh out the
fundamental differences between real and benchmark data. We make two contributions
in this paper: (1) To address the limitations of the primitive metrics, we introduce
intuitive and novel metrics that can indeed highlight the key differences between
distinct datasets; (2) To address the limitations of existing benchmarks, we introduce
a new benchmark generator with the following novel characteristics: (a) the generator
can use any (real or synthetic) dataset and convert it into a benchmark dataset; (b)
the generator can generate data that mimic the characteristics of real datasets with
user-specified data properties. On the technical side, we formulate the benchmark
generation problem as an integer programming problem whose solution provides us with
the desired benchmark datasets. To our knowledge, this is the first methodological
study of RDF benchmarks, as well as the first attempt on generating RDF benchmarks
in a principled way.},
booktitle = {Proceedings of the 2011 ACM SIGMOD International Conference on Management of Data},
pages = {145--156},
numpages = {12},
keywords = {RDF, benchmark},
location = {Athens, Greece},
series = {SIGMOD '11}
}
@inproceedings{Weston1999SupportVM,
title = {Support vector machines for multi-class pattern recognition},
author = {Jason Weston and Chris Watkins},
booktitle = {ESANN},
year = {1999}
}
@article{kl_divergence,
author = {S. Kullback and R. A. Leibler},
title = {{On Information and Sufficiency}},
volume = {22},
journal = {The Annals of Mathematical Statistics},
number = {1},
publisher = {Institute of Mathematical Statistics},
pages = {79 -- 86},
abstract = {},
year = {1951},
doi = {10.1214/aoms/1177729694},
URL = {https://doi.org/10.1214/aoms/1177729694}
}
@article{variational_lossy_autoencoder,
author = {Xi Chen and
Diederik P. Kingma and
Tim Salimans and
Yan Duan and
Prafulla Dhariwal and
John Schulman and
Ilya Sutskever and
Pieter Abbeel},
title = {Variational Lossy Autoencoder},
journal = {CoRR},
volume = {abs/1611.02731},
year = {2016},
url = {http://arxiv.org/abs/1611.02731},
eprinttype = {arXiv},
eprint = {1611.02731},
timestamp = {Mon, 03 Sep 2018 12:15:29 +0200},
biburl = {https://dblp.org/rec/journals/corr/ChenKSDDSSA16.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{transfer_learning_survey,
author = {Fuzhen Zhuang and
Zhiyuan Qi and
Keyu Duan and
Dongbo Xi and
Yongchun Zhu and
Hengshu Zhu and
Hui Xiong and
Qing He},
title = {A Comprehensive Survey on Transfer Learning},
journal = {CoRR},
volume = {abs/1911.02685},
year = {2019},
url = {http://arxiv.org/abs/1911.02685},
eprinttype = {arXiv},
eprint = {1911.02685},
timestamp = {Sat, 29 Aug 2020 18:19:14 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-1911-02685.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{generative_adversarial_nets,
doi = {10.48550/ARXIV.1406.2661},
url = {https://arxiv.org/abs/1406.2661},
author = {Goodfellow, Ian J. and Pouget-Abadie, Jean and Mirza, Mehdi and Xu, Bing and Warde-Farley, David and Ozair, Sherjil and Courville, Aaron and Bengio, Yoshua},
keywords = {Machine Learning (stat.ML), Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences},
title = {Generative Adversarial Networks},
publisher = {arXiv},
year = {2014},
copyright = {arXiv.org perpetual, non-exclusive license}
}
@article{vae_paper,
doi = {10.48550/ARXIV.1312.6114},
url = {https://arxiv.org/abs/1312.6114},
author = {Kingma, Diederik P and Welling, Max},
keywords = {Machine Learning (stat.ML), Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences},
title = {Auto-Encoding Variational Bayes},
publisher = {arXiv},
year = {2013},
copyright = {arXiv.org perpetual, non-exclusive license}
}
@article{edit_gan_paper,
author = {Huan Ling and
Karsten Kreis and
Daiqing Li and
Seung Wook Kim and
Antonio Torralba and
Sanja Fidler},
title = {EditGAN: High-Precision Semantic Image Editing},
journal = {CoRR},
volume = {abs/2111.03186},
year = {2021},
url = {https://arxiv.org/abs/2111.03186},
eprinttype = {arXiv},
eprint = {2111.03186},
timestamp = {Wed, 10 Nov 2021 16:07:30 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-2111-03186.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@misc{dall_e_2_paper,
doi = {10.48550/ARXIV.2204.06125},
url = {https://arxiv.org/abs/2204.06125},
author = {Ramesh, Aditya and Dhariwal, Prafulla and Nichol, Alex and Chu, Casey and Chen, Mark},
keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences},
title = {Hierarchical Text-Conditional Image Generation with CLIP Latents},
publisher = {arXiv},
year = {2022},
copyright = {Creative Commons Attribution 4.0 International}
}
@inproceedings{sparql_pretty_printing,
title = {{SPARQL Template : un langage de Pretty Printing pour RDF}},
author = {Corby, Olivier and Faron Zucker, Catherine},
url = {https://hal.inria.fr/hal-01015267},
note = {Session 4 : Web s{\'e}mantique},
booktitle = {{IC - 25{\`e}mes Journ{\'e}es francophones d'Ing{\'e}nierie des Connaissances}},
address = {Clermont-Ferrand, France},
editor = {Catherine Faron-Zucker},
pages = {213-224},
year = {2014},
month = May,
keywords = {RDF Pretty Printing ; RDF AST ; SPARQL Template},
pdf = {https://hal.inria.fr/hal-01015267/file/Corby.pdf},
hal_id = {hal-01015267},
hal_version = {v1}
}
@misc{deep_gaussian_processes,
doi = {10.48550/ARXIV.1211.0358},
url = {https://arxiv.org/abs/1211.0358},
author = {Damianou, Andreas C. and Lawrence, Neil D.},
keywords = {Machine Learning (stat.ML), Machine Learning (cs.LG), Probability (math.PR), FOS: Computer and information sciences, FOS: Computer and information sciences, FOS: Mathematics, FOS: Mathematics, G.3; G.1.2; I.2.6, 60G15, 58E30},
title = {Deep Gaussian Processes},
publisher = {arXiv},
year = {2012},
copyright = {arXiv.org perpetual, non-exclusive license}
}
@inproceedings{gaussian_processes_regression,
author = {Williams, Christopher K. I. and Rasmussen, Carl Edward},
title = {Gaussian Processes for Regression},
year = {1995},
publisher = {MIT Press},
address = {Cambridge, MA, USA},
abstract = {The Bayesian analysis of neural networks is difficult because a simple prior over weights implies a complex prior distribution over functions. In this paper we investigate the use of Gaussian process priors over functions, which permit the predictive Bayesian analysis for fixed values of hyperparameters to be carried out exactly using matrix operations. Two methods, using optimization and averaging (via Hybrid Monte Carlo) over hyperparameters have been tested on a number of challenging problems and have produced excellent results.},
booktitle = {Proceedings of the 8th International Conference on Neural Information Processing Systems},
pages = {514--520},
numpages = {7},
location = {Denver, Colorado},
series = {NIPS'95}
}
@article{semi-supervised_learning_with_deep_generative_models,
author = {Diederik P. Kingma and
Danilo Jimenez Rezende and
Shakir Mohamed and
Max Welling},
title = {Semi-Supervised Learning with Deep Generative Models},
journal = {CoRR},
volume = {abs/1406.5298},
year = {2014},
url = {http://arxiv.org/abs/1406.5298},
eprinttype = {arXiv},
eprint = {1406.5298},
timestamp = {Mon, 13 Aug 2018 16:47:38 +0200},
biburl = {https://dblp.org/rec/journals/corr/KingmaRMW14.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{every_model_learned_by_gradient_descent_is_approximately_a_kernel_machine,
author = {Pedro Domingos},
title = {Every Model Learned by Gradient Descent Is Approximately a Kernel Machine},
journal = {CoRR},
volume = {abs/2012.00152},
year = {2020},
url = {https://arxiv.org/abs/2012.00152},
eprinttype = {arXiv},
eprint = {2012.00152},
timestamp = {Fri, 04 Dec 2020 12:07:23 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-2012-00152.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}

47
themes.sty Normal file
View File

@ -0,0 +1,47 @@
\ProvidesPackage{themes}
% Add many functions for colour themes
\RequirePackage{xcolor}
\DeclareOption{default}{\OptionNotUsed}
\definecolor{th_colour_bg} {RGB} {255, 255, 255}
\definecolor{th_colour_fg} {RGB} {0, 0, 0 }
\definecolor{th_colour_cl} {RGB} {68, 71, 90 }
\definecolor{th_colour_comment} {RGB} {98, 114, 164}
\definecolor{th_colour_cyan} {RGB} {139, 233, 253}
\definecolor{th_colour_green} {RGB} {0, 255, 0 }
\definecolor{th_colour_orange} {RGB} {255, 184, 108}
\definecolor{th_colour_pink} {RGB} {255, 121, 198}
\definecolor{th_colour_purple} {RGB} {189, 147, 249}
\definecolor{th_colour_red} {RGB} {255, 0, 0 }
\definecolor{th_colour_yellow} {RGB} {255, 255, 0 }
\DeclareOption{codedark}{
\definecolor{th_colour_bg} {HTML} {222324}
\definecolor{th_colour_fg} {HTML} {FFFFFF}
\definecolor{th_colour_cl} {RGB} {68, 71, 90 }
\definecolor{th_colour_comment} {RGB} {98, 114, 164}
\definecolor{th_colour_cyan} {RGB} {139, 233, 253}
\definecolor{th_colour_green} {RGB} {80, 250, 123}
\definecolor{th_colour_orange} {RGB} {255, 184, 108}
\definecolor{th_colour_pink} {RGB} {255, 121, 198}
\definecolor{th_colour_purple} {RGB} {189, 147, 249}
\definecolor{th_colour_red} {RGB} {255, 85, 85 }
\definecolor{th_colour_yellow} {RGB} {241, 250, 140}
}
\DeclareOption{dracula}{
\definecolor{th_colour_bg} {RGB} {40, 42, 54 }
\definecolor{th_colour_fg} {RGB} {248, 248, 242}
\definecolor{th_colour_cl} {RGB} {68, 71, 90 }
\definecolor{th_colour_comment} {RGB} {98, 114, 164}
\definecolor{th_colour_cyan} {RGB} {139, 233, 253}
\definecolor{th_colour_green} {RGB} {80, 250, 123}
\definecolor{th_colour_orange} {RGB} {255, 184, 108}
\definecolor{th_colour_pink} {RGB} {255, 121, 198}
\definecolor{th_colour_purple} {RGB} {189, 147, 249}
\definecolor{th_colour_red} {RGB} {255, 85, 85 }
\definecolor{th_colour_yellow} {RGB} {241, 250, 140}
}
\ProcessOptions\relax