% BibTeX bibliography (cleaned of extraction artifacts)
@inproceedings{wgan-gp_paper,
  author    = {Gulrajani, Ishaan and Ahmed, Faruk and Arjovsky, Martin and Dumoulin, Vincent and Courville, Aaron},
  title     = {Improved Training of {Wasserstein} {GANs}},
  booktitle = {Proceedings of the 31st International Conference on Neural Information Processing Systems},
  series    = {NIPS'17},
  year      = {2017},
  isbn      = {9781510860964},
  publisher = {Curran Associates Inc.},
  address   = {Red Hook, NY, USA},
  location  = {Long Beach, California, USA},
  pages     = {5769--5779},
  numpages  = {11},
  url       = {https://arxiv.org/abs/1704.00028},
  abstract  = {Generative Adversarial Networks (GANs) are powerful generative models, but suffer from training instability. The recently proposed Wasserstein GAN (WGAN) makes progress toward stable training of GANs, but sometimes can still generate only poor samples or fail to converge. We find that these problems are often due to the use of weight clipping in WGAN to enforce a Lipschitz constraint on the critic, which can lead to undesired behavior. We propose an alternative to clipping weights: penalize the norm of gradient of the critic with respect to its input. Our proposed method performs better than standard WGAN and enables stable training of a wide variety of GAN architectures with almost no hyperparameter tuning, including 101-layer ResNets and language models with continuous generators. We also achieve high quality generations on CIFAR-10 and LSUN bedrooms.}
}
@article{momentum_paper,
  author   = {Qian, Ning},
  title    = {On the Momentum Term in Gradient Descent Learning Algorithms},
  journal  = {Neural Networks},
  volume   = {12},
  number   = {1},
  pages    = {145--151},
  year     = {1999},
  issn     = {0893-6080},
  doi      = {10.1016/S0893-6080(98)00116-6},
  url      = {https://www.sciencedirect.com/science/article/pii/S0893608098001166},
  keywords = {Momentum, Gradient descent learning algorithm, Damped harmonic oscillator, Critical damping, Learning rate, Speed of convergence},
  abstract = {A momentum term is usually included in the simulations of connectionist learning algorithms. Although it is well known that such a term greatly improves the speed of learning, there have been few rigorous studies of its mechanisms. In this paper, I show that in the limit of continuous time, the momentum parameter is analogous to the mass of Newtonian particles that move through a viscous medium in a conservative force field. The behavior of the system near a local minimum is equivalent to a set of coupled and damped harmonic oscillators. The momentum term improves the speed of convergence by bringing some eigen components of the system closer to critical damping. Similar results can be obtained for the discrete time case used in computer simulations. In particular, I derive the bounds for convergence on learning-rate and momentum parameters, and demonstrate that the momentum term can increase the range of learning rate over which the system converges. The optimal condition for convergence is also analyzed.}
}
@article{nesterov_gradient_paper,
  author  = {Nesterov, Yurii},
  title   = {A Method for Unconstrained Convex Minimization Problem with the Rate of Convergence {$O(1/k^2)$}},
  journal = {Doklady ANSSSR (translated as Soviet.Math.Docl.)},
  volume  = {269},
  pages   = {543--547},
  year    = {1983}
}
@article{adagrad_paper,
  author  = {Duchi, John and Hazan, Elad and Singer, Yoram},
  title   = {Adaptive Subgradient Methods for Online Learning and Stochastic Optimization},
  journal = {Journal of Machine Learning Research},
  year    = {2011},
  volume  = {12},
  number  = {61},
  pages   = {2121--2159},
  url     = {http://jmlr.org/papers/v12/duchi11a.html}
}
@article{adadelta_paper,
  author     = {Zeiler, Matthew D.},
  title      = {{ADADELTA:} An Adaptive Learning Rate Method},
  journal    = {CoRR},
  volume     = {abs/1212.5701},
  year       = {2012},
  url        = {http://arxiv.org/abs/1212.5701},
  eprinttype = {arXiv},
  eprint     = {1212.5701},
  timestamp  = {Mon, 13 Aug 2018 16:45:57 +0200},
  biburl     = {https://dblp.org/rec/journals/corr/abs-1212-5701.bib},
  bibsource  = {dblp computer science bibliography, https://dblp.org}
}
@article{rmsprop_lecture,
  author   = {Hinton, Geoffrey and Srivastava, Nitish and Swersky, Kevin},
  title    = {Neural Networks for Machine Learning: Lecture 6a Overview of Mini-Batch Gradient Descent},
  journal  = {CSC321 Toronto Winter 2014},
  year     = {2014},
  pages    = {26--30},
  numpages = {5},
  url      = {https://www.cs.toronto.edu/~tijmen/csc321/slides/lecture\_slides\_lec6.pdf}
}
@misc{adam_paper,
  author    = {Kingma, Diederik P. and Ba, Jimmy},
  title     = {{Adam}: A Method for Stochastic Optimization},
  year      = {2014},
  publisher = {arXiv},
  doi       = {10.48550/ARXIV.1412.6980},
  url       = {https://arxiv.org/abs/1412.6980},
  keywords  = {Machine Learning (cs.LG), FOS: Computer and information sciences},
  copyright = {arXiv.org perpetual, non-exclusive license}
}
@inproceedings{sparql_ranking,
  title       = {{A Multi-Criteria Experimental Ranking of Distributed SPARQL Evaluators}},
  author      = {Graux, Damien and Jachiet, Louis and Genev{\`e}s, Pierre and Laya{\"i}da, Nabil},
  booktitle   = {{Big Data 2018 - IEEE International Conference on Big Data}},
  address     = {Seattle, United States},
  publisher   = {{IEEE}},
  pages       = {1--10},
  year        = {2018},
  month       = dec,
  url         = {https://hal.inria.fr/hal-01381781},
  keywords    = {SPARQL ; Distributed Evaluation ; Benchmarking},
  pdf         = {https://hal.inria.fr/hal-01381781v2/file/experiment-analysis.pdf},
  hal_id      = {hal-01381781},
  hal_version = {v2}
}
@inproceedings{sparql_representative,
  author    = {Saleem, Muhammad and Sz\'{a}rnyas, G\'{a}bor and Conrads, Felix and Bukhari, Syed Ahmad Chan and Mehmood, Qaiser and Ngonga Ngomo, Axel-Cyrille},
  title     = {How Representative Is a {SPARQL} Benchmark? An Analysis of {RDF} Triplestore Benchmarks},
  booktitle = {The World Wide Web Conference},
  series    = {WWW '19},
  year      = {2019},
  isbn      = {9781450366748},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  location  = {San Francisco, CA, USA},
  pages     = {1623--1633},
  numpages  = {11},
  url       = {https://doi.org/10.1145/3308558.3313556},
  doi       = {10.1145/3308558.3313556},
  abstract  = {Triplestores are data management systems for storing and querying RDF data. Over recent years, various benchmarks have been proposed to assess the performance of triplestores across different performance measures. However, choosing the most suitable benchmark for evaluating triplestores in practical settings is not a trivial task. This is because triplestores experience varying workloads when deployed in real applications. We address the problem of determining an appropriate benchmark for a given real-life workload by providing a fine-grained comparative analysis of existing triplestore benchmarks. In particular, we analyze the data and queries provided with the existing triplestore benchmarks in addition to several real-world datasets. Furthermore, we measure the correlation between the query execution time and various SPARQL query features and rank those features based on their significance levels. Our experiments reveal several interesting insights about the design of such benchmarks. With this fine-grained evaluation, we aim to support the design and implementation of more diverse benchmarks. Application developers can use our result to analyze their data and queries and choose a data management system.}
}
@inproceedings{4store_article,
  author    = {Harris, Steve and Lamb, Nick and Shadbolt, Nigel},
  title     = {4store: The Design and Implementation of a Clustered {RDF} Store},
  booktitle = {Scalable Semantic Web Knowledge Base Systems ({SSWS2009})},
  year      = {2009},
  pages     = {94--109},
  url       = {https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.368.5588}
}
@article{rdf3x_article,
  author     = {Neumann, Thomas and Weikum, Gerhard},
  title      = {{RDF-3X}: A {RISC}-Style Engine for {RDF}},
  journal    = {Proc. VLDB Endow.},
  year       = {2008},
  issue_date = {August 2008},
  month      = aug,
  publisher  = {VLDB Endowment},
  volume     = {1},
  number     = {1},
  pages      = {647--659},
  numpages   = {13},
  issn       = {2150-8097},
  url        = {https://doi.org/10.14778/1453856.1453927},
  doi        = {10.14778/1453856.1453927},
  abstract   = {RDF is a data representation format for schema-free structured information that is gaining momentum in the context of Semantic-Web corpora, life sciences, and also Web 2.0 platforms. The "pay-as-you-go" nature of RDF and the flexible pattern-matching capabilities of its query language SPARQL entail efficiency and scalability challenges for complex queries including long join paths. This paper presents the RDF-3X engine, an implementation of SPARQL that achieves excellent performance by pursuing a RISC-style architecture with a streamlined architecture and carefully designed, puristic data structures and operations. The salient points of RDF-3X are: 1) a generic solution for storing and indexing RDF triples that completely eliminates the need for physical-design tuning, 2) a powerful yet simple query processor that leverages fast merge joins to the largest possible extent, and 3) a query optimizer for choosing optimal join orders using a cost model based on statistical synopses for entire join paths. The performance of RDF-3X, in comparison to the previously best state-of-the-art systems, has been measured on several large-scale datasets with more than 50 million RDF triples and benchmark queries that include pattern matching and long join paths in the underlying data graphs.}
}
@article{lubm_article,
  author   = {Guo, Yuanbo and Pan, Zhengxiang and Heflin, Jeff},
  title    = {{LUBM}: A Benchmark for {OWL} Knowledge Base Systems},
  journal  = {Journal of Web Semantics},
  volume   = {3},
  number   = {2},
  pages    = {158--182},
  year     = {2005},
  note     = {Selected Papers from the International Semantic Web Conference, 2004},
  issn     = {1570-8268},
  doi      = {10.1016/j.websem.2005.06.005},
  url      = {https://www.sciencedirect.com/science/article/pii/S1570826805000132},
  keywords = {Semantic Web, Knowledge base system, Lehigh University Benchmark, Evaluation},
  abstract = {We describe our method for benchmarking Semantic Web knowledge base systems with respect to use in large OWL applications. We present the Lehigh University Benchmark (LUBM) as an example of how to design such benchmarks. The LUBM features an ontology for the university domain, synthetic OWL data scalable to an arbitrary size, 14 extensional queries representing a variety of properties, and several performance metrics. The LUBM can be used to evaluate systems with different reasoning capabilities and storage mechanisms. We demonstrate this with an evaluation of two memory-based systems and two systems with persistent storage.}
}
@inproceedings{owl_article,
  author    = {Guo, Yuanbo and Pan, Zhengxiang and Heflin, Jeff},
  title     = {An Evaluation of Knowledge Base Systems for Large {OWL} Datasets},
  booktitle = {The Semantic Web -- ISWC 2004},
  editor    = {McIlraith, Sheila A. and Plexousakis, Dimitris and van Harmelen, Frank},
  year      = {2004},
  month     = nov,
  volume    = {3298},
  pages     = {274--288},
  isbn      = {978-3-540-23798-3},
  doi       = {10.1007/978-3-540-30475-3_20},
  pdf       = {http://swat.cse.lehigh.edu/pubs/guo04c.pdf},
  publisher = {Springer Berlin Heidelberg},
  address   = {Berlin, Heidelberg},
  abstract  = {In this paper, we present an evaluation of four knowledge base systems (KBS) with respect to use in large OWL applications. To our knowledge, no experiment has been done with the scale of data used here. The smallest dataset used consists of 15 OWL files totaling 8MB, while the largest dataset consists of 999 files totaling 583MB. We evaluated two memory-based systems (OWLJessKB and memory-based Sesame) and two systems with persistent storage (database-based Sesame and DLDB-OWL). We describe how we have performed the evaluation and what factors we have considered in it. We show the results of the experiment and discuss the performance of each system. In particular, we have concluded that existing systems need to place a greater emphasis on scalability.}
}
@article{sp2bench_benchmark,
  author        = {Schmidt, Michael and Hornung, Thomas and Lausen, Georg and Pinkel, Christoph},
  title         = {SP2Bench: {A} {SPARQL} Performance Benchmark},
  journal       = {CoRR},
  volume        = {abs/0806.4627},
  year          = {2008},
  url           = {http://arxiv.org/abs/0806.4627},
  archiveprefix = {arXiv},
  eprint        = {0806.4627},
  timestamp     = {Mon, 13 Aug 2018 16:48:37 +0200},
  biburl        = {https://dblp.org/rec/journals/corr/abs-0806-4627.bib},
  bibsource     = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{rdf_comparison,
  author    = {Duan, Songyun and Kementsietsidis, Anastasios and Srinivas, Kavitha and Udrea, Octavian},
  title     = {Apples and Oranges: A Comparison of {RDF} Benchmarks and Real {RDF} Datasets},
  booktitle = {Proceedings of the 2011 ACM SIGMOD International Conference on Management of Data},
  series    = {SIGMOD '11},
  year      = {2011},
  isbn      = {9781450306614},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  location  = {Athens, Greece},
  pages     = {145--156},
  numpages  = {12},
  keywords  = {RDF, benchmark},
  url       = {https://doi.org/10.1145/1989323.1989340},
  doi       = {10.1145/1989323.1989340},
  abstract  = {The widespread adoption of the Resource Description Framework (RDF) for the representation of both open web and enterprise data is the driving force behind the increasing research interest in RDF data management. As RDF data management systems proliferate, so are benchmarks to test the scalability and performance of these systems under data and workloads with various characteristics.In this paper, we compare data generated with existing RDF benchmarks and data found in widely used real RDF datasets. The results of our comparison illustrate that existing benchmark data have little in common with real data. Therefore any conclusions drawn from existing benchmark tests might not actually translate to expected behaviours in real settings. In terms of the comparison itself, we show that simple primitive data metrics are inadequate to flesh out the fundamental differences between real and benchmark data. We make two contributions in this paper: (1) To address the limitations of the primitive metrics, we introduce intuitive and novel metrics that can indeed highlight the key differences between distinct datasets; (2) To address the limitations of existing benchmarks, we introduce a new benchmark generator with the following novel characteristics: (a) the generator can use any (real or synthetic) dataset and convert it into a benchmark dataset; (b) the generator can generate data that mimic the characteristics of real datasets with user-specified data properties. On the technical side, we formulate the benchmark generation problem as an integer programming problem whose solution provides us with the desired benchmark datasets. To our knowledge, this is the first methodological study of RDF benchmarks, as well as the first attempt on generating RDF benchmarks in a principled way.}
}
@inproceedings{Weston1999SupportVM,
  title     = {Support Vector Machines for Multi-Class Pattern Recognition},
  author    = {Weston, Jason and Watkins, Chris},
  booktitle = {ESANN},
  year      = {1999}
}
@article{kl_divergence,
  author    = {Kullback, S. and Leibler, R. A.},
  title     = {{On Information and Sufficiency}},
  journal   = {The Annals of Mathematical Statistics},
  volume    = {22},
  number    = {1},
  publisher = {Institute of Mathematical Statistics},
  pages     = {79--86},
  year      = {1951},
  doi       = {10.1214/aoms/1177729694},
  url       = {https://doi.org/10.1214/aoms/1177729694}
}
@article{variational_lossy_autoencoder,
  author     = {Chen, Xi and Kingma, Diederik P. and Salimans, Tim and Duan, Yan and Dhariwal, Prafulla and Schulman, John and Sutskever, Ilya and Abbeel, Pieter},
  title      = {Variational Lossy Autoencoder},
  journal    = {CoRR},
  volume     = {abs/1611.02731},
  year       = {2016},
  url        = {http://arxiv.org/abs/1611.02731},
  eprinttype = {arXiv},
  eprint     = {1611.02731},
  timestamp  = {Mon, 03 Sep 2018 12:15:29 +0200},
  biburl     = {https://dblp.org/rec/journals/corr/ChenKSDDSSA16.bib},
  bibsource  = {dblp computer science bibliography, https://dblp.org}
}
@article{transfer_learning_survey,
  author     = {Zhuang, Fuzhen and Qi, Zhiyuan and Duan, Keyu and Xi, Dongbo and Zhu, Yongchun and Zhu, Hengshu and Xiong, Hui and He, Qing},
  title      = {A Comprehensive Survey on Transfer Learning},
  journal    = {CoRR},
  volume     = {abs/1911.02685},
  year       = {2019},
  url        = {http://arxiv.org/abs/1911.02685},
  eprinttype = {arXiv},
  eprint     = {1911.02685},
  timestamp  = {Sat, 29 Aug 2020 18:19:14 +0200},
  biburl     = {https://dblp.org/rec/journals/corr/abs-1911-02685.bib},
  bibsource  = {dblp computer science bibliography, https://dblp.org}
}
@misc{generative_adversarial_nets,
  author    = {Goodfellow, Ian J. and Pouget-Abadie, Jean and Mirza, Mehdi and Xu, Bing and Warde-Farley, David and Ozair, Sherjil and Courville, Aaron and Bengio, Yoshua},
  title     = {Generative Adversarial Networks},
  year      = {2014},
  publisher = {arXiv},
  doi       = {10.48550/ARXIV.1406.2661},
  url       = {https://arxiv.org/abs/1406.2661},
  keywords  = {Machine Learning (stat.ML), Machine Learning (cs.LG), FOS: Computer and information sciences},
  copyright = {arXiv.org perpetual, non-exclusive license}
}
@misc{vae_paper,
  author    = {Kingma, Diederik P. and Welling, Max},
  title     = {Auto-Encoding Variational {Bayes}},
  year      = {2013},
  publisher = {arXiv},
  doi       = {10.48550/ARXIV.1312.6114},
  url       = {https://arxiv.org/abs/1312.6114},
  keywords  = {Machine Learning (stat.ML), Machine Learning (cs.LG), FOS: Computer and information sciences},
  copyright = {arXiv.org perpetual, non-exclusive license}
}
@article{edit_gan_paper,
  author     = {Ling, Huan and Kreis, Karsten and Li, Daiqing and Kim, Seung Wook and Torralba, Antonio and Fidler, Sanja},
  title      = {{EditGAN}: High-Precision Semantic Image Editing},
  journal    = {CoRR},
  volume     = {abs/2111.03186},
  year       = {2021},
  url        = {https://arxiv.org/abs/2111.03186},
  eprinttype = {arXiv},
  eprint     = {2111.03186},
  timestamp  = {Wed, 10 Nov 2021 16:07:30 +0100},
  biburl     = {https://dblp.org/rec/journals/corr/abs-2111-03186.bib},
  bibsource  = {dblp computer science bibliography, https://dblp.org}
}
@misc{dall_e_2_paper,
  author    = {Ramesh, Aditya and Dhariwal, Prafulla and Nichol, Alex and Chu, Casey and Chen, Mark},
  title     = {Hierarchical Text-Conditional Image Generation with {CLIP} Latents},
  year      = {2022},
  publisher = {arXiv},
  doi       = {10.48550/ARXIV.2204.06125},
  url       = {https://arxiv.org/abs/2204.06125},
  keywords  = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences},
  copyright = {Creative Commons Attribution 4.0 International}
}
@inproceedings{sparql_pretty_printing,
  title       = {{SPARQL Template : un langage de Pretty Printing pour RDF}},
  author      = {Corby, Olivier and Faron Zucker, Catherine},
  booktitle   = {{IC - 25{\`e}mes Journ{\'e}es francophones d'Ing{\'e}nierie des Connaissances}},
  editor      = {Catherine Faron-Zucker},
  address     = {Clermont-Ferrand, France},
  pages       = {213--224},
  year        = {2014},
  month       = may,
  note        = {Session 4 : Web s{\'e}mantique},
  url         = {https://hal.inria.fr/hal-01015267},
  keywords    = {RDF Pretty Printing ; RDF AST ; SPARQL Template},
  pdf         = {https://hal.inria.fr/hal-01015267/file/Corby.pdf},
  hal_id      = {hal-01015267},
  hal_version = {v1}
}
@misc{deep_gaussian_processes,
  author    = {Damianou, Andreas C. and Lawrence, Neil D.},
  title     = {Deep {Gaussian} Processes},
  year      = {2012},
  publisher = {arXiv},
  doi       = {10.48550/ARXIV.1211.0358},
  url       = {https://arxiv.org/abs/1211.0358},
  keywords  = {Machine Learning (stat.ML), Machine Learning (cs.LG), Probability (math.PR), FOS: Computer and information sciences, FOS: Mathematics, G.3; G.1.2; I.2.6, 60G15, 58E30},
  copyright = {arXiv.org perpetual, non-exclusive license}
}
@inproceedings{gaussian_processes_regression,
  author    = {Williams, Christopher K. I. and Rasmussen, Carl Edward},
  title     = {{Gaussian} Processes for Regression},
  booktitle = {Proceedings of the 8th International Conference on Neural Information Processing Systems},
  series    = {NIPS'95},
  year      = {1995},
  publisher = {MIT Press},
  address   = {Cambridge, MA, USA},
  location  = {Denver, Colorado},
  pages     = {514--520},
  numpages  = {7},
  abstract  = {The Bayesian analysis of neural networks is difficult because a simple prior over weights implies a complex prior distribution over functions. In this paper we investigate the use of Gaussian process priors over functions, which permit the predictive Bayesian analysis for fixed values of hyperparameters to be carried out exactly using matrix operations. Two methods, using optimization and averaging (via Hybrid Monte Carlo) over hyperparameters have been tested on a number of challenging problems and have produced excellent results.}
}
@article{semi-supervised_learning_with_deep_generative_models,
  author     = {Kingma, Diederik P. and Rezende, Danilo Jimenez and Mohamed, Shakir and Welling, Max},
  title      = {Semi-Supervised Learning with Deep Generative Models},
  journal    = {CoRR},
  volume     = {abs/1406.5298},
  year       = {2014},
  url        = {http://arxiv.org/abs/1406.5298},
  eprinttype = {arXiv},
  eprint     = {1406.5298},
  timestamp  = {Mon, 13 Aug 2018 16:47:38 +0200},
  biburl     = {https://dblp.org/rec/journals/corr/KingmaRMW14.bib},
  bibsource  = {dblp computer science bibliography, https://dblp.org}
}
@article{every_model_learned_by_gradient_descent_is_approximately_a_kernel_machine,
  author     = {Domingos, Pedro},
  title      = {Every Model Learned by Gradient Descent Is Approximately a Kernel Machine},
  journal    = {CoRR},
  volume     = {abs/2012.00152},
  year       = {2020},
  url        = {https://arxiv.org/abs/2012.00152},
  eprinttype = {arXiv},
  eprint     = {2012.00152},
  timestamp  = {Fri, 04 Dec 2020 12:07:23 +0100},
  biburl     = {https://dblp.org/rec/journals/corr/abs-2012-00152.bib},
  bibsource  = {dblp computer science bibliography, https://dblp.org}
}
@book{carlyle_2005_french_revolution,
  title     = {The French Revolution},
  author    = {Carlyle, T. and Ball, A. H. R.},
  isbn      = {9780486445137},
  pages     = {242},
  lccn      = {2005047548},
  series    = {Dover Value Editions},
  year      = {2005},
  publisher = {Dover Publications}
}
@book{robertson_2003_excess,
  title     = {An Excess of Phobias and Manias},
  author    = {Robertson, John G.},
  pages     = {75},
  isbn      = {9780963091932},
  year      = {2003},
  publisher = {Senior Scribe Publications}
}
@book{tucker_1970_liberty,
  title     = {Liberty},
  author    = {Tucker, Benjamin Ricketson},
  pages     = {361},
  volumes   = {235,312},
  year      = {1970},
  number    = {vol.~9~{\`a}~10~;vol.~235~{\`a}~312},
  lccn      = {72022711},
  series    = {Radical periodicals in the United States},
  publisher = {Greenwood Reprint Corporation}
}
@book{wheeler_1910_literature,
  title   = {Current Literature},
  author  = {Wheeler, Edward Jewitt},
  pages   = {564},
  volumes = {49},
  year    = {1910}
}