zotero_export.bib 18KB

@article{damianou_deep_2012,
  title = {Deep {Gaussian} {Processes}},
  url = {http://arxiv.org/abs/1211.0358},
  abstract = {In this paper we introduce deep Gaussian process (GP) models. Deep GPs are a deep belief network based on Gaussian process mappings. The data is modeled as the output of a multivariate GP. The inputs to that Gaussian process are then governed by another GP. A single layer model is equivalent to a standard GP or the GP latent variable model (GP-LVM). We perform inference in the model by approximate variational marginalization. This results in a strict lower bound on the marginal likelihood of the model which we use for model selection (number of layers and nodes per layer). Deep belief networks are typically applied to relatively large data sets using stochastic gradient descent for optimization. Our fully Bayesian treatment allows for the application of deep models even when data is scarce. Model selection by our variational bound shows that a five layer hierarchy is justified even when modelling a digit data set containing only 150 examples.},
  urldate = {2016-09-05},
  journal = {arXiv:1211.0358 [cs, math, stat]},
  author = {Damianou, Andreas C. and Lawrence, Neil D.},
  month = nov,
  year = {2012},
  note = {arXiv: 1211.0358},
  keywords = {Computer Science - Learning, Statistics - Machine Learning, 60G15, 58E30, G.1.2, G.3, I.2.6, Mathematics - Probability},
  file = {arXiv\:1211.0358 PDF:C\:\\Users\\z003jvyc\\Zotero\\storage\\BUXWE2UV\\Damianou and Lawrence - 2012 - Deep Gaussian Processes.pdf:application/pdf;arXiv.org Snapshot:C\:\\Users\\z003jvyc\\Zotero\\storage\\S2KB72DK\\1211.html:text/html}
}
@inproceedings{zhou_generalized_2012,
  title = {Generalized time warping for multi-modal alignment of human motion},
  booktitle = {Computer {Vision} and {Pattern} {Recognition} ({CVPR}), 2012 {IEEE} {Conference} on},
  publisher = {IEEE},
  author = {Zhou, Feng and De la Torre, Fernando},
  year = {2012},
  pages = {1282--1289},
  file = {Fulltext:C\:\\Users\\z003jvyc\\Zotero\\storage\\H9KUZQZQ\\Zhou und De la Torre - 2012 - Generalized time warping for multi-modal alignment.pdf:application/pdf;Snapshot:C\:\\Users\\z003jvyc\\Zotero\\storage\\A5RQ27H7\\auD.html:text/html}
}
@article{matthews_gpflow:_2017,
  title = {{GPflow}: {A} {Gaussian} process library using {TensorFlow}},
  volume = {18},
  shorttitle = {{GPflow}},
  url = {http://www.jmlr.org/papers/volume18/16-537/16-537.pdf},
  number = {40},
  urldate = {2017-09-27},
  journal = {Journal of Machine Learning Research},
  author = {Matthews, Alexander G. de G. and van der Wilk, Mark and Nickson, Tom and Fujii, Keisuke and Boukouvalas, Alexis and León-Villagrá, Pablo and Ghahramani, Zoubin and Hensman, James},
  year = {2017},
  pages = {1--6},
  file = {Full Text:C\:\\Users\\z003jvyc\\Zotero\\storage\\X6QGAFR8\\Matthews et al. - 2017 - GPflow A Gaussian process library using TensorFlo.pdf:application/pdf}
}
@article{snoek_input_2014,
  title = {Input {Warping} for {Bayesian} {Optimization} of {Non}-stationary {Functions}},
  url = {http://arxiv.org/abs/1402.0929},
  abstract = {Bayesian optimization has proven to be a highly effective methodology for the global optimization of unknown, expensive and multimodal functions. The ability to accurately model distributions over functions is critical to the effectiveness of Bayesian optimization. Although Gaussian processes provide a flexible prior over functions which can be queried efficiently, there are various classes of functions that remain difficult to model. One of the most frequently occurring of these is the class of non-stationary functions. The optimization of the hyperparameters of machine learning algorithms is a problem domain in which parameters are often manually transformed a priori, for example by optimizing in "log-space," to mitigate the effects of spatially-varying length scale. We develop a methodology for automatically learning a wide family of bijective transformations or warpings of the input space using the Beta cumulative distribution function. We further extend the warping framework to multi-task Bayesian optimization so that multiple tasks can be warped into a jointly stationary space. On a set of challenging benchmark optimization tasks, we observe that the inclusion of warping greatly improves on the state-of-the-art, producing better results faster and more reliably.},
  urldate = {2017-07-31},
  journal = {arXiv:1402.0929 [cs, stat]},
  author = {Snoek, Jasper and Swersky, Kevin and Zemel, Richard S. and Adams, Ryan P.},
  month = feb,
  year = {2014},
  note = {arXiv: 1402.0929},
  keywords = {Computer Science - Learning, Statistics - Machine Learning},
  file = {arXiv\:1402.0929 PDF:C\:\\Users\\z003jvyc\\Zotero\\storage\\ELZ9CMFF\\Snoek et al. - 2014 - Input Warping for Bayesian Optimization of Non-sta.pdf:application/pdf;arXiv.org Snapshot:C\:\\Users\\z003jvyc\\Zotero\\storage\\5ECB3EET\\1402.html:text/html}
}
@article{hensman_nested_2014,
  title = {Nested {Variational} {Compression} in {Deep} {Gaussian} {Processes}},
  url = {http://arxiv.org/abs/1412.1370},
  abstract = {Deep Gaussian processes provide a flexible approach to probabilistic modelling of data using either supervised or unsupervised learning. For tractable inference approximations to the marginal likelihood of the model must be made. The original approach to approximate inference in these models used variational compression to allow for approximate variational marginalization of the hidden variables leading to a lower bound on the marginal likelihood of the model [Damianou and Lawrence, 2013]. In this paper we extend this idea with a nested variational compression. The resulting lower bound on the likelihood can be easily parallelized or adapted for stochastic variational inference.},
  urldate = {2017-07-19},
  journal = {arXiv:1412.1370 [stat]},
  author = {Hensman, James and Lawrence, Neil D.},
  month = dec,
  year = {2014},
  note = {arXiv: 1412.1370},
  keywords = {Statistics - Machine Learning},
  file = {arXiv\:1412.1370 PDF:C\:\\Users\\z003jvyc\\Zotero\\storage\\ZKNA6NYN\\Hensman and Lawrence - 2014 - Nested Variational Compression in Deep Gaussian Pr.pdf:application/pdf;arXiv.org Snapshot:C\:\\Users\\z003jvyc\\Zotero\\storage\\UMQ96R94\\1412.html:text/html}
}
@inproceedings{alvarez_sparse_2009,
  title = {Sparse convolved {Gaussian} processes for multi-output regression},
  url = {http://papers.nips.cc/paper/3553-sparse-convolved-gaussian-processes-for-multi-output-regression},
  urldate = {2017-07-14},
  booktitle = {Advances in {Neural} {Information} {Processing} {Systems}},
  author = {Alvarez, Mauricio and Lawrence, Neil D.},
  year = {2009},
  pages = {57--64},
  file = {[PDF] nips.cc:C\:\\Users\\z003jvyc\\Zotero\\storage\\SIZMYY5F\\Alvarez and Lawrence - 2009 - Sparse convolved Gaussian processes for multi-outp.pdf:application/pdf;Snapshot:C\:\\Users\\z003jvyc\\Zotero\\storage\\A2QU9XT7\\3553-sparse-convolved-gaussian-processes-for-multi-output-regression.html:text/html}
}
@article{salimbeni_doubly_2017,
  title = {Doubly {Stochastic} {Variational} {Inference} for {Deep} {Gaussian} {Processes}},
  url = {http://arxiv.org/abs/1705.08933},
  abstract = {Gaussian processes (GPs) are a good choice for function approximation as they are flexible, robust to over-fitting, and provide well-calibrated predictive uncertainty. Deep Gaussian processes (DGPs) are multi-layer generalisations of GPs, but inference in these models has proved challenging. Existing approaches to inference in DGP models assume approximate posteriors that force independence between the layers, and do not work well in practice. We present a doubly stochastic variational inference algorithm, which does not force independence between layers. With our method of inference we demonstrate that a DGP model can be used effectively on data ranging in size from hundreds to a billion points. We provide strong empirical evidence that our inference scheme for DGPs works well in practice in both classification and regression.},
  urldate = {2017-06-02},
  journal = {arXiv:1705.08933 [stat]},
  author = {Salimbeni, Hugh and Deisenroth, Marc},
  month = may,
  year = {2017},
  note = {arXiv: 1705.08933},
  keywords = {Statistics - Machine Learning},
  file = {arXiv\:1705.08933 PDF:C\:\\Users\\z003jvyc\\Zotero\\storage\\FTCRG5BC\\Salimbeni und Deisenroth - 2017 - Doubly Stochastic Variational Inference for Deep G.pdf:application/pdf;arXiv.org Snapshot:C\:\\Users\\z003jvyc\\Zotero\\storage\\AP6UXDGD\\1705.html:text/html}
}
@inproceedings{titsias_variational_2009,
  title = {Variational {Learning} of {Inducing} {Variables} in {Sparse} {Gaussian} {Processes}},
  volume = {5},
  url = {http://www.jmlr.org/proceedings/papers/v5/titsias09a/titsias09a.pdf},
  urldate = {2017-04-06},
  booktitle = {{AISTATS}},
  author = {Titsias, Michalis K.},
  year = {2009},
  pages = {567--574},
  file = {[PDF] jmlr.org:C\:\\Users\\z003jvyc\\Zotero\\storage\\UTMCPPXS\\Titsias - 2009 - Variational Learning of Inducing Variables in Spar.pdf:application/pdf}
}
@article{hensman_gaussian_2013,
  title = {{Gaussian} {Processes} for {Big} {Data}},
  url = {http://arxiv.org/abs/1309.6835},
  abstract = {We introduce stochastic variational inference for Gaussian process models. This enables the application of Gaussian process (GP) models to data sets containing millions of data points. We show how GPs can be variationally decomposed to depend on a set of globally relevant inducing variables which factorize the model in the necessary manner to perform variational inference. Our approach is readily extended to models with non-Gaussian likelihoods and latent variable models based around Gaussian processes. We demonstrate the approach on a simple toy problem and two real world data sets.},
  urldate = {2016-07-06},
  journal = {arXiv:1309.6835 [cs, stat]},
  author = {Hensman, James and Fusi, Nicolo and Lawrence, Neil D.},
  month = sep,
  year = {2013},
  note = {arXiv: 1309.6835},
  keywords = {Computer Science - Learning, Statistics - Machine Learning},
  file = {arXiv\:1309.6835 PDF:C\:\\Users\\z003jvyc\\Zotero\\storage\\XV3VH9PJ\\Hensman et al. - 2013 - Gaussian Processes for Big Data.pdf:application/pdf;arXiv.org Snapshot:C\:\\Users\\z003jvyc\\Zotero\\storage\\ISZ4Z86Q\\1309.html:text/html}
}
@inproceedings{alvarez_efficient_2010,
  title = {Efficient {Multioutput} {Gaussian} {Processes} through {Variational} {Inducing} {Kernels}},
  volume = {9},
  url = {http://www.jmlr.org/proceedings/papers/v9/alvarez10a/alvarez10a.pdf},
  urldate = {2017-03-02},
  booktitle = {{AISTATS}},
  author = {Alvarez, Mauricio A. and Luengo, David and Titsias, Michalis K. and Lawrence, Neil D.},
  year = {2010},
  pages = {25--32},
  file = {[PDF] jmlr.org:C\:\\Users\\z003jvyc\\Zotero\\storage\\6Q4I9FRF\\Alvarez et al. - 2010 - Efficient Multioutput Gaussian Processes through V.pdf:application/pdf}
}
@article{hensman_scalable_2014,
  title = {Scalable {Variational} {Gaussian} {Process} {Classification}},
  url = {http://arxiv.org/abs/1411.2005},
  abstract = {Gaussian process classification is a popular method with a number of appealing properties. We show how to scale the model within a variational inducing point framework, outperforming the state of the art on benchmark datasets. Importantly, the variational formulation can be exploited to allow classification in problems with millions of data points, as we demonstrate in experiments.},
  urldate = {2017-02-13},
  journal = {arXiv:1411.2005 [stat]},
  author = {Hensman, James and Matthews, Alex and Ghahramani, Zoubin},
  month = nov,
  year = {2014},
  note = {arXiv: 1411.2005},
  keywords = {Statistics - Machine Learning},
  file = {arXiv\:1411.2005 PDF:C\:\\Users\\z003jvyc\\Zotero\\storage\\T4WFAQPK\\Hensman et al. - 2014 - Scalable Variational Gaussian Process Classificati.pdf:application/pdf;arXiv.org Snapshot:C\:\\Users\\z003jvyc\\Zotero\\storage\\5GEKF8R7\\1411.html:text/html}
}
@techreport{boyle_multiple_2005,
  title = {Multiple output {Gaussian} process regression},
  abstract = {Gaussian processes are usually parameterised in terms of their covariance functions. However, this makes it difficult to deal with multiple outputs, because ensuring that the covariance matrix is positive definite is problematic. An alternative formulation is to treat Gaussian processes as white noise sources convolved with smoothing kernels, and to parameterise the kernel instead. Using this, we extend Gaussian processes to handle multiple, coupled outputs.},
  author = {Boyle, Phillip and Frean, Marcus},
  year = {2005},
  file = {Citeseer - Full Text PDF:C\:\\Users\\z003jvyc\\Zotero\\storage\\STU7NV59\\Boyle et al. - 2005 - Multiple output gaussian process regression.pdf:application/pdf;Citeseer - Snapshot:C\:\\Users\\z003jvyc\\Zotero\\storage\\ZWMMCM3F\\summary.html:text/html}
}
@article{alvarez_kernels_2011,
  title = {Kernels for {Vector}-{Valued} {Functions}: a {Review}},
  shorttitle = {Kernels for {Vector}-{Valued} {Functions}},
  url = {http://arxiv.org/abs/1106.6251},
  abstract = {Kernel methods are among the most popular techniques in machine learning. From a frequentist/discriminative perspective they play a central role in regularization theory as they provide a natural choice for the hypotheses space and the regularization functional through the notion of reproducing kernel Hilbert spaces. From a Bayesian/generative perspective they are the key in the context of Gaussian processes, where the kernel function is also known as the covariance function. Traditionally, kernel methods have been used in supervised learning problem with scalar outputs and indeed there has been a considerable amount of work devoted to designing and learning kernels. More recently there has been an increasing interest in methods that deal with multiple outputs, motivated partly by frameworks like multitask learning. In this paper, we review different methods to design or learn valid kernel functions for multiple outputs, paying particular attention to the connection between probabilistic and functional methods.},
  urldate = {2017-02-06},
  journal = {arXiv:1106.6251 [cs, math, stat]},
  author = {Alvarez, Mauricio A. and Rosasco, Lorenzo and Lawrence, Neil D.},
  month = jun,
  year = {2011},
  note = {arXiv: 1106.6251},
  keywords = {Statistics - Machine Learning, Computer Science - Artificial Intelligence, Mathematics - Statistics Theory},
  file = {arXiv\:1106.6251 PDF:C\:\\Users\\z003jvyc\\Zotero\\storage\\R6PZ939E\\Alvarez et al. - 2011 - Kernels for Vector-Valued Functions a Review.pdf:application/pdf;arXiv.org Snapshot:C\:\\Users\\z003jvyc\\Zotero\\storage\\IFE9Z28Q\\1106.html:text/html}
}
@inproceedings{boyle_dependent_2004,
  title = {Dependent {Gaussian} {Processes}},
  volume = {17},
  url = {https://papers.nips.cc/paper/2561-dependent-gaussian-processes.pdf},
  urldate = {2017-01-27},
  booktitle = {{NIPS}},
  author = {Boyle, Phillip and Frean, Marcus R.},
  year = {2004},
  pages = {217--224},
  file = {[PDF] nips.cc:C\:\\Users\\z003jvyc\\Zotero\\storage\\HJT7BPIT\\Boyle and Frean - 2004 - Dependent Gaussian Processes..pdf:application/pdf}
}
@inproceedings{lazaro-gredilla_bayesian_2012,
  title = {Bayesian warped {Gaussian} processes},
  url = {http://papers.nips.cc/paper/4494-bayesian-warped-gaussian-processes},
  urldate = {2016-12-06},
  booktitle = {Advances in {Neural} {Information} {Processing} {Systems}},
  author = {Lázaro-Gredilla, Miguel},
  year = {2012},
  pages = {1619--1627},
  file = {[PDF] wustl.edu:C\:\\Users\\z003jvyc\\Zotero\\storage\\NTS9SDBA\\Lázaro-Gredilla - 2012 - Bayesian warped Gaussian processes.pdf:application/pdf;Snapshot:C\:\\Users\\z003jvyc\\Zotero\\storage\\HFAKAI4X\\4494-bayesian-warped-gaussian-processes.html:text/html}
}
@inproceedings{titsias_bayesian_2010,
  title = {Bayesian {Gaussian} process latent variable model},
  url = {http://machinelearning.wustl.edu/mlpapers/paper_files/AISTATS2010_TitsiasL10.pdf},
  urldate = {2016-02-01},
  booktitle = {International {Conference} on {Artificial} {Intelligence} and {Statistics}},
  author = {Titsias, Michalis K. and Lawrence, Neil D.},
  year = {2010},
  pages = {844--851},
  file = {[PDF] von wustl.edu:C\:\\Users\\z003jvyc\\Zotero\\storage\\5HPG3ZG9\\Titsias and Lawrence - 2010 - Bayesian Gaussian process latent variable model.pdf:application/pdf}
}
@book{coburn_geostatistics_2000,
  title = {Geostatistics for natural resources evaluation},
  publisher = {Taylor \& Francis Group},
  author = {Coburn, Timothy C.},
  year = {2000},
  file = {Snapshot:C\:\\Users\\z003jvyc\\Zotero\\storage\\QKNJSFHQ\\auD.html:text/html}
}
@book{journel_mining_1978,
  title = {Mining geostatistics},
  publisher = {Academic Press},
  author = {Journel, Andre G. and Huijbregts, Ch J.},
  year = {1978},
  file = {Snapshot:C\:\\Users\\z003jvyc\\Zotero\\storage\\T8DUJGCJ\\27687.pdf:application/pdf}
}
@article{soleimanzadeh_controller_2011,
  title = {Controller design for a wind farm, considering both power and load aspects},
  volume = {21},
  number = {4},
  journal = {Mechatronics},
  author = {Soleimanzadeh, Maryam and Wisniewski, Rafael},
  year = {2011},
  pages = {720--727},
  file = {Snapshot:C\:\\Users\\z003jvyc\\Zotero\\storage\\78N9YN8C\\S0957415811000328.html:text/html}
}
@inproceedings{bitar_coordinated_2013,
  title = {Coordinated control of a wind turbine array for power maximization},
  booktitle = {American {Control} {Conference} ({ACC}), 2013},
  publisher = {IEEE},
  author = {Bitar, Eilyan and Seiler, Pete},
  year = {2013},
  pages = {2898--2904},
  file = {Fulltext:C\:\\Users\\z003jvyc\\Zotero\\storage\\TQRZDYMQ\\coordinated-control-of-a-wind-turbine-array-for-power-maximizatio.html:text/html;Snapshot:C\:\\Users\\z003jvyc\\Zotero\\storage\\N8MHP7CD\\auD.html:text/html}
}
@inproceedings{schepers_improved_2007,
  title = {Improved modelling of wake aerodynamics and assessment of new farm control strategies},
  volume = {75},
  booktitle = {Journal of {Physics}: {Conference} {Series}},
  publisher = {IOP Publishing},
  author = {Schepers, J. G. and Van der Pijl, S. P.},
  year = {2007},
  pages = {012039},
  file = {Snapshot:C\:\\Users\\z003jvyc\\Zotero\\storage\\AUAMVYLD\\auD.html:text/html}
}
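
A quick way to sanity-check this export is to list its citation keys and entry types. Below is a minimal, stdlib-only Python sketch; the filename and the header regex are illustrative assumptions, and a dedicated parser such as bibtexparser is more robust if full field access is needed.

import re

# Minimal sketch: list entry types and citation keys from the export.
# Assumes the file is saved as "zotero_export.bib" next to this script.
with open("zotero_export.bib", encoding="utf-8") as f:
    bib = f.read()

# Matches entry headers such as "@article{damianou_deep_2012," and
# captures the entry type and the citation key.
entries = re.findall(r"@(\w+)\{([^,\s]+),", bib)

for entry_type, key in entries:
    print(f"{entry_type:>15}  {key}")
print(f"{len(entries)} entries total")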