
@@ -1,8 +1,8 @@

1

1


2

2

@article{damianou_deep_2012,

3


 title = {Deep Gaussian Processes},


3

+ title = {Deep {Gaussian} Processes},

4

4

url = {http://arxiv.org/abs/1211.0358},

5


 abstract = {In this paper we introduce deep Gaussian process ({GP}) models. Deep {GPs} are a deep belief network based on Gaussian process mappings. The data is modeled as the output of a multivariate {GP}. The inputs to that Gaussian process are then governed by another {GP}. A single layer model is equivalent to a standard {GP} or the {GP} latent variable model ({GP}{LVM}). We perform inference in the model by approximate variational marginalization. This results in a strict lower bound on the marginal likelihood of the model which we use for model selection (number of layers and nodes per layer). Deep belief networks are typically applied to relatively large data sets using stochastic gradient descent for optimization. Our fully Bayesian treatment allows for the application of deep models even when data is scarce. Model selection by our variational bound shows that a five layer hierarchy is justified even when modelling a digit data set containing only 150 examples.},


5

+ abstract = {In this paper we introduce deep {G}aussian process ({GP}) models. Deep {GPs} are a deep belief network based on {G}aussian process mappings. The data is modeled as the output of a multivariate {GP}. The inputs to that {G}aussian process are then governed by another {GP}. A single layer model is equivalent to a standard {GP} or the {GP} latent variable model ({GP}{LVM}). We perform inference in the model by approximate variational marginalization. This results in a strict lower bound on the marginal likelihood of the model which we use for model selection (number of layers and nodes per layer). Deep belief networks are typically applied to relatively large data sets using stochastic gradient descent for optimization. Our fully {B}ayesian treatment allows for the application of deep models even when data is scarce. Model selection by our variational bound shows that a five layer hierarchy is justified even when modelling a digit data set containing only 150 examples.},

6

6

journaltitle = {{arXiv}:1211.0358 [cs, math, stat]},

7

7

author = {Damianou, Andreas C. and Lawrence, Neil D.},

8

8

urldate = {2016-09-05},


@@ -10,45 +10,45 @@

10

10

eprinttype = {arxiv},

11

11

eprint = {1211.0358},

12

12

keywords = {60G15, 58E30, Computer Science - Learning, G.1.2, G.3, I.2.6, Mathematics - Probability, Statistics - Machine Learning},

13


 file = {arXiv\:1211.0358 PDF:/home/markus/sync/zotero/storage/BUXWE2UV/Damianou and Lawrence  2012  Deep Gaussian Processes.pdf:application/pdf;arXiv.org Snapshot:/home/markus/sync/zotero/storage/S2KB72DK/1211.html:text/html}


13

+ file = {arXiv\:1211.0358 PDF:/home/markus/sync/zotero/storage/BUXWE2UV/Damianou and Lawrence - 2012 - Deep Gaussian Processes.pdf:application/pdf;arXiv.org Snapshot:/home/markus/sync/zotero/storage/S2KB72DK/1211.html:text/html}

14

14

}

15

15


16

16

@article{hensman_gaussian_2013,

17


 title = {Gaussian Processes for Big Data},


17

+ title = {{Gaussian} Processes for Big Data},

18

18

url = {http://arxiv.org/abs/1309.6835},

19


 abstract = {We introduce stochastic variational inference for Gaussian process models. This enables the application of Gaussian process ({GP}) models to data sets containing millions of data points. We show how {GPs} can be vari ationally decomposed to depend on a set of globally relevant inducing variables which factorize the model in the necessary manner to perform variational inference. Our ap proach is readily extended to models with nonGaussian likelihoods and latent variable models based around Gaussian processes. We demonstrate the approach on a simple toy problem and two real world data sets.},


19

+ abstract = {We introduce stochastic variational inference for {G}aussian process models. This enables the application of {G}aussian process ({GP}) models to data sets containing millions of data points. We show how {GPs} can be vari ationally decomposed to depend on a set of globally relevant inducing variables which factorize the model in the necessary manner to perform variational inference. Our ap proach is readily extended to models with non{G}aussian likelihoods and latent variable models based around {G}aussian processes. We demonstrate the approach on a simple toy problem and two real world data sets.},

20

20

journaltitle = {{arXiv}:1309.6835 [cs, stat]},

21

21

author = {Hensman, James and Fusi, Nicolo and Lawrence, Neil D.},

22

22

urldate = {2016-07-06},

23

23

date = {2013-09-26},

24

24

keywords = {Computer Science - Learning, Statistics - Machine Learning},

25


 file = {arXiv\:1309.6835 PDF:/home/markus/sync/zotero/storage/EU8WZFR4/Hensman et al.  2013  Gaussian Processes for Big Data.pdf:application/pdf;arXiv\:1309.6835 PDF:/home/markus/sync/zotero/storage/XV3VH9PJ/Hensman et al.  2013  Gaussian Processes for Big Data.pdf:application/pdf;arXiv.org Snapshot:/home/markus/sync/zotero/storage/2JAR4BNM/1309.html:text/html;arXiv.org Snapshot:/home/markus/sync/zotero/storage/ISZ4Z86Q/1309.html:text/html}


25

+ file = {arXiv\:1309.6835 PDF:/home/markus/sync/zotero/storage/EU8WZFR4/Hensman et al. - 2013 - Gaussian Processes for Big Data.pdf:application/pdf;arXiv\:1309.6835 PDF:/home/markus/sync/zotero/storage/XV3VH9PJ/Hensman et al. - 2013 - Gaussian Processes for Big Data.pdf:application/pdf;arXiv.org Snapshot:/home/markus/sync/zotero/storage/2JAR4BNM/1309.html:text/html;arXiv.org Snapshot:/home/markus/sync/zotero/storage/ISZ4Z86Q/1309.html:text/html}

26

26

}

27

27


28

28

@inproceedings{titsias_bayesian_2010,

29


 title = {Bayesian Gaussian process latent variable model},


29

+ title = {{Bayesian} {Gaussian} process latent variable model},

30

30

url = {http://machinelearning.wustl.edu/mlpapers/paper_files/AISTATS2010_TitsiasL10.pdf},

31

31

pages = {844--851},

32

32

booktitle = {International Conference on Artificial Intelligence and Statistics},

33

33

author = {Titsias, Michalis K. and Lawrence, Neil D.},

34

34

urldate = {2016-02-01},

35

35

date = {2010},

36


 file = {[PDF] von wustl.edu:/home/markus/sync/zotero/storage/5HPG3ZG9/Titsias and Lawrence  2010  Bayesian Gaussian process latent variable model.pdf:application/pdf}


36

+ file = {[PDF] von wustl.edu:/home/markus/sync/zotero/storage/5HPG3ZG9/Titsias and Lawrence - 2010 - Bayesian Gaussian process latent variable model.pdf:application/pdf}

37

37

}

38

38


39

39

@inproceedings{lazarogredilla_bayesian_2012,

40


 title = {Bayesian warped Gaussian processes},


40

+ title = {{Bayesian} warped {Gaussian} processes},

41

41

url = {http://papers.nips.cc/paper/4494-bayesian-warped-gaussian-processes},

42

42

pages = {1619--1627},

43

43

booktitle = {Advances in Neural Information Processing Systems},

44

44

author = {Lázaro-Gredilla, Miguel},

45

45

urldate = {2016-12-06},

46

46

date = {2012},

47


 file = {[PDF] wustl.edu:/home/markus/sync/zotero/storage/NTS9SDBA/LázaroGredilla  2012  Bayesian warped Gaussian processes.pdf:application/pdf;Snapshot:/home/markus/sync/zotero/storage/HFAKAI4X/4494bayesianwarpedgaussianprocesses.html:text/html}


47

+ file = {[PDF] wustl.edu:/home/markus/sync/zotero/storage/NTS9SDBA/Lázaro-Gredilla - 2012 - Bayesian warped Gaussian processes.pdf:application/pdf;Snapshot:/home/markus/sync/zotero/storage/HFAKAI4X/4494-bayesian-warped-gaussian-processes.html:text/html}

48

48

}

49

49


50

50

@inproceedings{boyle_dependent_2004,

51


 title = {Dependent Gaussian Processes.},


51

+ title = {Dependent {Gaussian} Processes.},

52

52

volume = {17},

53

53

url = {https://papers.nips.cc/paper/2561-dependent-gaussian-processes.pdf},

54

54

pages = {217--224},


@@ -56,14 +56,14 @@

56

56

author = {Boyle, Phillip and Frean, Marcus R.},

57

57

urldate = {2017-01-27},

58

58

date = {2004},

59


 file = {[PDF] nips.cc:/home/markus/sync/zotero/storage/HJT7BPIT/Boyle and Frean  2004  Dependent Gaussian Processes..pdf:application/pdf}


59

+ file = {[PDF] nips.cc:/home/markus/sync/zotero/storage/HJT7BPIT/Boyle and Frean - 2004 - Dependent Gaussian Processes..pdf:application/pdf}

60

60

}

61

61


62

62

@article{alvarez_kernels_2011,

63

63

title = {Kernels for Vector-Valued Functions: a Review},

64

64

url = {http://arxiv.org/abs/1106.6251},

65

65

shorttitle = {Kernels for Vector-Valued Functions},

66


 abstract = {Kernel methods are among the most popular techniques in machine learning. From a frequentist/discriminative perspective they play a central role in regularization theory as they provide a natural choice for the hypotheses space and the regularization functional through the notion of reproducing kernel Hilbert spaces. From a Bayesian/generative perspective they are the key in the context of Gaussian processes, where the kernel function is also known as the covariance function. Traditionally, kernel methods have been used in supervised learning problem with scalar outputs and indeed there has been a considerable amount of work devoted to designing and learning kernels. More recently there has been an increasing interest in methods that deal with multiple outputs, motivated partly by frameworks like multitask learning. In this paper, we review different methods to design or learn valid kernel functions for multiple outputs, paying particular attention to the connection between probabilistic and functional methods.},


66

+ abstract = {Kernel methods are among the most popular techniques in machine learning. From a frequentist/discriminative perspective they play a central role in regularization theory as they provide a natural choice for the hypotheses space and the regularization functional through the notion of reproducing kernel Hilbert spaces. From a {B}ayesian/generative perspective they are the key in the context of {G}aussian processes, where the kernel function is also known as the covariance function. Traditionally, kernel methods have been used in supervised learning problem with scalar outputs and indeed there has been a considerable amount of work devoted to designing and learning kernels. More recently there has been an increasing interest in methods that deal with multiple outputs, motivated partly by frameworks like multitask learning. In this paper, we review different methods to design or learn valid kernel functions for multiple outputs, paying particular attention to the connection between probabilistic and functional methods.},

67

67

journaltitle = {{arXiv}:1106.6251 [cs, math, stat]},

68

68

author = {Alvarez, Mauricio A. and Rosasco, Lorenzo and Lawrence, Neil D.},

69

69

urldate = {2017-02-06},


@@ -75,9 +75,9 @@

75

75

}

76

76


77

77

@article{hensman_scalable_2014,

78


 title = {Scalable Variational Gaussian Process Classification},


78

+ title = {Scalable Variational {Gaussian} Process Classification},

79

79

url = {http://arxiv.org/abs/1411.2005},

80


 abstract = {Gaussian process classification is a popular method with a number of appealing properties. We show how to scale the model within a variational inducing point framework, outperforming the state of the art on benchmark datasets. Importantly, the variational formulation can be exploited to allow classification in problems with millions of data points, as we demonstrate in experiments.},


80

+ abstract = {{G}aussian process classification is a popular method with a number of appealing properties. We show how to scale the model within a variational inducing point framework, outperforming the state of the art on benchmark datasets. Importantly, the variational formulation can be exploited to allow classification in problems with millions of data points, as we demonstrate in experiments.},

81

81

journaltitle = {{arXiv}:1411.2005 [stat]},

82

82

author = {Hensman, James and Matthews, Alex and Ghahramani, Zoubin},

83

83

urldate = {2017-02-13},


@@ -85,19 +85,19 @@

85

85

eprinttype = {arxiv},

86

86

eprint = {1411.2005},

87

87

keywords = {Statistics - Machine Learning},

88


 file = {arXiv\:1411.2005 PDF:/home/markus/sync/zotero/storage/T4WFAQPK/Hensman et al.  2014  Scalable Variational Gaussian Process Classificati.pdf:application/pdf;arXiv.org Snapshot:/home/markus/sync/zotero/storage/5GEKF8R7/1411.html:text/html}


88

+ file = {arXiv\:1411.2005 PDF:/home/markus/sync/zotero/storage/T4WFAQPK/Hensman et al. - 2014 - Scalable Variational Gaussian Process Classificati.pdf:application/pdf;arXiv.org Snapshot:/home/markus/sync/zotero/storage/5GEKF8R7/1411.html:text/html}

89

89

}

90

90


91

91

@report{boyle_multiple_2005,

92

92

title = {Multiple output gaussian process regression},

93


 abstract = {Gaussian processes are usually parameterised in terms of their covariance functions. However, this makes it difficult to deal with multiple outputs, because ensuring that the covariance matrix is positive definite is problematic. An alternative formulation is to treat Gaussian processes as white noise sources convolved with smoothing kernels, and to parameterise the kernel instead. Using this, we extend Gaussian processes to handle multiple, coupled outputs. 1},


93

+ abstract = {{G}aussian processes are usually parameterised in terms of their covariance functions. However, this makes it difficult to deal with multiple outputs, because ensuring that the covariance matrix is positive definite is problematic. An alternative formulation is to treat {G}aussian processes as white noise sources convolved with smoothing kernels, and to parameterise the kernel instead. Using this, we extend {G}aussian processes to handle multiple, coupled outputs. 1},

94

94

author = {Boyle, Phillip and Frean, Marcus and Boyle, Phillip and Frean, Marcus},

95

95

date = {2005},

96

96

file = {Citeseer  Full Text PDF:/home/markus/sync/zotero/storage/STU7NV59/Boyle et al.  2005  Multiple output gaussian process regression.pdf:application/pdf;Citeseer  Snapshot:/home/markus/sync/zotero/storage/ZWMMCM3F/summary.html:text/html}

97

97

}

98

98


99

99

@inproceedings{alvarez_efficient_2010,

100


 title = {Efficient Multioutput Gaussian Processes through Variational Inducing Kernels.},


100

+ title = {Efficient Multioutput {Gaussian} Processes through Variational Inducing Kernels.},

101

101

volume = {9},

102

102

url = {http://www.jmlr.org/proceedings/papers/v9/alvarez10a/alvarez10a.pdf},

103

103

pages = {25--32},


@@ -105,11 +105,11 @@

105

105

author = {Alvarez, Mauricio A. and Luengo, David and Titsias, Michalis K. and Lawrence, Neil D.},

106

106

urldate = {2017-03-02},

107

107

date = {2010},

108


 file = {[PDF] jmlr.org:/home/markus/sync/zotero/storage/6Q4I9FRF/Alvarez et al.  2010  Efficient Multioutput Gaussian Processes through V.pdf:application/pdf}


108

+ file = {[PDF] jmlr.org:/home/markus/sync/zotero/storage/6Q4I9FRF/Alvarez et al. - 2010 - Efficient Multioutput Gaussian Processes through V.pdf:application/pdf}

109

109

}

110

110


111

111

@inproceedings{titsias_variational_2009,

112


 title = {Variational Learning of Inducing Variables in Sparse Gaussian Processes.},


112

+ title = {Variational Learning of Inducing Variables in Sparse {Gaussian} Processes.},

113

113

volume = {5},

114

114

url = {http://www.jmlr.org/proceedings/papers/v5/titsias09a/titsias09a.pdf},

115

115

pages = {567--574},


@@ -121,9 +121,9 @@

121

121

}

122

122


123

123

@article{salimbeni_doubly_2017,

124


 title = {Doubly Stochastic Variational Inference for Deep Gaussian Processes},


124

+ title = {Doubly Stochastic Variational Inference for Deep {Gaussian} Processes},

125

125

url = {http://arxiv.org/abs/1705.08933},

126


 abstract = {Gaussian processes ({GPs}) are a good choice for function approximation as they are flexible, robust to overfitting, and provide wellcalibrated predictive uncertainty. Deep Gaussian processes ({DGPs}) are multilayer generalisations of {GPs}, but inference in these models has proved challenging. Existing approaches to inference in {DGP} models assume approximate posteriors that force independence between the layers, and do not work well in practice. We present a doubly stochastic variational inference algorithm, which does not force independence between layers. With our method of inference we demonstrate that a {DGP} model can be used effectively on data ranging in size from hundreds to a billion points. We provide strong empirical evidence that our inference scheme for {DGPs} works well in practice in both classification and regression.},


126

+ abstract = {{G}aussian processes ({GPs}) are a good choice for function approximation as they are flexible, robust to overfitting, and provide wellcalibrated predictive uncertainty. Deep {G}aussian processes ({DGPs}) are multilayer generalisations of {GPs}, but inference in these models has proved challenging. Existing approaches to inference in {DGP} models assume approximate posteriors that force independence between the layers, and do not work well in practice. We present a doubly stochastic variational inference algorithm, which does not force independence between layers. With our method of inference we demonstrate that a {DGP} model can be used effectively on data ranging in size from hundreds to a billion points. We provide strong empirical evidence that our inference scheme for {DGPs} works well in practice in both classification and regression.},

127

127

journaltitle = {{arXiv}:1705.08933 [stat]},

128

128

author = {Salimbeni, Hugh and Deisenroth, Marc},

129

129

urldate = {2017-06-02},


@@ -135,20 +135,20 @@

135

135

}

136

136


137

137

@inproceedings{alvarez_sparse_2009,

138


 title = {Sparse convolved Gaussian processes for multioutput regression},


138

+ title = {Sparse convolved {Gaussian} processes for multi-output regression},

139

139

url = {http://papers.nips.cc/paper/3553-sparse-convolved-gaussian-processes-for-multi-output-regression},

140

140

pages = {57--64},

141

141

booktitle = {Advances in neural information processing systems},

142

142

author = {Alvarez, Mauricio and Lawrence, Neil D.},

143

143

urldate = {2017-07-14},

144

144

date = {2009},

145


 file = {[PDF] nips.cc:/home/markus/sync/zotero/storage/SIZMYY5F/Alvarez and Lawrence  2009  Sparse convolved Gaussian processes for multioutp.pdf:application/pdf;Snapshot:/home/markus/sync/zotero/storage/A2QU9XT7/3553sparseconvolvedgaussianprocessesformultioutputregression.html:text/html}


145

+ file = {[PDF] nips.cc:/home/markus/sync/zotero/storage/SIZMYY5F/Alvarez and Lawrence - 2009 - Sparse convolved Gaussian processes for multi-outp.pdf:application/pdf;Snapshot:/home/markus/sync/zotero/storage/A2QU9XT7/3553-sparse-convolved-gaussian-processes-for-multi-output-regression.html:text/html}

146

146

}

147

147


148

148

@article{hensman_nested_2014,

149


 title = {Nested Variational Compression in Deep Gaussian Processes},


149

+ title = {Nested Variational Compression in Deep {Gaussian} Processes},

150

150

url = {http://arxiv.org/abs/1412.1370},

151


 abstract = {Deep Gaussian processes provide a flexible approach to probabilistic modelling of data using either supervised or unsupervised learning. For tractable inference approximations to the marginal likelihood of the model must be made. The original approach to approximate inference in these models used variational compression to allow for approximate variational marginalization of the hidden variables leading to a lower bound on the marginal likelihood of the model [Damianou and Lawrence, 2013]. In this paper we extend this idea with a nested variational compression. The resulting lower bound on the likelihood can be easily parallelized or adapted for stochastic variational inference.},


151

+ abstract = {Deep {G}aussian processes provide a flexible approach to probabilistic modelling of data using either supervised or unsupervised learning. For tractable inference approximations to the marginal likelihood of the model must be made. The original approach to approximate inference in these models used variational compression to allow for approximate variational marginalization of the hidden variables leading to a lower bound on the marginal likelihood of the model [Damianou and Lawrence, 2013]. In this paper we extend this idea with a nested variational compression. The resulting lower bound on the likelihood can be easily parallelized or adapted for stochastic variational inference.},

152

152

journaltitle = {{arXiv}:1412.1370 [stat]},

153

153

author = {Hensman, James and Lawrence, Neil D.},

154

154

urldate = {2017-07-19},


@@ -156,13 +156,13 @@

156

156

eprinttype = {arxiv},

157

157

eprint = {1412.1370},

158

158

keywords = {Statistics - Machine Learning},

159


 file = {arXiv\:1412.1370 PDF:/home/markus/sync/zotero/storage/ZKNA6NYN/Hensman and Lawrence  2014  Nested Variational Compression in Deep Gaussian Pr.pdf:application/pdf;arXiv.org Snapshot:/home/markus/sync/zotero/storage/UMQ96R94/1412.html:text/html}


159

+ file = {arXiv\:1412.1370 PDF:/home/markus/sync/zotero/storage/ZKNA6NYN/Hensman and Lawrence - 2014 - Nested Variational Compression in Deep Gaussian Pr.pdf:application/pdf;arXiv.org Snapshot:/home/markus/sync/zotero/storage/UMQ96R94/1412.html:text/html}

160

160

}

161

161


162

162

@article{snoek_input_2014,

163


 title = {Input Warping for Bayesian Optimization of Nonstationary Functions},


163

+ title = {Input Warping for {Bayesian} Optimization of Non-stationary Functions},

164

164

url = {http://arxiv.org/abs/1402.0929},

165


 abstract = {Bayesian optimization has proven to be a highly effective methodology for the global optimization of unknown, expensive and multimodal functions. The ability to accurately model distributions over functions is critical to the effectiveness of Bayesian optimization. Although Gaussian processes provide a flexible prior over functions which can be queried efficiently, there are various classes of functions that remain difficult to model. One of the most frequently occurring of these is the class of nonstationary functions. The optimization of the hyperparameters of machine learning algorithms is a problem domain in which parameters are often manually transformed a priori, for example by optimizing in "logspace," to mitigate the effects of spatiallyvarying length scale. We develop a methodology for automatically learning a wide family of bijective transformations or warpings of the input space using the Beta cumulative distribution function. We further extend the warping framework to multitask Bayesian optimization so that multiple tasks can be warped into a jointly stationary space. On a set of challenging benchmark optimization tasks, we observe that the inclusion of warping greatly improves on the stateoftheart, producing better results faster and more reliably.},


165

+ abstract = {{B}ayesian optimization has proven to be a highly effective methodology for the global optimization of unknown, expensive and multimodal functions. The ability to accurately model distributions over functions is critical to the effectiveness of {B}ayesian optimization. Although {G}aussian processes provide a flexible prior over functions which can be queried efficiently, there are various classes of functions that remain difficult to model. One of the most frequently occurring of these is the class of nonstationary functions. The optimization of the hyperparameters of machine learning algorithms is a problem domain in which parameters are often manually transformed a priori, for example by optimizing in "logspace," to mitigate the effects of spatiallyvarying length scale. We develop a methodology for automatically learning a wide family of bijective transformations or warpings of the input space using the Beta cumulative distribution function. We further extend the warping framework to multitask {B}ayesian optimization so that multiple tasks can be warped into a jointly stationary space. On a set of challenging benchmark optimization tasks, we observe that the inclusion of warping greatly improves on the stateoftheart, producing better results faster and more reliably.},

166

166

journaltitle = {{arXiv}:1402.0929 [cs, stat]},

167

167

author = {Snoek, Jasper and Swersky, Kevin and Zemel, Richard S. and Adams, Ryan P.},

168

168

urldate = {2017-07-31},


@@ -170,11 +170,11 @@

170

170

eprinttype = {arxiv},

171

171

eprint = {1402.0929},

172

172

keywords = {Computer Science - Learning, Statistics - Machine Learning},

173


 file = {arXiv\:1402.0929 PDF:/home/markus/sync/zotero/storage/ELZ9CMFF/Snoek et al.  2014  Input Warping for Bayesian Optimization of Nonsta.pdf:application/pdf;arXiv.org Snapshot:/home/markus/sync/zotero/storage/5ECB3EET/1402.html:text/html}


173

+ file = {arXiv\:1402.0929 PDF:/home/markus/sync/zotero/storage/ELZ9CMFF/Snoek et al. - 2014 - Input Warping for Bayesian Optimization of Non-sta.pdf:application/pdf;arXiv.org Snapshot:/home/markus/sync/zotero/storage/5ECB3EET/1402.html:text/html}

174

174

}

175

175


176

176

@article{matthews_gpflow:_2017,

177


 title = {{GPflow}: A Gaussian process library using {TensorFlow}},


177

+ title = {{GPflow}: A {Gaussian} process library using {TensorFlow}},

178

178

volume = {18},

179

179

url = {http://www.jmlr.org/papers/volume18/16-537/16-537.pdf},

180

180

shorttitle = {{GPflow}},


@@ -184,5 +184,5 @@

184

184

author = {Matthews, Alexander G. de G. and van der Wilk, Mark and Nickson, Tom and Fujii, Keisuke and Boukouvalas, Alexis and LeónVillagrá, Pablo and Ghahramani, Zoubin and Hensman, James},

185

185

urldate = {2017-09-27},

186

186

date = {2017},

187


 file = {Full Text:/home/markus/sync/zotero/storage/X6QGAFR8/Matthews et al.  2017  GPflow A Gaussian process library using TensorFlo.pdf:application/pdf}


187

+ file = {Full Text:/home/markus/sync/zotero/storage/X6QGAFR8/Matthews et al. - 2017 - GPflow A Gaussian process library using TensorFlo.pdf:application/pdf}

188

188

}
