17 | module Model.GaussianProcess
19 | import Control.Relation
27 | import Model.MeanFunction
-- A Gaussian process: a distribution over functions from `features`-shaped
-- points to real values, specified by a mean function and a covariance
-- (kernel) function.
-- NOTE(review): the leading `37 | `-style numbers on every line of this file
-- look like pasted listing line numbers, not Idris syntax — confirm and strip.
37 | data GaussianProcess : (0 features : Shape) -> Type where
-- Construct a GP from its mean function and kernel.
39 | MkGP : MeanFunction features -> Kernel features -> GaussianProcess features
-- Conjugate (exact) GP regression update: condition a prior GP on training
-- data (xTrain, yTrain) to obtain the posterior GP, using the standard
-- Cholesky formulation.
-- NOTE(review): original lines 41 and 43 are missing from this excerpt — the
-- `posterior :` signature head and, presumably, the observation-noise
-- parameter (`noise` is bound in the clause below) — confirm against the
-- full file.
42 | GaussianProcess features ->
44 | {s : _} -> (Tensor ((S s) :: features) F64, Tensor [S s] F64) ->
45 | Tag $
GaussianProcess features
46 | posterior (MkGP priorMeanf priorKernel) noise (xTrain, yTrain) = do
-- l is the lower-triangular Cholesky factor of K(X, X) + noise * I.
47 | l <- tag !(cholesky $
!(priorKernel xTrain xTrain) + noise * identity)
-- alpha = (L Lᵀ)⁻¹ yTrain via two triangular solves (`|\` against L,
-- `\|` against its transpose).
48 | let alpha = l.T \| (l |\ yTrain)
-- Posterior mean: m(x) + k(x, X) · alpha.
50 | posteriorMeanf : MeanFunction features
51 | posteriorMeanf x = pure $
!(priorMeanf x) + !(priorKernel x xTrain) @@ alpha
-- Posterior covariance: k(x, x') - vᵀ v', where v = L⁻¹ k(X, x).
53 | posteriorKernel : Kernel features
54 | posteriorKernel x x' = pure
55 | $
!(priorKernel x x') - (l |\ !(priorKernel xTrain x)).T @@ (l |\ !(priorKernel xTrain x'))
57 | pure $
MkGP posteriorMeanf posteriorKernel
-- Log marginal likelihood log p(y | X) of the training targets under the GP
-- prior plus observation noise, via the Cholesky factor:
--   -½ yᵀ alpha - Σᵢ log Lᵢᵢ - (n/2) log 2π
-- (`trace (log l)` sums the log-diagonal of L, i.e. ½ log det K).
-- NOTE(review): original lines 61 and 63 are missing from this excerpt —
-- presumably the noise parameter and the return type (`Tag $ Tensor [] F64`,
-- to match the `pure` below) — confirm against the full file.
59 | logMarginalLikelihood :
60 | GaussianProcess features ->
62 | {s : _} -> (Tensor ((S s) :: features) F64, Tensor [S s] F64) ->
64 | logMarginalLikelihood (MkGP _ kernel) noise (x, y) = do
65 | l <- tag !(cholesky (!(kernel x x) + noise * identity))
-- alpha = (L Lᵀ)⁻¹ y, as in `posterior`.
66 | let alpha = l.T \| (l |\ y)
67 | pure $
- y @@ alpha / 2.0 - !(trace (log l)) - fromDouble (cast (S s)) * log (2.0 * pi) / 2.0
-- GP regression with a conjugate (Gaussian) likelihood: a
-- hyperparameter-indexed family of GPs, the current hyperparameter values,
-- and the scalar observation-noise variance.
-- NOTE(review): original lines 82-87 are missing from this excerpt; they
-- presumably contain the constructor name `MkConjugateGPR` (pattern-matched
-- later in this file) and the implicit `{p}` binding — confirm against the
-- full file.
81 | data ConjugateGPRegression : (0 features : Shape) -> Type where
88 | (gpFromHyperparameters : Tensor [p] F64 -> Tag $
GaussianProcess features) ->
89 | (hyperparameters : Tensor [p] F64) ->
90 | (noise : Tensor [] F64) ->
91 | ConjugateGPRegression features
-- Named instance: marginal distribution of the *latent* function (no
-- observation noise) at the query points `x`. `expand` inserts extra axes so
-- the mean and covariance match the `[1]`-target-shape Gaussian.
95 | [Latent] ProbabilisticModel features [1] Gaussian (ConjugateGPRegression features) where
96 | marginalise (MkConjugateGPR mkGP gpParams _) x = do
-- Build the GP from the current hyperparameters, then evaluate it at x.
97 | MkGP meanf kernel <- mkGP gpParams
-- [| ... |] is idiom-bracket (applicative) notation lifting MkGaussian.
98 | [| MkGaussian (expand 1 <$> meanf x) (expand 2 <$> kernel x x) |]
-- Named instance: marginal distribution of the *observed* values — the
-- latent marginal with the observation-noise variance added along the
-- covariance diagonal.
-- NOTE(review): the implicit `n` in `identity {n = S n}` is not bound in
-- this excerpt — presumably it comes from the (hidden) shape of `x` —
-- confirm against the full file.
102 | [Observed] ProbabilisticModel features [1] Gaussian (ConjugateGPRegression features) where
103 | marginalise gpr@(MkConjugateGPR _ _ noise) x = do
104 | MkGaussian latentMean latentCov <- marginalise @{Latent} gpr x
105 | pure $
MkGaussian latentMean $
latentCov + broadcast (expand 2 (noise * identity {n = S n}))
-- Fit the regression model to data by optimizing the noise variance and the
-- kernel hyperparameters jointly: the packed parameter vector stores noise
-- at index 0 and the p hyperparameters at indices 1..p. Returns the model
-- with updated parameters, whose GP is conditioned on the data via
-- `posterior`.
-- NOTE(review): assumes `optimizer` maximizes its objective (here the log
-- marginal likelihood) — confirm against the Optimizer contract.
109 | fit : (forall n . Tensor [n] F64 -> Optimizer $
Tensor [n] F64)
110 | -> Dataset features [1]
111 | -> ConjugateGPRegression features
112 | -> Tag $
ConjugateGPRegression features
113 | fit optimizer (MkDataset x y) (MkConjugateGPR {p} mkPrior gpParams noise) = do
-- Objective over the packed [noise, hyperparameters...] vector.
114 | let objective : Tensor [S p] F64 -> Tag $
Tensor [] F64
115 | objective params = do
116 | let priorParams = slice [1.to (S p)] params
117 | logMarginalLikelihood !(mkPrior priorParams) (slice [at 0] params) (x, squeeze y)
-- Starting point: current noise concatenated in front of the current
-- hyperparameters.
119 | params <- optimizer (concat 0 (expand 0 noise) gpParams) objective
-- NOTE(review): mkPosterior conditions with the *pre-optimization* `noise`,
-- while the returned record stores the optimized noise
-- (`slice [at 0] params`) — possible bug; confirm intent.
121 | let mkPosterior : Tensor [p] F64 -> Tag $
GaussianProcess features
122 | mkPosterior params' = posterior !(mkPrior params') (squeeze noise) (x, squeeze y)
124 | pure $
MkConjugateGPR mkPosterior (slice [1.to (S p)] params) (slice [at 0] params)
-- Reflexivity of `LTE` at any Nat, specialized from the general
-- Control.Relation interface — presumably defined here so the name resolves
-- unambiguously at its use sites; confirm against callers.
128 | reflexive : {n : _} -> LTE n n
129 | reflexive = Relation.reflexive {ty = Nat}