
Commit

expose graph
joelberkeley committed Dec 4, 2023
1 parent 5d31e9c commit a70922c
Showing 21 changed files with 390 additions and 371 deletions.
18 changes: 9 additions & 9 deletions src/BayesianOptimization.idr
@@ -27,20 +27,20 @@ import Data
 import Model
 import Tensor
 
-||| A `Stream`-like collection where each successive element is wrapped in an additional `Ref`.
+||| A `Stream`-like collection where each successive element is wrapped in an additional `Graph`.
 public export
 data RefStream : Type -> Type where
-  (::) : a -> Inf (Ref (RefStream a)) -> RefStream a
+  (::) : a -> Inf (Graph (RefStream a)) -> RefStream a
 
-||| Take `n` values from a `RefStream`, sequencing the `Ref` effects.
+||| Take `n` values from a `RefStream`, sequencing the `Graph` effects.
 public export
-take : (n : Nat) -> RefStream a -> Ref $ Vect n a
+take : (n : Nat) -> RefStream a -> Graph $ Vect n a
 take Z _ = pure Nil
 take (S k) (x :: xs) = pure (x :: !(take k !xs))
 
 ||| Create an infinite stream of values from a generator function and a starting value.
 export covering
-iterate : (a -> Ref a) -> a -> Ref $ RefStream a
+iterate : (a -> Graph a) -> a -> Graph $ RefStream a
 iterate f x = do
   x' <- f x
   pure (x' :: iterate f x')
@@ -52,12 +52,12 @@ iterate f x = do
 ||| @tactic The tactic, such as an optimized acquisition function, to find a new point from the
 |||   data and model
 export
-step : (objective : forall n . Tensor (n :: features) F64 -> Ref $ Tensor (n :: targets) F64) ->
+step : (objective : forall n . Tensor (n :: features) F64 -> Graph $ Tensor (n :: targets) F64) ->
       (probabilisticModel : ProbabilisticModel features targets marginal model) =>
-      (train : Dataset features targets -> model -> Ref $ model) ->
-      (tactic : Reader (DataModel {probabilisticModel} model) (Ref $ Tensor (1 :: features) F64)) ->
+      (train : Dataset features targets -> model -> Graph $ model) ->
+      (tactic : Reader (DataModel {probabilisticModel} model) (Graph $ Tensor (1 :: features) F64)) ->
       DataModel {probabilisticModel} model ->
-      Ref $ DataModel {probabilisticModel} model
+      Graph $ DataModel {probabilisticModel} model
 step objective train tactic env = do
   newPoint <- runReader env tactic
   dataset <- concat env.dataset $ MkDataset newPoint !(objective newPoint)
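
For illustration only, a hedged sketch (not part of this commit) of how `step`, `iterate`, and `take` compose into an optimization loop after the rename; `objective`, `train`, and `tactic` are hypothetical stand-ins with the types `step` expects.

-- Run five Bayesian optimization iterations: unfold `step` into a `RefStream`
-- of successive data/model environments, then take a prefix. A sketch only;
-- `objective`, `train` and `tactic` are assumed, not defined in this commit.
optimize : DataModel {probabilisticModel} model ->
           Graph $ Vect 5 (DataModel {probabilisticModel} model)
optimize env = do
  envs <- iterate (step objective train tactic) env
  take 5 envs
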
2 changes: 1 addition & 1 deletion src/BayesianOptimization/Acquisition.idr
@@ -47,7 +47,7 @@ record DataModel modelType {auto probabilisticModel : ProbabilisticModel f t mar
 ||| @features The shape of the feature domain.
 public export 0
 Acquisition : (0 batchSize : Nat) -> {auto 0 _ : GT batchSize 0} -> (0 features : Shape) -> Type
-Acquisition batchSize features = Tensor (batchSize :: features) F64 -> Ref $ Tensor [] F64
+Acquisition batchSize features = Tensor (batchSize :: features) F64 -> Graph $ Tensor [] F64
 
 ||| Construct the acquisition function that estimates the absolute improvement in the best
 ||| observation if we were to evaluate the objective at a given point.
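
A hedged usage sketch (not in the commit): the simplest possible acquisition under the new alias, scoring a single candidate point by its posterior mean; implicit shape arguments may need spelling out in practice.

-- Score a batch of one candidate by the model's marginal mean at that point.
-- Assumes `squeeze` can infer its `from` shape here; a sketch, not library code.
meanScore : ProbabilisticModel features [1] marginal model =>
            model -> Acquisition 1 features
meanScore model points = squeeze !(mean !(marginalise model points))
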
2 changes: 1 addition & 1 deletion src/Constants.idr
@@ -18,5 +18,5 @@ module Constants
 import Tensor
 
 export
-pi : Ref $ Tensor [] F64
+pi : Graph $ Tensor [] F64
 pi = fromDouble pi
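
Since `pi` now lives in `Graph`, arithmetic composes on the wrapped values directly, assuming the numeric instances on `Graph $ Tensor [] F64` used elsewhere in this diff (e.g. `pure noise * identity`). A hedged sketch, not part of the commit:

-- tau = 2 * pi, built from the graph constant above; illustration only.
tau : Graph $ Tensor [] F64
tau = pi + pi
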
2 changes: 1 addition & 1 deletion src/Data.idr
@@ -40,6 +40,6 @@ record Dataset (0 featureShape, targetShape : Shape) where
 
 ||| Concatenate two datasets along their leading axis.
 export
-concat : Dataset features targets -> Dataset features targets -> Ref $ Dataset features targets
+concat : Dataset features targets -> Dataset features targets -> Graph $ Dataset features targets
 concat (MkDataset {s=s} x y) (MkDataset {s=s'} x' y') =
   [| MkDataset {s=s + S s'} (concat 0 x x') (concat 0 y y') |]
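
A hedged sketch (not in the commit), mirroring the use in `step`: append a single observed point to a dataset, with the implicit size `s = 0` inferred.

-- Extend a dataset by one (x, y) observation inside `Graph`.
augment : Dataset features targets ->
          Tensor (1 :: features) F64 -> Tensor (1 :: targets) F64 ->
          Graph $ Dataset features targets
augment dataset x y = concat dataset (MkDataset x y)
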
10 changes: 5 additions & 5 deletions src/Distribution.idr
@@ -30,14 +30,14 @@ import Constants
 public export
 interface Distribution (0 dist : (0 event : Shape) -> (0 dim : Nat) -> Type) where
   ||| The mean of the distribution.
-  mean : dist event dim -> Ref $ Tensor (dim :: event) F64
+  mean : dist event dim -> Graph $ Tensor (dim :: event) F64
 
   ||| The covariance, or correlation, between sub-events.
-  cov : dist event dim -> Ref $ Tensor (dim :: dim :: event) F64
+  cov : dist event dim -> Graph $ Tensor (dim :: dim :: event) F64
 
 ||| The variance of a single random variable.
 export
-variance : {event : _} -> Distribution dist => dist event 1 -> Ref $ Tensor (1 :: event) F64
+variance : {event : _} -> Distribution dist => dist event 1 -> Graph $ Tensor (1 :: event) F64
 variance dist = squeeze {from=(1 :: 1 :: event)} =<< cov dist
 
 ||| A joint, or multivariate distribution over a tensor of floating point values, where the density
@@ -52,11 +52,11 @@ interface Distribution dist =>
         ClosedFormDistribution (0 event : Shape)
                                (0 dist : (0 event : Shape) -> (0 dim : Nat) -> Type) where
   ||| The probability density function of the distribution at the specified point.
-  pdf : dist event (S d) -> Tensor (S d :: event) F64 -> Ref $ Tensor [] F64
+  pdf : dist event (S d) -> Tensor (S d :: event) F64 -> Graph $ Tensor [] F64
 
   ||| The cumulative distribution function of the distribution at the specified point (that is,
   ||| the probability the random variable takes a value less than or equal to the given point).
-  cdf : dist event (S d) -> Tensor (S d :: event) F64 -> Ref $ Tensor [] F64
+  cdf : dist event (S d) -> Tensor (S d :: event) F64 -> Graph $ Tensor [] F64
 
 ||| A joint Gaussian distribution.
 |||
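
A hedged sketch (not in the commit): the probability that a unary marginal falls at or below a threshold, via `cdf`; any `ClosedFormDistribution [1]` instance (e.g. a Gaussian) will do.

-- P(X <= threshold) for a single-point marginal, as a scalar graph value.
probBelow : ClosedFormDistribution [1] dist =>
            dist [1] 1 -> Tensor [1, 1] F64 -> Graph $ Tensor [] F64
probBelow marginal threshold = cdf marginal threshold
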
2 changes: 1 addition & 1 deletion src/Model.idr
@@ -32,4 +32,4 @@ interface Distribution marginal => ProbabilisticModel
     model | model
   where
     ||| Return the marginal distribution over the target domain at the specified feature values.
-    marginalise : model -> {n : _} -> Tensor (S n :: features) F64 -> Ref $ marginal targets (S n)
+    marginalise : model -> {n : _} -> Tensor (S n :: features) F64 -> Graph $ marginal targets (S n)
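
A hedged sketch (not in the commit): querying a model's marginal at a batch of three two-dimensional feature points.

-- The batch size 3 fixes the implicit `n` in `marginalise` to 2 (S 2 = 3).
predictAt : ProbabilisticModel [2] targets marginal model =>
            model -> Tensor [3, 2] F64 -> Graph $ marginal targets 3
predictAt model points = marginalise model points
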
12 changes: 6 additions & 6 deletions src/Model/GaussianProcess.idr
@@ -43,7 +43,7 @@ posterior :
   GaussianProcess features ->
   Tensor [] F64 ->
   {s : _} -> (Tensor ((S s) :: features) F64, Tensor [S s] F64) ->
-  Ref $ GaussianProcess features
+  Graph $ GaussianProcess features
 posterior (MkGP priorMeanf priorKernel) noise (xTrain, yTrain) = do
   l <- cholesky !(priorKernel xTrain xTrain + pure noise * identity)
   let alpha = (pure l).T \| (pure l |\ pure yTrain)
@@ -61,7 +61,7 @@ logMarginalLikelihood :
   GaussianProcess features ->
   Tensor [] F64 ->
   {s : _} -> (Tensor ((S s) :: features) F64, Tensor [S s] F64) ->
-  Ref $ Tensor [] F64
+  Graph $ Tensor [] F64
 logMarginalLikelihood (MkGP _ kernel) noise (x, y) = do
   l <- cholesky !(kernel x x + pure noise * identity)
   let alpha = (pure l).T \| (pure l |\ pure y)
@@ -86,7 +86,7 @@ data ConjugateGPRegression : (0 features : Shape) -> Type where
   ||| @noise The likelihood amplitude, or observation noise.
   MkConjugateGPR :
     {p : _} ->
-    (gpFromHyperparameters : Tensor [p] F64 -> Ref $ GaussianProcess features) ->
+    (gpFromHyperparameters : Tensor [p] F64 -> Graph $ GaussianProcess features) ->
     (hyperparameters : Tensor [p] F64) ->
     (noise : Tensor [] F64) ->
     ConjugateGPRegression features
@@ -111,16 +111,16 @@ export
 fit : (forall n . Tensor [n] F64 -> Optimizer $ Tensor [n] F64)
    -> Dataset features [1]
    -> ConjugateGPRegression features
-   -> Ref $ ConjugateGPRegression features
+   -> Graph $ ConjugateGPRegression features
 fit optimizer (MkDataset x y) (MkConjugateGPR {p} mkPrior gpParams noise) = do
-  let objective : Tensor [S p] F64 -> Ref $ Tensor [] F64
+  let objective : Tensor [S p] F64 -> Graph $ Tensor [] F64
       objective params = do
         priorParams <- slice [1.to (S p)] params
         logMarginalLikelihood !(mkPrior priorParams) !(slice [at 0] params) (x, !(squeeze y))
 
   params <- optimizer !(concat 0 !(expand 0 noise) gpParams) objective
 
-  let mkPosterior : Tensor [p] F64 -> Ref $ GaussianProcess features
+  let mkPosterior : Tensor [p] F64 -> Graph $ GaussianProcess features
       mkPosterior params' = posterior !(mkPrior params') !(squeeze noise) (x, !(squeeze y))
 
   pure $ MkConjugateGPR mkPosterior !(slice [1.to (S p)] params) !(slice [at 0] params)
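
A hedged sketch (not in the commit) showing two `fit` calls sequenced in `Graph`; `optimizer` here is an assumed hyperparameter optimizer of the shape `fit` expects.

-- Fit on one dataset, then refit the result on another.
fitTwice : (forall n . Tensor [n] F64 -> Optimizer $ Tensor [n] F64) ->
           Dataset features [1] -> Dataset features [1] ->
           ConjugateGPRegression features ->
           Graph $ ConjugateGPRegression features
fitTwice optimizer first second model = do
  model' <- fit optimizer first model
  fit optimizer second model'
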
4 changes: 2 additions & 2 deletions src/Model/Kernel.idr
@@ -30,14 +30,14 @@ Kernel features =
   {sk, sk' : _} ->
   Tensor (sk :: features) F64 ->
   Tensor (sk' :: features) F64 ->
-  Ref $ Tensor [sk, sk'] F64
+  Graph $ Tensor [sk, sk'] F64
 
 scaledL2Norm :
   Tensor [] F64 ->
   {d, n, n' : _} ->
   Tensor [n, S d] F64 ->
   Tensor [n', S d] F64 ->
-  Ref $ Tensor [n, n'] F64
+  Graph $ Tensor [n, n'] F64
 scaledL2Norm len x x' =
   let xs = broadcast {to=[n, n', S d]} =<< expand 1 x
    in reduce @{Sum} [2] =<< ((xs - broadcast !(expand 0 x')) / pure len) ^ fill 2.0
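
Since `Kernel` is just a type alias, any kernel value maps two feature batches to their Gram matrix inside `Graph`. A hedged sketch, not part of the commit:

-- Concrete shapes: 3 and 4 points of 2 features give a 3x4 Gram matrix.
gramMatrix : Kernel [2] -> Tensor [3, 2] F64 -> Tensor [4, 2] F64 -> Graph $ Tensor [3, 4] F64
gramMatrix kernel xs ys = kernel xs ys
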
2 changes: 1 addition & 1 deletion src/Model/MeanFunction.idr
@@ -25,7 +25,7 @@ import Tensor
 ||| @features The shape of the feature domain.
 public export 0
 MeanFunction : (0 features : Shape) -> Type
-MeanFunction features = {sm : _} -> Tensor (sm :: features) F64 -> Ref $ Tensor [sm] F64
+MeanFunction features = {sm : _} -> Tensor (sm :: features) F64 -> Graph $ Tensor [sm] F64
 
 ||| A mean function where the mean is zero in all target dimensions.
 export
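
A hedged sketch (not in the commit) of a constant mean function under the new alias, assuming `broadcast` can infer its target shape `[sm]` from the return type:

-- Ignore the feature values and return the constant c at every point.
constMean : Tensor [] F64 -> MeanFunction features
constMean c _ = broadcast c
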
2 changes: 1 addition & 1 deletion src/Optimize.idr
@@ -24,7 +24,7 @@ import Tensor
 ||| @domain The type of the domain over which to find the optimal value.
 public export 0
 Optimizer : (0 domain : Type) -> Type
-Optimizer a = (a -> Ref $ Tensor [] F64) -> Ref a
+Optimizer a = (a -> Graph $ Tensor [] F64) -> Graph a
 
 ||| Construct an `Optimizer` that implements grid search over a scalar feature space. Grid search
 ||| approximates the optimum by evaluating the objective over a finite, evenly-spaced grid.
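
A hedged sketch (not in the commit): applying an `Optimizer` to a simple objective, using `reduce @{Sum}` as seen elsewhere in this diff to collapse a vector to a scalar.

-- Ask an optimizer for the point in [2]-vector space optimising the sum of
-- coordinates (the alias itself does not fix a direction).
optimiseSum : Optimizer (Tensor [2] F64) -> Graph $ Tensor [2] F64
optimiseSum opt = opt (reduce @{Sum} [0])
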