
Prior Beliefs

You can use SigOpt to take advantage of your domain expertise about how metric values behave for certain parameters. Supplying these prior beliefs makes the optimization process more efficient.

Defining the Prior Belief

A prior belief can be defined through the prior field for each continuous parameter. By default, SigOpt assumes that all parameters are uniformly distributed.
When a prior belief is set for a parameter, SigOpt is more likely to suggest values from regions of that parameter where the probability density function (PDF) is high, especially early in the SigOpt Experiment. Roughly speaking, a parameter value where the PDF equals 2 is twice as likely to be suggested as one where the PDF equals 1. The effect of a prior belief is most noticeable during the initial portion of an experiment.
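As a rough illustration of this weighting, the short Python sketch below compares the PDF of a normal prior at two candidate values. It uses scipy purely for illustration; scipy is not part of the SigOpt client, and the specific numbers are hypothetical.

# Illustrative sketch only: scipy is assumed to be installed and is not part
# of the SigOpt client; the prior parameters here are hypothetical.
from scipy.stats import norm

prior = norm(loc=0.6, scale=0.15)  # e.g. a normal prior on a parameter bounded in [0, 1]

pdf_near_mean = prior.pdf(0.6)   # high density near the prior mean
pdf_in_tail = prior.pdf(0.85)    # lower density toward the tail

# Early in the experiment, a value with twice the PDF is roughly twice as
# likely to be suggested, so this ratio indicates the relative weighting.
print(pdf_near_mean / pdf_in_tail)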

Example: Normally Distributed Parameters

Suppose that in previous experiments you have observed that the best-performing values of the log_learning_rate parameter appear to be normally distributed.
Declaring this behavior at the start of your next optimization experiment can warm-start the process and produce a lift in performance. For further information, check out our blog post and our webinar.
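As an illustrative sketch (not taken from the example experiment below), such a parameter definition might look like the following; the bounds, mean, and scale values are hypothetical placeholders that should reflect your own observations.

# Hypothetical parameter definition with a normal prior; the bounds, mean,
# and scale values are placeholders, not recommended values.
dict(
  name="log_learning_rate",
  bounds=dict(
    min=-6,
    max=0
  ),
  prior=dict(
    name="normal",
    mean=-3,    # center suggestions near the region that performed well previously
    scale=1     # a larger scale expresses a weaker belief
  ),
  type="double"
)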

Creating an Experiment with Prior Beliefs

Core Module

Python
Bash
Java
from sigopt import Connection

conn = Connection(client_token="USER_TOKEN")

experiment = conn.experiments().create(
  name="xgboost with prior beliefs",
  parameters=[
    dict(
      name="log10_learning_rate",
      bounds=dict(
        min=-4,
        max=0
      ),
      prior=dict(
        name="beta",
        shape_a=2,
        shape_b=4.5
      ),
      type="double"
    ),
    dict(
      name="max_depth",
      bounds=dict(
        min=3,
        max=12
      ),
      type="int"
    ),
    dict(
      name="colsample_bytree",
      bounds=dict(
        min=0,
        max=1
      ),
      prior=dict(
        name="normal",
        mean=0.6,
        scale=0.15
      ),
      type="double"
    )
  ],
  metrics=[
    dict(
      name="AUPRC",
      objective="maximize",
      strategy="optimize"
    )
  ],
  observation_budget=65,
  parallel_bandwidth=2,
  type="offline"
)
print("Created experiment: https://app.sigopt.com/experiment/" + experiment.id)
EXPERIMENT=`curl -s -X POST https://api.sigopt.com/v1/experiments -u "$SIGOPT_API_TOKEN": \
  -H "Content-Type: application/json" -d "$(cat experiment_meta.json)"`
JSON file defining the Experiment:
experiment_meta.json
{
  "name": "xgboost with prior beliefs",
  "parameters": [
    {
      "name": "log10_learning_rate",
      "bounds": {
        "min": -4,
        "max": 0
      },
      "prior": {
        "name": "beta",
        "shape_a": 2,
        "shape_b": 4.5
      },
      "type": "double"
    },
    {
      "name": "max_depth",
      "bounds": {
        "min": 3,
        "max": 12
      },
      "type": "int"
    },
    {
      "name": "colsample_bytree",
      "bounds": {
        "min": 0,
        "max": 1
      },
      "prior": {
        "name": "normal",
        "mean": 0.6,
        "scale": 0.15
      },
      "type": "double"
    }
  ],
  "metrics": [
    {
      "name": "AUPRC",
      "objective": "maximize",
      "strategy": "optimize"
    }
  ],
  "observation_budget": 65,
  "parallel_bandwidth": 2,
  "type": "offline"
}
import com.sigopt.SigOpt;
import com.sigopt.exception.SigoptException;
import com.sigopt.model.*;
import java.util.Arrays;

public class YourSigoptExperiment {
  public static Experiment createExperiment() throws SigoptException {
    Experiment experiment = Experiment.create()
      .data(
        new Experiment.Builder()
          .name("xgboost with prior beliefs")
          .parameters(java.util.Arrays.asList(
            new Parameter.Builder()
              .name("log10_learning_rate")
              .bounds(new Bounds.Builder()
                .min(-4)
                .max(0)
                .build())
              .prior(new Prior.Builder()
                .name("beta")
                .shapeA(2)
                .shapeB(4.5)
                .build())
              .type("double")
              .build(),
            new Parameter.Builder()
              .name("max_depth")
              .bounds(new Bounds.Builder()
                .min(3)
                .max(12)
                .build())
              .type("int")
              .build(),
            new Parameter.Builder()
              .name("colsample_bytree")
              .bounds(new Bounds.Builder()
                .min(0)
                .max(1)
                .build())
              .prior(new Prior.Builder()
                .name("normal")
                .mean(0.6)
                .scale(0.15)
                .build())
              .type("double")
              .build()
          ))
          .metrics(java.util.Arrays.asList(
            new Metric.Builder()
              .name("AUPRC")
              .objective("maximize")
              .strategy("optimize")
              .build()
          ))
          .observationBudget(65)
          .parallelBandwidth(2)
          .type("offline")
          .build()
      )
      .call();
    return experiment;
  }
}

AI Module

Python
YAML
import sigopt

experiment = sigopt.create_experiment(
  name="xgboost with prior beliefs",
  parameters=[
    dict(
      name="log10_learning_rate",
      bounds=dict(
        min=-4,
        max=0
      ),
      prior=dict(
        name="beta",
        shape_a=2,
        shape_b=4.5
      ),
      type="double"
    ),
    dict(
      name="max_depth",
      bounds=dict(
        min=3,
        max=12
      ),
      type="int"
    ),
    dict(
      name="colsample_bytree",
      bounds=dict(
        min=0,
        max=1
      ),
      prior=dict(
        name="normal",
        mean=0.6,
        scale=0.15
      ),
      type="double"
    )
  ],
  metrics=[
    dict(
      name="AUPRC",
      objective="maximize",
      strategy="optimize"
    )
  ],
  budget=65,
  parallel_bandwidth=2,
  type="offline"
)
print("Created experiment: https://app.sigopt.com/experiment/" + experiment.id)
name: xgboost with prior beliefs
parameters:
  - name: log10_learning_rate
    bounds:
      min: -4
      max: 0
    prior:
      name: beta
      shape_a: 2
      shape_b: 4.5
    type: double
  - name: max_depth
    bounds:
      min: 3
      max: 12
    type: int
  - name: colsample_bytree
    bounds:
      min: 0
      max: 1
    prior:
      name: normal
      mean: 0.6
      scale: 0.15
    type: double
metrics:
  - name: AUPRC
    objective: maximize
    strategy: optimize
budget: 65
parallel_bandwidth: 2
type: offline

Updating Prior Beliefs

While an experiment is in progress, you can change your belief about how a particular parameter is distributed. Prior beliefs can be updated directly through our API. The example below adjusts the prior belief on the learning rate; note that the parameters list in the update call includes every parameter in the experiment, not only the one whose prior is changing.

Core Module

Python
Bash
Java
experiment = conn.experiments(experiment.id).update(
  parameters=[
    dict(
      name="log10_learning_rate",
      prior=dict(
        name="beta",
        shape_a=8,
        shape_b=2
      )
    ),
    dict(
      name="max_depth"
    ),
    dict(
      name="colsample_bytree"
    )
  ]
)
EXPERIMENT=`curl -s -X PUT https://api.sigopt.com/v1/experiments/EXPERIMENT_ID -u "$SIGOPT_API_TOKEN": \
-H 'Content-Type: application/json' \
-d "{\"parameters\":[{\"name\":\"log10_learning_rate\",\"prior\":{\"name\":\"beta\",\"shape_a\":8,\"shape_b\":2}},{\"name\":\"max_depth\"},{\"name\":\"colsample_bytree\"}]}"`
Experiment experiment = Experiment.update(experiment.id)
  .data(
    new Experiment.Builder()
      .parameters(java.util.Arrays.asList(
        new Parameter.Builder()
          .name("log10_learning_rate")
          .prior(new Prior.Builder()
            .name("beta")
            .shapeA(8)
            .shapeB(2)
            .build())
          .build(),
        new Parameter.Builder()
          .name("max_depth")
          .build(),
        new Parameter.Builder()
          .name("colsample_bytree")
          .build()
      ))
      .build()
  )
  .call();

AI Module

Updating the prior beliefs is not supported in the AI module.

Removing Prior Beliefs

Prior beliefs can also be removed while an experiment is in progress, which reverts SigOpt to the default assumption that the parameter is uniformly distributed. To do this, simply set the prior field to None (null in the REST API). The example below removes the prior belief on the colsample_bytree parameter.

Core Module

Python
Bash
Java
experiment = conn.experiments(experiment.id).update(
  parameters=[
    dict(
      name="log10_learning_rate"
    ),
    dict(
      name="max_depth"
    ),
    dict(
      name="colsample_bytree",
      prior=None
    )
  ]
)
EXPERIMENT=`curl -s -X PUT https://api.sigopt.com/v1/experiments/EXPERIMENT_ID -u "$SIGOPT_API_TOKEN": \
-H 'Content-Type: application/json' \
-d "{\"parameters\":[{\"name\":\"log10_learning_rate\"},{\"name\":\"max_depth\"},{\"name\":\"colsample_bytree\",\"prior\":null}]}"`
Experiment experiment = Experiment.update(experiment.id)
  .data(
    new Experiment.Builder()
      .parameters(java.util.Arrays.asList(
        new Parameter.Builder()
          .name("log10_learning_rate")
          .build(),
        new Parameter.Builder()
          .name("max_depth")
          .build(),
        new Parameter.Builder()
          .name("colsample_bytree")
          .prior(null)
          .build()
      ))
      .build()
  )
  .call();

AI Module

Removing the prior beliefs is not supported in the AI module.

Limitations