New notebooks from @roussel-ryan
ChristopherMayes committed Oct 10, 2021
1 parent 68220e4 commit 04cabf9
Showing 2 changed files with 118 additions and 112 deletions.
File 1 of 2:
@@ -14,6 +14,8 @@
"outputs": [],
"source": [
"# Import the class\n",
"import torch\n",
"\n",
"from xopt import Xopt\n",
"from botorch.test_functions.multi_fidelity import AugmentedHartmann"
]
@@ -46,34 +48,28 @@
"source": [
"# Make a proper input file. \n",
"YAML = \"\"\"\n",
"xopt: {output_path: null}\n",
"xopt: \n",
" output_path: null\n",
"\n",
"algorithm:\n",
" name: bayesian_optimization\n",
" options: \n",
" n_initial_samples: 16\n",
" n_steps: 6\n",
" n_steps: 20\n",
" generator_options:\n",
" acquisition_function: custom_acq.acq\n",
" use_gpu: False\n",
"\n",
"simulation: \n",
" name: test_multi_fidelity\n",
" evaluate: xopt.tests.evaluators.multi_fidelity.evaluate\n",
" name: test\n",
" evaluate: xopt.tests.evaluators.quad_3d.evaluate\n",
"\n",
"vocs:\n",
" name: test_multi_fidelity\n",
" description: null\n",
" simulation: test_multi_fidelity\n",
" templates: null\n",
" name: test\n",
" variables:\n",
" x1: [0, 1.0]\n",
" x2: [0, 1.0]\n",
" x3: [0, 1.0]\n",
" x4: [0, 1.0]\n",
" x5: [0, 1.0]\n",
" x6: [0, 1.0]\n",
" cost: [0, 1.0] ## NOTE: THIS IS REQUIRED FOR MULTI-FIDELITY OPTIMIZATION\n",
" objectives:\n",
" y1: 'MINIMIZE'\n",
" linked_variables: {}\n",
@@ -98,9 +94,6 @@
"Loading config from dict.\n",
"Loading config from dict.\n",
"`name` keyword no longer allowed in vocs config, removing\n",
"`description` keyword no longer allowed in vocs config, removing\n",
"`simulation` keyword no longer allowed in vocs config, removing\n",
"`templates` keyword no longer allowed in vocs config, moving to simulation `options`\n",
"Warning: No path set for key xopt : output_path\n"
]
},
@@ -110,30 +103,26 @@
"\n",
" Xopt \n",
"________________________________ \n",
"Version: 0.4.3+221.g8413e80.dirty\n",
"Version: 0.4.3+227.g68220e4.dirty\n",
"Configured: True\n",
"Config as YAML:\n",
"xopt: {output_path: null}\n",
"algorithm:\n",
" name: bayesian_optimization\n",
" options:\n",
" n_initial_samples: 16\n",
" n_steps: 6\n",
" n_steps: 20\n",
" generator_options: {acquisition_function: custom_acq.acq, use_gpu: false}\n",
" function: xopt.bayesian.algorithms.bayesian_optimize\n",
"simulation:\n",
" name: test_multi_fidelity\n",
" evaluate: xopt.tests.evaluators.multi_fidelity.evaluate\n",
" options: {templates: null, extra_option: abc}\n",
" name: test\n",
" evaluate: xopt.tests.evaluators.quad_3d.evaluate\n",
" options: {extra_option: abc}\n",
"vocs:\n",
" variables:\n",
" x1: [0, 1.0]\n",
" x2: [0, 1.0]\n",
" x3: [0, 1.0]\n",
" x4: [0, 1.0]\n",
" x5: [0, 1.0]\n",
" x6: [0, 1.0]\n",
" cost: [0, 1.0]\n",
" objectives: {y1: MINIMIZE}\n",
" linked_variables: {}\n",
" constants: {a: dummy_constant}\n",
@@ -175,21 +164,39 @@
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Starting at time 2021-10-08T16:12:50-07:00\n",
"started running optimization with generator: <xopt.bayesian.generators.generator.BayesianGenerator object at 0x130f791f0>\n",
"Starting at time 2021-10-09T17:33:11-07:00\n",
"started running optimization with generator: <xopt.bayesian.generators.generator.BayesianGenerator object at 0x133c80490>\n",
"submitting initial candidates\n",
"starting optimization loop\n",
"submitting candidates\n",
"submitting candidates\n",
"submitting candidates\n",
"submitting candidates\n",
"submitting candidates\n",
"submitting candidates\n",
"submitting candidates\n",
"submitting candidates\n",
"submitting candidates\n",
"submitting candidates\n",
"submitting candidates\n",
"submitting candidates\n",
"submitting candidates\n",
"submitting candidates\n",
"submitting candidates\n",
"submitting candidates\n",
"submitting candidates\n",
"submitting candidates\n",
"submitting candidates\n",
"submitting candidates\n"
]
}
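The generator_options in the config point the generator at an acquisition function imported from a user module, custom_acq.acq. That module is not part of this diff, and the exact signature Xopt expects is not shown; a hypothetical custom_acq.py, assuming the hook receives a fitted BoTorch model and returns a BoTorch acquisition function:

    # custom_acq.py (hypothetical -- the signature is an assumption, not shown in this commit)
    from botorch.acquisition import UpperConfidenceBound

    def acq(model):
        # Any BoTorch acquisition function built on the fitted model would do;
        # UCB with beta=2.0 is purely an illustrative choice.
        return UpperConfidenceBound(model, beta=2.0)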
@@ -200,58 +207,48 @@
"results = X.results"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get highest fidelity global optimum"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"# create generator object\n",
"from xopt.bayesian.generators.multi_fidelity import MultiFidelityGenerator\n",
"from xopt.bayesian.models.models import create_multi_fidelity_model\n",
"\n",
"gen = MultiFidelityGenerator(X.vocs)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
},
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [
{
"ename": "KeyError",
"evalue": "'corrected_constraints'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m/var/folders/wj/lfgr01993dx79p9cm_skykbw0000gn/T/ipykernel_47729/2151074823.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# create model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcreate_multi_fidelity_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresults\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'variables'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresults\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'corrected_objectives'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresults\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'corrected_constraints'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvocs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;31mKeyError\u001b[0m: 'corrected_constraints'"
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([5.5905e-05], dtype=torch.float64)\n",
"tensor([0.1499, 0.1480, 0.1428], dtype=torch.float64)\n"
]
}
],
"source": [
"# create model\n",
"model = create_multi_fidelity_model(results['variables'], results['corrected_objectives'], results['corrected_constraints'], X.vocs)"
"import torch\n",
"# print out the best observed point\n",
"best_idx = torch.argmin(results['objectives'])\n",
"\n",
"# best value\n",
"print(results['objectives'][best_idx])\n",
"\n",
"# best value location\n",
"print(results['variables'][best_idx])"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"## NOTE: we want to get the minimum evaluated at the highest fidelity -> make sure to use get_recommendation\n",
"rec = gen.get_recommendation(model)\n",
"problem = AugmentedHartmann(negate=False)\n",
"problem(rec) ## NOTE: the correct global minimum is -3.32237"
"# Cleanup\n",
"!rm results.json"
]
}
],
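Beyond printing the best observed point, the result tensors can be tabulated for inspection; an illustrative sketch (not part of the notebook), assuming results['variables'] is an (n_samples, n_variables) tensor and results['objectives'] has a single column, as the printed values above suggest:

    import pandas as pd

    # Column names follow the vocs variables of this example (x1, x2, x3) and are assumed here.
    df = pd.DataFrame(results['variables'].numpy(), columns=['x1', 'x2', 'x3'])
    df['y1'] = results['objectives'].numpy().flatten()
    print(df.sort_values('y1').head())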
File 2 of 2:
@@ -21,6 +21,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Import the class\n",
"from xopt import Xopt\n",
"from xopt.bayesian.generators.multi_fidelity import MultiFidelityGenerator\n",
"from xopt.bayesian.models.models import create_multi_fidelity_model\n",
@@ -119,7 +120,7 @@
"\n",
" Xopt \n",
"________________________________ \n",
"Version: 0.4.3+221.g8413e80.dirty\n",
"Version: 0.4.3+227.g68220e4.dirty\n",
"Configured: True\n",
"Config as YAML:\n",
"xopt: {output_path: null}\n",
@@ -182,11 +183,11 @@
"source": [
"# Pick one of these\n",
"from concurrent.futures import ThreadPoolExecutor as PoolExecutor\n",
"#from concurrent.futures import ProcessPoolExecutor \n",
"#from concurrent.futures import ProcessPoolExecutor as PoolExecutor\n",
"\n",
"executor = PoolExecutor()\n",
"#executor = PoolExecutor()\n",
"# This will also work. \n",
"# executor=None"
"executor=None"
]
},
{
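The commented lines above are the intended alternatives: a thread pool, a process pool, or executor=None for plain serial evaluation. A minimal variant with an explicit worker limit (max_workers is a standard concurrent.futures argument; the value 4 is arbitrary):

    from concurrent.futures import ThreadPoolExecutor as PoolExecutor

    # Cap concurrent evaluations at 4 worker threads; X.run(executor=executor)
    # then dispatches candidate evaluations through this pool.
    executor = PoolExecutor(max_workers=4)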
@@ -198,58 +199,54 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Starting at time 2021-10-08T15:27:05-07:00\n",
"started running optimization with generator: <xopt.bayesian.generators.multi_fidelity.MultiFidelityGenerator object at 0x12c30dd00>\n",
"Starting at time 2021-10-09T17:40:02-07:00\n",
"started running optimization with generator: <xopt.bayesian.generators.multi_fidelity.MultiFidelityGenerator object at 0x129617ee0>\n",
"starting optimization loop\n",
"Submitted candidate 0, cost: 1.26, total cost: 1.26\n",
"Submitted candidate 1, cost: 1.11, total cost: 2.372\n",
"Submitted candidate 2, cost: 1.1, total cost: 3.468\n",
"Submitted candidate 3, cost: 1.13, total cost: 4.598\n",
"Submitted candidate 0, cost: 1.38, total cost: 1.376\n",
"Submitted candidate 1, cost: 1.17, total cost: 2.548\n",
"Submitted candidate 2, cost: 1.12, total cost: 3.664\n",
"Submitted candidate 3, cost: 1.37, total cost: 5.035\n",
"generating 4 new candidate(s)\n",
"Submitted candidate 4, cost: 1.0, total cost: 5.598\n",
"Submitted candidate 5, cost: 1.0, total cost: 6.598\n",
"Submitted candidate 6, cost: 1.0, total cost: 7.598\n",
"Submitted candidate 7, cost: 1.0, total cost: 8.598\n",
"Submitted candidate 4, cost: 1.0, total cost: 6.035\n",
"Submitted candidate 5, cost: 1.0, total cost: 7.035\n",
"Submitted candidate 6, cost: 1.0, total cost: 8.035\n",
"Submitted candidate 7, cost: 1.01, total cost: 9.045\n",
"generating 4 new candidate(s)\n",
"Submitted candidate 8, cost: 1.01, total cost: 9.611\n",
"Submitted candidate 9, cost: 1.0, total cost: 10.61\n",
"Submitted candidate 10, cost: 1.0, total cost: 11.61\n",
"Submitted candidate 11, cost: 1.0, total cost: 12.61\n",
"Submitted candidate 8, cost: 1.0, total cost: 10.05\n",
"Submitted candidate 9, cost: 1.0, total cost: 11.05\n",
"Submitted candidate 10, cost: 1.0, total cost: 12.05\n",
"Submitted candidate 11, cost: 1.0, total cost: 13.05\n",
"generating 4 new candidate(s)\n",
"Submitted candidate 12, cost: 1.02, total cost: 13.63\n",
"Submitted candidate 13, cost: 1.0, total cost: 14.63\n",
"Submitted candidate 14, cost: 1.0, total cost: 15.63\n",
"Submitted candidate 15, cost: 1.0, total cost: 16.63\n",
"generating 3 new candidate(s)\n",
"Submitted candidate 16, cost: 1.01, total cost: 17.64\n",
"Submitted candidate 17, cost: 1.0, total cost: 18.64\n",
"Submitted candidate 18, cost: 1.0, total cost: 19.64\n",
"generating 1 new candidate(s)\n",
"Submitted candidate 19, cost: 1.01, total cost: 20.65\n",
"generating 3 new candidate(s)\n",
"Submitted candidate 20, cost: 1.02, total cost: 21.67\n",
"Submitted candidate 21, cost: 1.0, total cost: 22.67\n",
"Submitted candidate 22, cost: 1.02, total cost: 23.69\n",
"generating 1 new candidate(s)\n",
"Submitted candidate 23, cost: 1.04, total cost: 24.73\n",
"generating 3 new candidate(s)\n",
"Submitted candidate 24, cost: 1.17, total cost: 25.9\n",
"Submitted candidate 25, cost: 1.19, total cost: 27.09\n",
"Submitted candidate 26, cost: 1.13, total cost: 28.21\n",
"generating 1 new candidate(s)\n",
"Submitted candidate 27, cost: 1.27, total cost: 29.48\n",
"generating 3 new candidate(s)\n",
"Submitted candidate 28, cost: 1.23, total cost: 30.72\n",
"Submitted candidate 29, cost: 1.33, total cost: 32.04\n",
"Submitted candidate 12, cost: 1.0, total cost: 14.05\n",
"Submitted candidate 13, cost: 1.04, total cost: 15.09\n",
"Submitted candidate 14, cost: 1.03, total cost: 16.12\n",
"Submitted candidate 15, cost: 1.03, total cost: 17.14\n",
"generating 4 new candidate(s)\n",
"Submitted candidate 16, cost: 1.13, total cost: 18.27\n",
"Submitted candidate 17, cost: 1.14, total cost: 19.41\n",
"Submitted candidate 18, cost: 1.15, total cost: 20.57\n",
"Submitted candidate 19, cost: 1.15, total cost: 21.72\n",
"generating 4 new candidate(s)\n",
"Submitted candidate 20, cost: 1.0, total cost: 22.72\n",
"Submitted candidate 21, cost: 1.0, total cost: 23.72\n",
"Submitted candidate 22, cost: 1.0, total cost: 24.72\n",
"Submitted candidate 23, cost: 1.0, total cost: 25.72\n",
"generating 4 new candidate(s)\n",
"Submitted candidate 24, cost: 1.03, total cost: 26.74\n",
"Submitted candidate 25, cost: 1.04, total cost: 27.79\n",
"Submitted candidate 26, cost: 1.04, total cost: 28.83\n",
"Submitted candidate 27, cost: 1.03, total cost: 29.85\n",
"generating 4 new candidate(s)\n",
"Submitted candidate 28, cost: 1.01, total cost: 30.87\n",
"Submitted candidate 29, cost: 1.02, total cost: 31.89\n",
"Submitted candidate 30, cost: 1.0, total cost: 32.89\n",
"budget exceeded, waiting for simulations to end\n",
"Budget exceeded and simulations finished\n",
"CPU times: user 22min 54s, sys: 1min 11s, total: 24min 6s\n",
"Wall time: 6min 17s\n"
"Budget exceeded and simulations finished\n"
]
}
],
"source": [
"%%time \n",
"# Change max generations\n",
"X.run(executor=executor)\n",
"results = X.results"
]
@@ -258,7 +255,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get highest fidelity global optimum"
"### Get highest fidelity global optimum\n",
"\n",
"NOTE: the correct global minimum is -3.32237"
]
},
{
@@ -292,7 +291,7 @@
{
"data": {
"text/plain": [
"tensor([-3.2211], dtype=torch.float64)"
"tensor([-3.0976], dtype=torch.float64)"
]
},
"execution_count": 9,
@@ -306,6 +305,16 @@
"problem = AugmentedHartmann(negate=False)\n",
"problem(rec) ## NOTE: the correct global minimum is -3.32237"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"# Cleanup\n",
"!rm results.json"
]
}
],
"metadata": {