# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running MPO on the control suite locally."""

from typing import Dict, Sequence

from absl import app
from absl import flags

import acme
from acme import specs
from acme import types
from acme import wrappers
from acme.agents.tf import dmpo
from acme.tf import networks
from acme.tf import utils as tf2_utils
from dm_control import suite
import dm_env
import numpy as np
import sonnet as snt

flags.DEFINE_integer('num_episodes', 100, 'Number of episodes to run for.')
FLAGS = flags.FLAGS
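
# Example invocation (assuming acme, dm_control and their dependencies are
# installed):
#   python run_dmpo.py --num_episodes=10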


def make_environment(domain_name: str = 'cartpole',
                     task_name: str = 'balance') -> dm_env.Environment:
  """Creates a control suite environment."""
  environment = suite.load(domain_name, task_name)
  environment = wrappers.SinglePrecisionWrapper(environment)
  return environment
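
# For reference, the same helper loads any other control-suite task, e.g.
# make_environment('walker', 'walk'). SinglePrecisionWrapper casts the
# environment's double-precision observations and rewards to float32.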


def make_networks(
    action_spec: specs.BoundedArray,
    policy_layer_sizes: Sequence[int] = (256, 256, 256),
    critic_layer_sizes: Sequence[int] = (512, 512, 256),
    vmin: float = -150.,
    vmax: float = 150.,
    num_atoms: int = 51,
) -> Dict[str, types.TensorTransformation]:
  """Creates networks used by the agent."""

  # Get total number of action dimensions from action spec.
  num_dimensions = np.prod(action_spec.shape, dtype=int)

  # Create the shared observation network; here simply a state-less operation.
  observation_network = tf2_utils.batch_concat

  # Create the policy network.
  policy_network = snt.Sequential([
      networks.LayerNormMLP(policy_layer_sizes),
      networks.MultivariateNormalDiagHead(num_dimensions)
  ])
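
  # Note: MultivariateNormalDiagHead parameterizes a diagonal Gaussian over
  # the action vector, so the policy is stochastic; MPO samples from and
  # refits this distribution during learning.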

  # The multiplexer concatenates the observations and actions.
  multiplexer = networks.CriticMultiplexer(
      critic_network=networks.LayerNormMLP(critic_layer_sizes),
      action_network=networks.ClipToSpec(action_spec))

  # Create the critic network.
  critic_network = snt.Sequential([
      multiplexer,
      networks.DiscreteValuedHead(vmin, vmax, num_atoms),
  ])
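
  # DiscreteValuedHead outputs a categorical distribution over num_atoms
  # evenly spaced return atoms in [vmin, vmax] (a C51-style distributional
  # critic); this is what makes the agent a *distributional* MPO.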

  return {
      'policy': policy_network,
      'critic': critic_network,
      'observation': observation_network,
  }


def main(_):
  # Create an environment and grab the spec.
  environment = make_environment()
  environment_spec = specs.make_environment_spec(environment)

  # Create the networks to optimize (online) and target networks.
  agent_networks = make_networks(environment_spec.actions)

  # Construct the agent.
  agent = dmpo.DistributionalMPO(
      environment_spec=environment_spec,
      policy_network=agent_networks['policy'],
      critic_network=agent_networks['critic'],
      observation_network=agent_networks['observation'],  # pytype: disable=wrong-arg-types
  )
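
  # The agent bundles actor, learner and replay behind a single interface and
  # uses built-in defaults for hyperparameters such as batch size and
  # optimizers; see dmpo.DistributionalMPO's constructor for what can be tuned.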

  # Run the environment loop.
  loop = acme.EnvironmentLoop(environment, agent)
  loop.run(num_episodes=FLAGS.num_episodes)
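  # Note: EnvironmentLoop alternates acting and learning, logging per-episode
  # statistics (e.g. episode return and length) via acme's default logger.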


if __name__ == '__main__':
  app.run(main)