ddpg.py (forked from rail-berkeley/rlkit)
"""
Example of running PyTorch implementation of DDPG on HalfCheetah.
"""
from gym.envs.mujoco import HalfCheetahEnv
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.exploration_strategies.base import (
    PolicyWrappedWithExplorationStrategy,
)
from rlkit.exploration_strategies.ou_strategy import OUStrategy
from rlkit.launchers.launcher_util import setup_logger
from rlkit.torch.networks import FlattenMlp, TanhMlpPolicy
from rlkit.torch.ddpg.ddpg import DDPG
import rlkit.torch.pytorch_util as ptu
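

# Builds the HalfCheetah environment, a Q-function and a deterministic tanh
# policy (two hidden layers of 400 and 300 units, as in the DDPG paper),
# wraps the policy with OU-noise exploration, and trains with DDPG.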
def experiment(variant):
    env = NormalizedBoxEnv(HalfCheetahEnv())
    # Or for a specific version:
    # import gym
    # env = NormalizedBoxEnv(gym.make('HalfCheetah-v1'))
    es = OUStrategy(action_space=env.action_space)
    obs_dim = env.observation_space.low.size
    action_dim = env.action_space.low.size
    qf = FlattenMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        hidden_sizes=[400, 300],
    )
    policy = TanhMlpPolicy(
        input_size=obs_dim,
        output_size=action_dim,
        hidden_sizes=[400, 300],
    )
    exploration_policy = PolicyWrappedWithExplorationStrategy(
        exploration_strategy=es,
        policy=policy,
    )
    algorithm = DDPG(
        env,
        qf=qf,
        policy=policy,
        exploration_policy=exploration_policy,
        **variant['algo_params']
    )
    if ptu.gpu_enabled():
        algorithm.cuda()
    algorithm.train()
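

# The hyperparameters below match the upstream rlkit DDPG example: soft target
# updates with tau=0.01, 1000-step episodes, and the learning rates from the
# original DDPG paper (1e-3 for the Q-function, 1e-4 for the policy).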
if __name__ == "__main__":
    # noinspection PyTypeChecker
    variant = dict(
        algo_params=dict(
            num_epochs=1000,
            num_steps_per_epoch=1000,
            num_steps_per_eval=1000,
            use_soft_update=True,
            tau=1e-2,
            batch_size=128,
            max_path_length=1000,
            discount=0.99,
            qf_learning_rate=1e-3,
            policy_learning_rate=1e-4,
        ),
    )
    setup_logger('name-of-experiment', variant=variant)
    experiment(variant)
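
# Usage note (a sketch, assuming gym, mujoco-py, and rlkit are installed and
# importable): running `python ddpg.py` trains for the 1000 epochs configured
# above, and setup_logger records the variant and per-epoch progress to a
# timestamped directory under rlkit's local log directory.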