From 8f3e378ed6069663ca34d0154148d5c0b6f25d33 Mon Sep 17 00:00:00 2001 From: Vladimir Ivanov Date: Thu, 30 May 2024 19:34:47 +0200 Subject: [PATCH 1/9] Added all learning code I wrote so far. --- .gitignore | 4 + scripts/test_dqn.py | 287 ++++++ src/satisfia/agents/learning/dqn.py | 953 ++++++++++++++++++ src/satisfia/agents/learning/dqn/__init__.py | 0 .../agents/learning/dqn/agent_mdp_dqn.py | 241 +++++ .../agents/learning/dqn/bellman_formula.py | 134 +++ src/satisfia/agents/learning/dqn/config.py | 94 ++ src/satisfia/agents/learning/dqn/criteria.py | 20 + .../learning/dqn/exploration_strategy.py | 57 ++ .../agents/learning/dqn/replay_buffer.py | 70 ++ src/satisfia/agents/learning/dqn/train.py | 270 +++++ .../agents/learning/environment_wrappers.py | 22 + .../agents/learning/models/building_blocks.py | 244 +++++ src/satisfia/util/interval_tensor.py | 61 ++ src/world_model/simple_gridworld.py | 5 + 15 files changed, 2462 insertions(+) create mode 100644 scripts/test_dqn.py create mode 100644 src/satisfia/agents/learning/dqn.py create mode 100644 src/satisfia/agents/learning/dqn/__init__.py create mode 100644 src/satisfia/agents/learning/dqn/agent_mdp_dqn.py create mode 100644 src/satisfia/agents/learning/dqn/bellman_formula.py create mode 100644 src/satisfia/agents/learning/dqn/config.py create mode 100644 src/satisfia/agents/learning/dqn/criteria.py create mode 100644 src/satisfia/agents/learning/dqn/exploration_strategy.py create mode 100644 src/satisfia/agents/learning/dqn/replay_buffer.py create mode 100644 src/satisfia/agents/learning/dqn/train.py create mode 100644 src/satisfia/agents/learning/environment_wrappers.py create mode 100644 src/satisfia/agents/learning/models/building_blocks.py create mode 100644 src/satisfia/util/interval_tensor.py diff --git a/.gitignore b/.gitignore index ddd3247..ae60d7e 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,10 @@ __pycache__/ *.py[cod] *$py.class +# saved python or pytorch objects +*.pickle +*.pt + # C extensions *.so diff --git a/scripts/test_dqn.py b/scripts/test_dqn.py new file mode 100644 index 0000000..38d3017 --- /dev/null +++ b/scripts/test_dqn.py @@ -0,0 +1,287 @@ +import sys +sys.path.insert(0, "./src/") + +from environments.very_simple_gridworlds import make_simple_gridworld, all_worlds +from satisfia.agents.learning.dqn.train import train_dqn +from satisfia.agents.learning.dqn.config import DQNConfig, UniformPointwiseAspirationSampler, \ + UniformAspirationSampler, PiecewiseLinearScheduler +from satisfia.agents.learning.dqn.agent_mdp_dqn import AgentMDPDQN, local_policy +from satisfia.agents.learning.dqn.criteria import complete_criteria +from satisfia.agents.learning.models.building_blocks import SatisfiaMLP +from satisfia.agents.learning.environment_wrappers import RestrictToPossibleActionsWrapper +from satisfia.agents.makeMDPAgentSatisfia import AspirationAgent, AgentMDPPlanning +from satisfia.util.interval_tensor import IntervalTensor + +import gymnasium as gym +import torch +from torch import tensor +from torch.nn import Module +import numpy as np +import scipy +import pickle +from joblib import Parallel, delayed +from statistics import mean +from functools import partial +from tqdm import tqdm +from typing import Tuple, List, Dict, Iterable, Callable, Generator +from os.path import isfile +import dataclasses +from plotly.colors import DEFAULT_PLOTLY_COLORS +from plotly.graph_objects import Figure, Scatter, Layout + +device = "cuda" if torch.cuda.is_available() else "cpu" +# device = "cpu" +print("using", device) + +def 
multi_tqdm(num_tqdms: int) -> List[Callable[[Iterable], Iterable]]: + def itr_wrapper(itr: Iterable, progress_bar: tqdm, desc: str | None = None, total: int | None = None) -> Generator: + progress_bar.desc = desc + progress_bar.reset() + if total is not None: + progress_bar.total = total + else: + progress_bar.total = len(itr) if hasattr(itr, "__len__") else None + progress_bar.refresh() + for item in itr: + yield item + progress_bar.update() + progress_bar.refresh() + + progress_bars = [tqdm() for _ in range(num_tqdms)] + return [partial(itr_wrapper, progress_bar=progress_bar) for progress_bar in progress_bars] + +def confidence_interval(xs: List[float], confidence: float): + return scipy.stats.t.interval(confidence, len(xs)-1, loc=np.mean(xs), scale=scipy.stats.sem(xs)) + +def error_bars(xs: List[float], confidence: float): + mean_ = mean(xs) + lower_confidence, upper_confidence = confidence_interval(xs, confidence) + return mean_ - lower_confidence, upper_confidence - mean_ + +def run_or_load(filename, function, *args, **kwargs): + if isfile(filename): + with open(filename, "rb") as f: + return pickle.load(f) + + result = function(*args, **kwargs) + with open(filename, "wb") as f: + pickle.dump(result, f) + return result + +def compute_total(agent: AspirationAgent, env: gym.Env, aspiration4state: float | Tuple[float, float], verbose=False) -> float: + if isinstance(aspiration4state, (int, float)): + aspiration4state = (aspiration4state, aspiration4state) + + total = 0. + observation, _ = env.reset() + done = False + while not done: + action, aspiration4action = agent.localPolicy(observation, aspiration4state).sample()[0] + if verbose: + print(observation, total, aspiration, action) + print(agent.maximizer_model(tensor([*observation], dtype=torch.float)).tolist()) + print(agent.minimizer_model(tensor([*observation], dtype=torch.float)).tolist()) + next_observation, delta, done, truncated, _ = env.step(action) + done = done or truncated + total += delta + aspiration4state = agent.propagateAspiration(observation, action, aspiration4action, Edel=None, nextState=next_observation) + observation = next_observation + return total + +def scatter_with_y_error_bars( x: Iterable[float], + y: Iterable[Iterable[float]], + confidence: float, + **plotly_kwargs ) -> Scatter: + + means = [mean(point_ys) for point_ys in y] + error_bars_ = [error_bars(point_ys, confidence) for point_ys in y] + return Scatter( x = x, + y = means, + error_y = dict( type = "data", + symmetric = False, + array = [lower for lower, upper in error_bars_], + arrayminus = [upper for lower, upper in error_bars_] ), + **plotly_kwargs ) + +def plot_totals_vs_aspiration( agents: Iterable[AspirationAgent] | Dict[str, AspirationAgent] | AspirationAgent, + env: gym.Env, + aspirations: Iterable[int | Tuple[int, int]] | int | Tuple[int, int], + sample_size: int, + reference_agents: Iterable[AspirationAgent] | Dict[str, AspirationAgent] | AspirationAgent = [], + error_bar_confidence: float = 0.95, + n_jobs: int = -1, + title: str = "Totals for agent(s)", + save_to: str | None = None ): + + if not isinstance(agents, Iterable): + agents = [agents] + if not isinstance(agents, Dict): + agents = {f"agent {i}": agent for i, agent in enumerate(agents)} + if not isinstance(reference_agents, Iterable): + reference_agents = [reference_agents] + if not isinstance(reference_agents, Dict): + reference_agents = {f"agent {i}": agent for i, agent in enumerate(reference_agents)} + if not isinstance(aspirations, Iterable): + aspirations = [aspirations] + + 
agent_tqdm, aspiration_tqdm, sample_tqdm = multi_tqdm(3) + + totals = dict() + reference_totals = dict() + for is_reference in [False, True]: + for agent_name, agent in (agent_tqdm(agents.items(), desc="agents") + if not is_reference else agent_tqdm(reference_agents.items(), desc="reference agents")): + + for aspiration in aspiration_tqdm(aspirations, desc=agent_name): + if n_jobs == 1: + t = [ + compute_total(agent, env, aspiration) + for _ in sample_tqdm(range(sample_size), desc=f"{agent_name}, {aspiration=}") + ] + else: + t = Parallel(n_jobs=n_jobs)( + delayed(compute_total)(agent, env, aspiration) + for _ in sample_tqdm(range(sample_size), desc=f"{agent_name}, {aspiration=}") + ) + + if is_reference: + reference_totals[agent_name, aspiration] = t + else: + totals[agent_name, aspiration] = t + + fig = Figure(layout=Layout( title = title + f". {error_bar_confidence:.0%} confidence error bars", + xaxis_title = "Aspiration", + yaxis_title = "Total" )) + + aspirations_as_points = [ (aspiration if isinstance(aspiration, (float, int)) else mean(aspiration)) + for aspiration in aspirations ] + + point_aspirations = all(isinstance(aspiration, (int, float)) for aspiration in aspirations) + for i_lower_or_upper, lower_or_upper in enumerate([None] if point_aspirations else ["lower", "upper"]): + fig.add_trace(Scatter( x = aspirations_as_points, + y = [ aspiration if isinstance(aspiration, (float, int)) else aspiration[i_lower_or_upper] + for aspiration in aspirations ], + name = "aspiration" if point_aspirations else f"{lower_or_upper} aspiration" )) + + for is_reference in [False, True]: + for i_agent, agent_name in enumerate(reference_agents.keys() if is_reference else agents.keys()): + t = reference_totals if is_reference else totals + fig.add_trace(scatter_with_y_error_bars( x = aspirations_as_points, + y = [t[agent_name, aspiration] for aspiration in aspirations], + confidence = error_bar_confidence, + line = dict(color = DEFAULT_PLOTLY_COLORS[i_agent], dash = "dash" if is_reference else "solid"), + name = ("reference " if is_reference else "") + + (agent_name if not (len(agents) == 1 and agent_name == "agent 0") else "") )) + + fig.show() + + if save_to is not None: + fig.write_html(save_to) + +cfg = DQNConfig( aspiration_sampler = UniformPointwiseAspirationSampler(-20, 5), + criterion_coefficients_for_loss = dict( maxAdmissibleQ = 1., + minAdmissibleQ = 1., + Q = 1. 
), + exploration_rate_scheduler = + PiecewiseLinearScheduler([0., 0.1, 1.], [1., 0.05, 0.05]), + noisy_network_exploration = False, + # noisy_network_exploration_rate_scheduler = + # PiecewiseLinearScheduler([0., 0.1, 1.], [1., 0.05, 0.05]), + num_envs = 10, + async_envs = False, + discount = 1, + total_timesteps = 10_000, + training_starts = 100, + training_frequency = 1, + target_network_update_frequency = 50, + satisfia_agent_params = { "lossCoeff4FeasibilityPowers": 0, + "lossCoeff4LRA1": 0, + "lossCoeff4Time1": 0, + "lossCoeff4Entropy1": 0, + "defaultPolicy": None }, + device = device, + plotted_criteria = ["maxAdmissibleQ", "minAdmissibleQ", "Q"], + plot_criteria_frequency = 100, + states_for_plotting_criteria = [(time, 2, 2) for time in range(10)], + state_aspirations_for_plotting_criteria = [(-5, -5), (-1, -1), (1, 1)], + actions_for_plotting_criteria = [2, 4] ) + +def train_and_plot(gridworld_name: str): + print(gridworld_name) + + def make_env(): + env, _ = make_simple_gridworld(gridworld_name) + return env + + def make_model(pretrained=None): + d_observation = len(make_env().observation_space) + n_actions = make_env().action_space.n + model = SatisfiaMLP( + input_size = d_observation, + output_not_depending_on_agent_parameters_sizes = { "maxAdmissibleQ": n_actions, + "minAdmissibleQ": n_actions }, + output_depending_on_agent_parameters_sizes = { "Q": n_actions }, + common_hidden_layer_sizes = [64, 64], + hidden_layer_not_depending_on_agent_parameters_sizes = [64], + hidden_layer_depending_on_agent_parameters_sizes = [64], + batch_size = cfg.num_envs, + layer_norms = True, + dropout = 0.1 + ) + if pretrained is not None: + model.load_state_dict(pretrained) + return model + # return NoisyMLPReturningDict( [ d_observation + d_aspiration, + # 64, + # 64, + # { "maxAdmissibleQ": n_actions, + # "minAdmissibleQ": n_actions, + # "Q": n_actions } ], + # batch_size=cfg.num_envs ).to(device) + + planning_agent = AgentMDPPlanning( cfg.satisfia_agent_params, + RestrictToPossibleActionsWrapper(make_env()) ) + + model = run_or_load( f"dqn-{gridworld_name}-no-discount-with-criteria.pickle", + train_dqn, + make_env, + make_model, + dataclasses.replace( + cfg, + planning_agent_for_plotting_ground_truth=planning_agent + ) ) + model = model.to(device) + + learning_agent = AgentMDPDQN( cfg.satisfia_agent_params, + model, + num_actions = make_env().action_space.n, + device = device ) + + for state in [(time, 2, 2) for time in range(10)]: + for action in range(5): + for state_aspiration in [(0, 0), (1, 1), (2, 2)]: + action_aspiration = planning_agent.aspiration4action(state, action, state_aspiration) + print( state, + action, + action_aspiration, + planning_agent.Q(state, action, state_aspiration) + - learning_agent.Q(state, action, state_aspiration) ) + + first_observation, _ = make_env().reset() + min_achievable_total = planning_agent.minAdmissibleV(first_observation) + max_achievable_total = planning_agent.maxAdmissibleV(first_observation) + plot_totals_vs_aspiration( agents = learning_agent, + env = RestrictToPossibleActionsWrapper(make_env()), + aspirations = np.linspace( min_achievable_total - 1, + max_achievable_total + 1, + 20 ), + sample_size = 1_000, + # reference_agents = planning_agent, + title = f"totals for agent with no discount and longer training in {gridworld_name}" ) + +all_gridworlds = ["GW1", "GW2", "GW3", "GW4", "GW5", "GW6", "GW22", "GW23", "GW24", "GW25", "GW26", "GW27", "GW28", "GW29", "GW30", "GW31", "GW32", "AISG2", "test_return", "test_box"] +require_longer_training = 
["GW22", "GW28", "AISG2", "test_box"] + +# Parallel(n_jobs=-1)(delayed(train_and_plot)(gridworld_name) for gridworld_name in require_longer_training) +for gridworld_name in ["GW1"]: + train_and_plot(gridworld_name) diff --git a/src/satisfia/agents/learning/dqn.py b/src/satisfia/agents/learning/dqn.py new file mode 100644 index 0000000..12433c1 --- /dev/null +++ b/src/satisfia/agents/learning/dqn.py @@ -0,0 +1,953 @@ +from satisfia.agents.makeMDPAgentSatisfia import AspirationAgent +import gymnasium as gym +import torch +from torch import tensor, Tensor, empty, cat, randn_like, randperm, where, no_grad +from satisfia.agents.makeMDPAgentSatisfia import AgentMDPPlanning +from torch.nn import Module +from torch.nn.functional import mse_loss +from torch.optim import AdamW +import numpy as np +import random +from functools import cache +from dataclasses import dataclass, field +from tqdm import tqdm +from more_itertools import pairwise, chunked +from statistics import mean +from typing import List, Callable, Iterable, Dict, Tuple, Any +from plotly.graph_objects import Figure, Layout +from plotly.subplots import make_subplots +from plotly.colors import DEFAULT_PLOTLY_COLORS + +class PiecewiseLinearScheduler: + def __init__(self, x: List[float], y: List[float]): + self.x = x + self.y = y + assert len(self.x) == len(self.y) + assert len(self.x) > 0 + assert all(x1 < x2 for x1, x2 in pairwise(self.x)) + + def __call__(self, x: float): + if x <= self.x[0]: + return self.y[0] + if x >= self.x[-1]: + return self.y[-1] + for (x1, x2), (y1, y2) in zip(pairwise(self.x), pairwise(self.y)): + if x1 <= x <= x2: + return y1 + (x - x1) / (x2 - x1) * (y2 - y1) + assert False, "unreachable" + +@dataclass +class DQNConfig: + train_minimizer: bool = True + train_maximizer: bool = True + total_timesteps: int = 500_000 + lambdas: Dict[str, float] = field(default_factory=lambda: {"max": 1., "min": 0.}) + lambda_sampler: Callable[[], Callable[[], float]] | None = None + aspiration_sampler: Callable[[], Tuple[float, float]] = None + discount: float = 0.99 + exploration_rate_scheduler: Callable[[float], float] = \ + PiecewiseLinearScheduler([0., 0.5, 1.], [1., 0.05, 0.05]) + parameter_noise_exploration: bool = False + mix_maximizer_and_minimizer_strategies: bool = False + learning_rate: float = 2.5e-4 + batch_size: int = 128 + train_corresponding_network: bool = False + double_q_learning: bool = False + fraction_samples_from_opposite_replay_buffer: float = 0 + true_double_q_learning: bool = False + replay_buffer_size: int = 10_000 + learning_starts: int = 10_000 + target_network_update_frequency: int = 500 + train_frequency: int = 10 + plot: bool = True + plot_smoothness: int = 1_000 + plot_title: str = "Training DQN." 
+ plot_q_values: bool = False + observations_for_plotting_q_values: List[Any] | None = None + actions_for_plotting_q_values: List[int] | None = None + lambdas_for_plotting_q_values: List[float] | None = None + plot_q_values_frequency: int = 100 + + def __post_init__(self): + assert self.train_maximizer or self.train_minimizer + +class AgentMDPTabularLearning(AspirationAgent): + def __init__(self, params: dict, planning_agent: AgentMDPPlanning, maximizer_q_table: Dict[Tuple, List[float]], minimizer_q_table: Dict[Tuple, List[float]]): + super().__init__(params | {"defaultPolicy": planning_agent.world.default_policy}) + self.planning_agent = planning_agent + self.maximizer_q_table = maximizer_q_table + self.minimizer_q_table = minimizer_q_table + + def maxAdmissibleQ(self, state, action): + if state not in self.maximizer_q_table: + self.maximizer_q_table[state] = [np.random.normal() for _ in range(self.planning_agent.world.action_space.n)] + return self.maximizer_q_table[state][action] + + def minAdmissibleQ(self, state, action): + if state not in self.minimizer_q_table: + self.minimizer_q_table[state] = [np.random.normal() for _ in range(self.planning_agent.world.action_space.n)] + return self.minimizer_q_table[state][action] + + def possible_actions(self, state): + return self.planning_agent.possible_actions(state) + +def train_tabular_q_network_on_satisfia_policy(make_env: gym.Env, cfg: DQNConfig): + env = make_env() + + maximizer_q_table = dict() + minimizer_q_table = dict() + stats = TabularQLeraningStatistics(cfg) + + agent_params = {} + + planning_agent = AgentMDPPlanning(agent_params, make_env()) + tabular_learning_agent = AgentMDPTabularLearning(agent_params, planning_agent, maximizer_q_table=maximizer_q_table, minimizer_q_table=minimizer_q_table) + aspiration = cfg.aspiration_sampler() + observation, _ = env.reset() + for timestep in tqdm(range(cfg.total_timesteps), desc="tabular q learning"): + exploration_rate = cfg.exploration_rate_scheduler(timestep / cfg.total_timesteps) + explore = random.random() <= exploration_rate + if explore: + action = random.choice(tabular_learning_agent.possible_actions(observation)) + action_aspiration = tabular_learning_agent.aspiration4action(observation, action, aspiration) + else: + action, action_aspiration = tabular_learning_agent.localPolicy(observation, aspiration).sample()[0] + + next_observation, delta, done, terminated, _ = env.step(action) + done = done or terminated + + if observation not in maximizer_q_table: + maximizer_q_table[observation] = [np.random.normal() for _ in range(env.action_space.n)] + if observation not in minimizer_q_table: + minimizer_q_table[observation] = [np.random.normal() for _ in range(env.action_space.n)] + if next_observation not in maximizer_q_table: + maximizer_q_table[next_observation] = [np.random.normal() for _ in range(env.action_space.n)] + if next_observation not in minimizer_q_table: + minimizer_q_table[next_observation] = [np.random.normal() for _ in range(env.action_space.n)] + + possible_next_actions = planning_agent.possible_actions(next_observation) + # if possible_next_actions != []: + # assert 4 in possible_next_actions + # if 4 not in possible_next_actions: + # possible_next_actions.append(4) + next_maximizer_q = [q for action, q in enumerate(maximizer_q_table[next_observation]) if action in possible_next_actions] + maximizer_q_table[observation][action] = (1 - cfg.learning_rate) * maximizer_q_table[observation][action] \ + + cfg.learning_rate * (delta + (cfg.discount * max(next_maximizer_q) if 
not done else 0.)) + next_minimizer_q = [q for action, q in enumerate(minimizer_q_table[next_observation]) if action in possible_next_actions] + minimizer_q_table[observation][action] = (1 - cfg.learning_rate) * minimizer_q_table[observation][action] \ + + cfg.learning_rate * (delta + (cfg.discount * min(next_minimizer_q) if not done else 0.)) + + if timestep % cfg.plot_q_values_frequency == 0: + stats.register_q_values("max", tuple(observation.int().tolist()), timestep, tuple(maximizer_q_table[observation].tolist())) + stats.register_q_values("min", tuple(observation.int().tolist()), timestep, tuple(minimizer_q_table[observation].tolist())) + + aspiration = tabular_learning_agent.propagateAspiration(observation, action, action_aspiration, Edel=None, nextState=next_observation) + observation = next_observation + + if done: + planning_agent = AgentMDPPlanning(agent_params, make_env()) + tabular_learning_agent = AgentMDPTabularLearning(agent_params, planning_agent, maximizer_q_table=maximizer_q_table, minimizer_q_table=minimizer_q_table) + aspiration = cfg.aspiration_sampler() + observation, _ = env.reset() + + if cfg.plot: + stats.plot(make_env) + + def q_table_to_function(q): + def f(observation): + observation = tuple(observation.flatten().tolist()) + if observation not in q: + q[observation] = {action: np.random.normal() for action in range(env.action_space.n)} + return tensor([q[observation][action] for action in range(env.action_space.n)]) + return f + + return { "maximizer": q_table_to_function(maximizer_q_table), + "minimizer": q_table_to_function(minimizer_q_table) } + + +def train_tabular_q_network(make_env: gym.Env, cfg: DQNConfig): + stats = DQNTrainingStatistics(cfg, keys=[(0, "max"), (0, "min")]) + + q_table = {"max": dict(), "min": dict()} + + envs = {"max": make_env(), "min": make_env()} + + observations = dict() + for key in ["max", "min"]: + observations[key], _ = envs[key].reset() + for timestep in tqdm(range(cfg.total_timesteps), desc="tabular q learning"): + exploration_rate = cfg.exploration_rate_scheduler(timestep / cfg.total_timesteps) + for key in ["max", "min"]: + max_or_min = {"max": max, "min": min}[key] + + if hasattr(envs[key], "possible_actions"): + possible_actions = envs[key].possible_actions() + else: + possible_actions = list(range(envs[key].action_space.n)) + + + if observations[key] not in q_table[key]: + q_table[key][observations[key]] = {action: np.random.normal() for action in range(envs[key].action_space.n)} + + explore = random.random() < exploration_rate + if explore: + action = random.choice(possible_actions) + else: + possible_actions_with_qs = [action for action in possible_actions if action in q_table[key][observations[key]]] + possible_qs = [q_table[key][observations[key]][action] for action in possible_actions_with_qs] + action = possible_actions_with_qs[possible_qs.index(max_or_min(possible_qs))] + # print(f"{possible_actions_with_qs=} {possible_qs=} {action=} {possible_qs.index(max_or_min(possible_qs))=}") + + # print(key, observations[key], action, explore) + + next_observation, reward, done, truncated, _ = envs[key].step(action) + done = done or truncated + + train_key = key if cfg.fraction_samples_from_opposite_replay_buffer(timestep / cfg.total_timesteps) <= random.random() else {"max": "min", "min": "max"}[key] + + if observations[key] not in q_table[train_key]: + q_table[train_key][observations[key]] = {action: np.random.normal() for action in range(envs[key].action_space.n)} + + if next_observation not in q_table[train_key]: + 
q_table[train_key][next_observation] = {action: np.random.normal() for action in range(envs[key].action_space.n)} + + # print(key, observations[key], action, reward + cfg.discount * max_or_min(q_table[key][next_observation].values()) * float(not done)) + q_table[train_key][observations[key]][action] = (1 - cfg.learning_rate) * q_table[train_key][observations[key]][action] \ + + cfg.learning_rate * (reward + cfg.discount * {"max": max, "min": min}[train_key](q_table[train_key][next_observation].values()) * float(not done)) + + if timestep % cfg.plot_q_values_frequency == 0: + stats.q_values[0, key][timestep] = { + observation_for_q_value: [q_table[key][observation_for_q_value].get(action) for action in range(envs[key].action_space.n)] + if observation_for_q_value in q_table[key] else [None] * envs[key].action_space.n + for observation_for_q_value in cfg.observations_for_plotting_q_values + } + + observations[key] = next_observation + + if done: + observations[key], _ = envs[key].reset() + + if cfg.plot: + stats.plot(make_env=make_env) + + def q_table_to_function(q): + def f(observation): + observation = tuple(observation.flatten().tolist()) + if observation not in q: + q[observation] = {action: np.random.normal() for action in range(envs["max"].action_space.n)} + return tensor([q[observation][action] for action in range(envs["max"].action_space.n)]) + return f + + return { "maximizer": q_table_to_function(q_table["max"]), + "minimizer": q_table_to_function(q_table["min"]) } + +class AgentMDPLearning(AspirationAgent): + # the planning agent is only used for possible_actions and default_policy + def __init__( self, + params: dict, + maximizer_model: Callable[[Tensor], Tensor], + minimizer_model: Callable[[Tensor], Tensor], + planning_agent: AgentMDPPlanning, + observation_action_pair_distribution: Dict[str, Dict[Tuple["observation", "action"], float]] | None = None, + min_possible_action_frequency: float | None = None): + + super().__init__(params | {"defaultPolicy": planning_agent.world.default_policy}) + self.planning_agent = planning_agent + self.maximizer_model = maximizer_model + self.minimizer_model = minimizer_model + self.observation_action_pair_distribution = observation_action_pair_distribution + self.min_possible_action_frequency = min_possible_action_frequency + + @cache + def maxAdmissibleQTable(self, state): + state = tensor([*state], dtype=torch.float) + return tuple(x.item() for x in self.maximizer_model(state)) + + @cache + def minAdmissibleQTable(self, state): + state = tensor([*state], dtype=torch.float) + return tuple(x.item() for x in self.minimizer_model(state)) + + def maxAdmissibleQ(self, state, action): + return self.maxAdmissibleQTable(state)[action] + + def minAdmissibleQ(self, state, action): + return self.minAdmissibleQTable(state)[action] + + def possible_actions(self, state): + return self.planning_agent.possible_actions(state) + +def train_dqn_on_satisfia_policy(make_model: Callable[[], Module], make_env: Callable[[], gym.Env], cfg: DQNConfig): + stats = DQNTrainingStatistics(cfg, keys=[(0, "max"), (0, "min")]) + observations_for_plotting_q_values = set(cfg.observations_for_plotting_q_values) if cfg.observations_for_plotting_q_values else set() + + env = ToTensorWrapper(make_env()) + q_networks = {"max": make_model(), "min": make_model()} + target_networks = {"max": make_model(), "min": make_model()} + for max_or_min in ["max", "min"]: + target_networks[max_or_min].load_state_dict(q_networks[max_or_min].state_dict()) + optimizers = {max_or_min: 
AdamW(n.parameters(), lr=cfg.learning_rate) for max_or_min, n in q_networks.items()} + + replay_buffer = ReplayBuffer(cfg.replay_buffer_size, num_actions=env.action_space.n) + + agent_params = {} + + planning_agent = AgentMDPPlanning(agent_params, make_env()) + learning_agent = AgentMDPLearning(agent_params, maximizer_model=target_networks["max"], minimizer_model=target_networks["min"], planning_agent=planning_agent) + aspiration = cfg.aspiration_sampler() + + observation, _ = env.reset() + for timestep in tqdm(range(cfg.total_timesteps), desc="training dqn"): + possible_actions = learning_agent.possible_actions(tuple(observation.int().tolist())) + + exploration_rate = cfg.exploration_rate_scheduler(timestep / cfg.total_timesteps) + explore = random.random() <= exploration_rate + if explore: + action = random.choice(possible_actions) + action_aspiration = learning_agent.aspiration4action(observation, action, aspiration) + else: + action, action_aspiration = learning_agent.localPolicy(tuple(observation.int().tolist()), aspiration).sample()[0] + + next_observation, delta, done, truncated, _ = env.step(action) + done = done or truncated + + replay_buffer.add( observation = observation, + action = action, + next_observation = next_observation, + delta = delta, + possible_actions = possible_actions, + done = done ) + + if not done and cfg.observations_for_plotting_q_values is None: + observations_for_plotting_q_values.add(tuple(observation.int().tolist())) + + record_q_values = cfg.plot_q_values and timestep % cfg.plot_q_values_frequency == 0 + if record_q_values: + for max_or_min in ["max", "min"]: + # print("q", max_or_min, q_networks[max_or_min](observation)[4]) + # print(q_networks["max"](tensor([9., 2, 2]))[4]) + stats.q_values[(0, max_or_min)][timestep] = { + observation_for_q_value: q_networks[max_or_min](tensor(list(observation_for_q_value), dtype=torch.float32)).tolist() + for observation_for_q_value in observations_for_plotting_q_values + } + # if max_or_min == "max": + # timesteps = list(stats.q_values[(0, "max")].keys()) + # print(timesteps) + # print([stats.q_values[(0, "max")][timestep][tuple(observation.int().tolist())][action] for timestep in timesteps]) + + aspiration = learning_agent.propagateAspiration( tuple(observation.int().tolist()), + action, + action_aspiration, + Edel=None, + nextState=tuple(next_observation.int().tolist()) ) + observation = next_observation + if done or truncated: + observation, _ = env.reset() + aspiration = cfg.aspiration_sampler() + + # needed to reset cache + learning_agent = AgentMDPLearning(agent_params, maximizer_model=target_networks["max"], minimizer_model=target_networks["min"], planning_agent=planning_agent) + + train = timestep >= cfg.learning_starts and timestep % cfg.train_frequency == 0 + if train: + data = replay_buffer.sample(cfg.batch_size) + for max_or_min in ["max", "min"]: + with no_grad(): + target = target_networks[max_or_min](data.next_observations) + target_argmax_network = q_networks[max_or_min] if cfg.double_q_learning else target_networks[max_or_min] + target_for_argmax_or_argmin = where( data.possible_actions, + target_argmax_network(data.next_observations), + {"max": float("-inf"), "min": float("inf")}[max_or_min] ) + target_argmax_or_argmin = {"max": torch.argmax, "min": torch.argmin}[max_or_min](target_for_argmax_or_argmin, dim=-1) + target_max_or_min = target.gather(-1, target_argmax_or_argmin.unsqueeze(-1)).squeeze(-1) + td_target = data.deltas + cfg.discount * target_max_or_min * data.dones.logical_not().float() + # 
if max_or_min == "max": + # print(td_target[(data.observations == tensor([9, 2, 2])).all(-1).logical_and(data.actions == 4)].numel()) + # print("target", td_target[(data.observations == tensor([9, 2, 2])).all(-1).logical_and(data.actions == 4)]) + q = q_networks[max_or_min](data.observations) + # print("q", q[(data.observations == tensor([9, 2, 2])).all(-1), 4]) + # if max_or_min == "max": + # print(q_networks[max_or_min](tensor([9., 2, 2]))[4]) + q = q.gather(-1, data.actions.unsqueeze(-1)).squeeze(-1) + td_loss = mse_loss(q, td_target) + optimizers[max_or_min].zero_grad() + td_loss.backward() + optimizers[max_or_min].step() + + update_target_network = timestep >= cfg.learning_starts and timestep % cfg.target_network_update_frequency == 0 + if update_target_network: + for max_or_min in ["max", "min"]: + target_networks[max_or_min].load_state_dict(q_networks[max_or_min].state_dict()) + + # needed to reset cache + learning_agent = AgentMDPLearning(agent_params, maximizer_model=target_networks["max"], minimizer_model=target_networks["min"], planning_agent=planning_agent) + + if cfg.plot: + stats.plot(make_env=make_env, observations_for_plotting_q_values=sorted(list(observations_for_plotting_q_values))) + + return {"maximizer": target_networks["max"], "minimizer": target_networks["min"]} + +def train_dqn_random_lambda(make_model: Callable[[], Module], make_env: Callable[[], gym.Env], cfg: DQNConfig): + stats = DQNTrainingStatistics(cfg, keys=[()]) + + env = ToTensorWrapper(make_env()) + q_network = make_model() + target_network = make_model() + target_network.load_state_dict(q_network.state_dict()) + optimizer = AdamW(q_network.parameters(), cfg.learning_rate) + + replay_buffer = ReplayBuffer(cfg.replay_buffer_size, num_actions=env.action_space.n) + + observation, _ = env.reset() + episode_deltas = [] + episode_lambda_sampler = cfg.lambda_sampler() + for timestep in tqdm(range(cfg.total_timesteps), desc="training dqn"): + lambda_ = episode_lambda_sampler() + + if hasattr(env, "possible_actions"): + possible_actions = env.env.possible_actions() + else: + possible_actions = list(range(env.action_space.n)) + + exploration_rate = cfg.exploration_rate_scheduler(timestep / cfg.total_timesteps) + explore = random.random() <= exploration_rate + if explore: + action = random.choice(possible_actions) + else: + q = q_network(tensor([*observation] + [lambda_], dtype=torch.float)) + action = possible_actions[q[possible_actions].argmax().item()] + + next_observation, delta, done, truncated, _ = env.step(action) + done = done or truncated + episode_deltas.append(delta) + + replay_buffer.add( observation = observation, + action = action, + next_observation = next_observation, + delta = delta, + possible_actions = possible_actions, + done = done, + lambda_ = lambda_ ) + + observation = next_observation + + if done or truncated: + observation, _ = env.reset() + episode_lambda_sampler = cfg.lambda_sampler() + stats.episode_lengths[()][timestep] = len(episode_deltas) + stats.totals[()][timestep] = mean(episode_deltas) + episode_deltas = [] + + record_q_values = cfg.plot_q_values and timestep % cfg.plot_q_values_frequency == 0 + if record_q_values: + for lambda_for_q_value in cfg.lambdas_for_plotting_q_values: + stats.q_values[lambda_for_q_value][timestep] = { + observation_for_q_value: q_network(tensor([*observation_for_q_value] + [lambda_for_q_value], dtype=torch.float)).tolist() + for observation_for_q_value in cfg.observations_for_plotting_q_values + } + + train = timestep >= cfg.learning_starts and timestep 
% cfg.train_frequency == 0 + if train: + with no_grad(): + target_argmax_network = q_network if cfg.double_q_learning else target_network + data = replay_buffer.sample(cfg.batch_size) + target = target_network(cat((data.observations, data.lambdas.unsqueeze(-1)), -1)) + target_for_argmax_and_argmin = target_argmax_network(cat((data.next_observations, data.lambdas.unsqueeze(-1)), -1)) + target_for_argmax = where(data.possible_actions, target_for_argmax_and_argmin, float("-inf")) + target_for_argmin = where(data.possible_actions, target_for_argmax_and_argmin, float("inf")) + target_argmax = target_for_argmax.argmax(-1) + target_argmin = target_for_argmin.argmin(-1) + target_max = target.gather(-1, target_argmax.unsqueeze(-1)).squeeze(-1) + target_min = target.gather(-1, target_argmin.unsqueeze(-1)).squeeze(-1) + target_max_min_mix = (1 - data.lambdas) * target_min + data.lambdas * target_max + td_target = data.deltas + cfg.discount * target_max_min_mix * data.dones.logical_not().float() + q = q_network(cat((data.observations, data.lambdas.unsqueeze(-1)), -1)) + q = q.gather(-1, data.actions.unsqueeze(-1)).squeeze(-1) + td_loss = mse_loss(q, td_target) + stats.td_losses[()][timestep] = td_loss.item() + optimizer.zero_grad() + td_loss.backward() + optimizer.step() + + update_target_network = timestep >= cfg.learning_starts and timestep % cfg.target_network_update_frequency == 0 + if update_target_network: + target_network.load_state_dict(q_network.state_dict()) + + if cfg.plot: + stats.plot(make_env=make_env) + + return { "maximizer": lambda observation: q_network(tensor([*observation, 1.])), + "minimizer": lambda observation: q_network(tensor([*observation, 0.])) } + +def train_dqn(make_model: Callable[[], Module], make_env: Callable[[], gym.Env], cfg: DQNConfig): + max_or_min_keys = [] + if cfg.train_maximizer: + max_or_min_keys.append("max") + if cfg.train_minimizer: + max_or_min_keys.append("min") + network_keys = [0, 1] if cfg.true_double_q_learning else [0] + keys = [ (network_key, max_or_min_key) + for network_key in network_keys + for max_or_min_key in max_or_min_keys ] + + stats = DQNTrainingStatistics(cfg, keys) + + envs = {key: ToTensorWrapper(make_env()) for key in keys} + q_networks = {key: make_model() for key in keys} + target_networks = {key: make_model() for key in keys} + buffer_models = {key: make_model() for key in keys} + replay_buffers = { key: ReplayBuffer(cfg.replay_buffer_size, num_actions=envs[key].action_space.n) + for key in keys} + optimizers = { key: AdamW(q_network.parameters(), lr=cfg.learning_rate) + for key, q_network in q_networks.items() } + + def update_buffer_model(key, exploration_rate): + buffer_models[key].load_state_dict(target_networks[key].state_dict()) + with no_grad(): + for param in buffer_models[key].parameters(): + param = param + exploration_rate * randn_like(param) + + for key in keys: + update_buffer_model(key, exploration_rate=cfg.exploration_rate_scheduler(0)) + + observations = {key: env.reset()[0] for key, env in envs.items()} + episode_deltas = {key: [] for key in keys} + for timestep in tqdm(range(cfg.total_timesteps), desc="training dqn"): + exploration_rate = cfg.exploration_rate_scheduler(timestep / cfg.total_timesteps) + for key in keys: + if hasattr(envs[key], "possible_actions"): + possible_actions = envs[key].env.possible_actions() + else: + possible_actions = list(range(envs[key].action_space.n)) + + if cfg.mix_maximizer_and_minimizer_strategies: + minimizer_action = random.random() <= cfg.lambdas[key[1]] + else: + 
minimizer_action = {"max": False, "min": True}[key[1]] + + if cfg.parameter_noise_exploration: + q = buffer_models[key](observations[key]) + argmax_or_argmin = torch.argmin if minimizer_action else torch.argmax + action = possible_actions[argmax_or_argmin(q[possible_actions]).item()] + else: + explore = random.random() <= exploration_rate + if explore: + action = random.choice(possible_actions) + else: + q = target_networks[key](observations[key]) + argmax_or_argmin = torch.argmin if minimizer_action else torch.argmax + # argmax_or_argmin = {"max": torch.argmax, "min": torch.argmin}[key[1]] + action = possible_actions[argmax_or_argmin(q[possible_actions]).item()] + + next_observation, delta, done, truncated, _ = envs[key].step(action) + done = done or truncated + episode_deltas[key].append(delta) + + replay_buffers[key].add( observation = observations[key], + action = action, + next_observation = next_observation, + delta = delta, + possible_actions = possible_actions, + done = done, + minimizer_action = minimizer_action ) + + observations[key] = next_observation + + if done or truncated: + observations[key], _ = envs[key].reset() + + stats.totals[key][timestep] = float(sum(episode_deltas[key])) + stats.episode_lengths[key][timestep] = len(episode_deltas[key]) + episode_deltas[key] = [] + + if cfg.parameter_noise_exploration: + update_buffer_model(key, exploration_rate=cfg.exploration_rate_scheduler(timestep / cfg.total_timesteps)) + + record_q_values = cfg.plot_q_values and timestep % cfg.plot_q_values_frequency == 0 + if record_q_values: + stats.q_values[key][timestep] = { + observation_for_q_value: q_networks[key](tensor([*observation_for_q_value], dtype=torch.float)).tolist() + for observation_for_q_value in cfg.observations_for_plotting_q_values + } + + + train = timestep >= cfg.learning_starts and timestep % cfg.train_frequency == 0 + if train: + network_key, max_or_min_key = key + with no_grad(): + target_argmax_networks = q_networks if cfg.double_q_learning else target_networks + if cfg.true_double_q_learning: + target_argmax_network = target_argmax_networks[{1: 0, 0: 1}[network_key], max_or_min_key] + else: + target_argmax_network = target_argmax_networks[key] + + is_scheduled = isinstance(cfg.fraction_samples_from_opposite_replay_buffer, Callable) + if is_scheduled: + fraction_samples_from_opposite_replay_buffer = cfg.fraction_samples_from_opposite_replay_buffer(timestep / cfg.total_timesteps) + else: + fraction_samples_from_opposite_replay_buffer = cfg.fraction_samples_from_opposite_replay_buffer + + if fraction_samples_from_opposite_replay_buffer == 0: + data = replay_buffers[key].sample(cfg.batch_size) + else: + network_key, max_or_min_key = key + num_samples_from_opposite = int(fraction_samples_from_opposite_replay_buffer * cfg.batch_size) + num_samples_from_corresponding = cfg.batch_size - num_samples_from_opposite + data = replay_buffers[key].sample(num_samples_from_corresponding) + data_from_opposite = replay_buffers[network_key, {"max": "min", "min": "max"}[max_or_min_key]].sample(num_samples_from_opposite) + data = data.concatenate(data_from_opposite) + data.shuffle() + + target = target_networks[key](data.next_observations) + target_for_argmax_and_argmin = target_argmax_network(data.next_observations) + target_for_argmax = where(data.possible_actions, target_for_argmax_and_argmin, float("-inf")) + target_for_argmin = where(data.possible_actions, target_for_argmax_and_argmin, float("inf")) + target_argmax = target_for_argmax.argmax(-1) + target_argmin = 
target_for_argmin.argmin(-1) + target_max = target.gather(-1, target_argmax.unsqueeze(-1)).squeeze(-1) + target_min = target.gather(-1, target_argmin.unsqueeze(-1)).squeeze(-1) + + lambda_ = cfg.lambdas[max_or_min_key] + target_max_min_mix = (1 - lambda_) * target_min + lambda_ * target_max + + td_target = data.deltas + cfg.discount * target_max_min_mix * data.dones.logical_not().float() + + if cfg.train_corresponding_network: + # this does the training twice (once for the maximizer and once for the minimizer) instead of just once + # fix this + for max_or_min, corresponding_actions in [ ("max", data.minimizer_actions.logical_not()), + ("min", data.minimizer_actions) ]: + q = q_networks[network_key, max_or_min](data.observations[corresponding_actions, ...]) + q = q.gather(-1, data.actions[corresponding_actions, ...].unsqueeze(-1)).squeeze(-1) + td_loss = mse_loss(q, td_target[corresponding_actions]) + stats.td_losses[network_key, max_or_min][timestep] = td_loss.item() + optimizers[network_key, max_or_min].zero_grad() + td_loss.backward() + optimizers[network_key, max_or_min].step() + else: + q = q_networks[key](data.observations) + q = q.gather(-1, data.actions.unsqueeze(-1)).squeeze(-1) + td_loss = mse_loss(q, td_target) + stats.td_losses[key][timestep] = td_loss.item() + optimizers[key].zero_grad() + td_loss.backward() + optimizers[key].step() + + update_target_network = timestep >= cfg.learning_starts \ + and timestep % cfg.target_network_update_frequency == 0 + if update_target_network: + target_networks[key].load_state_dict(q_networks[key].state_dict()) + + if cfg.plot: + stats.plot(make_env=make_env) + + return {"maximizer": target_networks[0, "max"], "minimizer": target_networks[0, "min"]} + +@dataclass +class ReplayBufferSample: + observations: Tensor + actions: Tensor + next_observations: Tensor + deltas: Tensor + dones: Tensor + possible_actions: Tensor + minimizer_actions: Tensor | None + lambdas: Tensor | None + + def __post_init__(self): + assert self.observations.size(0) == self.actions.size(0) == self.next_observations.size(0) \ + == self.deltas.size(0) == self.dones.size(0) + if self.minimizer_actions is not None: + assert self.minimizer_actions.size(0) == self.observations.size(0) + if self.lambdas is not None: + assert self.lambdas.size(0) == self.observations.size(0) + + def size(self): + return self.observations.size(0) + + def concatenate(self, other: "ReplayBufferSample"): + assert (self.minimizer_actions is None) == (other.minimizer_actions is None) + assert (self.lambdas is None) == (other.lambdas is None) + + return ReplayBufferSample( observations = cat((self.observations, other.observations)), + actions = cat((self.actions, other.actions)), + next_observations = cat((self.next_observations, other.next_observations)), + deltas = cat((self.deltas, other.deltas)), + dones = cat((self.dones, other.dones)), + minimizer_actions = cat((self.minimizer_actions, other.minimizer_actions)) + if self.minimizer_actions is not None else None, + lambdas = cat((self.lambdas, other.lambdas)) + if self.lambdas is not None else None ) + + def shuffle(self): + shuffled_indices = randperm(self.size()) + self.observations = self.observations [shuffled_indices, ...] + self.actions = self.actions [shuffled_indices, ...] + self.next_observations = self.next_observations[shuffled_indices, ...] + self.deltas = self.deltas [shuffled_indices, ...] + self.dones = self.dones [shuffled_indices, ...] 
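+        # The optional per-sample fields below must be permuted with the same indices
+        # so that they stay aligned with the mandatory fields.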
+ if self.minimizer_actions is not None: + self.minimizer_actions = self.minimizer_actions[shuffled_indices, ...] + if self.lambdas is not None: + self.lambdas = self.lambdas [shuffled_indices, ...] + +class ReplayBuffer: + def __init__(self, size, num_actions=None): + self.size = size + self.initialized = False + self.num_actions = num_actions + + def add( self, + observation: Tensor, + action: int, + next_observation: Tensor, + delta: float, + done: bool, + possible_actions: Tensor | Iterable[int], + minimizer_action: bool | None = None, + lambda_: float | None = None ): + + if not isinstance(possible_actions, Tensor): + assert all(isinstance(action, int) for action in possible_actions) + assert all(action in range(self.num_actions) for action in possible_actions) + possible_actions = tensor([action in possible_actions for action in range(self.num_actions)]) + + if self.num_actions is None: + self.num_actions = possible_actions.numel() + + if not self.initialized: + device = observation.device + self.observations = empty(self.size, *observation.shape, dtype=torch.float, device=device) + self.actions = empty(self.size, dtype=int, device=device) + self.next_observations = empty(self.size, *next_observation.shape, dtype=torch.float, device=device) + self.deltas = empty(self.size, dtype=torch.float, device=device) + self.dones = empty(self.size, dtype=bool, device=device) + self.possible_actions = empty(self.size, self.num_actions, dtype=bool, device=device) + self.minimizer_actions = empty(self.size, dtype=bool, device=device) \ + if minimizer_action is not None else None + self.lambdas = empty(self.size, dtype=torch.float, device=device) \ + if lambda_ is not None else None + + self.num_written = 0 + + self.initialized = True + + self.observations [self.num_written % self.size, ...] = observation + self.actions [self.num_written % self.size] = action + self.next_observations [self.num_written % self.size, ...] = next_observation + self.deltas [self.num_written % self.size] = delta + self.dones [self.num_written % self.size] = done + self.possible_actions [self.num_written % self.size, ...] 
= possible_actions + if self.minimizer_actions is not None: + self.minimizer_actions[self.num_written % self.size] = minimizer_action + if self.lambdas is not None: + self.lambdas [self.num_written % self.size] = lambda_ + + self.num_written += 1 + + def sample(self, sample_size: int) -> ReplayBufferSample: + if sample_size == 0: + return ReplayBufferSample( observations = tensor([]), + actions = tensor([], dtype=torch.int), + next_observations = tensor([]), + deltas = tensor([]), + dones = tensor([], dtype=torch.bool), + possible_actions = tensor([], dtype=torch.bool), + minimizer_actions = tensor([], dtype=torch.bool) if self.minimizer_actions is not None else None, + lambdas = tensor([]) if self.lambdas is not None else None ) + + assert sample_size <= self.num_written + all_indices = range(min(self.num_written, self.size)) + sample_indices = tensor(random.sample(all_indices, sample_size)) + return ReplayBufferSample( observations = self.observations [sample_indices, ...], + actions = self.actions [sample_indices], + next_observations = self.next_observations[sample_indices, ...], + deltas = self.deltas [sample_indices], + dones = self.dones [sample_indices], + possible_actions = self.possible_actions [sample_indices, ...], + minimizer_actions = self.minimizer_actions[sample_indices] + if self.minimizer_actions is not None else None, + lambdas = self.lambdas [sample_indices] + if self.lambdas is not None else None ) + +def smoothen(xs: Iterable[float] | Dict[int, float], smoothness: int): + if isinstance(xs, Dict): + return dict(zip( smoothen(xs.keys(), smoothness), + smoothen(xs.values(), smoothness) )) + + return [mean(chunk) for chunk in chunked(xs, smoothness)] + +@dataclass +class TabularQLeraningStatistics: + cfg: DQNConfig + q_values: Dict[str, Dict["observation", Dict[int, List[float]]]] = field(default_factory=lambda: {"max": dict(), "min": dict()}) + + def register_q_values(self, max_or_min, observation, timestep, q_values): + if observation not in self.q_values[max_or_min]: + self.q_values[max_or_min][observation] = dict() + self.q_values[max_or_min][observation][timestep] = list(q_values) # we need the call to list because it needs to be cloned! 
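+    # self.q_values maps max_or_min -> observation -> timestep -> per-action Q estimates;
+    # plot() below renders one dropdown entry per (max_or_min, observation) pair, with dashed
+    # lines for the planning agent's ground-truth admissible Q values as a reference.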
+ + def plot(self, make_env): + actions = list(range(len(next(iter(next(iter(self.q_values["max"].values())).values()))))) + fig = Figure() + + planning_agent = AgentMDPPlanning(params={}, world=make_env()) + plot_titles = [] + first_iteration = True + for max_or_min in ["max", "min"]: + for observation in tqdm(self.q_values[max_or_min], desc=f"calculating {max_or_min}imizer true q values"): + plot_titles.append(f"{max_or_min}imizer {observation=}") + for action in actions: + fig.add_scatter( x = list(self.q_values[max_or_min][observation].keys()), + y = [q[action] for q in self.q_values[max_or_min][observation].values()], + name = f"action {action}", + line = dict(color=DEFAULT_PLOTLY_COLORS[action]), + visible = first_iteration ) + if action in planning_agent.possible_actions(observation): + true_value = { "max": planning_agent.maxAdmissibleQ(observation, action), + "min": planning_agent.minAdmissibleQ(observation, action) }[max_or_min] + else: + true_value = None + fig.add_scatter( x = [0, self.cfg.total_timesteps - 1], + y = [true_value] * 2, + name = f"action {action} true q value", + line = dict(dash="dash", color=DEFAULT_PLOTLY_COLORS[action]), + visible=first_iteration ) + first_iteration = False + + fig.update_layout(updatemenus=[dict( type="dropdown", + direction="down", + buttons=[ dict( label=title, + method="update", + args=[dict(visible = [False] * 2 * len(actions) * i + + [True] * 2 * len(actions) + + [False] * 2 * len(actions) * (len(plot_titles) - i - 1) )] ) + for i, title in enumerate(plot_titles) ] )]) + + fig.show() + +class DQNTrainingStatistics: + episode_lengths: Dict[Tuple[int, str], Dict[int, float]] + totals: Dict[Tuple[int, str], Dict[int, float]] + td_losses: Dict[Tuple[int, str], Dict[int, float]] + q_values: Dict[Tuple[int, str] | float, Dict[int, Dict["observation", List[float]]]] + + def __init__(self, cfg: DQNConfig, keys: List[Tuple[int, str]]): + self.keys = keys + self.cfg = cfg + self.episode_lengths = {key: dict() for key in self.keys} + self.totals = {key: dict() for key in self.keys} + self.td_losses = {key: dict() for key in self.keys} + if self.cfg.lambdas_for_plotting_q_values is not None: + self.q_values = {lambda_: dict() for lambda_ in self.cfg.lambdas_for_plotting_q_values} + else: + self.q_values = {key: dict() for key in self.keys} + + def plot(self, make_env=None, observations_for_plotting_q_values=None): + fig = make_subplots( + rows = int(self.cfg.train_maximizer) + self.cfg.train_minimizer, + cols = 2 if self.cfg.true_double_q_learning else 1, + subplot_titles = [ f"{'second ' if network_key > 0 else ''}{max_or_min_key}imizer" + for network_key, max_or_min_key in self.keys ] + if self.keys != [()] else "" + ) + fig.update_layout(title=self.cfg.plot_title) + fig.update_layout(xaxis=dict(title="timestep")) + fig.update_layout(yaxis=dict(type="log")) + + for statistic_name, statistic in \ + [ ("episode length", self.episode_lengths), + ("total", self.totals), + ("td loss", self.td_losses) ]: + + for key in self.keys: + network_key, max_or_min_key = key if len(key) == 2 else [0, None] + smooth_statistic = smoothen(statistic[key], self.cfg.plot_smoothness) + fig.add_scatter( + x = list(smooth_statistic.keys()), + y = list(smooth_statistic.values()), + name = statistic_name, + mode = "markers", + row = 2 if self.cfg.train_maximizer and max_or_min_key == "max" else 1, + col = network_key + 1 + ) + + fig.show() + + if self.cfg.plot_q_values: + if observations_for_plotting_q_values is None: + observations_for_plotting_q_values = 
self.cfg.observations_for_plotting_q_values + + fig = Figure() + fig.update_layout( title = self.cfg.plot_title + " q values", + xaxis_title = "timestep", + yaxis_title = "q value" ) + + n_actions = make_env().action_space.n + + plot_titles = [] + visibilities = [] + first_iteration = True + for key in self.q_values.keys(): + if make_env is not None and isinstance(key, float): + planning_agent = AgentMDPPlanning(params={"maxLambda": key}, world=make_env()) + minimizer = False + elif make_env is not None and len(key) == 2: + planning_agent = AgentMDPPlanning(params={"maxLambda": self.cfg.lambdas[key[1]]}, world=make_env()) + minimizer = key[1] == "min" + else: + planning_agent = None + + for observation in observations_for_plotting_q_values: + plot_titles.append(f"{key} {observation}") + visibilities.append([] if visibilities == [] else [False] * len(visibilities[-1])) + for action in list(range(n_actions)): + if planning_agent is not None and action not in planning_agent.possible_actions(observation): + continue + + timesteps = list(self.q_values[key].keys()) + fig.add_scatter( x = timesteps, + y = [self.q_values[key][timestep].get(observation, [None] * n_actions)[action] for timestep in timesteps], + line = dict(color=DEFAULT_PLOTLY_COLORS[action]), + name = f"action {action}", + visible = first_iteration ) + visibilities[-1].append(True) + + if planning_agent is not None: + fig.add_scatter( x = [0, self.cfg.total_timesteps - 1], + y = [planning_agent.minAdmissibleQ(observation, action) if minimizer else planning_agent.maxAdmissibleQ(observation, action)] * 2, + line_color = DEFAULT_PLOTLY_COLORS[action], + line_dash = "dash", + name = f"action {action} true", + visible = first_iteration ) + visibilities[-1].append(True) + + first_iteration = False + + visibilities = [v + [False] * (len(visibilities[-1]) - len(v)) for v in visibilities] + + fig.update_layout(updatemenus=[dict( type="dropdown", + direction="down", + buttons=[ dict( label=title, + method="update", + args=[dict(visible=visibilities[i])] ) + for i, title in enumerate(plot_titles) ] )]) + + fig.show() + +class ToTensorWrapper(gym.Wrapper): + def __init__(self, env): + super().__init__(env) + + def _to_tensor(self, x): + if isinstance(x, tuple): + x = [*x] + return tensor(x, dtype=torch.float) + + def reset(self, *args, **kwargs): + observation, info = self.env.reset(*args, **kwargs) + return self._to_tensor(observation), info + + def step(self, *args, **kwargs): + observation, reward, done, truncated, info = self.env.step(*args, **kwargs) + return self._to_tensor(observation), reward, done, truncated, info diff --git a/src/satisfia/agents/learning/dqn/__init__.py b/src/satisfia/agents/learning/dqn/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/satisfia/agents/learning/dqn/agent_mdp_dqn.py b/src/satisfia/agents/learning/dqn/agent_mdp_dqn.py new file mode 100644 index 0000000..aa3dc80 --- /dev/null +++ b/src/satisfia/agents/learning/dqn/agent_mdp_dqn.py @@ -0,0 +1,241 @@ +from satisfia.agents.makeMDPAgentSatisfia import AspirationAgent +from satisfia.agents.learning.dqn.criteria import action_losses +from satisfia.util.interval_tensor import IntervalTensor, relative_position + +import torch +from torch import tensor, Tensor, zeros_like, full_like, allclose, inference_mode +from torch.distributions.categorical import Categorical +from torch.nn import Module +from functools import cache +from dataclasses import dataclass +from numbers import Number +from typing import List, Dict, Any, Callable + 
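+# Illustrative sketch, not part of the original implementation: local_policy (defined further
+# below) mixes two candidate actions whose aspiration midpoints lie on opposite sides of the
+# state aspiration midpoint, weighting them by the relative position of the state aspiration
+# between the two action aspirations. The helper below restates that rule on plain floats; it
+# assumes relative_position(lo, x, hi) behaves like (x - lo) / (hi - lo), consistent with how
+# it is called in local_policy, and is meant purely as documentation of the mixing rule.
+def _mixture_weight_sketch(low_action_mid: float, state_mid: float, high_action_mid: float) -> float:
+    # Probability of choosing the "ambitious" candidate so that the mixed midpoint equals
+    # state_mid, e.g. _mixture_weight_sketch(-2.0, 1.0, 4.0) == 0.5 and
+    # 0.5 * (-2.0) + 0.5 * 4.0 == 1.0 == state_mid. Clipped to [0, 1] as in local_policy.
+    p = (state_mid - low_action_mid) / (high_action_mid - low_action_mid)
+    return min(max(p, 0.0), 1.0)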
+@inference_mode() +def state_aspirations(criteria: Dict[str, Tensor], aspirations: IntervalTensor) -> IntervalTensor: + state_feasibility_intervals = IntervalTensor( criteria["minAdmissibleV"], + criteria["maxAdmissibleV"] ) + return aspirations.clip_to(state_feasibility_intervals) + +@inference_mode() +def action_aspirations( criteria: Dict[str, Tensor], + state_aspirations: IntervalTensor ) -> IntervalTensor: + + action_feasibility_intervals = IntervalTensor( criteria["minAdmissibleQ"], + criteria["maxAdmissibleQ"] ) + action_aspirations = state_aspirations.unsqueeze(-1) + action_aspirations = \ + (action_aspirations - action_aspirations.lower + action_feasibility_intervals.lower).where( + action_aspirations.lower < action_feasibility_intervals.lower, + action_aspirations + ) + action_aspirations = \ + (action_aspirations - action_aspirations.upper + action_feasibility_intervals.lower).where( + action_aspirations.upper > action_feasibility_intervals.upper, + action_aspirations + ) + action_aspirations = action_aspirations.clip_to(action_feasibility_intervals) + return action_aspirations + +@inference_mode() +def combined_action_losses( params: Dict[str, Any], + criteria: Dict[str, Tensor], + state_aspirations: IntervalTensor, + action_aspirations: IntervalTensor, + estimated_action_probabilities: Tensor ) -> Tensor: + + LOSS_COEFFICIENT_PREFIX = "lossCoeff4" + loss_coefficients = { param_name[len(LOSS_COEFFICIENT_PREFIX):]: param_value + for param_name, param_value in params.items() + if param_name.startswith(LOSS_COEFFICIENT_PREFIX) } + + action_losses_ = action_losses( list(loss_coefficients.keys()), + criteria, + state_aspirations, + action_aspirations, + estimated_action_probabilities ) + + combined_action_losses = zeros_like(action_aspirations.lower) + for loss_name, coefficient in loss_coefficients.items(): + if coefficient == 0: + continue + coecombined_action_losess += coefficient * action_losses_[loss_name] + + return combined_action_losses + +@inference_mode() +def action_propensities( params: Dict[str, Any], + criteria: Dict[str, Tensor], + state_aspirations: IntervalTensor, + action_aspirations: IntervalTensor ) -> Tensor: + + num_actions = action_aspirations.lower.size(-1) + losses = combined_action_losses( params, + criteria, + state_aspirations, + action_aspirations, + estimated_action_probabilities = 1 / num_actions ) + + # replaced 1e-100 by 1e-10 because with tocrh's precision, 1e-100 == 0 + return (-(losses - losses.min(-1, keepdim=True).values)).exp().maximum(tensor(1e-10)) + +@inference_mode() +def local_policy( params: Dict[str, Any], + criteria: Dict[str, Tensor], + aspirations: IntervalTensor ) -> Categorical: + + # TensorInterval[batch] + state_aspirations_ = state_aspirations(criteria, aspirations) + + # TensorInterval[batch, action] + action_aspirations_ = action_aspirations(criteria, state_aspirations_) + + # Tensor[batch, action] + action_propensities_ = action_propensities( params, + criteria, + state_aspirations=state_aspirations_, + action_aspirations=action_aspirations_ ) + + # TensorInterval[batch, action] + action_probabilities = action_propensities_ / action_propensities_.sum(-1, keepdim=True) + + # Tensor[batch, action] of bools + action_aspiration_midpoint_sides = \ + action_aspirations_.midpoint() > state_aspirations_.midpoint().unsqueeze(-1) + + # Tensor[batch, action_candidate] of bools + # action_aspiration_midpoints_close_to_state_aspiration_midpoints = \ + # (action_aspirations_.midpoint() - state_aspirations_.midpoint().unsqueeze(-1)).abs() 
<= 1e-5 + + # Tensor[batch, first_action_candidate, second_action_candidate] of bools + action_aspiration_midpoints_on_same_side = action_aspiration_midpoint_sides.unsqueeze(-1) \ + == action_aspiration_midpoint_sides.unsqueeze(-2) + + # action_aspiration_midpoints_on_same_side &= \ + # action_aspiration_midpoints_close_to_state_aspiration_midpoints.logical_not().unsqueeze(-1) + # action_aspiration_midpoints_on_same_side &= \ + # action_aspiration_midpoints_close_to_state_aspiration_midpoints.logical_not().unsqueeze(-2) + + # Tensor[batch, first_action_candidate] + first_action_candidate_probabilities = action_probabilities + + # Tensor[batch, first_action_candidate, second_action_candidate] + second_action_candidate_probabilities_conditional_on_first_action_candidate = \ + action_probabilities.unsqueeze(-1).where( + action_aspiration_midpoints_on_same_side.logical_not(), + full_like(action_probabilities.unsqueeze(-1), 1e-10) + ) + second_action_candidate_probabilities_conditional_on_first_action_candidate /= \ + second_action_candidate_probabilities_conditional_on_first_action_candidate.sum(-1, keepdim=True) + + # Tensor[batch, first_action_candidate, second_action_candidate] + action_pair_probabilities = \ + first_action_candidate_probabilities.unsqueeze(-1) \ + * second_action_candidate_probabilities_conditional_on_first_action_candidate + + # Tensor[batch, first_action_candidate, second_action_candidate] + action_candidate_mixture_probabilities = relative_position( + action_aspirations_.midpoint().unsqueeze(-2), + state_aspirations_.midpoint().unsqueeze(-1).unsqueeze(-1), + action_aspirations_.midpoint().unsqueeze(-1) + ).clip(0, 1) + + # Tensor[batch, action] + action_probabilities = \ + (action_pair_probabilities * action_candidate_mixture_probabilities ).sum(-1) \ + + (action_pair_probabilities * (1 - action_candidate_mixture_probabilities)).sum(-2) + + return Categorical(probs=action_probabilities, validate_args=True) + +class AgentMDPDQN(AspirationAgent): + def __init__(self, params: Dict[str, Any], + model: Callable[[Tensor], Dict[str, Tensor]], + num_actions: int, + device: str = "cpu" ): + + super().__init__(params) + + self.model = model + self.num_actions = num_actions + self.device = device + + @cache + def modelOutput(self, state, aspiration) -> Dict[str, Tensor]: + state = tensor(list(state), dtype=torch.float, device=self.device).unsqueeze(0) + aspiration_low, aspiration_high = aspiration + aspiration = IntervalTensor( tensor([aspiration_low], dtype=torch.float, device=self.device), + tensor([aspiration_high], dtype=torch.float, device=self.device) ) + output = self.model(state, aspiration, noisy=False) + return {key: value.squeeze(0) for key, value in output.items()} + + def maxAdmissibleQ(self, state, action): + return self.modelOutput(state, aspiration=(0, 0))["maxAdmissibleQ"][action].item() + + def minAdmissibleQ(self, state, action): + return self.modelOutput(state, aspiration=(0, 0))["minAdmissibleQ"][action].item() + + def Q(self, state, action, action_aspiration): + return self.modelOutput(state, action_aspiration)["Q"][action].item() + + def possible_actions(self, state=None): + return list(range(self.num_actions)) + + + def disorderingPotential_action(self, state, action): + raise NotImplemented() + + def agencyChange_action(self, state, action): + raise NotImplemented() + + + def LRAdev_action(self, state, action, aleph4action, myopic=False): + raise NotImplemented() + + def behaviorEntropy_action(self, state, actionProbability, action, aleph4action): + raise 
NotImplemented() + + def behaviorKLdiv_action(self, state, actionProbability, action, aleph4action): + raise NotImplemented() + + def trajectoryEntropy_action(self, state, actionProbability, action, aleph4action): + raise NotImplemented() + + def stateDistance_action(self, state, action, aleph4action): + raise NotImplemented() + + def causation_action(self, state, action, aleph4action): + raise NotImplemented() + + def causationPotential_action(self, state, action, aleph4action): + raise NotImplemented() + + def otherLoss_action(self, state, action, aleph4action): + raise NotImplemented() + + + def Q2(self, state, action, aleph4action): + raise NotImplemented() + + def Q3(self, state, action, aleph4action): + raise NotImplemented() + + def Q4(self, state, action, aleph4action): + raise NotImplemented() + + def Q5(self, state, action, aleph4action): + raise NotImplemented() + + def Q6(self, state, action, aleph4action): + raise NotImplemented() + + def Q_ones(self, state, action, aleph4action): + raise NotImplemented() + + def Q_DeltaSquare(self, state, action, aleph4action): + raise NotImplemented() + + def ETerminalState_action(self, state, action, aleph4action, policy="actual"): + raise NotImplemented() + + def ETerminalState2_action(self, state, action, aleph4action, policy="actual"): + raise NotImplemented() diff --git a/src/satisfia/agents/learning/dqn/bellman_formula.py b/src/satisfia/agents/learning/dqn/bellman_formula.py new file mode 100644 index 0000000..a3c2570 --- /dev/null +++ b/src/satisfia/agents/learning/dqn/bellman_formula.py @@ -0,0 +1,134 @@ +from satisfia.agents.learning.dqn.agent_mdp_dqn import local_policy +from satisfia.agents.learning.dqn.criteria import complete_criteria +from satisfia.agents.learning.dqn.config import DQNConfig +from satisfia.agents.learning.dqn.replay_buffer import ReplayBufferSample + +from torch import Tensor, argmax, argmin, max, min, where, zeros_like, no_grad +from typing import Dict, Callable + +@no_grad() +def bellman_formula( replay_buffer_sample: ReplayBufferSample, + q_network: Callable[[Tensor], Dict[str, Tensor]], + target_network: Callable[[Tensor], Dict[str, Tensor]], + predicted_criteria: Dict[str, Tensor], + cfg: DQNConfig ) -> Dict[str, Tensor]: + + criteria = dict() + + criterion_names = [ criterion + for criterion, coefficient in cfg.criterion_coefficients_for_loss.items() + if coefficient != 0 ] + + next_criteria = target_network( replay_buffer_sample.next_observations, + replay_buffer_sample.aspirations, + noisy=False ) + complete_criteria(next_criteria) + + if cfg.double_q_learning: + next_criteria_from_q_network = q_network( replay_buffer_sample.next_observations, + replay_buffer_sample.aspirations, + noisy=False ) + complete_criteria(next_criteria) + + if any( criterion_name in criterion_names + for criterion_name in ["Q", "Q2", "Q3", "Q4", "Q5", "Q6"] ): + + if cfg.frozen_model_for_exploration: + predicted_criteria_for_policy = cfg.frozen_model_for_exploration( + replay_buffer_sample.next_observations, + replay_buffer_sample.aspirations, + noisy=False + ) + complete_criteria(predicted_criteria_for_policy) + else: + predicted_criteria_for_policy = predicted_criteria + + policy = local_policy( cfg.satisfia_agent_params, + predicted_criteria_for_policy, + replay_buffer_sample.aspirations ) + + for max_or_min in ["max", "min"]: + criterion_name = f"{max_or_min}AdmissibleQ" + torch_max_or_min = {"max": max, "min": min} [max_or_min] + torch_argmax_or_argmin = {"max": argmax, "min": argmin}[max_or_min] + + if criterion_name in 
criterion_names: + if cfg.double_q_learning: + target_for_q_network = next_criteria_from_q_network[criterion_name] + target_max_or_min = next_criteria[criterion_name].gather( + -1, + torch_argmax_or_argmin(target_for_q_network, -1, keepdim=True) + ).squeeze(-1) + else: + target_max_or_min = torch_max_or_min(next_criteria[criterion_name], dim=-1) + + target_max_or_min = where( replay_buffer_sample.dones, + zeros_like(target_max_or_min), + target_max_or_min ) + criteria[criterion_name] = \ + replay_buffer_sample.deltas + cfg.discount * target_max_or_min + + if "Q2" in criterion_names: + assert "Q" in criterion_names + for i in range(3, 7): + if f"Q{i}" in criterion_names: + assert f"Q{i-1}" in criterion_names + + if "Q" in criterion_names: + V = (policy.probs * next_criteria["Q"]).sum(-1) + criteria["Q"] = replay_buffer_sample.deltas \ + + where( replay_buffer_sample.dones, + zeros_like(replay_buffer_sample.deltas), + cfg.discount * V ) + + if "Q2" in criterion_names: + V2 = (policy.probs * next_criteria["Q2"]).sum(-1) + criteria["Q2"] = replay_buffer_sample.delats ** 2 \ + + where( replay_buffer_sample.dones, + zeros_like(replay_buffer_sample.deltas), + 2 * cfg.discount * V + + cfg.discount ** 2 * V2 ) + + if "Q3" in criterion_names: + V3 = (policy.probs * next_criteria["Q3"]).sum(-1) + criteria["Q3"] = replay_buffer_sample.deltas ** 3 \ + + where( replay_buffer_sample.dones, + zeros_like(replay_buffer_sample.deltas), + 3 * cfg.discount * replay_buffer_sample.deltas ** 2 * V + + 3 * cfg.discount ** 2 * replay_buffer_sample.deltas * V2 + + cfg.discount ** 3 * V3 ) + + if "Q4" in criterion_names: + V4 = (policy.probs * next_criteria["Q4"]).sum(-1) + criteria["Q4"] = replay_buffer_sample.deltas ** 4 \ + + where( replay_buffer_sample.dones, + zeros_like(replay_buffer_sample.deltas), + 4 * cfg.discount * replay_buffer_sample.deltas ** 3 * V + + 6 * cfg.discount ** 2 * replay_buffer_sample.deltas ** 2 * V2 + + 4 * cfg.discount ** 3 * replay_buffer_sample.deltas * V3 + + cfg.discount ** 4 * V4 ) + + if "Q5" in criterion_names: + V5 = (policy.probs * next_criteria["Q5"]).sum(-1) + criteria["Q5"] = replay_buffer_sample.deltas ** 5 \ + + where( replay_buffer_sample.dones, + zeros_like(replay_buffer_sample.deltas), + 5 * cfg.discount * replay_buffer_sample.deltas ** 4 * V + + 10 * cfg.discount ** 2 * replay_buffer_sample.deltas ** 3 * V2 + + 10 * cfg.discount ** 3 * replay_buffer_sample.deltas ** 2 * V3 + + 5 * cfg.discount ** 4 * replay_buffer_sample.deltas * V4 + + cfg.discount ** 5 * V5 ) + + if "Q6" in criterion_names: + V6 = (policy.probs * next_criteria["Q6"]).sum(-1) + criteria["Q6"] = replay_buffer_sample.deltas ** 6 \ + + where( replay_buffer_sample.dones, + zeros_like(replay_buffer_sample.deltas), + 6 * cfg.discount * replay_buffer_sample.deltas ** 5 * V + + 15 * cfg.discount ** 2 * replay_buffer_sample.deltas ** 4 * V2 + + 20 * cfg.discount ** 3 * replay_buffer_sample.deltas ** 3 * V3 + + 15 * cfg.discount ** 4 * replay_buffer_sample.deltas ** 2 * V4 + + 6 * cfg.discount ** 5 * replay_buffer_sample.deltas * V5 + + cfg.discount ** 6 * V6 ) + + return criteria diff --git a/src/satisfia/agents/learning/dqn/config.py b/src/satisfia/agents/learning/dqn/config.py new file mode 100644 index 0000000..0e7eff6 --- /dev/null +++ b/src/satisfia/agents/learning/dqn/config.py @@ -0,0 +1,94 @@ +from satisfia.agents.makeMDPAgentSatisfia import AgentMDPPlanning +from satisfia.util.interval_tensor import IntervalTensor + +from torch import Tensor, rand +from torch.nn import Module, MSELoss +from dataclasses 
import dataclass, field +from more_itertools import pairwise +from typing import Any, Tuple, List, Dict, Callable + +class ConstantScheduler: + def __init__(self, value: float): + self.value = value + + def __call__(self, _) -> float: + return self.value + +class PiecewiseLinearScheduler: + def __init__(self, x: List[float], y: List[float]): + self.x = x + self.y = y + assert len(self.x) == len(self.y) + assert len(self.x) > 0 + assert all(x1 < x2 for x1, x2 in pairwise(self.x)) + + def __call__(self, x: float): + if x <= self.x[0]: + return self.y[0] + if x >= self.x[-1]: + return self.y[-1] + for (x1, x2), (y1, y2) in zip(pairwise(self.x), pairwise(self.y)): + if x1 <= x <= x2: + return y1 + (x - x1) / (x2 - x1) * (y2 - y1) + assert False, "unreachable" + +def uniform(size, min, max): + return min + (max - min) * rand(size) + +@dataclass +class UniformAspirationSampler: + min_aspiration: float + max_aspiration: float + + def __call__(self, size: int) -> IntervalTensor: + a = uniform(size, self.min_aspiration, self.max_aspiration) + b = uniform(size, self.min_aspiration, self.max_aspiration) + return IntervalTensor(a.minimum(b), a.maximum(b)) + +@dataclass +class UniformPointwiseAspirationSampler: + min_aspiration: float + max_aspiration: float + + def __call__(self, size: int) -> IntervalTensor: + a = uniform(size, self.min_aspiration, self.max_aspiration) + return IntervalTensor(a, a) + +@dataclass +class DQNConfig: + total_timesteps: int = 500_000 + num_envs: int = 1 + async_envs: bool = True + buffer_size = 10_000 + learning_rate_scheduler: Callable[[float], float] = \ + ConstantScheduler(1e-3) + batch_size: int = 128 + training_starts: int = 10_000 + training_frequency: int = 10 + target_network_update_frequency: int = 500 + soft_target_network_update_coefficient: float = 0. 
+ discount: float = 0.99 + criterion_coefficients_for_loss: Dict[str, float] = \ + field(default_factory=lambda: dict(maxAdmissibleQ=1., minAdmissibleQ=1.)) + criterion_loss_fns: Dict[str, Callable[[Tensor, Tensor], Tensor]] = \ + field(default_factory=lambda: dict( maxAdmissibleQ = MSELoss(), + minAdmissibleQ = MSELoss(), + Q = MSELoss() )) + double_q_learning: bool = True + exploration_rate_scheduler: Callable[[float], float] = \ + PiecewiseLinearScheduler([0., 0.5, 1.], [1., 0.05, 0.05]) + noisy_network_exploration: bool = True + noisy_network_exploration_rate_scheduler: Callable[[float], float] = \ + PiecewiseLinearScheduler([0., 0.5, 1.], [1., 0.05, 0.05]) + frozen_model_for_exploration: Module | None = None + satisfia_policy: bool = True + satisfia_agent_params: Dict[str, Any] = \ + field(default_factory=lambda: dict(defaultPolicy=None)) + aspiration_sampler: Callable[[int], IntervalTensor] = None + device: str = "cpu" + plotted_criteria: List[str] | None = None + plot_criteria_frequency: int | None = None + states_for_plotting_criteria: List | None = None + state_aspirations_for_plotting_criteria: List | None = None + actions_for_plotting_criteria: List | None = None + planning_agent_for_plotting_ground_truth: AgentMDPPlanning | None = None \ No newline at end of file diff --git a/src/satisfia/agents/learning/dqn/criteria.py b/src/satisfia/agents/learning/dqn/criteria.py new file mode 100644 index 0000000..ecfc527 --- /dev/null +++ b/src/satisfia/agents/learning/dqn/criteria.py @@ -0,0 +1,20 @@ +from satisfia.util.interval_tensor import IntervalTensor + +from torch import Tensor, no_grad +from typing import List, Dict + +def action_losses( loss_names: List[str], + criteria: Dict[str, Tensor], + state_aspirations: IntervalTensor, + action_aspirations: IntervalTensor, + estimated_action_probabilities: Tensor ) -> Tensor: + + ... 
+ +@no_grad() +def complete_criteria(criteria: Dict[str, Tensor]): + if "maxAdmissibleQ" in criteria and "maxAdmissibleV" not in criteria: + criteria["maxAdmissibleV"] = criteria["maxAdmissibleQ"].max(-1).values + + if "minAdmissibleQ" in criteria and "minAdmissibleV" not in criteria: + criteria["minAdmissibleV"] = criteria["minAdmissibleQ"].min(-1).values \ No newline at end of file diff --git a/src/satisfia/agents/learning/dqn/exploration_strategy.py b/src/satisfia/agents/learning/dqn/exploration_strategy.py new file mode 100644 index 0000000..3cc7e40 --- /dev/null +++ b/src/satisfia/agents/learning/dqn/exploration_strategy.py @@ -0,0 +1,57 @@ +from satisfia.agents.learning.dqn.config import DQNConfig +import satisfia.agents.learning.dqn.agent_mdp_dqn as agent_mpd_dqn +from satisfia.agents.learning.dqn.criteria import complete_criteria +from satisfia.util.interval_tensor import IntervalTensor + +from torch import Tensor, empty, ones, full_like, randint, bernoulli, no_grad +from torch.nn import Module +from torch.distributions.categorical import Categorical + +class ExplorationStrategy: + def __init__(self, target_network: Module, cfg: DQNConfig, num_actions: int): + self.target_network = target_network + self.cfg = cfg + self.num_actions = num_actions + + self.aspirations = IntervalTensor( empty(self.cfg.num_envs, device=cfg.device), + empty(self.cfg.num_envs, device=cfg.device) ) + self.on_done(dones=ones(self.cfg.num_envs, dtype=bool, device=cfg.device), timestep=0) + + @no_grad() + def __call__(self, observations: Tensor, timestep: int): + actions = self.satisfia_policy_actions(observations).sample() + + exploration_rate = self.cfg.exploration_rate_scheduler(timestep / self.cfg.total_timesteps) + explore = bernoulli(full_like(actions, exploration_rate, dtype=float)).bool() + actions[explore] = randint( low=0, + high=self.num_actions, + size=(explore.int().sum().item(),), + device=self.cfg.device ) + + return actions + + @no_grad() + def satisfia_policy_actions(self, observations: Tensor) -> Categorical: + criteria = self.target_network(observations, self.aspirations) + complete_criteria(criteria) + return agent_mpd_dqn.local_policy( self.cfg.satisfia_agent_params, + criteria, + self.aspirations ) + + @no_grad() + def on_done(self, dones: Tensor, timestep: int): + self.new_aspirations(which=dones) + + if self.cfg.noisy_network_exploration: + self.new_network_noise(timestep=timestep, which_in_batch=dones) + + @no_grad() + def new_aspirations(self, which: Tensor): + num_new_aspirations = sum(which.int()).item() + self.aspirations[which] = \ + self.cfg.aspiration_sampler(num_new_aspirations).to(self.cfg.device) + + @no_grad() + def new_network_noise(self, timestep: int, which_in_batch: Tensor | None = None): + std = self.cfg.noisy_network_exploration_rate_scheduler(timestep / self.cfg.total_timesteps) + self.target_network.new_noise(std, which_in_batch) diff --git a/src/satisfia/agents/learning/dqn/replay_buffer.py b/src/satisfia/agents/learning/dqn/replay_buffer.py new file mode 100644 index 0000000..1c30ac4 --- /dev/null +++ b/src/satisfia/agents/learning/dqn/replay_buffer.py @@ -0,0 +1,70 @@ +from satisfia.util.interval_tensor import IntervalTensor + +import torch +from torch import Tensor, empty, arange +import random +from dataclasses import dataclass + +@dataclass +class ReplayBufferSample: + observations: Tensor + actions: Tensor + deltas: Tensor + dones: Tensor + next_observations: Tensor + aspirations: IntervalTensor + + def to(self, device: str) -> "ReplayBufferSample": + return 
ReplayBufferSample( observations = self.observations .to(device), + actions = self.actions .to(device), + deltas = self.deltas .to(device), + dones = self.dones .to(device), + next_observations = self.next_observations.to(device), + aspirations = self.aspirations .to(device) ) + +class ReplayBuffer: + def __init__(self, size, device="cpu"): + self.size = size + self.device = device + self.num_written = 0 + self.initialized = False + + def add( self, + observations: Tensor, + actions: Tensor, + deltas: Tensor, + dones: Tensor, + next_observations: Tensor, + aspirations: IntervalTensor ): + + if not self.initialized: + self.observations = empty(self.size, *observations.shape[1:], device=self.device) + self.actions = empty(self.size, dtype=torch.long, device=self.device) + self.deltas = empty(self.size, device=self.device) + self.dones = empty(self.size, dtype=torch.bool, device=self.device) + self.next_observations = empty(self.size, *next_observations.shape[1:], device=self.device) + self.aspirations = IntervalTensor( empty(self.size, device=self.device), + empty(self.size, device=self.device) ) + + self.initialized = True + + num_newly_written = observations.size(0) + + i_write = arange(self.num_written, self.num_written + num_newly_written) % self.size + self.observations [i_write, ...] = observations.float() + self.actions [i_write] = actions + self.deltas [i_write] = deltas.float() + self.dones [i_write] = dones + self.next_observations[i_write, ...] = next_observations.float() + self.aspirations [i_write] = aspirations + + self.num_written += num_newly_written + + def sample(self, how_many: int): + i = random.sample(range(min(self.num_written, self.size)), how_many) + return ReplayBufferSample( observations = self.observations[i, ...], + actions = self.actions[i], + deltas = self.deltas[i], + dones = self.dones[i], + next_observations = self.next_observations[i, ...], + aspirations = self.aspirations[i] ) \ No newline at end of file diff --git a/src/satisfia/agents/learning/dqn/train.py b/src/satisfia/agents/learning/dqn/train.py new file mode 100644 index 0000000..4fc5d8c --- /dev/null +++ b/src/satisfia/agents/learning/dqn/train.py @@ -0,0 +1,270 @@ +from satisfia.agents.learning.dqn.config import DQNConfig +from satisfia.agents.learning.dqn.bellman_formula import bellman_formula +from satisfia.agents.learning.dqn.criteria import complete_criteria +from satisfia.agents.learning.dqn.replay_buffer import ReplayBuffer +from satisfia.agents.learning.dqn.exploration_strategy import ExplorationStrategy +from satisfia.agents.learning.environment_wrappers import RestrictToPossibleActionsWrapper +from satisfia.agents.learning.dqn.agent_mdp_dqn import AgentMDPDQN +from satisfia.util.interval_tensor import IntervalTensor + +from gymnasium import Env +from gymnasium.wrappers import AutoResetWrapper +from gymnasium.vector import AsyncVectorEnv, SyncVectorEnv +import torch +from torch import tensor, zeros_like +from torch.nn import Module +from torch.optim import AdamW, Optimizer +from joblib import Parallel, delayed +from dataclasses import dataclass, field +from collections import Counter +from statistics import mean +from tqdm import tqdm +from typing import Callable, Tuple, List, Dict +from plotly.colors import DEFAULT_PLOTLY_COLORS +from plotly.graph_objects import Figure + +def train_dqn( make_env: Callable[[], Env], + make_model: Callable[[], Module], + cfg: DQNConfig ) -> Module: + + stats = DQNTrainingStatistics(cfg) + + num_visits_per_state = Counter() + + q_network = make_model() + 
target_network = make_model() + target_network.load_state_dict(q_network.state_dict()) + optimizer = AdamW(q_network.parameters(), lr=cfg.learning_rate_scheduler(0)) + + make_envs = [ (lambda: AutoResetWrapper(RestrictToPossibleActionsWrapper(make_env()))) + for _ in range(cfg.num_envs) ] + envs = AsyncVectorEnv(make_envs) if cfg.async_envs else SyncVectorEnv(make_envs) + + exploration_strategy = ExplorationStrategy( + target_network + if cfg.frozen_model_for_exploration is None + else cfg.frozen_model_for_exploration, + cfg, + num_actions=envs.action_space.nvec[0] + ) + + replay_buffer = ReplayBuffer(cfg.buffer_size, device=cfg.device) + + observations, _ = envs.reset() + for timestep in tqdm(range(cfg.total_timesteps), desc="training dqn"): + for observation in observations: + num_visits_per_state[tuple(observation)] += 1 + + actions = exploration_strategy(tensor(observations, device=cfg.device), timestep=timestep) + + next_observations, deltas, dones, truncations, _ = envs.step(actions.cpu().numpy()) + + replay_buffer.add( observations = tensor(observations, device=cfg.device), + actions = actions, + deltas = tensor(deltas, device=cfg.device), + dones = tensor(dones | truncations, device=cfg.device), + next_observations = tensor(next_observations, device=cfg.device), + aspirations = IntervalTensor( + exploration_strategy.aspirations.lower, + exploration_strategy.aspirations.upper + ) ) + + observations = next_observations + + exploration_strategy.on_done(tensor(dones), timestep=timestep) + + register_criteria_in_stats = cfg.plotted_criteria is not None \ + and timestep % cfg.plot_criteria_frequency == 0 + if register_criteria_in_stats: + stats.register_criteria(q_network, timestep) + + train = timestep >= cfg.training_starts and timestep % cfg.training_frequency == 0 + if train: + set_learning_rate( optimizer, + cfg.learning_rate_scheduler(timestep / cfg.total_timesteps) ) + + replay_buffer_sample = replay_buffer.sample(cfg.batch_size).to(cfg.device) + + predicted_criteria = q_network( replay_buffer_sample.observations, + replay_buffer_sample.aspirations, + noisy=False ) + complete_criteria(predicted_criteria) + + td_target = bellman_formula( replay_buffer_sample, + q_network=q_network, + target_network=target_network, + predicted_criteria=predicted_criteria, + cfg=cfg ) + + loss = 0 + for criterion, coefficient in cfg.criterion_coefficients_for_loss.items(): + if coefficient == 0: + continue + + loss_fn = cfg.criterion_loss_fns[criterion] + prediction_for_actions = predicted_criteria[criterion].gather( + -1, + replay_buffer_sample.actions.unsqueeze(-1) + ).squeeze(-1) + loss += coefficient * loss_fn( + prediction_for_actions, + td_target[criterion] + ) + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + update_target_network = timestep >= cfg.training_starts \ + and timestep % cfg.target_network_update_frequency == 0 + if update_target_network: + for target_network_param, q_network_param in zip( target_network.parameters(), + q_network.parameters() ): + target_network_param.data.copy_( + cfg.soft_target_network_update_coefficient * target_network_param.data + + (1 - cfg.soft_target_network_update_coefficient) * q_network_param.data + ) + + print(f"{num_visits_per_state=}") + + if cfg.plotted_criteria is not None: + stats.plot_criteria(q_network, RestrictToPossibleActionsWrapper(make_env())) + + return q_network + +def set_learning_rate(optimizer: Optimizer, learning_rate: float) -> None: + for param_group in optimizer.param_groups: + param_group["lr"] = learning_rate + 
+def compute_total(agent, env, state, state_aspiration, first_action=None): + if isinstance(state_aspiration, (int, float)): + state_aspiration = (state_aspiration, state_aspiration) + + total = 0. + env.reset() # reset just in case + env.set_state(state) + observation = state + done = False + first_iteration = True + while not done: + if first_iteration and first_action is not None: + first_iteration = False + action = first_action + action_aspiration = agent.aspiration4action(state, action, state_aspiration) + else: + action, action_aspiration = agent.localPolicy(observation, state_aspiration).sample()[0] + next_observation, delta, done, truncated, _ = env.step(action) + done = done or truncated + total += delta + state_aspiration = agent.propagateAspiration(observation, action, action_aspiration, Edel=None, nextState=next_observation) + observation = next_observation + return total + +@dataclass +class DQNTrainingStatistics: + cfg: DQNConfig + criterion_history: Dict[Tuple["timestep", "state", "state_aspiration", "criterion", "action"], float] = \ + field(default_factory=lambda: dict()) + + def register_criteria(self, model, timestep): + for state in self.cfg.states_for_plotting_criteria: + for state_aspiration in self.cfg.state_aspirations_for_plotting_criteria: + state_as_tensor = tensor([state], dtype=torch.float, device=self.cfg.device) + state_aspiration_low, state_aspiration_high = state_aspiration + state_aspiration_as_tensor = IntervalTensor( + tensor([state_aspiration_low], dtype=torch.float, device=self.cfg.device), + tensor([state_aspiration_high], dtype=torch.float, device=self.cfg.device) + ) + criteria = model(state_as_tensor, state_aspiration_as_tensor, noisy=False) + complete_criteria(criteria) + for criterion in self.cfg.plotted_criteria: + for action in self.cfg.actions_for_plotting_criteria: + self.criterion_history[timestep, state, state_aspiration, criterion, action] = \ + criteria[criterion].squeeze(0)[action].item() + + def ground_truth_criteria(self, model, env) -> Dict[Tuple["state", "state_aspiration", "criterion", "action"], float] | None: + criteria = dict() + + for state in self.cfg.states_for_plotting_criteria: + for state_aspiration in self.cfg.state_aspirations_for_plotting_criteria: + for criterion in self.cfg.plotted_criteria: + for action in self.cfg.actions_for_plotting_criteria: + criterion_function =\ + getattr(self.cfg.planning_agent_for_plotting_ground_truth, criterion) + + if criterion in ["maxAdmissibleQ", "minAdmissibleQ"]: + criterion_value = criterion_function(state, action) + elif criterion in ["Q"]: + agent = AgentMDPDQN( self.cfg.satisfia_agent_params, + model + if self.cfg.frozen_model_for_exploration is None + else self.cfg.frozen_model_for_exploration, + env.action_space.n ) + criterion_value = mean(Parallel(n_jobs=-1)( + delayed(compute_total)( agent, + env, + state, + state_aspiration, + first_action=action ) + for _ in tqdm(range(1000)) # TO DO: ERROR BARS!!! 
+ )) + else: + raise ValueError(f"Unknown criterion '{criterion}'.") + + criteria[state, state_aspiration, criterion, action] = criterion_value + + return criteria + + def plot_criteria(self, model, env): + ground_truth_criteria = self.ground_truth_criteria(model, env) + + fig = Figure() + fig.update_layout(title = "Predictde criteria during DQN training.") + + timesteps = sorted(list(set(x[0] for x in self.criterion_history.keys()))) + dropdown_menu_titles = [] + first_iteration = True + for criterion in self.cfg.plotted_criteria: + for state in self.cfg.states_for_plotting_criteria: + for state_aspiration in self.cfg.state_aspirations_for_plotting_criteria: + dropdown_menu_titles.append(f"{criterion} in state {state} with state aspiration {state_aspiration}") + for action in self.cfg.actions_for_plotting_criteria: + fig.add_scatter( + x = timesteps, + y = [ self.criterion_history[timestep, state, state_aspiration, criterion, action] + for timestep in timesteps ], + line = dict(color=DEFAULT_PLOTLY_COLORS[action]), + name = f"action {action}", + visible = first_iteration + ) + if ground_truth_criteria is not None: + fig.add_scatter( + x = [timesteps[0], timesteps[-1]], + y = [ground_truth_criteria[state, state_aspiration, criterion, action]] * 2, + line = dict(dash="dot", color=DEFAULT_PLOTLY_COLORS[action]), + name = f"action {action} ground truth", + visible = first_iteration + ) + first_iteration = False + + num_plotted_actions = len(self.cfg.actions_for_plotting_criteria) + num_scatters_per_dropdown_menu_option = \ + num_plotted_actions if ground_truth_criteria is None else 2 * num_plotted_actions + fig.update_layout(updatemenus=[dict( + direction="down", + showactive=True, + buttons=[ + dict( label=menu_title, + method="update", + args=[dict( + visible = [False] * i + * num_scatters_per_dropdown_menu_option + + [True] * num_scatters_per_dropdown_menu_option + + [False] * (len(dropdown_menu_titles) - i - 1) + * num_scatters_per_dropdown_menu_option + )] ) + for i, menu_title in enumerate(dropdown_menu_titles) + ] + )]) + + fig.show() diff --git a/src/satisfia/agents/learning/environment_wrappers.py b/src/satisfia/agents/learning/environment_wrappers.py new file mode 100644 index 0000000..d9b5f1a --- /dev/null +++ b/src/satisfia/agents/learning/environment_wrappers.py @@ -0,0 +1,22 @@ +from gymnasium import Env, Wrapper +import random + +DO_NOTHING_ACTION = 4 + +class RestrictToPossibleActionsWrapper(Wrapper): + def __init__(self, env: Env, default_action: int = DO_NOTHING_ACTION): + super().__init__(env) + self.default_action = default_action + + def reset(self, *args, **kwargs): + return self.env.reset(*args, **kwargs) + + def step(self, action, *args, **kwargs): + possible_actions = self.env.possible_actions() + if action not in possible_actions: + # ah, this condition should always be true, but for some reason isn't in GW23 + if self.default_action in possible_actions: + action = self.default_action + else: + action = random.choice(possible_actions) + return self.env.step(action, *args, **kwargs) \ No newline at end of file diff --git a/src/satisfia/agents/learning/models/building_blocks.py b/src/satisfia/agents/learning/models/building_blocks.py new file mode 100644 index 0000000..852396a --- /dev/null +++ b/src/satisfia/agents/learning/models/building_blocks.py @@ -0,0 +1,244 @@ +from satisfia.util.interval_tensor import IntervalTensor + +from torch import Tensor, cat, stack, empty, zeros, ones, no_grad +from torch.nn import Module, Linear, ReLU, LayerNorm, Dropout, Parameter, 
ModuleList, ModuleDict +from more_itertools import pairwise +from math import sqrt +from typing import List, Dict, Callable + +def concatenate_observations_and_aspirations( observation: Tensor, + aspiration: IntervalTensor ) -> Tensor: + + return cat((observation, stack((aspiration.lower, aspiration.upper), dim=-1)), dim=-1) + +class LinearReturningDict(Module): + def __init__(self, in_features: int, out_features: Dict[str, int], bias: bool = True): + super().__init__() + self.linears = ModuleDict({ key: Linear(in_features, out_feats, bias=bias) + for key, out_feats in out_features.items() }) + + def forward(self, x: Tensor) -> Dict[str, Tensor]: + return {key: linear(x) for key, linear in self.linears.items()} + +class NoisyLinear(Module): + def __init__(self, in_features: int, + out_features: int, + same_noise_along_batch: bool = False, + batch_size: int | None = None, + bias: bool = True ): + + super().__init__() + + assert (batch_size is not None) == same_noise_along_batch + + self.in_features = in_features + self.out_features = out_features + self.same_noise_along_batch = same_noise_along_batch + self.batch_size = batch_size + + self.weight = Parameter(empty(out_features, in_features)) + self.bias = Parameter(empty(out_features)) if bias else None + k = 1 / sqrt(self.in_features) + self.weight.data.uniform_(-k, k) + if bias: + self.bias .data.uniform_(-k, k) + + if self.same_noise_along_batch: + self.weight_noise = Parameter(zeros(out_features, in_features)) + self.bias_noise = Parameter(zeros(out_features)) if bias else None + else: + self.weight_noise = Parameter(zeros(batch_size, out_features, in_features)) + self.bias_noise = Parameter(zeros(batch_size, out_features)) if bias else None + + def forward(self, x, noisy=True): + assert x.dim() == 2 + if noisy and self.batch_size is not None: + assert x.size(0) == self.batch_size + + weight = self.weight + self.weight_noise if noisy else self.weight + bias = self.bias + self.bias_noise if noisy else self.bias + return (x.unsqueeze(-2) * weight).sum(-1) + bias + + @no_grad() + def new_noise(self, std: float, which_in_batch: Tensor | None = None): + k = 1 / sqrt(self.in_features) + if self.same_noise_along_batch: + self.weight_noise.data.normal_(k * std) + if self.bias_noise is not None: + self.bias_noise.data.normal_(k * std) + else: + if which_in_batch is None: + which_in_batch = ones(self.batch_size, dtype=bool) + self.weight_noise.data[which_in_batch, ...].normal_(k * std) + if self.bias_noise is not None: + self.bias_noise.data[which_in_batch, ...].normal_(k * std) + +class NoisyLinearReturningDict(Module): + def __init__( self, in_features: int, + out_features: Dict[str, int], + same_noise_along_batch: bool = False, + batch_size: int | None = None, + bias: bool = True ): + + super().__init__() + self.noisy_linears = ModuleDict({ + key: NoisyLinear( in_features=in_features, + out_features=out_feats, + same_noise_along_batch=same_noise_along_batch, + batch_size=batch_size, + bias=bias ) + for key, out_feats in out_features.items() + }) + + def forward(self, x, noisy=True): + return { key: noisy_linear(x, noisy=noisy) + for key, noisy_linear in self.noisy_linears.items() } + + @no_grad() + def new_noise(self, std: float, which_in_batch: Tensor | None = None): + for noisy_linear in self.noisy_linears.values(): + noisy_linear.new_noise(std=std, which_in_batch=which_in_batch) + +class NoisyMLP(Module): + def __init__(self, layer_sizes: List[int], + activation_function: Callable[[Tensor], Tensor] = ReLU(), + layer_norms: bool = True, + 
dropout: int | None = None, + same_noise_along_batch: bool = True, + batch_size: int | None = None, + final_activation_function: bool = False ): + + super().__init__() + + self.activation_function = activation_function + self.final_activation_function = final_activation_function + + self.noisy_linears = ModuleList() + for size_in, size_out in pairwise(layer_sizes): + if isinstance(size_out, Dict): + self.noisy_linears.append( + NoisyLinearReturningDict( size_in, + size_out, + same_noise_along_batch = same_noise_along_batch, + batch_size = batch_size ) + ) + else: + self.noisy_linears.append( + NoisyLinear( size_in, + size_out, + same_noise_along_batch = same_noise_along_batch, + batch_size = batch_size ) + ) + + if layer_norms: + layer_norm_sizes = layer_sizes[1:] if final_activation_function else layer_sizes[1:-1] + self.layer_norms = ModuleList(LayerNorm(size) for size in layer_norm_sizes) + else: + self.layer_norms = None + + if dropout is not None: + self.dropout = Dropout(dropout) + else: + self.dropout = None + + def forward(self, x: Tensor, noisy: bool = True): + for i, noisy_linear in enumerate(self.noisy_linears): + x = noisy_linear(x, noisy=noisy) + + last_iteration = i == len(self.noisy_linears) - 1 + if not last_iteration or self.final_activation_function: + if self.layer_norms is not None: + x = self.layer_norms[i](x) + + x = self.activation_function(x) + + if self.dropout is not None: + x = self.dropout(x) + + return x + + def new_noise(self, std: int, which_in_batch: Tensor | None = None): + for noisy_linear in self.noisy_linears: + noisy_linear.new_noise(std=std, which_in_batch=which_in_batch) + + +class SatisfiaMLP(Module): + def __init__(self, input_size: int, + output_not_depending_on_agent_parameters_sizes: Dict[str, int], + output_depending_on_agent_parameters_sizes: Dict[str, int], + common_hidden_layer_sizes: List[int], + hidden_layer_not_depending_on_agent_parameters_sizes: List[int], + hidden_layer_depending_on_agent_parameters_sizes: List[int], + same_noise_along_batch: bool = True, + batch_size: int | None = None, + activation_function: Callable[[Tensor], Tensor] = ReLU(), + dropout: int | None = 0.1, + layer_norms: bool = True ): + + super().__init__() + + self.common_layers = NoisyMLP( + layer_sizes = [input_size] + common_hidden_layer_sizes, + activation_function = activation_function, + layer_norms = layer_norms, + dropout = dropout, + same_noise_along_batch = same_noise_along_batch, + batch_size = batch_size, + final_activation_function = True + ) + + last_common_layer_size = common_hidden_layer_sizes[-1]\ + if len(common_hidden_layer_sizes) > 0 \ + else input_size + + self.layers_not_depending_on_agent_parameters = NoisyMLP( + layer_sizes = [last_common_layer_size] + + hidden_layer_not_depending_on_agent_parameters_sizes + + [output_not_depending_on_agent_parameters_sizes], + activation_function = activation_function, + layer_norms = layer_norms, + dropout = dropout, + same_noise_along_batch = same_noise_along_batch, + batch_size = batch_size + ) + + agent_parameters_size = 2 + + self.layers_depending_on_agent_parameters = NoisyMLP( + layer_sizes = [last_common_layer_size + agent_parameters_size] + + hidden_layer_depending_on_agent_parameters_sizes + + [output_depending_on_agent_parameters_sizes], + activation_function = activation_function, + layer_norms = layer_norms, + dropout = dropout, + same_noise_along_batch = same_noise_along_batch, + batch_size = batch_size + ) + + def forward(self, observations: Tensor, aspirations: Tensor, noisy: bool = True): + 
agent_parameters_emebdding = stack((aspirations.lower, aspirations.upper), -1) + + common_hidden = self.common_layers( + observations, + noisy=noisy + ) + + output_not_depending_on_agent_parameters = self.layers_not_depending_on_agent_parameters( + common_hidden, + noisy = noisy + ) + + output_depending_on_agent_parameters = self.layers_depending_on_agent_parameters( + cat((common_hidden, agent_parameters_emebdding), -1), + noisy = noisy + ) + + assert set(output_not_depending_on_agent_parameters.keys()) \ + .isdisjoint(set(output_depending_on_agent_parameters.keys())) + + return output_not_depending_on_agent_parameters | output_depending_on_agent_parameters + + def new_noise(self, std: int, which_in_batch: Tensor | None = None): + self.common_layers .new_noise(std, which_in_batch) + self.layers_not_depending_on_agent_parameters.new_noise(std, which_in_batch) + self.layers_depending_on_agent_parameters .new_noise(std, which_in_batch) diff --git a/src/satisfia/util/interval_tensor.py b/src/satisfia/util/interval_tensor.py new file mode 100644 index 0000000..40dc1ee --- /dev/null +++ b/src/satisfia/util/interval_tensor.py @@ -0,0 +1,61 @@ +from torch import Tensor, full_like, stack +from dataclasses import dataclass +from numbers import Number +from typing import Union + +@dataclass +class IntervalTensor: + lower: Tensor + upper: Tensor + + def clip_to(self, bounds: "IntervalTensor"): + return IntervalTensor( self.lower.maximum(bounds.lower).minimum(bounds.upper), + self.upper.maximum(bounds.lower).minimum(bounds.upper) ) + + def midpoint(self) -> Tensor: + return (self.lower + self.upper) / 2 + + def unsqueeze(self, dim): + return IntervalTensor( self.lower.unsqueeze(dim), + self.upper.unsqueeze(dim) ) + + def where(self, condition: Tensor, other: "IntervalTensor") -> "IntervalTensor": + return IntervalTensor( self.lower.where(condition, other.lower), + self.upper.where(condition, other.upper) ) + + def to(self, device): + return IntervalTensor(self.lower.to(device), self.upper.to(device)) + + + def __add__(self, other: Union["IntervalTensor", Tensor, Number]) -> "IntervalTensor": + if isinstance(other, IntervalTensor): + return IntervalTensor( self.lower + other.lower, + self.upper + other.upper ) + + if isinstance(other, (Tensor, Number)): + return IntervalTensor( self.lower + other, + self.upper + other ) + + raise TypeError(f"Cannot add IntervalTensor and object of type '{type(other)}'.") + + def __sub__(self, other: Union["IntervalTensor", Tensor, Number]) -> "IntervalTensor": + if isinstance(other, IntervalTensor): + return IntervalTensor( self.lower + other.upper, + self.upper + other.lower ) + + if isinstance(other, (Tensor, Number)): + return IntervalTensor( self.lower + other, + self.upper + other ) + + raise TypeError(f"Cannot add IntervalTensor and object of type '{type(other)}'.") + + def __getitem__(self, i) -> "IntervalTensor": + return IntervalTensor(self.lower[i], self.upper[i]) + + def __setitem__(self, i, x: "IntervalTensor"): + self.lower[i] = x.lower + self.upper[i] = x.upper + +def relative_position(x, z, y, eps=1e-5): + diff = y - x + return ((z - x) / diff).where(diff.abs() >= eps, full_like(diff, 0.5)) \ No newline at end of file diff --git a/src/world_model/simple_gridworld.py b/src/world_model/simple_gridworld.py index 506e31e..1d12528 100644 --- a/src/world_model/simple_gridworld.py +++ b/src/world_model/simple_gridworld.py @@ -411,6 +411,11 @@ def _extract_state_attributes(self, state, gridcontents=False) -> tuple: ) return t, loc, imm_states, mc_locs, mv_locs, 
mv_states, gc + def set_state(self, state): + """This is a temporary workaround around me wanting to call _set_state but calling private + functions (i.e. ones whose names start with an underscore) being illegal.""" + self._set_state(state) + def _set_state(self, state): """Set the current state to the provided one.""" self._previous_agent_location = self._agent_location From bddd32edc2f2f6db7c3e2951ede6207d5c5ecfea Mon Sep 17 00:00:00 2001 From: Vladimir Ivanov Date: Mon, 3 Jun 2024 04:26:55 +0200 Subject: [PATCH 2/9] Added aspiration propagation to the exploration strategy during training. --- scripts/test_dqn.py | 6 +--- .../agents/learning/dqn/bellman_formula.py | 16 +++++----- .../learning/dqn/exploration_strategy.py | 30 ++++++++++++++++++- .../agents/learning/dqn/replay_buffer.py | 17 ++++++++--- src/satisfia/agents/learning/dqn/train.py | 14 +++++---- src/satisfia/util/interval_tensor.py | 10 +++++++ 6 files changed, 69 insertions(+), 24 deletions(-) diff --git a/scripts/test_dqn.py b/scripts/test_dqn.py index 38d3017..61d223f 100644 --- a/scripts/test_dqn.py +++ b/scripts/test_dqn.py @@ -68,7 +68,7 @@ def run_or_load(filename, function, *args, **kwargs): pickle.dump(result, f) return result -def compute_total(agent: AspirationAgent, env: gym.Env, aspiration4state: float | Tuple[float, float], verbose=False) -> float: +def compute_total(agent: AspirationAgent, env: gym.Env, aspiration4state: float | Tuple[float, float]) -> float: if isinstance(aspiration4state, (int, float)): aspiration4state = (aspiration4state, aspiration4state) @@ -77,10 +77,6 @@ def compute_total(agent: AspirationAgent, env: gym.Env, aspiration4state: float done = False while not done: action, aspiration4action = agent.localPolicy(observation, aspiration4state).sample()[0] - if verbose: - print(observation, total, aspiration, action) - print(agent.maximizer_model(tensor([*observation], dtype=torch.float)).tolist()) - print(agent.minimizer_model(tensor([*observation], dtype=torch.float)).tolist()) next_observation, delta, done, truncated, _ = env.step(action) done = done or truncated total += delta diff --git a/src/satisfia/agents/learning/dqn/bellman_formula.py b/src/satisfia/agents/learning/dqn/bellman_formula.py index a3c2570..18caf53 100644 --- a/src/satisfia/agents/learning/dqn/bellman_formula.py +++ b/src/satisfia/agents/learning/dqn/bellman_formula.py @@ -20,7 +20,7 @@ def bellman_formula( replay_buffer_sample: ReplayBufferSample, if coefficient != 0 ] next_criteria = target_network( replay_buffer_sample.next_observations, - replay_buffer_sample.aspirations, + replay_buffer_sample.next_aspirations, noisy=False ) complete_criteria(next_criteria) @@ -36,16 +36,16 @@ def bellman_formula( replay_buffer_sample: ReplayBufferSample, if cfg.frozen_model_for_exploration: predicted_criteria_for_policy = cfg.frozen_model_for_exploration( replay_buffer_sample.next_observations, - replay_buffer_sample.aspirations, + replay_buffer_sample.next_aspirations, noisy=False ) complete_criteria(predicted_criteria_for_policy) else: - predicted_criteria_for_policy = predicted_criteria + predicted_criteria_for_policy = next_criteria - policy = local_policy( cfg.satisfia_agent_params, - predicted_criteria_for_policy, - replay_buffer_sample.aspirations ) + next_policy = local_policy( cfg.satisfia_agent_params, + predicted_criteria_for_policy, + replay_buffer_sample.next_aspirations ) for max_or_min in ["max", "min"]: criterion_name = f"{max_or_min}AdmissibleQ" @@ -75,11 +75,11 @@ def bellman_formula( replay_buffer_sample: 
ReplayBufferSample, assert f"Q{i-1}" in criterion_names if "Q" in criterion_names: - V = (policy.probs * next_criteria["Q"]).sum(-1) + next_V = (next_policy.probs * next_criteria["Q"]).sum(-1) criteria["Q"] = replay_buffer_sample.deltas \ + where( replay_buffer_sample.dones, zeros_like(replay_buffer_sample.deltas), - cfg.discount * V ) + cfg.discount * next_V ) if "Q2" in criterion_names: V2 = (policy.probs * next_criteria["Q2"]).sum(-1) diff --git a/src/satisfia/agents/learning/dqn/exploration_strategy.py b/src/satisfia/agents/learning/dqn/exploration_strategy.py index 3cc7e40..71d8592 100644 --- a/src/satisfia/agents/learning/dqn/exploration_strategy.py +++ b/src/satisfia/agents/learning/dqn/exploration_strategy.py @@ -1,7 +1,7 @@ from satisfia.agents.learning.dqn.config import DQNConfig import satisfia.agents.learning.dqn.agent_mdp_dqn as agent_mpd_dqn from satisfia.agents.learning.dqn.criteria import complete_criteria -from satisfia.util.interval_tensor import IntervalTensor +from satisfia.util.interval_tensor import IntervalTensor, relative_position, interpolate from torch import Tensor, empty, ones, full_like, randint, bernoulli, no_grad from torch.nn import Module @@ -34,10 +34,38 @@ def __call__(self, observations: Tensor, timestep: int): def satisfia_policy_actions(self, observations: Tensor) -> Categorical: criteria = self.target_network(observations, self.aspirations) complete_criteria(criteria) + self.criteria = criteria return agent_mpd_dqn.local_policy( self.cfg.satisfia_agent_params, criteria, self.aspirations ) + # TO DO: move all this stuff into agent_mdp_dqn.py + def propagate_aspirations(self, actions: Tensor, next_observations: Tensor): + state_aspirations = agent_mpd_dqn.state_aspirations (self.criteria, self.aspirations) + action_aspirations = agent_mpd_dqn.action_aspirations(self.criteria, state_aspirations) + action_aspirations = action_aspirations.gather(-1, actions.unsqueeze(-1)).squeeze(-1) + + min_admissible_q = self.criteria["minAdmissibleQ"] + max_admissible_q = self.criteria["maxAdmissibleQ"] + min_admissible_q = min_admissible_q.gather(-1, actions.unsqueeze(-1)).squeeze(-1) + max_admissible_q = max_admissible_q.gather(-1, actions.unsqueeze(-1)).squeeze(-1) + + lambda_low = relative_position(min_admissible_q, action_aspirations.lower, max_admissible_q) + lambda_high = relative_position(min_admissible_q, action_aspirations.upper, max_admissible_q) + + # this will be recalculated in the next call to satisfia_policy_actions, which is necessary + # there the aspirations would be different + next_criteria = self.target_network(next_observations, self.aspirations) + complete_criteria(next_criteria) + + next_min_admissible_v = next_criteria["minAdmissibleV"] + next_max_admissible_v = next_criteria["minAdmissibleV"] + + return IntervalTensor( + interpolate(next_min_admissible_v, lambda_low, next_max_admissible_v), + interpolate(next_min_admissible_v, lambda_high, next_max_admissible_v) + ) + @no_grad() def on_done(self, dones: Tensor, timestep: int): self.new_aspirations(which=dones) diff --git a/src/satisfia/agents/learning/dqn/replay_buffer.py b/src/satisfia/agents/learning/dqn/replay_buffer.py index 1c30ac4..04bb38d 100644 --- a/src/satisfia/agents/learning/dqn/replay_buffer.py +++ b/src/satisfia/agents/learning/dqn/replay_buffer.py @@ -1,7 +1,7 @@ from satisfia.util.interval_tensor import IntervalTensor import torch -from torch import Tensor, empty, arange +from torch import Tensor, empty, arange, no_grad import random from dataclasses import dataclass @@ -13,6 
+13,7 @@ class ReplayBufferSample: dones: Tensor next_observations: Tensor aspirations: IntervalTensor + next_aspirations: IntervalTensor def to(self, device: str) -> "ReplayBufferSample": return ReplayBufferSample( observations = self.observations .to(device), @@ -20,7 +21,8 @@ def to(self, device: str) -> "ReplayBufferSample": deltas = self.deltas .to(device), dones = self.dones .to(device), next_observations = self.next_observations.to(device), - aspirations = self.aspirations .to(device) ) + aspirations = self.aspirations .to(device), + next_aspirations = self.next_aspirations .to(device) ) class ReplayBuffer: def __init__(self, size, device="cpu"): @@ -29,13 +31,15 @@ def __init__(self, size, device="cpu"): self.num_written = 0 self.initialized = False + @no_grad() def add( self, observations: Tensor, actions: Tensor, deltas: Tensor, dones: Tensor, next_observations: Tensor, - aspirations: IntervalTensor ): + aspirations: IntervalTensor, + next_aspirations: IntervalTensor ): if not self.initialized: self.observations = empty(self.size, *observations.shape[1:], device=self.device) @@ -45,6 +49,8 @@ def add( self, self.next_observations = empty(self.size, *next_observations.shape[1:], device=self.device) self.aspirations = IntervalTensor( empty(self.size, device=self.device), empty(self.size, device=self.device) ) + self.next_aspirations = IntervalTensor( empty(self.size, device=self.device), + empty(self.size, device=self.device) ) self.initialized = True @@ -57,9 +63,11 @@ def add( self, self.dones [i_write] = dones self.next_observations[i_write, ...] = next_observations.float() self.aspirations [i_write] = aspirations + self.next_aspirations [i_write] = next_aspirations self.num_written += num_newly_written + @no_grad() def sample(self, how_many: int): i = random.sample(range(min(self.num_written, self.size)), how_many) return ReplayBufferSample( observations = self.observations[i, ...], @@ -67,4 +75,5 @@ def sample(self, how_many: int): deltas = self.deltas[i], dones = self.dones[i], next_observations = self.next_observations[i, ...], - aspirations = self.aspirations[i] ) \ No newline at end of file + aspirations = self.aspirations[i], + next_aspirations = self.next_aspirations[i] ) diff --git a/src/satisfia/agents/learning/dqn/train.py b/src/satisfia/agents/learning/dqn/train.py index 4fc5d8c..ad2c644 100644 --- a/src/satisfia/agents/learning/dqn/train.py +++ b/src/satisfia/agents/learning/dqn/train.py @@ -59,20 +59,22 @@ def train_dqn( make_env: Callable[[], Env], next_observations, deltas, dones, truncations, _ = envs.step(actions.cpu().numpy()) + aspirations = exploration_strategy.aspirations + exploration_strategy.propagate_aspirations( actions, + tensor(next_observations, device=cfg.device) ) + + exploration_strategy.on_done(tensor(dones), timestep=timestep) + replay_buffer.add( observations = tensor(observations, device=cfg.device), actions = actions, deltas = tensor(deltas, device=cfg.device), dones = tensor(dones | truncations, device=cfg.device), next_observations = tensor(next_observations, device=cfg.device), - aspirations = IntervalTensor( - exploration_strategy.aspirations.lower, - exploration_strategy.aspirations.upper - ) ) + aspirations = aspirations, + next_aspirations = exploration_strategy.aspirations ) observations = next_observations - exploration_strategy.on_done(tensor(dones), timestep=timestep) - register_criteria_in_stats = cfg.plotted_criteria is not None \ and timestep % cfg.plot_criteria_frequency == 0 if register_criteria_in_stats: diff --git 
a/src/satisfia/util/interval_tensor.py b/src/satisfia/util/interval_tensor.py index 40dc1ee..ca8d772 100644 --- a/src/satisfia/util/interval_tensor.py +++ b/src/satisfia/util/interval_tensor.py @@ -15,6 +15,10 @@ def clip_to(self, bounds: "IntervalTensor"): def midpoint(self) -> Tensor: return (self.lower + self.upper) / 2 + def squeeze(self, dim): + return IntervalTensor( self.lower.squeeze(dim), + self.upper.squeeze(dim) ) + def unsqueeze(self, dim): return IntervalTensor( self.lower.unsqueeze(dim), self.upper.unsqueeze(dim) ) @@ -26,6 +30,9 @@ def where(self, condition: Tensor, other: "IntervalTensor") -> "IntervalTensor": def to(self, device): return IntervalTensor(self.lower.to(device), self.upper.to(device)) + def gather(self, dim: int, other: Tensor) -> "IntervalTensor": + return IntervalTensor( self.lower.gather(dim, other), + self.upper.gather(dim, other) ) def __add__(self, other: Union["IntervalTensor", Tensor, Number]) -> "IntervalTensor": if isinstance(other, IntervalTensor): @@ -56,6 +63,9 @@ def __setitem__(self, i, x: "IntervalTensor"): self.lower[i] = x.lower self.upper[i] = x.upper +def interpolate(x, lambda_, y): + return x + (y - x) * lambda_ + def relative_position(x, z, y, eps=1e-5): diff = y - x return ((z - x) / diff).where(diff.abs() >= eps, full_like(diff, 0.5)) \ No newline at end of file From 2ca6492d7753c6c072411c7c23fd6b2addf8f99c Mon Sep 17 00:00:00 2001 From: Vladimir Ivanov Date: Mon, 3 Jun 2024 04:52:59 +0200 Subject: [PATCH 3/9] Made plotting ground truth optional. --- src/satisfia/agents/learning/dqn/train.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/satisfia/agents/learning/dqn/train.py b/src/satisfia/agents/learning/dqn/train.py index ad2c644..16f7b5f 100644 --- a/src/satisfia/agents/learning/dqn/train.py +++ b/src/satisfia/agents/learning/dqn/train.py @@ -185,6 +185,9 @@ def register_criteria(self, model, timestep): criteria[criterion].squeeze(0)[action].item() def ground_truth_criteria(self, model, env) -> Dict[Tuple["state", "state_aspiration", "criterion", "action"], float] | None: + if self.cfg.planning_agent_for_plotting_ground_truth is None: + return None + criteria = dict() for state in self.cfg.states_for_plotting_criteria: From 50801d6e04e73c21b0928a903272d70668510149 Mon Sep 17 00:00:00 2001 From: Vladimir Ivanov <96141489+astOwOlfo@users.noreply.github.com> Date: Thu, 6 Jun 2024 04:42:55 +0200 Subject: [PATCH 4/9] Add files via upload --- deep-learning-writeup.md | 44 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 deep-learning-writeup.md diff --git a/deep-learning-writeup.md b/deep-learning-writeup.md new file mode 100644 index 0000000..5c3e86a --- /dev/null +++ b/deep-learning-writeup.md @@ -0,0 +1,44 @@ +# Satisfia algorithms with deep reinforcement learning +We want to make be able to run satisfia algorithms on environments which aren't simple gridworlds where planning is not tractable. For this, we train a neural network to predict all the quantities which are computed recursively in planning algorithm. Those quantities can then be used deduce the satisfia policy by inheriting from `AspirationAgent` and overriding the methods which compute those quantities with methods which use the neural network. This is done by the class `AgentMDPDQN`. + +## Output of the network +The quantities to which we fit a neural network are: +- `maxAdmissibleQ`, `minAdmissibleQ` (or $\overline{Q}, \underline{Q}$) - the Q values of the maximizer and minimizer. 
Note: the satisfia algorithm has hyperparameters $\overline\lambda, \underline\lambda$, which we set to $1$ and $0$ respectively. For other values of $\overline\lambda, \underline\lambda$, $\overline{Q}, \underline{Q}$ are no longer the Q values of the maximizer and minimizer and are instead defined by the following equations, where $s$ is always a state and $a$ always an action, maxima and minima are taken over the set of all possible actions, $0 \le \gamma \le 1$ is the discount rate, and $\mathbb{E}_{(\delta, s')\sim\mathrm{step}(s, a)}$ denotes the expectation when $\delta$ and $s'$ are drawn from the reward and next-state distribution after taking action $a$ in state $s$. Note that we recover the classical Bellman equations for the Q and V values of the maximizer and minimizer when $(\underline\lambda, \overline\lambda) = (0, 1)$.
+$$\overline{V}(s) := \min_a \underline{Q}(s, a) : \overline\lambda : \max_a \overline{Q}(s, a)$$
+$$\underline{V}(s) := \min_a \underline{Q}(s, a) : \underline\lambda : \max_a \overline{Q}(s, a)$$
+$$\overline{Q}(s, a) := \mathbb{E}_{(\delta, s')\sim\mathrm{step}(s, a)} (\delta + \gamma\overline{V}(s'))$$
+$$\underline{Q}(s, a) := \mathbb{E}_{(\delta, s')\sim\mathrm{step}(s, a)} (\delta + \gamma\underline{V}(s'))$$
+where the notation $x:\lambda:y$ stands for interpolation, that is, $x:\lambda:y = x + \lambda(y - x) = (1 - \lambda) x + \lambda y$.
+
+- The safety criteria for which Bellman equations exist. An example of such a criterion is `Q`, the Q value of the actual satisfia policy: `Q(s, a)` is the expected sum of the rewards one would get by taking action `a` in state `s` and then following the satisfia policy until the end of the episode. Note that this is different from `maxAdmissibleQ` and `minAdmissibleQ`, which are (when $(\underline\lambda, \overline\lambda) = (0, 1)$) the Q values of the maximizer and minimizer policies. Similar safety criteria are `Q2, Q3, ...`: `Q2(s, a)` is the expected value of the square of the sum of the rewards one would get by taking action `a` in state `s` and then following the satisfia policy until the end of the episode, and `Q3, ...` are analogous but with higher powers instead of the square. Note that `Q2(s, a) != (Q(s, a))^2`, since the square of an expected value does not equal the expected value of the square. An example of a safety criterion for which no Bellman formula exists is one based on the Wasserstein distance.
+
+## Architecture of the network
+Note that some outputs of the network do not depend on the aspiration and on the hyperparameters of the satisfia algorithm (i.e. $\overline\lambda, \underline\lambda$ and the weights given to the safety criteria), whereas other outputs do depend on them. For instance, `maxAdmissibleQ` and `minAdmissibleQ` only depend on $\overline\lambda, \underline\lambda$ (and we fix $(\underline\lambda, \overline\lambda) = (0, 1)$ for now). Indeed, with $(\underline\lambda, \overline\lambda) = (0, 1)$, `maxAdmissibleQ` and `minAdmissibleQ` are the Q values of the maximizer and minimizer, so they depend neither on the aspiration nor on the hyperparameters of the satisfia policy. However, `Q` does depend on the aspiration and on the hyperparameters of the satisfia policy, since it is the Q value of the satisfia policy itself. The same goes for `Q2, Q3, ...`.
+
+Because of this, we decompose the network into multiple parts $f_\theta, g_\phi, h_\psi$. We train the network by fitting $g_\phi(f_\theta(s))$ to `(maxAdmissibleQ(s, .), minAdmissibleQ(s, .))` and $h_\psi(f_\theta(s), \aleph)$ to `Q(s, .)` (or `(Q(s, .), Q2(s, .), Q3(s, .), ...)`), where $s$ is a state and $\aleph$ is an aspiration. Thus, the network has outputs that do not depend on the aspiration and outputs that do, without the cost of training two separate networks. Note that the outputs of the network are Q tables, that is, vectors of the form `[maxAdmissibleQ(s, a) for a in all_actions]` (and analogously for the other criteria). Hence the notation `maxAdmissibleQ(s, .)`.
+
+For now, $f_\theta, g_\phi, h_\psi$ are fully connected networks with dropout and layer normalization.
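To make this decomposition concrete, here is a minimal sketch of such a two-headed network. It is illustrative only: the class name, layer sizes and the use of plain `nn.Linear` are assumptions made for the example; the actual implementation in this patch is the `SatisfiaMLP` class in `src/satisfia/agents/learning/models/building_blocks.py`, which additionally supports noisy linear layers, dropout, layer normalization and dictionary-valued outputs.

```python
import torch
from torch import nn, Tensor

class TwoHeadedQNetwork(nn.Module):
    """Sketch of the f_theta / g_phi / h_psi decomposition described above."""

    def __init__(self, observation_size: int, num_actions: int, hidden_size: int = 64):
        super().__init__()
        # f_theta: shared trunk, sees only the observation
        self.trunk = nn.Sequential(nn.Linear(observation_size, hidden_size), nn.ReLU())
        # g_phi: aspiration-independent head (two Q tables: maxAdmissibleQ, minAdmissibleQ)
        self.admissible_head = nn.Linear(hidden_size, 2 * num_actions)
        # h_psi: aspiration-dependent head; the aspiration interval (lower, upper)
        # is concatenated to the trunk features
        self.aspiration_head = nn.Linear(hidden_size + 2, num_actions)
        self.num_actions = num_actions

    def forward(self, observation: Tensor, aspiration: Tensor) -> dict[str, Tensor]:
        features = self.trunk(observation)
        admissible = self.admissible_head(features)
        q = self.aspiration_head(torch.cat([features, aspiration], dim=-1))
        return {"maxAdmissibleQ": admissible[..., :self.num_actions],
                "minAdmissibleQ": admissible[..., self.num_actions:],
                "Q": q}

# usage: a batch of 5 observations of size 3, aspiration intervals given as (low, high) pairs
net = TwoHeadedQNetwork(observation_size=3, num_actions=4)
out = net(torch.randn(5, 3), torch.rand(5, 2))
print({name: q_table.shape for name, q_table in out.items()})  # each criterion is a [5, 4] Q table
```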
+We train the network by fitting $g_\phi(f_\theta(s))$ to `(maxAdmissibleQ(s, .), minAdmissibleQ(s, .))` and $h_\psi(f_\theta(s), \aleph)$ to `Q(s, .)` (or to `(Q(s, .), Q2(s, .), Q3(s, .), ...)`), where $s$ is a state and $\aleph$ is an aspiration. Thus, we have a network with an output that does not depend on the aspiration and an output that does, without the cost of training two networks. Note that the outputs of the network are Q tables, that is, vectors of the form `[maxAdmissibleQ(s, a) for a in all_actions]` (and analogously for the other criteria); hence the notation `maxAdmissibleQ(s, .)`.
+
+For now, $f_\theta, g_\phi, h_\psi$ are fully connected networks with dropout and layer normalization.
+
+## Training
+
+We use the [DQN algorithm](https://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html).
+
+We could use this algorithm to learn `maxAdmissibleQ` and `minAdmissibleQ` directly, just by applying it to train a maximizer and a minimizer. However, this doesn't work that well, because there are some states which the maximizer or minimizer never visits but which the satisfia policy does visit. The Q values in these states are then not learned correctly, which doesn't matter for the maximizer and minimizer, but does matter for the satisfia algorithm and makes it fail.
+
+To mitigate this problem, we notice that DQN need not follow the maximizer or minimizer policy in order to learn the maximizer's and minimizer's Q values (this is a well-known fact about DQN in classical settings: even though DQN explores using an $\epsilon$-greedy policy, which takes a random action with probability $\epsilon$ at each step, it learns the Q values of the maximizer, not of the $\epsilon$-greedy policy). So we explore using the satisfia policy during training (we also take a random action instead of the satisfia policy's action with some probability, as in $\epsilon$-greedy exploration). Note that exploring with the satisfia policy requires an aspiration, which we draw randomly at the beginning of each episode.
+
+In order to learn the safety criteria, we do the same thing as DQN but with a different Bellman equation; a sketch of such a formula for `Q2` is given below.
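+
+As a sketch of why such a formula exists for `Q2` (and, analogously, for the higher powers): writing the total collected from $(s, a)$ onwards as $\delta + \gamma G'$, where $G'$ is the total collected from $s'$ onwards, squaring and taking expectations gives
+$$Q2(s, a) = \mathbb{E}_{(\delta, s')\sim\mathrm{step}(s, a)} \left( \delta^2 + 2 \gamma \delta\, V(s') + \gamma^2\, V2(s') \right),$$
+where $V(s')$ and $V2(s')$ are the expected total and the expected squared total obtained by following the satisfia policy from $s'$. (This is only a sketch: it ignores, for instance, that the aspiration propagated to $s'$, and hence the behaviour of the satisfia policy from there, can depend on the realized $\delta$. The exact formulas used during training are in `src/satisfia/agents/learning/dqn/bellman_formula.py`.)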
+
+Note that it is slow to evaluate the satisfia policy at every step during exploration, because it is not vectorized, so we reimplement a part of the satisfia algorithm in `src/satisfia/agents/learning/dqn/agent_mdp_dqn.py` in a vectorized manner.
+
+## Environments
+
+For now, we use simple gridworlds for debugging, because things don't quite work yet, gridworlds require less compute, and we have access to the ground truth Q values and safety criteria for them.
+
+It should, in theory, be easy to make the learning algorithm work with any environment which is a `gymnasium.Env`, since the learning algorithm only uses the `gymnasium.Env` interface and doesn't use anything specific to the gridworlds.
+
+There is a list of environments where misalignment happened, usually without people expecting it to happen, in `environment-compilation.md`. I think it would be really great if we were able to run the satisfia deep learning algorithm on some of them and see whether it mitigates the misalignment. I tried to sort them in order of relevance to satisfia, but that was some time ago and I now disagree with my ranking in some cases. I think the best thing to start with is environments of the type "a robot in a physics simulation exploits a bug in the physics engine" or "an agent exploits a weird balance in a video game", with a slight preference for the former because there are more such environments in the list.
+
+## Code structure
+
+The main algorithm is in the `train_dqn` function in `src/satisfia/agents/learning/dqn/train.py`. This is the most important function to understand. All the other files relevant to the training algorithm are in `src/satisfia/agents/learning`, and `train_dqn` calls into them. To run the code, run `python scripts/test_dqn.py` in the root directory. `scripts/test_dqn.py` runs the satisfia deep learning algorithm on simple gridworlds and plots some diagnostics, namely the evolution of the outputs of the networks during training and an achieved total vs aspiration graph (on which we should see an `x = clamp(y, max=..., min=...)` line if everything is working).
\ No newline at end of file

From 01b1e8399642864bd3bd9d5b8219fb1cc2a75e97 Mon Sep 17 00:00:00 2001
From: Vladimir Ivanov
Date: Sun, 16 Jun 2024 21:53:16 +0200
Subject: [PATCH 5/9] Fixed some bugs

---
 deep-learning-writeup.md | 44 +++
 scripts/test_dqn.py | 124 +++++---
 scripts/test_dqn_copy.py | 271 ++++++++++++++++++
 src/environments/very_simple_gridworlds.py | 2 +-
 .../agents/learning/dqn/agent_mdp_dqn.py | 16 +-
 .../agents/learning/dqn/bellman_formula.py | 6 +-
 src/satisfia/agents/learning/dqn/config.py | 1 +
 .../learning/dqn/exploration_strategy.py | 1 +
 src/satisfia/agents/learning/dqn/train.py | 37 +--
 .../agents/learning/environment_wrappers.py | 48 +++-
 src/satisfia/agents/makeMDPAgentSatisfia.py | 4 +-
 src/world_model/world_model.py | 5 +
 12 files changed, 480 insertions(+), 79 deletions(-)
 create mode 100644 deep-learning-writeup.md
 create mode 100644 scripts/test_dqn_copy.py

diff --git a/deep-learning-writeup.md b/deep-learning-writeup.md
new file mode 100644
index 0000000..5c3e86a
--- /dev/null
+++ b/deep-learning-writeup.md
@@ -0,0 +1,44 @@
+# Satisfia algorithms with deep reinforcement learning
+We want to be able to run satisfia algorithms on environments which aren't simple gridworlds and where planning is not tractable. For this, we train a neural network to predict all the quantities which are computed recursively in the planning algorithm. Those quantities can then be used to deduce the satisfia policy by inheriting from `AspirationAgent` and overriding the methods which compute those quantities with methods which use the neural network. This is done by the class `AgentMDPDQN`.
+
+## Output of the network
+The quantities to which we fit a neural network are:
+- `maxAdmissibleQ`, `minAdmissibleQ` (or $\overline{Q}, \underline{Q}$) - the Q values of the maximizer and minimizer. Note: the satisfia algorithm has hyperparameters $\overline\lambda, \underline\lambda$, which we set to $1$ and $0$ respectively. For other values of $\overline\lambda, \underline\lambda$, $\overline{Q}, \underline{Q}$ are no longer the Q values of the maximizer and minimizer and are instead defined by the following equations, where $s$ is always a state and $a$ always an action, maxima and minima are taken over the set of all possible actions, $0 \le \gamma \le 1$ is the discount rate, and $\mathbb{E}_{(\delta, s')\sim\mathrm{step}(s, a)}$ denotes the expectation when the reward $\delta$ and the next state $s'$ are drawn from the transition distribution after taking action $a$ in state $s$.
Note how we obtain the classical Bellman equations for the Q and V values of the maximizer and minimizer when $(\underline\lambda, \overline\lambda) = (0, 1)$.
+$$\overline{V}(s) := \min_a \underline{Q}(s, a) : \overline\lambda : \max_a \overline{Q}(s, a)$$
+$$\underline{V}(s) := \min_a \underline{Q}(s, a) : \underline\lambda : \max_a \overline{Q}(s, a)$$
+$$\overline{Q}(s, a) := \mathbb{E}_{(\delta, s')\sim\mathrm{step}(s, a)} (\delta + \gamma\overline{V}(s'))$$
+$$\underline{Q}(s, a) := \mathbb{E}_{(\delta, s')\sim\mathrm{step}(s, a)} (\delta + \gamma\underline{V}(s'))$$
+where the notation $x:\lambda:y$ stands for interpolation, that is, $x:\lambda:y = x + \lambda(y - x) = (1 - \lambda) x + \lambda y$.
+
+- The safety criteria for which Bellman equations exist. An example of a safety criterion for which a Bellman formula exists is the criterion `Q`, which is the Q value of the actual satisfia policy (that is, `Q(s, a)` is the expected sum of the rewards one would get by taking action `a` in state `s` and then following the satisfia policy until the end of the episode). Note that this is different from `maxAdmissibleQ` and `minAdmissibleQ`, which are (when $(\underline\lambda, \overline\lambda) = (0, 1)$) the Q values of the maximizer and minimizer policies. Similar safety criteria are `Q2, Q3, ...`, defined by `Q2(s, a)` being the expected value of the square of the sum of the rewards one would get by taking action `a` in state `s` and then following the satisfia policy until the end of the episode; `Q3, ...` are analogous but with higher powers instead of the square. Note that `Q2(s, a) != (Q(s, a))^2`, since the square of an expected value does not equal the expected value of the square. An example of a safety criterion for which no Bellman formula exists is one based on the Wasserstein distance.
+
+## Architecture of the network
+Note that some outputs of the network do not depend on the aspiration and the hyperparameters of the satisfia algorithm (i.e. $\overline\lambda, \underline\lambda$ and the weights given to the safety criteria), whereas other outputs do depend on them. For instance, `maxAdmissibleQ` and `minAdmissibleQ` only depend on $\overline\lambda$, $\underline\lambda$ (and we fix $(\underline\lambda, \overline\lambda) = (0, 1)$ for now). In effect, with $(\underline\lambda, \overline\lambda) = (0, 1)$, `maxAdmissibleQ` and `minAdmissibleQ` are the Q values of the maximizer and minimizer, so they don't depend on the aspiration and the hyperparameters of the satisfia policy. However, `Q` does depend on the aspiration and the hyperparameters of the satisfia policy, since it is the Q value of the satisfia policy itself. The same goes for `Q2, Q3, ...`.
+
+Because of this, we decompose the network into multiple parts $f_\theta, g_\phi, h_\psi$. We train the network by fitting $g_\phi(f_\theta(s))$ to `(maxAdmissibleQ(s, .), minAdmissibleQ(s, .))` and $h_\psi(f_\theta(s), \aleph)$ to `Q(s, .)` (or to `(Q(s, .), Q2(s, .), Q3(s, .), ...)`), where $s$ is a state and $\aleph$ is an aspiration. Thus, we have a network with an output that does not depend on the aspiration and an output that does, without the cost of training two networks. Note that the outputs of the network are Q tables, that is, vectors of the form `[maxAdmissibleQ(s, a) for a in all_actions]` (and analogously for the other criteria); hence the notation `maxAdmissibleQ(s, .)`.
+
+For now, $f_\theta, g_\phi, h_\psi$ are fully connected networks with dropout and layer normalization.
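+
+The $x:\lambda:y$ interpolation used in the equations of the previous section is the same operation as the `interpolate` helper added to `src/satisfia/util/interval_tensor.py`. As a rough sketch only (not the repository's actual target computation in `bellman_formula.py`), the admissible state values could be computed from a batch of predicted Q tables like this:
+
+```python
+import torch
+from satisfia.util.interval_tensor import interpolate  # assumes ./src is on sys.path, as in the scripts
+
+def admissible_state_values(min_q: torch.Tensor,   # [batch, action]: minAdmissibleQ(s, .)
+                            max_q: torch.Tensor,   # [batch, action]: maxAdmissibleQ(s, .)
+                            lambda_low: float = 0.,
+                            lambda_high: float = 1.):
+    low  = min_q.min(dim=-1).values   # min_a minAdmissibleQ(s, a)
+    high = max_q.max(dim=-1).values   # max_a maxAdmissibleQ(s, a)
+    v_high = interpolate(low, lambda_high, high)   # corresponds to the equation for V-bar
+    v_low  = interpolate(low, lambda_low,  high)   # corresponds to the equation for V-underline
+    return v_low, v_high
+```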
+
+## Training
+
+We use the [DQN algorithm](https://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html).
+
+We could use this algorithm to learn `maxAdmissibleQ` and `minAdmissibleQ` directly, just by applying it to train a maximizer and a minimizer. However, this doesn't work that well, because there are some states which the maximizer or minimizer never visits but which the satisfia policy does visit. The Q values in these states are then not learned correctly, which doesn't matter for the maximizer and minimizer, but does matter for the satisfia algorithm and makes it fail.
+
+To mitigate this problem, we notice that DQN need not follow the maximizer or minimizer policy in order to learn the maximizer's and minimizer's Q values (this is a well-known fact about DQN in classical settings: even though DQN explores using an $\epsilon$-greedy policy, which takes a random action with probability $\epsilon$ at each step, it learns the Q values of the maximizer, not of the $\epsilon$-greedy policy). So we explore using the satisfia policy during training (we also take a random action instead of the satisfia policy's action with some probability, as in $\epsilon$-greedy exploration). Note that exploring with the satisfia policy requires an aspiration, which we draw randomly at the beginning of each episode.
+
+In order to learn the safety criteria, we do the same thing as DQN but with a different Bellman equation.
+
+Note that it is slow to evaluate the satisfia policy at every step during exploration, because it is not vectorized, so we reimplement a part of the satisfia algorithm in `src/satisfia/agents/learning/dqn/agent_mdp_dqn.py` in a vectorized manner.
+
+## Environments
+
+For now, we use simple gridworlds for debugging, because things don't quite work yet, gridworlds require less compute, and we have access to the ground truth Q values and safety criteria for them.
+
+It should, in theory, be easy to make the learning algorithm work with any environment which is a `gymnasium.Env`, since the learning algorithm only uses the `gymnasium.Env` interface and doesn't use anything specific to the gridworlds.
+
+There is a list of environments where misalignment happened, usually without people expecting it to happen, in `environment-compilation.md`. I think it would be really great if we were able to run the satisfia deep learning algorithm on some of them and see whether it mitigates the misalignment. I tried to sort them in order of relevance to satisfia, but that was some time ago and I now disagree with my ranking in some cases. I think the best thing to start with is environments of the type "a robot in a physics simulation exploits a bug in the physics engine" or "an agent exploits a weird balance in a video game", with a slight preference for the former because there are more such environments in the list.
+
+## Code structure
+
+The main algorithm is in the `train_dqn` function in `src/satisfia/agents/learning/dqn/train.py`. This is the most important function to understand. All the other files relevant to the training algorithm are in `src/satisfia/agents/learning`, and `train_dqn` calls into them. To run the code, run `python scripts/test_dqn.py` in the root directory. `scripts/test_dqn.py` runs the satisfia deep learning algorithm on simple gridworlds and plots some diagnostics, namely the evolution of the outputs of the networks during training and an achieved total vs aspiration graph (on which we should see an `x = clamp(y, max=..., min=...)` line if everything is working; see the sketch below).
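+
+For reference, a tiny sketch of the ideal curve this plot should follow if training worked perfectly, assuming the agent can hit any total between the minimal and maximal achievable ones (the function name is made up):
+
+```python
+import numpy as np
+
+def ideal_total(aspiration: float, min_achievable: float, max_achievable: float) -> float:
+    # The achieved total should equal the aspiration, clamped to the achievable range.
+    return float(np.clip(aspiration, min_achievable, max_achievable))
+```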
\ No newline at end of file diff --git a/scripts/test_dqn.py b/scripts/test_dqn.py index 61d223f..127c814 100644 --- a/scripts/test_dqn.py +++ b/scripts/test_dqn.py @@ -8,7 +8,9 @@ from satisfia.agents.learning.dqn.agent_mdp_dqn import AgentMDPDQN, local_policy from satisfia.agents.learning.dqn.criteria import complete_criteria from satisfia.agents.learning.models.building_blocks import SatisfiaMLP -from satisfia.agents.learning.environment_wrappers import RestrictToPossibleActionsWrapper +from satisfia.agents.learning.environment_wrappers import RestrictToPossibleActionsWrapper, \ + RescaleDeltaWrapper, \ + ObservationToTupleWrapper from satisfia.agents.makeMDPAgentSatisfia import AspirationAgent, AgentMDPPlanning from satisfia.util.interval_tensor import IntervalTensor @@ -29,8 +31,8 @@ from plotly.colors import DEFAULT_PLOTLY_COLORS from plotly.graph_objects import Figure, Scatter, Layout -device = "cuda" if torch.cuda.is_available() else "cpu" -# device = "cpu" +# device = "cuda" if torch.cuda.is_available() else "cpu" +device = "cpu" print("using", device) def multi_tqdm(num_tqdms: int) -> List[Callable[[Iterable], Iterable]]: @@ -72,6 +74,8 @@ def compute_total(agent: AspirationAgent, env: gym.Env, aspiration4state: float if isinstance(aspiration4state, (int, float)): aspiration4state = (aspiration4state, aspiration4state) + env = ObservationToTupleWrapper(env) + total = 0. observation, _ = env.reset() done = False @@ -105,7 +109,7 @@ def plot_totals_vs_aspiration( agents: Iterable[AspirationAgent] | Dict[str, Asp sample_size: int, reference_agents: Iterable[AspirationAgent] | Dict[str, AspirationAgent] | AspirationAgent = [], error_bar_confidence: float = 0.95, - n_jobs: int = -1, + n_jobs: int = 1, title: str = "Totals for agent(s)", save_to: str | None = None ): @@ -174,55 +178,71 @@ def plot_totals_vs_aspiration( agents: Iterable[AspirationAgent] | Dict[str, Asp if save_to is not None: fig.write_html(save_to) -cfg = DQNConfig( aspiration_sampler = UniformPointwiseAspirationSampler(-20, 5), +# this is only for the test_box gridworld +reachable_states = [(0, 2, 2)] + [(time, y, 2) for time in range(1, 10) for y in [1, 2, 3]] + +cfg = DQNConfig( aspiration_sampler = UniformPointwiseAspirationSampler(-20, 10), criterion_coefficients_for_loss = dict( maxAdmissibleQ = 1., - minAdmissibleQ = 1., - Q = 1. ), + minAdmissibleQ = 1., ) , + # Q = 1. 
), exploration_rate_scheduler = - PiecewiseLinearScheduler([0., 0.1, 1.], [1., 0.05, 0.05]), + PiecewiseLinearScheduler([0., 0.5, 1.], [1., 0.01, 0.01]), noisy_network_exploration = False, # noisy_network_exploration_rate_scheduler = # PiecewiseLinearScheduler([0., 0.1, 1.], [1., 0.05, 0.05]), num_envs = 10, async_envs = False, discount = 1, - total_timesteps = 10_000, - training_starts = 100, - training_frequency = 1, - target_network_update_frequency = 50, + soft_target_network_update_coefficient = 0, + learning_rate_scheduler = lambda _: 1e-2, + total_timesteps = 50_000, + training_starts = 500, + batch_size = 4096, + training_frequency = 10, + target_network_update_frequency = 100, satisfia_agent_params = { "lossCoeff4FeasibilityPowers": 0, "lossCoeff4LRA1": 0, "lossCoeff4Time1": 0, "lossCoeff4Entropy1": 0, "defaultPolicy": None }, device = device, - plotted_criteria = ["maxAdmissibleQ", "minAdmissibleQ", "Q"], + plotted_criteria = None, # ["maxAdmissibleQ", "minAdmissibleQ"], plot_criteria_frequency = 100, states_for_plotting_criteria = [(time, 2, 2) for time in range(10)], - state_aspirations_for_plotting_criteria = [(-5, -5), (-1, -1), (1, 1)], - actions_for_plotting_criteria = [2, 4] ) + state_aspirations_for_plotting_criteria = [(0, 0), (1e3, 1e3)], # [(-5, -5), (-1, -1), (1, 1)], + actions_for_plotting_criteria = [0, 1, 2, 3, 4] ) -def train_and_plot(gridworld_name: str): - print(gridworld_name) +def train_and_plot( env_name: str, + gridworld: bool = True, + max_achievable_total: float | None = None, + min_achievable_total: float | None = None ): + + print(env_name) def make_env(): - env, _ = make_simple_gridworld(gridworld_name) + if gridworld: + env, _ = make_simple_gridworld(env_name, time=10) + env = RestrictToPossibleActionsWrapper(env) + else: + env = gym.make(env_name) + env = RescaleDeltaWrapper(env, from_interval=(-500, 100), to_interval=(-5, 1)) return env def make_model(pretrained=None): - d_observation = len(make_env().observation_space) + # to do: compute d_observation properly + d_observation = len(make_env().observation_space) if gridworld else 8 n_actions = make_env().action_space.n model = SatisfiaMLP( input_size = d_observation, output_not_depending_on_agent_parameters_sizes = { "maxAdmissibleQ": n_actions, "minAdmissibleQ": n_actions }, - output_depending_on_agent_parameters_sizes = { "Q": n_actions }, - common_hidden_layer_sizes = [64, 64], - hidden_layer_not_depending_on_agent_parameters_sizes = [64], - hidden_layer_depending_on_agent_parameters_sizes = [64], + output_depending_on_agent_parameters_sizes = dict(), # { "Q": n_actions }, + common_hidden_layer_sizes = [32, 32], + hidden_layer_not_depending_on_agent_parameters_sizes = [16], + hidden_layer_depending_on_agent_parameters_sizes = [], # [64], batch_size = cfg.num_envs, layer_norms = True, - dropout = 0.1 + dropout = 0 ) if pretrained is not None: model.load_state_dict(pretrained) @@ -235,10 +255,9 @@ def make_model(pretrained=None): # "Q": n_actions } ], # batch_size=cfg.num_envs ).to(device) - planning_agent = AgentMDPPlanning( cfg.satisfia_agent_params, - RestrictToPossibleActionsWrapper(make_env()) ) + planning_agent = AgentMDPPlanning(cfg.satisfia_agent_params, make_env()) if gridworld else None - model = run_or_load( f"dqn-{gridworld_name}-no-discount-with-criteria.pickle", + model = run_or_load( f"dqn-{env_name}-no-discount-longer-training.pickle", train_dqn, make_env, make_model, @@ -253,31 +272,44 @@ def make_model(pretrained=None): num_actions = make_env().action_space.n, device = device ) - 
for state in [(time, 2, 2) for time in range(10)]: - for action in range(5): - for state_aspiration in [(0, 0), (1, 1), (2, 2)]: - action_aspiration = planning_agent.aspiration4action(state, action, state_aspiration) - print( state, - action, - action_aspiration, - planning_agent.Q(state, action, state_aspiration) - - learning_agent.Q(state, action, state_aspiration) ) + # for state in [(time, 2, 2) for time in range(10)]: + # for action in range(5): + # for state_aspiration in [(0, 0), (1, 1), (2, 2)]: + # action_aspiration = planning_agent.aspiration4action(state, action, state_aspiration) + # print( state, + # action, + # action_aspiration, + # planning_agent.Q(state, action, state_aspiration) + # - learning_agent.Q(state, action, state_aspiration) ) first_observation, _ = make_env().reset() - min_achievable_total = planning_agent.minAdmissibleV(first_observation) - max_achievable_total = planning_agent.maxAdmissibleV(first_observation) + if max_achievable_total is None: + max_achievable_total = planning_agent.maxAdmissibleV(first_observation) + if min_achievable_total is None: + min_achievable_total = planning_agent.minAdmissibleV(first_observation) plot_totals_vs_aspiration( agents = learning_agent, - env = RestrictToPossibleActionsWrapper(make_env()), + env = make_env(), aspirations = np.linspace( min_achievable_total - 1, max_achievable_total + 1, 20 ), - sample_size = 1_000, + sample_size = 1000, # reference_agents = planning_agent, - title = f"totals for agent with no discount and longer training in {gridworld_name}" ) + title = f"totals for agent with no discount and longer training in {env_name}" ) + +# train_and_plot( 'LunarLander-v2', +# gridworld = False, +# min_achievable_total = -5, +# max_achievable_total = 1 ) + + +all_gridworlds = [ "GW1", "GW2", "GW3", "GW4", "GW5", "GW6", "GW22", "GW23", "GW24", "GW25", "GW26", + "GW27", "GW28", "GW29", "GW30", "GW31", "GW32", "AISG2", "test_return", + "test_box" ] + +gridworlds_without_delta = ["GW23", "GW24"] -all_gridworlds = ["GW1", "GW2", "GW3", "GW4", "GW5", "GW6", "GW22", "GW23", "GW24", "GW25", "GW26", "GW27", "GW28", "GW29", "GW30", "GW31", "GW32", "AISG2", "test_return", "test_box"] -require_longer_training = ["GW22", "GW28", "AISG2", "test_box"] +gridworlds_requiring_longer_training = [ "GW28", "test_box", "GW29", "GW26", "GW30", "GW32"] -# Parallel(n_jobs=-1)(delayed(train_and_plot)(gridworld_name) for gridworld_name in require_longer_training) -for gridworld_name in ["GW1"]: - train_and_plot(gridworld_name) +Parallel(n_jobs=-1)(delayed(train_and_plot)(gridworld_name) for gridworld_name in all_gridworlds) +# for gridworld_name in all_gridworlds: +# train_and_plot(gridworld_name) diff --git a/scripts/test_dqn_copy.py b/scripts/test_dqn_copy.py new file mode 100644 index 0000000..2c88cf0 --- /dev/null +++ b/scripts/test_dqn_copy.py @@ -0,0 +1,271 @@ +import sys +sys.path.insert(0, "./src/") + +from environments.very_simple_gridworlds import make_simple_gridworld, all_worlds +from satisfia.agents.learning.dqn.train import train_dqn +from satisfia.agents.learning.dqn.config import DQNConfig, UniformPointwiseAspirationSampler, \ + UniformAspirationSampler, PiecewiseLinearScheduler +from satisfia.agents.learning.dqn.agent_mdp_dqn import AgentMDPDQN, local_policy +from satisfia.agents.learning.dqn.criteria import complete_criteria +from satisfia.agents.learning.models.building_blocks import SatisfiaMLP +from satisfia.agents.learning.environment_wrappers import RestrictToPossibleActionsWrapper +from 
satisfia.agents.makeMDPAgentSatisfia import AspirationAgent, AgentMDPPlanning +from satisfia.util.interval_tensor import IntervalTensor + +import gymnasium as gym +import torch +from torch import tensor, Tensor +from torch.nn import Module +import numpy as np +import scipy +import pickle +from joblib import Parallel, delayed +from statistics import mean +from functools import partial +from tqdm import tqdm +from typing import Tuple, List, Dict, Iterable, Callable, Generator +from os.path import isfile +import dataclasses +from plotly.colors import DEFAULT_PLOTLY_COLORS +from plotly.graph_objects import Figure, Scatter, Layout + +device = "cuda" if torch.cuda.is_available() else "cpu" +# device = "cpu" +print("using", device) + +def multi_tqdm(num_tqdms: int) -> List[Callable[[Iterable], Iterable]]: + def itr_wrapper(itr: Iterable, progress_bar: tqdm, desc: str | None = None, total: int | None = None) -> Generator: + progress_bar.desc = desc + progress_bar.reset() + if total is not None: + progress_bar.total = total + else: + progress_bar.total = len(itr) if hasattr(itr, "__len__") else None + progress_bar.refresh() + for item in itr: + yield item + progress_bar.update() + progress_bar.refresh() + + progress_bars = [tqdm() for _ in range(num_tqdms)] + return [partial(itr_wrapper, progress_bar=progress_bar) for progress_bar in progress_bars] + +def confidence_interval(xs: List[float], confidence: float): + return scipy.stats.t.interval(confidence, len(xs)-1, loc=np.mean(xs), scale=scipy.stats.sem(xs)) + +def error_bars(xs: List[float], confidence: float): + mean_ = mean(xs) + lower_confidence, upper_confidence = confidence_interval(xs, confidence) + return mean_ - lower_confidence, upper_confidence - mean_ + +def run_or_load(filename, function, *args, **kwargs): + if isfile(filename): + with open(filename, "rb") as f: + return pickle.load(f) + + result = function(*args, **kwargs) + with open(filename, "wb") as f: + pickle.dump(result, f) + return result + +def compute_total(agent: AspirationAgent, env: gym.Env, aspiration4state: float | Tuple[float, float]) -> float: + if isinstance(aspiration4state, (int, float)): + aspiration4state = (aspiration4state, aspiration4state) + + total = 0. 
+ observation, _ = env.reset() + if isinstance(observation, (np.ndarray, Tensor)): + observation = tuple(observation.tolist()) + done = False + while not done: + action, aspiration4action = agent.localPolicy(observation, aspiration4state).sample()[0] + next_observation, delta, done, truncated, _ = env.step(action) + if isinstance(next_observation, (np.ndarray, Tensor)): + next_observation = tuple(next_observation.tolist()) + done = done or truncated + total += delta + aspiration4state = agent.propagateAspiration(observation, action, aspiration4action, Edel=None, nextState=next_observation) + observation = next_observation + return total + +def scatter_with_y_error_bars( x: Iterable[float], + y: Iterable[Iterable[float]], + confidence: float, + **plotly_kwargs ) -> Scatter: + + means = [mean(point_ys) for point_ys in y] + error_bars_ = [error_bars(point_ys, confidence) for point_ys in y] + return Scatter( x = x, + y = means, + error_y = dict( type = "data", + symmetric = False, + array = [lower for lower, upper in error_bars_], + arrayminus = [upper for lower, upper in error_bars_] ), + **plotly_kwargs ) + +def plot_totals_vs_aspiration( agents: Iterable[AspirationAgent] | Dict[str, AspirationAgent] | AspirationAgent, + env: gym.Env, + aspirations: Iterable[int | Tuple[int, int]] | int | Tuple[int, int], + sample_size: int, + reference_agents: Iterable[AspirationAgent] | Dict[str, AspirationAgent] | AspirationAgent = [], + error_bar_confidence: float = 0.95, + n_jobs: int = -1, + title: str = "Totals for agent(s)", + save_to: str | None = None ): + + if not isinstance(agents, Iterable): + agents = [agents] + if not isinstance(agents, Dict): + agents = {f"agent {i}": agent for i, agent in enumerate(agents)} + if not isinstance(reference_agents, Iterable): + reference_agents = [reference_agents] + if not isinstance(reference_agents, Dict): + reference_agents = {f"agent {i}": agent for i, agent in enumerate(reference_agents)} + if not isinstance(aspirations, Iterable): + aspirations = [aspirations] + + agent_tqdm, aspiration_tqdm, sample_tqdm = multi_tqdm(3) + + totals = dict() + reference_totals = dict() + for is_reference in [False, True]: + for agent_name, agent in (agent_tqdm(agents.items(), desc="agents") + if not is_reference else agent_tqdm(reference_agents.items(), desc="reference agents")): + + for aspiration in aspiration_tqdm(aspirations, desc=agent_name): + if n_jobs == 1: + t = [ + compute_total(agent, env, aspiration) + for _ in sample_tqdm(range(sample_size), desc=f"{agent_name}, {aspiration=}") + ] + else: + t = Parallel(n_jobs=n_jobs)( + delayed(compute_total)(agent, env, aspiration) + for _ in sample_tqdm(range(sample_size), desc=f"{agent_name}, {aspiration=}") + ) + + if is_reference: + reference_totals[agent_name, aspiration] = t + else: + totals[agent_name, aspiration] = t + + fig = Figure(layout=Layout( title = title + f". 
{error_bar_confidence:.0%} confidence error bars", + xaxis_title = "Aspiration", + yaxis_title = "Total" )) + + aspirations_as_points = [ (aspiration if isinstance(aspiration, (float, int)) else mean(aspiration)) + for aspiration in aspirations ] + + point_aspirations = all(isinstance(aspiration, (int, float)) for aspiration in aspirations) + for i_lower_or_upper, lower_or_upper in enumerate([None] if point_aspirations else ["lower", "upper"]): + fig.add_trace(Scatter( x = aspirations_as_points, + y = [ aspiration if isinstance(aspiration, (float, int)) else aspiration[i_lower_or_upper] + for aspiration in aspirations ], + name = "aspiration" if point_aspirations else f"{lower_or_upper} aspiration" )) + + for is_reference in [False, True]: + for i_agent, agent_name in enumerate(reference_agents.keys() if is_reference else agents.keys()): + t = reference_totals if is_reference else totals + fig.add_trace(scatter_with_y_error_bars( x = aspirations_as_points, + y = [t[agent_name, aspiration] for aspiration in aspirations], + confidence = error_bar_confidence, + line = dict(color = DEFAULT_PLOTLY_COLORS[i_agent], dash = "dash" if is_reference else "solid"), + name = ("reference " if is_reference else "") + + (agent_name if not (len(agents) == 1 and agent_name == "agent 0") else "") )) + + fig.show() + + if save_to is not None: + fig.write_html(save_to) + +cfg = DQNConfig( aspiration_sampler = UniformPointwiseAspirationSampler(1e5, 1e5), + criterion_coefficients_for_loss = dict( maxAdmissibleQ = 1., + minAdmissibleQ = 1., + Q = 1. ), + exploration_rate_scheduler = + PiecewiseLinearScheduler([0., 0.1, 1.], [1., 0.05, 0.05]), + noisy_network_exploration = False, + # noisy_network_exploration_rate_scheduler = + # PiecewiseLinearScheduler([0., 0.1, 1.], [1., 0.05, 0.05]), + num_envs = 10, + async_envs = False, + discount = 1, + total_timesteps = 25_000, + training_starts = 1000, + batch_size = 4096, + training_frequency = 10, + target_network_update_frequency = 50, + satisfia_agent_params = { "lossCoeff4FeasibilityPowers": 0, + "lossCoeff4LRA1": 0, + "lossCoeff4Time1": 0, + "lossCoeff4Entropy1": 0, + "defaultPolicy": None }, + device = device, + plotted_criteria = None, # ["maxAdmissibleQ", "minAdmissibleQ", "Q"], + plot_criteria_frequency = 100, + states_for_plotting_criteria = None, # [(time, 2, 2) for time in range(10)], + state_aspirations_for_plotting_criteria = [(-5, -5), (-1, -1), (1, 1)], + actions_for_plotting_criteria = [2, 4] ) + +def train_and_plot(gym_env: str, min_achievable_total: float, max_achievable_total: float): + print(gym_env) + env = gym.make('LunarLander-v2') + + def discretize_space(space): + num_bins = [10] * space.shape[0] + bin_edges = [np.linspace(space.low[i], space.high[i], num_bins[i] + 1)[1:-1] for i in range(space.shape[0])] + return bin_edges + + def discretize_observations(observation, bin_edges): + discretized_observations = [] + for i in range(len(bin_edges)): + discretized_observations.append(np.digitize(observation[i], bin_edges[i])) + return np.array(discretized_observations) + + def make_env(): + return gym.make(gym_env) + + def make_model(pretrained=None): + d_observation = len(discretize_space(env.observation_space)) + n_actions = env.action_space.n + model = SatisfiaMLP( + input_size = d_observation, + output_not_depending_on_agent_parameters_sizes = { "maxAdmissibleQ": n_actions, + "minAdmissibleQ": n_actions }, + output_depending_on_agent_parameters_sizes = { "Q": n_actions }, + common_hidden_layer_sizes = [64, 64], + 
hidden_layer_not_depending_on_agent_parameters_sizes = [64], + hidden_layer_depending_on_agent_parameters_sizes = [64], + batch_size = cfg.num_envs, + layer_norms = True, + dropout = 0 + ) + if pretrained is not None: + model.load_state_dict(pretrained) + return model + + model = run_or_load( f"dqn-{str(gym_env)}-no-discount.pickle", + train_dqn, + make_env, + make_model, + cfg ) + model = model.to(device) + + learning_agent = AgentMDPDQN( cfg.satisfia_agent_params, + model, + num_actions = env.action_space.n, + device = device ) + + plot_totals_vs_aspiration( agents = learning_agent, + env = env, + aspirations = np.linspace( min_achievable_total - 1, + max_achievable_total + 1, + 20 ), + sample_size = 100, + # reference_agents = planning_agent, + title = f"totals for agent with no discount and longer training in {str(gym_env)}" ) + +train_and_plot( 'LunarLander-v2', + # replace -10 and 10 by the minimal and maximal achievable total rewards in the environment + min_achievable_total = -500, + max_achievable_total = 500 ) diff --git a/src/environments/very_simple_gridworlds.py b/src/environments/very_simple_gridworlds.py index 5fc64b1..ee7f3db 100644 --- a/src/environments/very_simple_gridworlds.py +++ b/src/environments/very_simple_gridworlds.py @@ -459,7 +459,7 @@ def make_simple_gridworld(gw="GW1", time=None, **kwargs): [' ', ' ', ' ', ' ', ' '], ['A', '#', '#', '#', 'G'], [' ', ' ', ' ', ' ', ' '] - ] + ] delta_grid = [ [' ', 'Δ', 'Δ', 'Δ', ' '], [' ', ' ', ' ', ' ', 'G'], diff --git a/src/satisfia/agents/learning/dqn/agent_mdp_dqn.py b/src/satisfia/agents/learning/dqn/agent_mdp_dqn.py index aa3dc80..7975dda 100644 --- a/src/satisfia/agents/learning/dqn/agent_mdp_dqn.py +++ b/src/satisfia/agents/learning/dqn/agent_mdp_dqn.py @@ -104,22 +104,22 @@ def local_policy( params: Dict[str, Any], action_aspirations_.midpoint() > state_aspirations_.midpoint().unsqueeze(-1) # Tensor[batch, action_candidate] of bools - # action_aspiration_midpoints_close_to_state_aspiration_midpoints = \ - # (action_aspirations_.midpoint() - state_aspirations_.midpoint().unsqueeze(-1)).abs() <= 1e-5 + action_aspiration_midpoints_close_to_state_aspiration_midpoints = \ + (action_aspirations_.midpoint() - state_aspirations_.midpoint().unsqueeze(-1)).abs() <= 1e-5 # Tensor[batch, first_action_candidate, second_action_candidate] of bools action_aspiration_midpoints_on_same_side = action_aspiration_midpoint_sides.unsqueeze(-1) \ == action_aspiration_midpoint_sides.unsqueeze(-2) - - # action_aspiration_midpoints_on_same_side &= \ - # action_aspiration_midpoints_close_to_state_aspiration_midpoints.logical_not().unsqueeze(-1) - # action_aspiration_midpoints_on_same_side &= \ - # action_aspiration_midpoints_close_to_state_aspiration_midpoints.logical_not().unsqueeze(-2) + + action_aspiration_midpoints_on_same_side &= \ + action_aspiration_midpoints_close_to_state_aspiration_midpoints.logical_not().unsqueeze(-1) + action_aspiration_midpoints_on_same_side &= \ + action_aspiration_midpoints_close_to_state_aspiration_midpoints.logical_not().unsqueeze(-2) # Tensor[batch, first_action_candidate] first_action_candidate_probabilities = action_probabilities - # Tensor[batch, first_action_candidate, second_action_candidate] + # Tensor[batch, first_action_candidate, second_action_candidate] second_action_candidate_probabilities_conditional_on_first_action_candidate = \ action_probabilities.unsqueeze(-1).where( action_aspiration_midpoints_on_same_side.logical_not(), diff --git a/src/satisfia/agents/learning/dqn/bellman_formula.py 
b/src/satisfia/agents/learning/dqn/bellman_formula.py index 18caf53..bfd2e54 100644 --- a/src/satisfia/agents/learning/dqn/bellman_formula.py +++ b/src/satisfia/agents/learning/dqn/bellman_formula.py @@ -54,13 +54,13 @@ def bellman_formula( replay_buffer_sample: ReplayBufferSample, if criterion_name in criterion_names: if cfg.double_q_learning: - target_for_q_network = next_criteria_from_q_network[criterion_name] + target_from_q_network = next_criteria_from_q_network[criterion_name] target_max_or_min = next_criteria[criterion_name].gather( -1, - torch_argmax_or_argmin(target_for_q_network, -1, keepdim=True) + torch_argmax_or_argmin(target_from_q_network, -1, keepdim=True) ).squeeze(-1) else: - target_max_or_min = torch_max_or_min(next_criteria[criterion_name], dim=-1) + target_max_or_min = torch_max_or_min(next_criteria[criterion_name], dim=-1).values target_max_or_min = where( replay_buffer_sample.dones, zeros_like(target_max_or_min), diff --git a/src/satisfia/agents/learning/dqn/config.py b/src/satisfia/agents/learning/dqn/config.py index 0e7eff6..bf19f96 100644 --- a/src/satisfia/agents/learning/dqn/config.py +++ b/src/satisfia/agents/learning/dqn/config.py @@ -87,6 +87,7 @@ class DQNConfig: aspiration_sampler: Callable[[int], IntervalTensor] = None device: str = "cpu" plotted_criteria: List[str] | None = None + plot_criteria_smoothness: int = 1 plot_criteria_frequency: int | None = None states_for_plotting_criteria: List | None = None state_aspirations_for_plotting_criteria: List | None = None diff --git a/src/satisfia/agents/learning/dqn/exploration_strategy.py b/src/satisfia/agents/learning/dqn/exploration_strategy.py index 71d8592..94167f3 100644 --- a/src/satisfia/agents/learning/dqn/exploration_strategy.py +++ b/src/satisfia/agents/learning/dqn/exploration_strategy.py @@ -34,6 +34,7 @@ def __call__(self, observations: Tensor, timestep: int): def satisfia_policy_actions(self, observations: Tensor) -> Categorical: criteria = self.target_network(observations, self.aspirations) complete_criteria(criteria) + # criteria["maxAdmissibleQ"], criteria["minAdmissibleQ"] = criteria["maxAdmissibleQ"].maximum(criteria["minAdmissibleQ"]), criteria["maxAdmissibleQ"].minimum(criteria["minAdmissibleQ"]) self.criteria = criteria return agent_mpd_dqn.local_policy( self.cfg.satisfia_agent_params, criteria, diff --git a/src/satisfia/agents/learning/dqn/train.py b/src/satisfia/agents/learning/dqn/train.py index 16f7b5f..d8f756a 100644 --- a/src/satisfia/agents/learning/dqn/train.py +++ b/src/satisfia/agents/learning/dqn/train.py @@ -11,13 +11,14 @@ from gymnasium.wrappers import AutoResetWrapper from gymnasium.vector import AsyncVectorEnv, SyncVectorEnv import torch -from torch import tensor, zeros_like +from torch import tensor from torch.nn import Module from torch.optim import AdamW, Optimizer from joblib import Parallel, delayed from dataclasses import dataclass, field from collections import Counter from statistics import mean +from more_itertools import chunked from tqdm import tqdm from typing import Callable, Tuple, List, Dict from plotly.colors import DEFAULT_PLOTLY_COLORS @@ -29,14 +30,12 @@ def train_dqn( make_env: Callable[[], Env], stats = DQNTrainingStatistics(cfg) - num_visits_per_state = Counter() - q_network = make_model() - target_network = make_model() + target_network = make_model() target_network.load_state_dict(q_network.state_dict()) - optimizer = AdamW(q_network.parameters(), lr=cfg.learning_rate_scheduler(0)) + optimizer = AdamW(q_network.parameters(), 
lr=cfg.learning_rate_scheduler(0), weight_decay=0) - make_envs = [ (lambda: AutoResetWrapper(RestrictToPossibleActionsWrapper(make_env()))) + make_envs = [ (lambda: AutoResetWrapper(make_env())) for _ in range(cfg.num_envs) ] envs = AsyncVectorEnv(make_envs) if cfg.async_envs else SyncVectorEnv(make_envs) @@ -46,14 +45,12 @@ def train_dqn( make_env: Callable[[], Env], else cfg.frozen_model_for_exploration, cfg, num_actions=envs.action_space.nvec[0] - ) + ) replay_buffer = ReplayBuffer(cfg.buffer_size, device=cfg.device) observations, _ = envs.reset() for timestep in tqdm(range(cfg.total_timesteps), desc="training dqn"): - for observation in observations: - num_visits_per_state[tuple(observation)] += 1 actions = exploration_strategy(tensor(observations, device=cfg.device), timestep=timestep) @@ -78,7 +75,7 @@ def train_dqn( make_env: Callable[[], Env], register_criteria_in_stats = cfg.plotted_criteria is not None \ and timestep % cfg.plot_criteria_frequency == 0 if register_criteria_in_stats: - stats.register_criteria(q_network, timestep) + stats.register_criteria(target_network, timestep) train = timestep >= cfg.training_starts and timestep % cfg.training_frequency == 0 if train: @@ -127,12 +124,12 @@ def train_dqn( make_env: Callable[[], Env], + (1 - cfg.soft_target_network_update_coefficient) * q_network_param.data ) - print(f"{num_visits_per_state=}") - if cfg.plotted_criteria is not None: stats.plot_criteria(q_network, RestrictToPossibleActionsWrapper(make_env())) - return q_network + # if cfg.soft_target_network_update_coefficient != 0 + # returning the q_network is not the same as returning the target network + return target_network def set_learning_rate(optimizer: Optimizer, learning_rate: float) -> None: for param_group in optimizer.param_groups: @@ -162,6 +159,9 @@ def compute_total(agent, env, state, state_aspiration, first_action=None): observation = next_observation return total +def smoothen(xs, smoothness): + return [mean(chunk) for chunk in chunked(xs, smoothness)] + @dataclass class DQNTrainingStatistics: cfg: DQNConfig @@ -198,7 +198,9 @@ def ground_truth_criteria(self, model, env) -> Dict[Tuple["state", "state_aspira getattr(self.cfg.planning_agent_for_plotting_ground_truth, criterion) if criterion in ["maxAdmissibleQ", "minAdmissibleQ"]: - criterion_value = criterion_function(state, action) + possible_action = action in self.cfg.planning_agent_for_plotting_ground_truth.possible_actions(state) + criterion_value = \ + criterion_function(state, action) if possible_action else None elif criterion in ["Q"]: agent = AgentMDPDQN( self.cfg.satisfia_agent_params, model @@ -234,10 +236,11 @@ def plot_criteria(self, model, env): for state_aspiration in self.cfg.state_aspirations_for_plotting_criteria: dropdown_menu_titles.append(f"{criterion} in state {state} with state aspiration {state_aspiration}") for action in self.cfg.actions_for_plotting_criteria: + y = [ self.criterion_history[timestep, state, state_aspiration, criterion, action] + for timestep in timesteps ] fig.add_scatter( - x = timesteps, - y = [ self.criterion_history[timestep, state, state_aspiration, criterion, action] - for timestep in timesteps ], + x = smoothen(timesteps, self.cfg.plot_criteria_smoothness), + y = smoothen(y, self.cfg.plot_criteria_smoothness), line = dict(color=DEFAULT_PLOTLY_COLORS[action]), name = f"action {action}", visible = first_iteration diff --git a/src/satisfia/agents/learning/environment_wrappers.py b/src/satisfia/agents/learning/environment_wrappers.py index d9b5f1a..c4d1ef7 100644 --- 
a/src/satisfia/agents/learning/environment_wrappers.py +++ b/src/satisfia/agents/learning/environment_wrappers.py @@ -1,5 +1,8 @@ from gymnasium import Env, Wrapper +from torch import Tensor +import numpy as np import random +from typing import Tuple DO_NOTHING_ACTION = 4 @@ -12,11 +15,52 @@ def reset(self, *args, **kwargs): return self.env.reset(*args, **kwargs) def step(self, action, *args, **kwargs): - possible_actions = self.env.possible_actions() + # for some reason self.env.possible_actions(self.env.get_state()) behaves differently from + # self.env.possible_actions(), although as far as i undestand they should behave the same + possible_actions = self.env.possible_actions(self.env.get_state()) if action not in possible_actions: # ah, this condition should always be true, but for some reason isn't in GW23 if self.default_action in possible_actions: action = self.default_action else: action = random.choice(possible_actions) - return self.env.step(action, *args, **kwargs) \ No newline at end of file + return self.env.step(action, *args, **kwargs) + +class RescaleDeltaWrapper(Wrapper): + def __init__(self, env: Env, from_interval: Tuple[int, int], to_interval: Tuple[int, int]): + assert len(from_interval) == 2 + assert len(to_interval) == 2 + assert from_interval[0] < from_interval[1] + assert to_interval[0] <= to_interval[1] + + super().__init__(env) + + self.from_interval = from_interval + self.to_interval = to_interval + + def reset(self, *args, **kwargs): + return self.env.reset(*args, **kwargs) + + def step(self, *args, **kwargs): + observation, delta, done, truncated, info = self.env.step(*args, **kwargs) + from_l, from_h = self.from_interval + to_l, to_h = self.to_interval + rescaled_delta = to_l + (to_h - to_l) / (from_h - from_l) * (delta - from_l) + return observation, rescaled_delta, done, truncated, info + +class ObservationToTupleWrapper(Wrapper): + def __init__(self, env: Env): + super().__init__(env) + + def reset(self, *args, **kwargs): + observation, info = self.env.reset(*args, **kwargs) + return self.to_tuple(observation), info + + def step(self, *args, **kwargs): + observation, delta, done, truncated, info = self.env.step(*args, **kwargs) + return self.to_tuple(observation), delta, done, truncated, info + + def to_tuple(self, x): + if isinstance(x, (Tensor, np.ndarray)): + x = tuple(x.tolist()) + return x diff --git a/src/satisfia/agents/makeMDPAgentSatisfia.py b/src/satisfia/agents/makeMDPAgentSatisfia.py index 1861244..a22395a 100755 --- a/src/satisfia/agents/makeMDPAgentSatisfia.py +++ b/src/satisfia/agents/makeMDPAgentSatisfia.py @@ -457,7 +457,7 @@ def probability_add(p, key, weight): mid1 = midpoint(aleph1) indices2 = [index for index in indices if between(midTarget, midpoint(alephs[index]), mid1)] if len(indices2) == 0: - print("OOPS: indices2 is empty", a1, adm1, aleph4state, midTarget, aleph1, mid1, alephs) + # print("OOPS: indices2 is empty", a1, adm1, aleph4state, midTarget, aleph1, mid1, alephs) indices2 = indices propensities2 = propensity(indices2, alephs) @@ -469,7 +469,7 @@ def probability_add(p, key, weight): mid2 = midpoint(aleph2) p = relativePosition(mid1, midTarget, mid2) if p < 0 or p > 1: - print("OOPS: p", p) + # print("OOPS: p", p) p = clip(0, p, 1) if self.verbose or self.debug: diff --git a/src/world_model/world_model.py b/src/world_model/world_model.py index e095b7d..fc2db39 100644 --- a/src/world_model/world_model.py +++ b/src/world_model/world_model.py @@ -275,3 +275,8 @@ def get_prolonged_version(self, horizon=None): and possibly adding 
new states. All formerly non-terminal states, their action spaces, and the corresponding transitons must remain unchanged.""" raise NotImplementedError() + + # temporary method added because RestrictToPossibleActionsWrapper needs to access _state, + # which is private + def get_state(self): + return self._state From 202d2be026abae75d114b0bca0f740286c643e5d Mon Sep 17 00:00:00 2001 From: Vladimir Ivanov Date: Tue, 18 Jun 2024 11:25:04 +0200 Subject: [PATCH 6/9] Fixed some bugs. --- scripts/test_dqn.py | 60 +++++++++++-------- src/satisfia/agents/learning/dqn/config.py | 2 +- .../learning/dqn/exploration_strategy.py | 6 +- src/satisfia/agents/learning/dqn/train.py | 18 +++++- 4 files changed, 54 insertions(+), 32 deletions(-) diff --git a/scripts/test_dqn.py b/scripts/test_dqn.py index 127c814..611484e 100644 --- a/scripts/test_dqn.py +++ b/scripts/test_dqn.py @@ -1,3 +1,7 @@ +from beartype.vale import Is + +EvenValidator = Is[lambda x: x % 2 == 0] + import sys sys.path.insert(0, "./src/") @@ -15,6 +19,7 @@ from satisfia.util.interval_tensor import IntervalTensor import gymnasium as gym +from gymnasium.wrappers import TimeLimit import torch from torch import tensor from torch.nn import Module @@ -109,7 +114,7 @@ def plot_totals_vs_aspiration( agents: Iterable[AspirationAgent] | Dict[str, Asp sample_size: int, reference_agents: Iterable[AspirationAgent] | Dict[str, AspirationAgent] | AspirationAgent = [], error_bar_confidence: float = 0.95, - n_jobs: int = 1, + n_jobs: int = -1, title: str = "Totals for agent(s)", save_to: str | None = None ): @@ -181,25 +186,26 @@ def plot_totals_vs_aspiration( agents: Iterable[AspirationAgent] | Dict[str, Asp # this is only for the test_box gridworld reachable_states = [(0, 2, 2)] + [(time, y, 2) for time in range(1, 10) for y in [1, 2, 3]] -cfg = DQNConfig( aspiration_sampler = UniformPointwiseAspirationSampler(-20, 10), +cfg = DQNConfig( aspiration_sampler = UniformPointwiseAspirationSampler(-10, 10), criterion_coefficients_for_loss = dict( maxAdmissibleQ = 1., - minAdmissibleQ = 1., ) , + minAdmissibleQ = 0., ) , # Q = 1. 
), exploration_rate_scheduler = PiecewiseLinearScheduler([0., 0.5, 1.], [1., 0.01, 0.01]), noisy_network_exploration = False, # noisy_network_exploration_rate_scheduler = # PiecewiseLinearScheduler([0., 0.1, 1.], [1., 0.05, 0.05]), - num_envs = 10, + num_envs = 1, async_envs = False, - discount = 1, - soft_target_network_update_coefficient = 0, - learning_rate_scheduler = lambda _: 1e-2, - total_timesteps = 50_000, - training_starts = 500, - batch_size = 4096, - training_frequency = 10, - target_network_update_frequency = 100, + discount = 0.99, + soft_target_network_update_coefficient = 0.999, + learning_rate_scheduler = lambda _: 5e-4, + total_timesteps = 2_000_000, + training_starts = 64, + batch_size = 64, + buffer_size = 100_000, + training_frequency = 1, + target_network_update_frequency = 4, satisfia_agent_params = { "lossCoeff4FeasibilityPowers": 0, "lossCoeff4LRA1": 0, "lossCoeff4Time1": 0, @@ -208,15 +214,15 @@ def plot_totals_vs_aspiration( agents: Iterable[AspirationAgent] | Dict[str, Asp device = device, plotted_criteria = None, # ["maxAdmissibleQ", "minAdmissibleQ"], plot_criteria_frequency = 100, - states_for_plotting_criteria = [(time, 2, 2) for time in range(10)], - state_aspirations_for_plotting_criteria = [(0, 0), (1e3, 1e3)], # [(-5, -5), (-1, -1), (1, 1)], + states_for_plotting_criteria = [(1, 3, 0, 1, 0, 2, 0, 5, 0), (6, 3, 0, 1, 0, 2, 0, 5, 0), (3, 5, 0, 1, 0, 3, 0, 6, 0), (7, 4, 0, 1, 0, 3, 0, 5, 0), (7, 5, 0, 1, 0, 2, 0, 6, 0), (8, 4, 0, 1, 0, 3, 0, 6, 0), (9, 4, 0, 1, 0, 2, 0, 5, 0), (9, 3, 0, 1, 0, 2, 0, 6, 0), (9, 5, 0, 1, 0, 2, 0, 6, 0), (5, 4, 0, 1, 0, 3, 0, 6, 0), (5, 3, 0, 1, 0, 2, 0, 5, 0), (4, 4, 0, 1, 0, 2, 0, 5, 0), (5, 4, 0, 1, 0, 2, 0, 5, 0), (6, 5, 0, 1, 0, 2, 0, 6, 0), (8, 5, 0, 1, 0, 3, 0, 6, 0), (8, 4, 0, 1, 0, 2, 0, 6, 0), (4, 3, 0, 1, 0, 2, 0, 6, 0), (7, 5, 0, 1, 0, 3, 0, 6, 0), (4, 4, 0, 1, 0, 3, 0, 5, 0), (4, 5, 0, 1, 0, 2, 0, 6, 0), (6, 4, 0, 1, 0, 3, 0, 5, 0), (9, 4, 0, 1, 0, 3, 0, 6, 0), (8, 5, 0, 1, 0, 2, 0, 6, 0), (3, 3, 0, 1, 0, 2, 0, 6, 0), (3, 5, 0, 1, 0, 2, 0, 6, 0), (7, 4, 0, 1, 0, 2, 0, 5, 0), (8, 3, 0, 1, 0, 2, 0, 6, 0), (3, 4, 0, 1, 0, 3, 0, 6, 0), (2, 4, 0, 1, 0, 3, 0, 6, 0), (6, 4, 0, 1, 0, 2, 0, 6, 0), (3, 4, 0, 1, 0, 2, 0, 5, 0), (7, 3, 0, 1, 0, 2, 0, 5, 0), (5, 4, 0, 1, 0, 3, 0, 5, 0), (4, 4, 0, 1, 0, 2, 0, 6, 0), (4, 5, 0, 1, 0, 3, 0, 6, 0), (5, 5, 0, 1, 0, 3, 0, 6, 0), (0, 4, 0, 1, 0, 3, 0, 5, 0), (9, 3, 0, 1, 0, 2, 0, 5, 0), (8, 4, 0, 1, 0, 3, 0, 5, 0), (2, 3, 0, 1, 0, 2, 0, 5, 0), (9, 4, 0, 1, 0, 2, 0, 6, 0), (4, 3, 0, 1, 0, 2, 0, 5, 0), (6, 4, 0, 1, 0, 3, 0, 6, 0), (6, 3, 0, 1, 0, 2, 0, 6, 0), (5, 4, 0, 1, 0, 2, 0, 6, 0), (7, 4, 0, 1, 0, 3, 0, 6, 0), (5, 3, 0, 1, 0, 2, 0, 6, 0), (1, 4, 0, 1, 0, 3, 0, 5, 0), (3, 3, 0, 1, 0, 2, 0, 5, 0), (5, 5, 0, 1, 0, 2, 0, 6, 0), (2, 5, 0, 1, 0, 3, 0, 6, 0), (9, 5, 0, 1, 0, 3, 0, 6, 0), (8, 4, 0, 1, 0, 2, 0, 5, 0), (3, 4, 0, 1, 0, 3, 0, 5, 0), (8, 3, 0, 1, 0, 2, 0, 5, 0), (2, 4, 0, 1, 0, 3, 0, 5, 0), (6, 4, 0, 1, 0, 2, 0, 5, 0), (1, 5, 0, 1, 0, 3, 0, 6, 0), (7, 4, 0, 1, 0, 2, 0, 6, 0), (6, 5, 0, 1, 0, 3, 0, 6, 0), (9, 4, 0, 1, 0, 3, 0, 5, 0), (4, 4, 0, 1, 0, 3, 0, 6, 0), (7, 3, 0, 1, 0, 2, 0, 6, 0), (2, 4, 0, 1, 0, 2, 0, 5, 0)], + state_aspirations_for_plotting_criteria = [(0, 0)], # [(-5, -5), (-1, -1), (1, 1)], actions_for_plotting_criteria = [0, 1, 2, 3, 4] ) def train_and_plot( env_name: str, gridworld: bool = True, max_achievable_total: float | None = None, min_achievable_total: float | None = None ): - + print(env_name) def make_env(): @@ -225,6 +231,7 @@ def make_env(): env = 
RestrictToPossibleActionsWrapper(env) else: env = gym.make(env_name) + env = TimeLimit(env, 1_000) env = RescaleDeltaWrapper(env, from_interval=(-500, 100), to_interval=(-5, 1)) return env @@ -237,11 +244,11 @@ def make_model(pretrained=None): output_not_depending_on_agent_parameters_sizes = { "maxAdmissibleQ": n_actions, "minAdmissibleQ": n_actions }, output_depending_on_agent_parameters_sizes = dict(), # { "Q": n_actions }, - common_hidden_layer_sizes = [32, 32], + common_hidden_layer_sizes = [64, 64], hidden_layer_not_depending_on_agent_parameters_sizes = [16], hidden_layer_depending_on_agent_parameters_sizes = [], # [64], batch_size = cfg.num_envs, - layer_norms = True, + layer_norms = False, dropout = 0 ) if pretrained is not None: @@ -257,7 +264,7 @@ def make_model(pretrained=None): planning_agent = AgentMDPPlanning(cfg.satisfia_agent_params, make_env()) if gridworld else None - model = run_or_load( f"dqn-{env_name}-no-discount-longer-training.pickle", + model = run_or_load( f"dqn-{env_name}-no-discount.pickle", train_dqn, make_env, make_model, @@ -291,15 +298,15 @@ def make_model(pretrained=None): env = make_env(), aspirations = np.linspace( min_achievable_total - 1, max_achievable_total + 1, - 20 ), - sample_size = 1000, + 10 ), + sample_size = 250, # reference_agents = planning_agent, title = f"totals for agent with no discount and longer training in {env_name}" ) -# train_and_plot( 'LunarLander-v2', -# gridworld = False, -# min_achievable_total = -5, -# max_achievable_total = 1 ) +train_and_plot( 'LunarLander-v2', + gridworld = False, + min_achievable_total = -5, + max_achievable_total = 5 ) all_gridworlds = [ "GW1", "GW2", "GW3", "GW4", "GW5", "GW6", "GW22", "GW23", "GW24", "GW25", "GW26", @@ -309,7 +316,10 @@ def make_model(pretrained=None): gridworlds_without_delta = ["GW23", "GW24"] gridworlds_requiring_longer_training = [ "GW28", "test_box", "GW29", "GW26", "GW30", "GW32"] +# still don't work even after longer training: test_box, GW30, GW28, GW29 + +# train_and_plot("test_box") -Parallel(n_jobs=-1)(delayed(train_and_plot)(gridworld_name) for gridworld_name in all_gridworlds) +# Parallel(n_jobs=-1)(delayed(train_and_plot)(gridworld_name) for gridworld_name in all_gridworlds) # for gridworld_name in all_gridworlds: # train_and_plot(gridworld_name) diff --git a/src/satisfia/agents/learning/dqn/config.py b/src/satisfia/agents/learning/dqn/config.py index bf19f96..7d7caf1 100644 --- a/src/satisfia/agents/learning/dqn/config.py +++ b/src/satisfia/agents/learning/dqn/config.py @@ -59,7 +59,7 @@ class DQNConfig: total_timesteps: int = 500_000 num_envs: int = 1 async_envs: bool = True - buffer_size = 10_000 + buffer_size: int = 10_000 learning_rate_scheduler: Callable[[float], float] = \ ConstantScheduler(1e-3) batch_size: int = 128 diff --git a/src/satisfia/agents/learning/dqn/exploration_strategy.py b/src/satisfia/agents/learning/dqn/exploration_strategy.py index 94167f3..9667a60 100644 --- a/src/satisfia/agents/learning/dqn/exploration_strategy.py +++ b/src/satisfia/agents/learning/dqn/exploration_strategy.py @@ -3,7 +3,7 @@ from satisfia.agents.learning.dqn.criteria import complete_criteria from satisfia.util.interval_tensor import IntervalTensor, relative_position, interpolate -from torch import Tensor, empty, ones, full_like, randint, bernoulli, no_grad +from torch import Tensor, empty, ones, full_like, randint, bernoulli, no_grad, allclose from torch.nn import Module from torch.distributions.categorical import Categorical @@ -20,7 +20,7 @@ def __init__(self, target_network: 
Module, cfg: DQNConfig, num_actions: int): @no_grad() def __call__(self, observations: Tensor, timestep: int): actions = self.satisfia_policy_actions(observations).sample() - + exploration_rate = self.cfg.exploration_rate_scheduler(timestep / self.cfg.total_timesteps) explore = bernoulli(full_like(actions, exploration_rate, dtype=float)).bool() actions[explore] = randint( low=0, @@ -33,8 +33,8 @@ def __call__(self, observations: Tensor, timestep: int): @no_grad() def satisfia_policy_actions(self, observations: Tensor) -> Categorical: criteria = self.target_network(observations, self.aspirations) - complete_criteria(criteria) # criteria["maxAdmissibleQ"], criteria["minAdmissibleQ"] = criteria["maxAdmissibleQ"].maximum(criteria["minAdmissibleQ"]), criteria["maxAdmissibleQ"].minimum(criteria["minAdmissibleQ"]) + complete_criteria(criteria) self.criteria = criteria return agent_mpd_dqn.local_policy( self.cfg.satisfia_agent_params, criteria, diff --git a/src/satisfia/agents/learning/dqn/train.py b/src/satisfia/agents/learning/dqn/train.py index d8f756a..59aadd5 100644 --- a/src/satisfia/agents/learning/dqn/train.py +++ b/src/satisfia/agents/learning/dqn/train.py @@ -33,7 +33,13 @@ def train_dqn( make_env: Callable[[], Env], q_network = make_model() target_network = make_model() target_network.load_state_dict(q_network.state_dict()) - optimizer = AdamW(q_network.parameters(), lr=cfg.learning_rate_scheduler(0), weight_decay=0) + + # we set weight decay to zero because we had some mild Q value underestimation problems and were + # suspecting they were because of the weight decay, but we are not sure at all this is correct + # and not sure aet all setting weigth decay to zero is helpful + optimizer = AdamW( q_network.parameters(), + lr = cfg.learning_rate_scheduler(0), + weight_decay = 0 ) make_envs = [ (lambda: AutoResetWrapper(make_env())) for _ in range(cfg.num_envs) ] @@ -49,8 +55,12 @@ def train_dqn( make_env: Callable[[], Env], replay_buffer = ReplayBuffer(cfg.buffer_size, device=cfg.device) + seen_observations = set() + observations, _ = envs.reset() for timestep in tqdm(range(cfg.total_timesteps), desc="training dqn"): + for observation in observations: + seen_observations.add(tuple(observation.tolist())) actions = exploration_strategy(tensor(observations, device=cfg.device), timestep=timestep) @@ -124,11 +134,13 @@ def train_dqn( make_env: Callable[[], Env], + (1 - cfg.soft_target_network_update_coefficient) * q_network_param.data ) + # print(seen_observations) + if cfg.plotted_criteria is not None: stats.plot_criteria(q_network, RestrictToPossibleActionsWrapper(make_env())) - # if cfg.soft_target_network_update_coefficient != 0 - # returning the q_network is not the same as returning the target network + # if cfg.soft_target_network_update_coefficient != 0 returning the q_network is not the same as + # returning the target network return target_network def set_learning_rate(optimizer: Optimizer, learning_rate: float) -> None: From 18abd986ba64c71cceb80a465b55e7d17b076420 Mon Sep 17 00:00:00 2001 From: Jobst Heitzig Date: Tue, 18 Jun 2024 22:13:18 +0200 Subject: [PATCH 7/9] Update environment-compilation.md --- environment-compilation.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/environment-compilation.md b/environment-compilation.md index a6d2a82..e7c8cc3 100644 --- a/environment-compilation.md +++ b/environment-compilation.md @@ -4,6 +4,8 @@ This is a pruned version of [this list](https://docs.google.com/spreadsheets/d/1 The "Requires Learning" column cointains "Yes" if some 
From 18abd986ba64c71cceb80a465b55e7d17b076420 Mon Sep 17 00:00:00 2001
From: Jobst Heitzig
Date: Tue, 18 Jun 2024 22:13:18 +0200
Subject: [PATCH 7/9] Update environment-compilation.md

---
 environment-compilation.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/environment-compilation.md b/environment-compilation.md
index a6d2a82..e7c8cc3 100644
--- a/environment-compilation.md
+++ b/environment-compilation.md
@@ -4,6 +4,8 @@ This is a pruned version of [this list](https://docs.google.com/spreadsheets/d/1
 
 The "Requires Learning" column contains "Yes" if some kind of gradient descent or genetic optimization is needed and the problem cannot be approached with planning.
 
+(Please also see [the SatisfIA project's kanban board](https://github.com/orgs/pik-gane/projects/2) for cards containing links to further environments such as the MACHIAVELLI or EMPA suites of environments. A big challenge for testing will be to replace the shipped reward function of an environment by a meaningful set of evaluation metrics in terms of which aspiration-type goals can be specified. E.g., in the Lunar Lander, one could use four evaluation metrics: the horizontal and vertical positions and velocities of the lander.)
+
 | Short Description | Requires Learning | Misalignment Occurred In The Wild | Long Description | Algorithm | Required Compute | Reference |
 | ----------------- | ----------------- | -------------------------------- | ---------------- | --------- | ---------------- | --------- |
 | Misgeneralization of correct goals | I think no | No | Agents trained on simple gridworlds do not learn the specified goal. | V-MPO | I guess tens of GPU minutes | [ArXiv](https://arxiv.org/abs/2210.01790), sections 3.1 and 3.2 |

From 1c6df2294387d4839ffabaab425752633c74eef1 Mon Sep 17 00:00:00 2001
From: rqc1 <61781558+rqc1@users.noreply.github.com>
Date: Wed, 21 Aug 2024 13:52:22 +0200
Subject: [PATCH 8/9] Update building_blocks.py

---
 .../agents/learning/models/building_blocks.py | 22 ++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/src/satisfia/agents/learning/models/building_blocks.py b/src/satisfia/agents/learning/models/building_blocks.py
index 852396a..95ebd80 100644
--- a/src/satisfia/agents/learning/models/building_blocks.py
+++ b/src/satisfia/agents/learning/models/building_blocks.py
@@ -1,6 +1,6 @@
 from satisfia.util.interval_tensor import IntervalTensor
 
-from torch import Tensor, cat, stack, empty, zeros, ones, no_grad
+from torch import Tensor, cat, stack, empty, zeros, ones, no_grad, minimum, maximum
 from torch.nn import Module, Linear, ReLU, LayerNorm, Dropout, Parameter, ModuleList, ModuleDict
 from more_itertools import pairwise
 from math import sqrt
@@ -162,6 +162,20 @@ def new_noise(self, std: int, which_in_batch: Tensor | None = None):
             noisy_linear.new_noise(std=std, which_in_batch=which_in_batch)
 
+class MinMaxLayer(Module):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, output: Dict[str, Tensor]) -> Dict[str, Tensor]:
+        processed_output = {}
+        for key, value in output.items():
+            Qmin_k, Qmax_k = value[:, 0], value[:, 1]
+            M_k = (Qmin_k + Qmax_k) / 2
+            new_Qmin_k = minimum(Qmin_k, M_k)
+            new_Qmax_k = maximum(Qmax_k, M_k)
+            processed_output[key] = stack((new_Qmin_k, new_Qmax_k), dim=-1)
+        return processed_output
+
 class SatisfiaMLP(Module):
     def __init__(self, input_size: int,
                  output_not_depending_on_agent_parameters_sizes: Dict[str, int],
@@ -202,6 +216,8 @@ def __init__(self, input_size: int,
             batch_size = batch_size
         )
 
+        self.min_max_layer = MinMaxLayer()
+
         agent_parameters_size = 2
 
         self.layers_depending_on_agent_parameters = NoisyMLP(
@@ -228,6 +244,10 @@ def forward(self, observations: Tensor, aspirations: Tensor, noisy: bool = True)
             noisy = noisy
         )
 
+        output_not_depending_on_agent_parameters = self.min_max_layer(
+            output_not_depending_on_agent_parameters
+        )
+
         output_depending_on_agent_parameters = self.layers_depending_on_agent_parameters(
             cat((common_hidden, agent_parameters_emebdding), -1),
             noisy = noisy
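
For context on the `MinMaxLayer` introduced above: whatever the network outputs, the layer pulls both entries toward their midpoint, which guarantees that the first column never exceeds the second. A minimal sketch of that invariant, assuming each value has shape `[batch, 2]` with column 0 holding the lower estimate and column 1 the upper one (illustrative only, not part of the patch series):

```python
# Invariant of the min/max construction: after processing, column 0 <= column 1 elementwise.
import torch
from torch import minimum, maximum, stack

def min_max(value: torch.Tensor) -> torch.Tensor:
    q_min, q_max = value[:, 0], value[:, 1]
    midpoint = (q_min + q_max) / 2
    return stack((minimum(q_min, midpoint), maximum(q_max, midpoint)), dim=1)

x = torch.tensor([[ 1.0, -2.0],   # inverted interval: both entries collapse to the midpoint -0.5
                  [-3.0,  4.0]])  # well-ordered interval: left unchanged
y = min_max(x)
assert (y[:, 0] <= y[:, 1]).all()
print(y)  # tensor([[-0.5000, -0.5000], [-3.0000,  4.0000]])
```
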
From aabc46badefc18b835666a24306a5c93c802a781 Mon Sep 17 00:00:00 2001
From: rqc1 <61781558+rqc1@users.noreply.github.com>
Date: Tue, 3 Sep 2024 14:19:27 +0200
Subject: [PATCH 9/9] Update building_blocks.py

---
 src/satisfia/agents/learning/models/building_blocks.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/satisfia/agents/learning/models/building_blocks.py b/src/satisfia/agents/learning/models/building_blocks.py
index 95ebd80..d3af58c 100644
--- a/src/satisfia/agents/learning/models/building_blocks.py
+++ b/src/satisfia/agents/learning/models/building_blocks.py
@@ -173,7 +173,7 @@ def forward(self, output: Dict[str, Tensor]) -> Dict[str, Tensor]:
             M_k = (Qmin_k + Qmax_k) / 2
             new_Qmin_k = minimum(Qmin_k, M_k)
             new_Qmax_k = maximum(Qmax_k, M_k)
-            processed_output[key] = stack((new_Qmin_k, new_Qmax_k), dim=-1)
+            processed_output[key] = stack((new_Qmin_k, new_Qmax_k), dim=1)
         return processed_output
 
 class SatisfiaMLP(Module):
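
Tying back to the environment-compilation.md note in PATCH 7/9 about replacing an environment's shipped reward with evaluation metrics: below is a sketch of how the four Lunar Lander metrics mentioned there could be exposed through a gymnasium wrapper. The wrapper class and the `evaluation_metrics` info key are hypothetical names, not part of this codebase; only the observation layout (x, y, vx, vy in the first four components) is standard for LunarLander-v2.

```python
# Sketch: expose aspiration-relevant evaluation metrics for LunarLander-v2 via the info dict,
# instead of relying on the environment's shipped reward. Hypothetical wrapper, for illustration.
import gymnasium as gym

class LunarLanderEvaluationMetricsWrapper(gym.Wrapper):
    """Adds the four evaluation metrics suggested in environment-compilation.md to `info`."""

    def step(self, action):
        observation, reward, terminated, truncated, info = self.env.step(action)
        # The first four components of the LunarLander observation are x, y, vx, vy.
        info["evaluation_metrics"] = {
            "horizontal_position": float(observation[0]),
            "vertical_position":   float(observation[1]),
            "horizontal_velocity": float(observation[2]),
            "vertical_velocity":   float(observation[3]),
        }
        return observation, reward, terminated, truncated, info

# Usage (requires the box2d extra of gymnasium):
# env = LunarLanderEvaluationMetricsWrapper(gym.make("LunarLander-v2"))
```
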