Commit

verified backprop is working
taddyb committed Jul 12, 2023
1 parent efb2560 commit cc7dddd
Showing 4 changed files with 8 additions and 46 deletions.
40 changes: 1 addition & 39 deletions dpLGAR/agents/DifferentiableLGAR.py
@@ -114,7 +114,7 @@ def train_one_epoch(self):
         y_hat_ = torch.zeros([len(self.data_loader)], device=self.cfg.device) # runoff
         y_t_ = torch.zeros([len(self.data_loader)], device=self.cfg.device) # runoff
         self.optimizer.zero_grad()
-        for i, (x, y_t) in enumerate(tqdm(self.data_loader, desc=f"Epoch {self.current_epoch} Training")):
+        for i, (x, y_t) in enumerate(tqdm(self.data_loader, desc=f"Epoch {self.current_epoch + 1} Training")):
             # Resetting output vars
             runoff, percolation = self.model(x.squeeze())
             y_hat_[i] = runoff
@@ -127,46 +127,8 @@ def train_one_epoch(self):
         warmup = self.cfg.models.hyperparameters.warmup
         self.y_hat = y_hat_[warmup:]
         self.y_t = y_t_[warmup:]
-        # If there is no gradient (i.e. no runoff), then we shouldn't validate
         self.validate()
 
-    # Commenting out until we get the normal model to work
-    # def train_one_epoch(self):
-    #     """
-    #     One epoch of training
-    #     :return:
-    #     """
-    #     for i, (x, y_t) in enumerate(self.data_loader):
-    #         # Resetting output vars
-    #         y_hat_ = torch.zeros([x.shape[0]], device=self.cfg.device) # runoff
-    #         y_t_ = y_t
-    #         percolation_batch = torch.zeros([x.shape[0]], device=self.cfg.device)
-    #         for j in trange(x.shape[0], desc=f"Running Minibatch {i+1}", leave=True):
-    #             # Minibatch loop
-    #             inputs = x[j]
-    #             runoff, percolation = self.model(inputs)
-    #             y_hat_[j] = runoff
-    #             percolation_batch[j] = percolation
-    #         # Updating the total mass of the system
-    #         self.mass_balance.change_mass(self.model)
-    #         time.sleep(0.01)
-    #         self.mass_balance.report_mass(self.model)
-    #         if y_hat_.requires_grad:
-    #             if i == 0:
-    #                 warmup = self.cfg.models.hyperparameters.warmup
-    #             else:
-    #                 warmup = 0
-    #             self.y_hat = y_hat_[warmup:]
-    #             self.y_t = y_t_[warmup:]
-    #             # If there is no gradient (i.e. no runoff), then we shouldn't validate
-    #             self.validate()
-    #             self.optimizer.zero_grad()
-    #             self.model.update_soil_parameters()
-    #
-    #         starting_index = i * x.shape[0]
-    #         ending_index = (i + 1) * x.shape[0]
-    #         self.percolation_output[starting_index:ending_index] = percolation_batch
-
     def validate(self) -> None:
         """
         One cycle of model validation
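The retained train_one_epoch body follows a whole-epoch gradient pattern: each timestep's simulated runoff is written into one preallocated tensor so the autograd graph spans the entire epoch, a warmup window is trimmed off, and validation happens once per epoch via self.validate(). The sketch below is a minimal, hypothetical rendering of that pattern with a generic model, data loader, and loss function; the explicit loss_fn, backward(), and optimizer.step() calls are assumptions here (the diff calls self.validate() at that point instead), so this is an illustration rather than the repository's agent code.

import torch

def train_one_epoch(model, data_loader, optimizer, loss_fn, warmup, device):
    # Hypothetical sketch of the whole-epoch pattern above; loss_fn,
    # backward(), and step() are assumptions, not the repository's code.
    y_hat = torch.zeros(len(data_loader), device=device)  # simulated runoff
    y_obs = torch.zeros(len(data_loader), device=device)  # observed runoff
    optimizer.zero_grad()
    for i, (x, y_t) in enumerate(data_loader):
        runoff, _percolation = model(x.squeeze())
        y_hat[i] = runoff  # index assignment keeps the autograd graph connected
        y_obs[i] = y_t
    loss = loss_fn(y_hat[warmup:], y_obs[warmup:])  # drop the warmup window
    loss.backward()  # one backward pass through the whole epoch
    optimizer.step()
    return loss.item()

Because every y_hat[i] stays attached to the graph, a single backward() propagates through all timesteps at once, which is presumably the behavior the commit message ("verified backprop is working") refers to.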
Binary file modified dpLGAR/agents/__pycache__/DifferentiableLGAR.cpython-310.pyc
Binary file modified dpLGAR/models/functions/__pycache__/loss.cpython-310.pyc
14 changes: 7 additions & 7 deletions dpLGAR/models/functions/loss.py
@@ -27,10 +27,10 @@ def forward(self, params):
            loss = loss + upper_bound_loss + lower_bound_loss
 
        # # Process the last Parameter separately
-        # lb = self.lb[-1]
-        # ub = self.ub[-1]
-        # params_tensor = params[-1] # this is already a tensor
-        # upper_bound_loss = self.factor * torch.relu(params_tensor - ub)
-        # lower_bound_loss = self.factor * torch.relu(lb - params_tensor)
-        # loss = loss + upper_bound_loss + lower_bound_loss
-        return loss
+        lb = self.lb[-1]
+        ub = self.ub[-1]
+        params_tensor = params[-1] # this is already a tensor
+        upper_bound_loss = self.factor * torch.relu(params_tensor - ub)
+        lower_bound_loss = self.factor * torch.relu(lb - params_tensor)
+        loss = loss + upper_bound_loss + lower_bound_loss
+        return loss
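The uncommented lines extend the same bound penalty to the last parameter: torch.relu(params_tensor - ub) is positive only above the upper bound and torch.relu(lb - params_tensor) only below the lower bound, so the penalty is zero inside [lb, ub] and grows linearly outside it, and its gradient pushes out-of-range parameters back toward the interval. A minimal standalone sketch of that idea follows; bound_penalty and the example tensors are hypothetical and not the repository's loss class.

import torch

def bound_penalty(params, lb, ub, factor=1.0):
    # Hypothetical standalone version of the bound penalty shown above:
    # zero inside [lb, ub], growing linearly outside the bounds.
    upper = factor * torch.relu(params - ub)  # positive only where params > ub
    lower = factor * torch.relu(lb - params)  # positive only where params < lb
    return (upper + lower).sum()

# Example: only the middle value violates its upper bound of 0.5.
params = torch.tensor([0.3, 0.9, 0.1], requires_grad=True)
lb = torch.tensor([0.0, 0.0, 0.0])
ub = torch.tensor([1.0, 0.5, 1.0])
penalty = bound_penalty(params, lb, ub)
penalty.backward()
print(penalty.item())  # ~0.4 (0.9 exceeds its bound of 0.5)
print(params.grad)     # tensor([0., 1., 0.])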
