Merge pull request #16 from iSiddharth20/Dev
AutoEncoder is Operational, Debugging Logs Improved.
iSiddharth20 authored Dec 24, 2023
2 parents 0625633 + 88aa231 commit d4242c3
Showing 2 changed files with 9 additions and 7 deletions.
4 changes: 2 additions & 2 deletions Code/main.py
@@ -96,7 +96,7 @@ def main():
epochs = 5
print('M1 AutoEncoder Training Start.')
model_autoencoder_m1 = trainer_autoencoder_baseline.train_autoencoder(epochs, data_autoencoder_train, data_autoencoder_val)
- print('M1 AutoEncoder Training Start Complete.')
+ print('M1 AutoEncoder Training Complete.')
except Exception as e:
print(f"M1 AutoEncoder Training Error : \n{e}")
try:
@@ -112,7 +112,7 @@ def main():
epochs = 5
print('M2 AutoEncoder Training Start.')
model_autoencoder_m2 = trainer_autoencoder_m2.train_autoencoder(epochs, data_autoencoder_train, data_autoencoder_val)
- print('M2 AutoEncoder Training Start Complete.')
+ print('M2 AutoEncoder Training Complete.')
except Exception as e:
print(f"M2 AutoEncoder Training Error : \n{e}")
# Method-2 LSTM == Method-1 LSTM, no need to train again
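For context, a hypothetical, simplified sketch of the pattern main.py follows for each stage: the trainer call is wrapped in try/except and bracketed by start/complete log lines (trainer_autoencoder_baseline, epochs, and the data loaders are assumed to be defined earlier in main()):

    try:
        epochs = 5
        print('M1 AutoEncoder Training Start.')
        model_autoencoder_m1 = trainer_autoencoder_baseline.train_autoencoder(
            epochs, data_autoencoder_train, data_autoencoder_val)
        print('M1 AutoEncoder Training Complete.')
    except Exception as e:
        print(f"M1 AutoEncoder Training Error : \n{e}")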
12 changes: 7 additions & 5 deletions Code/training.py
@@ -35,6 +35,7 @@ def train_autoencoder(self, epochs, train_loader, val_loader):
self.model.train() # Set the Model to Training Mode
# Training Loop
for input, target in train_loader: # Input - Grayscale Image, Target - RGB Image
+ input, target = input.to(self.device), target.to(self.device)
output = self.model(input) # Forward Pass
loss = self.loss_function(output, target) # Compute Training Loss
self.optimizer.zero_grad() # Zero gradients to prepare for Backward Pass
@@ -43,7 +44,7 @@ def train_autoencoder(self, epochs, train_loader, val_loader):
# Validation Loss Calculation
self.model.eval() # Set the Model to Evaluation Mode
with torch.no_grad(): # Disable gradient computation
- val_loss = sum(self.loss_function(self.model(input), target).item() for input, target in val_loader) # Compute Total Validation Loss
+ val_loss = sum(self.loss_function(self.model(input.to(self.device)), target.to(self.device)).item() for input, target in val_loader) # Compute Total Validation Loss
val_loss /= len(val_loader) # Compute Average Validation Loss
# Print the epoch number and the validation loss
print(f'Epoch : {epoch}, Validation Loss : {val_loss}')
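Taken together, a minimal runnable sketch of the device-aware autoencoder loop these two hunks produce (the Trainer class name, constructor, and device selection below are assumptions for illustration; only the loop body mirrors the diff):

    import torch

    class Trainer:
        def __init__(self, model, loss_function, optimizer, device=None):
            # Assumed constructor: pick GPU when available, keep model on that device
            self.device = device or torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            self.model = model.to(self.device)
            self.loss_function = loss_function
            self.optimizer = optimizer

        def train_autoencoder(self, epochs, train_loader, val_loader):
            for epoch in range(epochs):
                self.model.train()                              # training mode
                for input, target in train_loader:              # grayscale input, RGB target
                    input, target = input.to(self.device), target.to(self.device)
                    output = self.model(input)                  # forward pass
                    loss = self.loss_function(output, target)   # training loss
                    self.optimizer.zero_grad()
                    loss.backward()                             # backward pass
                    self.optimizer.step()
                self.model.eval()                               # evaluation mode
                with torch.no_grad():
                    val_loss = sum(
                        self.loss_function(self.model(inp.to(self.device)),
                                           tgt.to(self.device)).item()
                        for inp, tgt in val_loader
                    ) / len(val_loader)                         # average validation loss
                print(f'Epoch : {epoch}, Validation Loss : {val_loss}')
            return self.model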
@@ -60,16 +61,17 @@ def train_lstm(self, epochs, n_interpolate_frames, train_data, val_data):
for epoch in range(epochs):
self.model.train() # Set the model to training mode
# Training Loop
- for sequence in train_data:
+ for sequence, target in train_data:
self.optimizer.zero_grad() # Reset the gradients accumulated from the previous iteration
- output = self.model(sequence, n_interpolate_frames) # Forward Pass
- loss = self.loss_fn(output, sequence) # Compute Training Loss
+ sequence, target = sequence.to(self.device), target.to(self.device) # Moved both to the device
+ output = self.model(sequence, n_interpolate_frames)
+ loss = self.loss_function(output, target) # Compute Training Loss
loss.backward() # Backward Pass
self.optimizer.step() # Update Model Parameters
# Validation Loss Calculation
self.model.eval() # Set the Model to Evaluation Mode
with torch.no_grad():
- val_loss = sum(self.loss_fn(self.model(sequence, n_interpolate_frames), sequence).item() for sequence in val_data) # Compute Total Validation Loss
+ val_loss = sum(self.loss_function(self.model(sequence, n_interpolate_frames), sequence).item() for sequence in val_data) # Compute Total Validation Loss
val_loss /= len(val_data) # Compute Average Validation Loss
# Print the epoch number and the validation loss
print(f'Epoch : {epoch}, Validation Loss : {val_loss}')
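Continuing the Trainer sketch above, a corresponding sketch of the updated train_lstm loop. As an assumption, the validation pass below applies the same (sequence, target) unpacking and .to(self.device) handling that the training loop now uses; the committed validation line still iterates bare sequences:

        def train_lstm(self, epochs, n_interpolate_frames, train_data, val_data):
            for epoch in range(epochs):
                self.model.train()
                for sequence, target in train_data:
                    self.optimizer.zero_grad()          # reset accumulated gradients
                    sequence, target = sequence.to(self.device), target.to(self.device)
                    output = self.model(sequence, n_interpolate_frames)  # forward pass
                    loss = self.loss_function(output, target)            # training loss
                    loss.backward()                      # backward pass
                    self.optimizer.step()
                self.model.eval()
                with torch.no_grad():
                    val_loss = sum(
                        self.loss_function(self.model(seq.to(self.device), n_interpolate_frames),
                                           tgt.to(self.device)).item()
                        for seq, tgt in val_data
                    ) / len(val_data)                    # average validation loss
                print(f'Epoch : {epoch}, Validation Loss : {val_loss}')
            return self.model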
