From 165242a1cdb2d61c9bd6720ed39255b546dfe040 Mon Sep 17 00:00:00 2001
From: iSiddharth20
Date: Sun, 24 Dec 2023 14:11:28 -0800
Subject: [PATCH 1/2] Fixed Training Debugging Console Logs

---
 Code/main.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Code/main.py b/Code/main.py
index 269c84b..776dfaa 100644
--- a/Code/main.py
+++ b/Code/main.py
@@ -96,7 +96,7 @@ def main():
         epochs = 5
         print('M1 AutoEncoder Training Start.')
         model_autoencoder_m1 = trainer_autoencoder_baseline.train_autoencoder(epochs, data_autoencoder_train, data_autoencoder_val)
-        print('M1 AutoEncoder Training Start Complete.')
+        print('M1 AutoEncoder Training Complete.')
     except Exception as e:
         print(f"M1 AutoEncoder Training Error : \n{e}")
     try:
@@ -112,7 +112,7 @@ def main():
         epochs = 5
         print('M2 AutoEncoder Training Start.')
         model_autoencoder_m2 = trainer_autoencoder_m2.train_autoencoder(epochs, data_autoencoder_train, data_autoencoder_val)
-        print('M2 AutoEncoder Training Start Complete.')
+        print('M2 AutoEncoder Training Complete.')
     except Exception as e:
         print(f"M2 AutoEncoder Training Error : \n{e}")
     # Method-2 LSTM == Method-1 LSTM, no need to train again

From 88aa231653e01717deaa958588913ff675b0db34 Mon Sep 17 00:00:00 2001
From: iSiddharth20
Date: Sun, 24 Dec 2023 14:12:01 -0800
Subject: [PATCH 2/2] Fixed AutoEncoder Data to Device Issue

---
 Code/training.py | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/Code/training.py b/Code/training.py
index 1ef6880..7f436ac 100644
--- a/Code/training.py
+++ b/Code/training.py
@@ -35,6 +35,7 @@ def train_autoencoder(self, epochs, train_loader, val_loader):
             self.model.train() # Set the Model to Training Mode
             # Training Loop
             for input, target in train_loader: # Input - Grayscale Image, Target - RGB Image
+                input, target = input.to(self.device), target.to(self.device)
                 output = self.model(input) # Forward Pass
                 loss = self.loss_function(output, target) # Compute Training Loss
                 self.optimizer.zero_grad() # Zero gradients to prepare for Backward Pass
@@ -43,7 +44,7 @@
             # Validation Loss Calculation
             self.model.eval() # Set the Model to Evaluation Mode
             with torch.no_grad(): # Disable gradient computation
-                val_loss = sum(self.loss_function(self.model(input), target).item() for input, target in val_loader) # Compute Total Validation Loss
+                val_loss = sum(self.loss_function(self.model(input.to(self.device)), target.to(self.device)).item() for input, target in val_loader) # Compute Total Validation Loss
                 val_loss /= len(val_loader) # Compute Average Validation Loss
             # Print the epoch number and the validation loss
             print(f'Epoch : {epoch}, Validation Loss : {val_loss}')
@@ -60,16 +61,17 @@ def train_lstm(self, epochs, n_interpolate_frames, train_data, val_data):
         for epoch in range(epochs):
             self.model.train() # Set the model to training mode
             # Training Loop
-            for sequence in train_data:
+            for sequence, target in train_data:
                 self.optimizer.zero_grad() # Reset the gradients accumulated from the previous iteration
-                output = self.model(sequence, n_interpolate_frames) # Forward Pass
-                loss = self.loss_fn(output, sequence) # Compute Training Loss
+                sequence, target = sequence.to(self.device), target.to(self.device) # Moved both to the device
+                output = self.model(sequence, n_interpolate_frames)
+                loss = self.loss_function(output, target) # Compute Training Loss
                 loss.backward() # Backward Pass
                 self.optimizer.step() # Update Model Parameters
             # Validation Loss Calculation
             self.model.eval() # Set the Model to Evaluation Mode
             with torch.no_grad():
-                val_loss = sum(self.loss_fn(self.model(sequence, n_interpolate_frames), sequence).item() for sequence in val_data) # Compute Total Validation Loss
+                val_loss = sum(self.loss_function(self.model(sequence, n_interpolate_frames), sequence).item() for sequence in val_data) # Compute Total Validation Loss
                 val_loss /= len(val_data) # Compute Average Validation Loss
             # Print the epoch number and the validation loss
             print(f'Epoch : {epoch}, Validation Loss : {val_loss}')
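
Note on the train_lstm hunk above: the validation line still compares the model
output to `sequence` itself and never moves the batch to self.device, which is
the same mismatch this series fixes in train_autoencoder. Assuming val_data
yields (sequence, target) pairs the same way train_data now does (an assumption,
not confirmed by these patches), a follow-up mirroring the autoencoder fix
could look like this sketch:

    # Sketch only; assumes val_data yields (sequence, target) pairs like train_data
    self.model.eval() # Set the Model to Evaluation Mode
    with torch.no_grad(): # Disable gradient computation
        val_loss = sum(self.loss_function(self.model(sequence.to(self.device), n_interpolate_frames), target.to(self.device)).item() for sequence, target in val_data) # Compute Total Validation Loss
        val_loss /= len(val_data) # Compute Average Validation Loss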