Updated Model Architecture, Loss Functions, Trainer Initializations
Showing 45 changed files with 1,223 additions and 221 deletions.
@@ -0,0 +1 @@
*.tif filter=lfs diff=lfs merge=lfs -text
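(This is the standard Git LFS tracking rule: with this attributes entry, every .tif image is stored as an LFS pointer rather than committed directly, which keeps the repository small as the generated frame sequences grow.)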
@@ -0,0 +1,56 @@
# Import Necessary Libraries
import os
import numpy as np
from PIL import Image
import torch

# Function that generates RGB Image Sequence with Interpolated Frames from a Grayscale Image Sequence
def generate_rgb_sequence(model_lstm, model_autoencoder, grey_sequence, n_interpolate_frames,
                          model_save_path_lstm, model_save_path_ae, generated_sequence_dir):
    # Load trained LSTM weights if a checkpoint exists
    if os.path.exists(model_save_path_lstm):
        model_lstm.load_state_dict(torch.load(model_save_path_lstm))
        model_lstm.eval()

    # Load trained AutoEncoder weights if a checkpoint exists
    if os.path.exists(model_save_path_ae):
        model_autoencoder.load_state_dict(torch.load(model_save_path_ae))
        model_autoencoder.eval()

    # Generate the full grayscale sequence, including interpolated frames
    full_sequence_gray = model_lstm(grey_sequence, n_interpolate_frames)

    # Colorize each grayscale frame with the AutoEncoder
    full_sequence_rgb = []
    with torch.no_grad():
        for i in range(full_sequence_gray.size(1)):
            gray_frame = full_sequence_gray[:, i, :, :]
            rgb_frame = model_autoencoder(gray_frame.unsqueeze(dim=0))
            full_sequence_rgb.append(rgb_frame)

    # Save each generated frame as a TIFF image
    os.makedirs(generated_sequence_dir, exist_ok=True)
    for idx, rgb_tensor in enumerate(full_sequence_rgb):
        image_data = rgb_tensor.squeeze().cpu().numpy()
        image_data = np.transpose(image_data, (1, 2, 0))  # CHW -> HWC
        image_data = (image_data * 255).astype(np.uint8)
        image = Image.fromarray(image_data)

        image_path = os.path.join(generated_sequence_dir, f'generated_frame_{idx:04d}.tif')
        image.save(image_path)

    print('The generated sequence of RGB images has been saved.')

'''
Pass Output of LSTM Model to AutoEncoder Model to Obtain Final Output
'''
# Maximum Likelihood Principle
model_save_path_ae = '../Models/model_autoencoder_mlp.pth'
model_save_path_lstm = '../Models/model_lstm_mlp.pth'
generated_sequence_dir = '../Dataset/GeneratedSequence/MLP'
generate_rgb_sequence(model_lstm_mlp, model_autoencoder_mlp, grey_sequence, n_interpolate_frames,
                      model_save_path_lstm, model_save_path_ae, generated_sequence_dir)

# Maximum Entropy Principle
model_save_path_ae = '../Models/model_autoencoder_mep.pth'
model_save_path_lstm = '../Models/model_lstm_mep.pth'
generated_sequence_dir = '../Dataset/GeneratedSequence/MEP'
generate_rgb_sequence(model_lstm_mep, model_autoencoder_mep, grey_sequence, n_interpolate_frames,
                      model_save_path_lstm, model_save_path_ae, generated_sequence_dir)
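Before the trained checkpoints exist, the function can be smoke-tested with stand-in modules. The toy classes and shapes below are hypothetical, not part of the repository; they assume grey_sequence is a [batch, frames, height, width] tensor and the autoencoder maps a 1-channel frame to a 3-channel RGB frame in [0, 1]:

import torch
import torch.nn as nn

class ToyLSTM(nn.Module):
    # Hypothetical stand-in: repeats each frame instead of interpolating
    def forward(self, grey_sequence, n_interpolate_frames):
        return grey_sequence.repeat_interleave(n_interpolate_frames + 1, dim=1)

class ToyAutoEncoder(nn.Module):
    # Hypothetical stand-in: 1-channel to 3-channel mapping
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(1, 3, kernel_size=1)

    def forward(self, x):
        return torch.sigmoid(self.conv(x))  # keep outputs in [0, 1] for the uint8 conversion

grey_sequence = torch.rand(1, 4, 64, 64)  # [batch, frames, height, width]
generate_rgb_sequence(ToyLSTM(), ToyAutoEncoder(), grey_sequence, 2,
                      'no_lstm_ckpt.pth', 'no_ae_ckpt.pth', '../Dataset/GeneratedSequence/SmokeTest')

With the missing checkpoint paths, the load steps are skipped and the untrained stand-ins still exercise the full generate-colorize-save path end to end.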
@@ -1,45 +1,57 @@
 '''
 Module that specifies Loss Functions
 --------------------------------------------------------------------------------
 Module for Loss Functions :
     - Maximum Entropy Principle (MEP)
     - Maximum Likelihood Principle (MLP)
+    - Structural Similarity Index Measure (SSIM)
 '''

 # Import Necessary Libraries
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
+from pytorch_msssim import SSIM

-# Define a class for the Maximum Entropy Principle (MEP) Loss
+'''
+Class for Composite Loss with MaxEnt Regularization Term
+    - Maximum Entropy Principle
+'''
 class LossMEP(nn.Module):
     def __init__(self, alpha=0.5):
         super(LossMEP, self).__init__()
-        # Regularization Parameter Weight
-        self.alpha = alpha
-        # Base Loss Function (MSE)
-        self.mse = nn.MSELoss()
+        self.alpha = alpha  # Weighting factor for the loss
+        self.mse = nn.MSELoss()  # Mean Squared Error loss

     def forward(self, output, target):
-        # Compute the MSE loss
-        mse_loss = self.mse(output, target)
-        # Compute Entropy of the Target Distribution
-        entropy = -torch.sum(target * torch.log(output + 1e-8), dim=-1).mean()
-        # Compute Composite Loss Function with MaxEnt Regularization Term
-        regularized_loss = self.alpha * mse_loss + (1 - self.alpha) * entropy
-        # Return Composite Loss
-        return regularized_loss
+        mse_loss = self.mse(output, target)  # Compute MSE Loss
+        entropy = -torch.sum(target * torch.log(output + 1e-8), dim=-1).mean()  # Compute Entropy
+        composite_loss = self.alpha * mse_loss + (1 - self.alpha) * entropy  # Compute Composite Loss
+        return composite_loss

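Since the regularization term takes log(output), LossMEP implicitly assumes probability-like, non-negative outputs. A quick illustrative check with softmax-normalized random tensors (shapes and values are not from the repository):

import torch

criterion = LossMEP(alpha=0.5)
output = torch.softmax(torch.randn(8, 10), dim=-1)  # probability-like predictions
target = torch.softmax(torch.randn(8, 10), dim=-1)  # probability-like targets
print(criterion(output, target).item())  # composite MSE + entropy value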
-# Define a class for the Maximum Likelihood Principle (MLP) Loss
-class LossMLP(nn.Module):
-    def __init__(self, alpha=0.5):
-        super(LossMLP, self).__init__()
-        # Regularization Parameter Weight
-        self.alpha = alpha
-        # Mean Squared Error Loss
-        self.mse = nn.MSELoss()
+'''
+Class for Mean Squared Error (MSE) Loss
+    - Maximum Likelihood Principle
+'''
+class LossMSE(nn.Module):
+    def __init__(self):
+        super(LossMSE, self).__init__()
+        self.mse = nn.MSELoss()  # Mean Squared Error loss

     def forward(self, output, target):
-        # Compute the MSE loss
-        likelihood_loss = self.mse(output, target)
-        # Compute Loss Function with Maximum Likelihood Principle
-        regularized_loss = self.alpha * likelihood_loss
-        # Return Loss
-        return regularized_loss
+        likelihood_loss = self.mse(output, target)  # Compute MSE loss
+        return likelihood_loss

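LossMSE is a thin wrapper around nn.MSELoss and can be sanity-checked the same way; the shapes here are illustrative only:

import torch

criterion = LossMSE()
print(criterion(torch.rand(4, 3, 32, 32), torch.rand(4, 3, 32, 32)).item())  # plain MSE value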
+'''
+Class for Structural Similarity Index Measure (SSIM) Loss
+    - Maximum Likelihood Principle
+    - PyTorch minimizes the loss, so returning 1 - SSIM means that minimizing the loss maximizes SSIM
+'''
+class SSIMLoss(nn.Module):
+    def __init__(self, data_range=1, size_average=True):
+        super(SSIMLoss, self).__init__()
+        self.data_range = data_range  # The range of the input images (usually 1.0 or 255)
+        self.size_average = size_average  # If True, the SSIM of all windows is averaged
+        # Initialize SSIM module
+        self.ssim_module = SSIM(data_range=self.data_range, size_average=self.size_average)
+
+    def forward(self, img1, img2):
+        ssim_value = self.ssim_module(img1, img2)  # Compute SSIM
+        return 1 - ssim_value  # Return loss
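A minimal sanity check for SSIMLoss, assuming pytorch_msssim is installed and inputs are 4D [batch, channel, height, width] tensors scaled to [0, 1]:

import torch

criterion = SSIMLoss(data_range=1)
img1 = torch.rand(2, 3, 64, 64)
print(criterion(img1, img1.clone()).item())  # identical images: SSIM = 1, loss ~ 0
print(criterion(img1, torch.rand(2, 3, 64, 64)).item())  # dissimilar images: larger loss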