Commit 50cf418 (parent: 2d57423)
Showing 7 changed files with 125 additions and 138 deletions.
@@ -0,0 +1,43 @@
'''
Module that specifies the AutoEncoder architecture using PyTorch
--------------------------------------------------------------------------------
This is a Template Code and Needs to be Modified based on the Problem Statement
'''

# Importing Necessary Libraries
import torch.nn as nn

# Define a class which creates an AutoEncoder with convolutional layers using PyTorch
# (shape comments assume single-channel 28x28 inputs, e.g. MNIST)
class AutoEncoder(nn.Module):
    def __init__(self):
        super(AutoEncoder, self).__init__()

        # Encoder Architecture
        self.encoder = nn.Sequential(
            nn.Conv2d(1, 16, 3, stride=1, padding=1),   # (N, 16, 28, 28)
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=2),                  # (N, 16, 14, 14)
            nn.Conv2d(16, 8, 3, stride=1, padding=1),   # (N, 8, 14, 14)
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=2),                  # (N, 8, 7, 7)
            nn.Conv2d(8, 8, 3, stride=1, padding=1),    # (N, 8, 7, 7)
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=2)                   # (N, 8, 3, 3)
        )

        # Decoder Architecture
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(8, 8, 3, stride=2),              # (N, 8, 7, 7)
            nn.ReLU(True),
            nn.ConvTranspose2d(8, 8, 5, stride=2, padding=1),   # (N, 8, 15, 15)
            nn.ReLU(True),
            nn.ConvTranspose2d(8, 16, 2, stride=2, padding=1),  # (N, 16, 28, 28)
            nn.ReLU(True),
            # Final layer keeps the resolution so the reconstruction
            # matches the 28x28 input
            nn.ConvTranspose2d(16, 1, 3, stride=1, padding=1),  # (N, 1, 28, 28)
            nn.ReLU(True)
        )

    def forward(self, x):
        # Compress the input to the latent representation, then reconstruct it
        x = self.encoder(x)
        x = self.decoder(x)
        return x
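Not part of the commit, but a quick way to sanity-check this template is to push a random batch through the model and confirm the reconstruction matches the input shape; the 28x28 single-channel input is an assumption carried over from the shape comments above.

# Hypothetical smoke test (illustrative, not in the repository)
import torch

model = AutoEncoder()
x = torch.randn(4, 1, 28, 28)  # batch of 4 single-channel 28x28 images
out = model(x)
print(out.shape)               # expected: torch.Size([4, 1, 28, 28])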
This file was deleted.
This file was deleted.
@@ -0,0 +1,5 @@
'''
Module that uses the Structural Similarity Index Measure (SSIM) as a validation metric on the validation sets
--------------------------------------------------------------------------------
This is a Template Code and Needs to be Modified based on the Problem Statement
'''
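The new metrics module is only this docstring stub so far. A minimal sketch of what it could contain, assuming the pytorch_ssim package that the training script already imports; the function name ssim_score and its signature are illustrative, not part of the commit.

# Hypothetical sketch of the SSIM validation metric (names are illustrative)
import torch
import pytorch_ssim

def ssim_score(model, val_loader, device):
    # Average SSIM between reconstructions and targets over the validation set
    model.eval()
    total, batches = 0.0, 0
    with torch.no_grad():
        for data, target in val_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            total += pytorch_ssim.ssim(output, target).item()
            batches += 1
    return total / max(batches, 1)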
@@ -1,75 +1,81 @@
 '''
-Need to Combine Auto-Encoder and LSTM
-The Auto-Encoder is created using Encoder and Decoder Classes
-The LSTM Network is obtained from the LSTMModule Class
-The Training Loss is obtained from the CompositeLossFunction Class
+Module that combines the AutoEncoder and LSTM to train the model using two kinds of loss functions
 --------------------------------------------------------------------------------
-This Code Needs to be Modified based on the Problem Statement
+This is a Template Code and Needs to be Modified based on the Problem Statement
 '''
 
 # Importing Necessary Libraries and Individual Components
 import torch
 import torch.optim as optim
 
-from LossFunction import CompositeLossFunction
-from Encoder import Encoder
-from Decoder import Decoder
+from LossFunction import CompositeLossFunction, RegularLossFunction
+from AutoEncoder import AutoEncoder
 from LSTM import LSTMModule
 import pytorch_ssim
 
-# Define the AutoEncoder class as a combination of Encoder and Decoder
-class AutoEncoder(nn.Module):
-    def __init__(self, input_size, hidden_size, output_size):
-        super(AutoEncoder, self).__init__()
-        # Initialize the encoder and decoder using the given dimensions
-        self.encoder = Encoder(input_size, hidden_size)
-        self.decoder = Decoder(hidden_size, output_size)
-
-    def forward(self, x):
-        # Pass Data through Encoder
-        x = self.encoder(x)
-        # Pass Encoded Data through Decoder
-        x = self.decoder(x)
-        return x
-
-# Initialize the model
-input_size = 000
-hidden_size = 000
-output_size = 000
-autoencoder = AutoEncoder(input_size, hidden_size, output_size)
+# Initialize the model components
+# (the convolutional AutoEncoder defined in AutoEncoder.py takes no size arguments;
+# the 000 values are placeholders to be filled in per the problem statement)
+autoencoder = AutoEncoder()
+lstm = LSTMModule(input_size=000, hidden_size=000, output_size=000, num_layers=000, dropout=000)
 
 # Initialize the Composite Loss Function (Alpha may need tuning)
-loss_function = CompositeLossFunction(alpha=0.5)
+loss_MEP = CompositeLossFunction(alpha=0.5)
+
+# Initialize the Regular Loss Function (Alpha may need tuning)
+loss_MLP = RegularLossFunction(alpha=0.5)
 
 # Initialize Adam Optimizer
-optimizer = optim.Adam(autoencoder.parameters(), lr=0.001)
+# Optimize both the AutoEncoder and the LSTM parameters, since both are trained
+optimizer = optim.Adam(list(autoencoder.parameters()) + list(lstm.parameters()), lr=0.001)
 
-# Import Dataset
-dataloader = 'Import Dataset Here'
-
-# Training loop
-num_epochs = 10
-for epoch in range(num_epochs):
-    for data in dataloader:
-        # Load the Data
-        images, _ = data  # Ignore labels if present
-
-        # Reset gradients
-        optimizer.zero_grad()
-
-        # Forward pass
-        reconstructed_images = autoencoder(images)
-
-        # Compute loss
-        loss = loss_function(reconstructed_images, images)
-
-        # Backward pass
-        loss.backward()
-
-        # Update weights
-        optimizer.step()
-
-        # Print training progress
-        print(f"Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}")
+# Initialize the Training Loop
+def train_model(autoencoder, lstm, loss_function, optimizer, train_loader, device, model_name):
+    lstm.train()
+    autoencoder.train()
+    train_loss = 0
+    for batch_idx, (data, target) in enumerate(train_loader):
+        # Send data and target to device
+        data, target = data.to(device), target.to(device)
+        # Zero the gradients carried over from the previous step
+        optimizer.zero_grad()
+        # Pass the data through the LSTM
+        output = lstm(data)
+        # Pass the output through the AutoEncoder
+        output = autoencoder(output)
+        # Calculate the loss
+        loss = loss_function(output, target)
+        # Backpropagate the loss
+        loss.backward()
+        # Update the weights
+        optimizer.step()
+        # Update the training loss
+        train_loss += loss.item()
+    # Return the training loss
+    return train_loss
+
+# Implement a PyTorch validation loop that computes the Mean Squared Error (MSE)
+# and the Structural Similarity Index Measure (SSIM)
+def validate_model(autoencoder, lstm, val_loader, device, loss_function):
+    lstm.eval()
+    autoencoder.eval()
+    val_loss = 0
+    mse = 0
+    ssim = 0
+    with torch.no_grad():
+        for batch_idx, (data, target) in enumerate(val_loader):
+            # Send data and target to device
+            data, target = data.to(device), target.to(device)
+            # Pass the data through the LSTM
+            output = lstm(data)
+            # Pass the output through the AutoEncoder
+            output = autoencoder(output)
+            # Calculate the loss
+            loss = loss_function(output, target)
+            # Update the validation loss
+            val_loss += loss.item()
+            # Calculate the MSE
+            mse += torch.mean((output - target) ** 2).item()
+            # Calculate the SSIM
+            ssim += pytorch_ssim.ssim(output, target).item()
+    # Return the validation loss, MSE and SSIM
+    return val_loss, mse, ssim
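The template defines train_model and validate_model but never calls them, and the LossFunction module changed by this commit is not shown. A minimal sketch of how the pieces could be driven, with an illustrative composite loss blending MSE and (1 - SSIM); the class name CompositeLossSketch, the alpha weighting, the epoch count, and the train_loader/val_loader DataLoaders are all assumptions, not part of the commit.

# Hypothetical driver (illustrative, not in the repository)
import torch
import torch.nn as nn
import pytorch_ssim

class CompositeLossSketch(nn.Module):
    # Illustrative stand-in for CompositeLossFunction: MSE blended with (1 - SSIM)
    def __init__(self, alpha=0.5):
        super().__init__()
        self.alpha = alpha
        self.mse = nn.MSELoss()

    def forward(self, output, target):
        return (self.alpha * self.mse(output, target)
                + (1 - self.alpha) * (1 - pytorch_ssim.ssim(output, target)))

# train_loader / val_loader: PyTorch DataLoaders to be supplied by the user
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
autoencoder.to(device)
lstm.to(device)

num_epochs = 10  # placeholder, tune per the problem statement
for epoch in range(num_epochs):
    train_loss = train_model(autoencoder, lstm, loss_MEP, optimizer,
                             train_loader, device, model_name='MEP')
    val_loss, mse, ssim = validate_model(autoencoder, lstm, val_loader,
                                         device, loss_MEP)
    print(f"Epoch {epoch+1}/{num_epochs}: train={train_loss:.4f}, "
          f"val={val_loss:.4f}, MSE={mse:.4f}, SSIM={ssim:.4f}")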