From 5dbf35be2442ed231290fa15c5f4c7681bf06f54 Mon Sep 17 00:00:00 2001
From: iSiddharth20
Date: Sat, 23 Dec 2023 15:22:00 -0800
Subject: [PATCH] Removed Irrelevant Code

---
 Extras/ChatGPT.ipynb | 103 --------
 Extras/Code Analysis.ipynb | 404 ------------------------------
 Extras/Verify Image Chanels.ipynb | 179 -------------
 3 files changed, 686 deletions(-)
 delete mode 100644 Extras/ChatGPT.ipynb
 delete mode 100644 Extras/Code Analysis.ipynb
 delete mode 100644 Extras/Verify Image Chanels.ipynb

diff --git a/Extras/ChatGPT.ipynb b/Extras/ChatGPT.ipynb
deleted file mode 100644
index 42a9d28..0000000
--- a/Extras/ChatGPT.ipynb
+++ /dev/null
@@ -1,103 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "fce68d26-26b1-4c30-a038-1f8db03965ae",
- "metadata": {},
- "outputs": [],
- "source": [
- "# Import Necessary Libraries\n",
- "import os\n",
- "import sys\n",
- "\n",
- "# Import Necessary Scripts\n",
- "DIR_CURR = os.getcwd()\n",
- "DIR_THIS = \"D:/__DevSection__/__PYTHONSCRIPTS__\"\n",
- "os.chdir(DIR_THIS)\n",
- "sys.path.append(DIR_THIS)\n",
- "\n",
- "from OpenAI import OpenAIBot # OpenAI ChatBot\n",
- "from PDFToRawString import pdf_to_raw_string # Extracts Contents from PDF File to a Raw String\n",
- "from PyScriptToRawString import py_to_raw_string # Extracts Contents from Python Script to a Raw String\n",
- "\n",
- "os.chdir(DIR_CURR)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "37467588-c28e-4573-9796-92523a63621f",
- "metadata": {},
- "outputs": [],
- "source": [
- "# PDF_Name = pdf_to_raw_string('PDF_Name.pdf')\n",
- "# PY_Script = py_to_raw_string('PY_Script.py')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "037be127-a2de-4276-bc68-5c183e5f5996",
- "metadata": {},
- "outputs": [],
- "source": [
- "engine = \"gpt-4-1106-preview\""
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "debd9704-f1e5-40ea-9dfe-358a6d12cb41",
- "metadata": {},
- "outputs": [],
- "source": [
- "chatbot = OpenAIBot(engine)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "f0499e9f-a2e8-4258-9deb-69f99b37501e",
- "metadata": {},
- "outputs": [],
- "source": [
- "prompt = r\"\"\"\n",
- "\n",
- "\"\"\" \n",
- "\n",
- "response = chatbot.generate_response(prompt)\n",
- "print(response)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "67884d1e-95ce-42e8-9e56-3bf5e9ba0b4e",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.11.5"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/Extras/Code Analysis.ipynb b/Extras/Code Analysis.ipynb
deleted file mode 100644
index fe3f3ae..0000000
--- a/Extras/Code Analysis.ipynb
+++ /dev/null
@@ -1,404 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 1,
- "id": "dd4e1b2c",
- "metadata": {},
- "outputs": [],
- "source": [
- "# Importing Bot Definition\n",
- "from BotDefinition import OpenAIBot\n",
- "from PythonToText import get_raw_script"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "id": "81bacba0",
- "metadata": {},
- "outputs": [],
- "source": [
- "'''Choose whichever model you want to use'''\n",
- "engine = \"gpt-4-1106-preview\""
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "id": "2f948833",
- "metadata": {},
- "outputs": [],
- "source": [
- "chatbot = OpenAIBot(engine)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "id": "0851032a-5734-451f-ad28-095ee53e97da",
- "metadata": {},
- "outputs": [],
- "source": [
- "project_intro = r\"\"\"\n",
- "This study introduces a transformative approach to Spatio-Temporal fusion in remote sensing, leveraging advanced Generative AI techniques to elevate the spatial and temporal resolutions of image sequences. Merging the power of Long Short-Term Memory (LSTM) networks for enhancing temporal dynamics with the reconstructive power of AutoEncoders for spatial detail enhancement, the proposed neural network system processes sequential terrestrial images as a stand-in for remote sensing satellite data. Central to this work is a dataset of 16 high-resolution RAW images, each captured during sunset to faithfully reproduce the fluctuating environmental conditions characteristic of remote sensing applications. Rich in ambient light variance, these images mimic atmospheric patterns shifting over time. The images are processed into corresponding grey scale and RGB TIF sets and meticulously aligned, paving the way for subsequent Spatio-Temporal analysis and fusion. Beginning with an LSTM-focused sub-network, the model navigates the temporal sequence of grey scale imagery, generating absent frames to refine temporal resolution. Subsequently, an autoencoder embarks on spatial extrapolation, transforming these grey scale images into full-resolution RGB translations. Beyond mere super-resolution, this process generates two complete data channels from a single grey scale channel, harnessing the capabilities of Generative AI to synthesize previously unseen information. A new composite loss function, created by integrating Mean Squared Error (MSE) with Maximum Entropy (MaxEnt) regularization, ensures a nuanced balance between reconstruction accuracy and data diversity. This loss function bridges gaps between predicted and actual data, ensuring wide-ranging applicability, better generalization, and less bias.\n",
- "\"\"\""
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "id": "00f367ed-a276-4bdb-a2a9-89b69d40a30c",
- "metadata": {},
- "outputs": [],
- "source": [
- "data = get_raw_script('Code/data.py')\n",
- "autoencoder_model = get_raw_script('Code/autoencoder_model.py')\n",
- "lstm_model = get_raw_script('Code/lstm_model.py')\n",
- "losses = get_raw_script('Code/losses.py')\n",
- "main = get_raw_script('Code/main.py')\n",
- "training = get_raw_script('Code/training.py')\n",
- "# evaluation = get_raw_script('Code/evaluation.py')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "id": "345f4a90",
- "metadata": {},
- "outputs": [],
- "source": [
- "prompt = f\"\"\"\n",
- "\n",
- "**Task: Code Analysis and Enhancement**\n",
- "\n",
- "**Role: Machine Learning Engineer**\n",
- "\n",
- "**Overview:**\n",
- "I need assistance in refining my machine learning project. I'll provide a brief project introduction and share existing code files. Your task is to review the code, identify issues in the LSTM module, and address missing components in the Training and Main modules.\n",
- "\n",
- "**Project Introduction:**\n",
- "{project_intro}\n",
- "\n",
- "**Code Files:**\n",
- "- **data.py:** {data}\n",
- "- **autoencoder_model.py:** {autoencoder_model}\n",
- "- **lstm_model.py:** {lstm_model}\n",
- "- **losses.py:** {losses}\n",
- "- **main.py:** {main}\n",
- "- **training.py:** {training}\n",
- "\n",
- "**Specific Concerns:**\n",
- "1. **LSTM Module:** Review and suggest improvements for the LSTM module.\n",
- "2. **Training Module:** Identify and fill in missing components related to LSTM training functionality.\n",
- "\n",
- "**Assumptions:**\n",
- "All necessary hardware and software resources are available.\n",
- "\n",
- "Please analyze the code comprehensively and propose necessary changes to achieve the desired results.\n",
- "\n",
- "\"\"\"\n",
- "\n",
- "# evaluation.py = {evaluation}"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "id": "c133b15b",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Let's tackle your concerns one by one, starting with the LSTM Module.\n",
- "\n",
- "### LSTM Module Review:\n",
- "\n",
- "**Original LSTM Model Code**\n",
- "```python\n",
- "class FrameInterpolationLSTM(nn.Module):\n",
- " # ...\n",
- "```\n",
- "\n",
- "Upon reviewing the LSTM module, the `FrameInterpolationLSTM` class appears to correctly implement the LSTM cell as a convolutional operation and applies it to each time step of the input image sequence for frame interpolation. However, there are a couple of technical considerations to improve upon:\n",
- "\n",
- "1. **State Initialization:** The current initialization of the hidden and cell states does not depend on the device where the model is located. It's a good practice to ensure the tensors are on the same device as the model to avoid device mismatch errors.\n",
- "\n",
- "2. **Backward Pass Sequence:** The current LSTM implementation attempts to predict `n` intermediate frames between each pair of frames, then a weighted combination of these predictions is defined as the output for that time step, which may not be an optimal interpolation strategy. An alternative approach is to predict the next frame in the sequence directly based on the previous states.\n",
- "\n",
- "3. **Batch Processing:** The implementation assumes that there is only a single sequence in the batch (batch size of 1). You might need to adjust the code to handle batches of sequences.\n",
- "\n",
- "4. **Error Management:** It would be useful to have error checking for edge cases, such as when `n_interpolate_frames` is zero or negative, or when the batch size doesn’t match the model expectations.\n",
- "\n",
- "5. **Sequence Length Handling:** The LSTM model should be flexible to handle variable sequence lengths. 
This can be achieved by packing the sequence with `torch.nn.utils.rnn.pack_padded_sequence` and unpacking it with `torch.nn.utils.rnn.pad_packed_sequence`.\n", - "\n", - "### Enhancements for LSTM Module:\n", - "\n", - "```python\n", - "class ConvLSTMCell(nn.Module):\n", - " # ...\n", - " def init_hidden(self, batch_size, image_size):\n", - " height, width = image_size\n", - " return (torch.zeros(batch_size, self.hidden_channels, height, width, device=self.conv.weight.device),\n", - " torch.zeros(batch_size, self.hidden_channels, height, width, device=self.conv.weight.device))\n", - "```\n", - "\n", - "### Training Module Review:\n", - "\n", - "The training module (`Trainer` class) has a method for training the autoencoder, but the method for training the LSTM (`train_lstm`) has not been implemented.\n", - "\n", - "**Training Module Enhancement (Implement `train_lstm` method):**\n", - "\n", - "To fill in the missing LSTM training functionality, I will extend the class `Trainer` with an appropriate `train_lstm` method. Additionally, I will add a method to save LSTM model weights after the training.\n", - "\n", - "```python\n", - "class Trainer():\n", - " # ...\n", - " def train_lstm(self, epochs, n, image_sequence):\n", - " self.model.train() # Set the model to training mode\n", - " best_val_loss = float('inf')\n", - " \n", - " for epoch in range(epochs):\n", - " total_loss = 0\n", - " for i in range(1, image_sequence.size(1) - 1):\n", - " # Prepare input sequences and target frames\n", - " input_sequence = image_sequence[:, i-1:i+1, :, :, :]\n", - " target_frame = image_sequence[:, i, :, :, :]\n", - " \n", - " # Forward pass\n", - " predicted_sequence = self.model(input_sequence, n)\n", - " \n", - " # Compute loss only for the interpolated frame, not for the input frames\n", - " loss = self.loss_function(predicted_sequence[:, -2, :, :, :], target_frame)\n", - " \n", - " # Backward pass\n", - " self.optimizer.zero_grad()\n", - " loss.backward()\n", - " \n", - " # Update model parameters\n", - " self.optimizer.step()\n", - " \n", - " total_loss += loss.item()\n", - " \n", - " # Compute average loss for epoch\n", - " avg_loss = total_loss / (image_sequence.size(1) - 2)\n", - " \n", - " # Print epoch loss (optional)\n", - " print(f'Epoch {epoch+1}/{epochs}, Loss: {avg_loss:.4f}')\n", - " \n", - " # Implement validation logic here as needed\n", - " \n", - " # Save the best model\n", - " if avg_loss < best_val_loss:\n", - " best_val_loss = avg_loss\n", - " self.save_model()\n", - " \n", - " return self.model\n", - "```\n", - "\n", - "The above `train_lstm` method assumes that `image_sequence` is a tensor of shape `[batch_size, sequence_length, channels, height, width]` and it accounts for a batch of sequences by executing the forward pass on each sequence in the batch. Also, it saves the model with the lowest loss over all epochs, just like in the `train_autoencoder` function.\n", - "\n", - "**Next Steps:**\n", - "- Ensure the model's data and LSTM layers are on the correct device (`cpu` or `cuda`).\n", - "- Implement the validation logic according to the project's goals and dataset (not shown in the Trainer class above).\n", - "- Ensure the LSTM training loop logic aligns with the intended use of LSTM and the data preprocessing in `data.py`.\n", - "\n", - "In summary, these proposed updates aim to address the missing components and enhance the LSTM and Training modules of your project. 
By implementing these changes, your project should be equipped to carry out the Spatio-Temporal fusion tasks effectively. Don't forget to debug and validate the changes thoroughly.\n" - ] - } - ], - "source": [ - "# Generate and Print the Response from ChatBot\n", - "response = chatbot.generate_response(prompt)\n", - "print(response)" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "a96dc143-68c5-41f7-b1e6-d1262bbc8643", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Sure, I have adjusted the `main.py` module to ensure compatibility with the provided LSTM and training enhancements previously discussed. Here's the complete code:\n", - "\n", - "```python\n", - "# main.py\n", - "# Importing Custom Modules\n", - "from data import Dataset\n", - "from lstm_model import FrameInterpolationLSTM\n", - "from autoencoder_model import Grey2RGBAutoEncoder\n", - "from losses import LossMLP, LossMEP\n", - "from training import Trainer\n", - "\n", - "# Import Necessary Libraries\n", - "import os\n", - "import torch\n", - "import numpy as np\n", - "from PIL import Image\n", - "\n", - "# Define Working Directories\n", - "grayscale_dir = '../Dataset/Greyscale'\n", - "rgb_dir = '../Dataset/RGB'\n", - "\n", - "# Define Universal Parameters\n", - "image_height = 4000\n", - "image_width = 6000\n", - "batch_size = 4\n", - "val_split = 0.2\n", - "\n", - "\n", - "def generate_rgb_sequence(model_lstm, model_autoencoder, grey_sequence, n_interpolate_frames, model_save_path_lstm, model_save_path_ae, generated_sequence_dir):\n", - "\n", - " if os.path.exists(model_save_path_lstm):\n", - " state = torch.load(model_save_path_lstm)\n", - " model_lstm.load_state_dict(state)\n", - " model_lstm.eval()\n", - "\n", - " if os.path.exists(model_save_path_ae):\n", - " state = torch.load(model_save_path_ae)\n", - " model_autoencoder.load_state_dict(state)\n", - " model_autoencoder.eval()\n", - "\n", - " # Assume grey_sequence is [C, T, H, W] and we add the batch dimension\n", - " grey_sequence = grey_sequence.unsqueeze(0)\n", - "\n", - " full_sequence_gray = model_lstm(grey_sequence, n_interpolate_frames)\n", - "\n", - " full_sequence_rgb = []\n", - " with torch.no_grad():\n", - " for i in range(full_sequence_gray.size(1)):\n", - " gray_frame = full_sequence_gray[:, i, :, :]\n", - " rgb_frame = model_autoencoder(gray_frame.unsqueeze(dim=0))\n", - " full_sequence_rgb.append(rgb_frame)\n", - "\n", - " os.makedirs(generated_sequence_dir, exist_ok=True)\n", - " for idx, rgb_tensor in enumerate(full_sequence_rgb):\n", - " image_data = rgb_tensor.squeeze().cpu().numpy()\n", - " image_data = np.transpose(image_data, (1, 2, 0))\n", - " image_data = (image_data * 255).astype(np.uint8)\n", - " image = Image.fromarray(image_data)\n", - "\n", - " image_path = os.path.join(generated_sequence_dir, f'generated_frame_{idx:04d}.tif')\n", - " image.save(image_path)\n", - "\n", - " print('The generated sequence of RGB images has been saved.')\n", - "\n", - "\n", - "def main():\n", - " device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", - " \n", - " # Initialize Dataset Object (PyTorch Tensors)\n", - " dataset = Dataset(grayscale_dir, rgb_dir, (image_height, image_width), batch_size)\n", - " \n", - " # Import Loss Functions\n", - " loss_mlp = LossMLP(alpha=0.4) # Maximum Likelihood Loss\n", - " loss_mep = LossMEP(alpha=0.4) # Maximum Entropy Loss\n", - " \n", - " # Initialize AutoEncoder Model and Import Dataloader (Training, Validation)\n", - " 
model_autoencoder = Grey2RGBAutoEncoder().to(device)\n", - " data_autoencoder_train, data_autoencoder_val = dataset.get_autoencoder_batches(val_split)\n", - " \n", - " # Initialize LSTM Model and Import Image Sequences\n", - " grey_sequence = dataset.get_lstm_batches().to(device)\n", - " C, H, W = 1, image_height, image_width \n", - " hidden_size = 64\n", - " num_layers = 3\n", - " n_interpolate_frames = 1 # Number of intermediate frames to interpolate\n", - " kernel_size = 3\n", - " model_lstm = FrameInterpolationLSTM(C, hidden_size, kernel_size, num_layers).to(device)\n", - " \n", - " # Initialize Trainer Objects\n", - " # Maximize Likelihood Principle\n", - " model_save_path_ae = '../Models/model_autoencoder_mlp.pth'\n", - " trainer_mlp_autoencoder = Trainer(model_autoencoder, loss_mlp, model_save_path_ae, device)\n", - " model_save_path_lstm = '../Models/model_lstm_mlp.pth'\n", - " trainer_mlp_lstm = Trainer(model_lstm, loss_mlp, model_save_path_lstm, device)\n", - " # Maximize Entropy Principle\n", - " model_save_path_ae = '../Models/model_autoencoder_mep.pth'\n", - " trainer_mep_autoencoder = Trainer(model_autoencoder, loss_mep, model_save_path_ae, device)\n", - " model_save_path_lstm = '../Models/model_lstm_mep.pth'\n", - " trainer_mep_lstm = Trainer(model_lstm, loss_mep, model_save_path_lstm, device)\n", - " \n", - " # Train Models, Obtain Trained Model\n", - " # Maximize Likelihood Principle\n", - " epochs = 5\n", - " model_autoencoder_mlp = trainer_mlp_autoencoder.train_autoencoder(epochs, data_autoencoder_train, data_autoencoder_val)\n", - " model_lstm_mlp = trainer_mlp_lstm.train_lstm(epochs, n_interpolate_frames, grey_sequence)\n", - " \n", - " # Maximize Entropy Principle\n", - " epochs = 5\n", - " model_autoencoder_mep = trainer_mep_autoencoder.train_autoencoder(epochs, data_autoencoder_train, data_autoencoder_val)\n", - " model_lstm_mep = trainer_mep_lstm.train_lstm(epochs, n_interpolate_frames, grey_sequence)\n", - " \n", - " # Pass Output of LSTM Model to AutoEncoder Model to Obtain Final Output\n", - " # Maximize Likelihood Principle\n", - " model_save_path_ae = '../Models/model_autoencoder_mlp.pth'\n", - " model_save_path_lstm = '../Models/model_lstm_mlp.pth'\n", - " generated_sequence_dir = '../Dataset/GeneratedSequence/MLP'\n", - " generate_rgb_sequence(model_lstm_mlp, model_autoencoder_mlp, grey_sequence, n_interpolate_frames, model_save_path_lstm, model_save_path_ae, generated_sequence_dir)\n", - " \n", - " # Maximize Entropy Principle\n", - " model_save_path_ae = '../Models/model_autoencoder_mep.pth'\n", - " model_save_path_lstm = '../Models/model_lstm_mep.pth'\n", - " generated_sequence_dir = '../Dataset/GeneratedSequence/MEP'\n", - " generate_rgb_sequence(model_lstm_mep, model_autoencoder_mep, grey_sequence, n_interpolate_frames, model_save_path_lstm, model_save_path_ae, generated_sequence_dir)\n", - "\n", - "if __name__ == '__main__':\n", - " main()\n", - "```\n", - "\n", - "A few things to note from these adjustments:\n", - "\n", - "- I included a `device` object to make sure all tensors and models are on the proper device (GPU if it's available).\n", - "- When loading saved LSTM and autoencoder models, I'm ensuring they're being evaluated with `model.eval()`.\n", - "- The function `generate_rgb_sequence` now works for sequences with added batch dimensions and saves each frame as a separate image.\n", - "- Within the `main` function, before the training starts, I've moved the models to the appropriate device.\n", - "- The `Trainer` class objects now require an 
additional `device` argument to function correctly (note that the `Trainer` implementation itself must be updated, which is not shown here, to handle data and model device management).\n",
- "- The training sections for the Maximize Entropy Principle mirror the Maximize Likelihood Principle logic; to avoid redundancy, you could run the two conditionally based on a chosen principle.\n",
- "\n",
- "Please ensure your other custom modules, including the `Trainer` class, are updated correspondingly for proper integration with these changes. Always test the full pipeline before relying on it for training or evaluation, and verify that saved models are indeed at their best validation loss.\n"
- ]
- }
- ],
- "source": [
- "response = chatbot.generate_response(\"Provide complete code for `main.py`\")\n",
- "print(response)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "4aaed4c7-a90a-4541-9885-2b0ae63fd3aa",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.11.5"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/Extras/Verify Image Chanels.ipynb b/Extras/Verify Image Chanels.ipynb
deleted file mode 100644
index d37911d..0000000
--- a/Extras/Verify Image Chanels.ipynb
+++ /dev/null
@@ -1,179 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 4,
- "id": "7ecfff03-a751-4547-9e9d-77b2d60a8377",
- "metadata": {},
- "outputs": [],
- "source": [
- "import os\n",
- "from PIL import Image \n",
- "import pandas as pd\n",
- "\n",
- "def get_tiff_details(dir_path):\n",
- " # Initialize empty lists \n",
- " filenames = []\n",
- " resolutions = []\n",
- " channels = []\n",
- " \n",
- " # Loop through files\n",
- " for filename in os.listdir(dir_path):\n",
- " if filename.lower().endswith('.tif'):\n",
- " \n",
- " img_path = os.path.join(dir_path, filename)\n",
- " img = Image.open(img_path)\n",
- " \n",
- " width, height = img.size\n",
- " num_channels = len(img.getbands())\n",
- " \n",
- " filenames.append(filename)\n",
- " resolutions.append(f'{width} x {height}')\n",
- " channels.append(num_channels)\n",
- " \n",
- " # Create dataframe\n",
- " df = pd.DataFrame({\n",
- " 'Filename': filenames,\n",
- " 'Resolution': resolutions,\n",
- " 'Channels': channels\n",
- " })\n",
- " \n",
- " return df\n",
- "\n",
- "def process_tiffs(dir_path, convert_to_one_channel=False):\n",
- " \n",
- " filenames = []\n",
- " resolutions = []\n",
- " channels = []\n",
- " \n",
- " for filename in os.listdir(dir_path):\n",
- " if filename.lower().endswith('.tif'):\n",
- "\n",
- " img_path = os.path.join(dir_path, filename)\n",
- " img = Image.open(img_path)\n",
- " \n",
- " width, height = img.size\n",
- " \n",
- " filenames.append(filename)\n",
- " resolutions.append(f'{width} x {height}')\n",
- " channels.append(len(img.getbands()))\n",
- " \n",
- " if convert_to_one_channel:\n",
- " gray_img = img.convert('L')\n",
- " gray_img.save(img_path)\n",
- " channels[-1] = 1\n",
- " \n",
- " df = pd.DataFrame({\n",
- " 'Filename': filenames,\n",
- " 'Resolution': resolutions, \n",
- " 'Channels': channels\n",
- " })\n",
- " \n",
- " return df"
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "26e2aa36-c59e-4316-9205-d774c10d4039", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " Filename Resolution Channels\n", - "0 Greyscale (1).tif 4000 x 6000 1\n", - "1 Greyscale (10).tif 4000 x 6000 1\n", - "2 Greyscale (11).tif 4000 x 6000 1\n", - "3 Greyscale (12).tif 4000 x 6000 1\n", - "4 Greyscale (13).tif 4000 x 6000 1\n", - "5 Greyscale (14).tif 4000 x 6000 1\n", - "6 Greyscale (15).tif 4000 x 6000 1\n", - "7 Greyscale (16).tif 4000 x 6000 1\n", - "8 Greyscale (2).tif 4000 x 6000 1\n", - "9 Greyscale (3).tif 4000 x 6000 1\n", - "10 Greyscale (4).tif 4000 x 6000 1\n", - "11 Greyscale (5).tif 4000 x 6000 1\n", - "12 Greyscale (6).tif 4000 x 6000 1\n", - "13 Greyscale (7).tif 4000 x 6000 1\n", - "14 Greyscale (8).tif 4000 x 6000 1\n", - "15 Greyscale (9).tif 4000 x 6000 1\n" - ] - } - ], - "source": [ - "# Example usage \n", - "tiff_dir = 'Dataset/Greyscale/'\n", - "df = get_tiff_details(tiff_dir)\n", - "print(df)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "25e87687-c881-4687-8bf6-c987be2e3b08", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " Filename Resolution Channels\n", - "0 Greyscale (1).tif 4000 x 6000 1\n", - "1 Greyscale (10).tif 4000 x 6000 1\n", - "2 Greyscale (11).tif 4000 x 6000 1\n", - "3 Greyscale (12).tif 4000 x 6000 1\n", - "4 Greyscale (13).tif 4000 x 6000 1\n", - "5 Greyscale (14).tif 4000 x 6000 1\n", - "6 Greyscale (15).tif 4000 x 6000 1\n", - "7 Greyscale (16).tif 4000 x 6000 1\n", - "8 Greyscale (2).tif 4000 x 6000 1\n", - "9 Greyscale (3).tif 4000 x 6000 1\n", - "10 Greyscale (4).tif 4000 x 6000 1\n", - "11 Greyscale (5).tif 4000 x 6000 1\n", - "12 Greyscale (6).tif 4000 x 6000 1\n", - "13 Greyscale (7).tif 4000 x 6000 1\n", - "14 Greyscale (8).tif 4000 x 6000 1\n", - "15 Greyscale (9).tif 4000 x 6000 1\n" - ] - } - ], - "source": [ - "# Example usage\n", - "tiff_dir = 'Dataset/Greyscale/' \n", - "df = process_tiffs(tiff_dir, convert_to_one_channel=True)\n", - "print(df)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5180cdca-4df3-4294-81c5-769e0e0fbeff", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -}