Commit 760312f
Parent(s): 5dd4d9f

Fcommit

- Load.py +99 -0
- README.md +50 -0
- ReadMe.txt +50 -0
- aziz-model-64p-v2.pth +3 -0
- enhanced_image.jpg +0 -0
- image_enhancement_model.pth +3 -0
- layer-model-8p-v1.0.pth +3 -0
- layer-model-8p-v2.0.pth +3 -0
- model-8p-v3.pth +3 -0
- pixel.py +117 -0
- requirements.txt +4 -0
- script.py +17 -0
- testb.jpg +0 -0
    	
Load.py ADDED

@@ -0,0 +1,99 @@
import os
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from PIL import Image

# Model architecture: a small three-layer convolutional network that maps an
# RGB image to an enhanced RGB image of the same size
class ImageEnhancementModel(nn.Module):
    def __init__(self):
        super(ImageEnhancementModel, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, padding=1)
        self.relu1 = nn.ReLU()
        self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1)
        self.relu2 = nn.ReLU()
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=3, kernel_size=3, padding=1)

    def forward(self, x):
        x = self.relu1(self.conv1(x))
        x = self.relu2(self.conv2(x))
        x = self.conv3(x)
        return x

# Paired dataset that returns (before, after) image tensors. The original
# version re-created an "after" dataset inside the training loop and compared
# each batch against a single unrelated target image; pairing the two
# directories by filename fixes that. Assumes every file in before_dir has a
# same-named counterpart in after_dir.
class CustomDataset(Dataset):
    def __init__(self, before_dir, after_dir):
        self.before_dir = before_dir
        self.after_dir = after_dir
        self.image_files = sorted(os.listdir(before_dir))
        # Resize to a fixed size so images can be stacked into batches
        self.transform = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
        ])

    def __len__(self):
        return len(self.image_files)

    def _load(self, path):
        image = Image.open(path)
        if image.mode != 'RGB':
            image = image.convert('RGB')
        return self.transform(image)

    def __getitem__(self, idx):
        name = self.image_files[idx]
        before = self._load(os.path.join(self.before_dir, name))
        after = self._load(os.path.join(self.after_dir, name))
        return before, after

# Hyperparameters
batch_size = 8
learning_rate = 0.001
num_epochs = 50

model = ImageEnhancementModel()

# Loss function and optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# DataLoader over paired before/after images
train_dataset = CustomDataset(before_dir='before', after_dir='after')
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)

# Training loop
for epoch in range(num_epochs):
    for data, target_data in train_loader:
        # Forward pass
        outputs = model(data)

        # Use the "after" images as targets
        loss = criterion(outputs, target_data)

        # Backpropagation and optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

# Save the trained model
torch.save(model.state_dict(), 'image_enhancement_model.pth')

# Inference (enhance an image)
model.eval()  # Set the model to evaluation mode

# Load and preprocess an input image with the same transform used in training
input_image = Image.open('testb.jpg').convert('RGB')
input_image = train_dataset.transform(input_image).unsqueeze(0)

# Use the trained model to enhance the input image
with torch.no_grad():
    enhanced_image = model(input_image)

# Save the result. ToTensor() produces values in [0, 1], so rescale by 255;
# the original denormalized as if the range were [-1, 1]
output_image = enhanced_image.squeeze().permute(1, 2, 0).cpu().numpy()
output_image = (output_image.clip(0.0, 1.0) * 255.0).astype('uint8')
Image.fromarray(output_image).save('enhanced_image.jpg')
    	
README.md ADDED

@@ -0,0 +1,50 @@
# Neural Style Transfer (NST) for Image Enhancement

Enhance your images using Neural Style Transfer by combining the content of an input image with the style of a reference image.

## Description

This project uses TensorFlow to perform Neural Style Transfer (NST) on an input image using a style reference image. NST transfers the artistic style of one image (the style reference) onto the content of another (the input image), producing a new image that combines the content of the input with the artistic style of the reference.

## Prerequisites

- Python 3.x
- TensorFlow 2.x
- NumPy
- Matplotlib
- Pillow

You can install the required Python packages by running:
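
```bash
pip install -r requirements.txt
```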

## Usage

1. Prepare your input image and style reference image and save them in the project directory.

2. Update the paths to your input and style reference images in the script (`input_image_path` and `style_image_path` variables).

3. Run the script:
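
   ```bash
   python pixel.py
   ```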

4. The script will optimize the generated image to combine the content of the input image with the style of the reference image.

5. The final enhanced image will be saved as `enhanced_image.jpg` in the project directory.

## Examples

Here are some example results of using NST to enhance images:

## License

This project is licensed under the Aziz Karoui License - see the [LICENSE](LICENSE) file for details.

## Acknowledgments

- This project is based on the Neural Style Transfer technique developed by Gatys et al.
- Pre-trained VGG models provided by the Keras team.

Feel free to modify this README file to include more details, usage instructions, or additional sections relevant to your project.
    	
ReadMe.txt ADDED

@@ -0,0 +1,50 @@
# Neural Style Transfer (NST) for Image Enhancement

Enhance your images using Neural Style Transfer by combining the content of an input image with the style of a reference image.

## Description

This project uses TensorFlow to perform Neural Style Transfer (NST) on an input image using a style reference image. NST transfers the artistic style of one image (the style reference) onto the content of another (the input image), producing a new image that combines the content of the input with the artistic style of the reference.

## Prerequisites

- Python 3.x
- TensorFlow 2.x
- NumPy
- Matplotlib
- Pillow

You can install the required Python packages by running:
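
```bash
pip install -r requirements.txt
```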

## Usage

1. Prepare your input image and style reference image and save them in the project directory.

2. Update the paths to your input and style reference images in the script (`input_image_path` and `style_image_path` variables).

3. Run the script:
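
   ```bash
   python pixel.py
   ```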

4. The script will optimize the generated image to combine the content of the input image with the style of the reference image.

5. The final enhanced image will be saved as `enhanced_image.jpg` in the project directory.

## Examples

Here are some example results of using NST to enhance images:

## License

This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.

## Acknowledgments

- This project is based on the Neural Style Transfer technique developed by Gatys et al.
- Pre-trained VGG models provided by the Keras team.

Feel free to modify this README file to include more details, usage instructions, or additional sections relevant to your project.
    	
aziz-model-64p-v2.pth ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bf42fb8b4cc29610c6a53554fe7ebe4cd99edbb464badf10da4053e0b25efc5c
size 164135
    	
enhanced_image.jpg ADDED
    	
image_enhancement_model.pth ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cf6c6fefb164747518c9d5e47f4ea218b5da2f37843c0b240ec92987fcc68925
size 164247
    	
layer-model-8p-v1.0.pth ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:83a123c480a5c3fd7a0238565ce97ca3962f30fef18c46a1d441c5612550d411
size 22759
    	
layer-model-8p-v2.0.pth ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:12c4aa0f97d4acc7fdb39b95dc7682bb4e3d8c9028f32a827b40578186de44f3
size 22759
    	
model-8p-v3.pth ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f659eb14a0d36491a8318277df1bab610a4a566a4a95cc84ab2659802c55c95e
size 15799
    	
pixel.py ADDED

@@ -0,0 +1,117 @@
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing import image as tf_image
from tensorflow.keras.applications import VGG19
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

# Load a pre-trained VGG19 model without the fully connected layers (used for feature extraction)
base_model = VGG19(weights="imagenet", include_top=False)

# Specify the layers to use for style and content representations
style_layers = ["block1_conv1", "block2_conv1", "block3_conv1", "block4_conv1", "block5_conv1"]
content_layer = "block4_conv2"

# Create models that extract style and content features
style_extractor = Model(inputs=base_model.input, outputs=[base_model.get_layer(layer).output for layer in style_layers])
content_extractor = Model(inputs=base_model.input, outputs=base_model.get_layer(content_layer).output)

# Gram matrix for the style representation
def gram_matrix(input_tensor):
    result = tf.linalg.einsum("bijc,bijd->bcd", input_tensor, input_tensor)
    input_shape = tf.shape(input_tensor)
    num_locations = tf.cast(input_shape[1] * input_shape[2], tf.float32)
    return result / num_locations

# Style loss: mean squared difference between Gram matrices, summed over the style layers
def style_loss(style_targets, predicted_styles):
    loss = 0.0
    for style_target, predicted_style in zip(style_targets, predicted_styles):
        loss += tf.reduce_mean(tf.square(gram_matrix(style_target) - gram_matrix(predicted_style)))
    return loss

# Content loss: mean squared difference between feature maps
def content_loss(content_target, predicted_content):
    return tf.reduce_mean(tf.square(content_target - predicted_content))

# Load your input and style images
input_image_path = "input_image.jpg"
style_image_path = "style_image.jpg"

input_image = tf_image.img_to_array(tf_image.load_img(input_image_path))
style_image = tf_image.img_to_array(tf_image.load_img(style_image_path))

# Resize and preprocess the images (VGG19 requires specific preprocessing)
input_image = tf_image.smart_resize(input_image, (256, 256))
style_image = tf_image.smart_resize(style_image, (256, 256))

input_image = tf.keras.applications.vgg19.preprocess_input(input_image)
style_image = tf.keras.applications.vgg19.preprocess_input(style_image)

input_image = np.expand_dims(input_image, axis=0)
style_image = np.expand_dims(style_image, axis=0)

# The generated image is the variable being optimized, initialized from the input image
generated_image = tf.Variable(input_image, dtype=tf.float32)

# Optimizer and number of optimization iterations
optimizer = Adam(learning_rate=10.0)
num_iterations = 1000

# Fixed targets: style features of the style image and content features of the
# input image, computed once. (The original rebuilt the style features once per
# style layer, which produced a nested list instead of one feature map per layer.)
style_targets = style_extractor(style_image)
content_target = content_extractor(input_image)

# preprocess_input subtracts the ImageNet channel means (in BGR order), so
# valid pixel values lie in [-mean, 255 - mean] per channel, not [0, 255]
vgg_means = tf.constant([103.939, 116.779, 123.68])

# Main optimization loop
for iteration in range(num_iterations):
    with tf.GradientTape() as tape:
        # Extract style features from the generated image
        generated_styles = style_extractor(generated_image)
        # The original compared the content target against the last *style*
        # layer's features; the dedicated content layer is used here instead
        generated_content = content_extractor(generated_image)

        # Total loss as a combination of style and content loss
        total_loss = style_loss(style_targets, generated_styles) + content_loss(content_target, generated_content)

    # Compute gradients and update the generated image
    gradients = tape.gradient(total_loss, generated_image)
    optimizer.apply_gradients([(gradients, generated_image)])

    # Keep pixel values within the valid (mean-subtracted) range
    generated_image.assign(tf.clip_by_value(generated_image, -vgg_means, 255.0 - vgg_means))

    # Print the progress
    if iteration % 100 == 0:
        print(f"Iteration {iteration}, Total loss: {total_loss}")

# Deprocess the final image: add the channel means back and convert BGR -> RGB
final_image = generated_image[0].numpy() + vgg_means.numpy()
final_image = final_image[..., ::-1]

# Clip pixel values to the [0, 255] range and cast to uint8
final_image = np.clip(final_image, 0, 255).astype(np.uint8)

# Save the final image
final_image_path = "enhanced_image.jpg"
tf.keras.preprocessing.image.save_img(final_image_path, final_image)

# Display the final enhanced image
plt.imshow(final_image)
plt.axis("off")
plt.show()
    	
requirements.txt ADDED

@@ -0,0 +1,4 @@
tensorflow==2.6.0
numpy==1.19.5
matplotlib==3.3.4
Pillow==8.2.0
    	
script.py ADDED

@@ -0,0 +1,17 @@
from PIL import Image, ImageEnhance

# Open the input image
image = Image.open('dog.jpg')

# Upscale the image to twice its original size
width, height = image.size
target_width = width * 2
target_height = height * 2
resized_image = image.resize((target_width, target_height), Image.BILINEAR)

# Apply image enhancements
enhancer = ImageEnhance.Brightness(resized_image)
enhanced_image = enhancer.enhance(1)  # 1.0 leaves brightness unchanged; raise above 1 to brighten, lower to darken

# Save the final enhanced image
enhanced_image.save('dogii.jpg')
    	
testb.jpg ADDED