first commit
- .gitattributes +1 -0
- .gitignore +3 -0
- app.py +21 -0
- generator_model_6of50_epochs_at_2023-04-16-21_0.pth +3 -0
- requirements.txt +2 -0
- utils.py +147 -0
.gitattributes
CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+generator_model_25_epochs_at_2023-04-16-18_12.pth filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,3 @@
+log.csv
+flagged
+__pycache__
app.py
ADDED
@@ -0,0 +1,21 @@
+import gradio as gr
+import torch
+from utils import load_model, generate_random_img
+
+def generate_image():
+    with torch.no_grad():
+        model = load_model('generator', 'generator_model_6of50_epochs_at_2023-04-16-21_0.pth')
+        generated_image = generate_random_img(model)
+    return generated_image
+
+iface = gr.Interface(
+    fn=generate_image,
+    inputs=[],
+    outputs=gr.outputs.Image(type='pil'),  # generate_random_img returns a PIL image
+    allow_screenshot=True,
+    title='Random Landscape Image Generator By Huseyn Gorbani',
+    description='This app generates random images using a DCGAN-inspired WGAN-GP model. Special thanks to Aladdin Persson (repo: https://github.com/aladdinpersson/Machine-Learning-Collection/tree/master/ML/Pytorch/GANs/4.%20WGAN-GP) and Emilien Dupont (repo: https://github.com/EmilienDupont/wgan-gp/blob/master/training.py) for their insightful repos on GitHub.',
+)
+
+iface.launch()
+
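Note: gr.outputs.Image and allow_screenshot belong to the Gradio 2.x API and were removed in Gradio 3+. A minimal sketch of the same interface assuming a newer Gradio release (an assumption, not part of this commit):

import gradio as gr
from utils import load_model, generate_random_img

def generate_image():
    # load the committed checkpoint and sample one image from the generator
    model = load_model('generator', 'generator_model_6of50_epochs_at_2023-04-16-21_0.pth')
    return generate_random_img(model)

iface = gr.Interface(
    fn=generate_image,
    inputs=[],
    outputs=gr.Image(type='pil'),  # gr.Image replaces gr.outputs.Image; allow_screenshot no longer exists
    title='Random Landscape Image Generator By Huseyn Gorbani',
)
iface.launch()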
generator_model_6of50_epochs_at_2023-04-16-21_0.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa3e403793d3dc7590f632b7921c8bf403c44933811ddf98560cf63987755b4a
+size 51311505
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+torch
+torchvision
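requirements.txt lists only torch and torchvision, presumably because a Space built with the Gradio SDK provides gradio through its runtime. A hypothetical requirements.txt for running app.py outside Spaces (an assumption, not part of this commit) would also need gradio:

torch
torchvision
gradio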
utils.py
ADDED
@@ -0,0 +1,147 @@
+# Generator model
+# Critic model
+
+import torch
+import torchvision
+import torch.nn as nn
+# import torch.optim as optim
+from torchvision.utils import save_image
+from torchvision.transforms import transforms
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+
+noise_dim = 100
+img_channels = 3
+gen_features = 64
+critic_features = 64
+
+
+class Generator(nn.Module):
+    def __init__(self, noise_dim, img_channels, gen_features):
+        super(Generator, self).__init__()
+
+        self.gen = nn.Sequential(
+            # Input: N x noise_dim x 1 x 1
+            self._block(noise_dim, gen_features * 16, 4, 1, 0),
+            self._block(gen_features * 16, gen_features * 8, 4, 2, 1),
+            self._block(gen_features * 8, gen_features * 4, 4, 2, 1),
+            self._block(gen_features * 4, gen_features * 2, 4, 2, 1),
+            self._block(gen_features * 2, gen_features, 4, 2, 1),
+            self._block(gen_features, gen_features // 2, 4, 2, 1),
+            nn.ConvTranspose2d(gen_features // 2, img_channels, kernel_size=4, stride=2, padding=1),
+            nn.Tanh()
+            # Output: N x img_channels x 256 x 256
+        )
+
+    def _block(self, in_c, out_c, k_size, s_size, p_size):  # a nice practice learned from https://github.com/aladdinpersson
+        return nn.Sequential(
+            nn.ConvTranspose2d(
+                in_c, out_c,
+                k_size, s_size, p_size
+            ),
+            nn.BatchNorm2d(out_c),
+            nn.ReLU(),
+        )
+
+    def forward(self, x):
+        return self.gen(x)
+
+
+class Critic(nn.Module):  # aka the discriminator (called the critic in WGAN)
+    def __init__(self, img_channels, critic_features):
+        super(Critic, self).__init__()
+        self.critic = nn.Sequential(
+            # Input: N x img_channels x 256 x 256
+            nn.Conv2d(img_channels, critic_features, kernel_size=4, stride=2, padding=1),
+            nn.LeakyReLU(0.2),
+            self._block(critic_features, critic_features * 2, 4, 2, 1),
+            self._block(critic_features * 2, critic_features * 4, 4, 2, 1),
+            self._block(critic_features * 4, critic_features * 8, 4, 2, 1),
+            self._block(critic_features * 8, critic_features * 16, 4, 2, 1),
+            self._block(critic_features * 16, critic_features * 32, 4, 2, 1),
+            nn.Conv2d(critic_features * 32, 1, kernel_size=4, stride=1, padding=0)
+            # Output: N x 1 x 1 x 1
+        )
+
+    def _block(self, in_c, out_c, k_size, s_size, p_size):  # a nice practice learned from https://github.com/aladdinpersson
+        return nn.Sequential(
+            nn.Conv2d(
+                in_c, out_c,
+                k_size, s_size, p_size
+            ),
+            nn.BatchNorm2d(out_c),
+            nn.LeakyReLU(0.2),
+        )
+
+    def forward(self, x):
+        return self.critic(x)
+
+
+def weights_init(m):
+    if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear, nn.BatchNorm2d)):
+        nn.init.normal_(m.weight.data, 0.0, 0.02)
+        nn.init.constant_(m.bias.data, 0)
+
+# The following gradient penalty function is taken from:
+# https://github.com/aladdinpersson/Machine-Learning-Collection/blob/master/ML/Pytorch/GANs/4.%20WGAN-GP/utils.py
+def gradient_penalty(critic, real, fake, device):
+    BATCH_SIZE, C, H, W = real.shape
+    alpha = torch.rand((BATCH_SIZE, 1, 1, 1)).repeat(1, C, H, W).to(device)
+    interpolated_images = real * alpha + fake * (1 - alpha)
+
+    # calculating the critic scores
+    mixed_scores = critic(interpolated_images)
+
+    # taking the gradient of the scores w.r.t. the images
+    gradient = torch.autograd.grad(
+        inputs=interpolated_images,
+        outputs=mixed_scores,
+        grad_outputs=torch.ones_like(mixed_scores),
+        create_graph=True,
+        retain_graph=True,
+    )[0]
+
+    gradient = gradient.view(gradient.shape[0], -1)
+    gradient_norm = gradient.norm(2, dim=1)
+    gradient_penalty = torch.mean((gradient_norm - 1) ** 2)
+    return gradient_penalty
+
+
+def load_model(model_type, model_path):
+    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+    # Loading the model based on model_type
+    if model_type == 'generator':
+        model = Generator(noise_dim, img_channels, gen_features)
+    elif model_type == 'critic':
+        model = Critic(img_channels, critic_features)
+    else:
+        raise ValueError(f"Invalid model_type: {model_type}")
+
+    model.load_state_dict(torch.load(model_path, map_location=device))
+    model.to(device)
+    model.eval()
+
+    return model
+
+import torchvision.transforms as transforms
+from PIL import Image
+
+def generate_random_img(model):
+
+    # Creating a random noise tensor
+    noise = torch.randn(1, noise_dim, 1, 1).to(device)  # 1 is the number of images to generate
+
+    # Generating an image using the trained generator
+    with torch.no_grad():
+        generated_image = model(noise)
+
+    # Converting the generated tensor to a PIL image; Tanh output is mapped from [-1, 1] to [0, 1] first
+    generated_image = generated_image.cpu().detach().squeeze(0) * 0.5 + 0.5
+    generated_image = transforms.ToPILImage()(generated_image)
+
+    return generated_image
+
+if __name__ == "__main__":
+    model = load_model('generator', 'generator_model_25_epochs_at_2023-04-16-18_12.pth')
+    generate_random_img(model)
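For reference, gradient_penalty implements the WGAN-GP regularizer from the repos credited above: for an interpolate \hat{x} = \alpha x_{\text{real}} + (1 - \alpha) x_{\text{fake}} with \alpha \sim U[0, 1], it returns

\mathbb{E}_{\hat{x}}\left[\left(\lVert \nabla_{\hat{x}} D(\hat{x}) \rVert_2 - 1\right)^2\right]

leaving multiplication by the penalty coefficient \lambda (commonly 10) to the training loop.

A minimal sketch of using these helpers outside the Gradio app, assuming the checkpoint committed here sits in the working directory (the output filename is hypothetical, for illustration only):

from utils import load_model, generate_random_img

# load the generator checkpoint added in this commit and sample one image
model = load_model('generator', 'generator_model_6of50_epochs_at_2023-04-16-21_0.pth')
img = generate_random_img(model)  # a 256x256 PIL image decoded from random noise
img.save('sample.png')  # hypothetical output path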