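"""Run a trained autoencoder on a single image and compare it with its reconstruction.

Loads the weights saved in `autoencoder.pth`, encodes and decodes one image with the
`aeModel` defined in `model.py`, and shows the original and decoded images side by side.
"""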
import torch
from torchvision import transforms
from PIL import Image
import matplotlib.pyplot as plt
from model import aeModel


def load_model(model_path, device):
    model = aeModel().to(device)
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()
    return model


def process_single_image(image_path, model, device):
    # Resize to 64x64 and convert to a [0, 1] tensor
    transform = transforms.Compose([
        transforms.Resize((64, 64)),
        transforms.ToTensor(),
    ])
    image = Image.open(image_path).convert('RGB')
    image_tensor = transform(image).unsqueeze(0).to(device)  # add batch dimension
    with torch.no_grad():
        encoded = model.encode(image_tensor)
        reconstruction = model.decode(encoded)
    print(f'Original shape: {image_tensor.shape}')
    print(f'Encoded shape: {encoded.shape}')
    print(f'Decoded shape: {reconstruction.shape}')
    return image_tensor.squeeze(0).cpu(), reconstruction.squeeze(0).cpu()


def visualize_original_and_reconstruction(original, reconstruction):
    # Clamp to the valid [0, 1] range before plotting
    original = torch.clamp(original, 0, 1)
    reconstruction = torch.clamp(reconstruction, 0, 1)
    fig, axes = plt.subplots(1, 2, figsize=(8, 4))
    axes[0].imshow(original.permute(1, 2, 0))  # CHW -> HWC for matplotlib
    axes[0].set_title("Original")
    axes[0].axis("off")
    axes[1].imshow(reconstruction.permute(1, 2, 0))
    axes[1].set_title("Decoded")
    axes[1].axis("off")
    plt.tight_layout()
    plt.show()


if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    model_path = 'autoencoder.pth'
    model = load_model(model_path, device)

    image_path = r"dataset\images\proof_2.png"
    original, reconstruction = process_single_image(image_path, model, device)
    visualize_original_and_reconstruction(original, reconstruction)