Tho Tran committed on
Commit f96716d · unverified · 1 Parent(s): fdf66b4

Add files via upload

Files changed (1)
  1. app.py +78 -0
app.py ADDED
@@ -0,0 +1,78 @@
+ import torch
+ import torchvision.models as models
+ import torchvision.transforms as transforms
+ from PIL import Image
+ import os
+ from glob import glob
+ import json
+ from json import JSONEncoder
+ import numpy
+ from sklearn.neighbors import NearestNeighbors
+ import streamlit as st
+
+ # JSON encoder that serializes numpy arrays as plain lists
+ class NumpyArrayEncoder(JSONEncoder):
+     def default(self, obj):
+         if isinstance(obj, numpy.ndarray):
+             return obj.tolist()
+         return JSONEncoder.default(self, obj)
+
+ # Precomputed feature vectors for the LFW images, keyed by image path
+ with open('sample.json') as json_file:
+     data = json.load(json_file)
+
+ # Pretrained ResNet-50; its avgpool layer yields a 2048-d feature vector
+ resnet = models.resnet50(pretrained=True)
+ layer = resnet._modules.get('avgpool')
+
+ # Grab all images in the lfw folder
+ path = "./lfw"
+ result = [y for x in os.walk(path) for y in glob(os.path.join(x[0], '*.jpg'))]
+ resnet.eval()
+
+ # d = {}
+
+ preprocess = transforms.Compose([
+     transforms.Resize(256),
+     transforms.CenterCrop(224),
+     transforms.ToTensor(),
+     transforms.Normalize(mean=[.485, .456, .406], std=[.229, .224, .225]),
+ ])
+
+ def get_vector(image):
+     # Create a PyTorch tensor with the transformed image
+     t_img = preprocess(image)
+     my_embedding = torch.zeros(2048)
+
+     # Define a function that will copy the output of a layer
+     def copy_data(m, i, o):
+         my_embedding.copy_(o.flatten())  # <-- flatten
+
+     # Attach that function to our selected layer
+     h = layer.register_forward_hook(copy_data)
+     # Run the model on our transformed image
+     with torch.no_grad():  # <-- no_grad context
+         resnet(t_img.unsqueeze(0))  # <-- unsqueeze
+     # Detach our copy function from the layer
+     h.remove()
+     # Return the feature vector
+     return my_embedding
+
+ # for image in result:
+ #     d[image] = get_vector(Image.open(image).convert('RGB')).numpy()
+
+ image = st.file_uploader(label="upload your own file", type="jpg")
+ if image is None:
+     st.write("upload an image")
+ else:
+     # Embed the uploaded image, then find its nearest neighbours among the precomputed features
+     query = get_vector(Image.open(image).convert('RGB')).numpy()
+     featurelist = []
+     for img in data:
+         featurelist.append(data[img])
+     neighbors = NearestNeighbors(n_neighbors=10, algorithm='brute', metric='euclidean').fit(featurelist)
+     distances, indices = neighbors.kneighbors(query.reshape(1, -1))
+     similar = []
+     for i in range(10):
+         similar.append(result[indices[0][i]])
+     st.image(similar, caption=similar)