Cristiants committed on
Commit fa3cb20 • 1 Parent(s): b39ba03

Create app.py

Files changed (1)
  1. app.py +80 -0
app.py ADDED
@@ -0,0 +1,80 @@
+ import sys
+ import subprocess
+ import os
+
+ # In Colab, install the pinned dependencies and fetch the BLIP repo.
+ # The original `!pip` / `%cd` notebook magics are not valid Python in a
+ # script, so subprocess/os equivalents are used instead.
+ if 'google.colab' in sys.modules:
+     print('Running in Colab.')
+     subprocess.run([sys.executable, '-m', 'pip', 'install',
+                     'transformers==4.15.0', 'timm==0.4.12', 'fairscale==0.4.4'], check=True)
+     subprocess.run(['git', 'clone', 'https://github.com/salesforce/BLIP'], check=True)
+     os.chdir('BLIP')
+
+ import gradio as gr
+ import requests
+ import torch
+ from PIL import Image
+ from torchvision import transforms
+ from torchvision.transforms.functional import InterpolationMode
+
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+ # --- ImageNet classification demo (superseded by the BLIP captioning demo below) ---
+ model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()
+ response = requests.get("https://git.io/JJkYN")  # plain-text list of the 1000 ImageNet labels
+ labels = response.text.split("\n")
+
+ def predict(inp):
+     inp = transforms.ToTensor()(inp).unsqueeze(0)
+     with torch.no_grad():
+         prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)
+     confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
+     return confidences
+
+ demo = gr.Interface(fn=predict,
+                     inputs=gr.inputs.Image(type="pil"),
+                     outputs=gr.outputs.Label(num_top_classes=3))
+
+ def load_demo_image(image_size, device, imageurl):
+     raw_image = Image.open(requests.get(imageurl, stream=True).raw).convert('RGB')
+
+     if 'google.colab' in sys.modules:
+         # Inline preview of the raw image; display() only exists in notebooks.
+         from IPython.display import display
+         w, h = raw_image.size
+         display(raw_image.resize((w // 5, h // 5)))
+
+     transform = transforms.Compose([
+         transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
+         transforms.ToTensor(),
+         transforms.Normalize((0.48145466, 0.4578275, 0.40821073),
+                              (0.26862954, 0.26130258, 0.27577711))
+     ])
+     image = transform(raw_image).unsqueeze(0).to(device)
+     return image
+
+ from models.blip import blip_decoder
+
+ # Load the BLIP captioning model once at startup instead of on every request.
+ model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
+ blip_model = blip_decoder(pretrained=model_url, image_size=384, vit='base')
+ blip_model.eval()
+ blip_model = blip_model.to(device)
+
+ def predict(imageurl):  # replaces the classification predict defined above
+     image_size = 384
+     image = load_demo_image(image_size=image_size, device=device, imageurl=imageurl)
+
+     with torch.no_grad():
+         # beam search
+         caption = blip_model.generate(image, sample=False, num_beams=3, max_length=20, min_length=5)
+         # nucleus sampling
+         # caption = blip_model.generate(image, sample=True, top_p=0.9, max_length=20, min_length=5)
+     return 'caption: ' + caption[0]
+
+ demo = gr.Interface(fn=predict,
+                     inputs="text",
+                     outputs="text")  # the caption is a plain string, not class confidences
+ demo.launch()
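A quick smoke test without the web UI is to call the caption function directly (in place of demo.launch()) once the model has loaded. A minimal sketch; the sample image URL below is assumed from the official BLIP demo, and any reachable image URL would do:

    # Hypothetical smoke test: caption one image and print the result.
    print(predict('https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'))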