Update app.py
app.py
CHANGED
@@ -1,5 +1,5 @@
 import streamlit as st
-from transformers import CLIPModel,
+from transformers import CLIPModel, pipeline, CLIPImageProcessor
 import torch
 from PIL import Image
 
@@ -13,7 +13,7 @@ def load_clip(model_size='large'):
     MODEL_name = 'openai/clip-vit-large-patch14'
 
     model = CLIPModel.from_pretrained(MODEL_name)
-    processor =
+    processor = CLIPImageProcessor.from_pretrained(MODEL_name)
 
     return processor, model
 
@@ -48,11 +48,11 @@ with col_l:
 
     if picture_file is not None:
         image = Image.open(picture_file)
-        st.image(image, caption='Please upload an image of the damage'
+        st.image(image, caption='Please upload an image of the damage') #use_column_width=True
 
         #image
 with col_l:
-    default_options = ['
+    default_options = ['There is a car', 'There is no car']
     options = st.text_input(label="Please enter the classes", value=default_options)
     #options = list(options)
 
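The rest of app.py is not shown in this diff, so the following is only a sketch of how the newly imported pieces could plug together. It assumes the `pipeline` import is intended for zero-shot image classification of the uploaded picture against the user-supplied classes; the filename and the printing loop are illustrative stand-ins, not the app's actual code.

# Hypothetical sketch: wiring the imports from this commit together.
# The rest of app.py is not in the diff, so the names below are assumptions.
from PIL import Image
from transformers import pipeline

# Same CLIP checkpoint that load_clip() pins
classifier = pipeline('zero-shot-image-classification',
                      model='openai/clip-vit-large-patch14')

image = Image.open('damage.jpg')  # stand-in for the uploaded picture_file
labels = ['There is a car', 'There is no car']  # the default_options from the diff

# Each result is a dict with a candidate label and its softmax score
for result in classifier(image, candidate_labels=labels):
    print(f"{result['label']}: {result['score']:.3f}")

Note that `CLIPImageProcessor` only preprocesses images; scoring text classes against an image also needs the tokenizer, which the zero-shot pipeline above bundles automatically.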