Update app.py

app.py CHANGED
@@ -12,33 +12,33 @@ sentence_model = SentenceTransformer("all-MiniLM-L6-v2")
 processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
 image_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
 
-def generate_input(
-#
-
+def generate_input(input_type, image=None, text=None):
+    # Initialize the input variable
+    combined_input = ""
+
+    # Handle image input if chosen
+    if input_type == "Image" and image:
         inputs = processor(images=image, return_tensors="pt")
         out = image_model.generate(**inputs)
         image_caption = processor.decode(out[0], skip_special_tokens=True)
-
-
-
-
-
-
-
-
-
-
-
-
-        input = image_caption+" "+text_input
-    elif image_caption:
-        input = image_caption
-    elif text:
-        input = text_input
-    else:
-        input = "No input provided."
+        combined_input += image_caption # Add the image caption to input
+
+    # Handle text input if chosen
+    elif input_type == "Text" and text:
+        combined_input += text # Add the text to input
+
+    # Handle both text and image input if chosen
+    elif input_type == "Both" and image and text:
+        inputs = processor(images=image, return_tensors="pt")
+        out = image_model.generate(**inputs)
+        image_caption = processor.decode(out[0], skip_special_tokens=True)
+        combined_input += image_caption + " " + text # Combine image caption and text
 
-
+    # If no input, fallback
+    if not combined_input:
+        combined_input = "No input provided."
+
+    return vector_search(combined_input)
 
 # Load embeddings and metadata
 embeddings = np.load("netflix_embeddings.npy") #created using sentence_transformers on kaggle
@@ -59,13 +59,14 @@
     gr.Markdown("# Netflix Recommendation System")
    gr.Markdown("Enter a query to receive Netflix show recommendations based on title, description, and genre.")
     with gr.Row():
+        input_choice = gr.Radio(choices=["Image", "Text", "Both"], label="Select Input Type", value="Both")
         image_input = gr.Image(label="Upload Image", type="pil", optional=True) # Image input
         text_input = gr.Textbox(label="Enter Text Query", placeholder="Enter a description or query here", optional=True) # Text input
 
     submit_button = gr.Button("Submit")
     output = gr.Textbox(label="Recommendations")
 
-    submit_button.click(fn=generate_input, inputs=[image_input, text_input], outputs=output)
+    submit_button.click(fn=generate_input, inputs=[input_choice,image_input, text_input], outputs=output)
 
 demo.launch()
 
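The new handler ends with vector_search(combined_input), a helper defined elsewhere in app.py that this diff does not touch. A rough sketch of what such a function could look like, assuming the stored netflix_embeddings.npy vectors were produced with the same all-MiniLM-L6-v2 model and that show metadata sits in a CSV (the file name, column names, and top_k parameter below are assumptions, not code from the Space):

from sentence_transformers import SentenceTransformer, util
import numpy as np
import pandas as pd

sentence_model = SentenceTransformer("all-MiniLM-L6-v2")
embeddings = np.load("netflix_embeddings.npy").astype(np.float32)  # precomputed show embeddings, as in app.py
metadata = pd.read_csv("netflix_titles.csv")  # assumed metadata file with title/listed_in/description columns

def vector_search(query, top_k=5):
    # Embed the query with the same model that produced the stored embeddings
    query_embedding = sentence_model.encode(query)
    # Cosine similarity between the query and every stored show embedding
    scores = util.cos_sim(query_embedding, embeddings)[0]
    # Pick the highest-scoring rows and format them from the metadata table
    top_indices = np.argsort(-scores.numpy())[:top_k]
    results = metadata.iloc[top_indices]
    return "\n\n".join(
        f"{row['title']} ({row['listed_in']}): {row['description']}"
        for _, row in results.iterrows()
    )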
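For a quick check of the routing logic without launching the interface, generate_input can also be called directly (for example in a notebook cell where the functions above are already defined); the argument order mirrors the click handler: input type, then image, then text. The image path here is purely illustrative:

from PIL import Image

poster = Image.open("poster.jpg")  # any local image; the file name is illustrative

# Text only: the query text goes straight to the vector search
print(generate_input("Text", text="dark scandinavian crime drama"))
# Image only: the BLIP caption becomes the query
print(generate_input("Image", image=poster))
# Both: the caption and the text are concatenated before searching
print(generate_input("Both", image=poster, text="dark scandinavian crime drama"))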