import gradio as gr
import numpy as np
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity

# Load the pre-trained SentenceTransformer model used for all embeddings
embedding_model = SentenceTransformer("thenlper/gte-large")

# Load the Netflix shows dataset (replace with your actual data);
# load_dataset returns a DatasetDict, so select the "train" split explicitly
dataset = load_dataset("hugginglearners/netflix-shows", split="train")

# Combine description, genre, and title into a single string for embedding
def combine_description_title_and_genre(description, listed_in, title):
    return f"{description} Genre: {listed_in} Title: {title}"

# Generate an embedding for a piece of text
def get_embedding(text):
    return embedding_model.encode(text)

# Precompute embeddings for the whole catalogue once at startup so that each
# query only needs to embed the query text itself
corpus_embeddings = np.array([
    get_embedding(combine_description_title_and_genre(item["description"], item["listed_in"], item["title"]))
    for item in dataset
])

# Vector search function
def vector_search(query):
    query_embedding = get_embedding(query)

    # Cosine similarity between the query and all show embeddings (shape: (n_shows,))
    similarities = cosine_similarity([query_embedding], corpus_embeddings)[0]

    # Note: "rating" in this dataset is a content-rating string (e.g. "TV-MA"),
    # not a numeric score, so it is shown in the results rather than used to
    # weight the similarity scores.

    # Get the indices of the top N most similar items (e.g., top 3)
    top_n = 3
    top_indices = similarities.argsort()[-top_n:][::-1]
    top_items = [dataset[int(i)] for i in top_indices]

    # Format the output for display
    search_result = ""
    for item in top_items:
        search_result += (
            f"Title: {item['title']}, Description: {item['description']}, "
            f"Genre: {item['listed_in']}, Rating: {item['rating']}\n"
        )

    return search_result

# Gradio Interface
def movie_search(query):
    return vector_search(query)

iface = gr.Interface(fn=movie_search, 
                     inputs="text", 
                     outputs="text", 
                     live=True,
                     title="Netflix Recommendation System",
                     description="Enter a query to get Netflix recommendations based on description and genre.")

iface.launch()
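# Optional sanity check without the UI (a hypothetical example query; run it
# before launch(), or in a separate session, since launch() blocks in a script):
#   print(vector_search("feel-good romantic comedy"))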

