File size: 3,397 Bytes
aaad9e0
890dc71
 
 
 
aaad9e0
890dc71
 
aaad9e0
890dc71
 
 
 
aaad9e0
890dc71
 
 
 
 
 
 
 
 
 
 
aaad9e0
890dc71
 
 
 
 
 
 
 
 
 
 
 
 
 
 
aaad9e0
890dc71
 
 
 
 
 
 
 
aaad9e0
890dc71
 
 
 
 
 
 
 
 
 
 
7505e2e
890dc71
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7505e2e
aaad9e0
890dc71
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
import gradio as gr
from huggingface_hub import InferenceClient
import pandas as pd
import torch
from sentence_transformers import SentenceTransformer

# Initialize the Hugging Face API client
# (zephyr-7b-beta is a free, instruction-tuned chat model served via the HF Inference API)
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Load and preprocess course data for search functionality.
# NOTE(review): assumes the CSV has at least 'title', 'description', and 'lessons'
# columns — verify against the scraper that produces courses_data.csv.
courses_df = pd.read_csv("courses_data.csv")  # Assuming courses_data.csv is already scraped
model = SentenceTransformer('distilbert-base-nli-stsb-mean-tokens')
# Precompute one embedding tensor per course description at startup so that
# each search only has to encode the query, not the whole catalog.
courses_df['embedding'] = courses_df['description'].apply(lambda x: model.encode(x, convert_to_tensor=True))

# Define a helper function to search for courses based on user query
def search_courses(query, top_k=5):
    """Return a markdown summary of the courses most similar to *query*.

    Encodes the query with the module-level SentenceTransformer, scores it
    against the precomputed course-description embeddings by cosine
    similarity, and formats the best matches.

    Parameters
    ----------
    query : str
        Free-text search query from the user.
    top_k : int, optional
        Maximum number of courses to return (default 5).

    Returns
    -------
    str
        Matches joined by blank lines; empty string if the catalog is empty.
    """
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Stack the per-row embeddings into one (n_courses, dim) tensor;
    # cosine_similarity broadcasts the 1-D query against each row.
    course_embeddings = torch.stack(courses_df['embedding'].tolist())
    cosine_scores = torch.nn.functional.cosine_similarity(query_embedding, course_embeddings)
    # Clamp k: torch.topk raises if k exceeds the number of courses.
    k = min(top_k, len(courses_df))
    top_results = torch.topk(cosine_scores, k=k)

    results = []
    for idx in top_results.indices:
        course = courses_df.iloc[idx.item()]
        results.append(f"**{course['title']}**\n{course['description']}\nNumber of Lessons: {course['lessons']}")
    return "\n\n".join(results)

# Modify respond function to include course search when "search: [query]" is detected
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Generator used by gr.ChatInterface: stream a reply to *message*.

    If the message starts with "search:" (case-insensitive), yields the
    course-search result once and stops. Otherwise, forwards the system
    prompt, chat history, and new message to the Inference API and yields
    the growing response text as tokens stream in.
    """
    # Check if the message is a course search command
    if message.lower().startswith("search:"):
        query = message[len("search:"):].strip()  # Extract search query
        yield search_courses(query)  # Perform course search
        return

    # Standard chat processing using Hugging Face Inference API for general chat
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    response = ""
    # Loop variable renamed from `message` (original shadowed the user input).
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Streamed deltas can carry content=None (e.g. the final chunk);
        # guard so string concatenation never raises TypeError.
        if token:
            response += token
        yield response

# Gradio chat interface setup.
# ChatInterface passes (message, history) plus each additional_inputs value,
# in order, as arguments to respond().
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        # System prompt shown as an editable textbox so users can retune the assistant.
        gr.Textbox(value="You are a helpful assistant that provides detailed information about courses on Analytics Vidhya. Please assist the user by answering questions about available courses, course content, instructors, ratings, pricing, and any other relevant details. If a user asks about course recommendations, suggest relevant courses based on their needs or preferences.", label="System message"),
        # Generation controls forwarded verbatim to client.chat_completion.
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
    description="Type 'search: [your query]' to search for courses on Analytics Vidhya, or chat with the assistant."
)

# Launch the app only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()