Threatthriver committed on
Commit
09fa947
1 Parent(s): ccbd1ae

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -84
app.py CHANGED
@@ -1,114 +1,83 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
- from bs4 import BeautifulSoup
4
- import requests
5
- import os
6
- from typing import List, Tuple, Optional
7
 
8
# --- Configuration ---
# Hugging Face model ID used for chat completion.
MODEL_NAME = "meta-llama/Meta-Llama-3.1-405B-Instruct"
# Name of the environment variable that must hold the Hugging Face API token.
API_KEY_ENV_VAR = "HF_TOKEN"
# Keyword in a user message that triggers a web search.
SEARCH_TRIGGER_WORD = "search"
# Default number of Yahoo search result snippets to scrape.
DEFAULT_NUM_RESULTS = 3
13
 
14
- # --- Utility Functions ---
15
-
16
def get_api_key() -> str:
    """Fetch the Hugging Face API token from the environment.

    Returns:
        The token stored in the variable named by ``API_KEY_ENV_VAR``.

    Raises:
        ValueError: If the environment variable is unset or empty.
    """
    token = os.getenv(API_KEY_ENV_VAR)
    if token:
        return token
    raise ValueError(f"API key not found. Please set the {API_KEY_ENV_VAR} environment variable.")
22
-
23
def scrape_yahoo_search(query: str, num_results: int = DEFAULT_NUM_RESULTS) -> Tuple[Optional[str], Optional[str]]:
    """Scrape Yahoo search results for *query* and format them as markdown.

    Args:
        query: Free-text search query; URL-encoded before the request.
        num_results: Maximum number of result snippets to return.

    Returns:
        A ``(results, url)`` tuple:
          * (formatted snippets, search URL) on success,
          * (None, search URL) when the page yields no parseable results,
          * (error message, None) on request or parsing failure.
    """
    from urllib.parse import quote_plus  # local import keeps module-level deps unchanged

    # FIX: the raw query was previously interpolated unescaped; spaces and
    # special characters produced malformed URLs.
    search_url = f"https://search.yahoo.com/search?p={quote_plus(query)}"
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'}

    try:
        # FIX: no timeout previously -- a stalled connection hung the app forever.
        response = requests.get(search_url, headers=headers, timeout=10)
        response.raise_for_status()

        soup = BeautifulSoup(response.content, 'html.parser')
        # NOTE(review): selector is tied to Yahoo's current markup; it will
        # silently return no results if the page layout changes.
        result_elements = soup.find_all('div', {'class': 'dd algo algo-sr Sr'}, limit=num_results)

        if not result_elements:
            return None, search_url

        results = [
            f"**Title:** {res.find('h3').get_text(strip=True)}\n**Snippet:** {res.find('div', {'class': 'compText aAbs'}).get_text(strip=True)}\n**URL:** {res.find('a')['href']}"
            for res in result_elements
            if res.find('h3') and res.find('div', {'class': 'compText aAbs'}) and res.find('a')
        ]
        return "\n\n".join(results), search_url

    except requests.RequestException as e:
        return f"Request error: {str(e)}", None
    except Exception as e:
        return f"Processing error: {str(e)}", None
49
-
50
def extract_search_query(message: str, trigger_word: str = SEARCH_TRIGGER_WORD) -> Optional[str]:
    """Extract the search query that follows *trigger_word* in *message*.

    The trigger word is matched case-insensitively, but the returned query
    preserves the user's original casing (FIX: the old code split the
    lowercased message and therefore returned a lowercased query, degrading
    case-sensitive searches).

    Args:
        message: The full user message.
        trigger_word: The keyword that marks the start of a search request.

    Returns:
        The query text after the trigger word, or None when the trigger is
        absent or nothing follows it.
    """
    idx = message.lower().find(trigger_word)
    if idx == -1:
        return None
    query = message[idx + len(trigger_word):].strip()
    return query if query else None
57
-
58
# --- Initialize Inference Client ---
# Module-level client; get_api_key() raises at import time if the token
# environment variable is unset, failing fast before the UI starts.
client = InferenceClient(model=MODEL_NAME, token=get_api_key())
60
-
61
# --- Chatbot Logic ---
def respond(
    message: str,
    history: List[Tuple[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
) -> str:
    """Generate a model reply, optionally augmented with web-search results.

    Args:
        message: Latest user message; may contain the search trigger word.
        history: Gradio chat history as (user_message, assistant_message) pairs.
        system_message: System prompt steering the assistant.
        max_tokens: Maximum number of tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Returns:
        The full assistant reply (streamed chunks are accumulated), or an
        error string if the inference call fails.
    """
    query = extract_search_query(message)

    if query:
        search_results, search_url = scrape_yahoo_search(query)
        # FIX: scrape_yahoo_search returns (error_message, None) on failure;
        # the old truthiness check injected that error text as if it were
        # search results. Require a URL too, which only success provides.
        if search_results and search_url:
            message += f"\n\n## Web Search Results:\n{search_results}\n**Source:** {search_url}"
        else:
            message += "\n\nI couldn't find any relevant web results for your query."

    # FIX: gradio ChatInterface history holds (user, assistant) tuples, not
    # (role, content) pairs -- the old code sent bogus roles to the API.
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    response = ""
    try:
        for response_chunk in client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            # FIX: delta.content can be None (e.g. on the final chunk);
            # `response += None` previously raised TypeError.
            response += response_chunk.choices[0].delta.content or ""
    except Exception as e:
        return f"AI model error: {str(e)}"

    return response
98
 
99
# --- Gradio Interface ---
# Chat UI; the extra widgets are passed into respond() after the message
# and history arguments.
demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        # System prompt shown to the model ahead of the conversation.
        gr.Textbox(
            value="You are a helpful and informative AI assistant.",
            label="System Message",
        ),
        # Generation controls.
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max New Tokens"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (Nucleus Sampling)",
        ),
    ],
    title="Chatbot with Search",
    description="Chat and search the web using the power of Meta-Llama!",
)
111
 
112
# --- Launch the App ---
# Start the Gradio server only when this file is executed directly,
# not when it is imported as a module.
if __name__ == "__main__":
    demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
 
 
 
3
 
4
# Initialize the InferenceClient with the model ID from Hugging Face
# NOTE(review): no token is passed here, so this presumably relies on
# anonymous access or an ambient HF_TOKEN picked up by huggingface_hub
# -- confirm against deployment config.
client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")
 
 
 
6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
def respond(
    message: str,
    history: list[tuple[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
):
    """
    Generates a streamed response from the AI model based on the user's
    message and chat history.

    Args:
        message (str): The user's input message.
        history (list): A list of tuples representing the conversation history (user, assistant).
        system_message (str): A system-level message guiding the AI's behavior.
        max_tokens (int): The maximum number of tokens for the output.
        temperature (float): Sampling temperature for controlling the randomness.
        top_p (float): Top-p (nucleus sampling) for controlling diversity.

    Yields:
        str: The accumulated AI response as it is generated.
    """
    # Prepare the conversation history for the API call
    messages = [{"role": "system", "content": system_message}]

    for user_input, assistant_response in history:
        if user_input:
            messages.append({"role": "user", "content": user_input})
        if assistant_response:
            messages.append({"role": "assistant", "content": assistant_response})

    # Add the latest user message to the conversation
    messages.append({"role": "user", "content": message})

    # Initialize an empty response
    response = ""
    try:
        # Generate a response from the model with streaming.
        # FIX: the loop variable was named `message`, shadowing the user's
        # input parameter; renamed to `chunk`.
        for chunk in client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            # FIX: delta.content can be None (e.g. on the final chunk);
            # `response += None` previously raised TypeError mid-stream.
            response += chunk.choices[0].delta.content or ""
            yield response
    except Exception as e:
        yield f"An error occurred: {str(e)}"
60
 
 
61
 
62
# Define the ChatInterface with additional input components for user
# customization; each widget's value is forwarded to respond().
demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
    title="Chatbot Interface",
    description="A customizable chatbot interface using Hugging Face's Inference API.",
)
80
 
81
# Launch the Gradio interface
# Only start the web server when this file is executed directly,
# not when imported as a module.
if __name__ == "__main__":
    demo.launch()