SaisExperiments committed on
Commit
c137ca3
·
verified ·
1 Parent(s): 43de95d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -12
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
- from huggingface_hub.inference_api import InferenceApiException
 
4
  import os
5
 
6
  """
@@ -13,17 +14,14 @@ Alternatively, pass your token directly: InferenceClient(token="hf_YOUR_TOKEN")
13
  # Initialize the Inference Client
14
  # It will try to use HUGGING_FACE_HUB_TOKEN environment variable or cached login
15
  try:
 
 
 
16
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
17
  except Exception as e:
18
  print(f"Error initializing InferenceClient: {e}")
19
- # Optionally, provide a default token if needed and available
20
- # token = os.getenv("HUGGING_FACE_HUB_TOKEN")
21
- # if token:
22
- # client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=token)
23
- # else:
24
- # raise ValueError("Could not initialize InferenceClient. Ensure you are logged in or provide a token.") from e
25
- # For now, let's just raise it if initialization fails fundamentally
26
- raise
27
 
28
  def respond(
29
  message: str,
@@ -76,9 +74,20 @@ def respond(
76
  response += token
77
  yield response # Yield the accumulated response so far
78
 
79
- except InferenceApiException as e:
80
- print(f"Inference API Error: {e}")
81
- yield f"Sorry, I encountered an error: {e}"
 
 
 
 
 
 
 
 
 
 
 
82
  except Exception as e:
83
  print(f"An unexpected error occurred: {e}")
84
  yield f"Sorry, an unexpected error occurred: {e}"
@@ -120,4 +129,6 @@ demo = gr.ChatInterface(
120
 
121
 
122
  if __name__ == "__main__":
 
 
123
  demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ # Import the correct exception class
4
+ from huggingface_hub.utils import HfHubHTTPError
5
  import os
6
 
7
  """
 
14
  # Initialize the Inference Client
15
  # It will try to use HUGGING_FACE_HUB_TOKEN environment variable or cached login
16
  try:
17
+ # You might need to provide a token if you haven't logged in via CLI
18
+ # token = os.getenv("HUGGING_FACE_HUB_TOKEN")
19
+ # client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=token)
20
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
21
  except Exception as e:
22
  print(f"Error initializing InferenceClient: {e}")
23
+ raise ValueError("Could not initialize InferenceClient. Ensure you are logged in or provide a token.") from e
24
+
 
 
 
 
 
 
25
 
26
  def respond(
27
  message: str,
 
74
  response += token
75
  yield response # Yield the accumulated response so far
76
 
77
+ # Catch HTTP errors from the Hugging Face Hub API
78
+ except HfHubHTTPError as e:
79
+ error_message = f"Inference API Error: {e}"
80
+ # Try to get more details from the response if available
81
+ if e.response:
82
+ try:
83
+ details = e.response.json()
84
+ error_message += f"\nDetails: {details.get('error', 'N/A')}"
85
+ except Exception: # Catch potential JSON decoding errors
86
+ pass # Keep the original error message
87
+ print(error_message)
88
+ yield f"Sorry, I encountered an error communicating with the model service: {e}" # Display a user-friendly message
89
+
90
+ # Catch other potential errors
91
  except Exception as e:
92
  print(f"An unexpected error occurred: {e}")
93
  yield f"Sorry, an unexpected error occurred: {e}"
 
129
 
130
 
131
  if __name__ == "__main__":
132
+ # Ensure huggingface_hub library is up-to-date: pip install --upgrade huggingface_hub
133
+ print("Launching Gradio Interface...")
134
  demo.launch()