Ozaii committed on
Commit
12da0b8
·
verified ·
1 Parent(s): 441f0fb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -20
app.py CHANGED
@@ -8,7 +8,7 @@ from datetime import datetime
8
  from threading import Thread
9
 
10
  # Load the model and tokenizer
11
- MODEL_PATH = "Ozaii/zephyr-bae" # Your Hugging Face model path
12
 
13
  print("Attempting to load Zephyr... Cross your fingers! 🤞")
14
 
@@ -20,12 +20,13 @@ try:
20
  base_model = AutoModelForCausalLM.from_pretrained(
21
  peft_config.base_model_name_or_path,
22
  torch_dtype=torch.float16,
 
23
  device_map="auto",
24
- low_cpu_mem_usage=True
25
  )
26
 
27
  # Load the PEFT model
28
- model = PeftModel.from_pretrained(base_model, MODEL_PATH)
29
 
30
  # Load the tokenizer
31
  tokenizer = AutoTokenizer.from_pretrained(peft_config.base_model_name_or_path)
@@ -107,23 +108,19 @@ def add_feedback(user_message, bot_message, rating, note):
107
  return "Feedback saved successfully!"
108
 
109
  # Gradio interface
110
def gradio_chat(message, history):
    """Stream Zephyr's reply into the Gradio chat history.

    Appends a placeholder (message, "") pair to *history*, then rewrites
    that last entry with each partial response produced by
    chat_with_zephyr, yielding the full history after every update so the
    UI can refresh incrementally.
    """
    # Reserve a slot for the incoming reply; the list is mutated in place
    # so the Gradio state object stays the same.
    history.append((message, ""))
    prior_turns = history[:-1]
    for partial_reply in chat_with_zephyr(message, prior_turns):
        history[-1] = (message, partial_reply)
        yield history
115
-
116
def submit_feedback(rating, note, history):
    """Record user feedback for the most recent chat exchange.

    Args:
        rating: The user's rating for the last bot reply.
        note: Free-form feedback text.
        history: List of (user_message, bot_message) tuples.

    Returns:
        A status string describing whether the feedback was recorded.
    """
    # Guard clause (Pythonic truthiness instead of len(...) > 0):
    # there is nothing to rate when the conversation is empty.
    if not history:
        return "No conversation to provide feedback on."
    last_user_message, last_bot_message = history[-1]
    add_feedback(last_user_message, last_bot_message, rating, note)
    return f"Feedback submitted for: '{last_bot_message}'"
122
-
123
def undo_last_message(history):
    """Remove the most recent (user, bot) exchange, if any.

    Mutates *history* in place and returns the same list so the Gradio
    component can be updated directly.
    """
    if not history:
        return history
    history.pop()
    return history
127
 
128
  css = """
129
  body {
 
8
  from threading import Thread
9
 
10
  # Load the model and tokenizer
11
+ MODEL_PATH = "Ozaii/zephyr-bae"
12
 
13
  print("Attempting to load Zephyr... Cross your fingers! 🤞")
14
 
 
20
  base_model = AutoModelForCausalLM.from_pretrained(
21
  peft_config.base_model_name_or_path,
22
  torch_dtype=torch.float16,
23
+ low_cpu_mem_usage=True,
24
  device_map="auto",
25
+ trust_remote_code=True # Add this line
26
  )
27
 
28
  # Load the PEFT model
29
+ model = PeftModel.from_pretrained(base_model, MODEL_PATH, is_trainable=False)
30
 
31
  # Load the tokenizer
32
  tokenizer = AutoTokenizer.from_pretrained(peft_config.base_model_name_or_path)
 
108
  return "Feedback saved successfully!"
109
 
110
  # Gradio interface
111
def chat_with_zephyr(message, history):
    """Produce Zephyr's reply to a user message.

    Placeholder implementation: the model-backed generation logic is not
    wired in yet, so every call returns the same canned greeting.

    Args:
        message: The latest user message (currently unused).
        history: Prior conversation turns (currently unused).

    Returns:
        The fixed greeting string.
    """
    # TODO: replace with the real chat logic backed by the loaded model.
    return "Hello! I'm Zephyr. How can I help you today?"
115
+
116
# Build the Gradio chat UI around the chat handler. Example prompts are
# shown but not pre-computed (cache_examples=False) since the handler is
# still a placeholder.
iface = gr.ChatInterface(
    chat_with_zephyr,
    title="Chat with Zephyr: Your AI Boyfriend",
    description="Zephyr is an AI trained to be your virtual boyfriend. Chat with him and see where the conversation goes!",
    examples=[
        "Hey Zephyr, how are you feeling today?",
        "What's your idea of a perfect date?",
        "Tell me something romantic!",
    ],
    cache_examples=False,
)
123
+
 
 
 
 
124
 
125
  css = """
126
  body {