Commit 60a54bb • abrakjamson committed
1 Parent(s): d646867
Adding more control models
Files changed:
- app.py +9 -12
- lazy.gguf +0 -0
- right-leaning.gguf +0 -0
- tripping.gguf +0 -0
app.py CHANGED
@@ -9,11 +9,11 @@ import gradio as gr
 from huggingface_hub import login
 
 # Initialize model and tokenizer
-mistral_path = "mistralai/Mistral-7B-Instruct-v0.3"
-
+#mistral_path = "mistralai/Mistral-7B-Instruct-v0.3"
+mistral_path = r"E:/language_models/models/mistral"
 
-access_token = os.getenv("mistralaccesstoken")
-login(access_token)
+#access_token = os.getenv("mistralaccesstoken")
+#login(access_token)
 
 tokenizer = AutoTokenizer.from_pretrained(mistral_path)
 tokenizer.pad_token_id = 0
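This hunk trades the gated Hub checkpoint (plus token login) for a local copy of the model. As a minimal sketch, not part of the commit, both options could sit behind an environment variable so the same file runs locally and on the Space; MISTRAL_LOCAL_PATH is a hypothetical variable name:

import os
from huggingface_hub import login
from transformers import AutoTokenizer, AutoModelForCausalLM

# Prefer a local checkout when one is configured, otherwise fall back to the Hub.
mistral_path = os.getenv("MISTRAL_LOCAL_PATH", "mistralai/Mistral-7B-Instruct-v0.3")

access_token = os.getenv("mistralaccesstoken")
if access_token and not os.path.isdir(mistral_path):
    # Authentication is only needed when pulling the gated model from the Hub.
    login(access_token)

tokenizer = AutoTokenizer.from_pretrained(mistral_path)
model = AutoModelForCausalLM.from_pretrained(mistral_path)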
@@ -95,12 +95,9 @@ def generate_response(system_prompt, user_message, history, max_new_tokens, repi
     # Construct the formatted prompt based on history
     if len(history) > 0:
         for turn in history:
-
-
-            if turn.role == 'user':
-                formatted_prompt += f"{user_tag} {turn.content}{asst_tag}"
-            elif turn.role == 'assistant':
-                formatted_prompt += f" {turn.content}"
+            user_msg, asst_msg = turn
+            formatted_prompt += f"{user_tag} {user_msg} {asst_tag} {asst_msg}"
+
 
     if len(history) > 0:
         formatted_prompt += "</s>"
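The old loop read .role / .content attributes from each history entry; the new code unpacks each entry as a (user, assistant) pair, matching Gradio's tuple-style chatbot history. A standalone sketch of the new folding, assuming user_tag and asst_tag are Mistral's "[INST]" / "[/INST]" markers (their definitions sit outside this hunk):

user_tag, asst_tag = "[INST]", "[/INST]"  # assumed values; defined elsewhere in app.py

def fold_history(history):
    # history is Gradio tuple-style: [(user_message, assistant_message), ...]
    formatted_prompt = ""
    for user_msg, asst_msg in history:
        # Each past exchange becomes "[INST] user [/INST] assistant".
        formatted_prompt += f"{user_tag} {user_msg} {asst_tag} {asst_msg}"
    if len(history) > 0:
        formatted_prompt += "</s>"  # close the last completed exchange, as in app.py
    return formatted_prompt

print(fold_history([("Hello", "Hi there!"), ("Summarize that", "Sure: hi.")]))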
@@ -153,7 +150,7 @@ def reset_chat():
 
 # Build the Gradio interface
 with gr.Blocks() as demo:
-    gr.Markdown("#
+    gr.Markdown("# 🧠 LLM Brain Control")
     gr.Markdown("Usage demo: (link)")
 
     with gr.Row():
@@ -233,7 +230,7 @@ with gr.Blocks() as demo:
     # Submit and New Chat buttons
     submit_button = gr.Button("💬 Submit")
     retry_button = gr.Button("🔃 Retry last turn")
-    new_chat_button = gr.Button("
+    new_chat_button = gr.Button("🌟 New Chat")
 
 
     inputs_list = [system_prompt, user_input, chatbot, max_new_tokens, repetition_penalty] + control_checks + control_sliders
lazy.gguf ADDED
Binary file (509 kB)

right-leaning.gguf ADDED
Binary file (509 kB)

tripping.gguf ADDED
Binary file (509 kB)
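The three added .gguf files ("lazy", "right-leaning", "tripping") are additional control vectors for steering the model, per the commit message "Adding more control models". The loading and steering code is not part of this commit; below is only an illustrative sketch of the general technique of adding a per-layer steering vector to a decoder layer's output with a PyTorch forward hook. load_control_vector, the layer index, and the hidden size are placeholders, not the Space's actual implementation (which more likely relies on a control-vector library such as repeng).

import torch
from transformers import AutoModelForCausalLM

def load_control_vector(path):
    # Hypothetical helper: a real implementation would parse the .gguf file and
    # return {layer_index: direction_tensor}. Here we fake one 4096-dim direction.
    return {20: torch.randn(4096)}

def apply_control(model, directions, strength):
    """Add strength * direction to the chosen decoder layers' hidden states."""
    handles = []
    for layer_idx, direction in directions.items():
        layer = model.model.layers[layer_idx]

        def hook(module, args, output, direction=direction):
            # Mistral decoder layers return a tuple; element 0 is the hidden states.
            hidden = output[0]
            hidden = hidden + strength * direction.to(device=hidden.device, dtype=hidden.dtype)
            return (hidden,) + output[1:]

        handles.append(layer.register_forward_hook(hook))
    return handles  # call handle.remove() on each to switch the control off

model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
handles = apply_control(model, load_control_vector("tripping.gguf"), strength=1.0)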