Changed llama model
app.py
CHANGED
@@ -68,58 +68,58 @@ model_sentiment.eval()
 model_sentiment.to(device)
 
 
-model_name_or_path = "TheBloke/Llama-2-
-model_basename = "llama-2-
+model_name_or_path = "TheBloke/Llama-2-7B-Chat-GGUF"
+model_basename = "llama-2-7b-chat.Q2_K.gguf"  # the model file is in GGUF format
 
 # Download the model file
 print('downloading llama model...')
-[removed line 76 is truncated in the rendered diff]
+model_path_llama = hf_hub_download(repo_id=model_name_or_path, filename=model_basename, force_download=True, local_dir="./llama_model")
 print('finished download...')
 # Initialize the Llama model with appropriate settings for GPU
-[removed lines 79-122 are truncated in the rendered diff; only a few comment markers survive]
+lcpp_llm = Llama(
+    model_path=model_path_llama,
+)
+
+def generate_email_response(email_prompt):
+    # Check input received by the function
+    print("Received prompt:", email_prompt)
+
+    # Determine if the input is a shorthand command or an actual email
+    if 'email to' in email_prompt.lower():
+        # Assume it's a shorthand command, format appropriately
+        formatted_prompt = f'''
+        Email received: "{email_prompt}"
+        Respond to this email, ensuring a professional tone, providing a concise update, and addressing any potential concerns the sender might have.
+        Response:
+        '''
+    else:
+        # Assume it's direct email content
+        formatted_prompt = f'''
+        Email received: "{email_prompt}"
+        Respond to this email, ensuring a professional tone, providing a concise update, and addressing any potential concerns the sender might have.
+        Response:
+        '''
+
+    # Generate a response using the Llama-2 model
+    try:
+        response = lcpp_llm(
+            prompt=formatted_prompt,
+            max_tokens=256,
+            temperature=0.5,
+            top_p=0.95,
+            repeat_penalty=1.2,
+            top_k=150,
+            echo=True
+        )
+        generated_response = response["choices"][0]["text"]
+        # Remove the input part from the output if it is included
+        if formatted_prompt in generated_response:
+            generated_response = generated_response.replace(formatted_prompt, '').strip()
+        print("Generated response:", generated_response)
+        return generated_response
+    except Exception as e:
+        print("Error in response generation:", str(e))
+        return "Failed to generate response, please check the console for errors."
 
 def classify_sentiment(text):
     # Encode the text using the tokenizer
@@ -214,15 +214,15 @@ iface_ner = gr.Interface(
     title="NER Analysis",
     description="Performs Named Entity Recognition using spaCy and Transformer models."
 )
-[removed lines 217-223 are truncated in the rendered diff]
+iface_response = gr.Interface(
+    fn=generate_email_response,
+    inputs=gr.Textbox(lines=10, placeholder="Enter the email prompt..."),
+    outputs=gr.Textbox(label="Generated Email Response"),
+    title="Email Response Generator",
+    description="Generate email responses using the Llama-2 model."
+)
 
 # Using tabs to organize the interfaces
-tabs = gr.TabbedInterface([iface_category, iface_sentiment,iface_summary,iface_ner], ["Category", "Sentiment"," Summary","NER"], css=custom_css)
+tabs = gr.TabbedInterface([iface_category, iface_sentiment, iface_summary, iface_ner, iface_response], ["Category", "Sentiment", "Summary", "NER", "Response Generator"], css=custom_css)
 tabs.launch(share=True)
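
A note on the Llama initialization in the first hunk: the comment at line 78 promises "appropriate settings for GPU", but the committed Llama(model_path=model_path_llama) call passes no GPU options, so llama-cpp-python will run inference on CPU by default. Below is a minimal sketch of what GPU offload usually looks like, assuming a GPU-enabled (CUDA or Metal) build of llama-cpp-python; n_gpu_layers and n_ctx are standard Llama constructor arguments, not part of this commit, and dropping force_download=True avoids re-downloading the multi-gigabyte GGUF file on every Space restart.

from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Sketch only, not part of the commit.
model_path_llama = hf_hub_download(
    repo_id="TheBloke/Llama-2-7B-Chat-GGUF",  # same repo as in the commit
    filename="llama-2-7b-chat.Q2_K.gguf",     # same quantized GGUF file
    local_dir="./llama_model",                # cached locally; no force_download
)

lcpp_llm = Llama(
    model_path=model_path_llama,
    n_gpu_layers=-1,  # offload all layers to the GPU (needs a GPU-enabled build)
    n_ctx=2048,       # context window; Llama-2 supports up to 4096 tokens
)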
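
Two smaller observations on generate_email_response: both branches of the if 'email to' in email_prompt.lower() check build an identical formatted_prompt, so the branch currently has no effect; and because the call passes echo=True, the returned text includes the prompt, which the code then strips back out with str.replace. A simpler equivalent, assuming the standard llama-cpp-python completion dict layout, is to leave echo off (sketch, not part of the commit):

response = lcpp_llm(
    prompt=formatted_prompt,
    max_tokens=256,
    temperature=0.5,
    top_p=0.95,
    repeat_penalty=1.2,
    top_k=150,
    echo=False,  # return only the completion, so no prompt-stripping is needed
)
generated_response = response["choices"][0]["text"].strip()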