as-cle-bert committed
Commit 6b09d9d
1 parent: c66a7e5
Create ape.py
ape.py ADDED
@@ -0,0 +1,72 @@
import os
import time

import gradio as gr
from langchain_anthropic import ChatAnthropic
from langchain_cohere import ChatCohere
from langchain_groq import ChatGroq
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

from utils import Translation

# Map each selectable model name to its LangChain chat class and to the
# environment variable that holds the matching provider API key.
NAME2CHAT = {"Cohere": ChatCohere, "claude-3-opus-20240229": ChatAnthropic, "claude-3-sonnet-20240229": ChatAnthropic, "claude-3-haiku-20240307": ChatAnthropic, "llama3-8b-8192": ChatGroq, "llama3-70b-8192": ChatGroq, "mixtral-8x7b-32768": ChatGroq, "gemma-7b-it": ChatGroq, "gpt-4o": ChatOpenAI, "gpt-3.5-turbo-0125": ChatOpenAI}
NAME2APIKEY = {"Cohere": "COHERE_API_KEY", "claude-3-opus-20240229": "ANTHROPIC_API_KEY", "claude-3-sonnet-20240229": "ANTHROPIC_API_KEY", "claude-3-haiku-20240307": "ANTHROPIC_API_KEY", "llama3-8b-8192": "GROQ_API_KEY", "llama3-70b-8192": "GROQ_API_KEY", "mixtral-8x7b-32768": "GROQ_API_KEY", "gemma-7b-it": "GROQ_API_KEY", "gpt-4o": "OPENAI_API_KEY", "gpt-3.5-turbo-0125": "OPENAI_API_KEY"}

parser = StrOutputParser()


def reply(message, history, name, api_key, temperature, max_new_tokens, system_template):
    # Expose the user's key through the environment variable the chosen provider expects.
    os.environ[NAME2APIKEY[name]] = api_key
    if name == "Cohere":
        # ChatCohere uses its default model, so no model name is passed.
        model = NAME2CHAT[name](temperature=temperature, max_tokens=max_new_tokens)
    else:
        model = NAME2CHAT[name](model=name, temperature=temperature, max_tokens=max_new_tokens)
    prompt_template = ChatPromptTemplate.from_messages(
        [("system", system_template), ("user", "{text}")]
    )
    chain = prompt_template | model | parser
    # Detect the language of the incoming message so the reply comes back in the same language.
    txt = Translation(message, "en")
    if txt.original == "en":
        response = chain.invoke({"text": message})
        r = ''
        for c in response:
            r += c
            time.sleep(0.001)
            yield r
    else:
        # Translate the message to English, query the model, then translate the reply back.
        translation = txt.translatef()
        response = chain.invoke({"text": translation})
        t = Translation(response, txt.original)
        res = t.translatef()
        r = ''
        for c in res:
            r += c
            time.sleep(0.001)
            yield r


chat_model = gr.Dropdown(
    [m for m in list(NAME2APIKEY)], label="Chat Model", info="Choose one of the available chat models"
)

user_api_key = gr.Textbox(
    label="API key",
    info="Paste your API key here",
    lines=1,
    type="password",
)

user_temperature = gr.Slider(0, 1, value=0.5, label="Temperature", info="Select model temperature")

user_max_new_tokens = gr.Slider(0, 8192, value=1024, label="Max new tokens", info="Select max output tokens (higher number of tokens will result in a longer latency)")

user_system_template = gr.Textbox(label="System Template", info="Customize your assistant with your instructions", value="You are a helpful assistant")

additional_accordion = gr.Accordion(label="Parameters to be set before you start chatting", open=True)

demo = gr.ChatInterface(fn=reply, additional_inputs=[chat_model, user_api_key, user_temperature, user_max_new_tokens, user_system_template], additional_inputs_accordion=additional_accordion, title="Chat with Anthropic, OpenAI, Groq and Cohere Models🤖")


if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", share=False)
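ape.py imports Translation from a local utils module that is not part of this commit. Judging only from how it is used above (constructor Translation(text, dest_lang), attribute .original holding the detected source-language code, and .translatef() returning the translated text), a minimal sketch of such a helper could look like the following; this assumes the langdetect and deep-translator packages and is not the actual utils.py of this Space.

# Hypothetical sketch of the Translation helper expected by ape.py (assumption, not the committed utils.py).
from langdetect import detect
from deep_translator import GoogleTranslator


class Translation:
    def __init__(self, text, destination):
        self.text = text
        self.destination = destination
        # Detected language code of the input text, e.g. "en", "it", "fr"
        self.original = detect(text)

    def translatef(self):
        # Translate the stored text from its detected language into the destination language.
        translator = GoogleTranslator(source=self.original, target=self.destination)
        return translator.translate(self.text)

With a helper of this shape, reply() can answer a non-English question in its original language while still prompting the model in English.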