Spaces:
Sleeping
sainathBelagavi
committed on
Create app.py
app.py
ADDED
@@ -0,0 +1,113 @@
+import streamlit as st
+from huggingface_hub import InferenceClient
+import os
+import sys
+
+st.title("CODEFUSSION ☄")  # Changed from "strangerzone.world🗞️"
+
+base_url = "https://api-inference.huggingface.co/models/"
+
+API_KEY = os.environ.get('HUGGINGFACE_API_KEY')
+# print(API_KEY)
+# headers = {"Authorization":"Bearer "+API_KEY}
+
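+# Note: API_KEY is read above but is never passed to the InferenceClient created
+# below, so requests rely on whatever token huggingface_hub finds in the
+# environment (HF_TOKEN or a cached login), or go out unauthenticated.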
+model_links = {
+    "LegacyLift🚀": base_url + "mistralai/Mistral-7B-Instruct-v0.2",  # Changed from "Dorado🥤"
+    "ModernMigrate⭐": base_url + "mistralai/Mixtral-8x7B-Instruct-v0.1",  # Changed from "Hercules⭐"
+    "RetroRecode🔄": base_url + "microsoft/Phi-3-mini-4k-instruct"  # Changed from "Lepus🚀"
+}
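+# Note: each value is a full serverless Inference API URL; InferenceClient also
+# accepts a bare model id (e.g. "mistralai/Mistral-7B-Instruct-v0.2") here.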
+
+# Pull info about the model to display
+model_info = {
+    "LegacyLift🚀": {
+        'description': """The LegacyLift model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+            \nThis model is best for minimal problem-solving, content writing, and daily tips.\n""",
+        'logo': './dorado.png'
+    },
+
+    "ModernMigrate⭐": {
+        'description': """The ModernMigrate model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+            \nThis model excels in coding, logical reasoning, and high-speed inference.\n""",
+        'logo': './hercules.png'
+    },
+
+    "RetroRecode🔄": {
+        'description': """The RetroRecode model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+            \nThis model is best suited for critical development, practical knowledge, and serverless inference.\n""",
+        'logo': './lepus.png'
+    },
+}
+
+def format_prompt(message, custom_instructions=None):
+    # Wrap the optional instructions and the user message in Mistral-style [INST] tags
+    prompt = ""
+    if custom_instructions:
+        prompt += f"[INST] {custom_instructions} [/INST]"
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
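+# Note: the [INST]...[/INST] wrapper matches the Mistral/Mixtral instruct prompt
+# format; microsoft/Phi-3-mini-4k-instruct expects a different chat template, so
+# the RetroRecode option may produce weaker results with this prompt shape.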
+
+def reset_conversation():
+    '''
+    Resets Conversation
+    '''
+    st.session_state.conversation = []
+    st.session_state.messages = []
+    return None
+
+models = [key for key in model_links.keys()]
+
+selected_model = st.sidebar.selectbox("Select Model", models)
+
+temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
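+# Note: the slider allows temperature=0.0, which the hosted text-generation
+# endpoint may reject (it expects a strictly positive temperature); a lower
+# bound slightly above 0 would avoid that edge case.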
+
+st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button
+
+st.sidebar.write(f"You're now chatting with **{selected_model}**")
+st.sidebar.markdown(model_info[selected_model]['description'])
+st.sidebar.image(model_info[selected_model]['logo'])
+st.sidebar.markdown("*Generated content may be inaccurate or false.*")
+st.sidebar.markdown("\nYou can support me by buying me a coffee🥤 [here](https://buymeacoffee.com/prithivsakthi).")
+
+if "prev_option" not in st.session_state:
+    st.session_state.prev_option = selected_model
+
+if st.session_state.prev_option != selected_model:
+    st.session_state.messages = []
+    # st.write(f"Changed to {selected_model}")
+    st.session_state.prev_option = selected_model
+    reset_conversation()
+
+repo_id = model_links[selected_model]
+
+st.subheader(f'{selected_model}')
+# st.title(f'ChatBot Using {selected_model}')
+
+if "messages" not in st.session_state:
+    st.session_state.messages = []
+
+# Replay the stored chat history so earlier turns stay visible across reruns
+for message in st.session_state.messages:
+    with st.chat_message(message["role"]):
+        st.markdown(message["content"])
+
+if prompt := st.chat_input(f"Hi I'm {selected_model}🗞️, How can I help you today?"):
+    custom_instruction = "Act like a Human in conversation"
+
+    with st.chat_message("user"):
+        st.markdown(prompt)
+
+    st.session_state.messages.append({"role": "user", "content": prompt})
+
+    formatted_text = format_prompt(prompt, custom_instruction)
+
+    with st.chat_message("assistant"):
+        client = InferenceClient(
+            model=model_links[selected_model],
+        )
+
+        output = client.text_generation(
+            formatted_text,
+            temperature=temp_values,  # 0.5
+            max_new_tokens=3000,
+            stream=True
+        )
+
+        # st.write_stream renders the streamed tokens and returns the full response text
+        response = st.write_stream(output)
+    st.session_state.messages.append({"role": "assistant", "content": response})