sainathBelagavi committed
Commit 1c2e9be · verified · 1 Parent(s): 55b1ec6

Update app.py

Files changed (1): app.py +42 -56
app.py CHANGED
@@ -1,48 +1,48 @@
 import streamlit as st
 from huggingface_hub import InferenceClient
 import os
-import pickle
+import sys
 
 st.title("CODEFUSSION ☄")
 
 base_url = "https://api-inference.huggingface.co/models/"
+
 API_KEY = os.environ.get('HUGGINGFACE_API_KEY')
+# print(API_KEY)
+# headers = {"Authorization":"Bearer "+API_KEY}
 
 model_links = {
     "LegacyLift🚀": base_url + "mistralai/Mistral-7B-Instruct-v0.2",
     "ModernMigrate⭐": base_url + "mistralai/Mixtral-8x7B-Instruct-v0.1",
     "RetroRecode🔄": base_url + "microsoft/Phi-3-mini-4k-instruct"
 }
 
+# Pull info about the model to display
 model_info = {
     "LegacyLift🚀": {
-        'description': """The LegacyLift model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \n\nThis model is best for minimal problem-solving, content writing, and daily tips.\n""",
+        'description': """The LegacyLift model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+        \nThis model is best for minimal problem-solving, content writing, and daily tips.\n""",
         'logo': './11.jpg'
     },
+
     "ModernMigrate⭐": {
-        'description': """The ModernMigrate model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \n\nThis model excels in coding, logical reasoning, and high-speed inference. \n""",
+        'description': """The ModernMigrate model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+        \nThis model excels in coding, logical reasoning, and high-speed inference. \n""",
        'logo': './2.jpg'
     },
+
     "RetroRecode🔄": {
-        'description': """The RetroRecode model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \n\nThis model is best suited for critical development, practical knowledge, and serverless inference.\n""",
+        'description': """The RetroRecode model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+        \nThis model is best suited for critical development, practical knowledge, and serverless inference.\n""",
         'logo': './3.jpg'
     },
 }
 
-def format_prompt(message, conversation_history, custom_instructions=None):
+def format_promt(message, custom_instructions=None):
     prompt = ""
     if custom_instructions:
-        prompt += f"\[INST\] {custom_instructions} \[/INST\]"
-
-    # Add conversation history to the prompt
-    prompt += "\[CONV_HISTORY\]\n"
-    for role, content in conversation_history:
-        prompt += f"{role.upper()}: {content}\n"
-    prompt += "\[/CONV_HISTORY\]"
-
-    # Add the current message
-    prompt += f"\[INST\] {message} \[/INST\]"
-
+        prompt += f"[INST] {custom_instructions} [/INST]"
+    prompt += f"[INST] {message} [/INST]"
     return prompt
 
 def reset_conversation():
@@ -51,46 +51,38 @@ def reset_conversation():
     '''
     st.session_state.conversation = []
     st.session_state.messages = []
-    save_conversation_history([])
     return None
 
-def load_conversation_history():
-    history_file = "conversation_history.pickle"
-    if os.path.exists(history_file):
-        with open(history_file, "rb") as f:
-            conversation_history = pickle.load(f)
-    else:
-        conversation_history = []
-    return conversation_history
-
-def save_conversation_history(conversation_history):
-    history_file = "conversation_history.pickle"
-    with open(history_file, "wb") as f:
-        pickle.dump(conversation_history, f)
-
 models = [key for key in model_links.keys()]
+
 selected_model = st.sidebar.selectbox("Select Model", models)
+
 temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
+
 st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button
 
 st.sidebar.write(f"You're now chatting with **{selected_model}**")
 st.sidebar.markdown(model_info[selected_model]['description'])
 st.sidebar.image(model_info[selected_model]['logo'])
-
-st.sidebar.markdown("\*Generating the code might go slow if you are using low power resources \*")
+st.sidebar.markdown("*Generating the code might go slow if you are using low power resources *")
 
 if "prev_option" not in st.session_state:
     st.session_state.prev_option = selected_model
 
 if st.session_state.prev_option != selected_model:
     st.session_state.messages = []
+    # st.write(f"Changed to {selected_model}")
     st.session_state.prev_option = selected_model
+    reset_conversation()
 
 repo_id = model_links[selected_model]
+
 st.subheader(f'{selected_model}')
+# st.title(f'ChatBot Using {selected_model}')
 
-# Load the conversation history from the file
-st.session_state.messages = load_conversation_history()
+if "messages" not in st.session_state:
+    st.session_state.messages = []
 
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
@@ -98,30 +90,24 @@ for message in st.session_state.messages:
 
 if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"):
     custom_instruction = "Act like a Human in conversation"
+
     with st.chat_message("user"):
         st.markdown(prompt)
 
     st.session_state.messages.append({"role": "user", "content": prompt})
-    conversation_history = [(message["role"], message["content"]) for message in st.session_state.messages]
-
-    formatted_text = format_prompt(prompt, conversation_history, custom_instruction)
-
-    max_new_tokens = 3000
-    if selected_model != "RetroRecode🔄":
-        input_tokens = len(formatted_text.split())
-        max_tokens = {"LegacyLift🚀": 32000, "ModernMigrate⭐": 8192}
-        max_new_tokens = max_tokens[selected_model] - input_tokens
+
+    formated_text = format_promt(prompt, custom_instruction)
 
     with st.chat_message("assistant"):
-        client = InferenceClient(model=model_links[selected_model])
+        client = InferenceClient(
+            model=model_links[selected_model], )
+
         output = client.text_generation(
-            formatted_text,
-            temperature=temp_values,
-            max_new_tokens=max_new_tokens,
+            formated_text,
+            temperature=temp_values,  # 0.5
+            max_new_tokens=3000,
            stream=True
         )
+
         response = st.write_stream(output)
-    st.session_state.messages.append({"role": "assistant", "content": response})
-
-    # Save the updated conversation history to the file
-    save_conversation_history(st.session_state.messages)
+    st.session_state.messages.append({"role": "assistant", "content": response})
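
After this change, the prompt is built from Mistral-style [INST] ... [/INST] blocks only: the [CONV_HISTORY] block, the token-budget arithmetic, and the pickle-backed persistence are all gone, so each generation sees just the hard-coded instruction and the latest user message. A minimal sketch of what the committed helper now emits (the sample message string is illustrative, not from the repo):

    def format_promt(message, custom_instructions=None):
        # Mirrors the committed helper: optional instruction block, then the user message.
        prompt = ""
        if custom_instructions:
            prompt += f"[INST] {custom_instructions} [/INST]"
        prompt += f"[INST] {message} [/INST]"
        return prompt

    # Illustrative call using the app's hard-coded instruction; prints:
    # [INST] Act like a Human in conversation [/INST][INST] Port this COBOL loop to Python [/INST]
    print(format_promt("Port this COBOL loop to Python", "Act like a Human in conversation"))

The streamed reply is then rendered by st.write_stream(output), which consumes the token iterator that client.text_generation(..., stream=True) yields and returns the concatenated text, which is what gets appended to st.session_state.messages.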