import streamlit as st
from huggingface_hub import InferenceClient
import os
import pickle

st.title("Transcription Summarization")
# Base URL and API Key
API_KEY = os.environ.get('HUGGINGFACE_API_KEY')
BASE_URL = "https://api-inference.huggingface.co/models/"
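# The key is read from the environment so it never appears in source control.
# Running locally, it would be exported in the shell first, e.g.:
#   export HUGGINGFACE_API_KEY=hf_...   # placeholder, use your own token
# On a Hugging Face Space, the same variable goes in the Space's secrets; it is
# passed to InferenceClient below.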
model_links = {
    "LegacyLift🚀": BASE_URL + "Qwen/QwQ-32B-Preview",
    "ModernMigrate⭐": BASE_URL + "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "RetroRecode🔄": BASE_URL + "microsoft/Phi-3-mini-4k-instruct"
}
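# Note: InferenceClient's `model` argument accepts either a bare Hub model id
# (e.g. "microsoft/Phi-3-mini-4k-instruct") or a full endpoint URL, so the
# BASE_URL-prefixed strings above are passed straight through as the URL to call.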
model_info = {
    "LegacyLift🚀": {
        'description': "The LegacyLift model is a **Large Language Model (LLM)** for problem-solving, content writing, and daily tips.",
        'logo': './11.jpg'
    },
    "ModernMigrate⭐": {
        'description': "The ModernMigrate model excels in coding, logical reasoning, and high-speed inference.",
        'logo': './2.jpg'
    },
    "RetroRecode🔄": {
        'description': "The RetroRecode model is ideal for critical development, practical knowledge, and serverless inference.",
        'logo': './3.jpg'
    },
}
# Function Definitions
def format_prompt(message, conversation_history, custom_instructions=None):
    """Formats the input prompt."""
    prompt = f"[INST] {custom_instructions} [/INST]\n[CONV_HISTORY]\n"
    for role, content in conversation_history:
        prompt += f"{role.upper()}: {content}\n"
    prompt += f"[/CONV_HISTORY]\n[INST] {message} [/INST]\n[RESPONSE]\n"
    return prompt
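# Illustrative output of format_prompt for one prior exchange (the USER and
# ASSISTANT turns below are made-up examples, not real data):
#   [INST] <custom instructions> [/INST]
#   [CONV_HISTORY]
#   USER: Please summarize the call transcript.
#   ASSISTANT: Here is the structured summary...
#   [/CONV_HISTORY]
#   [INST] <new message> [/INST]
#   [RESPONSE]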
def reset_conversation():
    """Resets the conversation."""
    st.session_state.conversation = []
    st.session_state.messages = []
    st.session_state.chat_state = "reset"
def load_conversation_history():
    """Loads conversation history from a file, returning [] if none exists."""
    history_file = "conversation_history.pickle"
    if os.path.exists(history_file):
        with open(history_file, "rb") as f:  # context manager closes the file
            return pickle.load(f)
    return []
def save_conversation_history(conversation_history):
    """Saves conversation history to a file."""
    with open("conversation_history.pickle", "wb") as f:
        pickle.dump(conversation_history, f)
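# Note: pickle is convenient for round-tripping the chat list, but unpickling a
# file from an untrusted source can execute arbitrary code. If the history file
# could ever come from outside this app, swapping in json.dump/json.load over
# the same list-of-dicts structure would be a safer alternative.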
# Sidebar UI
models = list(model_links.keys())
selected_model = st.sidebar.selectbox("Select Model", models)
temp_values = st.sidebar.slider('Select Temperature', 0.0, 1.0, 0.5)
st.sidebar.button('Reset Chat', on_click=reset_conversation)
st.sidebar.write(f"Chatting with **{selected_model}**")
st.sidebar.markdown(model_info[selected_model]['description'])
st.sidebar.image(model_info[selected_model]['logo'])
# Load session state
if "prev_option" not in st.session_state:
    st.session_state.prev_option = selected_model

if st.session_state.prev_option != selected_model:
    st.session_state.messages = []
    st.session_state.prev_option = selected_model

if "chat_state" not in st.session_state:
    st.session_state.chat_state = "normal"

# Load conversation history
if "messages" not in st.session_state:
    st.session_state.messages = load_conversation_history()
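# Streamlit re-executes this script top-to-bottom on every interaction, so
# st.session_state is the only place per-user state (messages, previous model
# choice, chat state) survives between reruns.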
# Main Chat
repo_id = model_links[selected_model]
st.subheader(f"{selected_model}")

if st.session_state.chat_state == "normal":
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    if prompt := st.chat_input(f"Hi, I'm {selected_model}. How can I help you today?"):
        custom_instruction = (
            "Analyze this transcript with precision. Remove commas in claim numbers, preserve exact numbers and "
            "dates (dd/mm/yy). Extract claim numbers as single entities.\n"
            """1. Only include information explicitly stated
2. Mark unclear information as "UNCLEAR"
3. Preserve exact numbers, dates (in dd/mm/yy format), and claims
4. Focus on factual content
IMPORTANT REQUIREMENTS:
- Format all dates as dd/mm/yy
- Extract and list all claim numbers mentioned
- Maintain exact numbers and statistics as stated
- Do not make assumptions about unclear information
Please analyze the following transcript and structure your response as follows:
PARTICIPANTS:
- List all participants and their roles (if mentioned)
CONTEXT:
- Meeting purpose
- Duration (if mentioned)
- Meeting date/time
KEY POINTS:
- Main topics discussed
- Decisions made
- Important numbers/metrics mentioned
- Claims discussed
ACTION ITEMS:
- Specific tasks assigned
- Who is responsible
- Deadlines (in dd/mm/yy format)
FOLLOW UP:
- Scheduled next meetings
- Pending items
- Required approvals or confirmations"""
        )
        conversation_history = [(msg["role"], msg["content"]) for msg in st.session_state.messages]
        formatted_text = format_prompt(prompt, conversation_history, custom_instruction)

        with st.chat_message("user"):
            st.markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("assistant"): | |
try: | |
client = InferenceClient(model=repo_id) | |
response = client.text_generation( | |
formatted_text, | |
temperature=temp_values, | |
max_new_tokens=1024, | |
stream=True | |
) | |
response_text = ''.join(response) # Collect and concatenate the response | |
response_text = response_text.replace(",", "") # Remove commas in claim numbers | |
st.markdown(response_text) | |
st.session_state.messages.append({"role": "assistant", "content": response_text}) | |
save_conversation_history(st.session_state.messages) | |
except Exception as e: | |
st.error(f"An error occurred: {e}") | |
elif st.session_state.chat_state == "reset":
    st.session_state.chat_state = "normal"
    st.rerun()  # replaces the deprecated st.experimental_rerun()
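# To try the app locally (assuming this file is saved as app.py):
#   pip install streamlit huggingface_hub
#   export HUGGINGFACE_API_KEY=hf_...   # your own token
#   streamlit run app.py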