File size: 6,413 Bytes
14415d3
 
 
3010e69
14415d3
aec842c
14415d3
aec842c
14415d3
aec842c
14415d3
 
267675c
aec842c
 
14415d3
 
 
 
aec842c
cef0a6e
14415d3
 
aec842c
cef0a6e
14415d3
 
29300ad
cef0a6e
14415d3
 
 
aec842c
 
 
 
ac15cea
 
aec842c
14415d3
 
 
aec842c
3010e69
14415d3
07426b3
14415d3
fc591d0
aec842c
fc591d0
aec842c
fc591d0
 
aec842c
 
fc591d0
 
aec842c
 
14415d3
aec842c
 
14415d3
aec842c
14415d3
 
3010e69
aec842c
d249eac
 
 
14415d3
 
 
 
07426b3
 
 
aec842c
0f971cd
 
1c2e9be
aec842c
3010e69
aec842c
14415d3
07426b3
 
 
 
 
 
aec842c
 
 
da8438e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0261ba5
 
aec842c
 
 
944d5c1
07426b3
 
aec842c
07426b3
aec842c
07426b3
 
aec842c
 
 
07426b3
aec842c
07426b3
 
aec842c
 
 
 
07426b3
aec842c
 
07426b3
 
 
aec842c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
import streamlit as st
from huggingface_hub import InferenceClient
import os
import pickle

st.title("Transcription Summarization")

# Base URL and API Key
# NOTE(review): API_KEY is read from the environment but never passed to
# InferenceClient below — confirm the client picks it up implicitly
# (e.g. via the HF_TOKEN environment variable) or this is dead code.
API_KEY = os.environ.get('HUGGINGFACE_API_KEY')
BASE_URL = "https://api-inference.huggingface.co/models/"

# Display name -> fully qualified Hugging Face Inference API endpoint URL.
model_links = {
    "LegacyLift🚀": BASE_URL + "Qwen/QwQ-32B-Preview",
    "ModernMigrate⭐": BASE_URL + "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "RetroRecode🔄": BASE_URL + "microsoft/Phi-3-mini-4k-instruct"
}

# Per-model sidebar metadata: markdown description and local logo image path.
# NOTE(review): logo files ('./11.jpg', './2.jpg', './3.jpg') must ship
# alongside the app or st.sidebar.image will fail — confirm assets exist.
model_info = {
    "LegacyLift🚀": {
        'description': "The LegacyLift model is a **Large Language Model (LLM)** for problem-solving, content writing, and daily tips.",
        'logo': './11.jpg'
    },
    "ModernMigrate⭐": {
        'description': "The ModernMigrate model excels in coding, logical reasoning, and high-speed inference.",
        'logo': './2.jpg'
    },
    "RetroRecode🔄": {
        'description': "The RetroRecode  is ideal for critical development, practical knowledge, and serverless inference.",
        'logo': './3.jpg'
    },
}

# Function Definitions
def format_prompt(message, conversation_history, custom_instructions=None):
    """Build an [INST]-tagged prompt from the message and prior conversation.

    Args:
        message: The latest user message to answer.
        conversation_history: Iterable of (role, content) pairs.
        custom_instructions: Optional system-level instructions; treated as
            empty when None.

    Returns:
        The fully formatted prompt string.
    """
    # Bug fix: the original wrote "\[INST\]" inside f-strings; Python keeps
    # the backslashes literally, so the model received "\[INST\]" instead of
    # the expected [INST] tag. Plain brackets restore the intended format.
    # Also avoid interpolating the literal string "None" when no
    # custom_instructions are supplied.
    instructions = custom_instructions or ""
    prompt = f"[INST] {instructions} [/INST]\n[CONV_HISTORY]\n"
    for role, content in conversation_history:
        prompt += f"{role.upper()}: {content}\n"
    prompt += f"[/CONV_HISTORY]\n[INST] {message} [/INST]\n[RESPONSE]\n"
    return prompt

def reset_conversation():
    """Clear the stored chat transcript and flag the session for a UI reset."""
    st.session_state.messages = []
    st.session_state.conversation = []
    st.session_state.chat_state = "reset"

def load_conversation_history():
    """Load the pickled conversation history from disk.

    Returns:
        The previously saved list of message dicts, or an empty list when
        no history file exists.
    """
    history_file = "conversation_history.pickle"
    if not os.path.exists(history_file):
        return []
    # Fix: the original did pickle.load(open(...)) and leaked the file
    # handle; a context manager guarantees it is closed even on error.
    # NOTE(review): unpickling is only safe because this file is written
    # locally by save_conversation_history — never point it at untrusted data.
    with open(history_file, "rb") as f:
        return pickle.load(f)

def save_conversation_history(conversation_history):
    """Persist the given conversation history to the local pickle file."""
    history_file = "conversation_history.pickle"
    with open(history_file, "wb") as handle:
        pickle.dump(conversation_history, handle)

# Sidebar UI
models = list(model_links.keys())  # preserves model_links insertion order
selected_model = st.sidebar.selectbox("Select Model", models)
# Sampling temperature forwarded to text_generation (0.0 = most deterministic).
temp_values = st.sidebar.slider('Select Temperature', 0.0, 1.0, 0.5)
st.sidebar.button('Reset Chat', on_click=reset_conversation)

st.sidebar.write(f"Chatting with **{selected_model}**")
st.sidebar.markdown(model_info[selected_model]['description'])
# NOTE(review): requires the logo file at the relative path in model_info
# to exist in the working directory — confirm assets are deployed.
st.sidebar.image(model_info[selected_model]['logo'])

# Load session state
# Track the previously selected model so that switching models clears the chat.
if "prev_option" not in st.session_state:
    st.session_state.prev_option = selected_model

if st.session_state.prev_option != selected_model:
    # Model changed mid-session: drop the transcript so histories don't mix.
    st.session_state.messages = []
    st.session_state.prev_option = selected_model

if "chat_state" not in st.session_state:
    st.session_state.chat_state = "normal"  # either "normal" or "reset"

# Load conversation history
# First run of this session: seed messages from the pickled history file.
if "messages" not in st.session_state:
    st.session_state.messages = load_conversation_history()

# Main Chat
repo_id = model_links[selected_model]  # endpoint URL for the chosen model
st.subheader(f"{selected_model}")

if st.session_state.chat_state == "normal":
    # Replay the stored transcript so the chat survives Streamlit reruns.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"):
        # System instructions prepended to every request: transcript-analysis
        # rules plus the required report structure (participants, context,
        # key points, action items, follow up).
        custom_instruction = (
            "Analyze this transcript with precision. Remove commas in claim numbers, preserve exact numbers and "
            "dates (dd/mm/yy). Extract claim numbers as single entities."
            """1. Only include information explicitly stated

            2. Mark unclear information as "UNCLEAR"
            
            3. Preserve exact numbers, dates (in dd/mm/yy format), and claims
            
            4. Focus on factual content
            
            IMPORTANT REQUIREMENTS:
            
            - Format all dates as dd/mm/yy
            
            - Extract and list all claim numbers mentioned
            
            - Maintain exact numbers and statistics as stated
            
            - Do not make assumptions about unclear information
            
            Please analyze the following transcript and structure your response as follows:
            
            PARTICIPANTS:
            
            - List all participants and their roles (if mentioned)
            
            CONTEXT:
            
            - Meeting purpose
            
            - Duration (if mentioned)
            
            - Meeting date/time
            
            KEY POINTS:
            
            - Main topics discussed
            
            - Decisions made
            
            - Important numbers/metrics mentioned
            
            - Claims discussed
            
            ACTION ITEMS:
            
            - Specific tasks assigned
            
            - Who is responsible
            
            - Deadlines (in dd/mm/yy format)
            
            FOLLOW UP:
            
            - Scheduled next meetings
            
            - Pending items
            
            - Required approvals or confirmations"""


        )
        # Rebuild (role, content) pairs for format_prompt from the stored dicts.
        conversation_history = [(msg["role"], msg["content"]) for msg in st.session_state.messages]
        formatted_text = format_prompt(prompt, conversation_history, custom_instruction)

        with st.chat_message("user"):
            st.markdown(prompt)

        st.session_state.messages.append({"role": "user", "content": prompt})

        with st.chat_message("assistant"):
            try:
                # NOTE(review): API_KEY from the environment is never passed
                # here — confirm InferenceClient authenticates implicitly.
                client = InferenceClient(model=repo_id)
                response = client.text_generation(
                    formatted_text,
                    temperature=temp_values,
                    max_new_tokens=1024,
                    stream=True
                )
                response_text = ''.join(response)  # Collect and concatenate the response
                # NOTE(review): this strips EVERY comma from the reply, not
                # just those inside claim numbers — normal prose punctuation
                # is lost too. Confirm this blanket removal is intended.
                response_text = response_text.replace(",", "")  # Remove commas in claim numbers
                st.markdown(response_text)
                st.session_state.messages.append({"role": "assistant", "content": response_text})
                save_conversation_history(st.session_state.messages)
            except Exception as e:
                # Surface network/model failures in the UI instead of crashing.
                st.error(f"An error occurred: {e}")

elif st.session_state.chat_state == "reset":
    # One-shot reset: restore normal mode, then rerun to redraw a clean chat.
    # NOTE(review): st.experimental_rerun() is removed in newer Streamlit
    # versions in favor of st.rerun() — confirm the pinned version.
    st.session_state.chat_state = "normal"
    st.experimental_rerun()