ysharma HF staff commited on
Commit
97caaf5
·
verified ·
1 Parent(s): cc40cc6

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +71 -0
app.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from gradio import ChatMessage
3
+ import time
4
+
5
def simulate_thinking_chat(message: str, history: list):
    """Simulate a "thinking" LLM: stream intermediate thoughts, then a final answer.

    This is a generator (not a plain function): it yields the updated chat
    history after every streaming step so Gradio can re-render the chatbot.
    The original ``-> list`` annotation was misleading and has been removed.

    Args:
        message: The user's message (unused by this dummy simulation).
        history: Current chat history, a list of ChatMessage objects.

    Yields:
        list: The chat history after each incremental update.
    """
    # Start with an empty "thinking" bubble; metadata["title"] renders a
    # collapsible header in gr.Chatbot(type="messages").
    history.append(
        ChatMessage(
            role="assistant",
            content="",
            metadata={"title": "💭 Thinking Process"},
        )
    )
    time.sleep(1)
    yield history  # show the empty thinking bubble immediately

    # Canned thoughts the fake LLM will "think" through, in order.
    thoughts = [
        "First, I need to understand the core aspects of the query...",
        "Now, considering the broader context and implications...",
        "Analyzing potential approaches to formulate a comprehensive answer...",
        "Finally, structuring the response for clarity and completeness...",
    ]

    # Accumulate thoughts so each update shows everything thought so far.
    accumulated_thoughts = ""
    for thought in thoughts:
        time.sleep(0.5)  # small delay for realism
        # Markdown bullet; the blank line ("\n\n") forces a line break per thought.
        accumulated_thoughts += f"- {thought}\n\n"
        # Replace (not append to) the thinking bubble with all thoughts so far.
        history[-1] = ChatMessage(
            role="assistant",
            content=accumulated_thoughts.strip(),  # trim trailing blank lines
            metadata={"title": "💭 Thinking Process"},
        )
        yield history

    # Thinking complete: append the final visible answer as a separate message.
    history.append(
        ChatMessage(
            role="assistant",
            content="Based on my thoughts and analysis above, my response is: This dummy repro shows how thoughts of a thinking LLM can be progressively shown before providing its final answer."
        )
    )
    yield history
53
# Assemble the demo UI: a messages-mode chatbot plus a textbox, wired so a
# submitted message is echoed as a user turn and then answered by the
# streaming "thinking" generator.
with gr.Blocks() as demo:
    gr.Markdown("# Thinking LLM Demo 🤔")
    chatbot = gr.Chatbot(type="messages", render_markdown=True)
    msg = gr.Textbox(placeholder="Type your message...")

    def _record_user_turn(text, messages):
        # Append the user's text to the history; the textbox keeps its value.
        return text, messages + [ChatMessage(role="user", content=text)]

    # First record the user turn, then stream the simulated thinking response.
    submit_event = msg.submit(
        _record_user_turn,
        [msg, chatbot],
        [msg, chatbot],
    )
    submit_event.then(
        simulate_thinking_chat,
        [msg, chatbot],
        chatbot,
    )

if __name__ == "__main__":
    demo.launch()