SoumyaJ committed on
Commit
b78d97f
·
verified ·
1 Parent(s): c0b2cd8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -6
app.py CHANGED
@@ -46,6 +46,8 @@ llm = ChatGroq(model = "qwen-qwq-32b",temperature=0.1)
46
 
47
  memorysaver = MemorySaver()
48
 
 
 
49
  class State(TypedDict):
50
  messages: Annotated[list[AnyMessage], add_messages]
51
 
@@ -68,8 +70,9 @@ def build_tools():
68
  tools = [TavilySearchResults(max_results=2)]
69
  return tools
70
 
71
- def get_llm(clearMemory):
72
  global llm
 
73
  if clearMemory:
74
  llm = ChatGroq(model = "qwen-qwq-32b",temperature=0.1)
75
  tools = build_tools()
@@ -77,8 +80,7 @@ def get_llm(clearMemory):
77
  llm_with_tools = llm.bind_tools(tools)
78
  return llm_with_tools
79
 
80
- def llm_callingTools(state:State, clearMemory = False):
81
-
82
  format_instructions = parser.get_format_instructions()
83
 
84
  system_msg = SystemMessage(content=f"""You are a smart movie researcher.
@@ -95,15 +97,17 @@ def llm_callingTools(state:State, clearMemory = False):
95
  Think carefully before responding: **Is the latest message is referring to a specific show or programme, even indirectly?** Only then use the formatted output.""")
96
 
97
  human_message = HumanMessage( content=f"{state['messages']}.")
98
- llm_with_tools = get_llm(clearMemory)
99
  return {"messages": [llm_with_tools.invoke([system_msg]+ [human_message])]}
100
 
101
- def build_graph(clearMemory: bool = False):
102
  global memorysaver
 
 
103
  if clearMemory:
104
  memorysaver = MemorySaver()
105
  graph_builder = StateGraph(State)
106
- graph_builder.add_node("llm_with_tool", llm_callingTools(clearMemory))
107
  graph_builder.add_node("tools", ToolNode(build_tools()))
108
  graph_builder.add_edge(START, "llm_with_tool")
109
  graph_builder.add_conditional_edges("llm_with_tool", tools_condition)
@@ -118,6 +122,7 @@ def is_pattern_in_string(string: str) -> bool:
118
 
119
  @app.post("/api/v1/get_programme_info")
120
  def get_data_by_prompt(prompt: str, thread_id: str):
 
121
  clearMemory = False
122
  try:
123
  print(f"Prompt: {prompt}")
 
46
 
47
  memorysaver = MemorySaver()
48
 
49
+ clearMemory = False
50
+
51
class State(TypedDict):
    """Shared graph state.

    ``messages`` accumulates the conversation; the ``add_messages`` reducer
    appends new messages instead of overwriting the list on each node update.
    """

    messages: Annotated[list[AnyMessage], add_messages]
53
 
 
70
  tools = [TavilySearchResults(max_results=2)]
71
  return tools
72
 
73
def get_llm():
    """Return the shared chat model with the search tools bound to it.

    Reads the module-level ``clearMemory`` flag: when truthy, the shared
    ``llm`` global is first replaced with a fresh ChatGroq instance
    (presumably to drop any accumulated conversational context — confirm
    against the callers that set the flag). Always rebinds the tool set
    before returning.
    """
    global llm
    global clearMemory

    # Caller requested a reset: rebuild the model from scratch.
    if clearMemory:
        llm = ChatGroq(model="qwen-qwq-32b", temperature=0.1)

    # Bind the (freshly built) tool list and hand back the tool-aware model.
    return llm.bind_tools(build_tools())
82
 
83
+ def llm_callingTools(state:State):
 
84
  format_instructions = parser.get_format_instructions()
85
 
86
  system_msg = SystemMessage(content=f"""You are a smart movie researcher.
 
97
  Think carefully before responding: **Is the latest message is referring to a specific show or programme, even indirectly?** Only then use the formatted output.""")
98
 
99
  human_message = HumanMessage( content=f"{state['messages']}.")
100
+ llm_with_tools = get_llm()
101
  return {"messages": [llm_with_tools.invoke([system_msg]+ [human_message])]}
102
 
103
+ def build_graph(memory: bool = False):
104
  global memorysaver
105
+ global clearMemory
106
+ clearMemory = memory
107
  if clearMemory:
108
  memorysaver = MemorySaver()
109
  graph_builder = StateGraph(State)
110
+ graph_builder.add_node("llm_with_tool", llm_callingTools)
111
  graph_builder.add_node("tools", ToolNode(build_tools()))
112
  graph_builder.add_edge(START, "llm_with_tool")
113
  graph_builder.add_conditional_edges("llm_with_tool", tools_condition)
 
122
 
123
  @app.post("/api/v1/get_programme_info")
124
  def get_data_by_prompt(prompt: str, thread_id: str):
125
+ global clearMemory
126
  clearMemory = False
127
  try:
128
  print(f"Prompt: {prompt}")