bradartigue committed on
Commit cb4c6c0 • 1 Parent(s): e6a8daa

Update app.py

Files changed (1)
  1. app.py +91 -159
app.py CHANGED
@@ -1,205 +1,137 @@
-import os
-import time
-import requests
-from flask import Flask, request, jsonify
+import os, re
+from flask import Flask, request, jsonify, make_response
 from flask_cors import CORS
-
-import openai
-import langchain
-import random
-from langchain_openai import ChatOpenAI
-from langchain_community.cache import InMemoryCache
-from langchain.prompts import PromptTemplate
-from langchain.chains import LLMChain
+from time import sleep
+from openai import OpenAI
 
 # Set environment variables and OpenAI configurations
-api_keys = os.environ
-openai.api_key = os.environ['OPENAI_API_KEY']
-print(os.environ['OPENAI_API_KEY'])
-langchain.llm_cache = InMemoryCache()
+print("OpenAI:\t\t"+os.environ['OPENAI_API_KEY'])
+# Connect to the assistant
+
+openai_client = OpenAI(api_key=os.environ['OPENAI_API_KEY'])
+openai_assistant = openai_client.beta.assistants.retrieve(assistant_id=os.environ['OPENAI_ASSISTANT_ID'])
+openai_assistant_id=openai_assistant.id
+openai_thread_id = ""
+openai_additional_instruction = os.environ['OPENAI_ADDITIONAL_INSTRUCTION']
+max_response_length = os.environ['MAX_RESPONSE_LENGTH']
 
 app = Flask(__name__)
 CORS(app)
 
-# This sends final LLM output to dynamically get tagged with UneeQ emotions and expressions
-def process_emotions(query):
-    try:
-        # URL of the FT LLM API endpoint (ECS)
-        url = "https://api-ft-inline-tags.caipinnovation.com/query"
-
-        # Payload to be sent in the POST request
-        payload = {"prompt": query}
-
-        # Making the POST request
-        response = requests.post(url, json=payload)
-
-        # Checking if the request was successful
-        response.raise_for_status()
-
-        # Returning the 'answer' from the response
-        ft_answer = response.json().get("answer")
-        return ft_answer
-
-    except Exception as e:
-        raise
-
-#This handles general Q&A to the LLM
-def process_query(query, chat_history, systemMessage, emotions):
-    try:
-        print(f"calling fine_tuned_model")
-
-        #Get name of model from ENV
-        ft_model_name = os.environ.get("OPENAI_MODEL_NAME")
-
-        #Model name from env will be used here:
-        fine_tuned_model = ChatOpenAI(
-            temperature=0, model_name=ft_model_name
-        )
-
-        prompt_template = """System: {systemMessage}.
-        User: The user is inquiring about cataracts or cataract surgery. Answer their question: {query}"""
-        PROMPT = PromptTemplate(template=prompt_template,
-                                input_variables=["systemMessage", "query"])
-
-        chain = LLMChain(llm=fine_tuned_model, prompt=PROMPT, verbose=False)
-
-        input_prompt = [{"systemMessage": systemMessage, "query": query}]
-
-        generatedResponse = chain.apply(input_prompt)
-
-        #Replace/filter out any prepended strings from LLM response
-        #Sometimes we have issues that the LLM writes these following strings before answer. Use if needed.
-        llm_response = generatedResponse[0]["text"].replace("Answer:", "").replace("System:", "").lstrip()
-        print(llm_response)
-
-        #NOW SEND RESPONSE TO GET TAGGED w/ Emotions and Expressions
-        if emotions:
-            try:
-                llm_response_ft = process_emotions(llm_response)
-            except Exception as e:
-                # Log the error
-                print(f"Error processing emotions for query: {llm_response}. Error: {str(e)}")
-                # Return the error response
-                return {"error": "Error processing emotions", "query": llm_response}
-
-            return {
-                "answer": llm_response_ft,
-                "source_documents": ""
-            }
-        else:
-            return {
-                "answer": llm_response,
-                "source_documents": ""
-            }
-    except Exception as e:
-        print(f"Error processing query: {query}. Error: {str(e)}")
-        return {"error": "Error processing query"}
-
-#This handles the chart functionality in HIMSS
-def process_chart(query, s1 ,s2):
-    try:
-        print("calling fine_tuned_model")
-        #Get name of model from ENV
-        ft_model_name = os.environ.get("OPENAI_MODEL_NAME")
-
-        #Model name from env will be used here:
-        fine_tuned_model = ChatOpenAI(
-            temperature=0, model_name=ft_model_name
-        )
-
-        prompt_template = """System: {systemMessage}.
-        User: {query}"""
-        PROMPT = PromptTemplate(template=prompt_template,
-                                input_variables=["systemMessage", "query"])
-
-        chain = LLMChain(llm=fine_tuned_model, prompt=PROMPT, verbose=False)
-
-        # Get systemMessage from env file:
-        systemMessage = os.environ.get("SYSTEM_MESSAGE")
-
-        input_prompt = [{"systemMessage": systemMessage, "query": query}]
-
-        generatedResponse = chain.apply(input_prompt)
-        print("after ", generatedResponse[0]["text"])
-
-        #Replace/filter out any prepended strings from LLM response
-        #Sometimes we have issues that the LLM writes these following strings before answer. Use if needed.
-        generatedResponse_filtered = generatedResponse[0]["text"].replace("Answer:", "").replace("System:", "").lstrip()
-
-        stripped_answer = f"I see you have {s1} and {s2}. {generatedResponse_filtered}"
-
-        return {
-            "answer": stripped_answer,
-            "source_documents": ""
-        }
-    except Exception as e:
-        print(f"Error processing query: {query}. Error: {str(e)}")
-        return {"error": "Error processing query"}
+# Function to create a thread
+def create_thread():
+    openai_thread = openai_client.beta.threads.create()
+    return(openai_thread.id)
+
+# Function to create a message in a given thread
+def create_message(thread_id,user_message):
+    thread_message = openai_client.beta.threads.messages.create(
+        thread_id,
+        role='user',
+        content=user_message,
+    )
+    return thread_message
+
+# Function to retrieve a message from a given thread
+def retrieve_message(thread_id,message_id):
+    message = openai_client.beta.threads.messages.retrieve(
+        message_id=message_id,
+        thread_id=thread_id,
+    )
+    return message
+
+# Function to run the thread
+def run_thread(thread_id,assistant_id):
+    run = openai_client.beta.threads.runs.create_and_poll(
+        thread_id=thread_id,
+        additional_instructions=openai_additional_instruction,
+        assistant_id=assistant_id,
+    )
+    return run
+
+# Function to check the status of the run
+def run_status(run):
+    return run.status
+
+# Function to clear a thread
+def delete_thread(thread_id):
+    return openai_client.beta.threads.delete(thread_id)
+
+def delete_message(message_id,thread_id):
+    deleted_message = openai_client.beta.threads.messages.delete(
+        message_id=message_id,
+        thread_id=thread_id,
+    )
+    return deleted_message.id
+
+#This handles general Q&A to the LLM
+def process_query(query,thread_id):
+    retval = {"answer":""}
+
+    new_message = create_message(thread_id,query)
+    run = run_thread(thread_id,openai_assistant_id)
+
+    messages = openai_client.beta.threads.messages.list(thread_id=thread_id)
+    for message in messages:
+        if message.run_id == run.id:
+            if message.role=='assistant':
+                # gets the answer from the assistant
+                answer = str(message.content[0].text.value)
+                # kills the source reference in the response, if there
+                regex_pattern = r"【.*?】"
+                scrubbed_answer = re.sub(regex_pattern, '', answer)
+                scrubbed_answer = scrubbed_answer.replace('Mater','Matter')
+                retval = {"answer":scrubbed_answer}
+
+    return retval
 
 #POST request to this service
 @app.route('/query', methods=['POST'])
 def handle_query():
-
+    print(request)
+    print(request.json)
     data = request.json
     query=data['prompt']
-    chatHistory=""
-    systemMessage=os.environ['SYSTEM_MESSAGE']
+    openai_thread_id=data['thread']
+    print("Assistant:\t\t "+openai_assistant.id)
+    print("Thread:\t\t"+openai_thread_id)
     answer = ''
-    emotions = ''
 
-    result = process_query(query, chatHistory, systemMessage, emotions)
-    answer = result['answer']
+    # need to grab a thread id or create a new thread
+    if openai_thread_id == "":
+        print("Creating a new thread")
+        # create the thread
+        openai_thread_id=create_thread()
+        print("New thread:\t"+openai_thread_id)
 
-    serialized_result = {
-        "answer": answer,
-        "matchedContext": "",
-        "conversationPayload": ""
-    }
-
-    print(serialized_result)
-
-    return jsonify(serialized_result), 200
-
-# Helper Functions
-def pick_random_issues(issues):
-    # Randomly select two strings from the list
-    random_strings = random.sample(issues, 2)
-    return random_strings
-
-def generate_description():
-    issues = ["Anterior Uveitis", "Corneal Guttata", "Diabetes", "Diabetes Mellitus", "Glaucoma", "Retinal Detachment", "Corticosteroids", "Phenothiazine", "Chlorpromazine", "Ultraviolet Radiation Exposure", "Smoking", "High Alcohol Consumption", "Poor Nutrition"]
-    random_strings = pick_random_issues(issues)
-    random_string_1, random_string_2 = random_strings
-    description = f"Describe any issues I may encounter due to {random_string_1} and {random_string_2} relative to my upcoming cataract surgery?"
-    return description, random_string_1, random_string_2
-
-#GET request to chart feature
-@app.route('/chart', methods=['GET'])
-def handle_chart():
-    description, random_string_1, random_string_2 = generate_description()
-    query = description
-    if not query:
-        return jsonify({"error": "No query provided"}), 400
-    result = process_chart(query, random_string_1, random_string_2)
-    if "error" in result:
-        return jsonify(result), 500
+    result = process_query(query,openai_thread_id)
+    answer = result['answer']
 
     serialized_result = {
-        "query": query,
-        "answer": result["answer"],
-        "source_documents": ""
+        "answer": answer,
+        "matchedContext": "",
+        "conversationPayload": "",
+        "thread": openai_thread_id
     }
-
+    print(serialized_result['answer'])
     return jsonify(serialized_result), 200
 
-
 @app.route('/')
 def hello():
     version = os.environ.get("CODE_VERSION")
     return jsonify({"status": "Healthy", "version": version}), 200
 
-
 if __name__ == "__main__":
-    app.run(host="0.0.0.0", port=7860)
+    app.run(host="0.0.0.0", port=15002)
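
After this change, the /query handler expects a JSON body carrying both a prompt and a thread field, and it returns the thread id alongside the answer so callers can continue a conversation across requests. Below is a minimal client sketch against the updated endpoint; the http://localhost:15002 base URL is an assumption taken from the app.run() call above, and the ask() helper is illustrative, not part of the commit.

import requests

BASE_URL = "http://localhost:15002"  # assumption: local run of this service

def ask(prompt, thread_id=""):
    # An empty thread id tells the service to create a new OpenAI thread;
    # the response echoes the id back for reuse on follow-up turns.
    resp = requests.post(f"{BASE_URL}/query",
                         json={"prompt": prompt, "thread": thread_id})
    resp.raise_for_status()
    body = resp.json()
    return body["answer"], body["thread"]

answer, thread_id = ask("What should I know before cataract surgery?")
follow_up, _ = ask("What are the main risks?", thread_id)  # same conversation

Because the thread id round-trips through the client, the Flask layer stays stateless; conversation history lives entirely in the OpenAI Assistants thread rather than in the old chat_history parameter.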