TYehan committed on
Commit b8b4ab2 · verified · 1 Parent(s): 9e70c5d

Upload app.py


Updated only the UI without changing any functionality

Files changed (1)
  1. app.py +238 -159
app.py CHANGED
@@ -1,159 +1,238 @@
- import streamlit as st
- from rdflib import Graph, Namespace, URIRef, Literal
- from typing import Dict, List, Optional
- from langgraph.graph import StateGraph
- from langchain.prompts import ChatPromptTemplate
- import json
- from dotenv import load_dotenv
- import os
- from dataclasses import dataclass
- from langchain_community.chat_models import ChatOllama
- from langchain_groq import ChatGroq
- import logging
- from analyzers import DrugInteractionAnalyzer
-
- # Load environment variables
- load_dotenv()
-
- # Configure logging
- logging.basicConfig(
-     level=logging.INFO,
-     format='%(asctime)s [%(levelname)s] %(message)s',
-     handlers=[
-         logging.FileHandler("app.log"),
-         logging.StreamHandler()
-     ]
- )
-
- # Validating API key
- GROQ_API_KEY = os.getenv("GROQ_API_KEY")
- if not GROQ_API_KEY:
-     logging.error("GROQ_API_KEY not found in environment variables. Please add it to your .env file.")
-     raise ValueError("GROQ_API_KEY not found in environment variables. Please add it to your .env file.")
-
- @dataclass
- class GraphState:
-     """State type for the graph."""
-     input: str
-     query: Optional[str] = None
-     ontology_results: Optional[str] = None
-     response: Optional[str] = None
-
- class OntologyAgent:
-     def __init__(self, owl_file_path: str):
-         """Initialize the OntologyAgent with an OWL file."""
-         self.g = Graph()
-         try:
-             self.g.parse(owl_file_path, format="xml")
-             self.ns = Namespace("http://www.example.org/DrugInteraction.owl#")
-             logging.info(f"Ontology loaded successfully from {owl_file_path}")
-         except Exception as e:
-             logging.error(f"Failed to load ontology file: {e}")
-             raise ValueError(f"Failed to load ontology file: {e}")
-
- def create_agent_graph(owl_file_path: str) -> StateGraph:
-     """Create a processing graph for drug interaction analysis using separate agents."""
-     analyzer = DrugInteractionAnalyzer(owl_file_path)
-
-     def user_input_node(state: GraphState) -> Dict[str, str]:
-         logging.info("Processing user input.")
-         return {"query": state.input}
-
-     def ontology_query_node(state: GraphState) -> Dict[str, str]:
-         try:
-             logging.info("Executing ontology queries.")
-             drug_names = [d.strip() for d in state.input.split(",")]
-             results = analyzer.analyze_drugs(drug_names)
-             logging.info(f"Ontology query results: {results}")
-             return {"ontology_results": json.dumps(results, indent=2)}
-         except Exception as e:
-             logging.warning(f"Ontology query failed: {e}")
-             return {"ontology_results": json.dumps({"error": str(e)})}
-
-     def llm_processing_node(state: GraphState) -> Dict[str, str]:
-         template = """
-         Based on the drug interaction analysis results:
-         {ontology_results}
-
-         Please provide a comprehensive summary of:
-         1. Direct interactions between the drugs
-         2. Potential conflicts
-         3. Similar drug alternatives
-         4. Recommended alternatives if conflicts exist
-
-         If no results were found, please indicate this clearly.
-         Format the response in a clear, structured manner.
-         """
-
-         prompt = ChatPromptTemplate.from_template(template)
-
-         try:
-             llm = ChatGroq(
-                 model_name="llama-3.3-70b-versatile",
-                 api_key=GROQ_API_KEY,
-                 temperature=0.7
-             )
-             logging.info("LLM initialized successfully.")
-         except Exception as e:
-             logging.error(f"Error initializing LLM: {e}")
-             return {"response": f"Error initializing LLM: {str(e)}"}
-
-         chain = prompt | llm
-
-         try:
-             response = chain.invoke({
-                 "ontology_results": state.ontology_results
-             })
-
-             logging.info("LLM processing completed successfully.")
-             return {"response": response.content}
-         except Exception as e:
-             logging.error(f"Error processing results with LLM: {e}")
-             return {"response": f"Error processing results: {str(e)}"}
-
-     workflow = StateGraph(GraphState)
-
-     workflow.add_node("input_processor", user_input_node)
-     workflow.add_node("ontology_query", ontology_query_node)
-     workflow.add_node("llm_processing", llm_processing_node)
-
-     workflow.add_edge("input_processor", "ontology_query")
-     workflow.add_edge("ontology_query", "llm_processing")
-
-     workflow.set_entry_point("input_processor")
-
-     logging.info("Agent graph created and configured successfully.")
-
-     return workflow.compile()
-
- def main():
-     st.title("Drug Interaction Analysis System")
-
-     user_input = st.text_input("Enter drug names separated by commas (e.g., Aspirin, Warfarin):", value="")
-
-     if st.button("Analyze"):
-         if not user_input.strip():
-             st.warning("Please enter at least one drug name.")
-             return
-
-         owl_file_path = os.path.join("ontology", "DrugInteraction.owl")
-         if not os.path.exists(owl_file_path):
-             logging.error(f"Ontology file not found: {owl_file_path}")
-             st.error(f"Ontology file not found: {owl_file_path}")
-             return
-
-         try:
-             with st.spinner("Analyzing drug interactions..."):
-                 agent_graph = create_agent_graph(owl_file_path)
-                 result = agent_graph.invoke(GraphState(input=user_input))
-
-             st.subheader("Analysis Results:")
-             st.markdown(result["response"])
-
-             logging.info("Analysis completed and results displayed.")
-         except Exception as e:
-             logging.error(f"An error occurred: {str(e)}")
-             st.error(f"An error occurred: {str(e)}")
-
- if __name__ == "__main__":
-     main()
+ import streamlit as st
+ from rdflib import Graph, Namespace, URIRef, Literal
+ from typing import Dict, List, Optional
+ from langgraph.graph import StateGraph
+ from langchain.prompts import ChatPromptTemplate
+ import json
+ from dotenv import load_dotenv
+ import os
+ from dataclasses import dataclass
+ from langchain_community.chat_models import ChatOllama
+ from langchain_groq import ChatGroq
+ import logging
+ from analyzers import DrugInteractionAnalyzer
+ import base64
+
+ # Load environment variables
+ load_dotenv()
+
+ # Configure logging
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s [%(levelname)s] %(message)s',
+     handlers=[
+         logging.FileHandler("app.log"),
+         logging.StreamHandler()
+     ]
+ )
+
+ # Validating API key
+ GROQ_API_KEY = os.getenv("GROQ_API_KEY")
+ if not GROQ_API_KEY:
+     logging.error("GROQ_API_KEY not found in environment variables. Please add it to your .env file.")
+     raise ValueError("GROQ_API_KEY not found in environment variables. Please add it to your .env file.")
+
+ @dataclass
+ class GraphState:
+     """State type for the graph."""
+     input: str
+     query: Optional[str] = None
+     ontology_results: Optional[str] = None
+     response: Optional[str] = None
+
+ class OntologyAgent:
+     def __init__(self, owl_file_path: str):
+         """Initialize the OntologyAgent with an OWL file."""
+         self.g = Graph()
+         try:
+             self.g.parse(owl_file_path, format="xml")
+             self.ns = Namespace("http://www.example.org/DrugInteraction.owl#")
+             logging.info(f"Ontology loaded successfully from {owl_file_path}")
+         except Exception as e:
+             logging.error(f"Failed to load ontology file: {e}")
+             raise ValueError(f"Failed to load ontology file: {e}")
+
+ def create_agent_graph(owl_file_path: str) -> StateGraph:
+     """Create a processing graph for drug interaction analysis using separate agents."""
+     analyzer = DrugInteractionAnalyzer(owl_file_path)
+
+     def user_input_node(state: GraphState) -> Dict[str, str]:
+         logging.info("Processing user input.")
+         return {"query": state.input}
+
+     def ontology_query_node(state: GraphState) -> Dict[str, str]:
+         try:
+             logging.info("Executing ontology queries.")
+             drug_names = [d.strip() for d in state.input.split(",")]
+             results = analyzer.analyze_drugs(drug_names)
+             logging.info(f"Ontology query results: {results}")
+             return {"ontology_results": json.dumps(results, indent=2)}
+         except Exception as e:
+             logging.warning(f"Ontology query failed: {e}")
+             return {"ontology_results": json.dumps({"error": str(e)})}
+
+     def llm_processing_node(state: GraphState) -> Dict[str, str]:
+         template = """
+         Based on the drug interaction analysis results:
+         {ontology_results}
+
+         Please provide a comprehensive summary of:
+         1. Direct interactions between the drugs
+         2. Potential conflicts
+         3. Similar drug alternatives
+         4. Recommended alternatives if conflicts exist
+
+         If no results were found, please indicate this clearly.
+         Format the response in a clear, structured manner.
+         """
+
+         prompt = ChatPromptTemplate.from_template(template)
+
+         try:
+             llm = ChatGroq(
+                 model_name="llama3-groq-70b-8192-tool-use-preview",
+                 api_key=GROQ_API_KEY,
+                 temperature=0.7
+             )
+             logging.info("LLM initialized successfully.")
+         except Exception as e:
+             logging.error(f"Error initializing LLM: {e}")
+             return {"response": f"Error initializing LLM: {str(e)}"}
+
+         chain = prompt | llm
+
+         try:
+             response = chain.invoke({
+                 "ontology_results": state.ontology_results
+             })
+
+             logging.info("LLM processing completed successfully.")
+             return {"response": response.content}
+         except Exception as e:
+             logging.error(f"Error processing results with LLM: {e}")
+             return {"response": f"Error processing results: {str(e)}"}
+
+     workflow = StateGraph(GraphState)
+
+     workflow.add_node("input_processor", user_input_node)
+     workflow.add_node("ontology_query", ontology_query_node)
+     workflow.add_node("llm_processing", llm_processing_node)
+
+     workflow.add_edge("input_processor", "ontology_query")
+     workflow.add_edge("ontology_query", "llm_processing")
+
+     workflow.set_entry_point("input_processor")
+
+     logging.info("Agent graph created and configured successfully.")
+
+     return workflow.compile()
+
+ def main():
+     st.set_page_config(page_title="Drug Interaction Analysis System", page_icon="💊", layout="wide")
+
+     st.markdown("<h1 style='text-align: center;'>Drug Interaction Analysis System</h1>", unsafe_allow_html=True)
+
+     st.markdown("<p style='text-align: center; color: #c9f9fa;'>This application uses a combination of ontology-based reasoning and language models to analyze drug interactions.</p>", unsafe_allow_html=True)
+     st.markdown("<p style='text-align: center; color: #faffb7;'>Analyze potential interactions between different drugs</p>", unsafe_allow_html=True)
+
+     st.markdown("""
+     <style>
+     .centered {
+         display: flex;
+         flex-direction: column;
+         align-items: center;
+         justify-content: center;
+         text-align: center;
+     }
+     </style>
+     <div class="centered">
+     """, unsafe_allow_html=True)
+
+     def get_base64_image(img_path):
+         with open(img_path, "rb") as f:
+             encoded = base64.b64encode(f.read()).decode()
+         return f"data:image/webp;base64,{encoded}"
+
+     # Generate the image URL
+     image_url = get_base64_image("img/header-image.webp")
+
+     # Load custom CSS with the embedded image
+     st.markdown(f"""
+     <style>
+     .parallax {{
+         background-image: url("{image_url}");
+         min-height: 200px;
+         background-attachment: fixed;
+         background-position: center;
+         background-repeat: no-repeat;
+         background-size: cover;
+     }}
+     </style>
+     """, unsafe_allow_html=True)
+
+     st.markdown('<div class="parallax"></div>', unsafe_allow_html=True)
+
+     st.markdown("<br>", unsafe_allow_html=True)
+
+     col1, col2, col3 = st.columns([7, 1, 3])
+
+     with col1:
+
+         st.markdown("<h2 style='text-align: left; color: #d46c6c;'>Instructions</h2>", unsafe_allow_html=True)
+         st.markdown("<p style='text-align: left; color: white;'>1. Enter the drug names separated by commas. <br> 2. Click on the 'Analyze' button. <br> 3. Wait for the analysis to complete. <br> 4. View the results below.</p>", unsafe_allow_html=True)
+         st.markdown("<div class='divider'></div>", unsafe_allow_html=True)
+         st.markdown("<hr>", unsafe_allow_html=True)
+
+         st.markdown('<h3 class="big-font">Enter drug names separated by commas (e.g., Aspirin, Warfarin):</h3>', unsafe_allow_html=True)
+         user_input = st.text_input("", value="", key="drug_input")
+         st.markdown('<style>div[data-testid="stTextInput"] input { font-size: 16px;}</style>', unsafe_allow_html=True)
+
+         if st.button("Analyze"):
+             if not user_input.strip():
+                 st.warning("Please enter at least one drug name.")
+                 return
+
+             owl_file_path = os.path.join("ontology", "DrugInteraction.owl")
+             if not os.path.exists(owl_file_path):
+                 logging.error(f"Ontology file not found: {owl_file_path}")
+                 st.error(f"Ontology file not found: {owl_file_path}")
+                 return
+
+             try:
+                 with st.spinner("Analyzing drug interactions..."):
+                     agent_graph = create_agent_graph(owl_file_path)
+                     result = agent_graph.invoke(GraphState(input=user_input))
+
+                 st.subheader("Analysis Results:")
+                 st.markdown(result["response"])
+
+                 logging.info("Analysis completed and results displayed.")
+             except Exception as e:
+                 logging.error(f"An error occurred: {str(e)}")
+                 st.error(f"An error occurred: {str(e)}")
+
+     with col3:
+         st.markdown("<h4 style='text-align: center; color: #5ac5c9;'>Comprehensive Drug Analysis</h4>", unsafe_allow_html=True)
+         # st.markdown("<img src='https://cdn.dribbble.com/users/228367/screenshots/4603754/chemistryset_dribbble.gif' alt='reaction' width='100%'/>", unsafe_allow_html=True)
+         st.markdown("<div class='divider'></div>", unsafe_allow_html=True)
+         image_base64 = get_base64_image("img/reaction.gif")
+         st.markdown(f"<img src='{image_base64}' width='100%'/>", unsafe_allow_html=True)
+         st.markdown("<br>", unsafe_allow_html=True)
+         st.write("""
+         - Drug interaction detection
+         - Conflict identification
+         - Similar drug suggestions
+         - Alternative medication recommendations
+         """)
+
+
+     st.markdown("<hr>", unsafe_allow_html=True)
+     st.markdown("<p style='text-align: center; color: #faffb7;'></p>", unsafe_allow_html=True)
+     st.write("""
+     <div style='text-align: center;'>
+     <p style='text-align: left; color: #d46c6c;'>Disclaimer: </p><p style='text-align: left; color: #707377;'>This application is intended for informational purposes only and does not replace professional medical advice, diagnosis, or treatment. The analysis provided is based on the data available in the ontology and may not account for all possible drug interactions. Users are strongly advised to consult a licensed healthcare provider before making any decisions based on the analysis results. The creators of this application are not responsible for any decisions made or actions taken based on the information provided.</p>
+     </div>
+     """, unsafe_allow_html=True)
+
+ if __name__ == "__main__":
+     main()