import json
import os
import uuid
from datetime import date, datetime
from typing import Optional

import requests
import streamlit as st
from pydantic import BaseModel, Field


placeHolderPersona1 = """## Mission Statement |
|
My mission is to utilize my expertise to aid in the medical triaging process by providing a clear, concise, and accurate assessment of potential arthritis related conditions. |
|
|
|
# Triaging process |
|
Ensure you stay on the topic of asking questions to triage the potential of Rheumatoid arthritis. |
|
Ask only one question at a time. |
|
Provide some context or clarification around the follow-up questions you ask. |
|
Do not converse with the customer. |
|
Be as concise as possible. |
|
Do not give a diagnosis """ |
|
|
|
placeHolderPersona2 = """## Mission |
|
To analyse a clinical triaging discussion between a patient and AI doctor interactions with a focus on Immunology symptoms, medical history, and test results to deduce the most probable Immunology diagnosis. |
|
|
|
## Diagnostic Process |
|
Upon receipt of the clinical notes, I will follow a systematic approach to arrive at a diagnosis: |
|
1. Review the patient's presenting symptoms and consider their relevance to immunopathology. |
|
2. Cross-reference the gathered information with my knowledge base of immunology to identify patterns or indicators of specific immune disorders. |
|
3. Formulate a diagnosis from the potential conditions. |
|
4. Determine the most likely diagnosis and assign a confidence score from 1-100, with 100 being absolute certainty. |
|
|
|
# Limitations |
|
While I am specialized in immunology, I understand that not all cases will fall neatly within my domain. In instances where the clinical notes point to a condition outside of my expertise, I will provide the best possible diagnosis with the acknowledgment that my confidence score will reflect the limitations of my specialization in those cases""" |
|
|
|
|
|
|
|
class ChatRequestClient(BaseModel):
    user_id: str
    user_input: str
    numberOfQuestions: int
    welcomeMessage: str
    llm1: str
    tokens1: int
    temperature1: float
    persona1SystemMessage: str
    persona2SystemMessage: str
    userMessage2: str
    llm2: str
    tokens2: int
    temperature2: float

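
# The model above is serialised and POSTed to the local backend (see
# call_chat_api below). For illustration only, the request body looks
# roughly like this -- the concrete values here are made up:
#
#   {
#       "user_id": "42", "user_input": "My joints ache in the morning",
#       "numberOfQuestions": 5, "welcomeMessage": "",
#       "llm1": "GPT-4", "tokens1": 500, "temperature1": 0.6,
#       "persona1SystemMessage": "...", "persona2SystemMessage": "...",
#       "userMessage2": "This is the conversation to date, ",
#       "llm2": "GPT-4", "tokens2": 500, "temperature2": 0.5
#   }
#
# The backend is assumed to expose a matching request model on its /chat/
# route; that server-side definition is not part of this file.
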

def call_chat_api(data: ChatRequestClient):
    # Local backend chat endpoint (assumed to be running on this host/port).
    url = "http://127.0.0.1:8000/chat/"

    # Serialise the validated Pydantic model into a plain dict for the request
    # body (use data.model_dump() instead under Pydantic v2).
    validated_data = data.dict()

    response = requests.post(url, json=validated_data)

    if response.status_code == 200:
        return response.json()
    else:
        return "An error occurred"

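
# call_chat_api is expected to return a JSON object which, based on how the
# response is used further down in this file, carries at least these keys
# (a sketch inferred from the client code, not from the backend itself):
#
#   {
#       "content": "<the agent's reply>",
#       "elapsed_time": 1.23,   # numeric; rendered via format_elapsed_time()
#       "count": 2              # questions asked so far, shown against numberOfQuestions
#   }
#
# Any other fields returned by the backend are simply ignored here.
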

def genuuid():
    """Generate a random UUID used to label the current session."""
    return uuid.uuid4()


def format_elapsed_time(time):
    """Format an elapsed time value to two decimal places for display."""
    return "{:.2f}".format(time)


def update_history(response):
    # Make sure the history list exists before appending to it.
    if 'history' not in st.session_state:
        st.session_state.history = []

    st.session_state.history.append("Agent: " + response['content'])


def display_history():
    # Render each stored exchange in its own text area; the explicit key keeps
    # Streamlit from raising a duplicate-widget error for identical labels.
    for i, item in enumerate(st.session_state.history):
        st.text_area(label="", value=item, height=75, key=f"history_{i}")


st.title('LLM-Powered Agent Interaction')

st.sidebar.image('agentBuilderLogo.png')
st.sidebar.header("Agent Personas Design")

st.sidebar.subheader("Persona 1 Settings")
numberOfQuestions = st.sidebar.slider("Number of Questions", min_value=0, max_value=10, step=1, value=5, key='persona1_questions')
persona1SystemMessage = st.sidebar.text_area("Define Triaging Persona", value=placeHolderPersona1, height=150)
with st.sidebar.expander("See explanation"):
    st.write("Personas are the individual members of the business function, the agent equivalent of employees. Each has a job- or personality-specific design and is crafted to think and reason based on that design. They have free rein to feed back on the task however they see most appropriate.")
    st.image("agentPersona1.png")
llm1 = st.sidebar.selectbox("Model Selection", ['GPT-4', 'GPT3.5'], key='persona1_size')
temp1 = st.sidebar.slider("Temperature", min_value=0.0, max_value=1.0, step=0.1, value=0.6, key='persona1_temp')
tokens1 = st.sidebar.slider("Tokens", min_value=0, max_value=4000, step=100, value=500, key='persona1_tokens')

st.sidebar.subheader("Personas 2 Settings") |
|
persona2SystemMessage = st.sidebar.text_area("Define Selection Persona", value=placeHolderPersona2, height=150) |
|
with st.sidebar.expander("See explanation"): |
|
st.write("Personas: the individual members of the business function / agent equivalent to employee’s. They have job or personality specific design and are crafted to think, and reason based on this job or personality specific design. They have free reign to feedback to the task however they see most appropriate ") |
|
st.image("agentPersona2.png") |
|
llm2 = st.sidebar.selectbox("Model Selection", ['GPT-4', 'GPT3.5'], key='persona2_size') |
|
temp2 = st.sidebar.slider("Tempreature", min_value=0.0, max_value=1.0, step=0.1, value=0.5, key='persona2_temp') |
|
tokens2 = st.sidebar.slider("Tokens", min_value=0, max_value=4000, step=100, value=500, key='persona2_tokens') |
|
userMessage2 = st.sidebar.text_area("Define User Message", value="This is the conversation todate, ", height=150) |
|
st.sidebar.caption(f"Session ID: {genuuid()}") |
|
|
|
st.header("Chat with the Agents") |
|
user_id = st.text_input("User ID:", key="user_id") |
|
user_input = st.text_input("Write your message here:", key="user_input") |
|
|
|
if 'history' not in st.session_state: |
|
st.session_state.history = [] |
|
|
|
if st.button("Send"): |
|
|
|
data = ChatRequestClient( |
|
user_id=user_id, |
|
user_input=user_input, |
|
numberOfQuestions=numberOfQuestions, |
|
welcomeMessage="", |
|
llm1=llm1, |
|
tokens1=tokens1, |
|
temperature1=temp1, |
|
persona1SystemMessage=persona1SystemMessage, |
|
persona2SystemMessage=persona2SystemMessage, |
|
userMessage2=userMessage2, |
|
llm2=llm2, |
|
tokens2=tokens2, |
|
temperature2=temp2 |
|
) |
|
response = call_chat_api(data) |
|
|
|
|
|
st.markdown(f"##### Time take: {format_elapsed_time(response['elapsed_time'])}") |
|
st.markdown(f"##### Question Count : {response['count']} of {numberOfQuestions}") |
|
|
|
|
|
|
|
|
|
st.session_state.history.append("You: " + user_input) |
|
|
|
st.session_state.history.append("Agent: " + response['content']) |
|
for message in st.session_state.history: |
|
st.write(message) |
|
|
|
|
|
|