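"""DAMHelper app.py

Collects a free-text description of the work to be done, asks an LLM to map it
onto a fixed list of questions, asks the user directly for anything the LLM
could not extract, looks up the applicable work categories, and writes out a
filled PDF form. Two front ends are provided: a command-line flow and a
Streamlit flow backed by the "ondemand" repository.
"""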
import os
import uuid
from pathlib import Path

import streamlit as st

from form.form import build_form_data_from_answers, write_pdf_form
from llm_manager.llm_parser import LlmParser
from prompts.prompts_manager import PromptsManager
from repository.repository import get_repository
from repository.repository_abc import Model, ModelRoles
user_msg = "Please describe what you need to do. To get the best results try to answer all the following questions:"


def check_for_missing_answers(parsed_questions: dict[int, str]) -> list[int]:
    # A question counts as missing when the LLM produced no answer for it.
    return [k for k in parsed_questions if parsed_questions[k] is None]


def ask_again(missing_questions: list[int], user_questions: list[str], parsed_questions: dict[int, str]):
    # CLI fallback: ask the user directly for each unanswered question.
    for id_ in missing_questions:
        new_answer = input(f"I could not find the answer to this question: {user_questions[id_].lower()}")
        parsed_questions[id_] = new_answer

def use_command_line():
    prompts_manager = PromptsManager()
    questions = "\n".join(prompts_manager.questions)
    user_prompt = input(f"{user_msg}\n{questions}\n\n>")
    repository = get_repository("intel_npu",
                                Model("meta-llama/Meta-Llama-3-8B-Instruct",
                                      ModelRoles("system", "user", "assistant")),
                                prompts_manager.system_prompt, Path("llm_log.txt"))
    repository.init()
    # repository.send_prompt(prompts_manager.ingest_user_answers(user_prompt))
    answer = repository.send_prompt(prompts_manager.verify_user_input_prompt(user_prompt))
    answers = LlmParser.parse_verification_prompt_answers(answer["content"])
    missing_answers = check_for_missing_answers(answers)
    while missing_answers:
        # Keep asking until every question has an answer.
        ask_again(missing_answers, prompts_manager.questions, answers)
        missing_answers = check_for_missing_answers(answers)
    answer = repository.send_prompt(prompts_manager.get_work_category(answers[1]))
    categories = LlmParser.parse_get_categories_answer(answer["content"])
    form_data = build_form_data_from_answers(answers, categories, str(Path(__file__).parent / "signature.png"))
    write_pdf_form(form_data, Path("signed_form1.pdf"))

def update_answer(answers, missing_answer):
    # Callback-style helper: copy a widget value (stored under the "ma_<id>"
    # session-state key) back into the answers dict. Currently unused by the
    # Streamlit flow below, which reads the widget values back after submit.
    answers[missing_answer] = getattr(st.session_state, f"ma_{missing_answer}")
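# A sketch (hypothetical, not part of the current flow) of how update_answer
# could instead be wired up as an on_change callback on the missing-answer inputs:
#   st.text_input(pm.questions[ma].lower(), key=f"ma_{ma}",
#                 on_change=update_answer, args=(answers, ma))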

def use_streamlit():
    pm = PromptsManager()
    questions = "\n".join(pm.questions)
    help_ = f"{user_msg}\n\n{questions}"
    repository = get_repository("ondemand", Model("ondemand-gpt-3.5-turbo", ModelRoles("system", "user", "assistant")))
    if not st.session_state.get("step"):
        # Step 1: collect the free-text description and an optional signature image.
        with st.form("Please describe your request"):
            user_input = st.text_area("Your input", height=700, label_visibility="hidden",
                                      placeholder=help_, help=help_)
            signature = st.file_uploader("Your signature", key="file_upload")
            st.session_state["signature"] = signature
            st.session_state["session_id"] = str(uuid.uuid4())
            button = st.form_submit_button()
            if button:
                # Alternative local backend, kept for reference:
                # llama3 = "meta-llama/Meta-Llama-3-8B-Instruct"
                # repository = get_repository("intel_npu",
                #                             Model(llama3, ModelRoles("system", "user", "assistant")),
                #                             pm.system_prompt, Path("llm_log.txt"))
                # Persist the input so later steps survive Streamlit's rerun cycle.
                st.session_state["user_input"] = user_input
                st.session_state["step"] = "parsing_answers"
if st.session_state.get("step") == "parsing_answers":
with st.status("initialising LLM"):
repository.init()
with st.status("waiting for LLM"):
repository.send_prompt(pm.ingest_user_answers(user_input))
answer = repository.send_prompt(pm.verify_user_input_prompt(user_input))
with st.status("Checking for missing answers"):
st.session_state["answers"] = LlmParser.parse_verification_prompt_answers(answer['content'])
st.session_state["missing_answers"] = check_for_missing_answers(st.session_state["answers"])
if not st.session_state.get("missing_answers"):
st.session_state["step"] = "check_category"
else:
st.session_state["step"] = "ask_again"
if st.session_state.get("step") == "ask_again":
with st.form("form1"):
for ma in st.session_state["missing_answers"]:
st.text_input(pm.questions[ma].lower(), key=ma)
submitted = st.form_submit_button("Submit answers")
if submitted:
st.session_state["step"] = "check_category"
for ma in st.session_state["missing_answers"]:
st.session_state["answers"][ma] = st.session_state[ma]
if st.session_state.get("step") == "check_category":
with st.status("finding the work categories applicable to your work"):
answer = repository.send_prompt(pm.get_work_category(st.session_state["answers"][1]))
categories = LlmParser.parse_get_categories_answer(answer['content'])
with st.status("categories found, creating PDF form"):
form_filename = f"{st.session_state["session_id"]}_form.pdf"
st.session_state["form_filename"] = form_filename
form_data = build_form_data_from_answers(st.session_state["answers"], categories,
st.session_state.get("signature"))
write_pdf_form(form_data, Path(form_filename))
st.session_state["step"] = "form_created"
if st.session_state.get("step") == "form_created":
with open(Path(st.session_state["form_filename"]), "rb") as form:
st.download_button("download form", form.read(), mime="application/pdf")
start_over_button = st.button("Start over")
if start_over_button:
del st.session_state["step"]
os.unlink(st.session_state["form_filename"])
use_streamlit()
#use_command_line()
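# To try the web UI locally (assuming Streamlit is installed): streamlit run app.py
# To use the command-line flow instead, swap the two calls above.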