enricorampazzo committed
Commit d005419
1 Parent(s): 062179e

final command line implementation

app.py CHANGED
@@ -1,11 +1,11 @@
 from pathlib import Path
 
+from llm_manager.llm_parser import LlmParser
 from prompts.prompts_manager import PromptsManager
-from repository.intel_npu import IntelNpuRepository
-from repository.ollama import OllamaRepository
-from llm.llm import Model
-from repository.repository import ModelRoles
-from form.form import work_categories, build_form_data_from_answers, write_pdf_form
+
+from repository.repository import get_repository
+from repository.repository_abc import ModelRoles, Model
+from form.form import build_form_data_from_answers, write_pdf_form
 
 
 def check_for_missing_answers(parsed_questions: dict[int, str]):
@@ -14,8 +14,8 @@ def check_for_missing_answers(parsed_questions: dict[int, str]):
 
 def ask_again(missing_questions: list[int], user_questions: list[str], parsed_questions: dict[int, str]):
     for id_ in missing_questions:
-        answer = input(f"I could not find the answer to this question: {user_questions[id_].lower()}")
-        parsed_questions[id_] = answer
+        new_answer = input(f"I could not find the answer to this question: {user_questions[id_].lower()}")
+        parsed_questions[id_] = new_answer
 
 
 if __name__ == '__main__':
@@ -23,31 +23,20 @@ if __name__ == '__main__':
     user_prompt = input(f"Please describe what you need to do. To get the best results "
                         f"try to answer all the following questions:\n{'\n'.join(prompts_manager.questions)}\n\n>")
 
-    # repository = OllamaRepository(Model("llama3.1",
-    #                                     ModelRoles("system", "user", "assistant")),
-    #                               prompts_manager.system_prompt,
-    #                               )
-    repository = IntelNpuRepository("meta-llama/Meta-Llama-3-8B-Instruct")
+    repository = get_repository("intel_npu", Model("meta-llama/Meta-Llama-3-8B-Instruct",
+                                                   ModelRoles("system", "user", "assistant")),
+                                 prompts_manager.system_prompt, Path("llm_log.txt"))
     repository.init()
-    repository.send_prompt(f"Ingest the following information: {user_prompt}")
-    answers = {x: None for x in range(0, 11)}
-    answer = repository.send_prompt(f"Answer the following questions, if the answer is not present just answer null. Put the answers between curly braces, separate each answer with a comma, keep the answer brief and maintain the order in which the questions are asked. Do not add any preamble: {"\n".join(prompts_manager.verification_prompt)}")
-    for idx, a in enumerate(answer['content'].split(",")):
-        answers[idx] = None if 'null' in a else a
-
-    # for idx, q in enumerate(prompts_manager.verification_prompt):
-    #     answer = repository.send_prompt(
-    #         f"Answer the following questions, if the answer is not present just answer null. Keep the answer brief and separate each answer with a comma and maintain the order in which the questions are asked: {q}")
-    #     answers[idx] = None if 'null' in answer["content"].lower() else answer['content']
+    # repository.send_prompt(prompts_manager.ingest_user_answers(user_prompt))
+    answer = repository.send_prompt(prompts_manager.verify_user_input_prompt(user_prompt))
+    answers = LlmParser.parse_verification_prompt_answers(answer['content'])
+
     missing_answers = check_for_missing_answers(answers)
     while missing_answers:
        ask_again(missing_answers, prompts_manager.questions, answers)
        missing_answers = check_for_missing_answers(answers)
-    answer = repository.send_prompt(
-        f"The work to do is {answers[1]}. Given the following categories {work_categories.values()} which ones are the most relevant? Only return one categories, separated by a semicolon")
-    categories = []
-    for category in answer["content"].split(";"):
-        categories.extend([k for k, v in work_categories.items() if category in v])
+    answer = repository.send_prompt(prompts_manager.get_work_category(answers[1]))
+    categories = LlmParser.parse_get_categories_answer(answer['content'])
 
     form_data = build_form_data_from_answers(answers, categories, f"{Path(__file__, "..", "signature.png")}")
-    write_pdf_form(form_data, Path("signed_form1.pdf"))
+    write_pdf_form(form_data, Path("signed_form1.pdf"))
llm/llm.py DELETED
@@ -1,7 +0,0 @@
-from repository.repository import ModelRoles
-
-
-class Model:
-    def __init__(self, name: str, roles: ModelRoles):
-        self.name: str = name
-        self.roles: ModelRoles = roles
{llm → llm_manager}/__init__.py RENAMED
File without changes
llm_manager/llm_parser.py ADDED
@@ -0,0 +1,45 @@
+import re
+
+from form.form import work_categories
+
+
+class LlmParser:
+    _verification_prompt_answers_regex = re.compile(r"\|\s*([^|]*)\s?", re.MULTILINE)
+
+    @classmethod
+    def parse_verification_prompt_answers(cls, llm_answer) -> dict[int, str | None]:
+        print(f"llm answer: {llm_answer}")
+        expected_answers_count = 13
+        answers = {}
+        i = 0
+        question_id = 0
+        lines = [l for l in llm_answer.split("\n") if len(l.strip()) > 0]
+        while i < len(lines):
+            line = lines[i].strip()
+            if len(line) == 0:
+                i += 1
+            elif line.endswith("?") and i+1 < len(lines):
+                i += 1
+            elif "null" in lines[i]:
+                answers[question_id] = None
+                i += 1
+                question_id += 1
+            elif ":" in lines[i]:
+                answers[question_id] = line.split(":")[1]
+                i += 1
+                question_id += 1
+            else:
+                answers[question_id] = line
+                i += 1
+                question_id += 1
+        return answers
+
+
+
+
+    @classmethod
+    def parse_get_categories_answer(cls, category_answer) -> list[str]:
+        categories = []
+        for category in category_answer.split(";"):
+            categories.extend([k for k, v in work_categories.items() if category in v])
+        return categories
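
A minimal sketch of how the new parser is meant to be called, assuming the project packages are importable; the two-question transcript below is invented for illustration, and the commit's own end-to-end example is the new test.py at the bottom of this diff:

    from llm_manager.llm_parser import LlmParser

    # question lines ending in "?" are skipped, "null" answers become None
    transcript = "What is my full name?\nJane Doe\nWhat is my email?\nnull"
    print(LlmParser.parse_verification_prompt_answers(transcript))
    # returns {0: 'Jane Doe', 1: None}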
prompts/prompts_manager.py CHANGED
@@ -2,18 +2,34 @@ import datetime
 from pathlib import Path
 
 from utils.date_utils import get_today_date_as_dd_mm_yyyy
+from form.form import work_categories as wc
 
 
 class PromptsManager:
-    def __init__(self):
+    def __init__(self, work_categories: dict[str, str] = None):
+
+        if not work_categories:
+            self.work_categories = wc
         base_path = Path(__file__, "..")
         with open(Path(base_path, "system_prompt.txt")) as sysprompt_file:
             self.system_prompt: str = sysprompt_file.read()
         with open(Path(base_path, "questions.txt")) as questions_file:
             self.questions: list[str] = questions_file.readlines()
         with open(Path(base_path, "verification_prompt2.txt")) as verification_prompt_file:
-            verification_prompt = verification_prompt_file.readlines()
+            verification_prompt = verification_prompt_file.read()
         todays_date = get_today_date_as_dd_mm_yyyy()
-        for line in verification_prompt:
-            line.replace("{today}", todays_date)
-        self.verification_prompt: list[str] = verification_prompt
+        verification_prompt = verification_prompt.replace("{today}", todays_date)
+        self.verification_prompt: str = verification_prompt
+
+    def verify_user_input_prompt(self, user_prompt) -> str:
+        return (f"Using only this information \n {user_prompt} \n Answer the following questions, if the answer is not present just answer null. "
+                f"Put each answer in a new line, keep the answer brief "
+                f"and maintain the order in which the questions are asked. Do not add any preamble: "
+                f"{self.verification_prompt}")
+
+    def get_work_category(self, work_description: str) -> str:
+        return (f"The work to do is {work_description}. Given the following categories {", ".join(self.work_categories.values())} "
+                f"which ones are the most relevant? Only return the categories, separated by a semicolon")
+
+    def ingest_user_answers(self, user_prompt: str) -> str:
+        return f"Ingest the following information: {user_prompt}"
prompts/system_prompt.txt CHANGED
@@ -1,5 +1,5 @@
 You are a helpful assistant helping tenants and owners when filling minor work permits. Do not answer question outside
 of this domain.
-Keep in mind that 'JBR' is a community, and buildings are named after the following names followed by a number:
+Keep in mind that 'JBR' is a community, and buildings are named after the following names followed by one or more numbers:
 Murjan, Bahar, Shams, Amwaj, Sadaf
-for example "Murjan 1"
+for example "Murjan 1", "Bahar 3", "Sadaf 8"
prompts/verification_prompt2.txt CHANGED
@@ -2,12 +2,12 @@ What is my full name?
 What is the nature of the work I need to do?
 In which community is the work taking place?
 In which building?
-In which unit/apartment number?
+In which unit/apartment number? Answer only with the unit number
 Am I the owner or the tenant?
-In which date is the work taking place? Please answer with just a date formatted as dd/mm/yyyy. In case I used expressions like today, tomorrow, in two days, ecc, know that today it is 08/09/2024
+In which date is the work taking place? Please answer with just a date formatted as dd/mm/yyyy. In case I used expressions like today, tomorrow, in two days, ecc, know that today it is {today}
 In which date will the work finish? Please answer with just a date formatted as dd/mm/yyyy. In case I used expressions like today, tomorrow, in two days, ecc, know that today it is {today}. If no date is provided, consider that it will finish on the same day as the start date
 What is my contact number?
 What is the name of the contracting company?
 What is the contact number of the contracting company?
 What is the email of the contracting company?
-What is your email?
+What is my email?
repository/intel_npu.py CHANGED
@@ -1,47 +1,57 @@
-from intel_npu_acceleration_library import NPUModelForCausalLM, int4, int8
+import json
+from pathlib import Path
+
+from intel_npu_acceleration_library import NPUModelForCausalLM, int4
 from intel_npu_acceleration_library.compiler import CompilerConfig
 from transformers import AutoTokenizer
 
-from repository.repository import Repository, ModelRoles
+from repository.repository_abc import Repository, Model
 
 
 class IntelNpuRepository(Repository):
-    def __init__(self, model_name: str):
-        self.model_name = model_name
+    def __init__(self, model_info: Model, system_msg: str = None, log_to_file: Path = None):
+        self.model_info: Model = model_info
         self.message_history: list[dict[str, str]] = []
-        self.roles = ModelRoles("system", "user", "assistant")
+        self.set_message_for_role(self.model_info.roles.system_role, system_msg)
         self.model = None
         self.tokenizer = None
         self.terminators = None
+        self.log_to_file = log_to_file
 
-    def get_model_roles(self) -> ModelRoles:
-        return self.roles
-
-    def get_model_name(self) -> str:
-        return self.model_name
+    def get_model_info(self) -> Model:
+        return self.model_info
 
     def get_message_history(self) -> list[dict[str, str]]:
         return self.message_history
 
-    def set_message_for_role(self, message:str, role: str):
-        self.get_message_history().append({"role": role, "content": message})
-
     def init(self):
         compiler_conf = CompilerConfig(dtype=int4)
-        self.model = NPUModelForCausalLM.from_pretrained(self.get_model_name(), use_cache=True, config=compiler_conf, export=True, temperature=0).eval()
-        self.tokenizer = AutoTokenizer.from_pretrained(self.get_model_name())
+        self.model = NPUModelForCausalLM.from_pretrained(self.model_info.name, use_cache=True, config=compiler_conf,
+                                                         export=True, temperature=0).eval()
+        self.tokenizer = AutoTokenizer.from_pretrained(self.model_info.name)
         self.terminators = [self.tokenizer.eos_token_id, self.tokenizer.convert_tokens_to_ids("<|eot_id|>")]
 
     def send_prompt(self, prompt: str, add_to_history: bool = True) -> dict[str, str]:
-        self.get_message_history().append({"role":self.get_model_roles().user_role, "content":prompt})
-        input_ids = (self.tokenizer.apply_chat_template(self.get_message_history(), add_generation_prompt=True, return_tensors="pt")
+        print("prompt to be sent: " + prompt)
+        user_prompt = {"role": self.model_info.roles.user_role, "content": prompt}
+        if self.log_to_file:
+            with open(self.log_to_file, "a+") as log_file:
+                log_file.write(json.dumps(user_prompt, indent=2))
+                log_file.write("\n")
+        self.get_message_history().append(user_prompt)
+        input_ids = (self.tokenizer.apply_chat_template(self.get_message_history(), add_generation_prompt=True,
+                                                        return_tensors="pt")
                      .to(self.model.device))
         outputs = self.model.generate(input_ids, eos_token_id=self.terminators, do_sample=True, max_new_tokens=2000)
         generated_token_array = outputs[0][len(input_ids[0]):]
         generated_tokens = "".join(self.tokenizer.batch_decode(generated_token_array, skip_special_tokens=True))
-        answer = {"role": self.get_model_roles().ai_role, "content":generated_tokens}
+        answer = {"role": self.get_model_info().roles.ai_role, "content": generated_tokens}
+        if self.log_to_file:
+            with open(self.log_to_file, "a+") as log_file:
+                log_file.write(json.dumps(answer, indent=2))
+                log_file.write("\n")
         if add_to_history:
             self.message_history.append(answer)
         else:
             self.message_history.pop()
-        return answer
+        return answer
repository/ollama.py CHANGED
@@ -1,29 +1,27 @@
 import ollama
 from ollama import Options
 
-from llm.llm import Model
-from repository.repository import Repository, ModelRoles
+from repository.repository_abc import Repository, Model
 
 
 class OllamaRepository(Repository):
-    def __init__(self, model: Model, system_msg):
-        self.model: Model = model
+    def __init__(self, model_info: Model, system_msg):
+        self.model_info: Model = model_info
         self.system_msg: str = system_msg
-        self.message_history: list[dict[str, str]] = [{"role": self.model.roles.system_role, "content": system_msg}]
+        self.message_history: list[dict[str, str]] = [
+            {"role": self.model_info.roles.system_role, "content": system_msg}]
 
     def send_prompt(self, prompt: str, add_to_history: bool = True) -> dict[str, str]:
+        print("Prompt to be sent:" + prompt)
         options: Options = Options(temperature=0)
-        self.message_history.append({"role": self.model.roles.user_role, "content":prompt})
-        response = ollama.chat(self.model.name, self.message_history, options=options)
-        answer = {"role": self.model.roles.ai_role, "content": response["message"]["content"]}
+        self.message_history.append({"role": self.model_info.roles.user_role, "content": prompt})
+        response = ollama.chat(self.model_info.name, self.message_history, options=options)
+        answer = {"role": self.model_info.roles.ai_role, "content": response["message"]["content"]}
        if add_to_history:
            self.message_history.append(answer)
        else:
            self.message_history.pop()
        return answer
 
-    def get_model_name(self) -> str:
-        return self.model.name
-
-    def get_model_roles(self) -> ModelRoles:
-        return self.model.roles
+    def get_model_info(self) -> Model:
+        return self.model_info
repository/repository.py CHANGED
@@ -1,29 +1,15 @@
-import abc
+from pathlib import Path
 
+from repository.intel_npu import IntelNpuRepository
+from repository.ollama import OllamaRepository
+from repository.repository_abc import Model
 
-class ModelRoles:
-    def __init__(self, system_role: str, user_role: str, ai_role: str):
-        self.system_role: str = system_role
-        self.user_role: str = user_role
-        self.ai_role: str = ai_role
 
-
-class Repository(abc.ABC):
-
-    def get_model_name(self) -> str:
-        pass
-
-    def get_model_roles(self) -> ModelRoles:
-        pass
-
-    def get_message_history(self) -> list[dict[str, str]]:
-        pass
-
-    def send_prompt(self, prompt: str, add_to_history: bool) -> dict[str, str]:
-        pass
-
-    def set_message_for_role(self, message:str, role: ModelRoles):
-        pass
-
-    def init(self):
-        pass
+def get_repository(implementation: str, model: Model, system_msg: str = None, log_to_file: Path = None):
+    known_implementations = ["ollama", "intel_npu"]
+    if not implementation or implementation.lower() not in ["ollama", "intel_npu"]:
+        raise ValueError(f"Unknown implementation {implementation}. Known implementations: {known_implementations}")
+    if "ollama" == implementation:
+        return OllamaRepository(model, system_msg)
+    if "intel_npu" == implementation:
+        return IntelNpuRepository(model, system_msg, log_to_file)
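
A minimal sketch of the new factory in use, mirroring the call in app.py above; only the first argument changes when switching between the "ollama" and "intel_npu" backends (the system message is illustrative, and the NPU backend still needs compatible hardware):

    from pathlib import Path

    from repository.repository import get_repository
    from repository.repository_abc import Model, ModelRoles

    model = Model("meta-llama/Meta-Llama-3-8B-Instruct", ModelRoles("system", "user", "assistant"))
    repository = get_repository("intel_npu", model, "You are a helpful assistant", Path("llm_log.txt"))
    repository.init()
    answer = repository.send_prompt("Hello")  # returns {"role": ..., "content": ...}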
repository/repository_abc.py ADDED
@@ -0,0 +1,35 @@
+import abc
+
+
+class ModelRoles:
+    def __init__(self, system_role: str, user_role: str, ai_role: str):
+        self.system_role: str = system_role
+        self.user_role: str = user_role
+        self.ai_role: str = ai_role
+
+
+class Model:
+    def __init__(self, model_name: str, model_roles: ModelRoles):
+        self.name = model_name
+        self.roles = model_roles
+
+
+class Repository(abc.ABC):
+
+    def get_model_info(self) -> Model:
+        pass
+
+    def get_model_roles(self) -> ModelRoles:
+        pass
+
+    def get_message_history(self) -> list[dict[str, str]]:
+        pass
+
+    def send_prompt(self, prompt: str, add_to_history: bool) -> dict[str, str]:
+        pass
+
+    def set_message_for_role(self, message: str, role: str):
+        self.get_message_history().append({"role": role, "content": message})
+
+    def init(self):
+        pass
requirements.txt CHANGED
@@ -1,3 +1,4 @@
 PyPDFForm
 ollama
+transformers
 intel-npu-acceleration-library
test.py ADDED
@@ -0,0 +1,56 @@
+llm_answer = """What is my full name?
+
+Enrico Rampazzo
+
+What is the nature of the work I need to do?
+
+A/C clean
+
+In which community is the work taking place?
+
+JBR
+
+In which building?
+
+Amwaj
+
+In which unit/apartment number?
+
+99999
+
+Am I the owner or the tenant?
+
+Owner
+
+In which date is the work taking place?
+
+17/09/2024
+
+In which date will the work finish?
+
+17/09/2024
+
+What is my contact number?
+
+989764
+
+What is the name of the contracting company?
+
+Breathe Maintenance
+
+What is the contact number of the contracting company?
+
+1234567
+
+What is the email of the contracting company?
+
+
+
+What is my email?
+
+
+"""
+from llm_manager.llm_parser import LlmParser
+
+llm_parser = LlmParser
+llm_parser.parse_verification_prompt_answers(llm_answer)