Upload example-evidence-checking-print-out.py
example-evidence-checking-print-out.py
ADDED
@@ -0,0 +1,66 @@
from llmware.prompts import Prompt
from llmware.configs import LLMWareConfig
import os
import time


def contract_analysis_w_fact_checking(model_name):

    # note: assumes a preparation step of putting pdf contract documents in this folder
    # (see the optional preparation sketch after this function)
    contracts_path = "/home/ubuntu/contracts/"

    print("\nupdate: loading model - ", model_name)

    # create prompt object
    prompter = Prompt().load_model(model_name)

    research = {"topic": "base salary", "prompt": "What is the executive's base salary?"}

    # start the clock to measure processing cycle time
    t0 = time.time()

    for i, contract in enumerate(os.listdir(contracts_path)):

        print("\nAnalyzing Contract - ", str(i+1), contract)
        print("Question: ", research["prompt"])

        # contract is parsed, text-chunked, and then filtered by "base salary"
        source = prompter.add_source_document(contracts_path, contract, query=research["topic"])

        # calling the LLM with 'source' information from the contract automatically packaged into the prompt
        responses = prompter.prompt_with_source(research["prompt"], prompt_name="just_the_facts", temperature=0.3)

        # run several fact checks
        ev_numbers = prompter.evidence_check_numbers(responses)
        ev_sources = prompter.evidence_check_sources(responses)
        ev_stats = prompter.evidence_comparison_stats(responses)
        z = prompter.classify_not_found_response(responses, parse_response=True, evidence_match=True, ask_the_model=False)

        for r, response in enumerate(responses):
            print("LLM Response: ", response["llm_response"])
            print("Numbers: ", ev_numbers[r]["fact_check"])
            print("Sources: ", ev_sources[r]["source_review"])
            print("Stats: ", ev_stats[r]["comparison_stats"])
            print("Not Found Check: ", z[r])

        # we're done with this contract - clear the source from the prompt
        prompter.clear_source_materials()

    # save a jsonl report of the prompt interactions to the /prompt_history folder
    print("\nupdate: prompt state saved at: ", os.path.join(LLMWareConfig.get_prompt_path(), prompter.prompt_id))
    print("update: time processing: ", time.time() - t0)

    prompter.save_state()

    return 0
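

# Optional preparation sketch (not part of the original upload): the function above
# assumes that pdf contracts have already been copied into contracts_path. One way
# to get sample documents is llmware's Setup helper, which downloads the library's
# bundled sample files, including an "Agreements" folder of employment agreements.
# The helper call and folder name are assumptions drawn from other llmware examples -
# adjust the path for your environment.

def pull_sample_contracts():

    from llmware.setup import Setup

    # download the llmware sample files locally and capture the base path
    sample_files_path = Setup().load_sample_files()

    # the employment agreement pdfs are expected under the "Agreements" sub-folder
    return os.path.join(sample_files_path, "Agreements")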


# execution script

if __name__ == "__main__":

    model_name = "llmware/dragon-deci-6b-v0"
    contract_analysis_w_fact_checking(model_name)
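

# Optional variation sketch (not part of the original upload): the same pattern
# extends to several questions per contract - iterate over a list of topic/prompt
# pairs and repeat the source filtering and fact checks for each one. The second
# question in the usage note below is an illustrative placeholder, not taken from
# the original example.

def multi_question_analysis(model_name, contracts_path, research_list):

    prompter = Prompt().load_model(model_name)

    for contract in os.listdir(contracts_path):

        for research in research_list:

            # filter the contract's text chunks by the current topic
            prompter.add_source_document(contracts_path, contract, query=research["topic"])

            responses = prompter.prompt_with_source(research["prompt"], prompt_name="just_the_facts",
                                                    temperature=0.3)

            # apply the same numeric fact check per question
            ev_numbers = prompter.evidence_check_numbers(responses)

            for r, response in enumerate(responses):
                print("LLM Response: ", response["llm_response"])
                print("Numbers: ", ev_numbers[r]["fact_check"])

            # clear the source before moving to the next question / contract
            prompter.clear_source_materials()

    prompter.save_state()

    return 0


# example usage of the variation above:
#   questions = [{"topic": "base salary", "prompt": "What is the executive's base salary?"},
#                {"topic": "vacation", "prompt": "How many vacation days will the executive receive?"}]
#   multi_question_analysis("llmware/dragon-deci-6b-v0", "/home/ubuntu/contracts/", questions)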