cjber committed on
Commit
1cb9fb6
·
1 Parent(s): c7f8eaf

feat: use o3mini for final document

Browse files
planning_ai/chains/hallucination_chain.py CHANGED
@@ -2,7 +2,7 @@ from langchain_core.prompts import ChatPromptTemplate
2
  from pydantic import BaseModel, Field
3
 
4
  from planning_ai.common.utils import Paths
5
- from planning_ai.llms.llm import LLM
6
 
7
  with open(Paths.PROMPTS / "hallucination.txt", "r") as f:
8
  reduce_template = f.read()
@@ -15,7 +15,7 @@ class HallucinationChecker(BaseModel):
15
  explanation: str = Field(..., description="Explain your reasoning for the score")
16
 
17
 
18
- SLLM = LLM.with_structured_output(HallucinationChecker, strict=True)
19
 
20
  hallucination_prompt = ChatPromptTemplate([("system", reduce_template)])
21
  hallucination_chain = hallucination_prompt | SLLM
 
2
  from pydantic import BaseModel, Field
3
 
4
  from planning_ai.common.utils import Paths
5
+ from planning_ai.llms.llm import GPT4o
6
 
7
  with open(Paths.PROMPTS / "hallucination.txt", "r") as f:
8
  reduce_template = f.read()
 
15
  explanation: str = Field(..., description="Explain your reasoning for the score")
16
 
17
 
18
+ SLLM = GPT4o.with_structured_output(HallucinationChecker, strict=True)
19
 
20
  hallucination_prompt = ChatPromptTemplate([("system", reduce_template)])
21
  hallucination_chain = hallucination_prompt | SLLM
planning_ai/chains/map_chain.py CHANGED
@@ -5,7 +5,7 @@ from langchain_core.prompts import ChatPromptTemplate
5
  from pydantic import BaseModel, Field, create_model
6
 
7
  from planning_ai.common.utils import Paths
8
- from planning_ai.llms.llm import LLM
9
  from planning_ai.themes import THEMES_AND_POLICIES
10
 
11
  with open(Paths.PROMPTS / "map.txt", "r") as f:
@@ -61,7 +61,7 @@ def create_dynamic_map_chain(themes, prompt: str):
61
  PolicyEnum = create_policy_enum(policy_groups)
62
  DynamicBriefSummary = create_brief_summary_model(PolicyEnum)
63
 
64
- SLLM = LLM.with_structured_output(DynamicBriefSummary, strict=True)
65
 
66
  prompt = (
67
  f"{prompt}\n\nAvailable Policies:\n\n- "
 
5
  from pydantic import BaseModel, Field, create_model
6
 
7
  from planning_ai.common.utils import Paths
8
+ from planning_ai.llms.llm import GPT4o
9
  from planning_ai.themes import THEMES_AND_POLICIES
10
 
11
  with open(Paths.PROMPTS / "map.txt", "r") as f:
 
61
  PolicyEnum = create_policy_enum(policy_groups)
62
  DynamicBriefSummary = create_brief_summary_model(PolicyEnum)
63
 
64
+ SLLM = GPT4o.with_structured_output(DynamicBriefSummary, strict=True)
65
 
66
  prompt = (
67
  f"{prompt}\n\nAvailable Policies:\n\n- "
planning_ai/chains/policy_chain.py CHANGED
@@ -2,7 +2,7 @@ from langchain_core.prompts import ChatPromptTemplate
2
  from pydantic import BaseModel
3
 
4
  from planning_ai.common.utils import Paths
5
- from planning_ai.llms.llm import LLM
6
 
7
  with open(Paths.PROMPTS / "policy.txt", "r") as f:
8
  policy_template = f.read()
@@ -19,7 +19,7 @@ class PolicyList(BaseModel):
19
  policies: list[Policy]
20
 
21
 
22
- SLLM = LLM.with_structured_output(PolicyList, strict=True)
23
 
24
 
25
  policy_prompt = ChatPromptTemplate([("system", policy_template)])
 
2
  from pydantic import BaseModel
3
 
4
  from planning_ai.common.utils import Paths
5
+ from planning_ai.llms.llm import GPT4o
6
 
7
  with open(Paths.PROMPTS / "policy.txt", "r") as f:
8
  policy_template = f.read()
 
19
  policies: list[Policy]
20
 
21
 
22
+ SLLM = GPT4o.with_structured_output(PolicyList, strict=True)
23
 
24
 
25
  policy_prompt = ChatPromptTemplate([("system", policy_template)])
planning_ai/chains/reduce_chain.py CHANGED
@@ -2,20 +2,23 @@ from langchain_core.output_parsers import StrOutputParser
2
  from langchain_core.prompts import ChatPromptTemplate
3
 
4
  from planning_ai.common.utils import Paths
5
- from planning_ai.llms.llm import LLM
6
 
7
  with open(Paths.PROMPTS / "reduce.txt", "r") as f:
8
  reduce_template = f.read()
9
 
 
 
 
10
 
11
  reduce_prompt = ChatPromptTemplate([("system", reduce_template)])
12
- reduce_chain = reduce_prompt | LLM | StrOutputParser()
 
 
13
 
14
 
15
  if __name__ == "__main__":
16
  test_summary = """
17
- Summary:
18
-
19
  The author expresses concern over the proposed mass development north-west of Cambridge,
20
  highlighting the significant growth in the area over the past twenty years,
21
  particularly with the establishment of Cambourne and the expansion of Papworth Everard.
 
2
  from langchain_core.prompts import ChatPromptTemplate
3
 
4
  from planning_ai.common.utils import Paths
5
+ from planning_ai.llms.llm import O3Mini
6
 
7
  with open(Paths.PROMPTS / "reduce.txt", "r") as f:
8
  reduce_template = f.read()
9
 
10
+ with open(Paths.PROMPTS / "reduce_final.txt", "r") as f:
11
+ reduce_template_final = f.read()
12
+
13
 
14
  reduce_prompt = ChatPromptTemplate([("system", reduce_template)])
15
+ reduce_prompt_final = ChatPromptTemplate([("system", reduce_template_final)])
16
+ reduce_chain = reduce_prompt | O3Mini | StrOutputParser()
17
+ reduce_chain_final = reduce_prompt_final | O3Mini | StrOutputParser()
18
 
19
 
20
  if __name__ == "__main__":
21
  test_summary = """
 
 
22
  The author expresses concern over the proposed mass development north-west of Cambridge,
23
  highlighting the significant growth in the area over the past twenty years,
24
  particularly with the establishment of Cambourne and the expansion of Papworth Everard.
planning_ai/chains/themes_chain.py CHANGED
@@ -5,7 +5,7 @@ from langchain_core.prompts import ChatPromptTemplate
5
  from pydantic import BaseModel, Field
6
 
7
  from planning_ai.common.utils import Paths
8
- from planning_ai.llms.llm import LLM
9
 
10
 
11
  class Theme(Enum):
@@ -32,7 +32,7 @@ with open(Paths.PROMPTS / "themes.txt", "r") as f:
32
 
33
  themes_prompt = ChatPromptTemplate.from_messages([("system", themes_template)])
34
 
35
- SLLM = LLM.with_structured_output(ThemeSelector, strict=True)
36
 
37
  themes_chain = themes_prompt | SLLM
38
 
 
5
  from pydantic import BaseModel, Field
6
 
7
  from planning_ai.common.utils import Paths
8
+ from planning_ai.llms.llm import GPT4o
9
 
10
 
11
  class Theme(Enum):
 
32
 
33
  themes_prompt = ChatPromptTemplate.from_messages([("system", themes_template)])
34
 
35
+ SLLM = GPT4o.with_structured_output(ThemeSelector, strict=True)
36
 
37
  themes_chain = themes_prompt | SLLM
38
 
planning_ai/llms/llm.py CHANGED
@@ -3,4 +3,5 @@ from langchain_openai import ChatOpenAI
3
 
4
  load_dotenv()
5
 
6
- LLM = ChatOpenAI(temperature=0, model="gpt-4o-mini")
 
 
3
 
4
  load_dotenv()
5
 
6
+ GPT4o = ChatOpenAI(temperature=0, model="gpt-4o-mini")
7
+ O3Mini = ChatOpenAI(model="o3-mini")