cjber committed
Commit 379aa81 · 1 Parent(s): cd5b50a

refactor: Update logging levels for improved message severity clarity

planning_ai/logging.py CHANGED
@@ -1,3 +1,3 @@
 from loguru import logger
 
-logger.add("logs/model.log", rotation="1 MB", retention="10 days", level="INFO")
+logger.add("logs/model.log", rotation="1 MB", retention="10 days", level="DEBUG")
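For context on what the sink change does: a loguru sink records every message at or above its configured level, so moving the file sink from INFO down to DEBUG widens what is captured even as the individual call sites below are downgraded from WARNING to INFO. A minimal sketch, assuming an illustrative log path:

    from loguru import logger

    logger.remove()  # drop the default stderr sink so only the file sink applies
    logger.add("logs/demo.log", rotation="1 MB", retention="10 days", level="DEBUG")

    logger.debug("recorded: DEBUG meets the sink threshold")
    logger.info("recorded: routine progress, the new level for most messages in this commit")
    logger.warning("recorded: reserved for unusual but recoverable conditions")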
planning_ai/nodes/hallucination_node.py CHANGED
@@ -24,13 +24,13 @@ def check_hallucination(state: DocumentState):
         dict: A dictionary containing either a list of fixed summaries or hallucinations
         that need to be addressed.
     """
-    logger.warning(f"Checking hallucinations for document {state['filename']}")
+    logger.info(f"Checking hallucinations for document {state['filename']}")
 
     if state["processed"] or (state["refinement_attempts"] >= MAX_ATTEMPTS):
-        logger.warning(f"Max attempts exceeded for document: {state['filename']}")
+        logger.error(f"Max attempts exceeded for document: {state['filename']}")
         return {"documents": [{**state, "failed": True, "processed": True}]}
     elif not state["is_hallucinated"]:
-        logger.warning(f"Finished processing document: {state['filename']}")
+        logger.info(f"Finished processing document: {state['filename']}")
         return {"documents": [{**state, "processed": True}]}
 
     try:
@@ -40,7 +40,7 @@ def check_hallucination(state: DocumentState):
         is_hallucinated = response.score == 0
         refinement_attempts = state["refinement_attempts"] + 1
     except Exception as e:
-        logger.error(f"Failed to decode JSON {state['filename']}: {e}.")
+        logger.error(f"Failed to decode JSON {state['filename']}: {e}")
         return {
             "documents": [
                 {
@@ -60,7 +60,7 @@ def check_hallucination(state: DocumentState):
         "refinement_attempts": refinement_attempts,
         "is_hallucinated": is_hallucinated,
     }
-    logger.warning(f"Hallucination for {state['filename']}: {is_hallucinated}")
+    logger.info(f"Hallucination for {state['filename']}: {is_hallucinated}")
     return (
         {"documents": [{**out, "processed": False}]}
         if is_hallucinated
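One way to sanity-check the new severities without tailing the log file is a callable sink that captures formatted records; this is a hypothetical test sketch, not part of the commit:

    from loguru import logger

    records = []
    logger.remove()
    logger.add(records.append, level="DEBUG", format="{level}|{message}")

    logger.error("Max attempts exceeded for document: example.pdf")
    logger.info("Finished processing document: example.pdf")

    assert records[0].startswith("ERROR|")
    assert records[1].startswith("INFO|")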
planning_ai/nodes/map_node.py CHANGED
@@ -28,7 +28,7 @@ def retrieve_themes(state: DocumentState) -> DocumentState:
 
 
 def add_entities(state: OverallState) -> OverallState:
-    logger.warning("Adding entities to all documents.")
+    logger.info("Adding entities to all documents.")
     for idx, document in enumerate(
         nlp.pipe(
             [doc["document"].page_content for doc in state["documents"]],
@@ -73,15 +73,15 @@ def generate_summary(state: DocumentState) -> dict:
     Returns:
         dict: A dictionary containing the generated summary and updated document state.
     """
-    logger.warning(f"Generating summary for document: {state['filename']}")
+    logger.info(f"Generating summary for document: {state['filename']}")
 
-    logger.warning(f"Starting PII removal for: {state['filename']}")
+    logger.info(f"Starting PII removal for: {state['filename']}")
     state["document"].page_content = remove_pii(state["document"].page_content)
-    logger.warning(f"Retrieving themes for: {state['filename']}")
+    logger.info(f"Retrieving themes for: {state['filename']}")
     state = retrieve_themes(state)
 
     if not state["themes"]:
-        logger.error(f"No themes found for {state['filename']}")
+        logger.warning(f"No themes found for {state['filename']}")
         return {
             "documents": [
                 {
@@ -99,7 +99,7 @@ def generate_summary(state: DocumentState) -> dict:
     try:
         response = map_chain.invoke({"context": state["document"].page_content})
     except Exception as e:
-        logger.error(f"Failed to decode JSON {state['document']}: {e}.")
+        logger.error(f"Failed to decode JSON {state['document']}: {e}")
         return {
             "documents": [
                 {
@@ -112,7 +112,7 @@ def generate_summary(state: DocumentState) -> dict:
             }
         ]
     }
-    logger.warning(f"Summary generation completed for document: {state['filename']}")
+    logger.info(f"Summary generation completed for document: {state['filename']}")
 
     return {
         "documents": [
@@ -129,5 +129,5 @@ def generate_summary(state: DocumentState) -> dict:
 
 
 def map_documents(state: OverallState) -> list[Send]:
-    logger.warning("Mapping documents to generate summaries.")
+    logger.info("Mapping documents to generate summaries.")
     return [Send("generate_summary", document) for document in state["documents"]]
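A note on why map_documents logs once while the per-document lines repeat: each Send fans generate_summary out over one document, so that node's messages are emitted once per document. A hypothetical sketch of the fan-out (the document contents are invented, and the Send import path assumes a recent langgraph):

    from langgraph.constants import Send

    documents = [{"filename": "a.pdf"}, {"filename": "b.pdf"}]
    sends = [Send("generate_summary", document) for document in documents]

    # One Send per document, all routed to the same node.
    assert [s.node for s in sends] == ["generate_summary"] * 2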
planning_ai/nodes/reduce_node.py CHANGED
@@ -82,7 +82,7 @@ def batch_generate_executive_summaries(summaries):
     final_responses = []
     batch_size = 50
     for i in range(0, len(summaries_text), batch_size):
-        logger.warning(
+        logger.info(
             f"Processing batches... {int(i/50)+1}/{(len(summaries_text)//batch_size)+1}"
         )
         batch = summaries_text[i : i + batch_size]
@@ -98,7 +98,7 @@ def generate_policy_output(policy_groups):
         .agg(pl.col("details"), pl.col("doc_id"))
         .rows(named=True)
     ):
-        logger.warning(f"Processing policies: {policy['policies']}...")
+        logger.info(f"Processing policies: {policy['policies']}...")
         zipped = [
             f"{bullet} Doc ID: {id}"
             for (bullet, id) in zip(policy["details"], policy["doc_id"], strict=True)
@@ -121,7 +121,7 @@ def generate_policy_output(policy_groups):
 def generate_final_report(state: OverallState):
     final_docs = [doc for doc in state["documents"] if doc["processed"]]
     if len(final_docs) == state["n_docs"]:
-        logging.warning(f"Generating final report... ({len(final_docs)} documents)")
+        logger.info(f"Generating final report... ({len(final_docs)} documents)")
         return final_output(final_docs)
 
 
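The batch-progress counter above computes the current batch as int(i/50)+1 with a hard-coded divisor next to the batch_size variable; the two expressions agree only while batch_size stays 50. A standalone sketch of the same counter written against batch_size, using invented sample data:

    summaries_text = [f"summary {n}" for n in range(120)]
    batch_size = 50

    for i in range(0, len(summaries_text), batch_size):
        batch = summaries_text[i : i + batch_size]
        # i // batch_size + 1 matches int(i / 50) + 1 whenever batch_size == 50
        print(f"Processing batches... {i // batch_size + 1}/{len(summaries_text) // batch_size + 1}")

For the 120 sample summaries this prints batches 1/3, 2/3, and 3/3.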