Princess3 committed on
Commit
cb6bdc7
1 Parent(s): e2e558e

Update x.py

Files changed (1)
  1. x.py +216 -144
x.py CHANGED
@@ -5,24 +5,35 @@ import xml.etree.ElementTree as ET
  import torch
  import torch.nn as nn
  import torch.nn.functional as F
  from collections import defaultdict
  from typing import List, Dict, Any, Optional
  from colorama import Fore, Style, init
  from accelerate import Accelerator
  from torch.utils.data import DataLoader, TensorDataset
  from torch.cuda.amp import GradScaler, autocast

  # Initialize colorama
  init(autoreset=True)

- # Set file path and output path
  file_path = 'data/'
  output_path = 'output/'

  # Create output path if it doesn't exist
  if not os.path.exists(output_path):
-     os.makedirs(output_path)
-     os.chmod(output_path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # Set full r/w permissions

  # Ensure necessary files are created with full r/w permissions
  def ensure_file(file_path):
@@ -31,124 +42,101 @@ def ensure_file(file_path):
          pass
      os.chmod(file_path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # Set full r/w permissions

- # Define a simple memory augmentation module
  class MemoryAugmentationLayer(nn.Module):
-     def __init__(self, size: int):
-         super(MemoryAugmentationLayer, self).__init__()
          self.memory = nn.Parameter(torch.randn(size))

-     def forward(self, x: torch.Tensor) -> torch.Tensor:
          return x + self.memory

  class HybridAttentionLayer(nn.Module):
-     def __init__(self, size: int):
-         super(HybridAttentionLayer, self).__init__()
          self.attention = nn.MultiheadAttention(size, num_heads=8)

-     def forward(self, x: torch.Tensor) -> torch.Tensor:
-         x = x.unsqueeze(1)  # Add sequence dimension
          attn_output, _ = self.attention(x, x, x)
          return attn_output.squeeze(1)

  class DynamicFlashAttentionLayer(nn.Module):
-     def __init__(self, size: int):
-         super(DynamicFlashAttentionLayer, self).__init__()
          self.attention = nn.MultiheadAttention(size, num_heads=8)

-     def forward(self, x: torch.Tensor) -> torch.Tensor:
-         x = x.unsqueeze(1)  # Add sequence dimension
          attn_output, _ = self.attention(x, x, x)
          return attn_output.squeeze(1)

  class DynamicModel(nn.Module):
      def __init__(self, sections: Dict[str, List[Dict[str, Any]]]):
-         super(DynamicModel, self).__init__()
-         self.sections = nn.ModuleDict()
-         if not sections:
-             sections = {
-                 'default': [{
-                     'input_size': 128,
-                     'output_size': 256,
-                     'activation': 'relu',
-                     'batch_norm': True,
-                     'dropout': 0.1
-                 }]
-             }
-         for section_name, layers in sections.items():
-             self.sections[section_name] = nn.ModuleList()
-             for layer_params in layers:
-                 print(f"Creating layer in section '{section_name}' with params: {layer_params}")
-                 self.sections[section_name].append(self.create_layer(layer_params))
-
-     def create_layer(self, layer_params: Dict[str, Any]) -> nn.Module:
-         layers = []
-         layers.append(nn.Linear(layer_params['input_size'], layer_params['output_size']))
-         if layer_params.get('batch_norm', False):
-             layers.append(nn.BatchNorm1d(layer_params['output_size']))
-         activation = layer_params.get('activation', 'relu')
-         if activation == 'relu':
-             layers.append(nn.ReLU(inplace=True))
-         elif activation == 'tanh':
-             layers.append(nn.Tanh())
-         elif activation == 'sigmoid':
-             layers.append(nn.Sigmoid())
-         elif activation == 'leaky_relu':
-             layers.append(nn.LeakyReLU(negative_slope=0.01, inplace=True))
-         elif activation == 'elu':
-             layers.append(nn.ELU(alpha=1.0, inplace=True))
-         elif activation is not None:
-             raise ValueError(f"Unsupported activation function: {activation}")
-         if dropout_rate := layer_params.get('dropout', 0.0):
-             layers.append(nn.Dropout(p=dropout_rate))
-         if hidden_layers := layer_params.get('hidden_layers', []):
-             for hidden_layer_params in hidden_layers:
-                 layers.append(self.create_layer(hidden_layer_params))
-         if layer_params.get('memory_augmentation', True):
-             layers.append(MemoryAugmentationLayer(layer_params['output_size']))
-         if layer_params.get('hybrid_attention', True):
-             layers.append(HybridAttentionLayer(layer_params['output_size']))
-         if layer_params.get('dynamic_flash_attention', True):
-             layers.append(DynamicFlashAttentionLayer(layer_params['output_size']))
-         return nn.Sequential(*layers)
-
-     def forward(self, x: torch.Tensor, section_name: Optional[str] = None) -> torch.Tensor:
-         if section_name is not None:
-             if section_name not in self.sections:
-                 raise KeyError(f"Section '{section_name}' not found in model")
-             for layer in self.sections[section_name]:
-                 x = layer(x)
          else:
-             for section_name, layers in self.sections.items():
-                 for layer in layers:
-                     x = layer(x)
          return x

- def parse_xml_file(file_path: str) -> List[Dict[str, Any]]:
-     tree = ET.parse(file_path)
-     root = tree.getroot()
-     layers = []
      for layer in root.findall('.//layer'):
-         layer_params = {}
-         layer_params['input_size'] = int(layer.get('input_size', 128))
-         layer_params['output_size'] = int(layer.get('output_size', 256))
-         layer_params['activation'] = layer.get('activation', 'relu').lower()
-         if layer_params['activation'] not in ['relu', 'tanh', 'sigmoid', 'none']:
-             raise ValueError(f"Unsupported activation function: {layer_params['activation']}")
-         if layer_params['input_size'] <= 0 or layer_params['output_size'] <= 0:
              raise ValueError("Layer dimensions must be positive integers")
-         layers.append(layer_params)
      if not layers:
-         layers.append({
-             'input_size': 128,
-             'output_size': 256,
-             'activation': 'relu'
-         })
      return layers

- def create_model_from_folder(folder_path: str) -> DynamicModel:
      sections = defaultdict(list)
      if not os.path.exists(folder_path):
-         print(f"Warning: Folder {folder_path} does not exist. Creating model with default configuration.")
          return DynamicModel({})
      xml_files_found = False
      for root, dirs, files in os.walk(folder_path):
@@ -157,68 +145,152 @@ def create_model_from_folder(folder_path: str) -> DynamicModel:
                  xml_files_found = True
                  file_path = os.path.join(root, file)
                  try:
-                     layers = parse_xml_file(file_path)
-                     section_name = os.path.basename(root).replace('.', '_')
-                     sections[section_name].extend(layers)
                  except Exception as e:
-                     print(f"Error processing {file_path}: {str(e)}")
      if not xml_files_found:
-         print("Warning: No XML files found. Creating model with default configuration.")
          return DynamicModel({})
      return DynamicModel(dict(sections))

  def main():
-     print(Fore.CYAN + "Starting conversion...")
-
-     # Create the dynamic model from the folder
-     model = create_model_from_folder(file_path)
-     print(f"Created dynamic PyTorch model with sections: {list(model.sections.keys())}")
-
-     # Print the model architecture
-     print(model)
-
-     # Ensure the input tensor size matches the expected input size
-     first_section = next(iter(model.sections.keys()))
-     first_layer = model.sections[first_section][0]
-     input_features = first_layer[0].in_features
-     sample_input = torch.randn(1, input_features)
-     output = model(sample_input)
-     print(f"Sample output shape: {output.shape}")
-
-     # Training setup
-     accelerator = Accelerator()
-     optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
-     criterion = nn.CrossEntropyLoss()
-     num_epochs = 10
-     dataset = TensorDataset(
-         torch.randn(100, input_features),
-         torch.randint(0, 2, (100,))
-     )
-     train_dataloader = DataLoader(
-         dataset,
-         batch_size=8,  # Reduced batch size
-         shuffle=True
-     )
-     model, optimizer, train_dataloader = accelerator.prepare(
-         model, optimizer, train_dataloader
-     )
-     scaler = GradScaler()  # Mixed precision training
-
-     # Training loop
      for epoch in range(num_epochs):
          model.train()
          total_loss = 0
-         for batch_idx, (inputs, labels) in enumerate(train_dataloader):
              optimizer.zero_grad()
-             with autocast():  # Mixed precision training
-                 outputs = model(inputs)
-                 loss = criterion(outputs, labels)
-             scaler.scale(loss).backward()
-             scaler.step(optimizer)
-             scaler.update()
              total_loss += loss.item()
-         avg_loss = total_loss / len(train_dataloader)
-         print(f"Epoch {epoch+1}/{num_epochs}, Average Loss: {avg_loss:.4f}")

  if __name__ == "__main__":
-     main()

  import torch
  import torch.nn as nn
  import torch.nn.functional as F
+ import logging
+ import requests
+ import faiss
  from collections import defaultdict
  from typing import List, Dict, Any, Optional
  from colorama import Fore, Style, init
  from accelerate import Accelerator
  from torch.utils.data import DataLoader, TensorDataset
  from torch.cuda.amp import GradScaler, autocast
+ from transformers import AutoTokenizer, AutoModel
+ from sentence_transformers import SentenceTransformer
+ from sentence_transformers.readers import ParagraphReader
+ from sentence_transformers.uniformer import Uniformer

  # Initialize colorama
  init(autoreset=True)
+ logging.basicConfig(level=logging.INFO, format='\033[92m%(asctime)s - %(levelname)s - %(message)s\033[0m')

  file_path = 'data/'
  output_path = 'output/'

  # Create output path if it doesn't exist
  if not os.path.exists(output_path):
+     try:
+         os.makedirs(output_path)
+         os.chmod(output_path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # Set full r/w permissions
+     except PermissionError:
+         print(f"Permission denied: '{output_path}'")
+         # Handle the error or try a different path

  # Ensure necessary files are created with full r/w permissions
  def ensure_file(file_path):

          pass
      os.chmod(file_path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # Set full r/w permissions

+ class MagicStateLayer(nn.Module):
+     def __init__(self, size):
+         super().__init__()
+         self.state = nn.Parameter(torch.randn(size))
+
+     def forward(self, x):
+         return x + self.state
+
  class MemoryAugmentationLayer(nn.Module):
+     def __init__(self, size):
+         super().__init__()
          self.memory = nn.Parameter(torch.randn(size))

+     def forward(self, x):
          return x + self.memory

  class HybridAttentionLayer(nn.Module):
+     def __init__(self, size):
+         super().__init__()
          self.attention = nn.MultiheadAttention(size, num_heads=8)

+     def forward(self, x):
+         x = x.unsqueeze(1)
          attn_output, _ = self.attention(x, x, x)
          return attn_output.squeeze(1)

  class DynamicFlashAttentionLayer(nn.Module):
+     def __init__(self, size):
+         super().__init__()
          self.attention = nn.MultiheadAttention(size, num_heads=8)

+     def forward(self, x):
+         x = x.unsqueeze(1)
          attn_output, _ = self.attention(x, x, x)
          return attn_output.squeeze(1)

  class DynamicModel(nn.Module):
      def __init__(self, sections: Dict[str, List[Dict[str, Any]]]):
+         super().__init__()
+         self.sections = nn.ModuleDict({sn: nn.ModuleList([self.create_layer(lp) for lp in layers]) for sn, layers in sections.items()})
+
+     def create_layer(self, lp):
+         l = [nn.Linear(lp['input_size'], lp['output_size'])]
+         if lp.get('batch_norm', True):
+             l.append(nn.BatchNorm1d(lp['output_size']))
+         a = lp.get('activation', 'relu')
+         if a == 'relu':
+             l.append(nn.ReLU(inplace=True))
+         elif a == 'tanh':
+             l.append(nn.Tanh())
+         elif a == 'sigmoid':
+             l.append(nn.Sigmoid())
+         elif a == 'leaky_relu':
+             l.append(nn.LeakyReLU(negative_slope=0.01, inplace=True))
+         elif a == 'elu':
+             l.append(nn.ELU(alpha=1.0, inplace=True))
+         if dr := lp.get('dropout', 0.0):
+             l.append(nn.Dropout(p=dr))
+         if lp.get('memory_augmentation', False):
+             l.append(MemoryAugmentationLayer(lp['output_size']))
+         if lp.get('hybrid_attention', False):
+             l.append(HybridAttentionLayer(lp['output_size']))
+         if lp.get('dynamic_flash_attention', False):
+             l.append(DynamicFlashAttentionLayer(lp['output_size']))
+         if lp.get('magic_state', False):
+             l.append(MagicStateLayer(lp['output_size']))
+         return nn.Sequential(*l)
+
+     def forward(self, x, sn=None):
+         if sn:
+             for l in self.sections[sn]:
+                 x = l(x)
          else:
+             for sn, layers in self.sections.items():
+                 for l in layers:
+                     x = l(x)
          return x

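The reworked DynamicModel builds one nn.Sequential per layer dict and drops the old fallback to a default section, so an empty sections dict now yields a model with no layers. A minimal sketch of how it could be exercised directly, with an invented section name and sizes:

    sections = {
        'example': [
            {'input_size': 128, 'output_size': 256, 'activation': 'relu', 'dropout': 0.1},
        ]
    }
    model = DynamicModel(sections)
    x = torch.randn(4, 128)       # batch > 1 keeps the default BatchNorm1d happy in train mode
    y = model(x, sn='example')    # or model(x) to run every section in order
    print(y.shape)                # torch.Size([4, 256])
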
+ def parse_xml_file(file_path):
+     tree = ET.parse(file_path)
+     root = tree.getroot()
+     layers = []
      for layer in root.findall('.//layer'):
+         lp = {
+             'input_size': int(layer.get('input_size', 128)),
+             'output_size': int(layer.get('output_size', 256)),
+             'activation': layer.get('activation', 'relu').lower(),
+         }
+         if lp['activation'] not in ['relu', 'tanh', 'sigmoid', 'none']:
+             raise ValueError(f"Unsupported activation function: {lp['activation']}")
+         if lp['input_size'] <= 0 or lp['output_size'] <= 0:
              raise ValueError("Layer dimensions must be positive integers")
+         layers.append(lp)
      if not layers:
+         layers.append({'input_size': 128, 'output_size': 256, 'activation': 'relu'})
      return layers

+ def create_model_from_folder(folder_path):
      sections = defaultdict(list)
      if not os.path.exists(folder_path):
+         logging.warning(f"Folder {folder_path} does not exist. Creating model with default configuration.")
          return DynamicModel({})
      xml_files_found = False
      for root, dirs, files in os.walk(folder_path):

                  xml_files_found = True
                  file_path = os.path.join(root, file)
                  try:
+                     sections[os.path.basename(root).replace('.', '_')].extend(parse_xml_file(file_path))
                  except Exception as e:
+                     logging.error(f"Error processing {file_path}: {str(e)}")
      if not xml_files_found:
+         logging.warning("No XML files found. Creating model with default configuration.")
          return DynamicModel({})
      return DynamicModel(dict(sections))
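parse_xml_file reads <layer> elements by their input_size, output_size and activation attributes, and create_model_from_folder turns each directory of .xml files into a section named after the directory. A rough sketch of a matching config, with the file and directory names invented for illustration:

    import os
    os.makedirs('data/encoder', exist_ok=True)
    with open('data/encoder/layers.xml', 'w') as f:
        f.write('<model>'
                '<layer input_size="128" output_size="256" activation="relu"/>'
                '<layer input_size="256" output_size="64" activation="tanh"/>'
                '</model>')

    model = create_model_from_folder('data')
    print(list(model.sections.keys()))    # includes 'encoder'
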
 
+ def create_embeddings_and_stores(folder_path, model_name="sentence-transformers/all-MiniLM-L6-v2"):
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     model = AutoModel.from_pretrained(model_name)
+     vector_store = faiss.IndexFlatL2(384)
+     doc_store = []
+     for root, dirs, files in os.walk(folder_path):
+         for file in files:
+             if file.endswith('.xml'):
+                 file_path = os.path.join(root, file)
+                 try:
+                     xml_root = ET.parse(file_path).getroot()
+                     for elem in xml_root.iter():
+                         if elem.text:
+                             text = elem.text.strip()
+                             inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
+                             with torch.no_grad():
+                                 embeddings = model(**inputs).last_hidden_state.mean(dim=1).numpy()
+                             vector_store.add(embeddings)
+                             doc_store.append(text)
+                 except Exception as e:
+                     logging.error(f"Error processing {file_path}: {str(e)}")
+     return vector_store, doc_store
+
+ def query_vector_store(query, vector_store, doc_store, model_name="sentence-transformers/all-MiniLM-L6-v2"):
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     model = AutoModel.from_pretrained(model_name)
+     inputs = tokenizer(query, return_tensors="pt", truncation=True, padding=True)
+     with torch.no_grad():
+         query_embedding = model(**inputs).last_hidden_state.mean(dim=1).numpy()
+     D, I = vector_store.search(query_embedding, k=5)
+     return [doc_store[i] for i in I[0]]
+
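create_embeddings_and_stores keeps the FAISS index and the raw texts in lockstep, and query_vector_store maps the indices returned by search() back into doc_store. A self-contained sketch of that flow, with random vectors standing in for the MiniLM embeddings:

    import numpy as np
    import faiss

    dim = 384                                              # width assumed above for all-MiniLM-L6-v2
    index = faiss.IndexFlatL2(dim)
    docs = ["first snippet", "second snippet"]
    index.add(np.random.rand(2, dim).astype('float32'))    # one row per document
    D, I = index.search(np.random.rand(1, dim).astype('float32'), k=2)
    print([docs[i] for i in I[0]])                         # nearest texts, as in query_vector_store
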
+ def fetch_courtlistener_data(query):
+     base_url = "https://nzlii.org/cgi-bin/sinosrch.cgi"
+     params = {"method": "auto", "query": query, "meta": "/nz", "results": "50", "format": "json"}
+     try:
+         response = requests.get(base_url, params=params, headers={"Accept": "application/json"}, timeout=10)
+         response.raise_for_status()
+         return [{"title": r.get("title", ""), "citation": r.get("citation", ""), "date": r.get("date", ""), "court": r.get("court", ""), "summary": r.get("summary", ""), "url": r.get("url", "")} for r in response.json().get("results", [])]
+     except requests.exceptions.RequestException as e:
+         logging.error(f"Failed to fetch data from NZLII API: {str(e)}")
+         return []
+
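Despite its name, fetch_courtlistener_data queries the NZLII search endpoint and flattens each hit into a plain dict; whether that endpoint actually answers with JSON in this shape is an assumption the caller inherits. A sketch of consuming the normalised records, with an invented query string:

    for case in fetch_courtlistener_data("negligence"):
        print(case["title"], case["citation"], case["url"])
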
+ class CustomModel(nn.Module):
+     def __init__(self, model_name="distilbert-base-uncased"):
+         super().__init__()
+         self.model_name = model_name
+         self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+         self.encoder = AutoModel.from_pretrained(model_name)
+         self.hidden_size = self.encoder.config.hidden_size
+         self.dropout = nn.Dropout(p=0.2)
+         self.fc1 = nn.Linear(self.hidden_size, 64)
+         self.fc2 = nn.Linear(64, 32)
+         self.fc3 = nn.Linear(32, 16)
+         self.memory = nn.LSTM(self.hidden_size, 64, bidirectional=True, batch_first=True)
+         self.memory_fc1 = nn.Linear(64 * 2, 32)
+         self.memory_fc2 = nn.Linear(32, 16)
+
+     def forward(self, data):
+         tokens = self.tokenizer(data, return_tensors="pt", truncation=True, padding=True)
+         outputs = self.encoder(**tokens)
+         x = outputs.last_hidden_state.mean(dim=1)
+         x = self.dropout(F.relu(self.fc1(x)))
+         x = self.dropout(F.relu(self.fc2(x)))
+         x = self.fc3(x)
+         return x
+
+     def training_step(self, data, labels, optimizer, criterion):
+         optimizer.zero_grad()
+         outputs = self.forward(data)
+         loss = criterion(outputs, labels)
+         loss.backward()
+         optimizer.step()
+         return loss.item()
+
+     def validation_step(self, data, labels, criterion):
+         with torch.no_grad():
+             outputs = self.forward(data)
+             loss = criterion(outputs, labels)
+             return loss.item()
+
+     def predict(self, input):
+         self.eval()
+         with torch.no_grad():
+             return self.forward(input)
+
+ class CustomModelInference(nn.Module):
238
+ def __init__(self, model_name="distilbert-base-uncased"):
239
+ super().__init__()
240
+ self.model_name = model_name
241
+ self.tokenizer = AutoTokenizer.from_pretrained(model_name)
242
+ self.encoder = AutoModel.from_pretrained(model_name)
243
+ self.hidden_size = self.encoder.config.hidden_size
244
+ self.dropout = nn.Dropout(p=0.2)
245
+ self.fc1 = nn.Linear(self.hidden_size, 64)
246
+ self.fc2 = nn.Linear(64, 32)
247
+ self.fc3 = nn.Linear(32, 16)
248
+ self.reader = ParagraphReader("data/docstore.json")
249
+ self.model_embedding = SentenceTransformer('sentence-transformers/multilingual-v2')
250
+ self.vectorstore = Uniformer("distilusembert-base-nli-mean-tokens", torch.nn.CrossEntropyLoss(), margin=0.5, temperature=0.1, top_k=4)
251
+
252
+ def forward(self, data):
253
+ tokens = self.tokenizer(data, return_tensors="pt", truncation=True, padding=True)
254
+ outputs = self.encoder(**tokens)
255
+ x = outputs.last_hidden_state.mean(dim=1)
256
+ x = self.dropout(F.relu(self.fc1(x)))
257
+ x = self.dropout(F.relu(self.fc2(x)))
258
+ x = self.fc3(x)
259
+ return x
260
+
261
+ def infer(self, input):
262
+ self.eval()
263
+ with torch.no_grad():
264
+ return self.forward(input)
265
+
266
+ def update_memory(self, data):
267
+ embeddings = self.model_embedding.encode(data, convert_to_tensor=True)
268
+ self.vectorstore.add(embeddings)
269
+
270
  def main():
+     folder_path = 'data'
+     model = create_model_from_folder(folder_path)
+     logging.info(f"Created dynamic PyTorch model with sections: {list(model.sections.keys())}")
+     vector_store, doc_store = create_embeddings_and_stores(folder_path)
+     accelerator = Accelerator()
+     optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
+     criterion = nn.CrossEntropyLoss()
+     num_epochs = 10
+     dataset = TensorDataset(torch.randn(100, 128), torch.randint(0, 2, (100,)))
+     dataloader = DataLoader(dataset, batch_size=16, shuffle=True)
+     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
      for epoch in range(num_epochs):
          model.train()
          total_loss = 0
+         for batch_data, batch_labels in dataloader:
              optimizer.zero_grad()
+             outputs = model(batch_data)
+             loss = criterion(outputs, batch_labels)
+             accelerator.backward(loss)
+             optimizer.step()
              total_loss += loss.item()
+         avg_loss = total_loss / len(dataloader)
+         logging.info(f"Epoch {epoch+1}/{num_epochs}, Average Loss: {avg_loss:.4f}")
+     query = "example query text"
+     results = query_vector_store(query, vector_store, doc_store)
+     logging.info(f"Query results: {results}")
+     courtlistener_data = fetch_courtlistener_data(query)
+     logging.info(f"CourtListener API results: {courtlistener_data}")

  if __name__ == "__main__":
+     main()