import os
import json
from collections import defaultdict

from transformers import LlamaTokenizer
from tqdm import tqdm

# Input/output paths.
input_json_file = '/Users/liyu/PycharmProjects/CoTKR/updated.json'
output_dir = '/Users/liyu/PycharmProjects/CoTKR/output'
os.makedirs(output_dir, exist_ok=True)

# Local Llama-2-7b checkpoint; its tokenizer is used only to count tokens
# when sizing chunks.
model_path = '/Users/liyu/PycharmProjects/CoTKR/Llama-2-7b-hf'
tokenizer = LlamaTokenizer.from_pretrained(model_path)

# The input is expected to be a JSON list of [subject, relation, object] triples.
with open(input_json_file, 'r', encoding='utf-8') as f:
    triples = json.load(f)

# Group triples by subject: each entity maps to a list of "relation: object" strings.
grouped_entity_triples = defaultdict(list)
for triple in triples:
    subj, relation, obj = triple
    grouped_entity_triples[subj].append(f"{relation}: {obj}")
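# e.g. [["Barack_Obama", "birthPlace", "Honolulu"], ...] becomes
# {"Barack_Obama": ["birthPlace: Honolulu", ...]} (illustrative values, not
# taken from the actual input file).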


def create_paragraphs(subj, relations, tokenizer, max_tokens=512):
    """Greedily pack a subject's relation strings into chunks of at most
    ~max_tokens tokens, returning one paragraph dict per chunk."""
    paragraphs = []
    current_chunk = []
    current_tokens = 0

    for relation in relations:
        tokens_in_relation = len(tokenizer.encode(relation, add_special_tokens=False))

        # The +1 approximates the ", " separator added when the chunk is joined.
        # Only flush a non-empty chunk, so a single over-long relation cannot
        # produce an empty paragraph.
        if current_chunk and current_tokens + tokens_in_relation + 1 > max_tokens:
            paragraphs.append({
                "title": subj,
                "contents": ", ".join(current_chunk).strip()
            })
            current_chunk = [relation]
            current_tokens = tokens_in_relation
        else:
            current_chunk.append(relation)
            current_tokens += tokens_in_relation + 1

    # Flush the final, partially filled chunk.
    if current_chunk:
        paragraphs.append({
            "title": subj,
            "contents": ", ".join(current_chunk).strip()
        })

    return paragraphs
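
# Example element of the returned list (illustrative values, continuing the
# hypothetical grouping example above):
#   {"title": "Barack_Obama", "contents": "birthPlace: Honolulu"}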


# Chunk every subject's relations, with a progress bar over subjects.
all_paragraphs = []
for subj, relations in tqdm(grouped_entity_triples.items(), desc="Processing subjects", unit="subject"):
    paragraphs = create_paragraphs(subj, relations, tokenizer)
    all_paragraphs.extend(paragraphs)

# Write everything to one JSON file; open with UTF-8 so ensure_ascii=False can
# emit non-ASCII characters verbatim.
output_file = os.path.join(output_dir, 'new_triple_processed.json')
with open(output_file, 'w', encoding='utf-8') as out_f:
    json.dump(all_paragraphs, out_f, ensure_ascii=False, indent=4)
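
# Optional sanity check (a sketch, not part of the original script): a joined
# chunk can re-tokenize slightly differently from the per-relation sums the
# packing loop accumulates, so flag any paragraph over the 512-token budget.
for p in all_paragraphs:
    n_tokens = len(tokenizer.encode(p["contents"], add_special_tokens=False))
    if n_tokens > 512:
        print(f"warning: {p['title']!r} chunk re-tokenizes to {n_tokens} tokens")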