Upload division.py
division.py +62 -0
division.py
ADDED
@@ -0,0 +1,62 @@
import os
import json
from collections import defaultdict
from transformers import LlamaTokenizer
from tqdm import tqdm

input_json_file = '/Users/liyu/PycharmProjects/CoTKR/updated.json'
output_dir = '/Users/liyu/PycharmProjects/CoTKR/output'
os.makedirs(output_dir, exist_ok=True)

model_path = '/Users/liyu/PycharmProjects/CoTKR/Llama-2-7b-hf'

tokenizer = LlamaTokenizer.from_pretrained(model_path)

# Load the [subject, relation, object] triples.
with open(input_json_file, 'r', encoding='utf-8') as f:
    triples = json.load(f)

# Group triples by subject entity as "relation: object" strings.
grouped_entity_triples = defaultdict(list)
for triple in triples:
    subj, relation, obj = triple
    grouped_entity_triples[subj].append(f"{relation}: {obj}")

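# For illustration (hypothetical sample values): grouped_entity_triples now maps
# each subject to its relation strings, e.g.
#   {"Barack Obama": ["born in: Honolulu", "spouse: Michelle Obama"]}
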
# Create paragraphs according to the tokenizer's length limit
def create_paragraphs(subj, relations, tokenizer, max_tokens=512):
    paragraphs = []
    current_chunk = []
    current_tokens = 0

    for relation in relations:
        tokens_in_relation = len(tokenizer.encode(relation, add_special_tokens=False))

        # Start a new paragraph once adding this relation would exceed the budget.
        if current_tokens + tokens_in_relation + 1 > max_tokens:  # +1 for the separator/space
            if current_chunk:  # avoid emitting an empty paragraph
                paragraphs.append({
                    "title": f"{subj}",
                    "contents": ", ".join(current_chunk).strip()
                })
            current_chunk = [relation]
            current_tokens = tokens_in_relation
        else:
            current_chunk.append(relation)
            current_tokens += tokens_in_relation + 1  # +1 for the separator/space

    # Flush the final chunk.
    if current_chunk:
        paragraphs.append({
            "title": f"{subj}",
            "contents": ", ".join(current_chunk).strip()
        })

    return paragraphs

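# For illustration (hypothetical sample values): relations are packed greedily
# into comma-joined chunks, so
#   create_paragraphs("Barack Obama",
#                     ["born in: Honolulu", "spouse: Michelle Obama"], tokenizer)
# yields
#   [{"title": "Barack Obama", "contents": "born in: Honolulu, spouse: Michelle Obama"}]
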
all_paragraphs = []
for subj, relations in tqdm(grouped_entity_triples.items(), desc="Processing subjects", unit="subject"):
    paragraphs = create_paragraphs(subj, relations, tokenizer)
    all_paragraphs.extend(paragraphs)

output_file = os.path.join(output_dir, 'new_triple_processed.json')
with open(output_file, 'w', encoding='utf-8') as out_f:
    json.dump(all_paragraphs, out_f, ensure_ascii=False, indent=4)

# print(f"Processed paragraphs saved to {output_file}")
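A minimal sanity check one might run afterwards (a hypothetical snippet, not part of the upload). Note that the budget is approximate: re-encoding a joined chunk can differ slightly from the per-relation token sums, since tokenization is not compositional across the ", " separators, and a single relation longer than max_tokens becomes an oversized paragraph of its own.

import json
from transformers import LlamaTokenizer

# Paths reuse the script's values above.
tokenizer = LlamaTokenizer.from_pretrained('/Users/liyu/PycharmProjects/CoTKR/Llama-2-7b-hf')

with open('/Users/liyu/PycharmProjects/CoTKR/output/new_triple_processed.json', encoding='utf-8') as f:
    paragraphs = json.load(f)

# Report any chunk whose re-encoded length exceeds the intended 512-token budget.
for p in paragraphs:
    n = len(tokenizer.encode(p["contents"], add_special_tokens=False))
    if n > 512:
        print(f'over budget: {p["title"]} ({n} tokens)')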