SamuelYang committed on
Commit
c30ee5c
1 Parent(s): 5499b9f

Upload doct5.py

Files changed (1): doct5.py +190 -0
doct5.py ADDED
@@ -0,0 +1,190 @@
+ import os
+ import numpy as np
+ import torch
+ import torch.multiprocessing as mp
+ from tqdm import tqdm
+ from dataclasses import dataclass
+ from multiprocessing import Pool
+ from transformers import T5ForConditionalGeneration, AutoTokenizer
+ from utils.manager import Manager
+ from utils.arguments import *
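+ # doct5.py: generate pseudo queries for every document with a doc2query-style
+ # T5 model (the "doct5" PLM), cache the generated token ids in a temporary
+ # memmap, decode them into <data_root>/<dataset>/doct5.tsv (one document per
+ # line, queries separated by tabs), then re-tokenize the queries with the
+ # preprocess PLM into token_ids.mmp / token_lengths.mmp for later loading.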
+ 
+ 
+ @dataclass
+ class CommonArgs(CommonArguments):
+     mode: str = "dev"
+     plm: str = "doct5"
+     loader_query: str = "none"
+     dataset: str = "NQ"
+ 
+     preprocess_plm: str = "t5"
+ 
+ 
+ @dataclass
+ class ModelArgs(ModelArguments):
+     text_length: int = 512
+     batch_size_eval: int = 50
+ 
+     max_length: int = 64
+ 
+ 
+ def main(rank, manager):
+     manager.setup(rank)
+ 
+     loaders = manager.prepare()
+     loader_text = loaders["text"]
+ 
+     model = T5ForConditionalGeneration.from_pretrained(manager.config.plm_dir).to(manager.config.device)
+     tokenizer = AutoTokenizer.from_pretrained(manager.config.plm_dir)
+ 
+     max_length = manager.config.max_length
+     # query_per_doc (queries to sample per document) is read from the shared
+     # config; it is assumed to be defined in the argument dataclasses upstream
+     query_per_doc = manager.config.query_per_doc
+     mmp_path = os.path.join(manager.config.cache_root, "dataset", "text", "doct5.mmp")
+     doct5_path = os.path.join(manager.config.data_root, manager.config.dataset, "doct5.tsv")
+ 
+     # generate pseudo queries
+     if not manager.config.load_cache:
+         text_token_ids = np.zeros((len(loader_text.sampler), query_per_doc, max_length), dtype=np.int32)
+ 
+         with torch.no_grad():
+             start_idx = end_idx = 0
+             for i, x in enumerate(tqdm(loader_text, ncols=100, desc="Generating Queries")):
+                 input_ids = x["pos_seq_token_id"].to(manager.config.device)
+                 B = input_ids.shape[0]
+ 
+                 sequences = model.generate(
+                     input_ids=input_ids,
+                     max_length=max_length,
+                     do_sample=True,
+                     num_return_sequences=query_per_doc
+                 ).view(B, query_per_doc, -1).cpu().numpy()    # B, N, L
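+                 # generate with do_sample returns B * query_per_doc sequences
+                 # stacked along dim 0, hence the view to (B, N, L); generated
+                 # sequences can be shorter than max_length, so only the leading
+                 # columns of the zero-initialized buffer are filled below
+                 # (0 is the T5 pad id, so the tail stays valid padding)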
+ 
+                 end_idx += B
+                 text_token_ids[start_idx: end_idx, :, :sequences.shape[-1]] = sequences
+                 start_idx = end_idx
+ 
+         # use a memmap to temporarily store the generated token ids
+         if manager._rank == 0:
+             text_token_ids_mmp = np.memmap(
+                 mmp_path,
+                 shape=(len(loader_text.dataset), query_per_doc, max_length),
+                 dtype=np.int32,
+                 mode="w+"
+             )
+         manager.synchronize()
+         text_token_ids_mmp = np.memmap(
+             mmp_path,
+             dtype=np.int32,
+             mode="r+"
+         ).reshape(len(loader_text.dataset), query_per_doc, max_length)
+         text_token_ids_mmp[loader_text.sampler.start: loader_text.sampler.end] = text_token_ids
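+         # rank 0 creates the file sized for the whole dataset ("w+"), the
+         # barrier lets every rank wait for it to exist, then each rank opens
+         # it read-write and fills only its own sampler shard [start, end),
+         # so writes from different ranks never overlap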
+ 
+         del text_token_ids_mmp
+ 
+     # decode the cached token ids, then re-tokenize the pseudo queries with
+     # preprocess_plm and save them under dataset/text/<preprocess_plm>/doct5
+     if rank == 0:
+         # load all saved token ids
+         text_token_ids = np.memmap(
+             mmp_path,
+             dtype=np.int32,
+             mode="r+"
+         ).reshape(len(loader_text.dataset), query_per_doc, max_length)
+ 
+         if not manager.config.load_cache:
+             with open(doct5_path, "w") as f:
+                 for sequences in tqdm(text_token_ids, ncols=100, desc="Decoding"):
+                     texts = tokenizer.batch_decode(sequences, skip_special_tokens=True)    # N
+                     f.write("\t".join(texts) + "\n")
+ 
+         cache_dir = os.path.join(manager.config.cache_root, "dataset", "text", manager.config.preprocess_plm, "doct5")
+         os.makedirs(cache_dir, exist_ok=True)
+         preprocess_threads = 32
+         all_line_count = len(loader_text.dataset)
+ 
+         manager._set_plm(manager.config.preprocess_plm)
+         tokenizer = AutoTokenizer.from_pretrained(manager.config.plm_dir)
+         manager.logger.info("tokenizing {} in {} threads, output file will be saved at {}".format(doct5_path, preprocess_threads, cache_dir))
+ 
+         arguments = []
+         # create the output memmaps first: token_ids holds one row of
+         # (query_per_doc, max_length) ids per document, token_lengths the
+         # corresponding unpadded lengths
+         token_ids = np.memmap(
+             os.path.join(cache_dir, "token_ids.mmp"),
+             shape=(all_line_count, query_per_doc, max_length),
+             mode="w+",
+             dtype=np.int32
+         )
+         token_lengths = np.memmap(
+             os.path.join(cache_dir, "token_lengths.mmp"),
+             shape=(all_line_count, query_per_doc),
+             mode="w+",
+             dtype=np.int32
+         )
+ 
+         for i in range(preprocess_threads):
+             start_idx = round(all_line_count * i / preprocess_threads)
+             end_idx = round(all_line_count * (i+1) / preprocess_threads)
+             arguments.append((doct5_path, cache_dir, all_line_count, start_idx, end_idx, query_per_doc, tokenizer, max_length))
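+         # round() yields contiguous, non-overlapping [start_idx, end_idx)
+         # ranges that exactly cover all_line_count lines, so each worker
+         # writes a disjoint slice of the memmaps and no locking is needed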
+ 
+         with Pool(preprocess_threads) as p:
+             # _tokenize_text writes straight into the memmaps and returns
+             # nothing, so the return value of starmap is discarded
+             p.starmap(_tokenize_text, arguments)
+ 
+ 
+ def _tokenize_text(input_path, output_dir, all_line_count, start_idx, end_idx, query_per_doc, tokenizer, max_length):
+     """
+     Tokenize the pseudo queries, with padding and truncation, then write the
+     token ids and token lengths into the shared memmaps.
+ 
+     Args:
+         input_path: input text file path
+         output_dir: directory of the output numpy memmaps
+         all_line_count: total number of lines (documents) in the input file
+         start_idx: the beginning index to read
+         end_idx: the ending index (exclusive)
+         query_per_doc: number of pseudo queries per document
+         tokenizer: transformers tokenizer
+         max_length: max length of tokens
+     """
+     token_ids = np.memmap(
+         os.path.join(output_dir, "token_ids.mmp"),
+         shape=(all_line_count, query_per_doc, max_length),
+         mode="r+",
+         dtype=np.int32
+     )
+     token_lengths = np.memmap(
+         os.path.join(output_dir, "token_lengths.mmp"),
+         shape=(all_line_count, query_per_doc),
+         mode="r+",
+         dtype=np.int32
+     )
+ 
+     with open(input_path, 'r') as f:
+         pbar = tqdm(total=end_idx-start_idx, desc="Tokenizing", ncols=100, leave=False)
+         for idx, line in enumerate(f):
+             if idx < start_idx:
+                 continue
+             if idx >= end_idx:
+                 break
+ 
+             # strip the trailing newline so the last query on the line is clean
+             pseudo_queries = line.strip("\n").split('\t')
+             output = tokenizer(pseudo_queries, max_length=max_length, padding="max_length", truncation=True, return_tensors="np")
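+             # padding="max_length" guarantees a fixed (query_per_doc, max_length)
+             # shape, matching one row of the token_ids memmap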
+ 
+             token_id = output.input_ids
+             token_length = output.attention_mask.sum(axis=-1)
+ 
+             # token_length counts special tokens (e.g. [CLS] and [SEP]) as well
+             token_lengths[idx] = token_length
+             token_ids[idx] = token_id
+             pbar.update(1)
+         pbar.close()
+ 
+ 
+ if __name__ == "__main__":
+     manager = Manager()
+     manager.parse_args(CommonArgs=CommonArgs, ModelArgs=ModelArgs)
+ 
+     if manager._distributed:
+         # mp.spawn passes the process rank as the first argument to main
+         mp.spawn(
+             main,
+             args=(manager,),
+             nprocs=manager._world_size,
+             join=True
+         )
+     else:
+         main(0, manager)
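+ 
+ # Example launch (hypothetical flags; parse_args is assumed to expose the
+ # dataclass fields above as CLI options):
+ #   python doct5.py --dataset NQ --mode dev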