Upload ragwiki_indexing-terrier.ipynb
ragwiki_indexing-terrier.ipynb (ADDED, +202 lines)
# Sparse Index for RAG Wikipedia Corpus

This notebook creates a sparse Terrier index using PyTerrier for the Wikipedia corpus used by Natural Questions and TextbookQuestionAnswering.

The corpus is downloaded from https://huggingface.co/datasets/RUC-NLPIR/FlashRAG_datasets/resolve/main/retrieval-corpus/wiki18_100w.zip by `pt.get_dataset('rag:nq_wiki').get_corpus_iter()`.
```python
import pyterrier as pt
import pyterrier_rag
```
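It can be useful to peek at the first record the corpus iterator yields before committing to a full index build. A minimal sketch, assuming each record is a dict carrying the `docno`, `title` and `text` fields referenced by the indexing configuration below:

```python
import pyterrier as pt
import pyterrier_rag  # registers the 'rag:' datasets with PyTerrier

# The first iteration triggers the (large) corpus download, so only pull one record.
corpus_iter = pt.get_dataset('rag:nq_wiki').get_corpus_iter()
first_doc = next(iter(corpus_iter))
print(first_doc.keys())                      # expected: docno, title, text
print(first_doc['docno'], first_doc['title'])
```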
This notebook requires PyTerrier 0.13 or higher.
```python
pt.__version__
```

Output:

```
'0.13.0'
```
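The requirement can also be enforced programmatically, so the notebook fails fast on an older installation. A minimal guard sketch, assuming the `packaging` package is available (it is not otherwise used by this notebook):

```python
import pyterrier as pt
from packaging.version import Version

# Fail early if the installed PyTerrier predates the 0.13 APIs used below.
assert Version(pt.__version__) >= Version("0.13.0"), (
    f"PyTerrier 0.13+ required, found {pt.__version__}"
)
```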
Let's prepare the index. We're going to store the title and text of the documents as metadata in the Terrier index, so we can use them for reranking. A study of the title and text length distributions found that very few values were cut off with maximum lengths of 1750 characters for the text and 125 for the title.
```python
index_dir = "./nq_index_new"
ref = pt.IterDictIndexer(
    index_dir,
    text_attrs=['title', 'text'],
    meta={'docno': 20, 'text': 1750, 'title': 125}
).index(pt.get_dataset('rag:nq_wiki').get_corpus_iter())
```

Output:

```
13:45:49.361 [ForkJoinPool-2-worker-3] WARN org.terrier.structures.BaseCompressingMetaIndex -- Structure meta reading lookup file directly from disk (SLOW) - try index.meta.index-source=fileinmem in the index properties file. 137.3 MiB of memory would be required.
13:45:49.366 [ForkJoinPool-2-worker-3] WARN org.terrier.structures.BaseCompressingMetaIndex -- Structure meta reading data file directly from disk (SLOW) - try index.meta.data-source=fileinmem in the index properties file. 7 GiB of memory would be required.
13:56:25.302 [ForkJoinPool-2-worker-3] WARN org.terrier.structures.BaseCompressingMetaIndex -- Structure meta reading data file directly from disk (SLOW) - try index.meta.data-source=fileinmem in the index properties file. 1.2 GiB of memory would be required.

<org.terrier.querying.IndexRef at 0x7fa3d024d5b0 jclass=org/terrier/querying/IndexRef jself=<LocalRef obj=0xc526808 at 0x7fa274037470>>
```
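Before uploading, a quick sanity check confirms the index opens and that the stored `title`/`text` metadata comes back with retrieval results, as a downstream reranker would expect. A minimal sketch; the query string is purely illustrative:

```python
import pyterrier as pt

# Open the freshly built index and report its collection statistics.
idx = pt.IndexFactory.of(ref)
print(idx.getCollectionStatistics().toString())

# A quick BM25 retrieval that also returns the stored metadata fields.
bm25 = pt.terrier.Retriever(idx, wmodel="BM25",
                            metadata=["docno", "title", "text"])
res = bm25.search("who wrote the declaration of independence")
print(res[["docno", "score", "title"]].head())
```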
We then upload the index to the Hugging Face Hub.
```python
index = pt.terrier.TerrierIndex(ref)
index.to_hf('pyterrier/ragwiki-terrier')
```

Output:

```
adding data.direct.bf [1.9 GB]
adding data.document.fsarrayfile [340.7 MB]
adding data.inverted.bf [1.5 GB]
adding data.lexicon.fsomapfile [330.0 MB]
adding data.lexicon.fsomaphash [1017 B]
adding data.lexicon.fsomapid [15.3 MB]
adding data.meta-0.fsomapfile [1.3 GB]
adding data.meta.idx [160.3 MB]
adding data.meta.zdata [8.2 GB]
adding data.properties [4.1 KB]
adding pt_meta.json [79 B]
[upload progress: artifact.tar.lz4.json (913 B) and artifact.tar.lz4 (12.9 GB), 2 LFS files]
Artifact uploaded to https://huggingface.co/datasets/pyterrier/ragwiki-terrier/tree/main/
Consider editing the README.md to help explain this artifact to others.
```
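Consumers can pull the artifact straight back from the Hub instead of rebuilding it. A minimal sketch using PyTerrier's artifact API:

```python
import pyterrier as pt

# Download (and cache) the uploaded index artifact from the Hugging Face Hub.
sparse_index = pt.Artifact.from_hf('pyterrier/ragwiki-terrier')
print(sparse_index)
```

From there, a BM25 retriever returning the stored metadata can be built exactly as in the sanity check above.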
Notebook kernel: Python [conda env:rag], Python 3.11.11.