Commit cf05905
Parent(s): initial commit

Changed files:
- .gitattributes +2 -0
- .gitignore +2 -0
- README.md +74 -0
- convert.py +98 -0
- data/dev/dev.jsonl.gz +3 -0
- data/test/test.jsonl.gz +3 -0
- data/train/train.jsonl.gz +3 -0
- requirements.txt +1 -0
.gitattributes ADDED
@@ -0,0 +1,2 @@
+*.zst filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
+.venv
+.mypy_cache
README.md ADDED
@@ -0,0 +1,74 @@
+---
+language:
+- en
+license: apache-2.0
+tags:
+- text
+pretty_name: MSMARCO
+size_categories:
+- "100K<n<1M"
+source_datasets:
+- MSMARCO
+task_categories:
+- sentence-similarity
+dataset_info:
+  config_name: default
+  splits:
+  - name: train
+    num_bytes: 1
+    num_examples: 1
+  - name: test
+    num_bytes: 1
+    num_examples: 1
+  - name: dev
+    num_bytes: 1
+    num_examples: 1
+train-eval-index:
+- config: default
+  task: sentence-similarity
+  splits:
+    train_split: train
+    eval_split: test
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: "data/train/*"
+  - split: test
+    path: "data/test/*"
+  - split: dev
+    path: "data/dev/*"
+---
+
+# MSMARCO dataset
+
+A dataset in a [nixietune](https://github.com/nixiesearch/nixietune) compatible format:
+
+```json
+{
+  "query": ")what was the immediate impact of the success of the manhattan project?",
+  "pos": [
+    {
+      "doc": "The presence of communication amid scientific minds was equally important to the success of the Manhattan Project as scientific intellect was. The only cloud hanging over the impressive achievement of the atomic researchers and engineers is what their success truly meant; hundreds of thousands of innocent lives obliterated.",
+      "score": 1
+    }
+  ]
+}
+```
+
+This is the original MS MARCO dataset converted to this format, with the following splits:
+* train: 502939 queries, only positives.
+* test: 43 queries, positives and negatives.
+* dev: 6980 queries, only positives.
+
+## Usage
+
+```python
+from datasets import load_dataset
+
+data = load_dataset('nixiesearch/msmarco', split="train")
+```
+
+## License
+
+Apache 2.0
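Each record in this format carries a `query` string and a `pos` list of scored documents; a `neg` list appears only for queries that have negatives (here, the test split). A minimal sketch for inspecting a test record, reusing the hub id from the usage snippet above:

```python
from datasets import load_dataset

# The test split is the only one that carries negatives.
test = load_dataset("nixiesearch/msmarco", split="test")

row = test[0]
print(row["query"])
print(len(row["pos"]))            # scored positive documents
print(len(row.get("neg") or []))  # negatives; may be None/absent on other splits
```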
convert.py ADDED
@@ -0,0 +1,98 @@
+from datasets import load_dataset
+from dataclasses import dataclass, field
+import logging
+from transformers import HfArgumentParser
+from tqdm import tqdm
+from typing import Dict, List
+import json
+
+logger = logging.getLogger()
+logger.setLevel(logging.INFO)
+console_handler = logging.StreamHandler()
+console_handler.setFormatter(
+    logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
+)
+logger.handlers = [console_handler]
+
+
+@dataclass
+class ConversionArguments:
+    path: str = field(metadata={"help": "Path to the MSMARCO dataset"})
+    out: str = field(metadata={"help": "Output path"})
+
+
+@dataclass
+class QRel:
+    doc: int
+    score: int
+
+
+def load_json(path: str, split: str = "train") -> List[str]:
+    dataset = load_dataset("json", data_files=path, split=split)
+    cache: List[str] = []
+    for row in tqdm(dataset, desc=f"loading {path}"):
+        index = int(row["_id"])
+        if index >= len(cache):
+            cache.extend([""] * (1 + 2 * max(index, len(cache))))
+        cache[index] = row["text"]
+    return cache
+
+
+def load_qrel(path: str) -> Dict[int, List[QRel]]:
+    dataset = load_dataset("csv", data_files=path, split="train", delimiter="\t")
+    logger.info(dataset.features)
+    cache: Dict[int, List[QRel]] = {}
+    for row in tqdm(dataset, desc=f"loading {path}"):
+        qid = int(row["query-id"])
+        qrel = QRel(int(row["corpus-id"]), int(row["score"]))
+        if qid in cache:
+            cache[qid].append(qrel)
+        else:
+            cache[qid] = [qrel]
+    return cache
+
+
+def process(
+    qrels: Dict[int, List[QRel]], queries: List[str], corpus: List[str]
+) -> List[Dict]:
+    result = []
+    for query, rels in tqdm(qrels.items(), desc="processing split"):
+        pos = [
+            {"doc": corpus[rel.doc], "score": rel.score}
+            for rel in rels
+            if rel.doc < len(corpus) and rel.score > 0 and corpus[rel.doc] != ""
+        ]
+        neg = [
+            {"doc": corpus[rel.doc], "score": rel.score}
+            for rel in rels
+            if rel.doc < len(corpus) and rel.score == 0 and corpus[rel.doc] != ""
+        ]
+        group = {"query": queries[query], "pos": pos}
+        if len(neg) > 0:
+            group["neg"] = neg
+        result.append(group)
+    return result
+
+
+def main():
+    parser = HfArgumentParser(ConversionArguments)
+    (args,) = parser.parse_args_into_dataclasses()
+    logger.info(f"Args: {args}")
+    corpus = load_json(f"{args.path}/corpus.jsonl", split="train")
+    queries = load_json(f"{args.path}/queries.jsonl")
+    qrels = {
+        "dev": process(load_qrel(f"{args.path}/qrels/dev.tsv"), queries, corpus),
+        "test": process(load_qrel(f"{args.path}/qrels/test.tsv"), queries, corpus),
+        "train": process(load_qrel(f"{args.path}/qrels/train.tsv"), queries, corpus),
+    }
+    logger.info("processing done")
+    for split, data in qrels.items():
+        with open(f"{args.out}/{split}.jsonl", "w") as out:
+            for item in data:
+                json.dump(item, out)
+                out.write("\n")
+    logger.info("done")
+
+
+if __name__ == "__main__":
+    main()
data/dev/dev.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92ff05e37188b132fdac1734e823e78b8c92a2e00622f9886711b0d7d9274624
+size 1109843
data/test/test.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c4362a2f021d6b45f068d2f13aee9336b84123e718df797930956a540f30167
+size 860084
data/train/train.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfe4888522ab3a7be8bfa406f3e45a207193cd8b0fd026acd895cb6c1213a86f
+size 92239408
requirements.txt ADDED
@@ -0,0 +1 @@
+datasets
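Note that `requirements.txt` covers only dataset consumption; running `convert.py` additionally needs `transformers` and `tqdm`, which it imports at the top.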