parquet-converter committed on
Commit
cef51a9
·
1 Parent(s): 7715802

Update parquet files

Browse files
.gitattributes DELETED
@@ -1,38 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ftz filter=lfs diff=lfs merge=lfs -text
6
- *.gz filter=lfs diff=lfs merge=lfs -text
7
- *.h5 filter=lfs diff=lfs merge=lfs -text
8
- *.joblib filter=lfs diff=lfs merge=lfs -text
9
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
- *.model filter=lfs diff=lfs merge=lfs -text
11
- *.msgpack filter=lfs diff=lfs merge=lfs -text
12
- *.onnx filter=lfs diff=lfs merge=lfs -text
13
- *.ot filter=lfs diff=lfs merge=lfs -text
14
- *.parquet filter=lfs diff=lfs merge=lfs -text
15
- *.pb filter=lfs diff=lfs merge=lfs -text
16
- *.pt filter=lfs diff=lfs merge=lfs -text
17
- *.pth filter=lfs diff=lfs merge=lfs -text
18
- *.rar filter=lfs diff=lfs merge=lfs -text
19
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
20
- *.tar.* filter=lfs diff=lfs merge=lfs -text
21
- *.tflite filter=lfs diff=lfs merge=lfs -text
22
- *.tgz filter=lfs diff=lfs merge=lfs -text
23
- *.wasm filter=lfs diff=lfs merge=lfs -text
24
- *.xz filter=lfs diff=lfs merge=lfs -text
25
- *.zip filter=lfs diff=lfs merge=lfs -text
26
- *.zstandard filter=lfs diff=lfs merge=lfs -text
27
- *tfevents* filter=lfs diff=lfs merge=lfs -text
28
- # Audio files - uncompressed
29
- *.pcm filter=lfs diff=lfs merge=lfs -text
30
- *.sam filter=lfs diff=lfs merge=lfs -text
31
- *.raw filter=lfs diff=lfs merge=lfs -text
32
- # Audio files - compressed
33
- *.aac filter=lfs diff=lfs merge=lfs -text
34
- *.flac filter=lfs diff=lfs merge=lfs -text
35
- *.mp3 filter=lfs diff=lfs merge=lfs -text
36
- *.ogg filter=lfs diff=lfs merge=lfs -text
37
- *.wav filter=lfs diff=lfs merge=lfs -text
38
- data/train.jsonl filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
data/paper_dev.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/paper_test.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/train.jsonl → default/enfever_nli-test.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:fea733cf6e4bc3e8cbb77474d8637dde0a6f5c4a33733f5c9706e5592f0d0285
3
- size 104038801
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6200d228e282923b68458473dc938e1da895fcea9987bf76f6de3981140ef2cd
3
+ size 2040383
default/enfever_nli-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ab2332ce728ed5094ebe9a3c0293b89ac23db8b2d5362b48b55b3e84d1c47dea
3
+ size 38927895
default/enfever_nli-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a94b24ad8a8b0cffc0d39718eb48b5a2f71cfc1e3f7f1cf5df57ba28de30b19
3
+ size 1920557
enfever_nli.py DELETED
@@ -1,76 +0,0 @@
1
- import os
2
- import pathlib
3
- from typing import overload
4
- import datasets
5
- import json
6
-
7
- from datasets.info import DatasetInfo
8
-
9
- _VERSION = "0.0.1"
10
-
11
- _URL= "data/"
12
-
13
- _URLS = {
14
- "train": _URL + "train.jsonl",
15
- "validation": _URL + "paper_dev.jsonl",
16
- "test": _URL + "paper_test.jsonl"
17
- }
18
-
19
- _DESCRIPTION = """\
20
- EnfeverNLI is a NLI version of the fever dataset
21
- """
22
-
23
- _CITATION = """\
24
- todo
25
- """
26
-
27
- datasets.utils.version.Version
28
class EnfeverNli(datasets.GeneratorBasedBuilder):
    """Dataset builder for EnfeverNLI, an NLI-style version of FEVER.

    Each example pairs a claim with evidence text and a 3-way NLI label
    (REFUTES / NOT ENOUGH INFO / SUPPORTS).
    """

    def _info(self):
        """Return the dataset metadata (features, version, homepage, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "label": datasets.ClassLabel(names=["REFUTES", "NOT ENOUGH INFO", "SUPPORTS"]),
                    "evidence": datasets.Value("string"),
                    "claim": datasets.Value("string"),
                }
            ),
            # No default supervised_keys (as we have to pass both question
            # and context as input).
            supervised_keys=None,
            version=_VERSION,
            homepage="https://fcheck.fel.cvut.cz/dataset/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download all three JSONL splits and wire each to a SplitGenerator."""
        downloaded_files = dl_manager.download_and_extract(_URLS)
        # One generator per split; gen_kwargs carries the local file path.
        return [
            datasets.SplitGenerator(split, {"filepath": downloaded_files[key]})
            for split, key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from a JSONL file, one example per line.

        Source fields are renamed: cid -> id, context -> evidence,
        query -> claim; label is passed through unchanged.
        """
        with open(filepath, encoding="utf-8") as f:
            for key, line in enumerate(f):
                datapoint = json.loads(line)
                yield key, {
                    "id": datapoint["cid"],
                    "evidence": datapoint["context"],
                    "claim": datapoint["query"],
                    "label": datapoint["label"],
                }