parquet-converter committed on
Commit
62158eb
·
1 Parent(s): 4d30699

Update parquet files

Browse files
.gitattributes CHANGED
@@ -15,3 +15,5 @@
15
  *.pt filter=lfs diff=lfs merge=lfs -text
16
  *.pth filter=lfs diff=lfs merge=lfs -text
17
  *.csv filter=lfs diff=lfs merge=lfs -text
 
 
 
15
  *.pt filter=lfs diff=lfs merge=lfs -text
16
  *.pth filter=lfs diff=lfs merge=lfs -text
17
  *.csv filter=lfs diff=lfs merge=lfs -text
18
+ default/spanish_diagnostics-train.parquet filter=lfs diff=lfs merge=lfs -text
19
+ default/spanish_diagnostics-test.parquet filter=lfs diff=lfs merge=lfs -text
spanish_diagnostics.csv → default/spanish_diagnostics-test.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:14d674a6f896db4482c95a123115140f34e1f4767ed40700f35bb4ceaceae0cd
3
- size 6847038
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:77c5691d2cb9891d7297cc0c9be169ac0463ee2b971a248481748de4fdce8360
3
+ size 1373042
default/spanish_diagnostics-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3881e897ee4075b3c8ab7e44aaa16a00ffa1dcb8f90b9204b97a491eb8ef74e2
3
+ size 3271739
spanish_diagnostics.py DELETED
@@ -1,41 +0,0 @@
1
import csv

import datasets

# Single CSV containing every example; rows are partitioned into train/test
# by a fixed row-index boundary (see _generate_examples).
_DOWNLOAD_URL = "https://huggingface.co/datasets/fvillena/spanish_diagnostics/resolve/main/spanish_diagnostics.csv"


class SpanishDiagnostics(datasets.GeneratorBasedBuilder):
    """Spanish diagnostics classification dataset.

    Free-text medical diagnoses labeled as "dental" (1) or "not_dental" (0).
    """

    def _info(self):
        """Return dataset metadata: one string feature and a binary class label."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.ClassLabel(names=["not_dental", "dental"]),
                }
            )
        )

    def _split_generators(self, dl_manager):
        """Download the single CSV once and route it to both splits.

        Both splits read the same file; `is_test` selects which side of the
        fixed row boundary each generator yields.
        """
        path = dl_manager.download_and_extract(_DOWNLOAD_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": path, "is_test": False}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": path, "is_test": True}),
        ]

    def _generate_examples(self, filepath, is_test, test_size=0.3):
        """Yield (id, example) pairs for one split of the CSV.

        Row 0 is the CSV header and is skipped, so example ids start at 1
        (matching the original behavior). Rows with id < 70001 belong to the
        train split; the rest belong to the test split.

        `test_size` is unused — the split boundary is the fixed row index
        below, not a fraction. The parameter is kept only for backward
        compatibility with existing callers.
        """
        # First data-row index that belongs to the test split
        # (ids 1..70000 -> train, 70001.. -> test).
        train_threshold = 70001
        with open(filepath, encoding="utf-8") as csv_file:
            reader = csv.reader(csv_file)
            for id_, row in enumerate(reader):
                if id_ == 0:
                    # Header row: skip without emitting an example.
                    continue
                text, label = row
                # Fix: the original used bitwise `&` on booleans; use a plain
                # boolean comparison to select exactly one split per row.
                row_is_test = id_ >= train_threshold
                if row_is_test == is_test:
                    yield id_, {"text": text, "label": int(label)}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
tokenizer.json DELETED
The diff for this file is too large to render. See raw diff