import csv
import os
import sys

import datasets

# Individual news articles can be very long, so raise the CSV field size limit.
csv.field_size_limit(sys.maxsize)

_DESCRIPTION = """"""
_PROJECT_URL = """"""


_CITATION = """
@misc{persian_daily,
  author={Saied Alimoradi},
  year={2021},
  howpublished={https://saied71.github.io/RohanAiLab/}
}
"""

_URL = "persian_daily.zip"


class Persian_news(datasets.GeneratorBasedBuilder):
    """Persian daily-news dataset builder: yields (text, summary) pairs read from a CSV file."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                }
            ),
            homepage=_PROJECT_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download/extract the archive and locate the CSV file inside it.
        dl_dir = dl_manager.download_and_extract(_URL)
        data_path = os.path.join(dl_dir, "persian_daily.csv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_path},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f)
            for id_, row in enumerate(reader):
                # Skip the CSV header row.
                if id_ == 0:
                    continue
                # Column 0 holds the summary, column 1 the article text.
                yield id_, {
                    "text": row[1],
                    "summary": row[0],
                }