Commit 39ea9f0 committed by its5Q · 1 Parent(s): 5b65f1b

Upload dataset and loading script

Files changed (3):
  1. README.md +69 -1
  2. habr_qna.py +102 -0
  3. questions.jsonl.zst +3 -0
README.md CHANGED
@@ -1,3 +1,71 @@
  ---
- license: cc0-1.0
+ annotations_creators:
+ - crowdsourced
+ language:
+ - ru
+ language_creators:
+ - crowdsourced
+ license:
+ - cc0-1.0
+ multilinguality:
+ - monolingual
+ pretty_name: Habr QnA
+ size_categories:
+ - 100K<n<1M
+ source_datasets:
+ - original
+ tags: []
+ task_categories:
+ - text-generation
+ - question-answering
+ task_ids:
+ - language-modeling
+ - open-domain-qa
  ---
+
+ # Dataset Card for Habr QnA
+
+ ## Table of Contents
+ - [Dataset Card for Habr QnA](#dataset-card-for-habr-qna)
+   - [Table of Contents](#table-of-contents)
+   - [Dataset Description](#dataset-description)
+     - [Dataset Summary](#dataset-summary)
+     - [Languages](#languages)
+   - [Dataset Structure](#dataset-structure)
+     - [Data Fields](#data-fields)
+     - [Data Splits](#data-splits)
+   - [Dataset Creation](#dataset-creation)
+   - [Additional Information](#additional-information)
+     - [Dataset Curators](#dataset-curators)
+
+ ## Dataset Description
+
+ - **Repository:** https://github.com/its5Q/habr-qna-parser
+
+ ### Dataset Summary
+
+ This is a dataset of questions and answers scraped from [Habr QnA](https://qna.habr.com/). It contains 723,430 questions together with their answers, comments, and other metadata.
+
+ ### Languages
+
+ The dataset is mostly in Russian, with source code snippets in various programming languages.
+
+ ## Dataset Structure
+
+ ### Data Fields
+
+ Data fields can be previewed on the dataset card page.
+
+ ### Data Splits
+
+ All 723,430 examples are in the train split; there is no validation split.
+
+ ## Dataset Creation
+
+ The data was scraped with a script located in [my GitHub repository](https://github.com/its5Q/habr-qna-parser).
+
+ ## Additional Information
+
+ ### Dataset Curators
+
+ - https://github.com/its5Q
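A minimal loading sketch with the `datasets` library follows. The repo id `its5Q/habr_qna` is an assumption (it is not stated anywhere in this commit), and recent `datasets` releases may additionally require `trust_remote_code=True` for datasets backed by a loading script like the one below.

```python
# Minimal sketch; the repo id "its5Q/habr_qna" is assumed and may differ.
from datasets import load_dataset

# On recent versions of `datasets`, script-backed datasets may also need
# trust_remote_code=True.
dataset = load_dataset("its5Q/habr_qna", split="train")

example = dataset[0]
print(example["title"])        # question title
print(example["description"])  # question body
# A Sequence over a dict of features is exposed as a dict of lists,
# so all answer bodies for this question sit under one key:
print(example["answers"]["body"])
```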
habr_qna.py ADDED
@@ -0,0 +1,102 @@
+ # coding=utf-8
+ # Copyright 2023 The HuggingFace Datasets Authors and Ilya Gusev
+ # Modified by its5Q
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ import os
+ import io
+
+ import zstandard
+ import jsonlines
+ import datasets
+
+ try:
+     import simdjson
+     parser = simdjson.Parser()
+     def parse_json(x):
+         try:
+             return parser.parse(x).as_dict()
+         except ValueError:
+             return
+ except ImportError:
+     import json
+     def parse_json(x):
+         return json.loads(x)
+
+
+ _DESCRIPTION = "Habr QnA Dataset"
+ _URL = "questions.jsonl.zst"
+
+
+ class YandexQFullDataset(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("0.0.1")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="default", version=VERSION, description=""),
+     ]
+
+     DEFAULT_CONFIG_NAME = "default"
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("int32"),
+                 "author": datasets.Value("string"),
+                 "title": datasets.Value("string"),
+                 "description": datasets.Value("string"),
+                 "tags": datasets.Sequence(feature=datasets.Value("string")),
+                 "posted_at": datasets.Value("string"),
+                 "view_count": datasets.Value("int32"),
+                 "subscribers_count": datasets.Value("int32"),
+                 "complexity": datasets.Value("string"),
+                 "complexity_votes": datasets.Value("int32"),
+                 "comments": datasets.Sequence(feature={
+                     "author": datasets.Value("string"),
+                     "posted_at": datasets.Value("string"),
+                     "body": datasets.Value("string")
+                 }),
+                 "answers": datasets.Sequence(feature={
+                     "id": datasets.Value("int32"),
+                     "author": datasets.Value("string"),
+                     "posted_at": datasets.Value("string"),
+                     "body": datasets.Value("string"),
+                     "accepted": datasets.Value("bool"),
+                     "upvote_count": datasets.Value("int32"),
+                     "comments": datasets.Sequence(feature={
+                         "author": datasets.Value("string"),
+                         "posted_at": datasets.Value("string"),
+                         "body": datasets.Value("string")
+                     })
+                 })
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features
+         )
+
+     def _split_generators(self, dl_manager):
+         downloaded_file = dl_manager.download(_URL)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"path": downloaded_file}),
+         ]
+
+     def _generate_examples(self, path):
+         with open(path, "rb") as f:
+             cctx = zstandard.ZstdDecompressor()
+             reader_stream = io.BufferedReader(cctx.stream_reader(f))
+             reader = jsonlines.Reader(reader_stream, loads=parse_json)
+             for id_, item in enumerate(reader):
+                 yield id_, item
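In `_generate_examples`, the script streams the zstd-compressed JSONL instead of decompressing it to disk first. A rough standalone sketch of the same read path, using only `zstandard` and the standard library (the optional `simdjson` fast path is omitted; the file name `questions.jsonl.zst` is taken from this commit):

```python
# Standalone sketch of the streaming read used in _generate_examples above:
# decompress questions.jsonl.zst on the fly and parse one JSON object per line.
import io
import json

import zstandard


def iter_questions(path="questions.jsonl.zst"):
    """Yield one question record (dict) per line of the zstd-compressed JSONL file."""
    with open(path, "rb") as f:
        dctx = zstandard.ZstdDecompressor()
        # stream_reader decompresses lazily; BufferedReader adds line-oriented iteration.
        with io.BufferedReader(dctx.stream_reader(f)) as reader:
            for line in reader:
                if line.strip():
                    yield json.loads(line)


# Peek at the first few records.
for i, question in enumerate(iter_questions()):
    print(question["title"])
    if i == 2:
        break
```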
questions.jsonl.zst ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a34f33c4bdccf86b79878b379a8f1cbad0215c6b184fe0b9812a16f3c0947caa
+ size 499909503
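The three lines above are a Git LFS pointer: the ~500 MB data file itself is stored in LFS and is identified by its SHA-256 digest and byte size. A quick sketch of checking a locally downloaded `questions.jsonl.zst` against that pointer:

```python
# Verify a downloaded questions.jsonl.zst against the oid and size recorded
# in the LFS pointer above.
import hashlib

EXPECTED_SHA256 = "a34f33c4bdccf86b79878b379a8f1cbad0215c6b184fe0b9812a16f3c0947caa"
EXPECTED_SIZE = 499909503  # bytes, from the "size" line of the pointer

sha256 = hashlib.sha256()
size = 0
with open("questions.jsonl.zst", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        sha256.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size}"
assert sha256.hexdigest() == EXPECTED_SHA256, "sha256 mismatch"
print("questions.jsonl.zst matches the LFS pointer")
```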