Commit b1d8d14 by NamCyan · 1 parent: 3644089

add load script

Files changed (1):
  1. the-vault-inline.py  +195 -0

the-vault-inline.py ADDED
@@ -0,0 +1,195 @@
+ import os
+
+ import pyarrow as pa
+ import pyarrow.parquet as pq
+ import datasets
+
+
+ # Meta information
+ _REPO_NAME = 'Fsoft-AIC/the-vault-inline'
+
+ _DESCRIPTION = """The Vault is a multilingual code-text dataset with over 34 million pairs covering 10 popular programming languages.
+ It is the largest corpus of parallel code-text data. By building upon The Stack, a massive collection of raw code samples,
+ The Vault offers a comprehensive and clean resource for advancing research in code understanding and generation. It provides
+ high-quality code-text pairs at multiple levels, such as the class and inline levels, in addition to the function level,
+ and can therefore serve many purposes."""
+
+ _HOMEPAGE = "https://huggingface.co/Fsoft-AIC"
+ _LICENSE = "MIT License"
+ _CITATION = """
+ @article{manh2023vault,
+   title={The Vault: A Comprehensive Multilingual Dataset for Advancing Code Understanding and Generation},
+   author={Manh, Dung Nguyen and Hai, Nam Le and Dau, Anh TV and Nguyen, Anh Minh and Nghiem, Khanh and Guo, Jin and Bui, Nghi DQ},
+   journal={arXiv preprint arXiv:2305.06156},
+   year={2023}
+ }
+ """
+ ################################################################################################
+
+ # Config metadata
+ _LANG_TO_TEXT = {
+     "python": "python",
+     "c": "c",
+     "c#": "c_sharp",
+     "c++": "cpp",
+     "go": "go",
+     "java": "java",
+     "javascript": "javascript",
+     "php": "php",
+     "ruby": "ruby",
+     "rust": "rust",
+ }
+ _LANG_CONFIGS = ["all"] + list(_LANG_TO_TEXT.keys())
+
+ _TEXT_TO_LANG = {}
+ for lang in _LANG_TO_TEXT:
+     _TEXT_TO_LANG[_LANG_TO_TEXT[lang]] = lang
+
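+ # Number of train parquet shards per language; these counts must match the files
+ # stored under data/train/ in the dataset repo (see _split_generators below).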
+ num_shard_split = {
+     "ruby": 3,
+     "c": 29,
+     "c_sharp": 1,
+     "cpp": 39,
+     "go": 15,
+     "java": 75,
+     "javascript": 6,
+     "php": 21,
+     "python": 48,
+     "rust": 1,
+ }
+
+ ################################################################################################
+
+ class TheVaultFunctionConfig(datasets.BuilderConfig):
+     """BuilderConfig for The Vault dataset."""
+
+     def __init__(self, *args, split_set=["all"], languages=["all"], **kwargs):
+         """BuilderConfig for The Vault dataset.
+         Args:
+             split_set (:obj:`List[str]`): List of split sets to load.
+             languages (:obj:`List[str]`): List of languages to load.
+             **kwargs: keyword arguments forwarded to super.
+         """
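+         # The config name encodes the split set(s) and language(s), e.g. "all-python"
+         # or the default "all-all".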
+         super().__init__(
+             *args,
+             name="+".join([split.replace("/", "_") for split in split_set]) + "-" + "+".join([_LANG_TO_TEXT[lang] if lang in _LANG_TO_TEXT else lang for lang in languages]),
+             **kwargs,
+         )
+
+         languages = set([lang.lower() for lang in languages])
+
+         assert all([language in _LANG_CONFIGS for language in languages]), f"languages {languages} contains language not in {_LANG_CONFIGS}."
+
+         if "all" in languages:
+             assert len(languages) == 1, f"Passed 'all' together with other languages. {languages}"
+         else:
+             languages = [_LANG_TO_TEXT[lang] for lang in languages]  # Convert to text name
+
+         self.split_set = list(split_set)
+         self.languages = list(languages)
+
+
+ class TheVaultFunction(datasets.GeneratorBasedBuilder):
+     """The Vault dataset."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIG_CLASS = TheVaultFunctionConfig
+     BUILDER_CONFIGS = [TheVaultFunctionConfig(languages=[lang]) for lang in _LANG_CONFIGS]
+     DEFAULT_CONFIG_NAME = "all-all"
+
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features({
+                 "hexsha": datasets.Value("string"),
+                 "repo": datasets.Value("string"),
+                 "path": datasets.Value("string"),
+                 "license": datasets.Sequence(datasets.Value("string")),
+                 "language": datasets.Value("string"),
+                 "identifier": datasets.Value("string"),
+                 "code": datasets.Value("string"),
+                 "code_tokens": datasets.Sequence(datasets.Value("string")),
+                 "original_comment": datasets.Value("string"),
+                 "comment": datasets.Value("string"),
+                 "comment_tokens": datasets.Sequence(datasets.Value("string")),
+                 "start_point": datasets.Sequence(datasets.Value("int32")),
+                 "end_point": datasets.Sequence(datasets.Value("int32")),
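+                 # prev_context / next_context hold the code immediately before and after the
+                 # annotated snippet, together with their start/end positions.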
+                 "prev_context": {
+                     "code": datasets.Value("string"),
+                     "start_point": datasets.Sequence(datasets.Value("int32")),
+                     "end_point": datasets.Sequence(datasets.Value("int32")),
+                 },
+                 "next_context": {
+                     "code": datasets.Value("string"),
+                     "start_point": datasets.Sequence(datasets.Value("int32")),
+                     "end_point": datasets.Sequence(datasets.Value("int32")),
+                 },
+             }),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         generators = []
+         languages = self.config.languages
+
+         if "all" in languages:
+             languages = list(_LANG_TO_TEXT.values())
+
+         split_files = []
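+         # Download every train shard for each requested language; the shard counts
+         # must agree with num_shard_split above.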
+         for language in languages:
+             num_shards = num_shard_split[language]
+             data_files = [
+                 f"data/train/{language}-{_index:05d}-of-{num_shards:05d}.parquet"
+                 for _index in range(num_shards)
+             ]
+             files = dl_manager.download(data_files)
+             split_files.extend(files)
+
+         generators.append(
+             datasets.SplitGenerator(
+                 name="train",
+                 gen_kwargs={
+                     "files": split_files,
+                 },
+             ),
+         )
+
+         return generators
+
+     def _generate_examples(self, files):
+         key = 0
+         for file_idx, file in enumerate(files):
+             with open(file, "rb") as f:
+                 parquet_file = pq.ParquetFile(f)
+                 for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=10_000)):
+                     pa_table = pa.Table.from_batches([record_batch])
+                     for row_index in range(pa_table.num_rows):
+                         row = pa_table.slice(row_index, 1).to_pydict()
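+                         # to_pydict() on a one-row slice maps each column to a single-element
+                         # list, so every field is unwrapped with [0] below.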
+
+                         yield key, {
+                             "hexsha": row['hexsha'][0],
+                             "repo": row['repo'][0],
+                             "path": row['path'][0],
+                             "license": row['license'][0],
+                             "language": row['language'][0],
+                             "identifier": row['identifier'][0],
+                             "code": row['code'][0],
+                             "code_tokens": row['code_tokens'][0],
+                             "original_comment": row['original_comment'][0],
+                             "comment": row['comment'][0],
+                             "comment_tokens": row['comment_tokens'][0],
+                             "start_point": row['start_point'][0],
+                             "end_point": row['end_point'][0],
+                             "prev_context": row['prev_context'][0],
+                             "next_context": row['next_context'][0],
+                         }
+                         key += 1
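
Once this script is on the Hub, the dataset can be pulled through the datasets library. The snippet below is a minimal usage sketch, not part of the commit: it assumes the parquet shards exist under data/train/ as referenced above, and recent versions of datasets may require trust_remote_code=True to execute a loading script.

from datasets import load_dataset

# Default config "all-all": every language, train split
ds_all = load_dataset("Fsoft-AIC/the-vault-inline", split="train", trust_remote_code=True)

# Restrict to one language by forwarding the config's languages argument
ds_py = load_dataset("Fsoft-AIC/the-vault-inline", languages=["python"], split="train", trust_remote_code=True)
print(ds_py[0]["identifier"], ds_py[0]["comment"])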