shibing624 committed
Commit
192b2ed
1 Parent(s): 8d44f12

Create source_code.py

Files changed (1)
  1. source_code.py +117 -0
source_code.py ADDED
@@ -0,0 +1,117 @@
# -*- coding: utf-8 -*-
"""
@author:XuMing([email protected])
@description: Hugging Face `datasets` loading script for the code_autocomplete source code corpus.
"""

"""Code AutoComplete source code dataset corpus (code_autocomplete): Python, Java, and C++."""

import os

import datasets

# Plain-text data: high-quality programming source code, including Python, Java, and C++ sources.
_DESCRIPTION = """纯文本数据,内容:高质量编程源代码,包括Python,Java,CPP源代码"""

PYTHON_HOME = "https://github.com/bharathgs/Awesome-pytorch-list"
JAVA_HOME = "https://github.com/akullpp/awesome-java"
CPP_HOME = "https://github.com/fffaraz/awesome-cpp"

_CITATION = "https://github.com/shibing624/code-autocomplete"

_DATA_URL = "https://github.com/shibing624/code-autocomplete/releases/download/0.0.4/source_code.zip"

class SourceCodeConfig(datasets.BuilderConfig):
    """BuilderConfig for SourceCode."""

    def __init__(self, features, data_url, citation, url, **kwargs):
        """BuilderConfig for SourceCode.

        Args:
            features: `list[string]`, list of the features that will appear in the
                feature dict. Should not include "label".
            data_url: `string`, url to download the zip file from.
            citation: `string`, citation for the data set.
            url: `string`, url for information about the data set.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = features
        self.data_url = data_url
        self.citation = citation
        self.url = url

class SourceCode(datasets.GeneratorBasedBuilder):
    """Source code corpus (Python, Java, C++) for code autocomplete (code_autocomplete)."""

    BUILDER_CONFIGS = [
        SourceCodeConfig(
            name="python",
            description=_DESCRIPTION,
            features=["text"],
            data_url=_DATA_URL,
            citation=_CITATION,
            url=PYTHON_HOME,
        ),
        SourceCodeConfig(
            name="java",
            description=_DESCRIPTION,
            features=["text"],
            data_url=_DATA_URL,
            citation=_CITATION,
            url=JAVA_HOME,
        ),
        SourceCodeConfig(
            name="cpp",
            description=_DESCRIPTION,
            features=["text"],
            data_url=_DATA_URL,
            citation=_CITATION,
            url=CPP_HOME,
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                }
            ),
            homepage=self.config.url,
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager):
        dl_dir = dl_manager.download_and_extract(self.config.data_url) or ""
        dl_dir = os.path.join(dl_dir, f"source_code/{self.config.name}")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(dl_dir, "train.txt"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(dl_dir, "valid.txt"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(dl_dir, "test.txt"),
                },
            ),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        with open(filepath, 'r', encoding="utf-8") as f:
            for idx, row in enumerate(f):
                if row.strip():
                    yield idx, {"text": row}
                else:
                    yield idx, {"text": ""}
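
For reference, a minimal usage sketch of loading the corpus through the script above. The dataset id "shibing624/source_code" is an assumption about where this loading script is hosted on the Hub, and recent versions of the datasets library may additionally require trust_remote_code=True for script-based datasets:

from datasets import load_dataset

# Hypothetical repository id; adjust to wherever this loading script is hosted.
# Configurations defined above: "python", "java", "cpp".
dataset = load_dataset("shibing624/source_code", "python")

# Splits follow the generators above: train / validation / test, each with a "text" field.
print(dataset["train"][0]["text"])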