"""Wrapper for datasets in CodeXGLUE benchmark.""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
|
|
import datasets |
|
|
|
|
|
_CITATION = """\ |
|
@article{Lu2021, |
|
author = {Lu, Shuai and Guo, Daya and Ren, Shuo and Huang, Junjie and Svyatkovskiy, Alexey and Blanco, Ambrosio and Clement, Colin B. and Drain, Dawn and Jiang, Daxin and Tang, Duyu and Li, Ge and Zhou, Lidong and Shou, Linjun and Zhou, Long and Tufano, Michele and Gong, Ming and Zhou, Ming and Duan, Nan and Sundaresan, Neel and Deng, Shao Kun and Fu, Shengyu and Liu, Shujie}, |
|
year = {2021}, |
|
booktitle = {arXiv}, |
|
title = {CodeXGLUE - A Machine Learning Benchmark Dataset for Code Understanding and Generation} |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
CodeXGLUE is a benchmark dataset to foster machine learning research for program understanding and generation. |
|
CodeXGLUE includes a collection of 10 tasks across 14 datasets and a platform for model evaluation and comparison. |
|
""" |
|
|
|
_HOMEPAGE = "https://microsoft.github.io/CodeXGLUE/" |
|
|
|
_LICENSE = "" |
|
|
|
|
|
|
|
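
# The archive paths below are relative, so the two zip files are assumed to
# sit next to this loading script. Each archive is expected to unpack into a
# directory named after its configuration, e.g.
# code-to-code-trans/train.java-cs.txt.java (see `_split_generators` below).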
_URLs = {
    'code-to-code-trans': "code-to-code-trans.zip",
    'code-completion-token-py150': "code-completion-token-py150.zip",
}


class CodeXGLUE(datasets.GeneratorBasedBuilder):
    """Builder for a subset of the CodeXGLUE benchmark tasks."""

    VERSION = datasets.Version("1.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="code-to-code-trans", version=VERSION,
                               description="Java to C# translation task."),
        datasets.BuilderConfig(name="code-completion-token-py150", version=VERSION,
                               description="Token-level code completion task for Python."),
    ]
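
    # No DEFAULT_CONFIG_NAME is set, so callers must select one of the two
    # configurations above by name when loading this dataset (see the usage
    # sketch at the bottom of this file).
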
    def _info(self):
        if self.config.name == "code-to-code-trans":
            features = datasets.Features(
                {
                    "java_code": datasets.Value("string"),
                    "cs_code": datasets.Value("string"),
                }
            )
        elif self.config.name == 'code-completion-token-py150':
            features = datasets.Features(
                {
                    "code": datasets.Value("string"),
                }
            )
        else:
            raise ValueError(f"Unknown configuration name: {self.config.name}")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        my_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(my_urls)

        if self.config.name == 'code-to-code-trans':
            data_dir = os.path.join(data_dir, 'code-to-code-trans')

            def get_kwargs(split_name: str):
                return {
                    "data_paths": {
                        "java": os.path.join(data_dir, f'{split_name}.java-cs.txt.java'),
                        "cs": os.path.join(data_dir, f'{split_name}.java-cs.txt.cs'),
                    },
                    "split": split_name
                }

            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs=get_kwargs('train')
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs=get_kwargs('test')
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs=get_kwargs('valid')
                ),
            ]
        elif self.config.name == 'code-completion-token-py150':
            data_dir = os.path.join(data_dir, self.config.name)

            def get_kwargs(split_name: str):
                return {
                    "data_paths": {
                        "code": os.path.join(data_dir, f'{split_name}.txt')
                    },
                    "split": split_name
                }

            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs=get_kwargs('train')
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs=get_kwargs('test')
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs=get_kwargs('dev')
                ),
            ]

    def _generate_examples(self, data_paths, split):
        """Yields examples as (key, example) tuples."""
        if self.config.name == 'code-to-code-trans':
            java_path = data_paths['java']
            cs_path = data_paths['cs']
            # The two files are line-aligned: line i of the Java file and
            # line i of the C# file form one translation pair.
            with open(java_path, encoding="utf-8") as java_file, \
                    open(cs_path, encoding="utf-8") as cs_file:
                for id_, (java_code, cs_code) in enumerate(zip(java_file, cs_file)):
                    yield id_, {
                        'java_code': java_code.strip(),
                        'cs_code': cs_code.strip()
                    }
        elif self.config.name == 'code-completion-token-py150':
            code_path = data_paths['code']
            # Each line of the split file is treated as one code sample.
            with open(code_path, encoding='utf-8') as code_file:
                for _id, code_line in enumerate(code_file):
                    yield _id, {
                        'code': code_line
                    }
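

# Minimal usage sketch (not part of the builder): loading one configuration
# with the `datasets` library. It assumes this script is saved locally as
# codexglue.py next to the two zip archives; depending on the installed
# `datasets` version, loading local scripts may require trust_remote_code=True
# or may not be supported at all.
if __name__ == "__main__":
    dataset = datasets.load_dataset("./codexglue.py", "code-to-code-trans")
    print(dataset["train"][0])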