# oo-method-test-split.py
# Author: Edward J. Schwartz
# (dedup, commit 48b6277)
#!/usr/bin/python
import datasets
import itertools
import os
import pyarrow as pa
import pyarrow.parquet as pq
# Hugging Face Hub id of the base dataset; its 'combined' split is re-partitioned below.
BASE_DATASET = "ejschwartz/oo-method-test"
def setexe(r):
    """Add 'Dirname' and 'Exename' columns derived from the row's 'Binary' path."""
    dirname, exename = os.path.split(r['Binary'])
    r['Dirname'] = dirname
    r['Exename'] = exename
    return r
class OOMethodTestDataset(datasets.ArrowBasedBuilder):
    """Builder exposing several train/test re-splits of the base dataset.

    Every configuration loads the same rows from BASE_DATASET and differs
    only in how (or whether) those rows are partitioned into train/test.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="combined",
            version=datasets.Version("1.0.0"),
            description="All data files combined",
        ),
        datasets.BuilderConfig(
            name="byrow",
            version=datasets.Version("1.0.0"),
            description="Split by example (dumb)",
        ),
        datasets.BuilderConfig(
            name="byfuncname",
            version=datasets.Version("1.0.0"),
            description="Split by function name",
        ),
        datasets.BuilderConfig(
            name="bylibrary",
            version=datasets.Version("1.0.0"),
            description="Split so that library functions (those appearing in >1 exe) are used for training, and non-library functions are used for testing",
        ),
        datasets.BuilderConfig(
            name="bylibrarydedup",
            version=datasets.Version("1.0.0"),
            description="Split so that library functions (those appearing in >1 exe) are used for training, and non-library functions are used for testing. Only one example per function name is retained.",
        ),
    ]

    def _info(self):
        """Declare the feature schema shared by all configurations."""
        return datasets.DatasetInfo(
            features=datasets.Features({
                'Binary': datasets.Value(dtype='string', id=None),
                'Addr': datasets.Value(dtype='string'),
                'Name': datasets.Value(dtype='string'),
                'Type': datasets.ClassLabel(num_classes=2, names=['func', 'method']),
                'Disassembly': datasets.Value(dtype='string'),
                'Dirname': datasets.Value(dtype='string'),
                'Exename': datasets.Value(dtype='string'),
            }))

    def _split_generators(self, dl_manager):
        """Load the base dataset and partition it according to self.config.

        Returns a list of SplitGenerators whose gen_kwargs carry the
        already-filtered datasets.Dataset consumed by _generate_tables.
        """
        ds = datasets.load_dataset(BASE_DATASET)['combined']
        # Add the derived Dirname/Exename columns used by the split logic.
        ds = ds.map(setexe, batched=False)
        if self.config.name == "combined":
            return [
                datasets.SplitGenerator(
                    name="combined",
                    gen_kwargs={"ds": ds},
                ),
            ]
        elif self.config.name == "byrow":
            # Plain random 90/10 row-level split.
            ds = ds.train_test_split(test_size=0.1, seed=42)
            return [
                datasets.SplitGenerator(
                    name="train",
                    gen_kwargs={"ds": ds['train']},
                ),
                datasets.SplitGenerator(
                    name="test",
                    gen_kwargs={"ds": ds['test']},
                ),
            ]
        elif self.config.name == "byfuncname":
            # Split the set of function *names* 90/10, then assign each row to
            # the side its name landed on, so no name straddles train and test.
            unique_names = ds.unique('Name')
            nameds = datasets.Dataset.from_dict({'Name': unique_names})
            name_split = nameds.train_test_split(test_size=0.1, seed=42)
            # Sets give O(1) membership per row in .filter(); the original
            # lists made each row's test scan the whole name list.
            train_names = set(name_split['train']['Name'])
            test_names = set(name_split['test']['Name'])
            return [
                datasets.SplitGenerator(
                    name="train",
                    gen_kwargs={"ds": ds.filter(lambda r: r['Name'] in train_names)},
                ),
                datasets.SplitGenerator(
                    name="test",
                    gen_kwargs={"ds": ds.filter(lambda r: r['Name'] in test_names)},
                ),
            ]
        elif self.config.name in ["bylibrary", "bylibrarydedup"]:
            # A function (name) is a library function if it appears in more
            # than one Exename.
            # Pairs like ('func', 'oo.exe'):
            testcount = set(zip(ds['Name'], ds['Exename']))
            # Pairs like ('func', 'path/oo.exe'):
            testcountfull = set(zip(ds['Name'], ds['Binary']))
            # itertools.groupby only groups adjacent items, so sort by the
            # grouping key (the function name) first.
            testcount = sorted(testcount, key=lambda x: x[0])
            testcountfull = sorted(testcountfull, key=lambda x: x[0])
            grouped = {k: [b for _, b in g]
                       for k, g in itertools.groupby(testcount, lambda t: t[0])}
            groupedfull = {k: [b for _, b in g]
                           for k, g in itertools.groupby(testcountfull, lambda t: t[0])}
            library_func_names = {f for f, exes in grouped.items() if len(exes) > 1}
            # For dedup, retain a single (Name, Binary) pair per library function.
            library_func_names_dedup = {(f, exes[0]) for f, exes in groupedfull.items() if len(exes) > 1}
            nonlibrary_func_names = {f for f, exes in grouped.items() if len(exes) == 1}
            if self.config.name == "bylibrary":
                train_filter_fun = lambda r: r['Name'] in library_func_names
            else:  # bylibrarydedup (guaranteed by the enclosing elif)
                train_filter_fun = lambda r: (r['Name'], r['Binary']) in library_func_names_dedup
            return [
                datasets.SplitGenerator(
                    name="train",
                    gen_kwargs={"ds": ds.filter(train_filter_fun)},
                ),
                datasets.SplitGenerator(
                    name="test",
                    gen_kwargs={"ds": ds.filter(lambda r: r['Name'] in nonlibrary_func_names)},
                ),
            ]
        else:
            # assert would be stripped under `python -O`; raise explicitly.
            raise ValueError(f"Invalid configuration: {self.config.name}")

    def _generate_tables(self, ds):
        """Yield (key, pyarrow.Table) pairs for the given Dataset.

        Converting to pandas is silly, but the old version of datasets
        doesn't seem to have a way to convert to Arrow?
        """
        for i, batch in enumerate(ds.to_pandas(batched=True)):
            yield i, pa.Table.from_pandas(batch)