# jester_embedding/jester_embedding.py
import pandas as pd
import datasets
from sklearn.model_selection import train_test_split

_CITATION = "N/A"
_DESCRIPTION = "Embeddings for the jokes in the Jester jokes dataset."
_HOMEPAGE = "N/A"
_LICENSE = "apache-2.0"

# Parquet files holding the precomputed embeddings, one per embedding model.
_URLS = {
    "mistral": "./jester-salesforce-sfr-embedding-mistral.parquet",
    "instructor-xl": "./jester-hkunlp-instructor-xl.parquet",
    "all-MiniLM-L6-v2": "./jester-sentence-transformers-all-MiniLM-L6-v2.parquet",
    "all-mpnet-base-v2": "./jester-sentence-transformers-all-mpnet-base-v2.parquet",
}

# Embedding dimensionality of each model.
_DIMS = {
    "mistral": 4096,
    "instructor-xl": 768,
    "all-MiniLM-L6-v2": 384,
    "all-mpnet-base-v2": 768,
}


class JesterEmbedding(datasets.GeneratorBasedBuilder):
    """Precomputed embeddings of the Jester jokes, with one configuration per embedding model."""

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="mistral", version=VERSION, description="SFR-Embedding by Salesforce Research."),
        datasets.BuilderConfig(name="instructor-xl", version=VERSION, description="Instructor embedding."),
        datasets.BuilderConfig(name="all-MiniLM-L6-v2", version=VERSION, description="All-round model tuned for many use cases."),
        datasets.BuilderConfig(name="all-mpnet-base-v2", version=VERSION, description="All-round model tuned for many use cases."),
    ]

    DEFAULT_CONFIG_NAME = "mistral"  # It's not mandatory to have a default configuration; use one only if it makes sense.

    def _info(self):
        # Each example is a single embedding vector, stored as a (1, dim) float32 array.
        features = datasets.Features({"x": datasets.Array2D(shape=(1, _DIMS[self.config.name]), dtype="float32")})
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        url = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(url)
        # The gen_kwargs below are passed to _generate_examples; every split reads the
        # same parquet file, and the actual partitioning happens there.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_dir, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": data_dir, "split": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_dir, "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        embeddings = pd.read_parquet(filepath).values
        # Deterministic 64/16/20 train/dev/test split: first hold out 20% for test,
        # then take 20% of the remaining rows as the validation set.
        train, test = train_test_split(embeddings, test_size=0.2, random_state=42)
        train, val = train_test_split(train, test_size=0.2, random_state=42)
        if split == "train":
            examples = train
        elif split == "test":
            examples = test
        else:
            examples = val
        for _id, x in enumerate(examples):
            yield _id, {"x": x.reshape(1, -1)}
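

# Minimal usage sketch (not part of the loading script itself): shows how a consumer
# could load one configuration of this dataset with `datasets.load_dataset`. The
# repository id "kitkatdafu/jester_embedding" is an assumption; substitute the actual
# path where this script is hosted.
if __name__ == "__main__":
    from datasets import load_dataset

    # Load the 384-dimensional all-MiniLM-L6-v2 embeddings; each example is a (1, 384) float32 array.
    ds = load_dataset("kitkatdafu/jester_embedding", "all-MiniLM-L6-v2")
    print(ds)                            # DatasetDict with train/validation/test splits
    print(len(ds["train"][0]["x"][0]))   # 384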