# -*- coding: utf-8 -*-
"""Sarcasm
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/15_wDQ9RJXwyxbomu2F1k0pK9H7XZ1cuT
"""
import pandas as pd
from datasets import (
    GeneratorBasedBuilder, Version, SplitGenerator, Split,
    Features, Value, DatasetInfo
)
# URL definitions
_URLS = {
    "csv_file": "https://drive.google.com/uc?export=download&id=1WcPqVZasDy1nmGcildLS-uw_-04I9Max",
}
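# Optional sanity check (a sketch, not part of the builder): preview the CSV
# header to confirm it carries the columns declared in _info below. This
# assumes the Drive link above is publicly reachable.
# df_preview = pd.read_csv(_URLS["csv_file"], nrows=5)
# print(df_preview.columns.tolist())  # expected: ["comments", "contains_slash_s"]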
class Sarcasm(GeneratorBasedBuilder):
    VERSION = Version("1.0.0")

    def _info(self):
        return DatasetInfo(
            description="Comments labeled for sarcasm (whether they contain a '/s' marker).",
            features=Features({
                "comments": Value("string"),
                "contains_slash_s": Value("int64"),
            }),
            supervised_keys=None,
            homepage="https://github.com/AuraMa111?tab=repositories",
            citation="Citation for the combined dataset",
        )
    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download_and_extract(_URLS)
        data_file_path = downloaded_files["csv_file"]
        # 60/20/20 split by row position; pass explicit row ranges so each
        # split reads a disjoint slice of the CSV.
        num_examples = pd.read_csv(data_file_path).shape[0]
        train_size = int(0.6 * num_examples)
        val_size = int(0.2 * num_examples)
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"data_file_path": data_file_path, "start": 0, "end": train_size},
            ),
            SplitGenerator(
                name=Split.VALIDATION,
                gen_kwargs={"data_file_path": data_file_path, "start": train_size, "end": train_size + val_size},
            ),
            SplitGenerator(
                name=Split.TEST,
                gen_kwargs={"data_file_path": data_file_path, "start": train_size + val_size, "end": num_examples},
            ),
        ]
    def _generate_examples(self, data_file_path, start, end):
        data = pd.read_csv(data_file_path)
        # Yield the rows in [start, end), keyed by their original row index.
        for index, row in data.iloc[start:end].iterrows():
            yield index, {
                "comments": row["comments"],
                "contains_slash_s": row["contains_slash_s"],
            }
# Instantiate the dataset builder
sarcasm = Sarcasm()
# Download the CSV and build the splits
sarcasm.download_and_prepare()
# Access the prepared training, validation, and test splits
dataset_train = sarcasm.as_dataset(split="train")
dataset_validation = sarcasm.as_dataset(split="validation")
dataset_test = sarcasm.as_dataset(split="test")
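
# Quick inspection of the prepared splits: sizes plus one training example.
# Column names follow the Features declared in _info.
print({
    "train": len(dataset_train),
    "validation": len(dataset_validation),
    "test": len(dataset_test),
})
print(dataset_train[0])

# Rough class-balance check via pandas (a sketch; assumes "contains_slash_s"
# is a 0/1 flag, as its int64 declaration suggests).
train_df = dataset_train.to_pandas()
print(train_df["contains_slash_s"].value_counts(normalize=True))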