# NOTE(review): the following three lines were web-scrape artifacts (file-size
# banner, commit-hash row, line-number gutter) — not valid Python; commented out
# so the module parses.
"""Graptoloidea Specimens dataset."""
import ast
import csv
import logging
import os
import random
import tempfile
from typing import List

import datasets
import numpy as np
import pandas as pd
from PIL import Image
_CITATION = """\
111
"""
_DESCRIPTION = """\
[Your dataset description here...]
"""
_HOMEPAGE = "https://zenodo.org/records/6194943"
_license = "111"
class GraptoloideaSpecimensDataset(datasets.GeneratorBasedBuilder):
    """Builder for the Graptoloidea Specimens dataset.

    Downloads a single CSV of specimen records, shuffles it deterministically,
    and materializes 70/15/15 train/test/validation splits as temporary CSV
    files that ``_generate_examples`` then streams back out row by row.
    """

    _URL = "https://raw.githubusercontent.com/LeoZhangzaolin/photos/main/Final_GS_with_Images.csv"

    def _info(self):
        """Return the dataset metadata and per-example feature schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "Suborder": datasets.Value("string"),
                    "Infraorder": datasets.Value("string"),
                    "Family (Subfamily)": datasets.Value("string"),
                    "Genus": datasets.Value("string"),
                    "Tagged Species Name": datasets.Value("string"),
                    # Stored as a path/URL string, not a decoded image.
                    "Image": datasets.Value("string"),
                    "Stage": datasets.Value("string"),
                    "Mean Age Value": datasets.Value("float64"),
                    "Locality (Longitude, Latitude, Horizon)": datasets.Value("string"),
                    "Reference (Specimens Firstly Published)": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the source CSV and write 70/15/15 split files.

        Args:
            dl_manager: HuggingFace download manager used to fetch ``_URL``.

        Returns:
            SplitGenerators for TRAIN, TEST, and VALIDATION, each pointing at
            a per-split CSV written to a fresh temporary directory.
        """
        downloaded_file = dl_manager.download_and_extract(self._URL)
        df = pd.read_csv(downloaded_file)
        # Fixed random_state so repeated loads yield identical splits;
        # an unseeded shuffle made train/test/validation irreproducible.
        df = df.sample(frac=1, random_state=42).reset_index(drop=True)

        # 70% train, 15% test, remainder (~15%) validation.
        train_size = int(0.7 * len(df))
        test_size = int(0.15 * len(df))
        train_df = df[:train_size]
        test_df = df[train_size:train_size + test_size]
        validation_df = df[train_size + test_size:]

        # A unique temp directory instead of hard-coded /tmp paths: portable
        # to Windows and safe against concurrent loads clobbering each other.
        split_dir = tempfile.mkdtemp(prefix="graptoloidea_splits_")
        train_file = os.path.join(split_dir, "train_split.csv")
        test_file = os.path.join(split_dir, "test_split.csv")
        validation_file = os.path.join(split_dir, "validation_split.csv")
        train_df.to_csv(train_file, index=False)
        test_df.to_csv(test_file, index=False)
        validation_df.to_csv(validation_file, index=False)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_file}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_file}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": validation_file}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from one split CSV.

        Args:
            filepath: path to a split CSV produced by ``_split_generators``.

        Raises:
            ValueError: if a non-empty 'mean age value' cell is not numeric.
        """
        logging.info("generating examples from = %s", filepath)
        with open(filepath, encoding='utf-8') as f:
            reader = csv.DictReader(f)
            # 1-based keys, matching the original manual counter.
            for key, row in enumerate(reader, start=1):
                raw_age = row['mean age value'].strip()
                # Schema declares float64: cast eagerly so a malformed cell
                # fails at the offending row rather than at Arrow cast time;
                # an empty cell becomes a null value.
                mean_age = float(raw_age) if raw_age else None
                yield key, {
                    "Suborder": row['Suborder'].strip(),
                    "Infraorder": row['Infraorder'].strip(),
                    "Family (Subfamily)": row['Family (Subfamily)'].strip(),
                    "Genus": row['Genus'].strip(),
                    # CSV headers use lower-case names for these columns;
                    # keys must match the source file exactly.
                    "Tagged Species Name": row['tagged species name'].strip(),
                    "Image": row['image'].strip(),
                    "Stage": row['Stage'].strip(),
                    "Mean Age Value": mean_age,
                    "Locality (Longitude, Latitude, Horizon)": row['Locality (Longitude, Latitude, Horizon)'].strip(),
                    "Reference (Specimens Firstly Published)": row['Reference (specimens firstly published)'].strip(),
                }
|