# coding=utf-8
# Copyright 2020 BigScience Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""P3 (Public Pool of Prompts)"""


import glob
import json
import os
from collections import defaultdict

import datasets
import tensorflow as tf


_CITATION = """\
TODO"""

_DESCRIPTION = """\
P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).

Prompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).

To train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. **The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**
"""

_LICENSE = "Apache License 2.0"

_HOMEPAGE = "https://github.com/bigscience-workshop/promptsource"

_DATA_PATH = "data"
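
# Expected on-disk layout under `_DATA_PATH` (one folder per prompted task),
# as consumed by `find_task_splits_and_features` and `_URLs` below:
#   data/<task_name>/COMPLETED
#   data/<task_name>/stats.<split>.json
#   data/<task_name>/info.<split>.json
#   data/<task_name>/<split>.tfrecord-00000-of-00001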


def load_cached_task(features_file, tfrecord):
    # Read the feature schema (shapes and dtypes) stored alongside the TFRecord.
    with tf.io.gfile.GFile(features_file) as f:
        features = json.load(f)

    # Use `FixedLenSequenceFeature` for sequences with variable length.
    def _feature_config(shape, dtype):
        if dtype in ("int32", "bool"):
            # int32 and bool are stored as int64 in the tf.train.Example protobuf.
            dtype = "int64"
        if shape and shape[0] is None:
            return tf.io.FixedLenSequenceFeature(
                shape[1:], dtype, allow_missing=True
            )
        return tf.io.FixedLenFeature(shape, dtype)

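    # Each feature entry in the info JSON is expected to look like
    # {"shape": [None], "dtype": "int32"}, so `_feature_config(**desc)`
    # receives exactly the `shape` and `dtype` keywords.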
    feature_description = {
        feat: _feature_config(**desc) for feat, desc in features.items()
    }

    ds = tf.data.TFRecordDataset(tf.io.gfile.glob([tfrecord]))
    ds = ds.map(
        lambda pb: tf.io.parse_single_example(pb, feature_description),
        num_parallel_calls=tf.data.experimental.AUTOTUNE
    )
    # Cast features back to the types from the info JSON since some features
    # must be cast for storage (e.g., int32 is stored as int64).
    ds = ds.map(
        lambda x: {k: tf.cast(v, features[k]["dtype"]) for k, v in x.items()},
        num_parallel_calls=tf.data.experimental.AUTOTUNE
    )
    return ds
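
# Hypothetical usage sketch for `load_cached_task` (paths are examples only):
#   ds = load_cached_task(
#       "data/some_task/info.train.json",
#       "data/some_task/train.tfrecord-00000-of-00001",
#   )
#   for ex in ds.as_numpy_iterator():
#       print(ex["inputs_pretokenized"])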

def find_task_splits_and_features():
    """Find the available tasks under ./data and their available splits and features."""
    task_and_their_splits = defaultdict(dict)
    for stats in glob.glob(f"{_DATA_PATH}/*/stats.*.json"):
        folder_path = os.path.dirname(stats)
        task_name = folder_path.split("/")[-1]
        if "rte" not in task_name:
            continue
        split_name = os.path.basename(stats).split(".")[1]

        if not os.path.exists(f"{folder_path}/COMPLETED"):
            continue

        with open(stats, "r") as f:
            split_stats = json.load(f)
            nb_examples = split_stats["examples"]

        if nb_examples > 0:
            with open(os.path.join(folder_path, f"info.{split_name}.json")) as f:
                split_info = json.load(f)
                features = split_info["features"]
                assert split_info["num_shards"] == 1

            # All splits under the same task have the same features dictionary (and thus the same features list)
            if task_and_their_splits[task_name] == {}:
                task_and_their_splits[task_name] = {
                    "splits": [],
                    "features": [],
                }

            task_and_their_splits[task_name]["splits"].append(split_name)
            if task_and_their_splits[task_name]["features"] == []:
                task_and_their_splits[task_name]["features"] = sorted(list(features.keys()))
            else:
                assert task_and_their_splits[task_name]["features"] == sorted(list(features.keys()))
    return task_and_their_splits
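
# The returned mapping has the following shape (task name is hypothetical):
#   {"some_task": {"splits": ["train", "validation"],
#                  "features": ["inputs", "inputs_pretokenized",
#                               "targets", "targets_pretokenized"]}}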


_TASK_SPLITS_AND_FEATURES = find_task_splits_and_features()
_URLs = {
    task_name: {
        split_name: {
            "tfrecord": f"{_DATA_PATH}/{task_name}/{split_name}.tfrecord-00000-of-00001",
            "features_file": f"{_DATA_PATH}/{task_name}/info.{split_name}.json",
        }
        for split_name in splits_and_features["splits"]
    }
    for task_name, splits_and_features in _TASK_SPLITS_AND_FEATURES.items()
}
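
# For example, a single `_URLs` entry (task name hypothetical) looks like:
#   _URLs["some_task"]["train"] == {
#       "tfrecord": "data/some_task/train.tfrecord-00000-of-00001",
#       "features_file": "data/some_task/info.train.json",
#   }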


class P3Config(datasets.BuilderConfig):
    """BuilderConfig for P3."""

    def __init__(self, splits, features, score_eval, **kwargs):
        """BuilderConfig for P3.

        Args:
          splits: `List[str]`, the lists of splits which are available for this task
          features: `List[str]`, the list of features for this task
          score_eval: `bool`, whether this task is formulated as a rank classification problem
          **kwargs: keyword arguments forwarded to super.
        """
        # Version history:
        # 0.1 initial commit
        super(P3Config, self).__init__(version=datasets.Version("0.1.0"), **kwargs)
        self.splits = splits
        self.features = features
        self.score_eval = score_eval


class P3(datasets.GeneratorBasedBuilder):
    """Subset of P3 used in `Multitask Prompted Training Enables Zero-Shot Task Generalization`"""

    BUILDER_CONFIGS = [
        P3Config(
            name=task_name,
            splits=splits_and_features["splits"],
            features=splits_and_features["features"],
            score_eval=task_name.endswith("score_eval")
        )
        for task_name, splits_and_features in _TASK_SPLITS_AND_FEATURES.items()
    ]
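
    # A minimal usage sketch, assuming this script is used as a local dataset
    # script (the config name is hypothetical):
    #   dataset = datasets.load_dataset("path/to/this/script.py", "some_task")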

    def _info(self):
        # All features available are: 'inputs', 'inputs_pretokenized', 'targets',
        # 'targets_pretokenized', 'idx', 'is_correct', 'weight', and 'answer_choices'
        _FEAT_MAPPING = {
            "answer_choices": datasets.Sequence(datasets.Value("string")),
            "inputs": datasets.Sequence(datasets.Value("int32")),
            "inputs_pretokenized": datasets.Value("string"),
            "targets": datasets.Sequence(datasets.Value("int32")),
            "targets_pretokenized": datasets.Value("string"),
            "idx": datasets.Sequence(datasets.Value("int32")),
            "weight": datasets.Value("float32"),
            "is_correct": datasets.Value("bool"),
        }

        features = {}
        for feat_name in self.config.features:
            features[feat_name] = _FEAT_MAPPING[feat_name]

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLs)
        task_name = self.config.name
        standard_splits = {
            "train": datasets.Split.TRAIN,
            "validation": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        # Emit the standard splits first, then any remaining special splits
        # (which keep their own names).
        ordered_split_names = [s for s in standard_splits if s in self.config.splits]
        ordered_split_names += [s for s in self.config.splits if s not in standard_splits]
        split_generators = []
        for split_name in ordered_split_names:
            split_generators.append(
                datasets.SplitGenerator(
                    name=standard_splits.get(split_name, datasets.Split(split_name)),
                    gen_kwargs={
                        "features_file": data_dir[task_name][split_name]["features_file"],
                        "tfrecord": data_dir[task_name][split_name]["tfrecord"],
                    }
                )
            )
        return split_generators


    def _generate_examples(self, features_file, tfrecord):
        """This function returns the examples in the raw (text) form."""
        _FEAT_MAPPING_FUNCTIONS = {
            "answer_choices": lambda x: [choice.decode("utf-8") for choice in x],
            "inputs": lambda x: x.tolist(),
            "inputs_pretokenized": lambda x: x.decode("utf-8"),
            "targets": lambda x: x.tolist(),
            "targets_pretokenized": lambda x: x.decode("utf-8"),
            "idx": lambda x: x.tolist(),
            "weight": lambda x: float(x),
            "is_correct": lambda x: x,
        }

        ds = load_cached_task(features_file, tfrecord)
        for key, ex in enumerate(ds.as_numpy_iterator()):
            ex_dict = {
                feat_name: _FEAT_MAPPING_FUNCTIONS[feat_name](feat_value)
                for feat_name, feat_value in ex.items()
            }
            yield key, ex_dict