| | """P3""" |
| |
|
| |
|
| | import datasets |
| | import glob |
| | import json |
| | import os |
| | from collections import defaultdict |
| | import tensorflow as tf |

_CITATION = """\
TODO"""

_DESCRIPTION = """\
P3 is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).

Prompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, 2,000 prompts have been collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).

To train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. **The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**
"""


_LICENSE = "Apache License 2.0"

_HOMEPAGE = "https://github.com/bigscience-workshop/promptsource"

_DATA_PATH = "./data/"


def load_cached_task(cache_dir, split):
    # Read the feature descriptions of this split from the cached task's
    # info.<split>.json file.
    with tf.io.gfile.GFile(os.path.join(cache_dir, f"info.{split}.json")) as f:
        split_info = json.load(f)
        features = split_info["features"]

    def _feature_config(shape, dtype):
        if dtype in ("int32", "bool"):
            # int32 and bool are stored as int64 in the tf.train.Example protobuf.
            dtype = "int64"
        if shape and shape[0] is None:
            return tf.io.FixedLenSequenceFeature(
                shape[1:], dtype, allow_missing=True
            )
        return tf.io.FixedLenFeature(shape, dtype)

    feature_description = {
        feat: _feature_config(**desc) for feat, desc in features.items()
    }

    tfrecords = os.path.join(
        cache_dir, f"{split}.tfrecord-*-of-*{split_info['num_shards']}"
    )
    ds = tf.data.TFRecordDataset(tf.io.gfile.glob(tfrecords))
    ds = ds.map(
        lambda pb: tf.io.parse_single_example(pb, feature_description),
        num_parallel_calls=tf.data.experimental.AUTOTUNE
    )
    # Cast features back to the types from the info JSON since some features
    # must be cast for storage (e.g. int32 is stored as int64).
    ds = ds.map(
        lambda x: {k: tf.cast(v, features[k]["dtype"]) for k, v in x.items()},
        num_parallel_calls=tf.data.experimental.AUTOTUNE
    )
    return ds
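
# Usage sketch for `load_cached_task` (assumes `./data/<task_name>/` holds the
# cached TFRecord shards and `info.<split>.json` described above; the task name
# is hypothetical):
#
#     ds = load_cached_task("./data/some_task_some_prompt", "train")
#     for ex in ds.take(1).as_numpy_iterator():
#         print(ex["inputs_pretokenized"])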


def find_task_splits_and_features():
    """Find the available tasks under ./data and their available splits and features."""
    task_and_their_splits = defaultdict(dict)
    for stats in glob.glob(f"{_DATA_PATH}/*/stats.*.json"):
        folder_path = os.path.dirname(stats)
        task_name = folder_path.split("/")[-1]
        split_name = os.path.basename(stats).split(".")[1]

        # Skip tasks whose caching has not finished.
        if not os.path.exists(f"{folder_path}/COMPLETED"):
            continue

        with open(stats, "r") as f:
            split_stats = json.load(f)
            nb_examples = split_stats["examples"]

        # Only keep non-empty splits.
        if nb_examples > 0:
            with open(os.path.join(folder_path, f"info.{split_name}.json")) as f:
                split_info = json.load(f)
                features = split_info["features"]

            if task_and_their_splits[task_name] == {}:
                task_and_their_splits[task_name] = {
                    "splits": [],
                    "features": [],
                }

            task_and_their_splits[task_name]["splits"].append(split_name)
            if task_and_their_splits[task_name]["features"] == []:
                task_and_their_splits[task_name]["features"] = sorted(list(features.keys()))
            else:
                # All splits of a task are expected to expose the same features.
                assert task_and_their_splits[task_name]["features"] == sorted(list(features.keys()))
    return task_and_their_splits
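
# On-disk layout per task, inferred from the code above (names hypothetical):
#
#     ./data/<task_name>/COMPLETED
#     ./data/<task_name>/stats.<split>.json  # {"examples": <int>, ...}
#     ./data/<task_name>/info.<split>.json   # {"features": {...}, "num_shards": <int>}
#     ./data/<task_name>/<split>.tfrecord-00000-of-00001
#
# The returned mapping then looks like:
#
#     {"<task_name>": {"splits": ["train", ...], "features": ["inputs", ...]}}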


TASK_SPLITS_AND_FEATURES = find_task_splits_and_features()


class P3Config(datasets.BuilderConfig):
    """BuilderConfig for P3."""

    def __init__(self, splits, features, score_eval, **kwargs):
        """BuilderConfig for P3.

        Args:
            splits: `List[str]`, the list of splits available for this task
            features: `List[str]`, the list of features for this task
            score_eval: `bool`, whether this task is formulated as a rank classification problem
            **kwargs: keyword arguments forwarded to super.
        """
        super(P3Config, self).__init__(version=datasets.Version("0.1.0"), **kwargs)
        self.splits = splits
        self.features = features
        self.score_eval = score_eval


class P3(datasets.GeneratorBasedBuilder):
    """Subset of P3 used in `Multitask Prompted Training Enables Zero-Shot Task Generalization`"""

    BUILDER_CONFIGS = [
        P3Config(
            name=task_name,
            splits=splits_and_features["splits"],
            features=splits_and_features["features"],
            score_eval=task_name.endswith("score_eval"),
        )
        for task_name, splits_and_features in TASK_SPLITS_AND_FEATURES.items()
    ]

    def _info(self):
        # Mapping from feature name to its `datasets` feature type.
        _FEAT_MAPPING = {
            "answer_choices": datasets.Sequence(datasets.Value("string")),
            "inputs": datasets.Sequence(datasets.Value("int32")),
            "inputs_pretokenized": datasets.Value("string"),
            "targets": datasets.Sequence(datasets.Value("int32")),
            "targets_pretokenized": datasets.Value("string"),
            "idx": datasets.Sequence(datasets.Value("int32")),
            "weight": datasets.Value("float32"),
            "is_correct": datasets.Value("bool"),
        }

        features = {}
        for feat_name in self.config.features:
            features[feat_name] = _FEAT_MAPPING[feat_name]

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        split_generators = []
        if "train" in self.config.splits:
            split_generators.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "data_folder": os.path.join(_DATA_PATH, self.config.name),
                        "split": "train",
                    }
                )
            )
        if "validation" in self.config.splits:
            split_generators.append(
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "data_folder": os.path.join(_DATA_PATH, self.config.name),
                        "split": "validation",
                    }
                )
            )
        if "test" in self.config.splits:
            split_generators.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "data_folder": os.path.join(_DATA_PATH, self.config.name),
                        "split": "test",
                    }
                )
            )

        # Handle splits whose name is not one of train/validation/test.
        special_splits = set(self.config.splits) - set(["train", "validation", "test"])
        for special_split_name in special_splits:
            split_generators.append(
                datasets.SplitGenerator(
                    name=datasets.Split(special_split_name),
                    gen_kwargs={
                        "data_folder": os.path.join(_DATA_PATH, self.config.name),
                        "split": special_split_name,
                    }
                )
            )
        return split_generators

    def _generate_examples(self, data_folder, split):
        """This function returns the examples in the raw (text) form."""
        # Mapping from feature name to the function that converts the raw
        # TFRecord value into a Python value.
        _FEAT_MAPPING_FUNCTIONS = {
            "answer_choices": lambda x: [choice.decode("utf-8") for choice in x],
            "inputs": lambda x: x.tolist(),
            "inputs_pretokenized": lambda x: x.decode("utf-8"),
            "targets": lambda x: x.tolist(),
            "targets_pretokenized": lambda x: x.decode("utf-8"),
            "idx": lambda x: x.tolist(),
            "weight": lambda x: float(x),
            "is_correct": lambda x: x,
        }

        key = 0
        ds = load_cached_task(data_folder, split)
        for ex in ds.as_numpy_iterator():
            ex_dict = {}
            for feat_name, feat_value in ex.items():
                ex_dict[feat_name] = _FEAT_MAPPING_FUNCTIONS[feat_name](feat_value)
            yield key, ex_dict
            key += 1