| """NPSC dataset.""" |
| import gzip |
| import json |
| import datasets |
|
|
| logger = datasets.logging.get_logger(__name__) |
| _DESCRIPTION = """\\nNorwegian Colossal Corpus v2. Short sequences of maximum 100k characters.""" |
| _CITATION = """ |
| TO BE DONE |
| """ |
| _URL = "https://www.nb.no/sprakbanken/ressurskatalog/oai-nb-no-sbr-58/" |
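# Shards are gzipped JSON-lines files with 1-based, zero-padded indices,
# e.g. train-shard-0001-of-0001.json.gz.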
_DATA_URL = "https://huggingface.co/datasets/NbAiLab/NPSC/resolve/main/data/{split_suffix}-shard-{index:04d}-of-{n_shards:04d}.json.gz"
_N_SHARDS_PER_SPLIT = {"train": 1, "dev": 1, "test": 1}


class NPSCConfig(datasets.BuilderConfig):
    """BuilderConfig for NPSC."""

    def __init__(self, *args, **kwargs):
        """BuilderConfig for NPSC.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            name="NPSC",
            **kwargs,
        )


class NPSC(datasets.GeneratorBasedBuilder):
    """Norwegian Parliamentary Speech Corpus."""

    BUILDER_CONFIGS = [NPSCConfig()]
    BUILDER_CONFIG_CLASS = NPSCConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "sentence_order": datasets.Value("int32"),
                    "speaker_id": datasets.Value("int32"),
                    "speaker_name": datasets.Value("string"),
                    "sentence_text": datasets.Value("string"),
                    "sentence_language_code": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "start_time": datasets.Value("int32"),
                    "end_time": datasets.Value("int32"),
                    "normsentence_text": datasets.Value("string"),
                    "transsentence_text": datasets.Value("string"),
                    "translated": datasets.Value("int32"),
                    "audio": datasets.features.Audio(sampling_rate=48000),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        data_urls = {}
        for split in ["train", "dev", "test"]:
            data_urls[split] = [
                _DATA_URL.format(
                    split_suffix=split,
                    index=index,
                    n_shards=_N_SHARDS_PER_SPLIT[split],
                )
                for index in range(1, _N_SHARDS_PER_SPLIT[split] + 1)
            ]
        train_downloaded_files = dl_manager.download(data_urls["train"])
        dev_downloaded_files = dl_manager.download(data_urls["dev"])
        test_downloaded_files = dl_manager.download(data_urls["test"])

        # The "dev" shards back the VALIDATION split.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": dev_downloaded_files}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": test_downloaded_files}),
        ]
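    # Each shard line is a standalone JSON object whose keys match the
    # features declared in _info().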
    def _generate_examples(self, filepaths):
        """Yields examples in raw (text) form by iterating over all shard files."""
        id_ = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            # Wrap a file object instead of passing the path to gzip.open:
            # `datasets` patches the builtin open() in streaming mode, so this
            # keeps the loader streamable.
            with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for line in f:
                    if line.strip():
                        example = json.loads(line)
                        yield id_, example
                        id_ += 1
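

if __name__ == "__main__":
    # Minimal smoke test, a sketch rather than part of the loader: it assumes
    # the script is published as "NbAiLab/NPSC" on the Hugging Face Hub (the
    # repo the shard URLs above point at). Streaming avoids materializing all
    # shards before the first example arrives.
    dataset = datasets.load_dataset("NbAiLab/NPSC", split="train", streaming=True)
    print(next(iter(dataset)))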