| """Librispeech automatic speech recognition dataset.""" |
|
|
|
|
| import os |
|
|
| import datasets |
| from datasets.tasks import AutomaticSpeechRecognition |
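
# A minimal usage sketch (illustrative only; it assumes this script is
# loadable under the dataset name "librispeech_asr", as on the Hugging Face
# Hub):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("librispeech_asr", "clean", split="train.100")
#     sample = ds[0]
#     print(sample["text"], sample["audio"]["sampling_rate"])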

_CITATION = """\
@inproceedings{panayotov2015librispeech,
  title={Librispeech: an ASR corpus based on public domain audio books},
  author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
  booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},
  pages={5206--5210},
  year={2015},
  organization={IEEE}
}
"""

_DESCRIPTION = """\
LibriSpeech is a corpus of approximately 1000 hours of read English speech with a sampling rate of 16 kHz,
prepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read
audiobooks from the LibriVox project, and has been carefully segmented and aligned.
"""

_URL = "http://www.openslr.org/12"
_DL_URL = "http://www.openslr.org/resources/12/"


_DL_URLS = {
    "clean": {
        "dev": _DL_URL + "dev-clean.tar.gz",
        "test": _DL_URL + "test-clean.tar.gz",
        "train.100": _DL_URL + "train-clean-100.tar.gz",
        "train.360": _DL_URL + "train-clean-360.tar.gz",
    },
    "other": {
        "test": _DL_URL + "test-other.tar.gz",
        "dev": _DL_URL + "dev-other.tar.gz",
        "train.500": _DL_URL + "train-other-500.tar.gz",
    },
    "all": {
        "dev.clean": _DL_URL + "dev-clean.tar.gz",
        "dev.other": _DL_URL + "dev-other.tar.gz",
        "test.clean": _DL_URL + "test-clean.tar.gz",
        "test.other": _DL_URL + "test-other.tar.gz",
        "train.clean.100": _DL_URL + "train-clean-100.tar.gz",
        "train.clean.360": _DL_URL + "train-clean-360.tar.gz",
        "train.other.500": _DL_URL + "train-other-500.tar.gz",
    },
    "validation.clean": {
        "dev.clean": _DL_URL + "dev-clean.tar.gz",
    },
    "validation.other": {
        "dev.other": _DL_URL + "dev-other.tar.gz",
    },
    "test.clean": {
        "test.clean": _DL_URL + "test-clean.tar.gz",
    },
    "test.other": {
        "test.other": _DL_URL + "test-other.tar.gz",
    },
    "train.clean.100": {
        "train.clean.100": _DL_URL + "train-clean-100.tar.gz",
    },
    "train.clean.360": {
        "train.clean.360": _DL_URL + "train-clean-360.tar.gz",
    },
    "train.other.500": {
        "train.other.500": _DL_URL + "train-other-500.tar.gz",
    },
    "train.10": {
        "train.10": "https://dl.fbaipublicfiles.com/librilight/data/librispeech_finetuning.tgz",
    },
}
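
# Each config name maps archive keys to download URLs; `_split_generators`
# below looks up the downloaded archives by the same keys, so a config fetches
# only the tarballs its splits need. For example:
#
#     _DL_URLS["clean"]["train.100"]
#     # -> "http://www.openslr.org/resources/12/train-clean-100.tar.gz"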


class LibrispeechASRConfig(datasets.BuilderConfig):
    """BuilderConfig for LibriSpeechASR."""

    def __init__(self, **kwargs):
        """
        Args:
            **kwargs: keyword arguments forwarded to super, e.g. `name` and
                `description`.
        """
        super().__init__(version=datasets.Version("2.1.0", ""), **kwargs)


class LibrispeechASR(datasets.GeneratorBasedBuilder):
    """Librispeech dataset."""

    DEFAULT_WRITER_BATCH_SIZE = 256
    DEFAULT_CONFIG_NAME = "all"
    BUILDER_CONFIGS = [
        LibrispeechASRConfig(name="clean", description="'Clean' speech."),
        LibrispeechASRConfig(name="other", description="'Other', more challenging, speech."),
        LibrispeechASRConfig(name="all", description="Combined clean and other dataset."),
        LibrispeechASRConfig(name="validation.clean", description="Validation clean split only."),
        LibrispeechASRConfig(name="validation.other", description="Validation other split only."),
        LibrispeechASRConfig(name="test.clean", description="Test clean split only."),
        LibrispeechASRConfig(name="test.other", description="Test other split only."),
        LibrispeechASRConfig(name="train.clean.100", description="Train clean 100h subset only."),
        LibrispeechASRConfig(name="train.clean.360", description="Train clean 360h subset only."),
        LibrispeechASRConfig(name="train.other.500", description="Train other 500h subset only."),
        LibrispeechASRConfig(name="train.10", description="10-hour Libri-Light fine-tuning subset only."),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                    "speaker_id": datasets.Value("int64"),
                    "chapter_id": datasets.Value("int64"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=("file", "text"),
            homepage=_URL,
            citation=_CITATION,
            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
        )
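
    # `datasets.Audio` decodes lazily: accessing example["audio"] returns a
    # dict with "path", "array" (the decoded waveform), and "sampling_rate",
    # built from the raw FLAC bytes that `_generate_examples` yields.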

    def _split_generators(self, dl_manager):
        archive_path = dl_manager.download(_DL_URLS[self.config.name])
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
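        # When streaming, `local_extracted_archive` stays empty ({}): examples
        # are read straight out of the tarballs via `dl_manager.iter_archive`,
        # and `_generate_examples` falls back to the bare `<id>.flac` names.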

        if self.config.name == "clean":
            train_splits = [
                datasets.SplitGenerator(
                    name="train.100",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.100"),
                        "files": dl_manager.iter_archive(archive_path["train.100"]),
                    },
                ),
                datasets.SplitGenerator(
                    name="train.360",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.360"),
                        "files": dl_manager.iter_archive(archive_path["train.360"]),
                    },
                ),
            ]
            dev_splits = [
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("dev"),
                        "files": dl_manager.iter_archive(archive_path["dev"]),
                    },
                )
            ]
            test_splits = [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("test"),
                        "files": dl_manager.iter_archive(archive_path["test"]),
                    },
                )
            ]
        elif self.config.name == "other":
            train_splits = [
                datasets.SplitGenerator(
                    name="train.500",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.500"),
                        "files": dl_manager.iter_archive(archive_path["train.500"]),
                    },
                )
            ]
            dev_splits = [
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("dev"),
                        "files": dl_manager.iter_archive(archive_path["dev"]),
                    },
                )
            ]
            test_splits = [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("test"),
                        "files": dl_manager.iter_archive(archive_path["test"]),
                    },
                )
            ]
        elif self.config.name == "all":
            train_splits = [
                datasets.SplitGenerator(
                    name="train.clean.100",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.clean.100"),
                        "files": dl_manager.iter_archive(archive_path["train.clean.100"]),
                    },
                ),
                datasets.SplitGenerator(
                    name="train.clean.360",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.clean.360"),
                        "files": dl_manager.iter_archive(archive_path["train.clean.360"]),
                    },
                ),
                datasets.SplitGenerator(
                    name="train.other.500",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.other.500"),
                        "files": dl_manager.iter_archive(archive_path["train.other.500"]),
                    },
                ),
            ]
            dev_splits = [
                datasets.SplitGenerator(
                    name="validation.clean",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("dev.clean"),
                        "files": dl_manager.iter_archive(archive_path["dev.clean"]),
                    },
                ),
                datasets.SplitGenerator(
                    name="validation.other",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("dev.other"),
                        "files": dl_manager.iter_archive(archive_path["dev.other"]),
                    },
                ),
            ]
            test_splits = [
                datasets.SplitGenerator(
                    name="test.clean",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("test.clean"),
                        "files": dl_manager.iter_archive(archive_path["test.clean"]),
                    },
                ),
                datasets.SplitGenerator(
                    name="test.other",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("test.other"),
                        "files": dl_manager.iter_archive(archive_path["test.other"]),
                    },
                ),
            ]
        elif self.config.name == "validation.clean":
            train_splits = []
            dev_splits = [
                datasets.SplitGenerator(
                    name="validation.clean",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("dev.clean"),
                        "files": dl_manager.iter_archive(archive_path["dev.clean"]),
                    },
                ),
            ]
            test_splits = []
        elif self.config.name == "validation.other":
            train_splits = []
            dev_splits = [
                datasets.SplitGenerator(
                    name="validation.other",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("dev.other"),
                        "files": dl_manager.iter_archive(archive_path["dev.other"]),
                    },
                ),
            ]
            test_splits = []
        elif self.config.name == "test.clean":
            train_splits = []
            dev_splits = []
            test_splits = [
                datasets.SplitGenerator(
                    name="test.clean",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("test.clean"),
                        "files": dl_manager.iter_archive(archive_path["test.clean"]),
                    },
                ),
            ]
        elif self.config.name == "test.other":
            train_splits = []
            dev_splits = []
            test_splits = [
                datasets.SplitGenerator(
                    name="test.other",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("test.other"),
                        "files": dl_manager.iter_archive(archive_path["test.other"]),
                    },
                ),
            ]
        elif self.config.name == "train.clean.100":
            train_splits = [
                datasets.SplitGenerator(
                    name="train.clean.100",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.clean.100"),
                        "files": dl_manager.iter_archive(archive_path["train.clean.100"]),
                    },
                ),
            ]
            dev_splits = []
            test_splits = []
        elif self.config.name == "train.clean.360":
            train_splits = [
                datasets.SplitGenerator(
                    name="train.clean.360",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.clean.360"),
                        "files": dl_manager.iter_archive(archive_path["train.clean.360"]),
                    },
                ),
            ]
            dev_splits = []
            test_splits = []
        elif self.config.name == "train.other.500":
            train_splits = [
                datasets.SplitGenerator(
                    name="train.other.500",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.other.500"),
                        "files": dl_manager.iter_archive(archive_path["train.other.500"]),
                    },
                ),
            ]
            dev_splits = []
            test_splits = []
        elif self.config.name == "train.10":
            train_splits = [
                datasets.SplitGenerator(
                    name="train.10",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.10"),
                        "files": dl_manager.iter_archive(archive_path["train.10"]),
                    },
                ),
            ]
            dev_splits = []
            test_splits = []

        return train_splits + dev_splits + test_splits

    def _generate_examples(self, files, local_extracted_archive):
        """Generate examples from a LibriSpeech archive_path."""
        key = 0
        audio_data = {}
        transcripts = []
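        # Buffer raw FLAC bytes and transcript rows as the archive streams by;
        # a chapter is flushed once every buffered transcript has matching
        # audio (see the count check below). This relies on each chapter's
        # .flac files and its single *.trans.txt sharing one archive directory.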
        for path, f in files:
            if path.endswith(".flac"):
                # Buffer audio bytes keyed by utterance id, e.g. "1272-128104-0000".
                id_ = path.split("/")[-1][: -len(".flac")]
                audio_data[id_] = f.read()
            elif path.endswith(".trans.txt"):
                for line in f:
                    if line:
                        line = line.decode("utf-8").strip()
                        id_, transcript = line.split(" ", 1)
                        audio_file = f"{id_}.flac"
                        # Utterance ids follow "<speaker>-<chapter>-<utterance>".
                        speaker_id, chapter_id = [int(el) for el in id_.split("-")[:2]]
                        audio_file = (
                            os.path.join(local_extracted_archive, audio_file)
                            if local_extracted_archive
                            else audio_file
                        )
                        transcripts.append(
                            {
                                "id": id_,
                                "speaker_id": speaker_id,
                                "chapter_id": chapter_id,
                                "file": audio_file,
                                "text": transcript,
                            }
                        )
            # Flush the completed chapter once audio and transcripts line up.
            if audio_data and len(audio_data) == len(transcripts):
                for transcript in transcripts:
                    audio = {"path": transcript["file"], "bytes": audio_data[transcript["id"]]}
                    yield key, {"audio": audio, **transcript}
                    key += 1
                audio_data = {}
                transcripts = []