# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Snow Mountain: audio recordings of The Bible in low-resource Indian languages."""
import tarfile

import datasets
import pandas as pd
_CITATION = """\
@inproceedings{Raju2022SnowMD,
  title={Snow Mountain: Dataset of Audio Recordings of The Bible in Low Resource Languages},
  author={Kavitha Raju and V. Anjaly and R. Allen Lish and Joel Mathew},
  year={2022}
}
"""
_DESCRIPTION = """\
The Snow Mountain dataset contains the audio recordings (in .mp3 format) and the corresponding text of The Bible
in 15 Indian languages. The recordings were done in a studio setting by native speakers, with a single speaker per
language. Eleven of the languages are geographically concentrated in Northern India around the state of Himachal
Pradesh and, being related to Hindi, are transcribed in the Devanagari script; the remaining four (Malayalam,
Tamil, Kannada, Telugu) are Dravidian languages transcribed in their own scripts.
"""
_HOMEPAGE = "https://gitlabdev.bridgeconn.com/software/research/datasets/snow-mountain"
_LICENSE = ""
_URL = "https://gitlabdev.bridgeconn.com/software/research/datasets/snow-mountain/"

_FILES = {}
_LANGUAGES = ['hindi', 'dogri', 'gaddi', 'bilaspuri', 'haryanvi', 'kulvi', 'kangri', 'bhadrawahi',
              'mandeali', 'pahari_mahasui', 'kulvi_outer_seraji', 'malayalam', 'tamil', 'kannada',
              'telugu']
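# Per-language file manifest: csv split files under data/experiments/<lang>/
# and cleaned verse archives under data/cleaned/<lang>/.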
for lang in _LANGUAGES:
    file_dic = {
        "train_500": f"data/experiments/{lang}/train_500.csv",
        "val_500": f"data/experiments/{lang}/val_500.csv",
        "train_1000": f"data/experiments/{lang}/train_1000.csv",
        "val_1000": f"data/experiments/{lang}/val_1000.csv",
        "train_2500": f"data/experiments/{lang}/train_2500.csv",
        "val_2500": f"data/experiments/{lang}/val_2500.csv",
        "train_short": f"data/experiments/{lang}/train_short.csv",
        "val_short": f"data/experiments/{lang}/val_short.csv",
        "train_full": f"data/experiments/{lang}/train_full.csv",
        "val_full": f"data/experiments/{lang}/val_full.csv",
        "test_common": f"data/experiments/{lang}/test_common.csv",
        "all_verses": f"data/cleaned/{lang}/all_verses.tar.gz",
        "short_verses": f"data/cleaned/{lang}/short_verses.tar.gz",
    }
    _FILES[lang] = file_dic
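# Three-letter USFM book codes for the New and Old Testament.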
NT_BOOKS = ['MAT', 'MRK', 'LUK', 'JHN', 'ACT', 'ROM', '1CO', '2CO', 'GAL', 'EPH', 'PHP', 'COL', '1TH',
            '2TH', '1TI', '2TI', 'TIT', 'PHM', 'HEB', 'JAS', '1PE', '2PE', '1JN', '2JN', '3JN', 'JUD', 'REV']
OT_BOOKS = ['GEN', 'EXO', 'LEV', 'NUM', 'DEU', 'JOS', 'JDG', 'RUT', '1SA', '2SA', '1KI', '2KI', '1CH',
            '2CH', 'EZR', 'NEH', 'EST', 'JOB', 'PSA', 'PRO', 'ECC', 'SNG', 'ISA', 'JER', 'LAM', 'EZK',
            'DAN', 'HOS', 'JOL', 'AMO', 'OBA', 'JON', 'MIC', 'NAM', 'HAB', 'ZEP', 'HAG', 'ZEC', 'MAL']
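# Portions recorded per language: Hindi covers the Old Testament, the other
# ten Himachali languages cover the New Testament, and the four Dravidian
# languages cover both.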
BOOKS_DIC = {
    'hindi': OT_BOOKS,
    'bhadrawahi': NT_BOOKS,
    'bilaspuri': NT_BOOKS,
    'dogri': NT_BOOKS,
    'gaddi': NT_BOOKS,
    'haryanvi': NT_BOOKS,
    'kangri': NT_BOOKS,
    'kulvi': NT_BOOKS,
    'kulvi_outer_seraji': NT_BOOKS,
    'mandeali': NT_BOOKS,
    'pahari_mahasui': NT_BOOKS,
    'malayalam': OT_BOOKS + NT_BOOKS,
    'tamil': OT_BOOKS + NT_BOOKS,
    'kannada': OT_BOOKS + NT_BOOKS,
    'telugu': OT_BOOKS + NT_BOOKS,
}
class SnowMountain(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = []
    for lang in _LANGUAGES:
        BUILDER_CONFIGS.append(
            datasets.BuilderConfig(name=lang, version=VERSION, description=f"{lang.capitalize()} data")
        )
    DEFAULT_CONFIG_NAME = "hindi"
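    # _info declares the example schema: the transcription, the audio decoded
    # at a 16 kHz sampling rate, and the source mp3 path within the archives.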
    def _info(self):
        features = datasets.Features(
            {
                "sentence": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=16_000),
                "path": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("sentence", "path"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
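    # _split_generators downloads the csv manifests for the selected language,
    # then pulls every per-book audio archive into memory keyed by file name
    # so that _generate_examples can attach the bytes to each row.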
    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_FILES[self.config.name])
        # Download the full audio archives for this language.
        audio_data = {}
        for book in BOOKS_DIC[self.config.name]:
            archive_url = f"data/cleaned/{self.config.name}/{book}.tar.gz"
            archive_path = dl_manager.download(archive_url)
            for path, file in dl_manager.iter_archive(archive_path):
                audio_ = path.split('/')[-1]
                if audio_ not in audio_data:
                    audio_data[audio_] = file.read()
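        # One SplitGenerator per manifest: a train/val pair for each subset
        # size, plus the shared test set and the full verse archives.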
        data_size = ['500', '1000', '2500', 'short', 'full']
        split_names = []
        for size in data_size:
            split_names.extend([f"train_{size}", f"val_{size}"])
        split_names.extend(["test_common", "all_verses", "short_verses"])
        splits = []
        for name in split_names:
            splits.append(
                datasets.SplitGenerator(
                    name=name,
                    gen_kwargs={
                        "filepath": downloaded_files[name],
                        "audio_data": audio_data,
                        "dl_manager": dl_manager,
                    },
                )
            )
        return splits
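    # _generate_examples reads rows either from a plain csv manifest or from a
    # tar.gz archive containing a *_verses.csv, then joins each row with the
    # audio bytes collected in _split_generators.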
    def _generate_examples(self, filepath, audio_data, dl_manager):
        key = 0

        def parse_archive(archive):
            # Parse the csv shipped inside the large tar.gz archives
            # (all_verses, short_verses). Sentences may themselves contain
            # commas, so everything after the second comma is re-joined
            # into a single field.
            temp_df = pd.DataFrame()
            for path, file in dl_manager.iter_archive(archive):
                if path.endswith('_verses.csv'):
                    verses_lines = [line.decode("utf-8") for line in file.readlines()]
                    column_names = verses_lines[0].strip().split(",")
                    rows = [line.strip().split(',') for line in verses_lines[1:]]
                    rows = [[r[0], r[1], ','.join(r[2:])] for r in rows]
                    temp_df = pd.DataFrame(rows, columns=column_names)
                    break
            return temp_df
        if tarfile.is_tarfile(filepath):
            # The all_verses/short_verses splits arrive as tar.gz archives
            # and have to be unpacked by parse_archive.
            data_df = parse_archive(filepath)
        else:
            data_df = pd.read_csv(filepath)
        for _, row in data_df.iterrows():
            audio = row['path'].split('/')[-1]
            content = ''
            if audio in audio_data:
                content = audio_data[audio]
            else:
                print(f"*********** Couldn't find audio: {audio} **************")
            yield key, {
                "sentence": row["sentence"],
                "path": row["path"],
                "audio": {"path": row["path"], "bytes": content},
            }
            key += 1
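

# A minimal local smoke test, not part of the loading logic itself. It assumes
# this file is saved as a standalone script (e.g. snow_mountain.py) with the
# data/ tree resolvable relative to it or via the hosting Hub repository; the
# config and split names come from the definitions above.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset(__file__, "hindi", split="train_500")
    print(ds[0]["path"], ds[0]["sentence"])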