Commit: upload hf dataset and rename py dataset

Files changed:
- dataset.py (added: +36 -0)
- libris2s_dataset.py → libris2s_dataset_pt.py (renamed: +94 -94)
dataset.py
ADDED
@@ -0,0 +1,36 @@
+import csv
+from datasets import DatasetInfo, GeneratorBasedBuilder, SplitGenerator, Split, Features, Value, Audio
+
+class Libris2s(GeneratorBasedBuilder):
+    def _info(self):
+        return DatasetInfo(
+            features=Features({
+                "book_id": Value("int64"),
+                "DE_audio": Audio(),
+                "EN_audio": Audio(),
+                "score": Value("float32"),
+                "DE_transcript": Value("string"),
+                "EN_transcript": Value("string"),
+            }),
+        )
+
+    def _split_generators(self, dl_manager):
+        return [
+            SplitGenerator(
+                name=Split.TRAIN,
+                gen_kwargs={"filepath": "alignments/all_de_en_alligned_cleaned.csv"}
+            ),
+        ]
+
+    def _generate_examples(self, filepath):
+        with open(filepath, encoding="utf-8") as f:
+            reader = csv.DictReader(f)
+            for idx, row in enumerate(reader):
+                yield idx, {
+                    "book_id": int(row["book_id"]),
+                    "DE_audio": row["DE_audio"],
+                    "EN_audio": row["EN_audio"],
+                    "score": float(row["score"]),
+                    "DE_transcript": row["DE_transcript"],
+                    "EN_transcript": row["EN_transcript"],
+                }
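The builder reads one CSV of aligned German/English pairs and exposes it as a single train split. Note that _split_generators ignores dl_manager and hardcodes a relative path, so loading only works from a directory containing alignments/all_de_en_alligned_cleaned.csv, with the DE_audio/EN_audio columns holding paths the Audio() feature can resolve. A minimal usage sketch under those assumptions (recent versions of datasets also require trust_remote_code=True for script-based datasets):

from datasets import load_dataset

# Load the builder script directly; the relative paths are assumptions
# about the local checkout, not part of the commit.
ds = load_dataset("./dataset.py", split="train", trust_remote_code=True)

# Audio() lazily decodes each file into an array plus sampling rate.
example = ds[0]
print(example["book_id"], example["score"])
print(example["DE_audio"]["sampling_rate"], example["DE_audio"]["array"].shape)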
libris2s_dataset.py → libris2s_dataset_pt.py
RENAMED
@@ -1,95 +1,95 @@
Contents are identical before and after the rename (the diff removes and re-adds the same lines), so the file is shown once:
import os
import torch
import pandas as pd
import torchaudio
from torch.utils.data import Dataset
from typing import List, Optional

class Libris2sDataset(torch.utils.data.Dataset):
    def __init__(self, data_dir: str, split: str, transform=None, book_ids: Optional[List[str]] = None):
        """
        Initialize the LibriS2S dataset.

        Args:
            data_dir (str): Root directory containing the dataset
            split (str): Path to the CSV file containing alignments
            transform (callable, optional): Optional transform to be applied on the audio
            book_ids (List[str], optional): List of book IDs to include. If None, includes all books.
                Example: ['9', '10', '11'] will only load these books.
        """
        self.data_dir = data_dir
        self.transform = transform
        self.book_ids = set(book_ids) if book_ids is not None else None

        # Load alignment CSV file
        self.alignments = pd.read_csv(split)

        # Create lists to store paths and metadata
        self.de_audio_paths = []
        self.en_audio_paths = []
        self.de_transcripts = []
        self.en_transcripts = []
        self.alignment_scores = []

        # Process each entry in the alignments
        for _, row in self.alignments.iterrows():
            # Get book ID from the path
            book_id = str(row['book_id'])

            # Skip if book_id is not in the filtered set
            if self.book_ids is not None and book_id not in self.book_ids:
                continue

            # Get full paths from CSV
            de_audio = os.path.join(data_dir, row['DE_audio'])
            en_audio = os.path.join(data_dir, row['EN_audio'])

            # Only add if both audio files exist
            if os.path.exists(de_audio) and os.path.exists(en_audio):
                self.de_audio_paths.append(de_audio)
                self.en_audio_paths.append(en_audio)
                self.de_transcripts.append(row['DE_transcript'])
                self.en_transcripts.append(row['EN_transcript'])
                self.alignment_scores.append(float(row['score']))
            else:
                print(f"Skipping {de_audio} or {en_audio} because they don't exist")

    def __len__(self):
        """Return the number of items in the dataset."""
        return len(self.de_audio_paths)

    def __getitem__(self, idx):
        """
        Get a single item from the dataset.

        Args:
            idx (int): Index of the item to get

        Returns:
            dict: A dictionary containing:
                - de_audio: German audio waveform
                - de_sample_rate: German audio sample rate
                - en_audio: English audio waveform
                - en_sample_rate: English audio sample rate
                - de_transcript: German transcript
                - en_transcript: English transcript
                - alignment_score: Alignment score between the pair
        """
        # Load audio files
        de_audio, de_sr = torchaudio.load(self.de_audio_paths[idx])
        en_audio, en_sr = torchaudio.load(self.en_audio_paths[idx])

        # Apply transforms if specified
        if self.transform:
            de_audio = self.transform(de_audio)
            en_audio = self.transform(en_audio)

        return {
            'de_audio': de_audio,
            'de_sample_rate': de_sr,
            'en_audio': en_audio,
            'en_sample_rate': en_sr,
            'de_transcript': self.de_transcripts[idx],
            'en_transcript': self.en_transcripts[idx],
            'alignment_score': self.alignment_scores[idx]
        }
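Since transform is applied to both waveforms in __getitem__, it is a convenient hook for channel or gain preprocessing. A minimal sketch with a rate-independent mono downmix (the data_dir and CSV paths are assumptions about a local LibriS2S checkout, not part of the commit):

import torch

def to_mono(waveform: torch.Tensor) -> torch.Tensor:
    # torchaudio.load returns (channels, samples); average over channels.
    return waveform.mean(dim=0, keepdim=True)

dataset = Libris2sDataset(
    data_dir="LibriS2S",                                # hypothetical root
    split="alignments/all_de_en_alligned_cleaned.csv",  # CSV added in this commit
    transform=to_mono,
    book_ids=["9", "10"],                               # optional filter
)

A torchaudio.transforms.Resample would also fit here, but only if all source files share one sample rate, since the transform never sees de_sr/en_sr.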
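Because clips vary in length, PyTorch's default collate cannot stack the de_audio/en_audio tensors, so a DataLoader needs a custom collate_fn. A sketch that pads each batch to its longest clip, assuming mono waveforms as produced by the to_mono transform above (pad_collate and the batch size are illustrative):

import torch
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pad_sequence

def pad_collate(batch):
    # Pad each language separately to the longest clip in the batch;
    # squeeze(0) drops the channel dim of the (1, samples) mono tensors.
    de = pad_sequence([item['de_audio'].squeeze(0) for item in batch], batch_first=True)
    en = pad_sequence([item['en_audio'].squeeze(0) for item in batch], batch_first=True)
    return {
        'de_audio': de,  # (batch, max_de_len)
        'en_audio': en,  # (batch, max_en_len)
        'de_transcript': [item['de_transcript'] for item in batch],
        'en_transcript': [item['en_transcript'] for item in batch],
        'alignment_score': torch.tensor([item['alignment_score'] for item in batch]),
        # Sample rates omitted for brevity; collect them the same way if needed.
    }

loader = DataLoader(dataset, batch_size=4, collate_fn=pad_collate)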