# Dataset Loading Script

Save this as `load_dataset.py` to use:
```python
import json

from datasets import Audio, Dataset, Features, Value


def load_dataset():
    # Define the schema; sampling_rate=None preserves each clip's original rate
    features = Features({
        "audio": Audio(sampling_rate=None),
        "text": Value("string"),
        "speaker_id": Value("string"),
        "emotion": Value("string"),
        "language": Value("string"),
    })

    # Column-wise buffers matching the features above
    data = {
        "audio": [],
        "text": [],
        "speaker_id": [],
        "emotion": [],
        "language": [],
    }

    # Read the JSONL metadata: one JSON object per line
    with open("data.jsonl", "r", encoding="utf-8") as f:
        for line in f:
            obj = json.loads(line)
            data["audio"].append(obj["audio"])  # relative audio path within the repo
            data["text"].append(obj.get("text", ""))
            data["speaker_id"].append(obj.get("speaker_id", ""))
            data["emotion"].append(obj.get("emotion", "neutral"))
            data["language"].append(obj.get("language", "en"))

    # Build the Dataset; the Audio feature decodes the file paths on access
    dataset = Dataset.from_dict(data, features=features)
    return dataset


# Quick sanity check when run directly
if __name__ == "__main__":
    dataset = load_dataset()
    print(f"Dataset loaded with {len(dataset)} examples")
```