"""Create a Hugging Face dataset from the JamendoLyrics dataset in its original layout."""
# %%
import json
import logging
from pathlib import Path
import shutil
import datasets
import numpy as np
from scipy.sparse import csr_array
from scipy.sparse.csgraph import connected_components as scipy_connected_components
import soundfile as sf
# %%
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
logger.setLevel(logging.DEBUG)
# %%
dataset_src = datasets.load_dataset(
    "jamendolyrics/jam-alt", revision="v1.4.0", split="test"
)
# %%
language_fixes = (
    [json.loads(li) for li in Path("language_fixes.jsonl").read_text().splitlines()]
    if Path("language_fixes.jsonl").exists()
    else []
)
# %%
OVERLAP_SOFT_THRESHOLD = 0.1  # tolerated overlap (s) with a neighbouring line; larger overlaps trim the segment end
OVERLAP_HARD_THRESHOLD = 0.2  # lines overlapping by more than this (s) are merged into one segment
PADDING = 0.5  # maximum context (s) added on each side of a segment
MAX_DURATION = 20.0  # segments longer than this (s) are excluded
SUBSETS_DIR = Path(".")
SUBSETS = ["pure", "groups"]  # "pure" = single lines, "groups" = merged overlapping lines
# %%
features = datasets.Features(
    {
        "song_name": datasets.Value("string"),
        "file_name": datasets.Value("string"),
        "text": datasets.Value("string"),
        "language": datasets.Value("string"),
        "song_language": datasets.Value("string"),
        "line_indices": [datasets.Value("int64")],
        "start": datasets.Value("float64"),
        "end": datasets.Value("float64"),
        "merged": datasets.Value("bool"),
        "artist": datasets.Value("string"),
        "title": datasets.Value("string"),
        "genre": datasets.Value("string"),
        "license_type": datasets.Value("string"),
    }
)
# %%
def find_connected_components(matrix: np.ndarray) -> list[list[int]]:
    """Return the connected components of an adjacency matrix as sorted lists of node indices."""
    num_components, labels = scipy_connected_components(
        csgraph=csr_array(matrix), directed=False, return_labels=True
    )
    components = [np.where(labels == i)[0].tolist() for i in range(num_components)]
    return sorted(components)
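# For example, a thresholded overlap matrix [[0, 1, 0], [1, 0, 0], [0, 0, 0]]
# (lines 0 and 1 overlap, line 2 does not) yields [[0, 1], [2]].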
# %%
# Remove any previously exported subsets so the script can be re-run cleanly
for subset in SUBSETS:
    if (SUBSETS_DIR / subset).exists():
        shutil.rmtree(SUBSETS_DIR / subset)
# %%
records = []
stats = []
for item in dataset_src:
    name = item["name"]
    song_language = item["language"]
    lines = item["lines"]
    audio = item["audio"]["array"]
    sr = item["audio"]["sampling_rate"]
    audio_duration = len(audio) / sr

    starts = np.array([line["start"] for line in lines], dtype=float)
    ends = np.array([line["end"] for line in lines], dtype=float)
    texts = [line["text"] for line in lines]
    # Compute pairwise overlaps
    overlap_ends = np.minimum(ends[:, None], ends)
    overlap_starts = np.maximum(starts[:, None], starts)
    overlaps = np.maximum(0.0, overlap_ends - overlap_starts)
    overlap_groups = find_connected_components(overlaps > OVERLAP_HARD_THRESHOLD)
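    # overlaps[i, j] is the overlap duration in seconds between lines i and j;
    # lines chained together by overlaps above the hard threshold form one group.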
    for indices in overlap_groups:
        group = [lines[i] for i in indices]
        group_name = (
            f"{min(indices):03d}-{max(indices):03d}"
            if len(indices) > 1
            else f"{indices[0]:03d}"
        )
        text = "\n".join(line["text"] for line in group)
        group_starts = [line["start"] for line in group]
        group_ends = [line["end"] for line in group]
        start, end = min(group_starts), max(group_ends)
        # Handle soft overlaps
        _, small_overlap_indices = np.where(
            (overlaps[indices] > 0) & (overlaps[indices] <= OVERLAP_HARD_THRESHOLD)
        )
        for i in small_overlap_indices:
            line = lines[i]
            if line["start"] < end and line["end"] >= end:
                msg = f"{name}: Group {group_name} has {overlaps[small_overlap_indices][:, indices].max():.2f} s overlap with line {i:03d}."
                if end - line["start"] < OVERLAP_SOFT_THRESHOLD:
                    msg += " Ignoring"
                else:
                    new_end = line["start"] + OVERLAP_SOFT_THRESHOLD
                    msg += f"\n Adjusting end from {end:.2f} to {new_end:.2f} ({new_end - end:.2f} s)"
                    end = new_end
                logger.debug(msg)
        # Pad the segment without exceeding MAX_DURATION or overlapping neighbouring lines
        non_group_indices = [i for i in range(len(lines)) if i not in indices]
        l_limit = max(
            [0.0] + [ends[i] + 0.1 for i in non_group_indices if ends[i] < end]
        )
        r_limit = min(
            [audio_duration]
            + [starts[i] - 0.1 for i in non_group_indices if starts[i] > start]
        )
        max_total_pad = max(0.0, MAX_DURATION - (end - start))
        l_pad = min(max(0.0, start - l_limit), PADDING)
        r_pad = min(max(0.0, r_limit - end), PADDING)
        if l_pad + r_pad > max_total_pad:
            l_pad = r_pad = min(l_pad, r_pad, max_total_pad / 2)
            extra = max(0.0, max_total_pad - (l_pad + r_pad)) - 1e-3
            l_pad += extra / 2
            r_pad += extra / 2
            assert l_pad + r_pad < max_total_pad
        start, end = start - l_pad, end + r_pad
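        # Example: with PADDING = 0.5 and enough room on both sides, a 3.0 s group
        # becomes a 4.0 s clip (0.5 s of context on each side).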
        duration = end - start
        stats.append(
            {
                "name": name,
                "group_name": group_name,
                "duration": duration,
                "group_size": len(indices),
                "excluded": False,
            }
        )
        if duration > MAX_DURATION:
            logger.info(f"Excluding segment {name}.{group_name} of duration {duration}")
            stats[-1]["excluded"] = True
            continue
        start_frame, end_frame = round(start * sr), round(end * sr)
        line_audio = audio[start_frame:end_frame]
        file_name = f"{name}.{group_name}.flac"

        language = song_language
        for fix in language_fixes:
            if fix["file_name"] == file_name:
                assert fix["text"] == text, (
                    f"Text mismatch for {file_name}: {fix['text']} != {text}"
                )
                language = fix["language"]
                logger.debug(
                    f"{name}: Fixing language of group {group_name} to {language}"
                )

        if len(group) > 1:
            subset_dir = SUBSETS_DIR / "groups" / language
        else:
            subset_dir = SUBSETS_DIR / "pure" / language
        out_audio_path = subset_dir / "audio" / file_name
        out_audio_path.parent.mkdir(parents=True, exist_ok=True)
        sf.write(out_audio_path, line_audio, sr)
        records.append(
            {
                "song_name": name,
                "file_name": str(out_audio_path.relative_to(subset_dir)),
                "text": "\n".join(line["text"] for line in group),
                "language": language,
                "song_language": song_language,
                "line_indices": indices,
                "start": start,
                "end": end,
                "merged": len(group) > 1,
                **{k: item[k] for k in ["artist", "title", "genre", "license_type"]},
            }
        )
dataset_out = datasets.Dataset.from_list(records, features=features, split="test")
# %%
for config_name in ["pure", "groups"]:
for subset_language in ["en", "es", "de", "fr"]:
subset_dir = SUBSETS_DIR / config_name / subset_language
subset_dir.mkdir(exist_ok=True)
subset = dataset_out.filter(lambda x: x["language"] == subset_language)
if config_name == "pure":
subset = subset.filter(lambda x: not x["merged"])
elif config_name == "groups":
subset = subset.filter(lambda x: x["merged"])
subset.to_json(subset_dir / "metadata.jsonl")
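# %%
# Optional sanity check (a sketch; assumes the layout written above and the
# Hugging Face `audiofolder` builder; the data_dir path is illustrative):
# ds_check = datasets.load_dataset("audiofolder", data_dir=str(SUBSETS_DIR / "pure" / "en"))
# print(ds_check)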