Add helper scripts (create_parquet.py, upload_to_hf.py)

Files changed:
- scripts/create_parquet.py  +91 -0
- scripts/upload_to_hf.py    +49 -0
scripts/create_parquet.py (ADDED)
@@ -0,0 +1,91 @@
+#!/usr/bin/env python3
+"""
+Create a parquet dataset from mimicstudio.db and the audio_files folder.
+
+Output:
+- data/dataset.parquet
+- data/audio_files/<speaker_id>/<audio_id>.wav (copied)
+
+The parquet will contain the columns: source, text, audio (relative path).
+
+Run from the repo root: python3 scripts/create_parquet.py
+"""
+import sqlite3
+import os
+import shutil
+import argparse
+from pathlib import Path
+import pandas as pd
+
+
+def prepare(db_path: str, audio_root: str, out_audio_root: str, out_parquet: str, dry_run: bool = False):
+    conn = sqlite3.connect(db_path)
+    cur = conn.cursor()
+
+    # Query the audiomodel table for audio_id, prompt (text) and speaker_id
+    cur.execute("SELECT audio_id, prompt, speaker_id FROM audiomodel")
+    rows = cur.fetchall()
+
+    records = []
+    total = len(rows)
+    missing = 0
+
+    for idx, (audio_id, prompt, speaker_id) in enumerate(rows, start=1):
+        # Source path: audio_files/<speaker_id>/<audio_id>.wav (ids cast to str for path joins)
+        src_path = Path(audio_root) / str(speaker_id) / f"{audio_id}.wav"
+        dest_dir = Path(out_audio_root) / str(speaker_id)
+        dest_path = dest_dir / f"{audio_id}.wav"
+
+        # Use a relative path that stays valid when the data folder is copied (e.g. to Colab)
+        rel_audio_path = os.path.join("data", "audio_files", str(speaker_id), f"{audio_id}.wav")
+
+        if not src_path.exists():
+            print(f"Warning: audio file not found for row {idx}/{total}: {src_path}")
+            missing += 1
+            continue
+
+        if not dry_run:
+            dest_dir.mkdir(parents=True, exist_ok=True)
+            # Copy only if the destination doesn't exist yet
+            if not dest_path.exists():
+                shutil.copy2(src_path, dest_path)
+
+        # Build the record for the parquet file
+        records.append({
+            "source": str(speaker_id) if speaker_id is not None else "0",
+            "text": prompt if prompt is not None else "",
+            "audio": rel_audio_path,
+        })
+
+        if idx % 500 == 0:
+            print(f"Processed {idx}/{total} rows...")
+
+    conn.close()
+
+    df = pd.DataFrame.from_records(records)
+
+    if not dry_run:
+        out_parquet_path = Path(out_parquet)
+        out_parquet_path.parent.mkdir(parents=True, exist_ok=True)
+        # Write the parquet (pandas uses the pyarrow engine if installed)
+        df.to_parquet(out_parquet, index=False)
+        print(f"Wrote parquet to: {out_parquet} (rows: {len(df)})")
+
+    print(f"Done. Total rows: {total}, written: {len(records)}, missing audio: {missing}")
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--db", default="db/mimicstudio.db", help="Path to mimicstudio.db")
+    parser.add_argument("--audio-root", default="audio_files", help="Root folder containing the original audio files")
+    parser.add_argument("--out-audio-root", default="data/audio_files", help="Destination audio folder to copy into")
+    parser.add_argument("--out-parquet", default="data/dataset.parquet", help="Output parquet path (default: data/dataset.parquet)")
+    parser.add_argument("--dry-run", action="store_true", help="Don't copy or write files; just show counts")
+
+    args = parser.parse_args()
+
+    prepare(args.db, args.audio_root, args.out_audio_root, args.out_parquet, dry_run=args.dry_run)


+if __name__ == "__main__":
+    main()
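A `--dry-run` pass (python3 scripts/create_parquet.py --dry-run) reports row and missing-file counts without copying or writing anything. For a quick sanity check after a real run, the parquet can be read back with pandas; this is a minimal sketch, assuming the default output path data/dataset.parquet and the repo root as the working directory:

    import pandas as pd
    from pathlib import Path

    df = pd.read_parquet("data/dataset.parquet")
    print(df.columns.tolist())   # expected: ['source', 'text', 'audio']
    print(f"{len(df)} rows")

    # The audio column stores paths relative to the repo root;
    # confirm they all resolve before uploading.
    missing = [p for p in df["audio"] if not Path(p).exists()]
    print(f"{len(missing)} unresolved audio paths")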
scripts/upload_to_hf.py (ADDED)
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+"""
+Upload the prepared `data/` folder to a Hugging Face repo under the `data/` path.
+
+Usage:
+  # interactive login (recommended)
+  huggingface-cli login
+  python3 scripts/upload_to_hf.py --repo Aybee5/ha-tts-mixed
+
+  # or provide a token via the HF_TOKEN env var (legacy: HUGGINGFACE_HUB_TOKEN)
+  HF_TOKEN=... python3 scripts/upload_to_hf.py --repo Aybee5/ha-tts-mixed
+
+This uses `huggingface_hub.upload_folder` to push the local `data/` folder to the repo under the `data/` path.
+"""
+import os
+import argparse
+from huggingface_hub import upload_folder, HfApi
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--repo", required=True, help="Repo id, e.g. username/repo")
+    parser.add_argument("--path-in-repo", default="data", help="Destination path inside the repo")
+    parser.add_argument("--local-folder", default="data", help="Local folder to upload")
+    parser.add_argument("--token", default=None, help="HF token (optional; can also come from the HF_TOKEN env var or huggingface-cli login)")
+    args = parser.parse_args()
+
+    token = args.token or os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACE_HUB_TOKEN")
+
+    print(f"Uploading local folder '{args.local_folder}' to repo '{args.repo}' at path '{args.path_in_repo}'")
+
+    # Create the dataset repo if needed so upload_folder doesn't fail on a missing repo
+    HfApi().create_repo(args.repo, repo_type="dataset", token=token, exist_ok=True)
+    try:
+        upload_folder(
+            folder_path=args.local_folder,
+            repo_id=args.repo,
+            path_in_repo=args.path_in_repo,
+            token=token,
+            repo_type="dataset",
+            # large uploads may still be subject to Hub file/repo size limits
+            commit_message=f"Upload {args.local_folder} to {args.path_in_repo}",
+        )
+    except Exception as e:
+        raise SystemExit(f"Upload failed: {e}")


+if __name__ == "__main__":
+    main()
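Once uploaded, the dataset can be consumed with the `datasets` library. The sketch below assumes the layout these scripts produce (the parquet at data/dataset.parquet inside the repo, with the audio column holding paths relative to the repo root) and a `datasets` version recent enough to read hf:// paths:

    from datasets import load_dataset, Audio
    from huggingface_hub import snapshot_download

    # Read the parquet straight from the Hub
    ds = load_dataset(
        "parquet",
        data_files="hf://datasets/Aybee5/ha-tts-mixed/data/dataset.parquet",
        split="train",
    )

    # The audio column holds relative path strings, so pull the repo down
    # and resolve them against the snapshot root before decoding.
    root = snapshot_download("Aybee5/ha-tts-mixed", repo_type="dataset")
    ds = ds.map(lambda ex: {"audio": f"{root}/{ex['audio']}"})
    ds = ds.cast_column("audio", Audio())  # decode at each file's native sampling rate

    print(ds[0]["text"], ds[0]["audio"]["sampling_rate"])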