# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "datasets",
#     "dynaword",
#     "marker-pdf",
#     "requests",
#     "torch",
# ]
#
# [tool.uv.sources]
# dynaword = { git = "https://huggingface.co/datasets/danish-foundation-models/danish-dynaword" }
# ///
"""Script for downloading and processing the Domsdatabasen.dk site.

Note: To run this script, you need to set `GIT_LFS_SKIP_SMUDGE=1` so that dynaword can be installed:

```bash
GIT_LFS_SKIP_SMUDGE=1 uv run data/domsdatabasen/create.py
```

Note: This script is designed to be run on a GPU.
"""

import atexit
import logging
import os
import csv
import time
from typing import cast

import torch
import gc
import requests
import torch.multiprocessing as mp
from pathlib import Path
from datetime import date, datetime

from datasets import Dataset, concatenate_datasets
from marker.converters.pdf import PdfConverter
from marker.models import create_model_dict
from marker.output import text_from_rendered

from dynaword.process_dataset import (
    add_token_count,
    ensure_column_order,
    remove_duplicate_text,
    remove_empty_texts,
)

logger = logging.getLogger(__name__)

# ----------------- Config ------------------
PDF_DIR = Path(__file__).parent / "pdfs"
LOG_FILE = Path(__file__).parent / "progress_log.csv"
PARQUET_FILE = Path(__file__).parent / "domsdatabasen.parquet"

MAX_WORKERS = 10
RETRY_COUNT = 3
RETRY_DELAY = 2  # seconds to wait between retries

# ----------------- Headers ------------------
HEADERS = {
    "Accept": "application/json, text/plain, */*",
    "Accept-Encoding": "gzip, deflate, br, zstd",
    "Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8",
    "Connection": "keep-alive",
    "Content-Type": "application/json",
}


def init_csv():
    if not LOG_FILE.exists():
        with open(LOG_FILE, "w", newline="", encoding="utf-8") as f:
            writer = csv.DictWriter(
                f,
                fieldnames=["document_id", "pdf_downloaded", "text_extracted", "error"],
            )
            writer.writeheader()


def append_log(document_id: str, pdf: bool, text: bool, error: str = ""):
    with open(LOG_FILE, "a", newline="", encoding="utf-8") as f:
        writer = csv.DictWriter(
            f, fieldnames=["document_id", "pdf_downloaded", "text_extracted", "error"]
        )
        writer.writerow(
            {
                "document_id": document_id,
                "pdf_downloaded": int(pdf),
                "text_extracted": int(text),
                "error": error,
            }
        )


def load_existing_ids() -> set:
    if not PARQUET_FILE.exists():
        return set()
    ds = Dataset.from_parquet(str(PARQUET_FILE))
    ds = cast(Dataset, ds)
    return set(ds["id"])


# ----------------- Retry Helpers ------------------
def retry(func, *args, retries=RETRY_COUNT, delay=RETRY_DELAY, **kwargs):
    for attempt in range(retries):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.warning(f"⚠️ Retry {attempt + 1}/{retries} failed: {e}")
            time.sleep(delay)
    # `func` may be a callable instance (e.g. a PdfConverter) without __name__, so fall back to repr().
    raise RuntimeError(
        f"❌ All retries failed for {getattr(func, '__name__', repr(func))}({args})"
    )


# ----------------- PDF Download ------------------
def download_pdf(document: dict) -> Path | None:
    document_id = document["id"]
    out_path = PDF_DIR / f"document_{document_id}.pdf"
    if out_path.exists():
        logger.info(f"⏭️ Skipped PDF (exists): {document_id}")
        return out_path

    url = f"https://domsdatabasen.dk/webapi/api/Case/document/download/{document_id}"
    try:
        response = retry(requests.get, url, headers=HEADERS)
        if response.status_code == 200:
            with open(out_path, "wb") as f:
                f.write(response.content)
            logger.info(f"✅ Downloaded PDF: {document_id}")
            append_log(document_id, pdf=True, text=False)
            return out_path
        else:
            raise RuntimeError(f"Download failed: {response.status_code}")
    except Exception as e:
        append_log(document_id, pdf=False, text=False, error=str(e))
        return None

# ----------------- Parallel Extract Text ------------------
def worker_init():
    model_dict = create_model_dict()
    global model_refs
    model_refs = model_dict
    # Ensure we clean up the model references on exit
    atexit.register(worker_exit)


def worker_exit():
    global model_refs
    try:
        del model_refs
    except Exception:
        pass


def process_document(document: dict) -> dict | None:
    # from marker.output import text_from_rendered
    # from marker.converters.pdf import PdfConverter

    torch.set_num_threads(2)

    document_id = document["id"]
    verdict_date = document.get("verdictDateTime")
    pdf_path = PDF_DIR / f"document_{document_id}.pdf"

    if not pdf_path.exists():
        url = (
            f"https://domsdatabasen.dk/webapi/api/Case/document/download/{document_id}"
        )
        try:
            response = retry(requests.get, url, headers=HEADERS)
            if response.status_code == 200:
                with open(pdf_path, "wb") as f:
                    f.write(response.content)
                logger.info(f"✅ Downloaded PDF: {document_id}")
            else:
                raise RuntimeError(f"Download failed: {response.status_code}")
        except Exception as e:
            append_log(document_id, pdf=False, text=False, error=str(e))
            return None

    config = {"pdftext_workers": 1, "extract_images": False, "disable_tqdm": True}

    try:
        converter = PdfConverter(artifact_dict=model_refs, config=config)
        rendered = retry(converter, str(pdf_path))
        text, _, _ = text_from_rendered(rendered)
        logger.info(f"🖍️ Extracted text: {document_id}")
        append_log(document_id, pdf=True, text=True)

        del rendered
        del converter

        return {
            "id": document_id,
            "text": text,
            "source": "Domsdatabasen",
            "created": format_created(verdict_date),
            "added": date.today().isoformat(),
            "metadata": {},
        }
    except Exception as e:
        append_log(document_id, pdf=True, text=False, error=str(e))
        return None
    finally:
        gc.collect()


# ----------------- Page Fetching ------------------
def fetch_case_page(page_num: int) -> tuple[list[dict], int]:
    url = f"https://domsdatabasen.dk/webapi/api/Case/advanced?sorting=VerdictDateDesc&page={page_num}&pageSize=100"
    response = retry(requests.post, url, headers=HEADERS, json={})
    data = response.json()

    document_entries = []
    for case in data.get("cases", []):
        for doc in case.get("documents", []):
            document_entries.append(
                {
                    "id": doc["id"],
                    "verdictDateTime": doc.get("verdictDateTime"),
                }
            )

    return document_entries, data.get("pageCount", 1)


# ----------------- Utilities ------------------
def format_created(verdict_date: str | None) -> str:
    if verdict_date:
        try:
            dt = datetime.fromisoformat(verdict_date)
            formatted = dt.date().isoformat()
            return f"{formatted}, {formatted}"
        except Exception:
            pass
    today = date.today().isoformat()
    return f"{today}, {today}"


# ----------------- Main Loop ------------------
def main():
    PDF_DIR.mkdir(exist_ok=True)
    init_csv()

    all_records = []
    page_num = 1

    _, total_pages = fetch_case_page(1)
    logger.info(f"📄 Total pages: {total_pages}")

    existing_ids = load_existing_ids()
    logger.info(f"🔄 Resuming with {len(existing_ids)} already processed IDs")

    while page_num <= total_pages:
        logger.info(f"\n🔎 Fetching page {page_num}/{total_pages}")
        try:
            doc_infos, _ = fetch_case_page(page_num)
        except Exception as e:
            logger.warning(f"❌ Failed to fetch page {page_num}: {e}")
            page_num += 1
            continue

        doc_infos = [doc for doc in doc_infos if doc["id"] not in existing_ids]

        # Extract text in parallel using multiprocessing
        with mp.Pool(
            processes=MAX_WORKERS, initializer=worker_init, maxtasksperchild=10
        ) as pool:
            results = pool.map(process_document, doc_infos)

        all_records.extend([r for r in results if r])

        if all_records:
            ds_new = Dataset.from_list(all_records)
            if PARQUET_FILE.exists():
                ds_old = Dataset.from_parquet(str(PARQUET_FILE))
                ds_old = cast(Dataset, ds_old)
                ds_combined = concatenate_datasets([ds_old, ds_new])
            else:
                ds_combined = ds_new

            ds_combined.to_parquet(str(PARQUET_FILE))
            logger.info(f"📦 Appended {len(all_records)} records to {PARQUET_FILE}")
            existing_ids.update([r["id"] for r in all_records])
            all_records.clear()

        page_num += 1

    ds = Dataset.from_parquet(str(PARQUET_FILE))
    ds = cast(Dataset, ds)

    ds = remove_empty_texts(ds)
    ds = remove_duplicate_text(ds)
    ds = add_token_count(ds)
    ds = ensure_column_order(ds)

    ds.to_parquet(str(PARQUET_FILE))


if __name__ == "__main__":
    # Ensure threads don't contend
    os.environ["MKL_DYNAMIC"] = "FALSE"
    os.environ["OMP_DYNAMIC"] = "FALSE"
    os.environ["OMP_NUM_THREADS"] = "2"  # Avoid OpenMP issues with multiprocessing
    os.environ["OPENBLAS_NUM_THREADS"] = "2"
    os.environ["MKL_NUM_THREADS"] = "2"
    os.environ["GRPC_VERBOSITY"] = "ERROR"
    os.environ["GLOG_minloglevel"] = "2"
    os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = (
        "1"  # Transformers uses .isin for a simple op, which is not supported on MPS
    )
    os.environ["IN_STREAMLIT"] = "true"  # Avoid multiprocessing inside surya

    mp.set_start_method("spawn", force=True)

    log_path = Path(__file__).parent / "domsdatabasen.log"
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(log_path),
        ],
    )

    main()