davanstrien (HF Staff) committed · verified
Commit 882e08a · 1 Parent(s): 505c869

Upload convert_scottish_exams.py

Files changed (1)
  1. convert_scottish_exams.py +506 -0
convert_scottish_exams.py ADDED
@@ -0,0 +1,506 @@
#!/usr/bin/env python3
"""
Convert the NLS Scottish School Exams dataset to Hugging Face format with proper page numbering.

This script processes directories containing:
- an image/ folder with JPG files
- an alto/ folder with ALTO XML files
- METS XML files with page ordering information

It creates one row per page with the image, extracted text, raw ALTO XML, and the correct page number.
"""

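# Example invocations (illustrative only: the input directory, output path, and repo id below are
# placeholders, not values taken from the original script or dataset):
#
#   python convert_scottish_exams.py ./nls-scottish-exams ./scottish_exams.parquet --max-docs 5
#   python convert_scottish_exams.py ./nls-scottish-exams ./scottish_exams.parquet \
#       --push-to-hub --repo-id username/scottish-exams
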
import argparse
import csv
import logging
import os
import re
import sys
import xml.etree.ElementTree as ET
from collections import defaultdict
from pathlib import Path
from typing import Optional, Dict, Tuple

from datasets import Dataset, Features, Value
from datasets import Image as HFImage
from tqdm import tqdm

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def extract_base_number(filename: str) -> str:
    """Extract the base number from a filename (before first dot)."""
    return filename.split('.')[0]


def parse_mets_page_order(mets_path: Path) -> Dict[str, int]:
    """
    Parse METS XML file to extract page ordering information.

    Returns:
        Dictionary mapping file base numbers to page order numbers
    """
    page_order_map = {}

    try:
        tree = ET.parse(mets_path)
        root = tree.getroot()

        # Define namespaces
        ns = {
            'mets': 'http://www.loc.gov/METS/',
            'xlink': 'http://www.w3.org/1999/xlink'
        }

        # Find all div elements with ORDER attribute
        for div in root.findall('.//mets:div[@ORDER]', ns):
            order = div.get('ORDER')
            if order:
                # Find all file pointers in this div
                for fptr in div.findall('.//mets:fptr', ns):
                    file_id = fptr.get('FILEID')
                    if file_id and '.3' in file_id:  # Look for image files (.3.jpg)
                        # Extract base number from file ID
                        base_num = file_id.split('.')[0].replace('file_', '')
                        page_order_map[base_num] = int(order)

        logger.debug(f"Extracted page order for {len(page_order_map)} pages from METS")

    except Exception as e:
        logger.warning(f"Error parsing METS file {mets_path}: {e}")

    return page_order_map


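# Illustrative sketch of the METS structure parse_mets_page_order() looks for (made-up IDs and
# order values, not copied from an actual NLS METS file):
#
#   <mets:div TYPE="page" ORDER="4">
#       <mets:fptr FILEID="file_123456789.3"/>
#   </mets:div>
#
# Here the FILEID contains ".3" (the image derivative), so the base number "123456789" would be
# mapped to page order 4 in the returned dictionary.
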
def extract_exam_info_from_metadata(metadata: str) -> Dict[str, str]:
    """
    Extract exam information from metadata string.

    Example: "Leaving Certificate - 1888 - P.P.1888 XLI"
    Returns: {"exam_type": "Leaving Certificate", "year": "1888", "reference": "P.P.1888 XLI"}
    """
    info = {
        "exam_type": "",
        "year": "",
        "reference": ""
    }

    if not metadata:
        return info

    # Try to extract year (4 digits)
    year_match = re.search(r'\b(18\d{2}|19\d{2}|20\d{2})\b', metadata)
    if year_match:
        info["year"] = year_match.group(1)

    # Extract exam type (everything before the first dash)
    parts = metadata.split(' - ')
    if parts:
        info["exam_type"] = parts[0].strip()

    # Extract reference (usually after the last dash)
    if len(parts) >= 3:
        info["reference"] = parts[2].strip()

    return info


def parse_inventory_csv(root_dir: Path) -> dict[str, str]:
    """
    Parse inventory CSV file if it exists in the dataset directory.

    Returns:
        Dictionary mapping document_id to metadata description
    """
    inventory_pattern = "*-inventory.csv"
    inventory_files = list(root_dir.glob(inventory_pattern))

    if not inventory_files:
        logger.info("No inventory CSV file found")
        return {}

    if len(inventory_files) > 1:
        logger.warning(f"Multiple inventory files found: {inventory_files}. Using first one.")

    inventory_file = inventory_files[0]
    logger.info(f"Reading inventory from: {inventory_file}")

    metadata_map = {}

    try:
        # Use utf-8-sig to handle BOM if present
        with open(inventory_file, encoding='utf-8-sig') as f:
            reader = csv.reader(f)
            for row_num, row in enumerate(reader, 1):
                if len(row) >= 2:
                    doc_id = row[0].strip()
                    description = row[1].strip()
                    metadata_map[doc_id] = description
                else:
                    logger.warning(f"Skipping malformed row {row_num} in {inventory_file}: {row}")

    except Exception as e:
        logger.error(f"Error reading inventory CSV: {e}")
        return {}

    logger.info(f"Loaded metadata for {len(metadata_map)} documents from inventory")
    return metadata_map


def extract_text_from_alto(alto_path: Path) -> tuple[str, str]:
    """
    Extract text content from an ALTO XML file.

    Returns:
        Tuple of (extracted_text, raw_xml)
    """
    try:
        with open(alto_path, encoding='utf-8') as f:
            raw_xml = f.read()

        # Parse XML
        root = ET.fromstring(raw_xml)

        # Find all String elements (they contain the actual text)
        # ALTO namespace
        ns = {'alto': 'http://www.loc.gov/standards/alto/v3/alto.xsd'}

        # Extract text from all String elements
        text_parts = []

        # Find all TextLine elements
        for textline in root.findall('.//alto:TextLine', ns):
            line_parts = []

            # Get all String elements in this line
            for string_elem in textline.findall('./alto:String', ns):
                content = string_elem.get('CONTENT', '')
                if content:
                    line_parts.append(content)

            # Join words in the line with spaces
            if line_parts:
                text_parts.append(' '.join(line_parts))

        # Join lines with newlines
        extracted_text = '\n'.join(text_parts)

        return extracted_text, raw_xml

    except Exception as e:
        logger.warning(f"Error processing ALTO file {alto_path}: {e}")
        return "", ""


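# Illustrative sketch of the ALTO fragment extract_text_from_alto() traverses (made-up content,
# namespace declarations omitted for brevity):
#
#   <TextLine>
#       <String CONTENT="LEAVING"/>
#       <String CONTENT="CERTIFICATE"/>
#   </TextLine>
#
# Words within a TextLine are joined with spaces ("LEAVING CERTIFICATE") and successive TextLines
# are joined with newlines to build the page text.
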
def process_document_folder(doc_path: Path, metadata_map: dict[str, str] = None) -> list[dict]:
    """
    Process a single document folder and return list of page records.

    Args:
        doc_path: Path to document folder
        metadata_map: Optional dictionary mapping document_id to metadata
    """
    records = []
    doc_id = doc_path.name
    doc_metadata = metadata_map.get(doc_id, None) if metadata_map else None

    # Extract exam information from metadata
    exam_info = extract_exam_info_from_metadata(doc_metadata)

    image_dir = doc_path / "image"
    alto_dir = doc_path / "alto"
    mets_file = doc_path / f"{doc_id}-mets.xml"

    if not image_dir.exists() or not alto_dir.exists():
        logger.warning(f"Skipping {doc_path}: missing image or alto directory")
        return records

    # Parse METS file to get page ordering
    page_order_map = {}
    if mets_file.exists():
        page_order_map = parse_mets_page_order(mets_file)
    else:
        logger.warning(f"No METS file found for {doc_id}, using filename sorting for page order")

    # Get all image files
    image_files = {f for f in os.listdir(image_dir)
                   if f.lower().endswith(('.jpg', '.jpeg', '.png', '.tiff', '.tif'))}

    # Get all ALTO files
    alto_files = {f for f in os.listdir(alto_dir) if f.endswith('.xml')}

    # Create mapping from base number to files
    image_map = {extract_base_number(f): f for f in image_files}
    alto_map = {extract_base_number(f): f for f in alto_files}

    # Get all unique page numbers
    all_pages = set(image_map.keys()) | set(alto_map.keys())

    # If no METS page order, create sequential numbering
    if not page_order_map:
        sorted_pages = sorted(all_pages)
        page_order_map = {page: idx + 1 for idx, page in enumerate(sorted_pages)}

    # Process each page
    for page_base in sorted(all_pages, key=lambda x: page_order_map.get(x, 999999)):
        actual_page_number = page_order_map.get(page_base, 0)

        record = {
            'document_id': doc_path.name,
            'page_number': actual_page_number,
            'file_identifier': page_base,
            'image_path': None,
            'alto_xml': None,
            'text': None,
            'has_image': False,
            'has_alto': False,
            'document_metadata': doc_metadata,
            'has_metadata': doc_metadata is not None,
            'exam_type': exam_info['exam_type'],
            'exam_year': exam_info['year'],
            'exam_reference': exam_info['reference']
        }

        # Check for image
        if page_base in image_map:
            image_path = image_dir / image_map[page_base]
            if image_path.exists():
                record['image_path'] = str(image_path)
                record['has_image'] = True

        # Check for ALTO
        if page_base in alto_map:
            alto_path = alto_dir / alto_map[page_base]
            if alto_path.exists():
                text, xml = extract_text_from_alto(alto_path)
                record['alto_xml'] = xml
                record['text'] = text
                record['has_alto'] = True

        records.append(record)

    return records


def process_dataset(root_dir: Path, max_docs: Optional[int] = None,
                    include_metadata: bool = True) -> list[dict]:
    """
    Process entire dataset directory.

    Args:
        root_dir: Root directory of dataset
        max_docs: Maximum number of documents to process
        include_metadata: Whether to include metadata from inventory CSV
    """
    all_records = []

    # Parse inventory CSV if requested
    metadata_map = {}
    if include_metadata:
        metadata_map = parse_inventory_csv(root_dir)

    # Find all document directories
    doc_dirs = [d for d in root_dir.iterdir()
                if d.is_dir() and not d.name.startswith('.')
                and d.name not in ['__pycache__']]

    if max_docs:
        doc_dirs = doc_dirs[:max_docs]

    logger.info(f"Processing {len(doc_dirs)} document directories...")

    # Process each document
    for doc_dir in tqdm(doc_dirs, desc="Processing documents"):
        records = process_document_folder(doc_dir, metadata_map)
        all_records.extend(records)

    return all_records


def create_huggingface_dataset(records: list[dict], include_missing: bool = True) -> Dataset:
    """
    Create a Hugging Face dataset from records.

    Args:
        records: List of page records
        include_missing: If False, only include pages with both image and ALTO
    """
    # Filter records if needed
    if not include_missing:
        records = [r for r in records if r['has_image'] and r['has_alto']]
        logger.info(f"Filtered to {len(records)} records with both image and ALTO")

    # Prepare data for HF dataset
    dataset_dict = defaultdict(list)

    for record in records:
        dataset_dict['document_id'].append(record['document_id'])
        dataset_dict['page_number'].append(record['page_number'])
        dataset_dict['file_identifier'].append(record['file_identifier'])

        # Store image path instead of loading image
        # HuggingFace datasets will handle loading when needed
        if record['has_image'] and record['image_path']:
            dataset_dict['image'].append(record['image_path'])
        else:
            dataset_dict['image'].append(None)

        dataset_dict['text'].append(record['text'] or "")
        dataset_dict['alto_xml'].append(record['alto_xml'] or "")
        dataset_dict['has_image'].append(record['has_image'])
        dataset_dict['has_alto'].append(record['has_alto'])
        dataset_dict['document_metadata'].append(record.get('document_metadata') or "")
        dataset_dict['has_metadata'].append(record.get('has_metadata', False))
        dataset_dict['exam_type'].append(record.get('exam_type', ''))
        dataset_dict['exam_year'].append(record.get('exam_year', ''))
        dataset_dict['exam_reference'].append(record.get('exam_reference', ''))

    # Create HF dataset
    features = Features({
        'document_id': Value('string'),
        'page_number': Value('int32'),
        'file_identifier': Value('string'),
        'image': HFImage(),
        'text': Value('string'),
        'alto_xml': Value('string'),
        'has_image': Value('bool'),
        'has_alto': Value('bool'),
        'document_metadata': Value('string'),
        'has_metadata': Value('bool'),
        'exam_type': Value('string'),
        'exam_year': Value('string'),
        'exam_reference': Value('string')
    })

    dataset = Dataset.from_dict(dict(dataset_dict), features=features)

    return dataset


def print_statistics(records: list[dict]):
    """Print statistics about the processed dataset."""
    total = len(records)
    with_both = sum(1 for r in records if r['has_image'] and r['has_alto'])
    image_only = sum(1 for r in records if r['has_image'] and not r['has_alto'])
    alto_only = sum(1 for r in records if not r['has_image'] and r['has_alto'])
    with_metadata = sum(1 for r in records if r.get('has_metadata', False))

    print("\n=== Dataset Statistics ===")
    print(f"Total pages: {total:,}")
    print(f"Pages with both image and ALTO: {with_both:,} ({with_both/total*100:.1f}%)")
    print(f"Pages with image only: {image_only:,} ({image_only/total*100:.1f}%)")
    print(f"Pages with ALTO only: {alto_only:,} ({alto_only/total*100:.1f}%)")
    if with_metadata > 0:
        print(f"Pages with metadata: {with_metadata:,} ({with_metadata/total*100:.1f}%)")

    # Document statistics
    docs = defaultdict(lambda: {'pages': 0, 'complete': 0, 'has_metadata': False})
    for r in records:
        docs[r['document_id']]['pages'] += 1
        if r['has_image'] and r['has_alto']:
            docs[r['document_id']]['complete'] += 1
        if r.get('has_metadata', False):
            docs[r['document_id']]['has_metadata'] = True

    print(f"\nTotal documents: {len(docs)}")
    complete_docs = sum(1 for d in docs.values() if d['pages'] == d['complete'])
    print(f"Documents with all pages complete: {complete_docs} "
          f"({complete_docs/len(docs)*100:.1f}%)")

    docs_with_metadata = sum(1 for d in docs.values() if d['has_metadata'])
    if docs_with_metadata > 0:
        print(f"Documents with metadata: {docs_with_metadata} "
              f"({docs_with_metadata/len(docs)*100:.1f}%)")

    # Year distribution
    years = defaultdict(int)
    for r in records:
        year = r.get('exam_year', '')
        if year:
            years[year] += 1

    if years:
        print("\n=== Exam Years Distribution ===")
        for year in sorted(years.keys()):
            print(f"{year}: {years[year]} pages")


def main():
    parser = argparse.ArgumentParser(description='Convert NLS Scottish Exams dataset to Hugging Face format')
    parser.add_argument('input_dir', type=str, help='Path to dataset directory')
    parser.add_argument('output_path', type=str, help='Output path for HF dataset')
    parser.add_argument('--max-docs', type=int, help='Maximum number of documents to process')
    parser.add_argument('--include-missing', action='store_true',
                        help='Include pages with missing image or ALTO')
    parser.add_argument('--format', choices=['parquet', 'json', 'csv'],
                        default='parquet', help='Output format')
    parser.add_argument('--push-to-hub', action='store_true',
                        help='Push dataset to Hugging Face Hub')
    parser.add_argument('--repo-id', type=str,
                        help='Repository ID on Hugging Face Hub (e.g., username/dataset-name)')
    parser.add_argument('--private', action='store_true',
                        help='Make the dataset private on Hugging Face Hub')
    parser.add_argument('--include-metadata', type=str, default='true',
                        choices=['true', 'false'],
                        help='Include metadata from inventory CSV if available (default: true)')

    args = parser.parse_args()

    # Validate arguments
    if args.push_to_hub and not args.repo_id:
        logger.error("--repo-id is required when using --push-to-hub")
        sys.exit(1)

    input_path = Path(args.input_dir)
    if not input_path.exists():
        logger.error(f"Input directory does not exist: {input_path}")
        sys.exit(1)

    # Convert string boolean to actual boolean
    include_metadata = args.include_metadata.lower() == 'true'

    # Process dataset
    logger.info(f"Processing dataset from {input_path}")
    records = process_dataset(input_path, args.max_docs, include_metadata)

    if not records:
        logger.error("No records found!")
        sys.exit(1)

    # Print statistics
    print_statistics(records)

    # Create HF dataset
    logger.info("Creating Hugging Face dataset...")
    dataset = create_huggingface_dataset(records, include_missing=args.include_missing)

    # Save dataset locally
    logger.info(f"Saving dataset to {args.output_path}")
    if args.format == 'parquet':
        dataset.to_parquet(args.output_path)
    elif args.format == 'json':
        dataset.to_json(args.output_path)
    elif args.format == 'csv':
        dataset.to_csv(args.output_path)

    logger.info(f"Dataset saved successfully! Total rows: {len(dataset)}")

    # Push to Hugging Face Hub if requested
    if args.push_to_hub:
        logger.info(f"Pushing dataset to Hugging Face Hub: {args.repo_id}")
        try:
            dataset.push_to_hub(
                repo_id=args.repo_id,
                private=args.private,
                commit_message=f"Add NLS Scottish Exams dataset with {len(dataset)} pages"
            )
            logger.info(f"Dataset successfully pushed to https://huggingface.co/datasets/{args.repo_id}")
        except Exception as e:
            logger.error(f"Failed to push to Hub: {e}")
            logger.info("Make sure you're logged in with 'huggingface-cli login'")
            sys.exit(1)


if __name__ == "__main__":
    main()
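# Rough sketch of reading the converted data back (assumes the default parquet format; the file
# name and repo id are hypothetical, not defined by this script):
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files="scottish_exams.parquet", split="train")
#   # or, after --push-to-hub:
#   # ds = load_dataset("username/scottish-exams", split="train")
#   print(ds[0]["document_id"], ds[0]["page_number"])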