simplexsigil2 commited on
Commit
3d97c3d
·
verified ·
1 Parent(s): 88dbfe7

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. .gitignore +2 -1
  2. README.md +261 -82
  3. wanfall.py +222 -5
.gitignore CHANGED
@@ -4,4 +4,5 @@ create_splits.py
4
  export_via_to_csv.py
5
  extract_jsonl_metadata.py
6
  test_wanfall_builder.py
7
-
 
 
4
  export_via_to_csv.py
5
  extract_jsonl_metadata.py
6
  test_wanfall_builder.py
7
+ .claude
8
+ test_framewise_complete.py
README.md CHANGED
@@ -16,8 +16,7 @@ configs:
16
  - config_name: labels
17
  data_files:
18
  - labels/wanfall.csv
19
- default: true
20
- description: "Temporal segment labels for all videos. Load splits to get train/val/test paths."
21
 
22
  - config_name: metadata
23
  data_files:
@@ -32,6 +31,7 @@ configs:
32
  path: "splits/random/val.csv"
33
  - split: test
34
  path: "splits/random/test.csv"
 
35
  description: "Random 80/10/10 train/val/test split (seed 42)"
36
 
37
  - config_name: cross_age
@@ -63,6 +63,9 @@ configs:
63
  - split: test
64
  path: "splits/cross_bmi/test.csv"
65
  description: "Cross-BMI evaluation: train on normal/underweight, val on overweight, test on obese"
 
 
 
66
  ---
67
  [![License: CC BY-NC 4.0](https://img.shields.io/badge/License-CC%20BY--NC%204.0-lightgrey.svg)](https://creativecommons.org/licenses/by-nc/4.0/)
68
 
@@ -91,11 +94,13 @@ WanFall is a large-scale synthetic dataset designed for activity recognition res
91
  - **Video duration**: 5.0625 seconds per clip
92
  - **Frame count**: 81 frames per video
93
  - **Frame rate**: 16 fps
94
- - **Split configurations**: 4 available configs
 
95
  - `random`: 80/10/10 train/val/test split (seed 42) - 9,600/1,200/1,200 videos
96
  - `cross_age`: Cross-age evaluation - 4,000/2,000/6,000 videos
97
  - `cross_ethnicity`: Cross-ethnicity evaluation - 5,178/1,741/5,081 videos
98
  - `cross_bmi`: Cross-BMI evaluation - 6,066/2,962/2,972 videos
 
99
  - **Metadata fields**: 12 demographic and scene attributes per video
100
 
101
  ## Activity Categories
@@ -174,71 +179,267 @@ fall/fall_ch_002
174
  ...
175
  ```
176
 
177
- ## Usage Example
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
178
 
179
  ```python
180
  from datasets import load_dataset
181
- import pandas as pd
182
-
183
- # Load the datasets
184
- print("Loading WanFall dataset...")
185
-
186
- # Note: All segment labels are in the "train" split when loaded from the labels config,
187
- # but we join them with the actual train/val/test splits afterwards.
188
- labels = load_dataset("simplexsigil2/wanfall", "labels")["train"]
189
-
190
- # Load the random 80/10/10 split
191
- random_split = load_dataset("simplexsigil2/wanfall", "random")
192
-
193
- # Load video metadata (optional, for demographic filtering)
194
- video_metadata = pd.read_csv("videos/metadata.csv")
195
- print(f"Video metadata shape: {video_metadata.shape}")
196
-
197
- # Convert labels to DataFrame
198
- labels_df = pd.DataFrame(labels)
199
- print(f"Labels dataframe shape: {labels_df.shape}")
200
- print(f"Total temporal segments: {len(labels_df)}")
201
-
202
- # Process each split (train, validation, test)
203
- for split_name, split_data in random_split.items():
204
- # Convert to DataFrame
205
- split_df = pd.DataFrame(split_data)
206
-
207
- # Join with labels on 'path'
208
- merged_df = pd.merge(split_df, labels_df, on="path", how="left")
209
-
210
- # Print statistics
211
- print(f"\n{split_name} split: {len(split_df)} videos, {len(merged_df)} temporal segments")
212
-
213
- # Print examples
214
- if not merged_df.empty:
215
- print(f"\n {split_name.upper()} EXAMPLES:")
216
- random_samples = merged_df.sample(min(3, len(merged_df)))
217
- for i, (_, row) in enumerate(random_samples.iterrows()):
218
- print(f" Example {i+1}:")
219
- print(f" Path: {row['path']}")
220
- print(f" Label: {row['label']} (segment {row['start']:.2f}s - {row['end']:.2f}s)")
221
- print(f" Age: {row['age_group']}, Gender: {row['gender_presentation']}")
222
- print(f" Ethnicity: {row['race_ethnicity_omb']}, Environment: {row['environment_category']}")
223
- print()
224
-
225
- # Example: Filter by demographics
226
- elderly_falls = labels_df[
227
- (labels_df['age_group'] == 'elderly_65_plus') &
228
- (labels_df['label'] == 1) # fall = label 1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
229
  ]
230
- print(f"\nElderly fall segments: {len(elderly_falls)} ({elderly_falls['path'].nunique()} unique videos)")
 
231
  ```
232
 
233
- ### Label Mapping
 
 
234
 
235
  ```python
236
- LABEL_MAP = {
237
- 0: 'walk', 1: 'fall', 2: 'fallen', 3: 'sit_down',
238
- 4: 'sitting', 5: 'lie_down', 6: 'lying', 7: 'stand_up',
239
- 8: 'standing', 9: 'other', 10: 'kneel_down', 11: 'kneeling',
240
- 12: 'squat_down', 13: 'squatting', 14: 'crawl', 15: 'jump'
241
- }
 
 
 
 
 
 
 
 
242
  ```
243
 
244
  ### Cross-Demographic Evaluation Splits
@@ -263,28 +464,6 @@ Evaluates model performance across different body types:
263
  - **Validation** (2,962 videos): Overweight
264
  - **Test** (2,972 videos): Obese
265
 
266
- **Usage Example:**
267
- ```python
268
- from datasets import load_dataset
269
- import pandas as pd
270
-
271
- # Load cross-age splits
272
- cross_age = load_dataset("simplexsigil2/wanfall", "cross_age")
273
- labels = load_dataset("simplexsigil2/wanfall", "labels")["train"]
274
-
275
- # Merge labels with splits
276
- labels_df = pd.DataFrame(labels)
277
- train_df = pd.DataFrame(cross_age["train"])
278
- train_labels = pd.merge(train_df, labels_df, on="path", how="left")
279
-
280
- print(f"Cross-age train: {len(train_labels)} segments")
281
- print(f"Age groups: {train_labels['age_group'].unique()}")
282
-
283
- # Similarly for cross_ethnicity and cross_bmi configs
284
- cross_ethnicity = load_dataset("simplexsigil2/wanfall", "cross_ethnicity")
285
- cross_bmi = load_dataset("simplexsigil2/wanfall", "cross_bmi")
286
- ```
287
-
288
  ## Technical Properties
289
 
290
  ### Video Specifications
 
16
  - config_name: labels
17
  data_files:
18
  - labels/wanfall.csv
19
+ description: "All temporal segment labels (19,228 segments) in a single split."
 
20
 
21
  - config_name: metadata
22
  data_files:
 
31
  path: "splits/random/val.csv"
32
  - split: test
33
  path: "splits/random/test.csv"
34
+ default: true
35
  description: "Random 80/10/10 train/val/test split (seed 42)"
36
 
37
  - config_name: cross_age
 
63
  - split: test
64
  path: "splits/cross_bmi/test.csv"
65
  description: "Cross-BMI evaluation: train on normal/underweight, val on overweight, test on obese"
66
+
67
+ - config_name: framewise
68
+ description: "Frame-wise labels (81 per video). Use the framewise=True parameter with any split config instead."
69
  ---
70
  [![License: CC BY-NC 4.0](https://img.shields.io/badge/License-CC%20BY--NC%204.0-lightgrey.svg)](https://creativecommons.org/licenses/by-nc/4.0/)
71
 
 
94
  - **Video duration**: 5.0625 seconds per clip
95
  - **Frame count**: 81 frames per video
96
  - **Frame rate**: 16 fps
97
+ - **Annotation formats**: Temporal segments (start/end times) OR frame-wise labels (81 per video)
98
+ - **Split configurations**: 4 split configs + framewise support
99
  - `random`: 80/10/10 train/val/test split (seed 42) - 9,600/1,200/1,200 videos
100
  - `cross_age`: Cross-age evaluation - 4,000/2,000/6,000 videos
101
  - `cross_ethnicity`: Cross-ethnicity evaluation - 5,178/1,741/5,081 videos
102
  - `cross_bmi`: Cross-BMI evaluation - 6,066/2,962/2,972 videos
103
+ - `framewise=True`: Add frame-wise labels (81 per video) to any split
104
  - **Metadata fields**: 12 demographic and scene attributes per video
105
 
106
  ## Activity Categories
 
179
  ...
180
  ```
181
 
182
+ ## Usage
183
+
184
+ The WanFall dataset provides a flexible Python API through the HuggingFace `datasets` library with multiple configurations and loading modes.
185
+
186
+ ### Quick Start
187
+
188
+ ```python
189
+ from datasets import load_dataset
190
+
191
+ # Load with random 80/10/10 split (temporal segments, default)
192
+ dataset = load_dataset("simplexsigil2/wanfall", "random")
193
+
194
+ print(f"Train: {len(dataset['train'])} segments")
195
+ print(f"Validation: {len(dataset['validation'])} segments")
196
+ print(f"Test: {len(dataset['test'])} segments")
197
+
198
+ # Access example
199
+ example = dataset['train'][0]
200
+ print(f"Video: {example['path']}")
201
+ print(f"Activity: {example['label']} ({example['start']:.2f}s - {example['end']:.2f}s)")
202
+ print(f"Age group: {example['age_group']}")
203
+ ```
204
+
205
+ ### Dataset Configurations
206
+
207
+ WanFall provides **7 configurations** (`labels`, `metadata`, `random`, `cross_age`, `cross_ethnicity`, `cross_bmi`, `framewise`) for different use cases:
208
+
209
+ **Key Distinction: Segment-Level vs Video-Level**
210
+
211
+ | Configuration | Sample Unit | Train Size | Has start/end? | Has frame_labels? |
212
+ |--------------|-------------|------------|----------------|-------------------|
213
+ | `random` | **Segment** | 15,344 segments | ✅ Yes | ❌ No |
214
+ | `random` + `framewise=True` | **Video** | 9,600 videos | ❌ No | ✅ Yes (81 labels) |
215
+ | `cross_age` | **Segment** | 6,267 segments | ✅ Yes | ❌ No |
216
+ | `cross_age` + `framewise=True` | **Video** | 4,000 videos | ❌ No | ✅ Yes (81 labels) |
217
+ | `labels` | **Segment** | 19,228 segments | ✅ Yes | ❌ No |
218
+ | `framewise` | **Video** | 12,000 videos | ❌ No | ✅ Yes (81 labels) |
219
+
220
+ #### 1. **Temporal Segments** (Default)
221
+
222
+ Load temporal segment annotations where **each sample is a segment** with start/end times:
223
+
224
+ ```python
225
+ # Default: random split with temporal segments
226
+ dataset = load_dataset("simplexsigil2/wanfall") # or "random"
227
+
228
+ # Each example is a SEGMENT (not a video)
229
+ example = dataset['train'][0]
230
+ print(example['path']) # "fall/fall_ch_001"
231
+ print(example['label']) # 1 (activity class ID)
232
+ print(example['start']) # 0.0 (start time in seconds)
233
+ print(example['end']) # 1.006 (end time in seconds)
234
+ print(example['age_group']) # Demographic metadata
235
+
236
+ # Dataset contains multiple segments per video
237
+ print(f"Total segments in train: {len(dataset['train'])}") # 15,344 segments
238
+ print(f"Unique videos: {len(set([ex['path'] for ex in dataset['train']]))}") # 9,600 videos
239
+ ```
240
+
241
+ **Key characteristics:**
242
+ - **Sample = Temporal Segment** (one video can have multiple segments)
243
+ - Each segment has `start` and `end` times
244
+ - Train: 15,344 segments from 9,600 videos
245
+ - Val: 1,927 segments from 1,200 videos
246
+ - Test: 1,957 segments from 1,200 videos
247
+
248
+ **Available split configs:**
249
+ - `random` - 80/10/10 split (15,344/1,927/1,957 segments)
250
+ - `cross_age` - Cross-age evaluation (6,267/3,762/9,199 segments)
251
+ - `cross_ethnicity` - Cross-ethnicity evaluation (8,267/2,762/8,199 segments)
252
+ - `cross_bmi` - Cross-BMI evaluation (9,675/4,701/4,852 segments)
253
+
254
+ #### 2. **Frame-Wise Labels**
255
+
256
+ Load dense frame-level labels where **each sample is a video** with 81 labels:
257
+
258
+ ```python
259
+ # Standalone: all 12,000 videos with frame-wise labels
260
+ dataset = load_dataset("simplexsigil2/wanfall", "framewise")
261
+
262
+ # With splits: random split with frame-wise labels
263
+ dataset = load_dataset("simplexsigil2/wanfall", "random", framewise=True)
264
+
265
+ # Each example is a VIDEO (not a segment)
266
+ example = dataset['train'][0]
267
+ print(example['path']) # "fall/fall_ch_001"
268
+ print(example['frame_labels']) # [1, 1, 1, ..., 11, 11] (81 labels)
269
+ print(len(example['frame_labels'])) # 81 frames
270
+ print(example['age_group']) # Demographic metadata included
271
+
272
+ # Dataset contains one sample per video
273
+ print(f"Total videos in train: {len(dataset['train'])}") # 9,600 videos
274
+ ```
275
+
276
+ **Key characteristics:**
277
+ - **Sample = Video** (one sample per video, no segments)
278
+ - Each video has 81 frame labels (no start/end times)
279
+ - Train: 9,600 videos
280
+ - Val: 1,200 videos
281
+ - Test: 1,200 videos
282
+
283
+ **Key features:**
284
+ - **81 labels per video** (one per frame @ 16fps)
285
+ - **Works with all split configs**: Add `framewise=True` to any split
286
+ - **Efficient**: 348KB compressed archive, automatically cached
287
+ - **Complete metadata**: All demographic attributes included
288
+
289
+ #### 3. **Paths Only Mode**
290
+
291
+ Load only video paths for custom video loading:
292
+
293
+ ```python
294
+ # Minimal loading: only video paths
295
+ dataset = load_dataset("simplexsigil2/wanfall", "random", paths_only=True)
296
+
297
+ # Only contains paths
298
+ example = dataset['train'][0]
299
+ print(example) # {'path': 'fall/fall_ch_001'}
300
+ ```
301
+
302
+ #### 4. **All Segments** (No Splits)
303
+
304
+ Load all 19,228 temporal segments without split partitions:
305
+
306
+ ```python
307
+ dataset = load_dataset("simplexsigil2/wanfall", "labels")
308
+ all_segments = dataset['train'] # Single split with all segments
309
+ print(f"Total segments: {len(all_segments)}") # 19,228 segments
310
+
311
+ # Each sample is a segment (like config 1, but no train/val/test split)
312
+ example = all_segments[0]
313
+ print(f"Path: {example['path']}")
314
+ print(f"Segment: {example['start']:.2f}s - {example['end']:.2f}s")
315
+ print(f"Label: {example['label']}")
316
+ ```
317
+
318
+ #### 5. **Video Metadata Only**
319
+
320
+ Load only video-level metadata (12,000 videos):
321
+
322
+ ```python
323
+ dataset = load_dataset("simplexsigil2/wanfall", "metadata")
324
+ metadata = dataset['train'] # 12,000 videos
325
+ print(f"Columns: {metadata.column_names}")
326
+ # ['path', 'dataset', 'age_group', 'gender_presentation', ...]
327
+ ```
328
+
329
+ ### Complete Usage Examples
330
+
331
+ #### Example 1: Training with Temporal Segments (Segment-Level)
332
+
333
+ When using temporal segments, **each sample is a segment** with start/end times. Multiple segments can come from the same video.
334
 
335
  ```python
336
  from datasets import load_dataset
337
+
338
+ # Load random split (segment-level samples)
339
+ dataset = load_dataset("simplexsigil2/wanfall", "random")
340
+
341
+ print(f"Training on {len(dataset['train'])} segments") # 15,344 segments
342
+
343
+ # Training loop - each iteration is ONE SEGMENT
344
+ for example in dataset['train']:
345
+ video_path = example['path']
346
+ activity_label = example['label'] # 0-15
347
+ start_time = example['start']
348
+ end_time = example['end']
349
+
350
+ # Load only the frames for this segment
351
+ # frames = load_video_segment(video_path, start_time, end_time)
352
+ # model.train(frames, activity_label)
353
+
354
+ # Note: The same video can appear multiple times with different segments
355
+ # E.g., "fall/fall_ch_001" might have segments [0.0-1.0] and [1.0-5.0]
356
+ ```
357
+
358
+ #### Example 2: Training with Frame-Wise Labels (Video-Level)
359
+
360
+ When using frame-wise labels, **each sample is a video** with 81 frame labels. Each video appears only once.
361
+
362
+ ```python
363
+ from datasets import load_dataset
364
+
365
+ # Load random split with frame-wise labels (video-level samples)
366
+ dataset = load_dataset("simplexsigil2/wanfall", "random", framewise=True)
367
+
368
+ print(f"Training on {len(dataset['train'])} videos") # 9,600 videos
369
+
370
+ # Training loop - each iteration is ONE VIDEO
371
+ for example in dataset['train']:
372
+ video_path = example['path']
373
+ frame_labels = example['frame_labels'] # 81 labels (one per frame)
374
+
375
+ # Load all frames from the video
376
+ # frames = load_video(video_path) # Shape: (81, H, W, 3)
377
+ # model.train(frames, frame_labels)
378
+
379
+ # Note: Each video appears exactly once with its 81 frame labels
380
+ ```
381
+
382
+ #### Example 3: Cross-Demographic Evaluation
383
+
384
+ ```python
385
+ from datasets import load_dataset
386
+
387
+ # Train on young adults, test on elderly
388
+ cross_age = load_dataset("simplexsigil2/wanfall", "cross_age", framewise=True)
389
+
390
+ # Train
391
+ for example in cross_age['train']:
392
+ age = cross_age['train'].features['age_group'].int2str(example['age_group'])
393
+ print(f"Training on {age}") # "young_adults_18_34" or "middle_aged_35_64"
394
+
395
+ # Test
396
+ for example in cross_age['test']:
397
+ age = cross_age['test'].features['age_group'].int2str(example['age_group'])
398
+ print(f"Testing on {age}") # "elderly_65_plus", "children_5_12", etc.
399
+ ```
400
+
401
+ #### Example 4: Filtering by Demographics
402
+
403
+ ```python
404
+ from datasets import load_dataset
405
+
406
+ # Load all segments
407
+ dataset = load_dataset("simplexsigil2/wanfall", "labels")
408
+ segments = dataset['train']
409
+
410
+ # Access label feature for conversion
411
+ label_feature = segments.features['label']
412
+ age_feature = segments.features['age_group']
413
+
414
+ # Filter elderly fall segments
415
+ elderly_falls = [
416
+ ex for ex in segments
417
+ if age_feature.int2str(ex['age_group']) == 'elderly_65_plus'
418
+ and ex['label'] == 1 # fall
419
  ]
420
+
421
+ print(f"Found {len(elderly_falls)} elderly fall segments")
422
  ```
423
 
424
+ ### Label Conversion
425
+
426
+ Labels are stored as integers (0-15) but can be converted to strings:
427
 
428
  ```python
429
+ dataset = load_dataset("simplexsigil2/wanfall", "random")
430
+
431
+ # Get label feature
432
+ label_feature = dataset['train'].features['label']
433
+
434
+ # Convert integer to string
435
+ label_name = label_feature.int2str(1) # "fall"
436
+
437
+ # Convert string to integer
438
+ label_id = label_feature.str2int("walk") # 0
439
+
440
+ # Access all label names
441
+ all_labels = label_feature.names
442
+ print(all_labels) # ['walk', 'fall', 'fallen', ...]
443
  ```
444
 
445
  ### Cross-Demographic Evaluation Splits
 
464
  - **Validation** (2,962 videos): Overweight
465
  - **Test** (2,972 videos): Obese
466
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
467
  ## Technical Properties
468
 
469
  ### Video Specifications
wanfall.py CHANGED
@@ -9,7 +9,10 @@ robust activity recognition across diverse populations.
9
 
10
  import pandas as pd
11
  import datasets
12
- from datasets import BuilderConfig, GeneratorBasedBuilder, Features, Value, ClassLabel, SplitGenerator, Split
 
 
 
13
 
14
 
15
  # Dataset metadata
@@ -81,15 +84,17 @@ class WanFallConfig(BuilderConfig):
81
  """BuilderConfig for WanFall dataset.
82
 
83
  Args:
84
- split_type: Type of data to load ("labels", "metadata", or split name like "random")
85
  paths_only: If True, only return video paths for split configs (no label merging)
 
86
  **kwargs: Keyword arguments forwarded to super.
87
  """
88
 
89
- def __init__(self, split_type="labels", paths_only=False, **kwargs):
90
  super().__init__(**kwargs)
91
  self.split_type = split_type
92
  self.paths_only = paths_only
 
93
 
94
 
95
  class WanFall(GeneratorBasedBuilder):
@@ -135,6 +140,13 @@ class WanFall(GeneratorBasedBuilder):
135
  description="Cross-BMI evaluation: train on normal/underweight, test on obese",
136
  split_type="cross_bmi",
137
  ),
 
 
 
 
 
 
 
138
  ]
139
 
140
  DEFAULT_CONFIG_NAME = "random"
@@ -145,6 +157,8 @@ class WanFall(GeneratorBasedBuilder):
145
  # Define features based on config type
146
  if self.config.split_type == "metadata":
147
  features = self._get_metadata_features()
 
 
148
  elif self.config.paths_only:
149
  features = self._get_paths_only_features()
150
  else:
@@ -222,9 +236,83 @@ class WanFall(GeneratorBasedBuilder):
222
  "path": Value("string"),
223
  })
224
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
225
  def _split_generators(self, dl_manager):
226
  """Define data splits and their source files."""
227
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
228
  # Handle different config types
229
  if self.config.split_type == "labels":
230
  # Labels config: single split with all temporal segments
@@ -289,8 +377,8 @@ class WanFall(GeneratorBasedBuilder):
289
  ]
290
 
291
  def _generate_examples(self, filepath=None, split_file=None, labels_path=None,
292
- split_name=None, split_dir=None):
293
- """Generate examples from CSV files.
294
 
295
  Args:
296
  filepath: Direct path to CSV file (for labels/metadata configs)
@@ -298,8 +386,137 @@ class WanFall(GeneratorBasedBuilder):
298
  labels_path: Path to labels file for merging (for split configs with full data)
299
  split_name: Name of the split being generated
300
  split_dir: Directory containing split files
 
 
301
  """
302
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
303
  # Case 1: Direct file loading (labels or metadata config)
304
  if filepath is not None:
305
  df = pd.read_csv(filepath)
 
9
 
10
  import pandas as pd
11
  import datasets
12
+ from datasets import BuilderConfig, GeneratorBasedBuilder, Features, Value, ClassLabel, SplitGenerator, Split, Sequence
13
+ import h5py
14
+ import tarfile
15
+ from pathlib import Path
16
 
17
 
18
  # Dataset metadata
 
84
  """BuilderConfig for WanFall dataset.
85
 
86
  Args:
87
+ split_type: Type of data to load ("labels", "metadata", "framewise", or split name like "random")
88
  paths_only: If True, only return video paths for split configs (no label merging)
89
+ framewise: If True, load frame-wise labels from HDF5 files (81 labels per video)
90
  **kwargs: Keyword arguments forwarded to super.
91
  """
92
 
93
+ def __init__(self, split_type="labels", paths_only=False, framewise=False, **kwargs):
94
  super().__init__(**kwargs)
95
  self.split_type = split_type
96
  self.paths_only = paths_only
97
+ self.framewise = framewise
98
 
99
 
100
  class WanFall(GeneratorBasedBuilder):
 
140
  description="Cross-BMI evaluation: train on normal/underweight, test on obese",
141
  split_type="cross_bmi",
142
  ),
143
+ WanFallConfig(
144
+ name="framewise",
145
+ version=VERSION,
146
+ description="Frame-wise labels for all videos (81 labels per video, one per frame)",
147
+ split_type="framewise",
148
+ framewise=True,
149
+ ),
150
  ]
151
 
152
  DEFAULT_CONFIG_NAME = "random"
 
157
  # Define features based on config type
158
  if self.config.split_type == "metadata":
159
  features = self._get_metadata_features()
160
+ elif self.config.framewise:
161
+ features = self._get_framewise_features()
162
  elif self.config.paths_only:
163
  features = self._get_paths_only_features()
164
  else:
 
236
  "path": Value("string"),
237
  })
238
 
239
+ def _get_framewise_features(self):
240
+ """Feature schema for frame-wise labels (81 labels per video)."""
241
+ return Features({
242
+ # Core identity
243
+ "path": Value("string"),
244
+ "dataset": Value("string"),
245
+
246
+ # Frame-wise labels (81 frames @ 16fps = 5.0625 seconds)
247
+ "frame_labels": Sequence(ClassLabel(num_classes=16, names=_ACTIVITY_LABELS), length=81),
248
+
249
+ # Demographic metadata (same as metadata config)
250
+ "age_group": ClassLabel(num_classes=6, names=_AGE_GROUPS),
251
+ "gender_presentation": ClassLabel(num_classes=2, names=_GENDERS),
252
+ "monk_skin_tone": ClassLabel(num_classes=10, names=_SKIN_TONES),
253
+ "race_ethnicity_omb": ClassLabel(num_classes=7, names=_ETHNICITIES),
254
+ "bmi_band": ClassLabel(num_classes=4, names=_BMI_BANDS),
255
+ "height_band": ClassLabel(num_classes=3, names=_HEIGHT_BANDS),
256
+
257
+ # Scene metadata
258
+ "environment_category": ClassLabel(num_classes=2, names=_ENVIRONMENTS),
259
+ "camera_shot": ClassLabel(num_classes=2, names=_CAMERA_SHOTS),
260
+ "speed": ClassLabel(num_classes=4, names=_SPEEDS),
261
+ "camera_elevation": ClassLabel(num_classes=4, names=_CAMERA_ELEVATIONS),
262
+ "camera_azimuth": ClassLabel(num_classes=4, names=_CAMERA_AZIMUTHS),
263
+ "camera_distance": ClassLabel(num_classes=2, names=_CAMERA_DISTANCES),
264
+ })
265
+
266
  def _split_generators(self, dl_manager):
267
  """Define data splits and their source files."""
268
 
269
+ # Handle framewise config - needs to download and extract HDF5 files
270
+ if self.config.framewise:
271
+ # Download the frame-wise labels archive
272
+ archive_path = dl_manager.download_and_extract("data_files/frame_wise_labels.tar.zst")
273
+
274
+ # If split_type is "framewise", return all videos in one split
275
+ if self.config.split_type == "framewise":
276
+ return [
277
+ SplitGenerator(
278
+ name=Split.TRAIN,
279
+ gen_kwargs={
280
+ "hdf5_dir": archive_path,
281
+ "metadata_path": "videos/metadata.csv",
282
+ "split_file": None,
283
+ },
284
+ ),
285
+ ]
286
+ # Otherwise, use split files (random, cross_age, etc.)
287
+ else:
288
+ split_dir = f"splits/{self.config.split_type}"
289
+ return [
290
+ SplitGenerator(
291
+ name=Split.TRAIN,
292
+ gen_kwargs={
293
+ "hdf5_dir": archive_path,
294
+ "metadata_path": "videos/metadata.csv",
295
+ "split_file": f"{split_dir}/train.csv",
296
+ },
297
+ ),
298
+ SplitGenerator(
299
+ name=Split.VALIDATION,
300
+ gen_kwargs={
301
+ "hdf5_dir": archive_path,
302
+ "metadata_path": "videos/metadata.csv",
303
+ "split_file": f"{split_dir}/val.csv",
304
+ },
305
+ ),
306
+ SplitGenerator(
307
+ name=Split.TEST,
308
+ gen_kwargs={
309
+ "hdf5_dir": archive_path,
310
+ "metadata_path": "videos/metadata.csv",
311
+ "split_file": f"{split_dir}/test.csv",
312
+ },
313
+ ),
314
+ ]
315
+
316
  # Handle different config types
317
  if self.config.split_type == "labels":
318
  # Labels config: single split with all temporal segments
 
377
  ]
378
 
379
  def _generate_examples(self, filepath=None, split_file=None, labels_path=None,
380
+ split_name=None, split_dir=None, hdf5_dir=None, metadata_path=None):
381
+ """Generate examples from CSV files or HDF5 files.
382
 
383
  Args:
384
  filepath: Direct path to CSV file (for labels/metadata configs)
 
386
  labels_path: Path to labels file for merging (for split configs with full data)
387
  split_name: Name of the split being generated
388
  split_dir: Directory containing split files
389
+ hdf5_dir: Directory containing extracted HDF5 files (for framewise config)
390
+ metadata_path: Path to metadata CSV (for framewise config)
391
  """
392
 
393
+ # Case 0: Frame-wise labels from HDF5 files
394
+ if hdf5_dir is not None:
395
+ # Load metadata
396
+ metadata_df = pd.read_csv(metadata_path)
397
+
398
+ # If split_file is provided, load the video paths for this split
399
+ valid_paths = None
400
+ if split_file is not None:
401
+ split_df = pd.read_csv(split_file)
402
+ valid_paths = set(split_df['path'].tolist())
403
+
404
+ # hdf5_dir might be a tar file (if zst extraction only decompressed to tar)
405
+ # Check if it's a tar file and handle appropriately
406
+ hdf5_path = Path(hdf5_dir)
407
+
408
+ if hdf5_path.is_file() and (hdf5_path.suffix == '.tar' or tarfile.is_tarfile(str(hdf5_path))):
409
+ # It's a tar file - iterate through it directly
410
+ idx = 0
411
+ with tarfile.open(hdf5_path, 'r') as tar:
412
+ for member in tar.getmembers():
413
+ if not member.name.endswith('.h5'):
414
+ continue
415
+
416
+ # Extract path from tar member name
417
+ # e.g., "./lie_down/lie_down_yo_076.h5" or "lie_down/lie_down_yo_076.h5"
418
+ video_path = member.name.lstrip('./').replace('.h5', '')
419
+
420
+ # Skip if not in this split
421
+ if valid_paths is not None and video_path not in valid_paths:
422
+ continue
423
+
424
+ try:
425
+ # Extract H5 file to memory
426
+ h5_file = tar.extractfile(member)
427
+ if h5_file is None:
428
+ continue
429
+
430
+ # h5py can't read from file-like object directly, need temp file
431
+ import tempfile
432
+ with tempfile.NamedTemporaryFile(suffix='.h5', delete=True) as tmp:
433
+ tmp.write(h5_file.read())
434
+ tmp.flush()
435
+
436
+ with h5py.File(tmp.name, 'r') as f:
437
+ frame_labels = f['label_indices'][:].tolist()
438
+
439
+ # Get metadata for this video
440
+ video_metadata = metadata_df[metadata_df['path'] == video_path]
441
+
442
+ if len(video_metadata) == 0:
443
+ continue
444
+
445
+ video_meta = video_metadata.iloc[0]
446
+
447
+ # Create example
448
+ example = {
449
+ "path": video_path,
450
+ "dataset": "wanfall",
451
+ "frame_labels": frame_labels,
452
+ }
453
+
454
+ # Add metadata fields
455
+ metadata_fields = [
456
+ "age_group", "gender_presentation", "monk_skin_tone",
457
+ "race_ethnicity_omb", "bmi_band", "height_band",
458
+ "environment_category", "camera_shot", "speed",
459
+ "camera_elevation", "camera_azimuth", "camera_distance"
460
+ ]
461
+ for field in metadata_fields:
462
+ if field in video_meta and pd.notna(video_meta[field]):
463
+ example[field] = str(video_meta[field])
464
+
465
+ yield idx, example
466
+ idx += 1
467
+
468
+ except Exception as e:
469
+ print(f"Warning: Failed to process {member.name}: {e}")
470
+ continue
471
+ else:
472
+ # It's a directory - glob for H5 files
473
+ hdf5_files = sorted(hdf5_path.glob("**/*.h5"))
474
+
475
+ idx = 0
476
+ for h5_file in hdf5_files:
477
+ relative_path = h5_file.relative_to(hdf5_path)
478
+ video_path = str(relative_path.with_suffix(''))
479
+
480
+ # Skip if not in this split
481
+ if valid_paths is not None and video_path not in valid_paths:
482
+ continue
483
+
484
+ try:
485
+ with h5py.File(h5_file, 'r') as f:
486
+ frame_labels = f['label_indices'][:].tolist()
487
+
488
+ video_metadata = metadata_df[metadata_df['path'] == video_path]
489
+
490
+ if len(video_metadata) == 0:
491
+ continue
492
+
493
+ video_meta = video_metadata.iloc[0]
494
+
495
+ example = {
496
+ "path": video_path,
497
+ "dataset": "wanfall",
498
+ "frame_labels": frame_labels,
499
+ }
500
+
501
+ metadata_fields = [
502
+ "age_group", "gender_presentation", "monk_skin_tone",
503
+ "race_ethnicity_omb", "bmi_band", "height_band",
504
+ "environment_category", "camera_shot", "speed",
505
+ "camera_elevation", "camera_azimuth", "camera_distance"
506
+ ]
507
+ for field in metadata_fields:
508
+ if field in video_meta and pd.notna(video_meta[field]):
509
+ example[field] = str(video_meta[field])
510
+
511
+ yield idx, example
512
+ idx += 1
513
+
514
+ except Exception as e:
515
+ print(f"Warning: Failed to process {h5_file}: {e}")
516
+ continue
517
+
518
+ return
519
+
520
  # Case 1: Direct file loading (labels or metadata config)
521
  if filepath is not None:
522
  df = pd.read_csv(filepath)