""" HuggingFace Dataset Loader for SimEIT - Electrical Impedance Tomography Dataset This loader supports loading EIT data from HDF5 files with train/validation/test splits. The dataset contains voltage measurements and conductivity maps (images) at different resolutions. """ import datasets import h5py import numpy as np from pathlib import Path class EITDatasetConfig(datasets.BuilderConfig): """BuilderConfig for EIT Dataset.""" def __init__(self, subset="CirclesOnly", image_resolution="128_log", **kwargs): """ Args: subset: Which dataset subset to load ("CirclesOnly" or "FourObjects") image_resolution: Image resolution to load ("32_log", "64_log", "128_log", or "256") **kwargs: keyword arguments forwarded to super. """ super(EITDatasetConfig, self).__init__(**kwargs) self.subset = subset self.image_resolution = image_resolution class EITDataset(datasets.GeneratorBasedBuilder): """A custom dataset loader for EIT (Electrical Impedance Tomography) .h5 files.""" BUILDER_CONFIGS = [ EITDatasetConfig( name="circles_128", version=datasets.Version("1.0.0"), description="CirclesOnly dataset with 128x128 resolution (log scale)", subset="CirclesOnly", image_resolution="128_log" ), EITDatasetConfig( name="circles_256", version=datasets.Version("1.0.0"), description="CirclesOnly dataset with 256x256 resolution", subset="CirclesOnly", image_resolution="256" ), EITDatasetConfig( name="four_objects_128", version=datasets.Version("1.0.0"), description="FourObjects dataset with 128x128 resolution (log scale)", subset="FourObjects", image_resolution="128_log" ), EITDatasetConfig( name="four_objects_256", version=datasets.Version("1.0.0"), description="FourObjects dataset with 256x256 resolution", subset="FourObjects", image_resolution="256" ), ] DEFAULT_CONFIG_NAME = "circles_128" def _info(self): """Define the features (columns) of the dataset.""" # Determine image shape based on resolution if self.config.image_resolution == "256": image_shape = (256, 256) elif self.config.image_resolution == "128_log": image_shape = (128, 128) elif self.config.image_resolution == "64_log": image_shape = (64, 64) elif self.config.image_resolution == "32_log": image_shape = (32, 32) else: image_shape = (128, 128) # default return datasets.DatasetInfo( description=( "SimEIT: A Scalable Simulation Framework for Generating Large-Scale " "Electrical Impedance Tomography Datasets. This dataset contains " "voltage measurements and corresponding conductivity maps for EIT imaging." 
            ),
            features=datasets.Features({
                "voltage_measurements": datasets.Sequence(datasets.Value("float32")),
                "conductivity_map": datasets.Array2D(shape=image_shape, dtype="float32"),
                "graph_representation": datasets.Sequence(datasets.Value("float32")),
                "sample_id": datasets.Value("int32"),
            }),
            homepage="https://huggingface.co/datasets/your-dataset-repo",
            license="apache-2.0",
            citation="",
        )

    def _split_generators(self, dl_manager):
        """Define data splits and their corresponding files."""
        # Get the base path - assumes the script is in the dataset directory,
        # or you can modify this to point to your data location
        base_path = Path(self.config.data_dir) if self.config.data_dir else Path(".")
        subset_path = base_path / self.config.subset

        # Path to the HDF5 file
        h5_file = subset_path / "dataset.h5"

        # Paths to split files
        train_split_file = subset_path / "parameters" / "train.txt"
        val_split_file = subset_path / "parameters" / "val.txt"
        test_split_file = subset_path / "parameters" / "test.txt"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": str(h5_file),
                    "split_file": str(train_split_file),
                    "image_resolution": self.config.image_resolution,
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": str(h5_file),
                    "split_file": str(val_split_file),
                    "image_resolution": self.config.image_resolution,
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": str(h5_file),
                    "split_file": str(test_split_file),
                    "image_resolution": self.config.image_resolution,
                }
            ),
        ]

    def _generate_examples(self, filepath, split_file, image_resolution):
        """
        Read the .h5 file and yield examples based on the split file.

        Args:
            filepath: Path to the HDF5 file
            split_file: Path to the text file containing sample indices for this split
            image_resolution: Resolution of images to load
        """
        # Read the split indices
        with open(split_file, 'r') as f:
            indices = [int(line.strip()) for line in f if line.strip()]

        # Open the HDF5 file and load data
        with h5py.File(filepath, "r") as h5_file:
            # Access the datasets
            voltage_data = h5_file["volt"]["16"]  # Shape: (256, 110000)
            image_data = h5_file["image"][image_resolution]  # Shape: (H, W, 110000)

            # Check if graph data exists for this resolution
            graph_key = image_resolution if image_resolution != "256" else "128_log"
            if graph_key in h5_file["graph"]:
                graph_data = h5_file["graph"][graph_key]
            else:
                graph_data = None

            # Iterate over the indices for this split
            for idx, sample_idx in enumerate(indices):
                # Extract data for this sample
                voltage_measurements = voltage_data[:, sample_idx].astype(np.float32)
                conductivity_map = image_data[:, :, sample_idx].astype(np.float32)

                # Prepare the example
                example = {
                    "voltage_measurements": voltage_measurements.tolist(),
                    "conductivity_map": conductivity_map,
                    "sample_id": sample_idx,
                }

                # Add graph representation if available
                if graph_data is not None:
                    graph_representation = graph_data[:, sample_idx].astype(np.float32)
                    example["graph_representation"] = graph_representation.tolist()
                else:
                    # Provide empty list if graph data is not available
                    example["graph_representation"] = []

                yield idx, example

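
# Expected on-disk layout, as inferred from the loader above (this is an
# assumption about how a local copy of the dataset is organized; adjust the
# paths in _split_generators if your copy differs):
#
#   <data_dir>/
#       CirclesOnly/  (or FourObjects/)
#           dataset.h5           # HDF5 groups: volt/16, image/<resolution>, graph/<resolution>
#           parameters/
#               train.txt        # one sample index per line
#               val.txt
#               test.txt
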
# Example usage:
if __name__ == "__main__":
    # Example 1: Load the dataset with default configuration
    print("Loading CirclesOnly dataset with 128x128 resolution...")
    dataset = datasets.load_dataset(
        __file__,
        name="circles_128",
        data_dir="https://huggingface.co/datasets/AymanAmeen/SimEIT-dataset",
        trust_remote_code=True
    )

    print(f"Train split size: {len(dataset['train'])}")
    print(f"Validation split size: {len(dataset['validation'])}")
    print(f"Test split size: {len(dataset['test'])}")

    # Access a single example
    example = dataset['train'][0]
    print("\nExample structure:")
    print(f"  Voltage measurements shape: {len(example['voltage_measurements'])}")
    # np.asarray handles both the nested-list and ndarray forms that the
    # Array2D feature can return, depending on the dataset's output format
    print(f"  Conductivity map shape: {np.asarray(example['conductivity_map']).shape}")
    print(f"  Graph representation shape: {len(example['graph_representation'])}")
    print(f"  Sample ID: {example['sample_id']}")

    # Example 2: Load FourObjects dataset
    print("\n" + "="*50)
    print("Loading FourObjects dataset with 256x256 resolution...")
    dataset_4obj = datasets.load_dataset(
        __file__,
        name="four_objects_256",
        data_dir="https://huggingface.co/datasets/AymanAmeen/SimEIT-dataset",
        trust_remote_code=True
    )
    print(f"Train split size: {len(dataset_4obj['train'])}")

    # Example 3: Iterate through a few samples
    print("\n" + "="*50)
    print("Iterating through first 3 samples...")
    for i, sample in enumerate(dataset['train'].select(range(3))):
        print(f"Sample {i}: ID={sample['sample_id']}, "
              f"Voltage shape={len(sample['voltage_measurements'])}, "
              f"Image shape={np.asarray(sample['conductivity_map']).shape}")
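
    # Example 4 (sketch): format the train split as NumPy arrays for model training.
    # This is an illustrative addition that assumes the feature types defined in
    # _info() above; with_format("numpy") is standard `datasets` API, but the exact
    # array shapes depend on the configuration you load.
    print("\n" + "="*50)
    print("Formatting the train split as NumPy arrays...")
    train_np = dataset['train'].with_format("numpy")
    first = train_np[0]
    voltages = first['voltage_measurements']   # 1D float32 array of voltage measurements
    conductivity = first['conductivity_map']   # 2D float32 array (H, W)
    print(f"Voltage array shape: {voltages.shape}, dtype: {voltages.dtype}")
    print(f"Conductivity map shape: {conductivity.shape}, dtype: {conductivity.dtype}")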