MMSearch-Plus / mmsearch_plus.py
"""MMSearch-Plus dataset with transparent decryption."""
import base64
import hashlib
import io
import os
from typing import Optional
import datasets
from PIL import Image
_CITATION = """\
@article{tao2025mmsearch,
  title={MMSearch-Plus: A Simple Yet Challenging Benchmark for Multimodal Browsing Agents},
  author={Tao, Xijia and Teng, Yihua and Su, Xinxing and Fu, Xinyu and Wu, Jihao and Tao, Chaofan and Liu, Ziru and Bai, Haoli and Liu, Rui and Kong, Lingpeng},
  journal={arXiv preprint arXiv:2508.21475},
  year={2025}
}
"""
_DESCRIPTION = """\
MMSearch-Plus is a challenging benchmark designed to test multimodal browsing agents' ability to perform genuine visual reasoning.
Unlike existing benchmarks where many tasks can be solved with text-only approaches, MMSearch-Plus requires models to extract
and use fine-grained visual cues through iterative image-text retrieval.
"""
_HOMEPAGE = "https://mmsearch-plus.github.io/"
_LICENSE = "CC BY-NC 4.0"
_URLS = {
    "train": [
        "data-00000-of-00002.arrow",
        "data-00001-of-00002.arrow",
    ]
}


def derive_key(password: str, length: int) -> bytes:
    """Derive encryption key from password using SHA-256."""
    hasher = hashlib.sha256()
    hasher.update(password.encode())
    key = hasher.digest()
    return key * (length // len(key)) + key[: length % len(key)]
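
# Illustrative note (a sketch, not used by the loader): the 32-byte SHA-256 digest
# is repeated and truncated to the requested length, so e.g. derive_key("canary", 70)
# returns the digest twice followed by its first 6 bytes. The XOR keystream therefore
# always matches the length of the data being decrypted.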


def decrypt_image(ciphertext_b64: str, password: str) -> Optional[Image.Image]:
    """Decrypt base64-encoded image bytes back to a PIL Image; return None on failure."""
    if not ciphertext_b64:
        return None
    try:
        encrypted = base64.b64decode(ciphertext_b64)
        key = derive_key(password, len(encrypted))
        decrypted = bytes([a ^ b for a, b in zip(encrypted, key)])
        # Convert bytes back to PIL Image
        img_buffer = io.BytesIO(decrypted)
        image = Image.open(img_buffer)
        return image
    except Exception:
        return None
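
# Note: only text fields are stored encrypted in this dataset; image fields remain
# plain PIL Images (see _generate_examples), so decrypt_image is kept for completeness
# but is not called by the loader below.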


def decrypt_text(ciphertext_b64: str, password: str) -> str:
    """Decrypt base64-encoded ciphertext using XOR cipher with derived key."""
    if not ciphertext_b64:
        return ciphertext_b64
    try:
        encrypted = base64.b64decode(ciphertext_b64)
        key = derive_key(password, len(encrypted))
        decrypted = bytes([a ^ b for a, b in zip(encrypted, key)])
        return decrypted.decode('utf-8')
    except Exception:
        return ciphertext_b64
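
# Illustrative round trip (a sketch; the encryption side lives outside this script).
# XOR with the same keystream is its own inverse, so one helper suffices for decryption:
#   plaintext  = "example".encode()
#   ciphertext = base64.b64encode(
#       bytes(a ^ b for a, b in zip(plaintext, derive_key("canary", len(plaintext))))
#   ).decode()
#   decrypt_text(ciphertext, "canary")  # -> "example"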


class MmsearchPlus(datasets.GeneratorBasedBuilder):
    """MMSearch-Plus dataset with transparent decryption."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        # Define features to handle the complete dataset schema
        features = datasets.Features({
            "question": datasets.Value("string"),
            "answer": datasets.Sequence(datasets.Value("string")),
            "num_images": datasets.Value("int64"),
            "arxiv_id": datasets.Value("string"),
            "video_url": datasets.Value("string"),
            "category": datasets.Value("string"),
            "difficulty": datasets.Value("string"),
            "subtask": datasets.Value("string"),
            # Image fields (not encrypted, kept as PIL Images)
            "img_1": datasets.Image(),
            "img_2": datasets.Image(),
            "img_3": datasets.Image(),
            "img_4": datasets.Image(),
            "img_5": datasets.Image(),
            # Additional fields that might exist in the dataset
            "choices": datasets.Sequence(datasets.Value("string")),
            "question_zh": datasets.Value("string"),
            "answer_zh": datasets.Sequence(datasets.Value("string")),
            "regex": datasets.Value("string"),
            "text_criteria": datasets.Value("string"),
            "original_filename": datasets.Value("string"),
            "screenshots_dir": datasets.Value("string"),
            "time_points": datasets.Sequence(datasets.Value("string")),
            "search_query": datasets.Value("string"),
            "question_type": datasets.Value("string"),
            "requires_image_understanding": datasets.Value("bool"),
            "source": datasets.Value("string"),
            "content_keywords": datasets.Value("string"),
            "reasoning": datasets.Value("string"),
            "processed_at": datasets.Value("string"),
            "model_used": datasets.Value("string"),
            "entry_index": datasets.Value("int64"),
            "original_image_paths": datasets.Sequence(datasets.Value("string")),
            "masked_image_paths": datasets.Sequence(datasets.Value("string")),
            "is_valid": datasets.Value("bool"),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Get canary from the environment variable or from the builder instance
        canary = os.environ.get("MMSEARCH_PLUS")
        # A canary passed at builder initialization takes precedence over the env var
        if getattr(self, "canary", None):
            canary = self.canary
        if not canary:
            raise ValueError(
                "Canary string is required for decryption. Either set the MMSEARCH_PLUS "
                "environment variable or pass it via the dataset loading kwargs. "
                "Example: load_dataset('path/to/dataset', trust_remote_code=True) after setting "
                "os.environ['MMSEARCH_PLUS'] = 'your_canary_string'"
            )
        # Download files
        urls = _URLS["train"]
        downloaded_files = dl_manager.download(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": downloaded_files,
                    "canary": canary,
                },
            ),
        ]

    def _generate_examples(self, filepaths, canary):
        """Generate examples with transparent decryption."""
        example_id = 0
        for filepath in filepaths:
            # Load the arrow file
            arrow_dataset = datasets.Dataset.from_file(filepath)
            for idx in range(len(arrow_dataset)):
                example = arrow_dataset[idx]
                # Decrypt text fields - matches encryption script fields
                text_fields = ['question', 'video_url', 'arxiv_id']
                for field in text_fields:
                    if example.get(field):
                        example[field] = decrypt_text(example[field], canary)
                # Handle answer field (list of strings)
                if example.get("answer"):
                    decrypted_answers = []
                    for answer in example["answer"]:
                        if answer:
                            decrypted_answers.append(decrypt_text(answer, canary))
                        else:
                            decrypted_answers.append(answer)
                    example["answer"] = decrypted_answers
                # Images are not encrypted - they remain as PIL Image objects
                # No image decryption needed as per the encryption script
                yield example_id, example
                example_id += 1
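

# Usage sketch (illustrative; not executed when `datasets` imports this script).
# It mirrors the example given in the _split_generators error message: set the
# canary via the MMSEARCH_PLUS environment variable, then load with remote code
# enabled. The repository id below is a placeholder for the actual dataset path.
if __name__ == "__main__":
    from datasets import load_dataset

    os.environ["MMSEARCH_PLUS"] = "your_canary_string"  # placeholder canary
    ds = load_dataset("path/to/MMSearch-Plus", trust_remote_code=True, split="train")
    print(ds[0]["question"])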