import os import re import glob import json import argparse import random import uuid from tqdm import tqdm from pathlib import Path from collections import defaultdict def parse_ground_truth(name): """Extract ground truth rotation axis and angle from filename or folder name""" # Remove file extension if present basename = name.split(".")[0] if "." in name else name parts = basename.split("_") if len(parts) >= 4: # figXXXX_XXX_axis_angle rotation_axis = parts[-2] # Second to last element is axis rotation_angle = int(parts[-1]) # Last element is angle # Convert negative angles to 0-360 range if rotation_angle < 0: rotation_angle += 360 return rotation_axis, rotation_angle print(f"Warning: Could not parse name: {basename}") return None, None def load_examples(example_dir, generation_mode): """Load example images from the example directory""" if generation_mode == "combined": # Load all single PNG files from the example directory files = glob.glob(os.path.join(example_dir, "*.png")) print(f"Found {len(files)} combined example images in {example_dir}") return files else: # separate mode # Find all folders in the example directory folders = [f for f in glob.glob(os.path.join(example_dir, "*")) if os.path.isdir(f)] # Filter folders that contain both _ini.png and _rot.png files valid_folders = [] for folder in folders: folder_name = os.path.basename(folder) ini_file = os.path.join(folder, f"{folder_name}_ini.png") rot_file = os.path.join(folder, f"{folder_name}_rot.png") if os.path.exists(ini_file) and os.path.exists(rot_file): valid_folders.append(folder) print(f"Found {len(valid_folders)} example folder pairs in {example_dir}") return valid_folders def organize_examples(examples, generation_mode): """Organize examples by rotation axis and angle""" organized = defaultdict(list) for example in examples: basename = os.path.basename(example) if generation_mode == "combined": basename = basename.split(".")[0] axis, angle = parse_ground_truth(basename) if axis is None or angle is None: continue key = (axis, angle) organized[key].append(example) # Print statistics print("\nDistribution of examples by axis-angle:") for key, examples_list in organized.items(): print(f" {key[0]}-axis, {key[1]} degrees: {len(examples_list)} examples") return dict(organized) def select_example(organized_examples, test_axis): """Select a single random example for the test case""" # Collect all examples for this axis regardless of angle all_examples_for_axis = [] for (axis, angle), example_list in organized_examples.items(): if axis == test_axis: for example in example_list: all_examples_for_axis.append((example, angle)) # If we have any examples for this axis, select one randomly if all_examples_for_axis and len(all_examples_for_axis) > 0: return random.choice(all_examples_for_axis) else: print(f"Warning: No examples found for rotation around {test_axis}-axis") return None def construct_prompt_with_example(axis, angle_increment, example=None, difficulty="easy", generation_mode="combined"): """Create prompt for the VLM with an in-context example""" # Generate list of all possible rotation angles based on angle increment possible_angles = [] current_angle = 0 + angle_increment while current_angle < 360: possible_angles.append(current_angle) current_angle += angle_increment # Common instructions for both modes coordinate_system = ( f"The 3D Cartesian coordinate system is defined as follows: " f"\n- x-axis: points horizontally from left to right (positive direction is right)" f"\n- y-axis: points vertically from bottom to top (positive direction is up)" f"\n- z-axis: points from inside the image toward the viewer (positive direction is out of the screen)" f"\n\nWhen discussing rotations around an axis, imagine looking along the positive direction of that axis (as if looking from the origin toward the positive end)." ) angle_constraints = ( f"The rotation angle is always a multiple of {angle_increment} degrees between 0 and 360 degrees inclusive. " f"A positive angle means rotation in the CLOCKWISE direction when looking along the positive direction of the axis. " ) # Add example text if an example is provided example_text = "" if example: _, example_angle = example if generation_mode == "combined": example_text = f"\n### EXAMPLE OF ROTATION ###\n\nExample: Image 1 shows a 3D object with its left half showing the initial view and right half showing a {example_angle} degree rotation around the {axis}-axis.\n" else: # separate mode example_text = f"\n### EXAMPLE OF ROTATION ###\n\nExample: Image 1 shows the initial view and Image 2 shows the object after a {example_angle} degree rotation around the {axis}-axis.\n" # Different instructions based on difficulty if difficulty == "easy": # For easy mode - axis is provided, internal reasoning but only output number thinking_instructions = ( f"IMPORTANT: Please follow this systematic approach to determine the rotation angle:" f"\n\n1. First, analyze the object's features in both views to understand its structure." f"\n\n2. For the {axis}-axis rotation, you must evaluate ALL of these possible rotation angles: {possible_angles}" f"\n - For each angle in the list, mentally visualize what the object would look like after rotating around the {axis}-axis by that amount" f"\n - Compare these visualizations with the actual second view" f"\n - DO NOT make a decision until you have evaluated all possible angles in the list" f"\n\n3. After evaluating all angles, choose the one that best matches the observed changes" f"\n\n4. Verify your answer by mentally applying the rotation to confirm it matches the second view" ) # Updated response format to match rot_pred_sft.py response_format = ( f"IMPORTANT: You must ONLY output the rotation angle as a number from this list: {possible_angles}. " f"Your output should contain ONLY the number. " f"Do NOT include any reasoning, explanation, or additional text - ONLY the number." f"\n\nExample of correct output format: 30" f"\n\nIncorrect output formats:" f"\n\"I think it's 30 degrees\"" f"\n\"The rotation angle is 30\"" f"\n\"30 degrees\"" ) task_description = ( f"Your task is to determine the angle of rotation around the {axis}-axis in degrees." ) else: # hard mode - axis is not provided thinking_instructions = ( f"IMPORTANT: Please follow this systematic approach to determine the rotation:" f"\n\n1. First, analyze the object's features in both views to understand its structure." f"\n\n2. Consider what would happen if rotation occurred around each of the three axes (x, y, and z):" f"\n - For x-axis rotation: What specific features would change and how?" f"\n - For y-axis rotation: What specific features would change and how?" f"\n - For z-axis rotation: What specific features would change and how?" f"\n - Based on the observed changes, explain which axis makes the most sense and why." f"\n\n3. Once you've determined the most likely axis, evaluate ALL of these possible rotation angles: {possible_angles}" f"\n - For each angle in the list, describe what the object would look like after rotating around your chosen axis by that amount" f"\n - Compare these descriptions with the actual second view" f"\n - DO NOT make a decision until you have evaluated all angles in the list" f"\n\n4. After evaluating all angles, choose the one that best matches the observed changes" ) response_format = ( f"Place your detailed reasoning process in tags. Your reasoning should include:" f"\n- Analysis of how rotation around each axis would affect the object" f"\n- Systematic evaluation of possible rotation angles from the provided list" f"\n- Specific visual features you used to determine your answer" f"\n\nThen provide your final answer in and tags respectively (use only x, y, or z for axis and only a number from the list for angle)." f"\ni.e., your reasoning process here your predicted axis here your predicted degrees here " ) task_description = ( f"Your task is to determine which axis the object was rotated around and by what angle." ) # Generate the prompt based on generation mode if generation_mode == "combined": test_img_num = 2 if example else 1 # If we have an example, test image is #2 prompt = ( f"IMPORTANT: I'm showing you {2 if example else 1} image{'s' if example else ''} of 3D objects. " f"{'Each' if example else 'The'} image contains TWO separate 3D renderings side-by-side. " f"\n\nThe LEFT HALF shows a 3D object in its initial orientation. " f"The RIGHT HALF shows the SAME 3D object after being rotated." f"\n\n{task_description}" f"\n\n{coordinate_system}" f"\n\n{angle_constraints}" f"\n\n{example_text}" f"\n\n### YOUR TASK ###" f"\nNow, for Image {test_img_num}, determine the angle of rotation around the {axis}-axis." f"\n{'' if not example else 'Based on the example provided, '}analyze Image {test_img_num} carefully." f"\n\n{thinking_instructions}" f"\n\n{response_format}" ) else: # separate mode # Calculate image numbers based on examples test_img_start = 3 if example else 1 # If we have an example (2 images), test starts at #3 test_img_end = 4 if example else 2 prompt = ( f"I'm showing you {4 if example else 2} images of 3D objects. " f"{'For each example or test case, ' if example else ''}two images represent the same object before and after rotation." f"\n\n{task_description}" f"\n\n{coordinate_system}" f"\n\n{angle_constraints}" f"\n\n{example_text}" f"\n\n### YOUR TASK ###" f"\nNow, determine the angle of rotation around the {axis}-axis from Image {test_img_start} to Image {test_img_end}." f"\n{'' if not example else 'Based on the example provided, '}analyze the rotation carefully." f"\n\n{thinking_instructions}" f"\n\n{response_format}" ) return prompt def create_metadata_jsonl_combined(input_dir, output_file, example_dir=None, angle_increment=30, difficulty="easy"): """Create metadata JSONL file for all images in input_dir (combined mode)""" # Get all PNG files in the input directory png_files = glob.glob(os.path.join(input_dir, "*.png")) # Sort files to ensure consistent order png_files = sorted(png_files) if not png_files: print(f"No PNG files found in {input_dir}") return print(f"Found {len(png_files)} PNG files in {input_dir}") # Load and organize examples if example_dir is provided organized_examples = None if example_dir: examples = load_examples(example_dir, "combined") organized_examples = organize_examples(examples, "combined") # Create output directory if it doesn't exist output_dir = os.path.dirname(output_file) os.makedirs(output_dir, exist_ok=True) # Process each file and create metadata entries entries = [] for png_file in tqdm(png_files, desc="Creating metadata for combined mode"): # Parse ground truth from filename axis, angle = parse_ground_truth(os.path.basename(png_file)) if axis is None or angle is None: print(f"Skipping {png_file} - could not parse ground truth") continue # Get the relative path to the image rel_path = os.path.relpath(png_file, os.path.dirname(output_file)) # Generate a unique ID based on the filename image_base_id = os.path.splitext(os.path.basename(png_file))[0] # Select an example if examples are available example = None if organized_examples: example = select_example(organized_examples, axis) # Construct prompt with or without example prompt = construct_prompt_with_example(axis, angle_increment, example, difficulty, generation_mode="combined") # Create assistant response based on difficulty if difficulty == "easy": # For easy mode, just output the number assistant_content = f"{angle}" else: # For hard mode, include both axis and angle in XML tags assistant_content = f"Detailed reasoning about rotation axis and angle...{axis}{angle}" # Create the conversations array conversations = [] # Add human message with prompt and images human_value = "" # Add example image if available if example: example_path, _ = example example_rel_path = os.path.relpath(example_path, os.path.dirname(output_file)) human_value += f"{example_rel_path}\n" # Add test image human_value += f"{rel_path}\n{prompt}" conversations.append({ "from": "human", "value": human_value }) # Add assistant response conversations.append({ "from": "gpt", "value": assistant_content }) # Create entry with the correct format entry = { "id": image_base_id, "image": rel_path, "conversations": conversations } entries.append(entry) # Write entries to JSONL file with open(output_file, 'w') as f: for entry in entries: f.write(json.dumps(entry) + '\n') print(f"\nSummary for combined mode:") print(f" Found {len(png_files)} PNG files") print(f" Created metadata for {len(entries)} entries") print(f" Output file: {output_file}") def create_metadata_jsonl_separate(input_dir, output_file, example_dir=None, angle_increment=30, difficulty="easy"): """Create metadata JSONL file for folders in input_dir (separate mode)""" # Get all directories in the input directory folders = [f for f in glob.glob(os.path.join(input_dir, "*")) if os.path.isdir(f) and os.path.basename(f) != "examples"] # Sort folders to ensure consistent order folders = sorted(folders) if not folders: print(f"No folders found in {input_dir}") return print(f"Found {len(folders)} folders in {input_dir}") # Load and organize examples if example_dir is provided organized_examples = None if example_dir: examples = load_examples(example_dir, "separate") organized_examples = organize_examples(examples, "separate") # Create output directory if it doesn't exist output_dir = os.path.dirname(output_file) os.makedirs(output_dir, exist_ok=True) # Process each folder and create metadata entries entries = [] valid_folders = 0 for folder in tqdm(folders, desc="Creating metadata for separate mode"): folder_name = os.path.basename(folder) # Parse ground truth from folder name axis, angle = parse_ground_truth(folder_name) if axis is None or angle is None: print(f"Skipping {folder} - could not parse ground truth") continue # Check for the two required images in the folder ini_path = os.path.join(folder, f"{folder_name}_ini.png") rot_path = os.path.join(folder, f"{folder_name}_rot.png") if not os.path.exists(ini_path): print(f"Skipping {folder} - missing initial view image") continue if not os.path.exists(rot_path): print(f"Skipping {folder} - missing rotated view image") continue # Get the relative paths to the images rel_ini_path = os.path.relpath(ini_path, os.path.dirname(output_file)) rel_rot_path = os.path.relpath(rot_path, os.path.dirname(output_file)) # Select an example if examples are available example = None image_paths = [] if organized_examples: example = select_example(organized_examples, axis) # Construct prompt with or without example prompt = construct_prompt_with_example(axis, angle_increment, example, difficulty, generation_mode="separate") # Create assistant response based on difficulty if difficulty == "easy": # For easy mode, just output the number assistant_content = f"{angle}" else: # For hard mode, include both axis and angle in XML tags assistant_content = f"Detailed reasoning about rotation axis and angle...{axis}{angle}" # Create the conversations array conversations = [] # Prepare images array for the entry all_image_paths = [] # Add example images if available if example: example_folder, _ = example example_folder_name = os.path.basename(example_folder) example_ini_path = os.path.join(example_folder, f"{example_folder_name}_ini.png") example_rot_path = os.path.join(example_folder, f"{example_folder_name}_rot.png") example_rel_ini_path = os.path.relpath(example_ini_path, os.path.dirname(output_file)) example_rel_rot_path = os.path.relpath(example_rot_path, os.path.dirname(output_file)) all_image_paths.append(example_rel_ini_path) all_image_paths.append(example_rel_rot_path) # Add test images all_image_paths.append(rel_ini_path) all_image_paths.append(rel_rot_path) # Add human message with prompt and images - format with tags at the beginning human_value = "\n\n\n\n" + prompt conversations.append({ "from": "human", "value": human_value }) # Add assistant response conversations.append({ "from": "gpt", "value": assistant_content }) # Create entry with the correct format entry = { "id": folder_name, "image": all_image_paths, "conversations": conversations } entries.append(entry) valid_folders += 1 # Write entries to JSONL file with open(output_file, 'w') as f: for entry in entries: f.write(json.dumps(entry) + '\n') print(f"\nSummary for separate mode:") print(f" Found {len(folders)} folders") print(f" Created metadata for {valid_folders} valid folders") print(f" Output file: {output_file}") def main(): parser = argparse.ArgumentParser(description="Create metadata JSONL for rotation dataset") parser.add_argument('--input-dir', type=str, required=True, help="Directory containing rotation dataset images or folders") parser.add_argument('--output-file', type=str, default="metadata.jsonl", help="Output JSONL file path") parser.add_argument('--example-dir', type=str, default=None, help="Directory containing example images for in-context learning") parser.add_argument('--angle-increment', type=int, default=30, help="Angle increment used in the dataset (e.g., 30, 45, 90)") parser.add_argument('--difficulty', type=str, choices=["easy", "hard"], default="easy", help="Difficulty mode: easy (axis provided) or hard (axis not provided)") parser.add_argument('--generation-mode', type=str, choices=["combined", "separate"], default="combined", help="Mode for dataset generation (combined = one image with both views, separate = folder with two images)") parser.add_argument('--random-seed', type=int, default=None, help="Random seed for example selection (None for true randomness)") args = parser.parse_args() # Set random seed for reproducibility if provided if args.random_seed is not None: print(f"Using fixed random seed: {args.random_seed}") random.seed(args.random_seed) else: print("Using true randomness (different examples each run)") print(f"Creating metadata JSONL for rotation dataset:") print(f"Input directory: {args.input_dir}") print(f"Output file: {args.output_file}") if args.example_dir: print(f"Example directory: {args.example_dir}") print(f"Angle increment: {args.angle_increment} degrees") print(f"Difficulty mode: {args.difficulty}") print(f"Generation mode: {args.generation_mode}") # Check if example_dir is None but there's an 'examples' subdirectory in input_dir if args.example_dir is None and os.path.exists(os.path.join(args.input_dir, "examples")): args.example_dir = os.path.join(args.input_dir, "examples") print(f"Using examples directory: {args.example_dir}") if args.generation_mode == "combined": create_metadata_jsonl_combined( input_dir=args.input_dir, output_file=args.output_file, example_dir=args.example_dir, angle_increment=args.angle_increment, difficulty=args.difficulty ) else: # separate mode create_metadata_jsonl_separate( input_dir=args.input_dir, output_file=args.output_file, example_dir=args.example_dir, angle_increment=args.angle_increment, difficulty=args.difficulty ) if __name__ == "__main__": main()