RoboChallengeAI committed
Commit 4701daf · verified · 1 Parent(s): 71ad00a

Upload folder using huggingface_hub

Files changed (2):
  1. README.md +30 -0
  2. convert_to_lerobot.py +258 -0
README.md ADDED
@@ -0,0 +1,30 @@
# convert_to_lerobot
This script generates a ready-to-use [LeRobot](https://github.com/huggingface/lerobot) dataset repository from a RoboChallenge dataset.

## Prerequisites

- Python 3.9+ with the following packages:
  - `lerobot`
  - `opencv-python`
  - `numpy`
- Configure `$LEROBOT_HOME` (defaults to `~/.lerobot` if unset).

```bash
pip install lerobot opencv-python numpy
export LEROBOT_HOME="/path/to/lerobot_home"
```

## Usage

Run the converter from the repository root (or provide an absolute path):

```bash
python convert_to_lerobot.py \
    --repo-name example_repo \
    --raw-dataset /path/to/example_dataset \
    --frame-interval 1
```
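
The converter expects the raw dataset root to look roughly like this (inferred from the paths the script reads; adjust the names to match your actual export):

```
/path/to/example_dataset/
├── meta/
│   └── task_info.json            # fps, prompt, task_tag, video_info
└── data/
    └── <episode_id>/
        ├── states/
        │   └── states.jsonl      # per-frame end_effector_pose and gripper_width
        └── videos/
            ├── arm_realsense_rgb.mp4
            ├── global_realsense_rgb.mp4
            └── right_realsense_rgb.mp4
```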

## Output
- Frames and metadata are saved to `$LEROBOT_HOME/<repo-name>`.
- At the end, the script calls `dataset.consolidate(run_compute_stats=False)`. If you require aggregated statistics, run it with `run_compute_stats=True` or execute a separate stats job.
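
If you do need normalization statistics, a minimal tweak (assuming the lerobot release the script imports, whose `consolidate` accepts `run_compute_stats`) is to change the script's final call:

```python
# Compute per-feature statistics during consolidation (slower, but writes stats metadata).
dataset.consolidate(run_compute_stats=True)
```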
convert_to_lerobot.py ADDED
@@ -0,0 +1,258 @@
"""
Minimal example: convert a dataset to the LeRobot format.

CLI example (using the *arrange_flowers* task):
    python convert_to_lerobot.py \
        --repo-name arrange_flowers_repo \
        --raw-dataset /path/to/arrange_flowers \
        --frame-interval 1

Notes:
    - If you plan to push to the Hugging Face Hub later, handle that outside this script.
"""

import argparse
import json
import shutil
from pathlib import Path
from typing import Any, Dict, List

import cv2
import numpy as np
from lerobot.common.datasets.lerobot_dataset import LEROBOT_HOME, LeRobotDataset


def load_jsonl(path: Path) -> List[Dict[str, Any]]:
    """Load a JSONL file into a list of dicts."""
    with path.open("r", encoding="utf-8") as f:
        return [json.loads(line) for line in f]

def create_lerobot_dataset(
    repo_name: str,
    robot_type: str,
    fps: float,
    height: int,
    width: int,
) -> LeRobotDataset:
    """
    Create a LeRobot dataset with a custom feature schema.
    """
    dataset = LeRobotDataset.create(
        repo_id=repo_name,
        robot_type=robot_type,
        fps=fps,
        features={
            "global_image": {
                "dtype": "image",
                "shape": (height, width, 3),
                "names": ["height", "width", "channel"],
            },
            "wrist_image": {
                "dtype": "image",
                "shape": (height, width, 3),
                "names": ["height", "width", "channel"],
            },
            "right_image": {
                "dtype": "image",
                "shape": (height, width, 3),
                "names": ["height", "width", "channel"],
            },
            "state": {
                "dtype": "float32",
                "shape": (7,),  # ee_pose (6) + gripper width (1)
                "names": ["state"],
            },
            "actions": {
                "dtype": "float32",
                "shape": (7,),  # ee_pose (6) + gripper width (1)
                "names": ["actions"],
            },
        },
        image_writer_threads=32,
        image_writer_processes=16,
    )
    return dataset


def process_episode_dir(
    episode_path: Path,
    dataset: LeRobotDataset,
    frame_interval: int,
    prompt: str,
) -> None:
    """
    Process a single episode directory and append frames to the given dataset.

    episode_path : Path
        Episode directory containing `states/states.jsonl` and `videos/*.mp4`.
    dataset : LeRobotDataset
        Target dataset to which frames are added.
    frame_interval : int
        Sampling stride (>= 1).
    prompt : str
        Language instruction of this episode.
    """
    # Modify if your dataset consists of bimanual data.
    states_path = episode_path / "states" / "states.jsonl"
    videos_dir = episode_path / "videos"

    ep_states = load_jsonl(states_path)

    # Adjust these to match your dataset's actual naming.
    wrist_video = cv2.VideoCapture(str(videos_dir / "arm_realsense_rgb.mp4"))
    global_video = cv2.VideoCapture(str(videos_dir / "global_realsense_rgb.mp4"))
    right_video = cv2.VideoCapture(str(videos_dir / "right_realsense_rgb.mp4"))

    wrist_frames_count = int(wrist_video.get(cv2.CAP_PROP_FRAME_COUNT))
    global_frames_count = int(global_video.get(cv2.CAP_PROP_FRAME_COUNT))
    right_frames_count = int(right_video.get(cv2.CAP_PROP_FRAME_COUNT))
    n_states = len(ep_states)

    # Assert that all lengths match.
    assert (
        n_states == wrist_frames_count == global_frames_count == right_frames_count
    ), (
        f"Mismatch in episode {episode_path.name}: "
        f"states={n_states}, wrist={wrist_frames_count}, "
        f"global={global_frames_count}, right={right_frames_count}"
    )

    # Write the sampled frames into one episode of the LeRobot dataset.
    for idx in range(frame_interval, n_states, frame_interval):
        # Build the current pose (action) and the previous pose (state).
        pose = np.concatenate(
            (np.asarray(ep_states[idx]["end_effector_pose"]), [ep_states[idx]["gripper_width"]])
        )
        last_pose = np.concatenate(
            (np.asarray(ep_states[idx - frame_interval]["end_effector_pose"]),
             [ep_states[idx - frame_interval]["gripper_width"]])
        )

        # Read frames && BGR -> RGB.
        # Resize as needed, but update the LeRobot feature shape accordingly.
        # Sequential reads return frame (idx - frame_interval); when frame_interval > 1,
        # skip the intermediate frames so images stay aligned with the sampled states.
        _, wrist_image = wrist_video.read()
        _, global_image = global_video.read()
        _, right_image = right_video.read()
        for _ in range(frame_interval - 1):
            wrist_video.grab()
            global_video.grab()
            right_video.grab()

        wrist_image = cv2.cvtColor(wrist_image, cv2.COLOR_BGR2RGB)
        global_image = cv2.cvtColor(global_image, cv2.COLOR_BGR2RGB)
        right_image = cv2.cvtColor(right_image, cv2.COLOR_BGR2RGB)

        dataset.add_frame(
            {
                "global_image": global_image,
                "wrist_image": wrist_image,
                "right_image": right_image,
                "state": last_pose.astype(np.float32, copy=False),
                "actions": pose.astype(np.float32, copy=False),
            }
        )

    wrist_video.release()
    global_video.release()
    right_video.release()
    dataset.save_episode(task=prompt)

def main(
    repo_name: str,
    raw_dataset: Path,
    frame_interval: int = 1,
    overwrite_repo: bool = False,
) -> None:
    """
    Convert a dataset directory into the LeRobot format.

    repo_name : str
        Output repo/dataset name (saved under $LEROBOT_HOME / repo_name).
    raw_dataset : Path
        Path to the raw dataset root directory.
    frame_interval : int, default=1
        Sample every N frames.
    overwrite_repo : bool, default=False
        If True, remove the existing dataset directory before writing.
    """
    assert frame_interval >= 1, "frame_interval must be >= 1"

    # Optionally remove an existing output repo.
    dst_dir = LEROBOT_HOME / repo_name
    if overwrite_repo and dst_dir.exists():
        print(f"removing existing dataset at {dst_dir}")
        shutil.rmtree(dst_dir)

    # Load task info.
    task_info_path = raw_dataset / "meta" / "task_info.json"
    with task_info_path.open("r", encoding="utf-8") as f:
        task_info = json.load(f)

    robot_type = task_info["task_desc"]["task_tag"][2]  # e.g. "ARX5"
    video_info = task_info["video_info"]
    video_info["width"] = 480  # TODO: derive from task_info or actual videos
    video_info["height"] = 640
    fps = float(video_info["fps"])

    prompt = task_info["task_desc"]["prompt"]

    # Create the dataset and define the features in the form you need.
    # - proprio is stored in `state` and actions in `actions`
    # - LeRobot assumes that the dtype of image data is `image`
    dataset = create_lerobot_dataset(
        repo_name=repo_name,
        robot_type=robot_type,
        fps=fps,
        height=video_info["height"],
        width=video_info["width"],
    )

    # Populate the LeRobot dataset episode by episode.
    data_root = raw_dataset / "data"
    for episode_path in data_root.iterdir():
        if not episode_path.is_dir():
            continue
        print(f"Processing episode: {episode_path.name}")
        process_episode_dir(
            episode_path=episode_path,
            dataset=dataset,
            frame_interval=frame_interval,
            prompt=prompt,
        )

    dataset.consolidate(run_compute_stats=False)
    print(f"Done. Dataset saved to: {dst_dir}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Convert a custom dataset to LeRobot format."
    )
    parser.add_argument(
        "--repo-name",
        required=True,
        help="Name of the output dataset (under $LEROBOT_HOME).",
    )
    parser.add_argument(
        "--raw-dataset",
        required=True,
        type=str,
        help="Path to the raw dataset root.",
    )
    parser.add_argument(
        "--frame-interval",
        type=int,
        default=1,
        help="Sample every N frames. Default: 1",
    )
    parser.add_argument(
        "--overwrite-repo",
        action="store_true",
        help="Remove existing output directory if it exists.",
    )
    args = parser.parse_args()

    main(
        repo_name=args.repo_name,
        raw_dataset=Path(args.raw_dataset),
        frame_interval=args.frame_interval,
        overwrite_repo=args.overwrite_repo,
    )