Spaces:
Sleeping
Шатурный Алексей Давыдович
committed on
Commit 0269f70 · 1 Parent(s): bdcda2c
add files
This view is limited to 50 files because it contains too many changes.
- .DS_Store +0 -0
- Dockerfile +55 -0
- README.md +139 -6
- backend/.DS_Store +0 -0
- backend/ReConV2/.DS_Store +0 -0
- backend/ReConV2/extensions/chamfer_distance/__init__.py +1 -0
- backend/ReConV2/extensions/chamfer_distance/chamfer_distance.cpp +185 -0
- backend/ReConV2/extensions/chamfer_distance/chamfer_distance.cu +209 -0
- backend/ReConV2/extensions/chamfer_distance/chamfer_distance.py +71 -0
- backend/ReConV2/models/ReCon.py +630 -0
- backend/ReConV2/models/__init__.py +4 -0
- backend/ReConV2/models/build.py +14 -0
- backend/ReConV2/models/transformer.py +788 -0
- backend/ReConV2/utils/checkpoint.py +129 -0
- backend/ReConV2/utils/config.py +73 -0
- backend/ReConV2/utils/knn.py +37 -0
- backend/ReConV2/utils/logger.py +130 -0
- backend/ReConV2/utils/misc.py +287 -0
- backend/ReConV2/utils/registry.py +290 -0
- backend/cad_retrieval_utils/__init__.py +3 -0
- backend/cad_retrieval_utils/augmentations.py +15 -0
- backend/cad_retrieval_utils/configs/config.py +57 -0
- backend/cad_retrieval_utils/datasets.py +80 -0
- backend/cad_retrieval_utils/evaluation.py +43 -0
- backend/cad_retrieval_utils/inference.py +242 -0
- backend/cad_retrieval_utils/inference_runner.py +45 -0
- backend/cad_retrieval_utils/models.py +124 -0
- backend/cad_retrieval_utils/type_defs.py +27 -0
- backend/cad_retrieval_utils/utils.py +91 -0
- backend/config.py +57 -0
- backend/download_utils.py +56 -0
- backend/inference_utils.py +355 -0
- backend/main.py +278 -0
- backend/requirements.txt +31 -0
- frontend/.DS_Store +0 -0
- frontend/App.tsx +220 -0
- frontend/components/ComparisonTool.tsx +558 -0
- frontend/components/DatasetManager.tsx +460 -0
- frontend/components/DatasetViewer.tsx +220 -0
- frontend/components/common/FullscreenViewer.tsx +81 -0
- frontend/components/common/MeshViewer.tsx +207 -0
- frontend/components/common/Modal.tsx +47 -0
- frontend/components/common/ProgressBar.tsx +23 -0
- frontend/components/common/Spinner.tsx +9 -0
- frontend/index.html +26 -0
- frontend/index.tsx +16 -0
- frontend/metadata.json +5 -0
- frontend/package-lock.json +1874 -0
- frontend/package.json +23 -0
- frontend/services/apiService.ts +265 -0
.DS_Store
ADDED
Binary file (6.15 kB)
Dockerfile
ADDED
@@ -0,0 +1,55 @@
+# Dockerfile
+
+# --- Stage 1: Build the static frontend ---
+# We use a lightweight Node.js image to build the React application
+FROM node:18-alpine AS frontend-builder
+WORKDIR /app/frontend
+
+# Copy only package.json first so the dependency install layer is cached
+COPY frontend/package.json ./
+COPY frontend/package-lock.json ./
+RUN npm install
+
+# Copy the rest of the frontend code and run the build
+COPY frontend/ ./
+# Important: make sure your package.json contains a "build" script,
+# usually "build": "vite build" or "react-scripts build"
+RUN npm run build
+
+
+# --- Stage 2: Set up the Python environment and backend ---
+# Use the official Python image
+FROM python:3.10-slim
+ENV PYTHONDONTWRITEBYTECODE=1
+ENV PYTHONUNBUFFERED=1
+
+WORKDIR /app
+
+# Install system dependencies if they are needed
+# (e.g. for compiling the C++ extensions in ReConV2)
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    build-essential \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install the Python dependencies
+COPY backend/requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy the backend code
+COPY backend/ .
+
+# If ReConV2 has C++ extensions that need to be compiled,
+# uncomment and adapt the following line:
+# RUN cd /app/ReConV2/extensions/ && python setup.py install
+
+
+# --- Stage 3: Final image ---
+# Copy the built frontend from the first stage into the 'static' folder;
+# FastAPI will serve the files from this folder automatically
+COPY --from=frontend-builder /app/frontend/dist ./static
+
+# Expose the port FastAPI will listen on (the HF Spaces default)
+EXPOSE 7860
+
+# Start the API server with Uvicorn
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
README.md
CHANGED
@@ -1,10 +1,143 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: Cross-Modal Object Comparison Tool
+emoji: 👀
+colorFrom: green
+colorTo: yellow
 sdk: docker
-pinned:
+pinned: true
+short_description: Demo of Image <-> 3D <-> Text retrieval tool for AI Challenge
+license: mit
 ---
 
-
+
+# 🚀 Cross-Modal 3D Asset Retrieval & Comparison Tool
+
+[License: MIT](https://opensource.org/licenses/MIT)
+[React](https://react.dev/)
+[FastAPI](https://fastapi.tiangolo.com/)
+[PyTorch](https://pytorch.org/)
+
+An advanced, full-stack application designed to manage and analyze multi-modal datasets containing 3D models, images, and text descriptions. The tool leverages deep learning models to compute and compare embeddings across different modalities, enabling powerful cross-modal search and retrieval.
+
+The interface allows users to upload their own datasets, explore a pre-loaded shared dataset, and perform detailed comparisons to find the most similar assets, regardless of their original format.
+
+---
+
+## ✨ Key Features
+
+- **🗂️ Multi-Modal Dataset Management**: Upload `.zip` archives containing images (`.png`), text (`.txt`), and 3D models (`.stl`). The system automatically processes and indexes them.
+- **☁️ Cloud & Local Datasets**: Seamlessly switch between a large, pre-processed shared dataset hosted on the server and local datasets stored securely in your browser's IndexedDB.
+- **👁️ Interactive Content Viewer**:
+  - A high-performance 3D viewer for `.stl` models with zoom/pan/rotate controls, powered by **Three.js**.
+  - Integrated image and text viewers.
+  - Fullscreen mode for detailed inspection of any asset.
+- **🧠 Powerful Cross-Modal Comparison**:
+  - **Dataset Item Search**: Select any item within a dataset to instantly see its top matches across all other modalities based on semantic similarity.
+  - **Ad-Hoc Search**: Upload a new, external image, 3D model, or text snippet to find the most similar items within a selected dataset.
+- **📊 Full Analysis Export**: Download the complete, pre-computed similarity matrix for any processed dataset as a `.json` or `.csv` file for offline analysis and reporting (a CSV sketch follows this list).
+- **⚡ Responsive & Modern UI**: A clean, fast, and intuitive user interface built with **React**, **TypeScript**, and **TailwindCSS**.
+- **🚀 High-Performance Backend**: Powered by **FastAPI** and **PyTorch**, the backend is optimized for asynchronous operations and efficient deep learning inference.
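As a rough illustration of the export feature, a `.csv` dump of a similarity matrix can be as small as this sketch; the function name and ID scheme here are illustrative assumptions, not the shipped implementation:

```python
import csv
import numpy as np

def export_matrix_csv(sims: np.ndarray, ids: list[str], path: str) -> None:
    """Write an N x N similarity matrix as CSV with item IDs on both axes."""
    assert sims.shape == (len(ids), len(ids))
    with open(path, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["id", *ids])  # header row of item IDs
        for item_id, row in zip(ids, sims):
            writer.writerow([item_id, *np.round(row, 4)])
```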
+
+---
+
+## 🛠️ Technical Stack
+
+| Area | Technology |
+| :-------- | :---------------------------------------------------------------------------------------------------------- |
+| **Frontend** | [React 19](https://react.dev/), [TypeScript](https://www.typescriptlang.org/), [TailwindCSS](https://tailwindcss.com/), [Three.js](https://threejs.org/), [IndexedDB](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API) |
+| **Backend** | [Python 3.10](https://www.python.org/), [FastAPI](https://fastapi.tiangolo.com/), [PyTorch](https://pytorch.org/), [Uvicorn](https://www.uvicorn.org/), [scikit-learn](https://scikit-learn.org/) |
+| **Deployment**| [Docker](https://www.docker.com/), [Hugging Face Spaces](https://huggingface.co/spaces) (or any container-based platform) |
+
+---
+
+## 🏛️ Project Architecture
+
+The application is architected as a modern monorepo with a clear separation between the frontend and backend services, designed for containerization and easy deployment.
+
+### Frontend (`/frontend`)
+
+A standalone Single-Page Application (SPA) built with React.
+- **`components/`**: Contains reusable UI components, organized by feature (e.g., `DatasetManager`, `ComparisonTool`, `common/`).
+- **`services/`**: Handles all side effects and external communication.
+  - `apiService.ts`: Manages all HTTP requests to the backend API.
+  - `dbService.ts`: Provides a simple interface for interacting with the browser's IndexedDB for local dataset persistence.
+  - `comparisonService.ts`: Logic for handling client-side interactions with pre-computed similarity data.
+- **`types.ts`**: Centralized TypeScript type definitions for robust data modeling.
+- **`App.tsx`**: The main application component that orchestrates state and views.
+
+### Backend (`/backend`)
+
+A high-performance API server built with FastAPI.
+- **`main.py`**: The main entry point for the FastAPI application. It defines all API endpoints, manages application lifecycle events (like model loading on startup), and serves the static frontend files (a minimal sketch follows this list).
+- **`inference_utils.py`**: The core of the AI logic. It handles ZIP file processing, asset parsing, embedding generation using the PyTorch models, and similarity calculation (cosine similarity). It also manages an in-memory cache for embeddings to ensure fast retrieval.
+- **`download_utils.py`**: A utility module for downloading model weights and shared datasets from external storage (e.g., Yandex.Disk) during the startup phase.
+- **`cad_retrieval_utils/`**: A proprietary library containing the core model definitions, data loaders, and training/inference configurations for the cross-modal retrieval task.
+- **`ReConV2/`**: A dependency containing model architectures and potentially C++ extensions for efficient 3D point cloud processing.
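A minimal sketch of the `main.py` wiring described above, using only standard FastAPI APIs (`UploadFile`, `StaticFiles`); the endpoint body is an illustrative stub, not the actual implementation:

```python
from fastapi import FastAPI, UploadFile
from fastapi.staticfiles import StaticFiles

app = FastAPI()

@app.post("/api/process-dataset")
async def process_dataset(file: UploadFile):
    # Illustrative stub: the real handler unpacks the archive, runs the
    # encoders from inference_utils.py, and returns the processed dataset
    # together with its pre-computed similarity matrix.
    data = await file.read()
    return {"filename": file.filename, "size_bytes": len(data)}

# Serve the compiled frontend that the Dockerfile copies into ./static.
# API routes are registered before the mount so they take precedence.
app.mount("/", StaticFiles(directory="static", html=True), name="static")
```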
+
+---
+
+## ⚙️ How It Works
+
+The core workflow for processing a new dataset is as follows:
+
+1. **Upload**: The user uploads a `.zip` file via the React frontend.
+2. **API Request**: The frontend sends the file to the `/api/process-dataset` endpoint on the FastAPI backend.
+3. **Unpacking & Preprocessing**: The backend saves the archive to a temporary directory and extracts all image, text, and mesh files.
+4. **Embedding Generation**: For each file, a specialized PyTorch model generates a high-dimensional vector embedding:
+   - An **Image Encoder** processes `.png` files.
+   - A **Text Encoder** processes `.txt` files.
+   - A **Point Cloud (PC) Encoder** processes `.stl` files after converting them to point clouds (sketched after this list).
+5. **Caching**: The generated embeddings and asset metadata are stored in an in-memory cache on the server for instant access.
+6. **Full Comparison**: The backend pre-computes a full N x N similarity matrix by calculating the cosine similarity between every pair of embeddings (also sketched after this list).
+7. **Response & Client-Side Storage**: The fully processed dataset object, including the comparison matrix, is sent back to the client. The frontend then saves this complete dataset to IndexedDB, making it available for future sessions without needing to re-upload.
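Two of the steps above are compact enough to sketch. For step 4, the `.stl` → point-cloud conversion might look like this; `trimesh` and the 1024-point budget are assumptions, since the actual preprocessing lives in `inference_utils.py`:

```python
import numpy as np
import trimesh  # assumed mesh library; not confirmed by this commit

def stl_to_point_cloud(path: str, num_points: int = 1024) -> np.ndarray:
    """Sample a fixed-size point cloud from an STL mesh surface."""
    mesh = trimesh.load(path, force="mesh")
    points, _ = trimesh.sample.sample_surface(mesh, num_points)  # (num_points, 3)
    points = points - points.mean(axis=0)                      # center at origin
    points = points / np.max(np.linalg.norm(points, axis=1))   # fit unit sphere
    return points.astype(np.float32)
```

For step 6, once every embedding is L2-normalized the whole N x N cosine-similarity matrix reduces to a single matrix product:

```python
import numpy as np

def similarity_matrix(embeddings: np.ndarray) -> np.ndarray:
    """Pairwise cosine similarity for N embeddings of shape (N, D)."""
    norms = np.linalg.norm(embeddings, axis=1, keepdims=True)
    unit = embeddings / np.clip(norms, 1e-12, None)
    return unit @ unit.T  # (N, N), entries in [-1, 1]
```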
+
+---
+
+## 🚀 Getting Started
+
+You can run this project locally using Docker, which encapsulates both the frontend and backend services.
+
+### Prerequisites
+
+- [Docker](https://www.docker.com/get-started) installed on your machine.
+
+### Local Installation & Startup
+
+1. **Clone the repository:**
+   ```bash
+   git clone <your-repository-url>
+   cd <repository-name>
+   ```
+
+2. **Check Model & Data URLs:**
+   The application is configured to download pre-trained models and a shared dataset from public URLs. Please verify the links inside `backend/main.py` and replace them with your own if necessary.
+
+3. **Build and run with Docker:**
+   The provided `Dockerfile` is a multi-stage build that compiles the frontend and sets up the Python backend in a single, optimized image.
+
+   ```bash
+   # Build the Docker image
+   docker build -t cross-modal-retrieval .
+
+   # Run the container
+   docker run -p 7860:7860 cross-modal-retrieval
+   ```
+
+4. **Access the application:**
+   Open your browser and navigate to [http://localhost:7860](http://localhost:7860).
+
+---
+
+## 💡 Future Improvements
+
+- **Support for More Formats**: Extend file support to `.obj`/`.glb` for 3D models and `.jpeg`/`.webp` for images.
+- **Advanced Search**: Implement more complex filtering and search options within the dataset viewer (e.g., by similarity score, item count).
+- **Embedding Visualization**: Add a new section to visualize the high-dimensional embedding space using techniques like t-SNE or UMAP.
+- **User Authentication**: Introduce user accounts to manage private datasets and share them with collaborators.
+- **Model Fine-tuning**: Allow users to fine-tune the retrieval models on their own datasets to improve domain-specific accuracy.
+
+---
+
+## 📜 License
+
+This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for details.
backend/.DS_Store
ADDED
Binary file (6.15 kB)
backend/ReConV2/.DS_Store
ADDED
Binary file (6.15 kB)
backend/ReConV2/extensions/chamfer_distance/__init__.py
ADDED
@@ -0,0 +1 @@
+from .chamfer_distance import ChamferDistance
backend/ReConV2/extensions/chamfer_distance/chamfer_distance.cpp
ADDED
@@ -0,0 +1,185 @@
+#include <torch/torch.h>
+
+// CUDA forward declarations
+void ChamferDistanceKernelLauncher(
+    const int b, const int n,
+    const float* xyz,
+    const int m,
+    const float* xyz2,
+    float* result,
+    int* result_i,
+    float* result2,
+    int* result2_i);
+
+void ChamferDistanceGradKernelLauncher(
+    const int b, const int n,
+    const float* xyz1,
+    const int m,
+    const float* xyz2,
+    const float* grad_dist1,
+    const int* idx1,
+    const float* grad_dist2,
+    const int* idx2,
+    float* grad_xyz1,
+    float* grad_xyz2);
+
+
+void chamfer_distance_forward_cuda(
+    const at::Tensor xyz1,
+    const at::Tensor xyz2,
+    const at::Tensor dist1,
+    const at::Tensor dist2,
+    const at::Tensor idx1,
+    const at::Tensor idx2)
+{
+    ChamferDistanceKernelLauncher(xyz1.size(0), xyz1.size(1), xyz1.data<float>(),
+                                  xyz2.size(1), xyz2.data<float>(),
+                                  dist1.data<float>(), idx1.data<int>(),
+                                  dist2.data<float>(), idx2.data<int>());
+}
+
+void chamfer_distance_backward_cuda(
+    const at::Tensor xyz1,
+    const at::Tensor xyz2,
+    at::Tensor gradxyz1,
+    at::Tensor gradxyz2,
+    at::Tensor graddist1,
+    at::Tensor graddist2,
+    at::Tensor idx1,
+    at::Tensor idx2)
+{
+    ChamferDistanceGradKernelLauncher(xyz1.size(0), xyz1.size(1), xyz1.data<float>(),
+                                      xyz2.size(1), xyz2.data<float>(),
+                                      graddist1.data<float>(), idx1.data<int>(),
+                                      graddist2.data<float>(), idx2.data<int>(),
+                                      gradxyz1.data<float>(), gradxyz2.data<float>());
+}
+
+
+void nnsearch(
+    const int b, const int n, const int m,
+    const float* xyz1,
+    const float* xyz2,
+    float* dist,
+    int* idx)
+{
+    for (int i = 0; i < b; i++) {
+        for (int j = 0; j < n; j++) {
+            const float x1 = xyz1[(i*n+j)*3+0];
+            const float y1 = xyz1[(i*n+j)*3+1];
+            const float z1 = xyz1[(i*n+j)*3+2];
+            double best = 0;
+            int besti = 0;
+            for (int k = 0; k < m; k++) {
+                const float x2 = xyz2[(i*m+k)*3+0] - x1;
+                const float y2 = xyz2[(i*m+k)*3+1] - y1;
+                const float z2 = xyz2[(i*m+k)*3+2] - z1;
+                const double d = x2*x2+y2*y2+z2*z2;
+                if (k==0 || d < best){
+                    best = d;
+                    besti = k;
+                }
+            }
+            dist[i*n+j] = best;
+            idx[i*n+j] = besti;
+        }
+    }
+}
+
+
+void chamfer_distance_forward(
+    const at::Tensor xyz1,
+    const at::Tensor xyz2,
+    const at::Tensor dist1,
+    const at::Tensor dist2,
+    const at::Tensor idx1,
+    const at::Tensor idx2)
+{
+    const int batchsize = xyz1.size(0);
+    const int n = xyz1.size(1);
+    const int m = xyz2.size(1);
+
+    const float* xyz1_data = xyz1.data<float>();
+    const float* xyz2_data = xyz2.data<float>();
+    float* dist1_data = dist1.data<float>();
+    float* dist2_data = dist2.data<float>();
+    int* idx1_data = idx1.data<int>();
+    int* idx2_data = idx2.data<int>();
+
+    nnsearch(batchsize, n, m, xyz1_data, xyz2_data, dist1_data, idx1_data);
+    nnsearch(batchsize, m, n, xyz2_data, xyz1_data, dist2_data, idx2_data);
+}
+
+
+void chamfer_distance_backward(
+    const at::Tensor xyz1,
+    const at::Tensor xyz2,
+    at::Tensor gradxyz1,
+    at::Tensor gradxyz2,
+    at::Tensor graddist1,
+    at::Tensor graddist2,
+    at::Tensor idx1,
+    at::Tensor idx2)
+{
+    const int b = xyz1.size(0);
+    const int n = xyz1.size(1);
+    const int m = xyz2.size(1);
+
+    const float* xyz1_data = xyz1.data<float>();
+    const float* xyz2_data = xyz2.data<float>();
+    float* gradxyz1_data = gradxyz1.data<float>();
+    float* gradxyz2_data = gradxyz2.data<float>();
+    float* graddist1_data = graddist1.data<float>();
+    float* graddist2_data = graddist2.data<float>();
+    const int* idx1_data = idx1.data<int>();
+    const int* idx2_data = idx2.data<int>();
+
+    for (int i = 0; i < b*n*3; i++)
+        gradxyz1_data[i] = 0;
+    for (int i = 0; i < b*m*3; i++)
+        gradxyz2_data[i] = 0;
+    for (int i = 0; i < b; i++) {
+        for (int j = 0; j < n; j++) {
+            const float x1 = xyz1_data[(i*n+j)*3+0];
+            const float y1 = xyz1_data[(i*n+j)*3+1];
+            const float z1 = xyz1_data[(i*n+j)*3+2];
+            const int j2 = idx1_data[i*n+j];
+
+            const float x2 = xyz2_data[(i*m+j2)*3+0];
+            const float y2 = xyz2_data[(i*m+j2)*3+1];
+            const float z2 = xyz2_data[(i*m+j2)*3+2];
+            const float g = graddist1_data[i*n+j]*2;
+
+            gradxyz1_data[(i*n+j)*3+0] += g*(x1-x2);
+            gradxyz1_data[(i*n+j)*3+1] += g*(y1-y2);
+            gradxyz1_data[(i*n+j)*3+2] += g*(z1-z2);
+            gradxyz2_data[(i*m+j2)*3+0] -= (g*(x1-x2));
+            gradxyz2_data[(i*m+j2)*3+1] -= (g*(y1-y2));
+            gradxyz2_data[(i*m+j2)*3+2] -= (g*(z1-z2));
+        }
+        for (int j = 0; j < m; j++) {
+            const float x1 = xyz2_data[(i*m+j)*3+0];
+            const float y1 = xyz2_data[(i*m+j)*3+1];
+            const float z1 = xyz2_data[(i*m+j)*3+2];
+            const int j2 = idx2_data[i*m+j];
+            const float x2 = xyz1_data[(i*n+j2)*3+0];
+            const float y2 = xyz1_data[(i*n+j2)*3+1];
+            const float z2 = xyz1_data[(i*n+j2)*3+2];
+            const float g = graddist2_data[i*m+j]*2;
+            gradxyz2_data[(i*m+j)*3+0] += g*(x1-x2);
+            gradxyz2_data[(i*m+j)*3+1] += g*(y1-y2);
+            gradxyz2_data[(i*m+j)*3+2] += g*(z1-z2);
+            gradxyz1_data[(i*n+j2)*3+0] -= (g*(x1-x2));
+            gradxyz1_data[(i*n+j2)*3+1] -= (g*(y1-y2));
+            gradxyz1_data[(i*n+j2)*3+2] -= (g*(z1-z2));
+        }
+    }
+}
+
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+    m.def("forward", &chamfer_distance_forward, "ChamferDistance forward");
+    m.def("forward_cuda", &chamfer_distance_forward_cuda, "ChamferDistance forward (CUDA)");
+    m.def("backward", &chamfer_distance_backward, "ChamferDistance backward");
+    m.def("backward_cuda", &chamfer_distance_backward_cuda, "ChamferDistance backward (CUDA)");
+}
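For reference, the quantity these bindings compute is the two-sided (squared) chamfer distance: `nnsearch` and the CUDA kernels return each point's squared distance to its nearest neighbor in the other cloud, and `ReCon.py` later reduces them with `torch.mean(dist1) + torch.mean(dist2)`, i.e.

```latex
d_{CD}(S_1, S_2) = \frac{1}{|S_1|} \sum_{x \in S_1} \min_{y \in S_2} \lVert x - y \rVert_2^2
                 + \frac{1}{|S_2|} \sum_{y \in S_2} \min_{x \in S_1} \lVert x - y \rVert_2^2
```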
backend/ReConV2/extensions/chamfer_distance/chamfer_distance.cu
ADDED
@@ -0,0 +1,209 @@
+#include <ATen/ATen.h>
+
+#include <cuda.h>
+#include <cuda_runtime.h>
+
+__global__
+void ChamferDistanceKernel(
+    int b,
+    int n,
+    const float* xyz,
+    int m,
+    const float* xyz2,
+    float* result,
+    int* result_i)
+{
+    const int batch=512;
+    __shared__ float buf[batch*3];
+    for (int i=blockIdx.x;i<b;i+=gridDim.x){
+        for (int k2=0;k2<m;k2+=batch){
+            int end_k=min(m,k2+batch)-k2;
+            for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
+                buf[j]=xyz2[(i*m+k2)*3+j];
+            }
+            __syncthreads();
+            for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
+                float x1=xyz[(i*n+j)*3+0];
+                float y1=xyz[(i*n+j)*3+1];
+                float z1=xyz[(i*n+j)*3+2];
+                int best_i=0;
+                float best=0;
+                int end_ka=end_k-(end_k&3);
+                if (end_ka==batch){
+                    for (int k=0;k<batch;k+=4){
+                        {
+                            float x2=buf[k*3+0]-x1;
+                            float y2=buf[k*3+1]-y1;
+                            float z2=buf[k*3+2]-z1;
+                            float d=x2*x2+y2*y2+z2*z2;
+                            if (k==0 || d<best){
+                                best=d;
+                                best_i=k+k2;
+                            }
+                        }
+                        {
+                            float x2=buf[k*3+3]-x1;
+                            float y2=buf[k*3+4]-y1;
+                            float z2=buf[k*3+5]-z1;
+                            float d=x2*x2+y2*y2+z2*z2;
+                            if (d<best){
+                                best=d;
+                                best_i=k+k2+1;
+                            }
+                        }
+                        {
+                            float x2=buf[k*3+6]-x1;
+                            float y2=buf[k*3+7]-y1;
+                            float z2=buf[k*3+8]-z1;
+                            float d=x2*x2+y2*y2+z2*z2;
+                            if (d<best){
+                                best=d;
+                                best_i=k+k2+2;
+                            }
+                        }
+                        {
+                            float x2=buf[k*3+9]-x1;
+                            float y2=buf[k*3+10]-y1;
+                            float z2=buf[k*3+11]-z1;
+                            float d=x2*x2+y2*y2+z2*z2;
+                            if (d<best){
+                                best=d;
+                                best_i=k+k2+3;
+                            }
+                        }
+                    }
+                }else{
+                    for (int k=0;k<end_ka;k+=4){
+                        {
+                            float x2=buf[k*3+0]-x1;
+                            float y2=buf[k*3+1]-y1;
+                            float z2=buf[k*3+2]-z1;
+                            float d=x2*x2+y2*y2+z2*z2;
+                            if (k==0 || d<best){
+                                best=d;
+                                best_i=k+k2;
+                            }
+                        }
+                        {
+                            float x2=buf[k*3+3]-x1;
+                            float y2=buf[k*3+4]-y1;
+                            float z2=buf[k*3+5]-z1;
+                            float d=x2*x2+y2*y2+z2*z2;
+                            if (d<best){
+                                best=d;
+                                best_i=k+k2+1;
+                            }
+                        }
+                        {
+                            float x2=buf[k*3+6]-x1;
+                            float y2=buf[k*3+7]-y1;
+                            float z2=buf[k*3+8]-z1;
+                            float d=x2*x2+y2*y2+z2*z2;
+                            if (d<best){
+                                best=d;
+                                best_i=k+k2+2;
+                            }
+                        }
+                        {
+                            float x2=buf[k*3+9]-x1;
+                            float y2=buf[k*3+10]-y1;
+                            float z2=buf[k*3+11]-z1;
+                            float d=x2*x2+y2*y2+z2*z2;
+                            if (d<best){
+                                best=d;
+                                best_i=k+k2+3;
+                            }
+                        }
+                    }
+                }
+                for (int k=end_ka;k<end_k;k++){
+                    float x2=buf[k*3+0]-x1;
+                    float y2=buf[k*3+1]-y1;
+                    float z2=buf[k*3+2]-z1;
+                    float d=x2*x2+y2*y2+z2*z2;
+                    if (k==0 || d<best){
+                        best=d;
+                        best_i=k+k2;
+                    }
+                }
+                if (k2==0 || result[(i*n+j)]>best){
+                    result[(i*n+j)]=best;
+                    result_i[(i*n+j)]=best_i;
+                }
+            }
+            __syncthreads();
+        }
+    }
+}
+
+void ChamferDistanceKernelLauncher(
+    const int b, const int n,
+    const float* xyz,
+    const int m,
+    const float* xyz2,
+    float* result,
+    int* result_i,
+    float* result2,
+    int* result2_i)
+{
+    ChamferDistanceKernel<<<dim3(32,16,1),512>>>(b, n, xyz, m, xyz2, result, result_i);
+    ChamferDistanceKernel<<<dim3(32,16,1),512>>>(b, m, xyz2, n, xyz, result2, result2_i);
+
+    cudaError_t err = cudaGetLastError();
+    if (err != cudaSuccess)
+        printf("error in chamfer distance updateOutput: %s\n", cudaGetErrorString(err));
+}
+
+
+__global__
+void ChamferDistanceGradKernel(
+    int b, int n,
+    const float* xyz1,
+    int m,
+    const float* xyz2,
+    const float* grad_dist1,
+    const int* idx1,
+    float* grad_xyz1,
+    float* grad_xyz2)
+{
+    for (int i = blockIdx.x; i<b; i += gridDim.x) {
+        for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x*gridDim.y) {
+            float x1=xyz1[(i*n+j)*3+0];
+            float y1=xyz1[(i*n+j)*3+1];
+            float z1=xyz1[(i*n+j)*3+2];
+            int j2=idx1[i*n+j];
+            float x2=xyz2[(i*m+j2)*3+0];
+            float y2=xyz2[(i*m+j2)*3+1];
+            float z2=xyz2[(i*m+j2)*3+2];
+            float g=grad_dist1[i*n+j]*2;
+            atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2));
+            atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2));
+            atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2));
+            atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2)));
+            atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2)));
+            atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2)));
+        }
+    }
+}
+
+void ChamferDistanceGradKernelLauncher(
+    const int b, const int n,
+    const float* xyz1,
+    const int m,
+    const float* xyz2,
+    const float* grad_dist1,
+    const int* idx1,
+    const float* grad_dist2,
+    const int* idx2,
+    float* grad_xyz1,
+    float* grad_xyz2)
+{
+    cudaMemset(grad_xyz1, 0, b*n*3*4);
+    cudaMemset(grad_xyz2, 0, b*m*3*4);
+    ChamferDistanceGradKernel<<<dim3(1,16,1), 256>>>(b, n, xyz1, m, xyz2, grad_dist1, idx1, grad_xyz1, grad_xyz2);
+    ChamferDistanceGradKernel<<<dim3(1,16,1), 256>>>(b, m, xyz2, n, xyz1, grad_dist2, idx2, grad_xyz2, grad_xyz1);
+
+    cudaError_t err = cudaGetLastError();
+    if (err != cudaSuccess)
+        printf("error in chamfer distance get grad: %s\n", cudaGetErrorString(err));
+}
backend/ReConV2/extensions/chamfer_distance/chamfer_distance.py
ADDED
@@ -0,0 +1,71 @@
+import os
+
+import torch
+
+script_path = os.path.dirname(os.path.abspath(__file__))
+
+from torch.utils.cpp_extension import load
+
+if torch.cuda.is_available():
+    cd = load(
+        name="cd",
+        sources=[
+            os.path.join(script_path, "chamfer_distance.cpp"),
+            os.path.join(script_path, "chamfer_distance.cu"),
+        ],
+    )
+
+
+class ChamferDistanceFunction(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, xyz1, xyz2):
+        batchsize, n, _ = xyz1.size()
+        _, m, _ = xyz2.size()
+        xyz1 = xyz1.contiguous()
+        xyz2 = xyz2.contiguous()
+        dist1 = torch.zeros(batchsize, n)
+        dist2 = torch.zeros(batchsize, m)
+
+        idx1 = torch.zeros(batchsize, n, dtype=torch.int)
+        idx2 = torch.zeros(batchsize, m, dtype=torch.int)
+
+        if not xyz1.is_cuda:
+            cd.forward(xyz1, xyz2, dist1, dist2, idx1, idx2)
+        else:
+            dist1 = dist1.cuda()
+            dist2 = dist2.cuda()
+            idx1 = idx1.cuda()
+            idx2 = idx2.cuda()
+            cd.forward_cuda(xyz1, xyz2, dist1, dist2, idx1, idx2)
+
+        ctx.save_for_backward(xyz1, xyz2, idx1, idx2)
+
+        return dist1, dist2, idx1
+
+    @staticmethod
+    def backward(ctx, graddist1, graddist2, _):
+        xyz1, xyz2, idx1, idx2 = ctx.saved_tensors
+
+        graddist1 = graddist1.contiguous()
+        graddist2 = graddist2.contiguous()
+
+        gradxyz1 = torch.zeros(xyz1.size())
+        gradxyz2 = torch.zeros(xyz2.size())
+
+        if not graddist1.is_cuda:
+            cd.backward(
+                xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2
+            )
+        else:
+            gradxyz1 = gradxyz1.cuda()
+            gradxyz2 = gradxyz2.cuda()
+            cd.backward_cuda(
+                xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2
+            )
+
+        return gradxyz1, gradxyz2
+
+
+class ChamferDistance(torch.nn.Module):
+    def forward(self, xyz1, xyz2):
+        return ChamferDistanceFunction.apply(xyz1, xyz2)
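A quick usage sketch for the wrapper above. Two properties are visible in the code: `forward` returns three values (`dist1`, `dist2`, `idx1`), and the JIT `load(...)` call is guarded by `torch.cuda.is_available()`, so on a CPU-only machine the `cd.forward(...)` branch would hit an undefined `cd`. The shapes below are assumptions consistent with the `(B, N, 3)` layout the function unpacks:

```python
import torch
from ReConV2.extensions.chamfer_distance import ChamferDistance

cd = ChamferDistance()
xyz1 = torch.rand(2, 1024, 3, device="cuda", requires_grad=True)
xyz2 = torch.rand(2, 2048, 3, device="cuda")
dist1, dist2, idx1 = cd(xyz1, xyz2)  # per-point squared nearest-neighbor distances
loss = dist1.mean() + dist2.mean()   # the reduction ReCon.py applies
loss.backward()                      # gradients flow through the custom backward
```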
backend/ReConV2/models/ReCon.py
ADDED
@@ -0,0 +1,630 @@
+import numpy as np
+import timm
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from scipy.optimize import linear_sum_assignment
+from timm.layers import trunc_normal_
+
+from ReConV2.extensions.chamfer_distance import ChamferDistance
+from ReConV2.models.transformer import (
+    GPTExtractor,
+    GPTGenerator,
+    Group,
+    MAEExtractor,
+    MAEGenerator,
+    PatchEmbedding,
+    PositionEmbeddingCoordsSine,
+    ZGroup,
+)
+from ReConV2.utils.checkpoint import (
+    get_missing_parameters_message,
+    get_unexpected_parameters_message,
+)
+from ReConV2.utils.logger import *
+
+from .build import MODELS
+
+
+# Pretrain model
+class MaskTransformer(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+
+        self.embed_dim = config.embed_dim
+        self.num_group = config.num_group
+        self.group_size = config.group_size
+        self.with_color = config.with_color
+        self.input_channel = 6 if self.with_color else 3
+        self.img_queries = config.img_queries
+        self.text_queries = config.text_queries
+        self.global_query_num = self.img_queries + self.text_queries
+        self.mask_type = config.mask_type
+        self.mask_ratio = config.mask_ratio
+        self.stop_grad = config.stop_grad
+
+        self.embed = PatchEmbedding(
+            embed_dim=self.embed_dim,
+            input_channel=self.input_channel,
+            large=config.large_embedding,
+        )
+
+        print_log(
+            f"[ReCon] divide point cloud into G{config.num_group} x S{config.group_size} points ...",
+            logger="ReCon",
+        )
+
+        if self.mask_type == "causal":
+            self.group_divider = ZGroup(
+                num_group=config.num_group, group_size=config.group_size
+            )
+            self.encoder = GPTExtractor(
+                embed_dim=config.embed_dim,
+                num_heads=config.num_heads,
+                depth=config.depth,
+                group_size=config.group_size,
+                drop_path_rate=config.drop_path_rate,
+                stop_grad=self.stop_grad,
+                pretrained_model_name=config.pretrained_model_name,
+            )
+            self.decoder = GPTGenerator(
+                embed_dim=config.embed_dim,
+                depth=config.decoder_depth,
+                drop_path_rate=config.drop_path_rate,
+                num_heads=config.num_heads,
+                group_size=config.group_size,
+                input_channel=self.input_channel,
+            )
+            self.pos_embed = PositionEmbeddingCoordsSine(3, self.embed_dim, 1.0)
+
+        else:
+            self.group_divider = Group(
+                num_group=config.num_group, group_size=config.group_size
+            )
+            self.encoder = MAEExtractor(
+                embed_dim=config.embed_dim,
+                num_heads=config.num_heads,
+                depth=config.depth,
+                group_size=config.group_size,
+                drop_path_rate=config.drop_path_rate,
+                stop_grad=self.stop_grad,
+                pretrained_model_name=config.pretrained_model_name,
+            )
+            self.decoder = MAEGenerator(
+                embed_dim=config.embed_dim,
+                depth=config.decoder_depth,
+                drop_path_rate=config.drop_path_rate,
+                num_heads=config.num_heads,
+                group_size=config.group_size,
+                input_channel=self.input_channel,
+            )
+            self.pos_embed = nn.Sequential(
+                nn.Linear(3, 128), nn.GELU(), nn.Linear(128, self.embed_dim)
+            )
+            self.decoder_pos_embed = nn.Sequential(
+                nn.Linear(3, 128), nn.GELU(), nn.Linear(128, self.embed_dim)
+            )
+
+        self.norm = nn.LayerNorm(self.embed_dim)
+        self.global_query = nn.Parameter(
+            torch.zeros(1, self.global_query_num, self.embed_dim)
+        )
+        self.apply(self._init_weights)
+
+        # do not perform additional mask on the first (self.keep_attend) tokens
+        self.keep_attend = 10
+        self.num_group = config.num_group
+        self.num_mask = int((self.num_group - self.keep_attend) * self.mask_ratio)
+
+        if config.pretrained_model_name == "":
+            print_log("[ReCon] No pretrained model is loaded.", logger="ReCon")
+        elif config.pretrained_model_name in timm.list_models(pretrained=True):
+            self.encoder.blocks.load_pretrained_timm_weights()
+            print_log(
+                f"[ReCon] Timm pretrained model {config.pretrained_model_name} is successfully loaded.",
+                logger="ReCon",
+            )
+        else:
+            print_log(
+                f"[ReCon] Pretrained model {config.pretrained_model_name} is not found in Timm.",
+                logger="ReCon",
+            )
+
+    def _init_weights(self, m):
+        if isinstance(m, nn.Linear):
+            nn.init.normal_(m.weight, 0.02, 0.01)
+            if isinstance(m, nn.Linear) and m.bias is not None:
+                nn.init.constant_(m.bias, 0)
+        elif isinstance(m, nn.BatchNorm1d):
+            nn.init.constant_(m.bias, 0)
+            nn.init.constant_(m.weight, 1.0)
+
+    def _mask_center_rand(self, center):
+        """
+        center : B G 3
+        --------------
+        mask : B G (bool)
+        """
+        B, G, _ = center.shape
+        num_mask = int(self.mask_ratio * G)
+
+        overall_mask = np.zeros([B, G])
+        for i in range(B):
+            mask = np.hstack([
+                np.zeros(G - num_mask),
+                np.ones(num_mask),
+            ])
+            np.random.shuffle(mask)
+            overall_mask[i, :] = mask
+        overall_mask = torch.from_numpy(overall_mask).to(torch.bool)
+
+        return num_mask, overall_mask.to(center.device)
+
+    def inference(self, pts):
+        with torch.no_grad():
+            neighborhood, center = self.group_divider(pts)
+            group_input_tokens = self.embed(neighborhood)  # B G C
+            batch_size, seq_len, C = group_input_tokens.size()
+
+            global_query = self.global_query.expand(batch_size, -1, -1)
+            pos = self.pos_embed(center.to(group_input_tokens.dtype))
+
+            mask = torch.full(
+                (seq_len, seq_len),
+                -float("Inf"),
+                device=group_input_tokens.device,
+                dtype=group_input_tokens.dtype,
+            ).to(torch.bool)
+            if self.mask_type == "causal":
+                mask = torch.triu(mask, diagonal=1)
+            else:
+                mask = None
+
+            local_features, global_features = self.encoder(
+                group_input_tokens, pos, mask, global_query
+            )
+
+            return pos, local_features, global_features
+
+    def forward_mae(self, pts):
+        neighborhood, center = self.group_divider(pts)
+        num_mask, mask = self._mask_center_rand(center)
+        group_input_tokens = self.embed(neighborhood)  # B G C
+        batch_size, seq_len, C = group_input_tokens.size()
+        global_query = self.global_query.expand(batch_size, -1, -1)
+
+        pos = self.pos_embed(center.reshape(batch_size, -1, 3))
+        decoder_pos = self.decoder_pos_embed(center.reshape(batch_size, -1, 3))
+        x_vis, global_features = self.encoder(
+            group_input_tokens, pos, mask, global_query
+        )
+        generated_points = self.decoder(x_vis, decoder_pos, mask)
+
+        gt_points = neighborhood[mask].reshape(
+            batch_size * num_mask, self.group_size, self.input_channel
+        )
+
+        return generated_points, gt_points, global_features
+
+    def forward_gpt(self, pts):
+        neighborhood, center = self.group_divider(pts)
+        group_input_tokens = self.embed(neighborhood)  # B G C
+        batch_size, seq_len, C = group_input_tokens.size()
+
+        global_query = self.global_query.expand(batch_size, -1, -1)
+        pos_absolute = self.pos_embed(center).to(group_input_tokens.dtype)
+
+        relative_position = center[:, 1:, :] - center[:, :-1, :]
+        relative_norm = torch.norm(relative_position, dim=-1, keepdim=True)
+        relative_direction = relative_position / (relative_norm + 1e-5)
+        position = torch.cat([center[:, 0, :].unsqueeze(1), relative_direction], dim=1)
+        pos_relative = self.pos_embed(position).to(group_input_tokens.dtype)
+
+        attn_mask = torch.full(
+            (seq_len, seq_len),
+            -float("Inf"),
+            device=group_input_tokens.device,
+            dtype=group_input_tokens.dtype,
+        ).to(torch.bool)
+
+        with torch.no_grad():
+            attn_mask = torch.triu(attn_mask, diagonal=1)
+
+            # column wise
+            overall_mask = np.hstack([
+                np.zeros(self.num_group - self.keep_attend - self.num_mask),
+                np.ones(self.num_mask),
+            ])
+            np.random.shuffle(overall_mask)
+            overall_mask = np.hstack([
+                np.zeros(self.keep_attend),
+                overall_mask,
+            ])
+            overall_mask = (
+                torch.from_numpy(overall_mask)
+                .to(torch.bool)
+                .to(group_input_tokens.device)
+            )
+            eye_mask = torch.eye(
+                self.num_group, device=group_input_tokens.device, dtype=torch.bool
+            )
+            attn_mask = attn_mask | overall_mask.unsqueeze(0) & ~eye_mask
+
+        local_features, global_features = self.encoder(
+            group_input_tokens, pos_absolute, attn_mask, global_query
+        )
+        generated_points = self.decoder(local_features, pos_relative, attn_mask)
+
+        gt_points = neighborhood.reshape(
+            batch_size * self.num_group, self.group_size, self.input_channel
+        )
+
+        return generated_points, gt_points, global_features
+
+    def forward(self, pts):
+        if self.mask_type == "causal":
+            generated_points, gt_points, global_query = self.forward_gpt(pts)
+        else:
+            generated_points, gt_points, global_query = self.forward_mae(pts)
+
+        return generated_points, gt_points, global_query
+
+
+@MODELS.register_module()
+class ReCon2(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        print_log("[ReCon V2]", logger="ReCon V2")
+        self.config = config
+        self.embed_dim = config.embed_dim
+        self.with_color = config.with_color
+        self.img_queries = config.img_queries
+        self.text_queries = config.text_queries
+        self.global_query_num = self.img_queries + self.text_queries
+        self.input_channel = 6 if self.with_color else 3
+        self.contrast_type = config.contrast_type
+
+        self.model = MaskTransformer(config)
+        self.cd_loss = ChamferDistance()
+        self.l1_loss = torch.nn.SmoothL1Loss()
+
+        self.img_proj = nn.Linear(self.embed_dim, 1280)
+        self.img_proj.apply(self._init_weights)
+        self.text_proj = nn.Linear(self.embed_dim, 1280)
+        self.text_proj.apply(self._init_weights)
+
+    def _init_weights(self, m):
+        if isinstance(m, nn.Linear):
+            nn.init.normal_(m.weight, 0.02, 0.01)
+            if isinstance(m, nn.Linear) and m.bias is not None:
+                nn.init.constant_(m.bias, 0)
+        elif isinstance(m, nn.BatchNorm1d):
+            nn.init.constant_(m.bias, 0)
+            nn.init.constant_(m.weight, 1.0)
+
+    def info_nce_loss(self, feat1, feat2, logit_scale=1, mask=None):
+        feat1 = F.normalize(feat1, dim=1)
+        feat2 = F.normalize(feat2, dim=1)
+        all_feat1 = torch.cat(torch.distributed.nn.all_gather(feat1), dim=0)
+        all_feat2 = torch.cat(torch.distributed.nn.all_gather(feat2), dim=0)
+        logits = logit_scale * all_feat1 @ all_feat2.T
+        if mask is not None:
+            logits = logits * mask
+        labels = torch.arange(logits.shape[0]).to(self.config.device)
+        accuracy = (logits.argmax(dim=1) == labels).float().mean()
+        loss = (F.cross_entropy(logits, labels) + F.cross_entropy(logits.T, labels)) / 2
+        return loss, accuracy
+
+    def distillation_loss(self, token, feature):
+        B = token.shape[0]
+        loss = 0.0
+        for i in range(B):
+            pred = token[i]
+            feat = feature[i][torch.any(feature[i] != 0, dim=1)]
+            feat = F.normalize(feat, dim=-1)
+            similarity_matrix = torch.mm(pred, feat.T).cpu().detach().numpy()
+            row_ind, col_ind = linear_sum_assignment(-similarity_matrix)
+            loss = loss + self.l1_loss(pred[row_ind], feat[col_ind])
+
+        return loss * 5
+
+    def contrast_loss(self, token, feature):
+        if self.contrast_type == "simclr":
+            return self.info_nce_loss(
+                token, feature, logit_scale=self.logit_scale, mask=self.mask
+            )
+        elif self.contrast_type == "byol":
+            return self.distillation_loss(token, feature)
+        else:
+            raise ValueError("Unknown contrast type")
+
+    def inference(self, pts):
+        _, encoded_features, global_token = self.model.inference(pts)
+
+        img_token = global_token[:, : self.img_queries]
+        img_token = self.img_proj(img_token)
+        img_token = F.normalize(img_token, dim=-1)
+
+        text_token = global_token[:, self.img_queries :]
+        text_token = self.text_proj(text_token)
+        text_token = F.normalize(text_token, dim=-1)
+
+        return encoded_features, global_token, img_token, text_token
+
+    def forward_features(self, pts):
+        generated_points, gt_points, global_token = self.model(pts)
+
+        img_token = global_token[:, : self.img_queries]
+        img_token = self.img_proj(img_token)
+        img_token = F.normalize(img_token, dim=-1)
+
+        text_token = global_token[:, self.img_queries :]
+        text_token = self.text_proj(text_token)
+        text_token = F.normalize(text_token, dim=-1)
+
+        return img_token, text_token, gt_points, generated_points
+
+    def forward_reconstruct(self, pts):
+        _, _, gt_points, generated_points = self.forward_features(pts)
+
+        generated_xyz = generated_points[:, :, :3]
+        gt_xyz = gt_points[:, :, :3]
+        dist1, dist2, idx = self.cd_loss(generated_xyz, gt_xyz)
+        if self.with_color:
+            generated_color = generated_points[:, :, 3:]
+            gt_color = gt_points[:, :, 3:]
+            color_l1_loss = self.l1_loss(
+                generated_color,
+                torch.gather(gt_color, 1, idx.unsqueeze(-1).expand(-1, -1, 3).long()),
+            )
+        else:
+            color_l1_loss = 0
+        cd_l2_loss = (torch.mean(dist1)) + (torch.mean(dist2))
+        cd_l1_loss = (torch.mean(torch.sqrt(dist1)) + torch.mean(torch.sqrt(dist2))) / 2
+
+        loss = cd_l1_loss + cd_l2_loss + color_l1_loss
+
+        return loss
+
+    def forward_contrast(self, pts, img, text):
+        img_token, text_token, _, _ = self.forward_features(pts)
+        img_loss = self.contrast_loss(img_token, img)
+        text_loss = self.contrast_loss(text_token, text)
+        loss = img_loss + text_loss
+
+        return loss
+
+    def forward_all(self, pts, img, text):
+        img_token, text_token, gt_points, generated_points = self.forward_features(pts)
+
+        losses = {"mdm": 0, "csc_img": 0, "csc_text": 0}
+
+        generated_xyz = generated_points[:, :, :3]
+        gt_xyz = gt_points[:, :, :3]
+        dist1, dist2, idx = self.cd_loss(generated_xyz, gt_xyz)
+        if self.with_color:
+            generated_color = generated_points[:, :, 3:]
+            gt_color = gt_points[:, :, 3:]
+            color_l1_loss = self.l1_loss(
+                generated_color,
+                torch.gather(gt_color, 1, idx.unsqueeze(-1).expand(-1, -1, 3).long()),
+            )
+        else:
+            color_l1_loss = 0
+        cd_l2_loss = (torch.mean(dist1)) + (torch.mean(dist2))
+        cd_l1_loss = (torch.mean(torch.sqrt(dist1)) + torch.mean(torch.sqrt(dist2))) / 2
+
+        losses["mdm"] = cd_l1_loss + cd_l2_loss + color_l1_loss
+        losses["csc_img"] = self.contrast_loss(img_token, img)
+        losses["csc_text"] = self.contrast_loss(text_token, text)
+
+        print(losses)
+        loss = sum(losses.values())
+        return loss
+
+    def forward(self, pts, img, text, type="all"):
+        if type == "all":
+            return self.forward_all(pts, img, text)
+        elif type == "reconstruct":
+            return self.forward_reconstruct(pts)
+        elif type == "contrast":
+            return self.forward_contrast(pts, img, text)
+        else:
+            raise ValueError("Unknown type")
+
+    @property
+    def device(self):
+        return next(self.parameters()).device
+
+    @property
+    def dtype(self):
+        return next(self.parameters()).dtype
+
+
+# finetune model
+@MODELS.register_module()
+class PointTransformer(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.config = config
+
+        self.cls_dim = config.cls_dim
+        self.embed_dim = config.embed_dim
+        self.with_color = config.with_color
+        self.input_channel = 6 if self.with_color else 3
+        self.num_group = config.num_group
+        self.group_size = config.group_size
+        self.img_queries = config.img_queries
+        self.text_queries = config.text_queries
+        self.global_query_num = self.img_queries + self.text_queries
+        self.large_embedding = config.large_embedding
+
+        self.embed = PatchEmbedding(
+            embed_dim=self.embed_dim,
+            input_channel=self.input_channel,
+            large=self.large_embedding,
+        )
+        self.pos_embed = PositionEmbeddingCoordsSine(3, self.embed_dim, 1.0)
+
+        self.group_divider = ZGroup(
+            num_group=config.num_group, group_size=config.group_size
+        )
+        print_log(
+            f"[PointTransformer] divide point cloud into G{config.num_group} x S{config.group_size} points ...",
+            logger="PointTransformer",
+        )
+
+        self.encoder = GPTExtractor(
+            embed_dim=config.embed_dim,
+            num_heads=config.num_heads,
+            depth=config.depth,
+            group_size=config.group_size,
+            drop_path_rate=config.drop_path_rate,
+            stop_grad=False,
+        )
+
+        self.decoder = GPTGenerator(
+            embed_dim=config.embed_dim,
+            depth=config.decoder_depth,
+            drop_path_rate=config.drop_path_rate,
+            num_heads=config.num_heads,
+            group_size=config.group_size,
+            input_channel=self.input_channel,
+        )
+        self.global_query = nn.Parameter(
+            torch.zeros(1, self.global_query_num, self.embed_dim)
+        )
+        self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
+
+        feature_dim = self.embed_dim * 4
+        self.cls_head_finetune = nn.Sequential(
+            nn.Linear(feature_dim, 256),
+            nn.BatchNorm1d(256),
+            nn.ReLU(inplace=True),
+            nn.Dropout(0.5),
+            nn.Linear(256, 256),
+            nn.BatchNorm1d(256),
+            nn.ReLU(inplace=True),
+            nn.Dropout(0.5),
+            nn.Linear(256, self.cls_dim),
+        )
+
+        self.loss_ce = nn.CrossEntropyLoss()
+        # chamfer distance loss
+        self.cd_loss = ChamferDistance()
+        self.apply(self._init_weights)
+
+    def get_loss_acc(self, ret, gt):
+        loss = self.loss_ce(ret, gt.long())
+        pred = ret.argmax(-1)
+        acc = (pred == gt).sum() / float(gt.size(0))
+        return loss, acc * 100
+
+    def load_model_from_ckpt(self, ckpt_path, log=True):
+        if ckpt_path is not None:
+            ckpt = torch.load(ckpt_path)
+            base_ckpt = {
+                k.replace("module.", ""): v for k, v in ckpt["base_model"].items()
+            }
| 529 |
+
|
| 530 |
+
for k in list(base_ckpt.keys()):
|
| 531 |
+
if k.startswith("model"):
|
| 532 |
+
base_ckpt[k[len("model.") :]] = base_ckpt[k]
|
| 533 |
+
del base_ckpt[k]
|
| 534 |
+
elif k.startswith("cls_head_finetune"):
|
| 535 |
+
del base_ckpt[k]
|
| 536 |
+
|
| 537 |
+
incompatible = self.load_state_dict(base_ckpt, strict=False)
|
| 538 |
+
if log:
|
| 539 |
+
if incompatible.missing_keys:
|
| 540 |
+
print_log("missing_keys", logger="PointTransformer")
|
| 541 |
+
print_log(
|
| 542 |
+
get_missing_parameters_message(incompatible.missing_keys),
|
| 543 |
+
logger="PointTransformer",
|
| 544 |
+
)
|
| 545 |
+
if incompatible.unexpected_keys:
|
| 546 |
+
print_log("unexpected_keys", logger="PointTransformer")
|
| 547 |
+
print_log(
|
| 548 |
+
get_unexpected_parameters_message(incompatible.unexpected_keys),
|
| 549 |
+
logger="PointTransformer",
|
| 550 |
+
)
|
| 551 |
+
|
| 552 |
+
print_log(
|
| 553 |
+
f"[PointTransformer] Successful Loading the ckpt from {ckpt_path}",
|
| 554 |
+
logger="PointTransformer",
|
| 555 |
+
)
|
| 556 |
+
else:
|
| 557 |
+
print_log("Training from scratch!!!", logger="PointTransformer")
|
| 558 |
+
|
| 559 |
+
def _init_weights(self, m):
|
| 560 |
+
if isinstance(m, nn.Linear):
|
| 561 |
+
trunc_normal_(m.weight, std=0.02)
|
| 562 |
+
if isinstance(m, nn.Linear) and m.bias is not None:
|
| 563 |
+
nn.init.constant_(m.bias, 0)
|
| 564 |
+
elif isinstance(m, nn.LayerNorm):
|
| 565 |
+
nn.init.constant_(m.bias, 0)
|
| 566 |
+
nn.init.constant_(m.weight, 1.0)
|
| 567 |
+
elif isinstance(m, nn.Conv1d):
|
| 568 |
+
trunc_normal_(m.weight, std=0.02)
|
| 569 |
+
if m.bias is not None:
|
| 570 |
+
nn.init.constant_(m.bias, 0)
|
| 571 |
+
|
| 572 |
+
def forward(self, pts):
|
| 573 |
+
neighborhood, center = self.group_divider(pts)
|
| 574 |
+
group_input_tokens = self.embed(neighborhood) # B G C
|
| 575 |
+
batch_size, seq_len, C = group_input_tokens.size()
|
| 576 |
+
|
| 577 |
+
global_query = self.global_query.expand(batch_size, -1, -1)
|
| 578 |
+
cls_query = self.cls_token.expand(batch_size, -1, -1)
|
| 579 |
+
query = torch.cat([global_query, cls_query], dim=1)
|
| 580 |
+
|
| 581 |
+
relative_position = center[:, 1:, :] - center[:, :-1, :]
|
| 582 |
+
relative_norm = torch.norm(relative_position, dim=-1, keepdim=True)
|
| 583 |
+
relative_direction = relative_position / (relative_norm + 1e-5)
|
| 584 |
+
position = torch.cat([center[:, 0, :].unsqueeze(1), relative_direction], dim=1)
|
| 585 |
+
pos_relative = self.pos_embed(position).to(group_input_tokens.dtype)
|
| 586 |
+
|
| 587 |
+
pos = self.pos_embed(center).to(group_input_tokens.dtype)
|
| 588 |
+
|
| 589 |
+
attn_mask = torch.full(
|
| 590 |
+
(seq_len, seq_len),
|
| 591 |
+
-float("Inf"),
|
| 592 |
+
device=group_input_tokens.device,
|
| 593 |
+
dtype=group_input_tokens.dtype,
|
| 594 |
+
).to(torch.bool)
|
| 595 |
+
attn_mask = torch.triu(attn_mask, diagonal=1)
|
| 596 |
+
|
| 597 |
+
# transformer
|
| 598 |
+
encoded_features, global_tokens = self.encoder(
|
| 599 |
+
group_input_tokens, pos, attn_mask, query
|
| 600 |
+
)
|
| 601 |
+
generated_points = self.decoder(encoded_features, pos_relative, attn_mask)
|
| 602 |
+
|
| 603 |
+
# neighborhood[:, :, :, :3] = neighborhood[:, :, :, :3] + center.unsqueeze(2)
|
| 604 |
+
gt_points = neighborhood.reshape(
|
| 605 |
+
batch_size * self.num_group, self.group_size, self.input_channel
|
| 606 |
+
)
|
| 607 |
+
|
| 608 |
+
generated_xyz = generated_points[:, :, :3]
|
| 609 |
+
gt_xyz = gt_points[:, :, :3]
|
| 610 |
+
dist1, dist2, idx = self.cd_loss(generated_xyz, gt_xyz)
|
| 611 |
+
|
| 612 |
+
cd_l2_loss = (torch.mean(dist1)) + (torch.mean(dist2))
|
| 613 |
+
cd_l1_loss = (torch.mean(torch.sqrt(dist1)) + torch.mean(torch.sqrt(dist2))) / 2
|
| 614 |
+
|
| 615 |
+
img_token = global_tokens[:, : self.img_queries]
|
| 616 |
+
text_token = global_tokens[:, self.img_queries : -1]
|
| 617 |
+
cls_token = global_tokens[:, -1]
|
| 618 |
+
|
| 619 |
+
concat_f = torch.cat(
|
| 620 |
+
[
|
| 621 |
+
cls_token,
|
| 622 |
+
img_token.max(1)[0],
|
| 623 |
+
text_token.max(1)[0],
|
| 624 |
+
encoded_features.max(1)[0],
|
| 625 |
+
],
|
| 626 |
+
dim=-1,
|
| 627 |
+
)
|
| 628 |
+
ret = self.cls_head_finetune(concat_f)
|
| 629 |
+
|
| 630 |
+
return ret, cd_l1_loss + cd_l2_loss
|
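A note on the loss above: the `mdm` term combines an L1-style and an L2-style Chamfer distance between generated and ground-truth patches. Below is a minimal CPU sketch of that combination; `naive_chamfer` is a hypothetical brute-force stand-in for the compiled `ChamferDistance` extension, written only to illustrate the `dist1`/`dist2`/`idx` contract, not the repository's kernel.

import torch


def naive_chamfer(a: torch.Tensor, b: torch.Tensor):
    # a: (B, N, 3) generated xyz, b: (B, M, 3) ground-truth xyz
    d = torch.cdist(a, b) ** 2   # (B, N, M) squared pairwise distances
    dist1, idx = d.min(dim=2)    # each generated point -> nearest gt point
    dist2 = d.min(dim=1).values  # each gt point -> nearest generated point
    return dist1, dist2, idx


generated = torch.rand(4, 32, 3)
gt = torch.rand(4, 32, 3)
dist1, dist2, idx = naive_chamfer(generated, gt)

# the same two terms as in forward_reconstruct / forward_all
cd_l2 = dist1.mean() + dist2.mean()
cd_l1 = (dist1.sqrt().mean() + dist2.sqrt().mean()) / 2
print(cd_l1.item(), cd_l2.item())

The returned `idx` plays the same role as in the color branch above: it selects, for each generated point, the ground-truth point whose color it is compared against.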
backend/ReConV2/models/__init__.py
ADDED
@@ -0,0 +1,4 @@
import ReConV2.models.ReCon
import ReConV2.models.transformer

from .build import build_model_from_cfg
backend/ReConV2/models/build.py
ADDED
@@ -0,0 +1,14 @@
from ReConV2.utils import registry

MODELS = registry.Registry("models")


def build_model_from_cfg(cfg, **kwargs):
    """
    Build a model from the given config.
    Args:
        cfg (EasyDict): model config.
    Returns:
        nn.Module: a constructed model specified by the config.
    """
    return MODELS.build(cfg, **kwargs)
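For context, a hypothetical usage sketch of this factory. It assumes the registry resolves the target class through a `NAME` field in the config, as in similar Point-BERT-style codebases; the field values below are illustrative, taken from the `PointTransformer.__init__` signature above rather than from a shipped config file.

from easydict import EasyDict

from ReConV2.models import build_model_from_cfg

cfg = EasyDict(
    NAME="PointTransformer",  # assumed lookup key; must match a class
                              # registered via @MODELS.register_module()
    cls_dim=40,
    embed_dim=768,
    with_color=False,
    num_group=64,
    group_size=32,
    img_queries=1,
    text_queries=1,
    large_embedding=False,
    num_heads=12,
    depth=12,
    decoder_depth=4,
    drop_path_rate=0.1,
)
model = build_model_from_cfg(cfg)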
backend/ReConV2/models/transformer.py
ADDED
@@ -0,0 +1,788 @@
import math

import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import DropPath, Mlp

from ReConV2.utils import misc
from ReConV2.utils.knn import knn_point
from ReConV2.utils.logger import *


class PatchEmbedding(nn.Module):  # Embedding module
    def __init__(self, embed_dim, input_channel=3, large=False):
        super().__init__()
        self.embed_dim = embed_dim
        self.input_channel = input_channel

        # embed_dim_list = [c * (embed_dim // 512 + 1) for c in [128, 256, 512]]
        #
        # self.first_conv = nn.Sequential(
        #     nn.Conv1d(self.input_channel, embed_dim_list[0], 1),
        #     nn.BatchNorm1d(embed_dim_list[0]),
        #     nn.ReLU(inplace=True),
        #     nn.Conv1d(embed_dim_list[0], embed_dim_list[1], 1)
        # )
        # self.second_conv = nn.Sequential(
        #     nn.Conv1d(embed_dim_list[2], embed_dim_list[2], 1),
        #     nn.BatchNorm1d(embed_dim_list[2]),
        #     nn.ReLU(inplace=True),
        #     nn.Conv1d(embed_dim_list[2], self.embed_dim, 1)
        # )

        if large:
            self.first_conv = nn.Sequential(
                nn.Conv1d(self.input_channel, 256, 1),
                nn.BatchNorm1d(256),
                nn.ReLU(inplace=True),
                nn.Conv1d(256, 512, 1),
                nn.BatchNorm1d(512),
                nn.ReLU(inplace=True),
                nn.Conv1d(512, 1024, 1),
            )
            self.second_conv = nn.Sequential(
                nn.Conv1d(2048, 2048, 1),
                nn.BatchNorm1d(2048),
                nn.ReLU(inplace=True),
                nn.Conv1d(2048, embed_dim, 1),
            )
        else:
            self.first_conv = nn.Sequential(
                nn.Conv1d(self.input_channel, 128, 1),
                nn.BatchNorm1d(128),
                nn.ReLU(inplace=True),
                nn.Conv1d(128, 256, 1),
            )
            self.second_conv = nn.Sequential(
                nn.Conv1d(512, 512, 1),
                nn.BatchNorm1d(512),
                nn.ReLU(inplace=True),
                nn.Conv1d(512, embed_dim, 1),
            )

    def forward(self, point_groups):
        """
        point_groups : B G N 3/6
        -----------------
        feature_global : B G C
        """
        bs, g, n, _ = point_groups.shape
        point_groups = point_groups.reshape(bs * g, n, self.input_channel)
        # encoder
        feature = self.first_conv(point_groups.transpose(2, 1))
        feature_global = torch.max(feature, dim=2, keepdim=True)[0]
        feature = torch.cat([feature_global.expand(-1, -1, n), feature], dim=1)
        feature = self.second_conv(feature)
        feature_global = torch.max(feature, dim=2, keepdim=False)[0]
        return feature_global.reshape(bs, g, self.embed_dim)


class PositionEmbeddingCoordsSine(nn.Module):
    """Similar to the transformer's position encoding, but generalized to
    arbitrary dimensions and continuous coordinates.

    Args:
        n_dim: Number of input dimensions, e.g. 2 for image coordinates.
        d_model: Number of dimensions to encode into.
        temperature:
        scale:
    """

    def __init__(self, n_dim: int = 1, d_model: int = 256, temperature=1.0, scale=None):
        super().__init__()

        self.n_dim = n_dim
        self.num_pos_feats = d_model // n_dim // 2 * 2
        self.temperature = temperature
        self.padding = d_model - self.num_pos_feats * self.n_dim

        if scale is None:
            scale = 1.0
        self.scale = scale * 2 * math.pi

    def forward(self, xyz: torch.Tensor) -> torch.Tensor:
        """
        Args:
            xyz: Point positions (*, d_in)

        Returns:
            pos_emb (*, d_out)
        """
        assert xyz.shape[-1] == self.n_dim

        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=xyz.device)
        dim_t = self.temperature ** (
            2 * torch.div(dim_t, 2, rounding_mode="trunc") / self.num_pos_feats
        )

        xyz = xyz * self.scale
        pos_divided = xyz.unsqueeze(-1) / dim_t
        pos_sin = pos_divided[..., 0::2].sin()
        pos_cos = pos_divided[..., 1::2].cos()
        pos_emb = torch.stack([pos_sin, pos_cos], dim=-1).reshape(*xyz.shape[:-1], -1)

        # Pad unused dimensions with zeros
        pos_emb = F.pad(pos_emb, (0, self.padding))
        return pos_emb


class Group(nn.Module):  # FPS + KNN
    def __init__(self, num_group, group_size):
        super().__init__()
        self.num_group = num_group
        self.group_size = group_size

    def forward(self, pts):
        """
        input: B N 3/6
        ---------------------------
        output: B G M 3/6
        center : B G 3
        """
        xyz = pts[:, :, :3]
        c = pts.shape[2]
        batch_size, num_points, _ = xyz.shape
        # fps the centers out
        xyz = xyz.float()
        center = misc.fps(xyz.contiguous(), self.num_group)  # B G 3
        # knn to get the neighborhood
        idx = knn_point(self.group_size, xyz, center)
        assert idx.size(1) == self.num_group
        assert idx.size(2) == self.group_size
        idx_base = (
            torch.arange(0, batch_size, device=xyz.device).view(-1, 1, 1) * num_points
        )
        idx = idx + idx_base
        idx = idx.view(-1)
        neighborhood = pts.view(batch_size * num_points, -1)[idx, :]
        neighborhood = neighborhood.view(
            batch_size, self.num_group, self.group_size, c
        ).contiguous()
        # normalize
        neighborhood[:, :, :, :3] = neighborhood[:, :, :, :3] - center.unsqueeze(2)
        return neighborhood, center


class ZGroup(nn.Module):
    def __init__(self, num_group, group_size):
        super().__init__()
        self.num_group = num_group
        self.group_size = group_size

    def simplied_morton_sorting(self, xyz, center):
        """
        Simplified Morton-code sorting: iterate over the patches, repeatedly
        taking the patch nearest to the last selected one as the next patch;
        we found this to be more efficient.
        """
        batch_size, num_points, _ = xyz.shape
        distances_batch = torch.cdist(center, center)
        distances_batch[:, torch.eye(self.num_group).bool()] = float("inf")
        idx_base = torch.arange(0, batch_size, device=xyz.device) * self.num_group
        sorted_indices_list = [idx_base]
        distances_batch = (
            distances_batch.view(batch_size, self.num_group, self.num_group)
            .transpose(1, 2)
            .contiguous()
            .view(batch_size * self.num_group, self.num_group)
        )
        distances_batch[idx_base] = float("inf")
        distances_batch = (
            distances_batch.view(batch_size, self.num_group, self.num_group)
            .transpose(1, 2)
            .contiguous()
        )
        for i in range(self.num_group - 1):
            distances_batch = distances_batch.view(
                batch_size * self.num_group, self.num_group
            )
            distances_to_last_batch = distances_batch[sorted_indices_list[-1]]
            closest_point_idx = torch.argmin(distances_to_last_batch, dim=-1)
            closest_point_idx = closest_point_idx + idx_base
            sorted_indices_list.append(closest_point_idx)
            distances_batch = (
                distances_batch.view(batch_size, self.num_group, self.num_group)
                .transpose(1, 2)
                .contiguous()
                .view(batch_size * self.num_group, self.num_group)
            )
            distances_batch[closest_point_idx] = float("inf")
            distances_batch = (
                distances_batch.view(batch_size, self.num_group, self.num_group)
                .transpose(1, 2)
                .contiguous()
            )
        sorted_indices = torch.stack(sorted_indices_list, dim=-1)
        sorted_indices = sorted_indices.view(-1)
        return sorted_indices

    def forward(self, pts):
        """
        input: B N 3/6
        ---------------------------
        output: B G M 3/6
        center : B G 3
        """
        xyz = pts[:, :, :3]
        c = pts.shape[2]
        batch_size, num_points, _ = xyz.shape
        # fps the centers out
        xyz = xyz.float()
        center = misc.fps(xyz.contiguous(), self.num_group)  # B G 3
        # knn to get the neighborhood
        idx = knn_point(self.group_size, xyz, center)
        assert idx.size(1) == self.num_group
        assert idx.size(2) == self.group_size
        idx_base = (
            torch.arange(0, batch_size, device=xyz.device).view(-1, 1, 1) * num_points
        )
        idx = idx + idx_base
        idx = idx.view(-1)
        neighborhood = pts.view(batch_size * num_points, -1)[idx, :]
        neighborhood = neighborhood.view(
            batch_size, self.num_group, self.group_size, c
        ).contiguous()
        # normalize
        neighborhood[:, :, :, :3] = neighborhood[:, :, :, :3] - center.unsqueeze(2)

        # can utilize full Morton sorting by choosing a morton_sorting function
        sorted_indices = self.simplied_morton_sorting(xyz, center)

        neighborhood = neighborhood.view(
            batch_size * self.num_group, self.group_size, c
        )[sorted_indices, :, :]
        neighborhood = neighborhood.view(
            batch_size, self.num_group, self.group_size, c
        ).contiguous()
        center = center.view(batch_size * self.num_group, 3)[sorted_indices, :]
        center = center.view(batch_size, self.num_group, 3).contiguous()

        return neighborhood, center


# Transformers
class Attention(nn.Module):
    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        qkv_bias: bool = True,
        qk_norm: bool = False,
        attn_drop: float = 0.0,
        proj_drop: float = 0.0,
        norm_layer: nn.Module = nn.LayerNorm,
    ) -> None:
        super().__init__()
        assert dim % num_heads == 0, "dim should be divisible by num_heads"
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim**-0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(
        self, x: torch.Tensor, mask: torch.Tensor | None = None
    ) -> torch.Tensor:
        B, N, C = x.shape
        qkv = (
            self.qkv(x)
            .reshape(B, N, 3, self.num_heads, self.head_dim)
            .permute(2, 0, 3, 1, 4)
        )
        q, k, v = qkv.unbind(0)
        q, k = self.q_norm(q), self.k_norm(k)

        q = q * self.scale
        attn = q @ k.transpose(-2, -1)
        if mask is not None:
            attn = attn.masked_fill(mask, float("-inf"))
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = attn @ v

        x = x.transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class CrossAttention(nn.Module):
    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        qkv_bias: bool = True,
        qk_norm: bool = False,
        attn_drop: float = 0.0,
        proj_drop: float = 0.0,
        norm_layer: nn.Module = nn.LayerNorm,
    ) -> None:
        super().__init__()
        assert dim % num_heads == 0, "dim should be divisible by num_heads"
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim**-0.5

        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
        self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(
        self, x: torch.Tensor, y: torch.Tensor, mask: torch.Tensor | None = None
    ) -> torch.Tensor:
        B, N, C = y.shape
        kv = (
            self.kv(y)
            .reshape(B, N, 2, self.num_heads, C // self.num_heads)
            .permute(2, 0, 3, 1, 4)
        )
        k, v = kv.unbind(0)

        B, N, C = x.shape
        q = (
            self.q(x)
            .reshape(B, N, 1, self.num_heads, C // self.num_heads)
            .permute(2, 0, 3, 1, 4)[0]
        )

        q, k = self.q_norm(q), self.k_norm(k)
        q = q * self.scale
        attn = q @ k.transpose(-2, -1)
        if mask is not None:
            attn = attn.masked_fill(mask, float("-inf"))
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = attn @ v

        x = x.transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class LayerScale(nn.Module):
    def __init__(
        self,
        dim: int,
        init_values: float = 1e-5,
        inplace: bool = False,
    ) -> None:
        super().__init__()
        self.inplace = inplace
        self.gamma = nn.Parameter(init_values * torch.ones(dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x.mul_(self.gamma) if self.inplace else x * self.gamma


class Block(nn.Module):
    def __init__(
        self,
        dim: int,
        num_heads: int,
        mlp_ratio: float = 4.0,
        qkv_bias: bool = True,
        qk_norm: bool = False,
        proj_drop: float = 0.0,
        attn_drop: float = 0.0,
        init_values: float | None = None,
        drop_path: float = 0.0,
        act_layer: nn.Module = nn.GELU,
        norm_layer: nn.Module = nn.LayerNorm,
    ) -> None:
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_norm=qk_norm,
            attn_drop=attn_drop,
            proj_drop=proj_drop,
            norm_layer=norm_layer,
        )
        self.ls1 = (
            LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
        )
        self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=int(dim * mlp_ratio),
            act_layer=act_layer,
            drop=proj_drop,
        )
        self.ls2 = (
            LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
        )
        self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

    def forward(self, x, attn_mask=None):
        x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x), attn_mask)))
        x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
        return x


class CrossBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_heads: int,
        mlp_ratio: float = 4.0,
        qkv_bias: bool = True,
        qk_norm: bool = False,
        proj_drop: float = 0.0,
        attn_drop: float = 0.0,
        init_values: float | None = None,
        drop_path: float = 0.0,
        act_layer: nn.Module = nn.GELU,
        norm_layer: nn.Module = nn.LayerNorm,
        stop_grad: bool = False,
    ) -> None:
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = CrossAttention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_norm=qk_norm,
            attn_drop=attn_drop,
            proj_drop=proj_drop,
            norm_layer=norm_layer,
        )
        self.ls1 = (
            LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
        )
        self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=int(dim * mlp_ratio),
            act_layer=act_layer,
            drop=proj_drop,
        )
        self.ls2 = (
            LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
        )
        self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

        self.stop_grad = stop_grad

    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        if self.stop_grad:
            x = x + self.drop_path1(
                self.ls1(self.attn(self.norm1(x), self.norm1(y.detach())))
            )
        else:
            x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x), self.norm1(y))))

        x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
        return x


class ReConBlocks(nn.Module):
    def __init__(
        self,
        embed_dim: int = 768,
        depth: int = 12,
        num_heads: int = 12,
        mlp_ratio: float = 4.0,
        qkv_bias: bool = True,
        qk_norm: bool = False,
        init_values: float | None = None,
        proj_drop: float = 0.0,
        attn_drop_rate: float = 0.0,
        drop_path_rate: list = [],
        norm_layer: nn.Module = nn.LayerNorm,
        act_layer: nn.Module = nn.GELU,
        stop_grad: bool = False,
        pretrained_model_name: str = "vit_base_patch32_clip_224.openai",
        every_layer_add_pos: bool = True,
    ):
        super().__init__()

        self.depth = depth
        self.stop_grad = stop_grad
        self.pretrained_model_name = pretrained_model_name
        self.every_layer_add_pos = every_layer_add_pos
        if "dino" in self.pretrained_model_name:
            init_values = 1e-5
        if "giant" in self.pretrained_model_name:
            mlp_ratio = 48 / 11
        self.local_blocks = nn.Sequential(*[
            Block(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_norm=qk_norm,
                init_values=init_values,
                proj_drop=proj_drop,
                attn_drop=attn_drop_rate,
                drop_path=drop_path_rate[i],
                norm_layer=norm_layer,
                act_layer=act_layer,
            )
            for i in range(depth)
        ])

        self.global_blocks = nn.Sequential(*[
            CrossBlock(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_norm=qk_norm,
                init_values=init_values,
                proj_drop=proj_drop,
                attn_drop=attn_drop_rate,
                drop_path=drop_path_rate[i],
                norm_layer=norm_layer,
                act_layer=act_layer,
                stop_grad=stop_grad,
            )
            for i in range(depth)
        ])

    def load_pretrained_timm_weights(self):
        model = timm.create_model(self.pretrained_model_name, pretrained=True)
        state_dict = model.blocks.state_dict()
        self.local_blocks.load_state_dict(state_dict, strict=True)

        cross_state_dict = {}
        for k, v in state_dict.items():
            if "qkv" in k:
                cross_state_dict[k.replace("qkv", "q")] = v[: int(v.shape[0] / 3)]
                cross_state_dict[k.replace("qkv", "kv")] = v[int(v.shape[0] / 3) :]
            else:
                cross_state_dict[k] = v
        self.global_blocks.load_state_dict(cross_state_dict, strict=True)

    def forward(self, x, pos, attn_mask=None, query=None):
        if self.every_layer_add_pos:
            for i in range(self.depth):
                x = self.local_blocks[i](x + pos, attn_mask)
                if query is not None:
                    query = self.global_blocks[i](query, x)
        else:
            x = x + pos
            for i in range(self.depth):
                x = self.local_blocks[i](x, attn_mask)
                if query is not None:
                    query = self.global_blocks[i](query, x)
        return x, query


class GPTExtractor(nn.Module):
    def __init__(
        self,
        embed_dim: int = 768,
        num_heads: int = 12,
        depth: int = 12,
        group_size: int = 32,
        drop_path_rate: float = 0.0,
        stop_grad: bool = False,
        pretrained_model_name: str = "vit_base_patch32_clip_224.openai",
    ):
        super().__init__()

        self.embed_dim = embed_dim
        self.group_size = group_size

        # start of sequence token
        self.sos = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.sos_pos = nn.Parameter(torch.zeros(1, 1, embed_dim))
        nn.init.normal_(self.sos)
        nn.init.normal_(self.sos_pos)

        drop_path_rate = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.blocks = ReConBlocks(
            embed_dim=embed_dim,
            num_heads=num_heads,
            depth=depth,
            drop_path_rate=drop_path_rate,
            stop_grad=stop_grad,
            pretrained_model_name=pretrained_model_name,
        )

        self.ln_f1 = nn.LayerNorm(embed_dim)
        self.ln_f2 = nn.LayerNorm(embed_dim)

    def forward(self, x, pos, attn_mask, query):
        """
        Expect input as shape [batch, sequence len, dim]
        """

        batch, length, _ = x.shape

        # prepend sos token
        sos = self.sos.expand(batch, -1, -1)
        sos_pos = self.sos_pos.expand(batch, -1, -1)

        x = torch.cat([sos, x[:, :-1]], dim=1)
        pos = torch.cat([sos_pos, pos[:, :-1]], dim=1)

        # transformer
        x, query = self.blocks(x, pos, attn_mask, query)

        encoded_points = self.ln_f1(x)
        query = self.ln_f2(query)

        return encoded_points, query


class GPTGenerator(nn.Module):
    def __init__(
        self,
        embed_dim: int = 768,
        num_heads: int = 12,
        depth: int = 4,
        group_size: int = 32,
        drop_path_rate: float = 0.0,
        input_channel: int = 3,
    ):
        super().__init__()

        self.embed_dim = embed_dim
        self.input_channel = input_channel

        drop_path_rate = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.blocks = nn.ModuleList([
            Block(dim=embed_dim, num_heads=num_heads, drop_path=drop_path_rate[i])
            for i in range(depth)
        ])

        self.ln_f = nn.LayerNorm(embed_dim)
        self.increase_dim = nn.Sequential(
            nn.Conv1d(embed_dim, input_channel * group_size, 1)
        )

    def forward(self, x, pos, attn_mask):
        batch, length, C = x.shape

        # transformer
        for block in self.blocks:
            x = block(x + pos, attn_mask)

        x = self.ln_f(x)

        rebuild_points = (
            self.increase_dim(x.transpose(1, 2))
            .transpose(1, 2)
            .reshape(batch * length, -1, self.input_channel)
        )

        return rebuild_points


class MAEExtractor(nn.Module):
    def __init__(
        self,
        embed_dim: int = 768,
        num_heads: int = 12,
        depth: int = 12,
        group_size: int = 32,
        drop_path_rate: float = 0.0,
        stop_grad: bool = False,
        pretrained_model_name: str = "vit_base_patch32_clip_224.openai",
    ):
        super().__init__()

        self.embed_dim = embed_dim
        self.group_size = group_size

        drop_path_rate = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.blocks = ReConBlocks(
            embed_dim=embed_dim,
            num_heads=num_heads,
            depth=depth,
            drop_path_rate=drop_path_rate,
            stop_grad=stop_grad,
            pretrained_model_name=pretrained_model_name,
        )

        self.ln_f1 = nn.LayerNorm(embed_dim)
        self.ln_f2 = nn.LayerNorm(embed_dim)

    def forward(self, x, pos, mask=None, query=None):
        """
        Expect input as shape [batch, sequence len, dim]
        """

        batch, length, C = x.shape
        if mask is not None:
            x_vis = x[~mask].reshape(batch, -1, C)
            pos_vis = pos[~mask].reshape(batch, -1, C)
        else:
            x_vis = x
            pos_vis = pos

        # transformer
        x_vis, query = self.blocks(x_vis, pos_vis, None, query)

        encoded_points = self.ln_f1(x_vis)
        query = self.ln_f2(query)

        return encoded_points, query


class MAEGenerator(nn.Module):
    def __init__(
        self,
        embed_dim: int = 768,
        num_heads: int = 12,
        depth: int = 4,
        group_size: int = 32,
        drop_path_rate: float = 0.0,
        input_channel: int = 3,
    ):
        super().__init__()

        self.embed_dim = embed_dim
        self.input_channel = input_channel
        self.mask_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))

        drop_path_rate = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.blocks = nn.ModuleList([
            Block(dim=embed_dim, num_heads=num_heads, drop_path=drop_path_rate[i])
            for i in range(depth)
        ])

        self.ln_f = nn.LayerNorm(embed_dim)
        self.increase_dim = nn.Sequential(
            nn.Conv1d(embed_dim, input_channel * group_size, 1)
        )

    def forward(self, x_vis, pos, mask):
        batch, length, C = x_vis.shape

        pos_vis = pos[~mask].reshape(batch, -1, C)
        pos_mask = pos[mask].reshape(batch, -1, C)
        pos_full = torch.cat([pos_vis, pos_mask], dim=1)
        mask_token = self.mask_token.expand(batch, pos_mask.shape[1], -1)
        x = torch.cat([x_vis, mask_token], dim=1)

        # transformer
        for block in self.blocks:
            x = block(x + pos_full)

        x = self.ln_f(x[:, -pos_mask.shape[1] :])

        rebuild_points = (
            self.increase_dim(x.transpose(1, 2))
            .transpose(1, 2)
            .reshape(batch * pos_mask.shape[1], -1, self.input_channel)
        )

        return rebuild_points
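A minimal shape check for the pieces above, using random tensors in place of the FPS/KNN grouping (which requires the CUDA extensions in `ReConV2.utils.misc`): it builds the sine positional encoding, the causal mask used by `GPTExtractor`, and runs one `Block`. All dimensions below are illustrative.

import torch

from ReConV2.models.transformer import Block, PositionEmbeddingCoordsSine

B, G, D = 2, 64, 384
centers = torch.rand(B, G, 3)  # patch centers
tokens = torch.rand(B, G, D)   # patch embeddings (stand-in for PatchEmbedding output)
pos = PositionEmbeddingCoordsSine(3, D, 1.0)(centers)

# True above the diagonal masks future patches, giving the GPT-style
# left-to-right attention over the Morton-sorted patch sequence
attn_mask = torch.triu(torch.ones(G, G, dtype=torch.bool), diagonal=1)

out = Block(dim=D, num_heads=6)(tokens + pos, attn_mask)
print(out.shape)  # torch.Size([2, 64, 384])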
backend/ReConV2/utils/checkpoint.py
ADDED
@@ -0,0 +1,129 @@
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

from collections import defaultdict
from collections.abc import Iterable
from typing import Any

import torch.nn as nn
from termcolor import colored


def get_missing_parameters_message(keys: list[str]) -> str:
    """
    Get a logging-friendly message to report parameter names (keys) that are in
    the model but not found in a checkpoint.
    Args:
        keys (list[str]): List of keys that were not found in the checkpoint.
    Returns:
        str: message.
    """
    groups = _group_checkpoint_keys(keys)
    msg = "Some model parameters or buffers are not found in the checkpoint:\n"
    msg += "\n".join(
        "  " + colored(k + _group_to_str(v), "blue") for k, v in groups.items()
    )
    return msg


def get_unexpected_parameters_message(keys: list[str]) -> str:
    """
    Get a logging-friendly message to report parameter names (keys) that are in
    the checkpoint but not found in the model.
    Args:
        keys (list[str]): List of keys that were not found in the model.
    Returns:
        str: message.
    """
    groups = _group_checkpoint_keys(keys)
    msg = "The checkpoint state_dict contains keys that are not used by the model:\n"
    msg += "\n".join(
        "  " + colored(k + _group_to_str(v), "magenta") for k, v in groups.items()
    )
    return msg


def _strip_prefix_if_present(state_dict: dict[str, Any], prefix: str) -> None:
    """
    Strip the prefix in state_dict keys and metadata, if any.
    Args:
        state_dict (OrderedDict): a state-dict to be loaded to the model.
        prefix (str): prefix.
    """
    keys = sorted(state_dict.keys())
    if not all(len(key) == 0 or key.startswith(prefix) for key in keys):
        return

    for key in keys:
        newkey = key[len(prefix) :]
        state_dict[newkey] = state_dict.pop(key)

    # also strip the prefix in metadata, if any.
    try:
        metadata = state_dict._metadata  # pyre-ignore
    except AttributeError:
        pass
    else:
        for key in list(metadata.keys()):
            # for the metadata dict, the key can be:
            # '': for the DDP module, which we want to remove.
            # 'module': for the actual model.
            # 'module.xx.xx': for the rest.

            if len(key) == 0:
                continue
            newkey = key[len(prefix) :]
            metadata[newkey] = metadata.pop(key)


def _group_checkpoint_keys(keys: list[str]) -> dict[str, list[str]]:
    """
    Group keys based on common prefixes. A prefix is the string up to the final
    "." in each key.
    Args:
        keys (list[str]): list of parameter names, i.e. keys in the model
            checkpoint dict.
    Returns:
        dict[list]: keys with common prefixes are grouped into lists.
    """
    groups = defaultdict(list)
    for key in keys:
        pos = key.rfind(".")
        if pos >= 0:
            head, tail = key[:pos], [key[pos + 1 :]]
        else:
            head, tail = key, []
        groups[head].extend(tail)
    return groups


def _group_to_str(group: list[str]) -> str:
    """
    Format a group of parameter name suffixes into a loggable string.
    Args:
        group (list[str]): list of parameter name suffixes.
    Returns:
        str: formatted string.
    """
    if len(group) == 0:
        return ""

    if len(group) == 1:
        return "." + group[0]

    return ".{" + ", ".join(group) + "}"


def _named_modules_with_dup(
    model: nn.Module, prefix: str = ""
) -> Iterable[tuple[str, nn.Module]]:
    """
    The same as `model.named_modules()`, except that it includes
    duplicated modules that have more than one name.
    """
    yield prefix, model
    for name, module in model._modules.items():  # pyre-ignore
        if module is None:
            continue
        submodule_prefix = prefix + ("." if prefix else "") + name
        yield from _named_modules_with_dup(module, submodule_prefix)
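An illustrative call showing the grouped report these helpers produce: keys sharing a prefix up to the last "." collapse into one line. It assumes `termcolor` is installed; the color escape codes are omitted from the expected output below.

from ReConV2.utils.checkpoint import get_missing_parameters_message

missing = [
    "cls_head_finetune.0.weight",
    "cls_head_finetune.0.bias",
    "cls_token",
]
print(get_missing_parameters_message(missing))
# Some model parameters or buffers are not found in the checkpoint:
#   cls_head_finetune.0.{weight, bias}
#   cls_token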
backend/ReConV2/utils/config.py
ADDED
@@ -0,0 +1,73 @@
import os

import yaml
from easydict import EasyDict

from .logger import print_log


def log_args_to_file(args, pre="args", logger=None):
    for key, val in args.__dict__.items():
        print_log(f"{pre}.{key} : {val}", logger=logger)


def log_config_to_file(cfg, pre="cfg", logger=None):
    for key, val in cfg.items():
        if isinstance(cfg[key], EasyDict):
            print_log(f"{pre}.{key} = edict()", logger=logger)
            log_config_to_file(cfg[key], pre=pre + "." + key, logger=logger)
            continue
        print_log(f"{pre}.{key} : {val}", logger=logger)


def merge_new_config(config, new_config):
    for key, val in new_config.items():
        if not isinstance(val, dict):
            if key == "_base_":
                with open(new_config["_base_"]) as f:
                    try:
                        val = yaml.load(f, Loader=yaml.FullLoader)
                    except:
                        val = yaml.load(f)
                config[key] = EasyDict()
                merge_new_config(config[key], val)
            else:
                config[key] = val
            continue
        if key not in config:
            config[key] = EasyDict()
        merge_new_config(config[key], val)
    return config


def cfg_from_yaml_file(cfg_file):
    config = EasyDict()
    with open(cfg_file) as f:
        try:
            new_config = yaml.load(f, Loader=yaml.FullLoader)
        except:
            new_config = yaml.load(f)
    merge_new_config(config=config, new_config=new_config)
    return config


def get_config(args, logger=None):
    if args.resume:
        cfg_path = os.path.join(args.experiment_path, "config.yaml")
        if not os.path.exists(cfg_path):
            print_log("Failed to resume", logger=logger)
            raise FileNotFoundError()
        print_log(f"Resume yaml from {cfg_path}", logger=logger)
        args.config = cfg_path
    config = cfg_from_yaml_file(args.config)
    if not args.resume and args.local_rank == 0:
        save_experiment_config(args, config, logger)
    return config


def save_experiment_config(args, config, logger=None):
    config_path = os.path.join(args.experiment_path, "config.yaml")
    os.system(f"cp {args.config} {config_path}")
    print_log(
        f"Copy the Config file from {args.config} to {config_path}", logger=logger
    )
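A small sketch of the `_base_` inheritance handled by `merge_new_config`: the referenced base file is loaded and nested under the `_base_` key rather than merged flat into the child config. The YAML files written below are illustrative, not files from this repository.

from ReConV2.utils.config import cfg_from_yaml_file

with open("base.yaml", "w") as f:
    f.write("embed_dim: 768\ndepth: 12\n")
with open("finetune.yaml", "w") as f:
    f.write("_base_: base.yaml\ndepth: 6\n")

cfg = cfg_from_yaml_file("finetune.yaml")
print(cfg._base_.embed_dim)  # 768, pulled in from base.yaml under `_base_`
print(cfg.depth)             # 6, set by the child config

Note that the base path is opened relative to the current working directory, so `_base_` references should match wherever training is launched from.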
backend/ReConV2/utils/knn.py
ADDED
@@ -0,0 +1,37 @@
import torch


def square_distance(src, dst):
    """
    Calculate the squared Euclidean distance between each pair of points.
    src^T * dst = xn * xm + yn * ym + zn * zm;
    sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;
    sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;
    dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2
         = sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst
    Input:
        src: source points, [B, N, C]
        dst: target points, [B, M, C]
    Output:
        dist: per-point square distance, [B, N, M]
    """
    B, N, _ = src.shape
    _, M, _ = dst.shape
    dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))
    dist += torch.sum(src**2, -1).view(B, N, 1)
    dist += torch.sum(dst**2, -1).view(B, 1, M)
    return dist


def knn_point(nsample, xyz, new_xyz):
    """
    Input:
        nsample: max sample number in local region
        xyz: all points, [B, N, C]
        new_xyz: query points, [B, S, C]
    Return:
        group_idx: grouped points index, [B, S, nsample]
    """
    sqrdists = square_distance(new_xyz, xyz)
    _, group_idx = torch.topk(sqrdists, nsample, dim=-1, largest=False, sorted=False)
    return group_idx
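A quick sanity check of the two helpers: `square_distance` agrees with squared `torch.cdist` up to floating-point error, and `knn_point` returns per-query neighbor indices of shape [B, S, nsample].

import torch

from ReConV2.utils.knn import knn_point, square_distance

xyz = torch.rand(2, 1024, 3)    # all points    [B, N, 3]
centers = torch.rand(2, 64, 3)  # query points  [B, S, 3]

assert torch.allclose(
    square_distance(centers, xyz), torch.cdist(centers, xyz) ** 2, atol=1e-5
)

idx = knn_point(32, xyz, centers)  # [B, S, 32] indices into the N points
print(idx.shape)                   # torch.Size([2, 64, 32])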
backend/ReConV2/utils/logger.py
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import logging

import torch.distributed as dist

logger_initialized = {}


def get_root_logger(log_file=None, log_level=logging.INFO, name="main"):
    """Get root logger and add a keyword filter to it.

    The logger will be initialized if it has not been initialized. By default a
    StreamHandler will be added. If `log_file` is specified, a FileHandler will
    also be added. The name of the root logger is the top-level package name,
    e.g., "mmdet3d".

    Args:
        log_file (str, optional): File path of log. Defaults to None.
        log_level (int, optional): The level of logger.
            Defaults to logging.INFO.
        name (str, optional): The name of the root logger, also used as a
            filter keyword. Defaults to 'mmdet3d'.

    Returns:
        :obj:`logging.Logger`: The obtained logger
    """
    logger = get_logger(name=name, log_file=log_file, log_level=log_level)
    # add a logging filter (a LogRecord has no `find`; match on its name)
    logging_filter = logging.Filter(name)
    logging_filter.filter = lambda record: record.name.find(name) != -1

    return logger


def get_logger(name, log_file=None, log_level=logging.INFO, file_mode="w"):
    """Initialize and get a logger by name.

    If the logger has not been initialized, this method will initialize the
    logger by adding one or two handlers, otherwise the initialized logger will
    be directly returned. During initialization, a StreamHandler will always be
    added. If `log_file` is specified and the process rank is 0, a FileHandler
    will also be added.

    Args:
        name (str): Logger name.
        log_file (str | None): The log filename. If specified, a FileHandler
            will be added to the logger.
        log_level (int): The logger level. Note that only the process of
            rank 0 is affected, and other processes will set the level to
            "Error" and thus be silent most of the time.
        file_mode (str): The file mode used in opening log file.
            Defaults to 'w'.

    Returns:
        logging.Logger: The expected logger.
    """
    logger = logging.getLogger(name)
    if name in logger_initialized:
        return logger
    # handle hierarchical names
    # e.g., logger "a" is initialized, then logger "a.b" will skip the
    # initialization since it is a child of "a".
    for logger_name in logger_initialized:
        if name.startswith(logger_name):
            return logger

    # handle duplicate logs to the console
    # Starting in 1.8.0, PyTorch DDP attaches a StreamHandler <stderr> (NOTSET)
    # to the root logger. As logger.propagate is True by default, this root
    # level handler causes logging messages from rank>0 processes to
    # unexpectedly show up on the console, creating much unwanted clutter.
    # To fix this issue, we set the root logger's StreamHandler, if any, to log
    # at the ERROR level.
    for handler in logger.root.handlers:
        if type(handler) is logging.StreamHandler:
            handler.setLevel(logging.ERROR)

    stream_handler = logging.StreamHandler()
    handlers = [stream_handler]

    if dist.is_available() and dist.is_initialized():
        rank = dist.get_rank()
    else:
        rank = 0

    # only rank 0 will add a FileHandler
    if rank == 0 and log_file is not None:
        # Here, the default behaviour of the official logger is 'a'. Thus, we
        # provide an interface to change the file mode to the default
        # behaviour.
        file_handler = logging.FileHandler(log_file, file_mode)
        handlers.append(file_handler)

    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    )
    for handler in handlers:
        handler.setFormatter(formatter)
        handler.setLevel(log_level)
        logger.addHandler(handler)

    if rank == 0:
        logger.setLevel(log_level)
    else:
        logger.setLevel(logging.ERROR)

    logger_initialized[name] = True

    return logger


def print_log(msg, logger=None, level=logging.INFO):
    """Print a log message.

    Args:
        msg (str): The message to be logged.
        logger (logging.Logger | str | None): The logger to be used.
            Some special loggers are:
            - "silent": no message will be printed.
            - other str: the logger obtained with `get_root_logger(logger)`.
            - None: The `print()` method will be used to print log messages.
        level (int): Logging level. Only available when `logger` is a Logger
            object or "root".
    """
    if logger is None:
        print(msg)
    elif isinstance(logger, logging.Logger):
        logger.log(level, msg)
    elif logger == "silent":
        pass
    elif isinstance(logger, str):
        _logger = get_logger(logger)
        _logger.log(level, msg)
    else:
        raise TypeError(
            "logger should be either a logging.Logger object, str, "
            f'"silent" or None, but got {type(logger)}'
        )
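For orientation, a minimal usage sketch of this logger (not part of the commit; the log-file name is hypothetical). Outside of DDP the rank defaults to 0, so both a StreamHandler and a FileHandler are attached, and repeated calls return the cached logger:

import logging
from ReConV2.utils.logger import get_root_logger, print_log

logger = get_root_logger(log_file="run.log", log_level=logging.INFO, name="main")
print_log("training started", logger=logger)   # formatted, goes to console and run.log
print_log("hidden message", logger="silent")   # suppressed entirely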
backend/ReConV2/utils/misc.py
ADDED
|
@@ -0,0 +1,287 @@
import os
import random
from collections import abc

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mpl_toolkits.mplot3d import Axes3D


def fps(data: torch.Tensor, number: int) -> torch.Tensor:
    B, N, _ = data.shape
    device = data.device

    centroids = torch.empty(B, number, dtype=torch.long, device=device)
    distances = torch.full((B, N), float("inf"), device=device)
    farthest = torch.randint(0, N, (B,), device=device)  # random first point

    for i in range(number):
        centroids[:, i] = farthest

        centroid = data[torch.arange(B, device=device), farthest]  # (B, 3)
        dist = torch.sum((data - centroid[:, None, :]) ** 2, dim=-1)

        distances = torch.minimum(distances, dist)
        # next farthest point: argmax of the min-distances to chosen centroids
        farthest = torch.max(distances, dim=1).indices
    return data.gather(1, centroids[..., None].expand(-1, -1, 3))


def worker_init_fn(worker_id):
    np.random.seed(np.random.get_state()[1][0] + worker_id)


def build_lambda_sche(opti, config):
    if config.get("decay_step") is not None:

        def lr_lbmd(e):
            return max(config.lr_decay ** (e / config.decay_step), config.lowest_decay)

        scheduler = torch.optim.lr_scheduler.LambdaLR(opti, lr_lbmd)
    else:
        raise NotImplementedError()
    return scheduler


def build_lambda_bnsche(model, config):
    if config.get("decay_step") is not None:

        def bnm_lmbd(e):
            return max(
                config.bn_momentum * config.bn_decay ** (e / config.decay_step),
                config.lowest_decay,
            )

        bnm_scheduler = BNMomentumScheduler(model, bnm_lmbd)
    else:
        raise NotImplementedError()
    return bnm_scheduler


def set_random_seed(seed, deterministic=False):
    """Set random seed.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.

    Note:
        Determinism trades speed for reproducibility; see
        https://pytorch.org/docs/stable/notes/randomness.html
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False


def is_seq_of(seq, expected_type, seq_type=None):
    """Check whether it is a sequence of some type.

    Args:
        seq (Sequence): The sequence to be checked.
        expected_type (type): Expected type of sequence items.
        seq_type (type, optional): Expected sequence type.

    Returns:
        bool: Whether the sequence is valid.
    """
    if seq_type is None:
        exp_seq_type = abc.Sequence
    else:
        assert isinstance(seq_type, type)
        exp_seq_type = seq_type
    if not isinstance(seq, exp_seq_type):
        return False
    for item in seq:
        if not isinstance(item, expected_type):
            return False
    return True


def set_bn_momentum_default(bn_momentum):
    def fn(m):
        if isinstance(m, nn.BatchNorm1d | nn.BatchNorm2d | nn.BatchNorm3d):
            m.momentum = bn_momentum

    return fn


class BNMomentumScheduler:
    def __init__(self, model, bn_lambda, last_epoch=-1, setter=set_bn_momentum_default):
        if not isinstance(model, nn.Module):
            raise RuntimeError(
                f"Class '{type(model).__name__}' is not a PyTorch nn Module"
            )

        self.model = model
        self.setter = setter
        self.lmbd = bn_lambda

        self.step(last_epoch + 1)
        self.last_epoch = last_epoch

    def step(self, epoch=None):
        if epoch is None:
            epoch = self.last_epoch + 1

        self.last_epoch = epoch
        self.model.apply(self.setter(self.lmbd(epoch)))

    def get_momentum(self, epoch=None):
        if epoch is None:
            epoch = self.last_epoch + 1
        return self.lmbd(epoch)


def seprate_point_cloud(xyz, num_points, crop, fixed_points=None, padding_zeros=False):
    """
    Separate a point cloud: used to generate an incomplete point cloud with a set number of points.
    """
    _, n, c = xyz.shape

    assert n == num_points
    assert c == 3
    if crop == num_points:
        return xyz, None

    INPUT = []
    CROP = []
    for points in xyz:
        if isinstance(crop, list):
            num_crop = random.randint(crop[0], crop[1])
        else:
            num_crop = crop

        points = points.unsqueeze(0)

        if fixed_points is None:
            center = F.normalize(torch.randn(1, 1, 3), p=2, dim=-1).cuda()
        else:
            if isinstance(fixed_points, list):
                fixed_point = random.sample(fixed_points, 1)[0]
            else:
                fixed_point = fixed_points
            center = fixed_point.reshape(1, 1, 3).cuda()

        distance_matrix = torch.norm(
            center.unsqueeze(2) - points.unsqueeze(1), p=2, dim=-1
        )  # 1 1 2048

        idx = torch.argsort(distance_matrix, dim=-1, descending=False)[0, 0]  # 2048

        if padding_zeros:
            input_data = points.clone()
            input_data[0, idx[:num_crop]] = input_data[0, idx[:num_crop]] * 0

        else:
            input_data = points.clone()[0, idx[num_crop:]].unsqueeze(0)  # 1 N 3

        crop_data = points.clone()[0, idx[:num_crop]].unsqueeze(0)

        if isinstance(crop, list):
            INPUT.append(fps(input_data, 2048))
            CROP.append(fps(crop_data, 2048))
        else:
            INPUT.append(input_data)
            CROP.append(crop_data)

    input_data = torch.cat(INPUT, dim=0)  # B N 3
    crop_data = torch.cat(CROP, dim=0)  # B M 3

    return input_data.contiguous(), crop_data.contiguous()


def get_ptcloud_img(ptcloud, roll, pitch):
    fig = plt.figure(figsize=(8, 8))

    x, z, y = ptcloud.transpose(1, 0)
    # `fig.gca(projection=...)` was removed in recent matplotlib; use add_subplot
    ax = fig.add_subplot(projection=Axes3D.name, adjustable="box")
    ax.axis("off")
    # ax.axis('scaled')
    ax.view_init(roll, pitch)
    pmax, pmin = np.max(ptcloud), np.min(ptcloud)
    ax.set_xbound(pmin, pmax)
    ax.set_ybound(pmin, pmax)
    ax.set_zbound(pmin, pmax)
    ax.scatter(x, y, z, zdir="z", c=y, cmap="jet")

    fig.canvas.draw()
    # np.fromstring is deprecated for binary data; np.frombuffer is its replacement
    img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    return img


def visualize_KITTI(
    path,
    data_list,
    titles=["input", "pred"],
    cmap=["bwr", "autumn"],
    zdir="y",
    xlim=(-1, 1),
    ylim=(-1, 1),
    zlim=(-1, 1),
):
    fig = plt.figure(figsize=(6 * len(data_list), 6))
    cmax = data_list[-1][:, 0].max()

    for i in range(len(data_list)):
        data = data_list[i][:-2048] if i == 1 else data_list[i]
        color = data[:, 0] / cmax
        ax = fig.add_subplot(1, len(data_list), i + 1, projection="3d")
        ax.view_init(30, -120)
        ax.scatter(
            data[:, 0],
            data[:, 1],
            data[:, 2],
            zdir=zdir,
            c=color,
            vmin=-1,
            vmax=1,
            cmap=cmap[0],
            s=4,
            linewidth=0.05,
            edgecolors="black",
        )
        ax.set_title(titles[i])

        ax.set_axis_off()
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
        ax.set_zlim(zlim)
    plt.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0.2, hspace=0)
    if not os.path.exists(path):
        os.makedirs(path)

    pic_path = path + ".png"
    fig.savefig(pic_path)

    np.save(os.path.join(path, "input.npy"), data_list[0].numpy())
    np.save(os.path.join(path, "pred.npy"), data_list[1].numpy())
    plt.close(fig)


def random_dropping(pc, e):
    up_num = max(64, 768 // (e // 50 + 1))
    random_num = torch.randint(1, up_num, (1, 1))[0, 0]
    pc = fps(pc, random_num)
    padding = torch.zeros(pc.size(0), 2048 - pc.size(1), 3).to(pc.device)
    pc = torch.cat([pc, padding], dim=1)
    return pc


def random_scale(partial, scale_range=[0.8, 1.2]):
    scale = torch.rand(1).cuda() * (scale_range[1] - scale_range[0]) + scale_range[0]
    return partial * scale
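A quick sanity check of `fps`, as a sketch (not part of the commit): it keeps the batch dimension and returns the requested number of 3-D points per cloud.

import torch
from ReConV2.utils.misc import fps

pts = torch.rand(2, 2048, 3)   # two random clouds of 2048 points each
sub = fps(pts, 512)            # farthest point sampling down to 512 points
assert sub.shape == (2, 512, 3)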
backend/ReConV2/utils/registry.py
ADDED
|
@@ -0,0 +1,290 @@
import inspect
import warnings
from functools import partial

from ReConV2.utils import config, misc


class Registry:
    """A registry to map strings to classes.

    Registered objects can be built from the registry.

    Example:
        >>> MODELS = Registry('models')
        >>> @MODELS.register_module()
        >>> class ResNet:
        >>>     pass
        >>> resnet = MODELS.build(dict(NAME='ResNet'))

    Please refer to https://mmcv.readthedocs.io/en/latest/registry.html for
    advanced usage.

    Args:
        name (str): Registry name.
        build_func (func, optional): Build function to construct an instance
            from the Registry. :func:`build_from_cfg` is used if neither
            ``parent`` nor ``build_func`` is specified. If ``parent`` is
            specified and ``build_func`` is not given, ``build_func`` will be
            inherited from ``parent``. Default: None.
        parent (Registry, optional): Parent registry. A class registered in a
            children registry can be built from the parent. Default: None.
        scope (str, optional): The scope of the registry. It is the key to
            search for children registries. If not specified, scope will be
            the name of the package where the class is defined,
            e.g. mmdet, mmcls, mmseg. Default: None.
    """

    def __init__(self, name, build_func=None, parent=None, scope=None):
        self._name = name
        self._module_dict = dict()
        self._children = dict()
        self._scope = self.infer_scope() if scope is None else scope

        # self.build_func will be set with the following priority:
        # 1. build_func
        # 2. parent.build_func
        # 3. build_from_cfg
        if build_func is None:
            if parent is not None:
                self.build_func = parent.build_func
            else:
                self.build_func = build_from_cfg
        else:
            self.build_func = build_func
        if parent is not None:
            assert isinstance(parent, Registry)
            parent._add_children(self)
            self.parent = parent
        else:
            self.parent = None

    def __len__(self):
        return len(self._module_dict)

    def __contains__(self, key):
        return self.get(key) is not None

    def __repr__(self):
        format_str = (
            self.__class__.__name__ + f"(name={self._name}, items={self._module_dict})"
        )
        return format_str

    @staticmethod
    def infer_scope():
        """Infer the scope of registry.

        The name of the package where the registry is defined will be
        returned.

        Example:
            # in mmdet/models/backbone/resnet.py
            >>> MODELS = Registry('models')
            >>> @MODELS.register_module()
            >>> class ResNet:
            >>>     pass
            The scope of ``ResNet`` will be ``mmdet``.

        Returns:
            scope (str): The inferred scope name.
        """
        # inspect.stack() traces where this function is called; index 2
        # is the frame where `infer_scope()` was called from
        filename = inspect.getmodule(inspect.stack()[2][0]).__name__
        split_filename = filename.split(".")
        return split_filename[0]

    @staticmethod
    def split_scope_key(key):
        """Split scope and key.

        The first scope will be split from the key.

        Examples:
            >>> Registry.split_scope_key('mmdet.ResNet')
            'mmdet', 'ResNet'
            >>> Registry.split_scope_key('ResNet')
            None, 'ResNet'

        Return:
            scope (str, None): The first scope.
            key (str): The remaining key.
        """
        split_index = key.find(".")
        if split_index != -1:
            return key[:split_index], key[split_index + 1 :]
        else:
            return None, key

    @property
    def name(self):
        return self._name

    @property
    def scope(self):
        return self._scope

    @property
    def module_dict(self):
        return self._module_dict

    @property
    def children(self):
        return self._children

    def get(self, key):
        """Get the registry record.

        Args:
            key (str): The class name in string format.

        Returns:
            class: The corresponding class.
        """
        scope, real_key = self.split_scope_key(key)
        if scope is None or scope == self._scope:
            # get from self
            if real_key in self._module_dict:
                return self._module_dict[real_key]
        else:
            # get from self._children
            if scope in self._children:
                return self._children[scope].get(real_key)
            else:
                # go to the root registry
                parent = self.parent
                while parent.parent is not None:
                    parent = parent.parent
                return parent.get(key)

    def build(self, *args, **kwargs):
        return self.build_func(*args, **kwargs, registry=self)

    def _add_children(self, registry):
        """Add children for a registry.

        The ``registry`` will be added as a child based on its scope.
        The parent registry can build objects from the children registry.

        Example:
            >>> models = Registry('models')
            >>> mmdet_models = Registry('models', parent=models)
            >>> @mmdet_models.register_module()
            >>> class ResNet:
            >>>     pass
            >>> resnet = models.build(dict(NAME='mmdet.ResNet'))
        """

        assert isinstance(registry, Registry)
        assert registry.scope is not None
        assert registry.scope not in self.children, (
            f"scope {registry.scope} exists in {self.name} registry"
        )
        self.children[registry.scope] = registry

    def _register_module(self, module_class, module_name=None, force=False):
        if not inspect.isclass(module_class):
            raise TypeError(f"module must be a class, but got {type(module_class)}")

        if module_name is None:
            module_name = module_class.__name__
        if isinstance(module_name, str):
            module_name = [module_name]
        for name in module_name:
            if not force and name in self._module_dict:
                raise KeyError(f"{name} is already registered in {self.name}")
            self._module_dict[name] = module_class

    def deprecated_register_module(self, cls=None, force=False):
        warnings.warn(
            "The old API of register_module(module, force=False) "
            "is deprecated and will be removed, please use the new API "
            "register_module(name=None, force=False, module=None) instead."
        )
        if cls is None:
            return partial(self.deprecated_register_module, force=force)
        self._register_module(cls, force=force)
        return cls

    def register_module(self, name=None, force=False, module=None):
        """Register a module.

        A record will be added to `self._module_dict`, whose key is the class
        name or the specified name, and whose value is the class itself.
        It can be used as a decorator or a normal function.

        Example:
            >>> backbones = Registry('backbone')
            >>> @backbones.register_module()
            >>> class ResNet:
            >>>     pass

            >>> backbones = Registry('backbone')
            >>> @backbones.register_module(name='mnet')
            >>> class MobileNet:
            >>>     pass

            >>> backbones = Registry('backbone')
            >>> class ResNet:
            >>>     pass
            >>> backbones.register_module(ResNet)

        Args:
            name (str | None): The module name to be registered. If not
                specified, the class name will be used.
            force (bool, optional): Whether to override an existing class with
                the same name. Default: False.
            module (type): Module class to be registered.
        """
        if not isinstance(force, bool):
            raise TypeError(f"force must be a boolean, but got {type(force)}")
        # NOTE: This is a workaround to be compatible with the old API,
        # while it may introduce unexpected bugs.
        if isinstance(name, type):
            return self.deprecated_register_module(name, force=force)

        # raise the error ahead of time
        if not (name is None or isinstance(name, str) or misc.is_seq_of(name, str)):
            raise TypeError(
                "name must be either of None, an instance of str or a sequence"
                f" of str, but got {type(name)}"
            )

        # use it as a normal method: x.register_module(module=SomeClass)
        if module is not None:
            self._register_module(module_class=module, module_name=name, force=force)
            return module

        # use it as a decorator: @x.register_module()
        def _register(cls):
            self._register_module(module_class=cls, module_name=name, force=force)
            return cls

        return _register


def build_from_cfg(cfg, registry, default_args=None):
    """Build a module from a config dict.

    Args:
        cfg (edict): Config dict. It should at least contain the key "NAME".
        registry (:obj:`Registry`): The registry to search the type from.

    Returns:
        object: The constructed object.
    """
    if not isinstance(cfg, dict):
        raise TypeError(f"cfg must be a dict, but got {type(cfg)}")
    if "NAME" not in cfg:
        if default_args is None or "NAME" not in default_args:
            raise KeyError(
                '`cfg` or `default_args` must contain the key "NAME", '
                f"but got {cfg}\n{default_args}"
            )
    if not isinstance(registry, Registry):
        raise TypeError(
            f"registry must be an mmcv.Registry object, but got {type(registry)}"
        )

    if not (isinstance(default_args, dict) or default_args is None):
        raise TypeError(
            f"default_args must be a dict or None, but got {type(default_args)}"
        )

    if default_args is not None:
        cfg = config.merge_new_config(cfg, default_args)

    obj_type = cfg.get("NAME")

    if isinstance(obj_type, str):
        obj_cls = registry.get(obj_type)
        if obj_cls is None:
            raise KeyError(f"{obj_type} is not in the {registry.name} registry")
    elif inspect.isclass(obj_type):
        obj_cls = obj_type
    else:
        raise TypeError(f"type must be a str or valid type, but got {type(obj_type)}")
    try:
        return obj_cls(cfg)
    except Exception as e:
        # A normal TypeError does not print the class name.
        raise type(e)(f"{obj_cls.__name__}: {e}")
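How this registry is meant to be used, as a sketch in a script (the `PointMLP` class is hypothetical; note that `build_from_cfg` passes the whole config dict to the constructor):

from ReConV2.utils.registry import Registry

MODELS = Registry("models")

@MODELS.register_module()
class PointMLP:                # hypothetical model class
    def __init__(self, cfg):
        self.cfg = cfg

model = MODELS.build({"NAME": "PointMLP"})   # looks up "PointMLP" and calls PointMLP(cfg)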
backend/cad_retrieval_utils/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
from .inference import make_submission

__all__ = ["make_submission"]
backend/cad_retrieval_utils/augmentations.py
ADDED
|
@@ -0,0 +1,15 @@
from typing import cast

import torchvision.transforms as T

from .type_defs import ImageTransform


def build_img_transforms(img_size: int) -> ImageTransform:
    transform = T.Compose([
        T.Resize(img_size),
        T.CenterCrop(img_size),
        T.ToTensor(),
        T.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
    ])
    return cast(ImageTransform, transform)
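A usage sketch (file name hypothetical): the transform resizes, center-crops, and ImageNet-normalizes an RGB image into a (3, img_size, img_size) tensor.

from PIL import Image
from cad_retrieval_utils.augmentations import build_img_transforms

transform = build_img_transforms(336)
img = Image.open("example.png").convert("RGB")   # hypothetical input image
x = transform(img)                               # torch.Tensor of shape (3, 336, 336)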
backend/cad_retrieval_utils/configs/config.py
ADDED
|
@@ -0,0 +1,57 @@
from pathlib import Path

import torch
from easydict import EasyDict as edict

CONFIG = edict()

# --- Config of the pretrained ReCon model to load ---
CONFIG.model = edict({
    "NAME": "ReCon2",
    "group_size": 32,
    "num_group": 512,
    "mask_ratio": 0.7,
    "mask_type": "rand",
    "embed_dim": 1024,
    "depth": 24,
    "drop_path_rate": 0.2,
    "num_heads": 16,
    "decoder_depth": 4,
    "with_color": True,
    "stop_grad": False,
    "large_embedding": False,
    "img_queries": 13,
    "text_queries": 3,
    "contrast_type": "byol",
    "pretrained_model_name": "eva_large_patch14_336.in22k_ft_in22k_in1k",
})

# --- General parameters ---
CONFIG.npoints = 10_000
CONFIG.emb_dim = 1280
CONFIG.img_size = 336
CONFIG.seed = 42
CONFIG.device = torch.device("cpu")
CONFIG.text_ratio = 0.3

# --- Inference parameters ---
CONFIG.infer_img_batch_size = 32
CONFIG.infer_pc_batch_size = 16
CONFIG.infer_text_batch_size = 32

# --- MoE parameters ---
CONFIG.train_params = edict()
CONFIG.train_params.n_experts = 8

# --- Paths ---
CONFIG.paths = edict()
CONFIG.paths.test_data_root = Path("/kaggle/input/test-final/test")
CONFIG.paths.submission_save_file = Path("./submission.csv")

# These paths are overwritten from the command line by inference_runner.py
CONFIG.paths.model_spec = {
    "text_proj": None,
    "text_encoder": None,
    "moe": None,
    "pc_encoder": None
}
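Since CONFIG is a module-level EasyDict, downstream code can read and override it with plain attribute access, e.g. (checkpoint path hypothetical):

from cad_retrieval_utils.configs.config import CONFIG

CONFIG.paths.model_spec["pc_encoder"] = "weights/pc_encoder.pt"   # hypothetical path
print(CONFIG.model.NAME, CONFIG.npoints, CONFIG.device)           # ReCon2 10000 cpu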
backend/cad_retrieval_utils/datasets.py
ADDED
|
@@ -0,0 +1,80 @@
from pathlib import Path

import numpy as np
import torch
import trimesh
from PIL import Image
from torch.utils.data import Dataset

from .type_defs import ImageTransform


def normalize_pc(pc: np.ndarray) -> np.ndarray:
    centroid = np.mean(pc, axis=0)
    pc = pc - centroid
    m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
    if m < 1e-6:
        return pc
    pc = pc / m
    return pc


def create_pc_tensor_with_dummy_color(pc: np.ndarray, npoints: int) -> torch.Tensor:
    pc_with_dummy_color = np.zeros((npoints, 6), dtype=np.float32)
    pc_with_dummy_color[:, :3] = pc
    # The ReConV2 model expects 6 channels (XYZ + RGB); add a neutral gray
    pc_with_dummy_color[:, 3:6] = 0.5
    return torch.from_numpy(pc_with_dummy_color).float()


def load_mesh_safe(mesh_path: Path, npoints: int, seed: int) -> np.ndarray:
    """Safely load a mesh, handling trimesh Scene objects."""
    mesh_data = trimesh.load(str(mesh_path))
    mesh = mesh_data.to_mesh() if isinstance(mesh_data, trimesh.Scene) else mesh_data
    pc, _ = trimesh.sample.sample_surface(mesh, npoints, seed=seed)
    return np.array(pc, dtype=np.float32)


class InferenceMeshDataset(Dataset):
    def __init__(self, file_paths: list[str], npoints: int, base_seed: int = 42) -> None:
        self.file_paths = file_paths
        self.npoints = npoints
        self.base_seed = base_seed

    def __len__(self) -> int:
        return len(self.file_paths)

    def __getitem__(self, idx: int) -> torch.Tensor:
        pc_path = Path(self.file_paths[idx])
        sample_seed = self.base_seed + idx
        pc = load_mesh_safe(pc_path, self.npoints, sample_seed)
        pc = normalize_pc(pc)
        return create_pc_tensor_with_dummy_color(pc, self.npoints)


class InferenceImageDataset(Dataset):
    def __init__(self, file_paths: list[str], transform: ImageTransform) -> None:
        self.file_paths = file_paths
        self.transform = transform

    def __len__(self) -> int:
        return len(self.file_paths)

    def __getitem__(self, idx: int) -> torch.Tensor:
        img_path = self.file_paths[idx]
        img = Image.open(img_path).convert("RGB")
        return self.transform(img)


class InferenceTextDataset(Dataset):
    def __init__(self, file_paths: list[str]) -> None:
        self.texts = []
        for path in file_paths:
            with open(path) as f:
                self.texts.append(f.read().strip())

    def __len__(self) -> int:
        return len(self.texts)

    def __getitem__(self, idx: int) -> str:
        return self.texts[idx]
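A sketch of how these datasets plug into a DataLoader (mesh file names hypothetical): each mesh is surface-sampled to `npoints` points, normalized to the unit sphere, and padded with the dummy color channels.

from torch.utils.data import DataLoader
from cad_retrieval_utils.datasets import InferenceMeshDataset

ds = InferenceMeshDataset(["a.stl", "b.stl"], npoints=10_000)   # hypothetical mesh files
loader = DataLoader(ds, batch_size=2, shuffle=False)
batch = next(iter(loader))   # shape (2, 10000, 6): XYZ plus constant 0.5 RGB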
backend/cad_retrieval_utils/evaluation.py
ADDED
|
@@ -0,0 +1,43 @@
import numpy as np
import torch
from easydict import EasyDict as edict
from torch.utils.data import DataLoader
from tqdm.auto import tqdm

from .models import ImageEncoder, InferencePcEncoder, InferenceTextEncoder
from .type_defs import EmbeddingArray


@torch.no_grad()
def get_inference_embeddings_text(
    model: InferenceTextEncoder, loader: DataLoader, config: edict
) -> EmbeddingArray:
    all_embs = []
    for batch in tqdm(loader, desc="Extracting text embeddings"):
        embs = model.encode_text(batch, normalize=True)
        all_embs.append(embs.cpu().numpy())
    return np.vstack(all_embs)


@torch.no_grad()
def get_inference_embeddings_mesh(
    model: InferencePcEncoder, loader: DataLoader, config: edict
) -> EmbeddingArray:
    all_embs = []
    for batch in tqdm(loader, desc="Extracting mesh embeddings"):
        batch = batch.to(config.device)
        embs = model.encode_pc(batch, normalize=True)
        all_embs.append(embs.cpu().numpy())
    return np.vstack(all_embs)


@torch.no_grad()
def get_inference_embeddings_image(
    model: ImageEncoder, loader: DataLoader, config: edict
) -> EmbeddingArray:
    all_embs = []
    for batch in tqdm(loader, desc="Extracting image embeddings"):
        batch = batch.to(config.device)
        embs = model.encode_image(batch, normalize=True)
        all_embs.append(embs.cpu().numpy())
    return np.vstack(all_embs)
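These three helpers follow the same pattern: iterate a loader, encode with normalize=True, and stack to a single (N, emb_dim) NumPy matrix. A sketch of how the result is typically consumed (assumes `pc_encoder` and `mesh_loader` were already built as shown in models.py and datasets.py):

import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from cad_retrieval_utils.configs.config import CONFIG
from cad_retrieval_utils.evaluation import get_inference_embeddings_mesh

gallery = get_inference_embeddings_mesh(pc_encoder, mesh_loader, CONFIG)  # (N, emb_dim), L2-normalized
sims = cosine_similarity(gallery[:1], gallery)        # first item as a query
top3 = np.argsort(sims, axis=1)[:, ::-1][:, :3]       # indices of the 3 nearest items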
backend/cad_retrieval_utils/inference.py
ADDED
|
@@ -0,0 +1,242 @@
from pathlib import Path

import numpy as np
import pandas as pd
from easydict import EasyDict as edict
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import normalize
from torch.utils.data import DataLoader

from .augmentations import build_img_transforms
from .datasets import InferenceImageDataset, InferenceMeshDataset, InferenceTextDataset
from .evaluation import (
    get_inference_embeddings_image,
    get_inference_embeddings_mesh,
    get_inference_embeddings_text,
)
from .models import ImageEncoder, InferencePcEncoder, InferenceTextEncoder
from .type_defs import CheckpointSpec


# --- Model loaders ---
def load_text_encoder(spec: CheckpointSpec, config: edict) -> InferenceTextEncoder:
    text_encoder = InferenceTextEncoder(config).to(config.device)
    text_encoder.load_text_weights(str(spec["text_proj"]), str(spec["text_encoder"]))
    text_encoder.eval()
    return text_encoder


def load_pc_encoder(spec: CheckpointSpec, config: edict) -> InferencePcEncoder:
    pc_encoder = InferencePcEncoder(config).to(config.device)
    pc_encoder.load_pc_encoder_weights(str(spec["pc_encoder"]))
    pc_encoder.eval()
    return pc_encoder


def load_image_encoder(spec: CheckpointSpec, config: edict) -> ImageEncoder:
    img_encoder = ImageEncoder(config).to(config.device)
    img_encoder.load_moe_weights(str(spec["moe"]))
    img_encoder.eval()
    return img_encoder


# --- Data preparation ---
def prepare_all_data(config: edict) -> tuple[dict, dict]:
    test_root = Path(config.paths.test_data_root)
    img_transform = build_img_transforms(config.img_size)
    data_loaders = {}
    data_ids = {}

    # Image-to-Mesh
    q_img_paths = sorted(test_root.joinpath("queries_image_to_mesh").glob("*.png"))
    g_mesh_for_img_paths = sorted(test_root.joinpath("gallery_mesh_for_image").glob("*.stl"))
    data_loaders['q_img'] = DataLoader(InferenceImageDataset([str(p) for p in q_img_paths], img_transform),
                                       batch_size=config.infer_img_batch_size, shuffle=False)
    data_loaders['g_mesh_for_img'] = DataLoader(
        InferenceMeshDataset([str(p) for p in g_mesh_for_img_paths], config.npoints, config.seed),
        batch_size=config.infer_pc_batch_size, shuffle=False)
    data_ids['q_img'] = [p.stem for p in q_img_paths]
    data_ids['g_mesh_for_img'] = [p.stem for p in g_mesh_for_img_paths]

    # Mesh-to-Image
    q_mesh_to_img_paths = sorted(test_root.joinpath("queries_mesh_to_image").glob("*.stl"))
    g_img_for_mesh_paths = sorted(test_root.joinpath("gallery_image_for_mesh").glob("*.png"))
    data_loaders['q_mesh_to_img'] = DataLoader(
        InferenceMeshDataset([str(p) for p in q_mesh_to_img_paths], config.npoints, config.seed),
        batch_size=config.infer_pc_batch_size, shuffle=False)
    data_loaders['g_img_for_mesh'] = DataLoader(
        InferenceImageDataset([str(p) for p in g_img_for_mesh_paths], img_transform),
        batch_size=config.infer_img_batch_size, shuffle=False)
    data_ids['q_mesh_to_img'] = [p.stem for p in q_mesh_to_img_paths]
    data_ids['g_img_for_mesh_paths'] = g_img_for_mesh_paths  # full paths are needed for grouping

    # Text-to-Mesh
    q_text_paths = sorted(test_root.joinpath("queries_text_to_mesh").glob("*.txt"))
    g_mesh_for_text_paths = sorted(test_root.joinpath("gallery_mesh_for_text").glob("*.stl"))
    data_loaders['q_text'] = DataLoader(InferenceTextDataset([str(p) for p in q_text_paths]),
                                        batch_size=config.infer_text_batch_size, shuffle=False)
    data_loaders['g_mesh_for_text'] = DataLoader(
        InferenceMeshDataset([str(p) for p in g_mesh_for_text_paths], config.npoints, config.seed),
        batch_size=config.infer_pc_batch_size, shuffle=False)
    data_ids['q_text'] = [p.stem for p in q_text_paths]
    data_ids['g_mesh_for_text'] = [p.stem for p in g_mesh_for_text_paths]

    # Mesh-to-Text
    q_mesh_to_text_paths = sorted(test_root.joinpath("queries_mesh_to_text").glob("*.stl"))
    g_text_for_mesh_paths = sorted(test_root.joinpath("gallery_text_for_mesh").glob("*.txt"))
    data_loaders['q_mesh_to_text'] = DataLoader(
        InferenceMeshDataset([str(p) for p in q_mesh_to_text_paths], config.npoints, config.seed),
        batch_size=config.infer_pc_batch_size, shuffle=False)
    data_loaders['g_text_for_mesh'] = DataLoader(InferenceTextDataset([str(p) for p in g_text_for_mesh_paths]),
                                                 batch_size=config.infer_text_batch_size, shuffle=False)
    data_ids['q_mesh_to_text'] = [p.stem for p in q_mesh_to_text_paths]
    data_ids['g_text_for_mesh'] = [p.stem for p in g_text_for_mesh_paths]

    return data_loaders, data_ids


# --- Task solvers ---
def solve_img2mesh(loaders, ids, model_spec, config) -> pd.DataFrame:
    print("  🖼️ → 📦 Image-to-Mesh: extracting embeddings...")
    img_encoder = load_image_encoder(model_spec, config)
    pc_encoder = load_pc_encoder(model_spec, config)

    query_embs = get_inference_embeddings_image(img_encoder, loaders['q_img'], config)
    gallery_embs = get_inference_embeddings_mesh(pc_encoder, loaders['g_mesh_for_img'], config)

    sims = cosine_similarity(query_embs, gallery_embs)
    top_indices = np.argsort(sims, axis=1)[:, ::-1][:, :3]

    results = {q_id: [ids['g_mesh_for_img'][j] for j in top_indices[i]] for i, q_id in enumerate(ids['q_img'])}
    df = pd.DataFrame(list(results.items()), columns=["image_to_mesh_image", "image_to_mesh_mesh"])
    return df.sort_values("image_to_mesh_image").reset_index(drop=True)


def solve_mesh2img(loaders, ids, model_spec, config) -> pd.DataFrame:
    print("  📦 → 🖼️ Mesh-to-Image: extracting embeddings...")
    pc_encoder = load_pc_encoder(model_spec, config)
    img_encoder = load_image_encoder(model_spec, config)

    query_embs = get_inference_embeddings_mesh(pc_encoder, loaders['q_mesh_to_img'], config)
    gallery_embs = get_inference_embeddings_image(img_encoder, loaders['g_img_for_mesh'], config)

    gallery_img_model_ids = [p.name.split("_")[0] for p in ids['g_img_for_mesh_paths']]
    df_gallery = pd.DataFrame(gallery_embs)
    df_gallery["model_id"] = gallery_img_model_ids
    mean_embs_df = df_gallery.groupby("model_id").mean()

    avg_gallery_embs = normalize(mean_embs_df.to_numpy(), axis=1)
    avg_gallery_ids = mean_embs_df.index.tolist()

    sims = cosine_similarity(query_embs, avg_gallery_embs)
    top_indices = np.argsort(sims, axis=1)[:, ::-1][:, :3]

    results = {q_id: [avg_gallery_ids[j] for j in top_indices[i]] for i, q_id in enumerate(ids['q_mesh_to_img'])}
    df = pd.DataFrame(list(results.items()), columns=["mesh_to_image_mesh", "mesh_to_image_image"])
    return df.sort_values("mesh_to_image_mesh").reset_index(drop=True)


def solve_text2mesh(loaders, ids, model_spec, config) -> pd.DataFrame:
    print("  📝 → 📦 Text-to-Mesh: extracting embeddings...")
    text_encoder = load_text_encoder(model_spec, config)
    pc_encoder = load_pc_encoder(model_spec, config)

    query_embs = get_inference_embeddings_text(text_encoder, loaders['q_text'], config)
    gallery_embs = get_inference_embeddings_mesh(pc_encoder, loaders['g_mesh_for_text'], config)

    sims = cosine_similarity(query_embs, gallery_embs)
    top_indices = np.argsort(sims, axis=1)[:, ::-1][:, :3]

    results = {q_id: [ids['g_mesh_for_text'][j] for j in top_indices[i]] for i, q_id in enumerate(ids['q_text'])}
    df = pd.DataFrame(list(results.items()), columns=["text_to_mesh_text", "text_to_mesh_mesh"])
    return df.sort_values("text_to_mesh_text").reset_index(drop=True)


def solve_mesh2text(loaders, ids, model_spec, config) -> pd.DataFrame:
    print("  📦 → 📝 Mesh-to-Text: extracting embeddings...")
    pc_encoder = load_pc_encoder(model_spec, config)
    text_encoder = load_text_encoder(model_spec, config)

    query_embs = get_inference_embeddings_mesh(pc_encoder, loaders['q_mesh_to_text'], config)
    gallery_embs = get_inference_embeddings_text(text_encoder, loaders['g_text_for_mesh'], config)

    sims = cosine_similarity(query_embs, gallery_embs)
    top_indices = np.argsort(sims, axis=1)[:, ::-1][:, :3]

    results = {q_id: [ids['g_text_for_mesh'][j] for j in top_indices[i]]
               for i, q_id in enumerate(ids['q_mesh_to_text'])}
    df = pd.DataFrame(list(results.items()), columns=["mesh_to_text_mesh", "mesh_to_text_text"])
    return df.sort_values("mesh_to_text_mesh").reset_index(drop=True)


# --- Main function ---
def make_submission(config: edict) -> None:
    print("\n" + "=" * 60)
    print("🚀 Building the submission file for all 4 tasks")
    print("=" * 60)

    model_spec = config.paths.model_spec
    loaders, ids = prepare_all_data(config)

    # Solve all four tasks
    text2mesh_df = solve_text2mesh(loaders, ids, model_spec, config)
    mesh2text_df = solve_mesh2text(loaders, ids, model_spec, config)
    img2mesh_df = solve_img2mesh(loaders, ids, model_spec, config)
    mesh2img_df = solve_mesh2img(loaders, ids, model_spec, config)

    # Build the final DataFrame with the required structure:
    # 2187 rows for the image tasks + 100 rows for the text tasks = 2287 rows
    total_rows = 2287
    final_df = pd.DataFrame(index=range(total_rows))

    # Add the id column
    final_df["id"] = final_df.index

    # Initialize all columns as None
    for col in ["image_to_mesh_image", "image_to_mesh_mesh",
                "mesh_to_image_mesh", "mesh_to_image_image",
                "text_to_mesh_text", "text_to_mesh_mesh",
                "mesh_to_text_mesh", "mesh_to_text_text"]:
        final_df[col] = None

    # Fill in the image tasks (first 2187 rows);
    # use .at so that lists can be assigned to cells
    for i in range(len(img2mesh_df)):
        final_df.at[i, "image_to_mesh_image"] = img2mesh_df.loc[i, "image_to_mesh_image"]
        final_df.at[i, "image_to_mesh_mesh"] = img2mesh_df.loc[i, "image_to_mesh_mesh"]

    for i in range(len(mesh2img_df)):
        final_df.at[i, "mesh_to_image_mesh"] = mesh2img_df.loc[i, "mesh_to_image_mesh"]
        final_df.at[i, "mesh_to_image_image"] = mesh2img_df.loc[i, "mesh_to_image_image"]

    # Fill in the text tasks (last 100 rows, starting at index 2187)
    text_start_idx = 2187
    for i in range(len(text2mesh_df)):
        final_df.at[text_start_idx + i, "text_to_mesh_text"] = text2mesh_df.loc[i, "text_to_mesh_text"]
        final_df.at[text_start_idx + i, "text_to_mesh_mesh"] = text2mesh_df.loc[i, "text_to_mesh_mesh"]

    for i in range(len(mesh2text_df)):
        final_df.at[text_start_idx + i, "mesh_to_text_mesh"] = mesh2text_df.loc[i, "mesh_to_text_mesh"]
        final_df.at[text_start_idx + i, "mesh_to_text_text"] = mesh2text_df.loc[i, "mesh_to_text_text"]

    # Statistics
    print("\n📊 Submission statistics:")
    print(f"  Filled image_to_mesh: {final_df['image_to_mesh_image'].notna().sum()}")
    print(f"  Filled mesh_to_image: {final_df['mesh_to_image_mesh'].notna().sum()}")
    print(f"  Filled text_to_mesh: {final_df['text_to_mesh_text'].notna().sum()}")
    print(f"  Filled mesh_to_text: {final_df['mesh_to_text_mesh'].notna().sum()}")

    # Convert lists to strings for the CSV
    for col in final_df.columns:
        if col != "id":  # leave the id column untouched
            mask = final_df[col].apply(lambda x: isinstance(x, list))
            final_df.loc[mask, col] = final_df.loc[mask, col].apply(str)

    # Save the result
    output_path = config.paths.submission_save_file
    final_df.to_csv(output_path, index=False)
    print(f"\n✅ Submission file created: {output_path}")
    print(f"   Total rows: {len(final_df)}")
    print("   Image tasks: rows 0-2186")
    print("   Text tasks: rows 2187-2286")
    print("=" * 60)
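A minimal sanity check of the produced file, as a sketch (assumes the default ./submission.csv and the 2187/100 task split described above):

import pandas as pd

sub = pd.read_csv("submission.csv")
assert len(sub) == 2287
assert sub.loc[:2186, "image_to_mesh_image"].notna().all()   # image tasks: rows 0-2186
assert sub.loc[2187:, "text_to_mesh_text"].notna().all()     # text tasks: rows 2187-2286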
backend/cad_retrieval_utils/inference_runner.py
ADDED
|
@@ -0,0 +1,45 @@
import argparse
from pathlib import Path

from cad_retrieval_utils.utils import init_environment, load_config


def main() -> None:
    parser = argparse.ArgumentParser(
        description="Inference runner for all 4 tasks",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("--config", required=True, help="Path to the .py file with CONFIG")
    parser.add_argument("--pc_encoder", required=True, help="Path to the PC encoder weights")
    parser.add_argument("--img_moe", required=True, help="Path to the Image MoE head weights")
    parser.add_argument("--text_proj", required=True, help="Path to the text projection weights")
    parser.add_argument("--text_encoder", required=True, help="Path to the text encoder weights")
    parser.add_argument("--output", default="submission.csv", help="Path for saving submission.csv")
    args = parser.parse_args()

    CONFIG = load_config(args.config)
    print(f"Using config: {args.config}")

    # Update the config with paths from the command-line arguments
    CONFIG.paths.model_spec = {
        "pc_encoder": args.pc_encoder,
        "moe": args.img_moe,
        "text_proj": args.text_proj,
        "text_encoder": args.text_encoder,
    }
    CONFIG.paths.submission_save_file = Path(args.output)

    # Check that all checkpoint files exist
    for key, path in CONFIG.paths.model_spec.items():
        if path and not Path(path).exists():
            raise FileNotFoundError(f"File not found: {key} -> {path}")

    init_environment(CONFIG)

    # Import after environment initialization
    from cad_retrieval_utils.inference import make_submission
    make_submission(CONFIG)


if __name__ == "__main__":
    main()
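A hypothetical invocation from the backend/ directory (all weight paths are placeholders):

python cad_retrieval_utils/inference_runner.py \
    --config cad_retrieval_utils/configs/config.py \
    --pc_encoder weights/pc_encoder.pt \
    --img_moe weights/moe.pt \
    --text_proj weights/text_proj.pt \
    --text_encoder weights/text_encoder.pt \
    --output submission.csv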
backend/cad_retrieval_utils/models.py
ADDED
|
@@ -0,0 +1,124 @@
from typing import cast

import open_clip
import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
from easydict import EasyDict as edict

from ReConV2.models.ReCon import ReCon2


# --- Base PC encoder (shared by all tasks) ---
class BasePcEncoder(nn.Module):
    def __init__(self, config: edict):
        super().__init__()
        self.text_ratio = config.text_ratio
        self.pc_encoder_base = ReCon2(config.model)
        self.config = config

    def encode_pc(self, pc: torch.Tensor, normalize: bool) -> torch.Tensor:
        img_token, text_token, _, _ = self.pc_encoder_base.forward_features(pc)
        img_pred_feat = torch.mean(img_token, dim=1)
        text_pred_feat = torch.mean(text_token, dim=1)
        pc_feats = img_pred_feat + text_pred_feat * self.text_ratio
        return F.normalize(pc_feats, dim=-1) if normalize else pc_feats


# --- Models for Text-Mesh ---
class TextEncoder(nn.Module):
    def __init__(self, config: edict) -> None:
        super().__init__()
        self.config = config
        model, _, _ = open_clip.create_model_and_transforms(
            'EVA02-L-14-336',
            pretrained='merged2b_s6b_b61k'
        )
        self.text_encoder = model
        self.tokenizer = open_clip.get_tokenizer('EVA02-L-14-336')

        text_dim = 768
        self.text_proj = nn.Sequential(
            nn.Linear(text_dim, config.emb_dim),
            nn.ReLU(),
            nn.Linear(config.emb_dim, config.emb_dim)
        )

    def encode_text(self, texts: list[str], normalize: bool = True) -> torch.Tensor:
        tokens = self.tokenizer(texts).to(self.config.device)
        text_features = self.text_encoder.encode_text(tokens)
        text_embeddings = self.text_proj(text_features.float())
        return F.normalize(text_embeddings, dim=-1) if normalize else text_embeddings


class InferenceTextEncoder(nn.Module):
    def __init__(self, config: edict) -> None:
        super().__init__()
        self.encoder = TextEncoder(config)

    def load_text_weights(self, text_proj_path: str, text_encoder_path: str) -> None:
        self.encoder.text_proj.load_state_dict(torch.load(text_proj_path, map_location="cpu"), strict=True)
        print(f"✅ Text projection weights loaded from {text_proj_path}")

        # Load only the parameters present in the checkpoint. strict=False
        # because only the last layers are loaded: 4 layers were unfrozen
        # during training, saved, and are reloaded here for inference.
        checkpoint = torch.load(text_encoder_path, map_location="cpu")
        missing, unexpected = self.encoder.text_encoder.load_state_dict(checkpoint, strict=False)
        print(f"✅ Text encoder weights loaded from {text_encoder_path}")
        if missing:
            print(f"  ℹ️ Missing keys (expected, frozen params): {len(missing)}")
        if unexpected:
            raise Exception(f"  ⚠️ Unexpected keys: {unexpected}")

    def encode_text(self, texts: list[str], normalize: bool = True) -> torch.Tensor:
        return self.encoder.encode_text(texts, normalize)


# --- Model for PC ---
class InferencePcEncoder(BasePcEncoder):
    def __init__(self, config: edict) -> None:
        super().__init__(config)

    def load_pc_encoder_weights(self, checkpoint_path: str) -> None:
        self.pc_encoder_base.load_state_dict(torch.load(checkpoint_path, map_location="cpu"), strict=True)
        print(f"✅ PC encoder weights loaded from {checkpoint_path}")


# --- Models for Image-Mesh ---
class MoEImgHead(nn.Module):
    def __init__(self, in_dim: int, out_dim: int, n_experts: int = 8) -> None:
        super().__init__()
        self.experts = nn.ModuleList([nn.Linear(in_dim, out_dim) for _ in range(n_experts)])
        self.gate = nn.Sequential(nn.LayerNorm(in_dim), nn.Linear(in_dim, n_experts))

    def forward(self, feats: torch.Tensor, normalize: bool) -> torch.Tensor:
        logits = self.gate(feats)
        w = torch.softmax(logits, dim=-1)
        outs = torch.stack([e(feats) for e in self.experts], dim=1)
        out = (w.unsqueeze(-1) * outs).sum(1)
        return F.normalize(out, dim=-1) if normalize else out


class ImageEncoder(nn.Module):
    def __init__(self, config: edict) -> None:
        super().__init__()
        self.model = timm.create_model(config.model.pretrained_model_name, pretrained=True, num_classes=0)
|
| 110 |
+
|
| 111 |
+
self.img_proj = MoEImgHead(
|
| 112 |
+
config.model.embed_dim,
|
| 113 |
+
config.emb_dim,
|
| 114 |
+
n_experts=config.train_params.n_experts,
|
| 115 |
+
)
|
| 116 |
+
|
| 117 |
+
def encode_image(self, image: torch.Tensor, normalize: bool = True) -> torch.Tensor:
|
| 118 |
+
image_features = self.model(image) #вызываем под декоратором @torch.no_grad в evolution
|
| 119 |
+
image_embeddings = self.img_proj(image_features.float(), normalize=normalize)
|
| 120 |
+
return cast(torch.Tensor, image_embeddings)
|
| 121 |
+
|
| 122 |
+
def load_moe_weights(self, checkpoint_path: str) -> None:
|
| 123 |
+
self.img_proj.load_state_dict(torch.load(checkpoint_path, map_location="cpu"), strict=True)
|
| 124 |
+
print(f"✅ MoE weights loaded from {checkpoint_path}")
|
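The MoEImgHead above is a soft mixture: the gate maps each feature vector to softmax weights over the experts, and the output is the weight-averaged sum of all expert projections. A self-contained sketch of that computation on random inputs, using the dimensions from the config (backbone dim 1024, embedding dim 1280):

# Minimal sketch of the soft-MoE head computation on random features;
# shapes mirror the config (embed_dim=1024 -> emb_dim=1280, 8 experts).
import torch
import torch.nn as nn
import torch.nn.functional as F

in_dim, out_dim, n_experts, batch = 1024, 1280, 8, 4
experts = nn.ModuleList([nn.Linear(in_dim, out_dim) for _ in range(n_experts)])
gate = nn.Sequential(nn.LayerNorm(in_dim), nn.Linear(in_dim, n_experts))

feats = torch.randn(batch, in_dim)
w = torch.softmax(gate(feats), dim=-1)                  # (batch, n_experts), rows sum to 1
outs = torch.stack([e(feats) for e in experts], dim=1)  # (batch, n_experts, out_dim)
out = (w.unsqueeze(-1) * outs).sum(dim=1)               # (batch, out_dim)
out = F.normalize(out, dim=-1)
print(out.shape, w.sum(dim=-1))                         # torch.Size([4, 1280]), ~ones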
backend/cad_retrieval_utils/type_defs.py
ADDED
@@ -0,0 +1,27 @@
import os
from collections.abc import Callable
from pathlib import Path
from typing import TypeAlias, TypedDict

import numpy as np
import torch
from PIL import Image

# --- Primitive aliases ---
ModelID: TypeAlias = str
PathLike: TypeAlias = str | Path | os.PathLike[str]
ImageTransform: TypeAlias = Callable[[Image.Image], torch.Tensor]

# --- Types for NumPy arrays ---
EmbeddingArray: TypeAlias = np.ndarray


# --- Checkpoint specification for inference ---
class CheckpointSpec(TypedDict):
    # Paths for text-to-mesh
    text_proj: PathLike
    text_encoder: PathLike

    # Paths for image-to-mesh
    moe: PathLike
    pc_encoder: PathLike
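The dict passed around as CONFIG.paths.model_spec matches this shape; constructing one is just filling the four keys. A sketch with illustrative paths:

# Sketch: building a CheckpointSpec; the paths are illustrative placeholders.
from pathlib import Path

from cad_retrieval_utils.type_defs import CheckpointSpec

spec: CheckpointSpec = {
    "text_proj": Path("data/text_proj.pth"),
    "text_encoder": Path("data/text_encoder.pth"),
    "moe": Path("data/moe.pth"),
    "pc_encoder": Path("data/pc_encoder.pth"),
}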
backend/cad_retrieval_utils/utils.py
ADDED
@@ -0,0 +1,91 @@
import importlib.util
import os
import random
from pathlib import Path

import numpy as np
import torch
from easydict import EasyDict as edict


def load_config(config_path: str) -> edict:
    # NOTE: config_path is currently unused; the config is constructed in place.
    CONFIG = edict()

    # --- Config of the pretrained ReCon model to load ---
    CONFIG.model = edict({
        "NAME": "ReCon2",
        "group_size": 32,
        "num_group": 512,
        "mask_ratio": 0.7,
        "mask_type": "rand",
        "embed_dim": 1024,
        "depth": 24,
        "drop_path_rate": 0.2,
        "num_heads": 16,
        "decoder_depth": 4,
        "with_color": True,
        "stop_grad": False,
        "large_embedding": False,
        "img_queries": 13,
        "text_queries": 3,
        "contrast_type": "byol",
        "pretrained_model_name": "eva_large_patch14_336.in22k_ft_in22k_in1k",
    })

    # --- General parameters ---
    CONFIG.npoints = 10_000
    CONFIG.emb_dim = 1280
    CONFIG.img_size = 336
    CONFIG.seed = 42
    CONFIG.device = torch.device("cpu")
    CONFIG.text_ratio = 0.3

    # --- Inference parameters ---
    CONFIG.infer_img_batch_size = 32
    CONFIG.infer_pc_batch_size = 16
    CONFIG.infer_text_batch_size = 32

    # --- MoE parameters ---
    CONFIG.train_params = edict()
    CONFIG.train_params.n_experts = 8

    # --- Paths ---
    CONFIG.paths = edict()
    CONFIG.paths.test_data_root = Path("/kaggle/input/test-final/test")
    CONFIG.paths.submission_save_file = Path("./submission.csv")

    # These paths are overwritten from the command line by inference_runner.py
    CONFIG.paths.model_spec = {
        "text_proj": None,
        "text_encoder": None,
        "moe": None,
        "pc_encoder": None
    }
    return CONFIG


def init_environment(config: edict) -> None:
    SEED = config.seed

    # Everything uses the same base seed
    random.seed(SEED)
    os.environ["PYTHONHASHSEED"] = str(SEED)
    np.random.seed(SEED)
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)
    torch.cuda.manual_seed_all(SEED)

    # CuDNN settings
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # Disable TF32
    torch.backends.cuda.matmul.allow_tf32 = False
    torch.backends.cudnn.allow_tf32 = False

    # Deterministic algorithms
    torch.use_deterministic_algorithms(True)
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"

    print(f"✅ Deterministic environment set with seed = {SEED}")
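Because init_environment seeds every RNG and forces deterministic algorithms, re-running it should make random draws repeat exactly. A quick sanity-check sketch (load_config currently ignores its path argument, so any string works):

# Sketch: verifying that re-seeding reproduces the same random tensor.
import torch

from cad_retrieval_utils.utils import init_environment, load_config

config = load_config("unused")
init_environment(config)
a = torch.randn(3)
init_environment(config)
b = torch.randn(3)
assert torch.equal(a, b), "runs should be bit-identical after re-seeding"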
backend/config.py
ADDED
@@ -0,0 +1,57 @@
from pathlib import Path

import torch
from easydict import EasyDict as edict

CONFIG = edict()

# --- Config of the pretrained ReCon model to load ---
CONFIG.model = edict({
    "NAME": "ReCon2",
    "group_size": 32,
    "num_group": 512,
    "mask_ratio": 0.7,
    "mask_type": "rand",
    "embed_dim": 1024,
    "depth": 24,
    "drop_path_rate": 0.2,
    "num_heads": 16,
    "decoder_depth": 4,
    "with_color": True,
    "stop_grad": False,
    "large_embedding": False,
    "img_queries": 13,
    "text_queries": 3,
    "contrast_type": "byol",
    "pretrained_model_name": "eva_large_patch14_336.in22k_ft_in22k_in1k",
})

# --- General parameters ---
CONFIG.npoints = 10_000
CONFIG.emb_dim = 1280
CONFIG.img_size = 336
CONFIG.seed = 42
CONFIG.device = torch.device("cpu")
CONFIG.text_ratio = 0.3

# --- Inference parameters ---
CONFIG.infer_img_batch_size = 32
CONFIG.infer_pc_batch_size = 16
CONFIG.infer_text_batch_size = 32

# --- MoE parameters ---
CONFIG.train_params = edict()
CONFIG.train_params.n_experts = 8

# --- Paths ---
CONFIG.paths = edict()
CONFIG.paths.test_data_root = Path("/kaggle/input/test-final/test")
CONFIG.paths.submission_save_file = Path("./submission.csv")

# These paths are overwritten from the command line by inference_runner.py
CONFIG.paths.model_spec = {
    "text_proj": None,
    "text_encoder": None,
    "moe": None,
    "pc_encoder": None
}
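This module-level CONFIG duplicates the defaults built by load_config in cad_retrieval_utils/utils.py; the None entries in model_spec are meant to be filled in before use. A sketch of doing that in place, assuming backend/ is on the import path:

# Sketch: filling in the placeholder checkpoint paths on the shared CONFIG.
from config import CONFIG

CONFIG.paths.model_spec = {
    "text_proj": "data/text_proj.pth",
    "text_encoder": "data/text_encoder.pth",
    "moe": "data/moe.pth",
    "pc_encoder": "data/pc_encoder.pth",
}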
backend/download_utils.py
ADDED
@@ -0,0 +1,56 @@
import os
import requests
from tqdm.auto import tqdm

def download_yandex_file(public_file_url: str, destination_path: str, filename: str):
    """
    Downloads a single file from a public Yandex.Disk share via a direct file link.
    """
    api_url = "https://cloud-api.yandex.net/v1/disk/public/resources/download"
    params = {'public_key': public_file_url}

    print(f"🔎 Fetching file info: {filename}...")
    try:
        response = requests.get(api_url, params=params)
        response.raise_for_status()
        data = response.json()
        download_url = data.get('href')

        if not download_url:
            print(f"❌ Could not get a download URL for '{filename}'. API response: {data}")
            return False

    except requests.exceptions.RequestException as e:
        print(f"❌ Error while fetching info for '{filename}': {e}")
        return False
    except KeyError as e:
        print(f"❌ Error parsing the API response for '{filename}': missing key {e}. Response: {data}")
        return False

    full_path = os.path.join(destination_path, filename)
    os.makedirs(destination_path, exist_ok=True)

    print(f"📥 Downloading '{filename}' to '{full_path}'...")
    try:
        size_response = requests.head(download_url)
        total_size = int(size_response.headers.get('content-length', 0))

        download_response = requests.get(download_url, stream=True)
        download_response.raise_for_status()

        with open(full_path, 'wb') as f:
            with tqdm(total=total_size, unit='B', unit_scale=True, desc=filename) as pbar:
                for chunk in download_response.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)
                        pbar.update(len(chunk))

    except requests.exceptions.RequestException as e:
        print(f"\n❌ Error downloading file '{filename}': {e}")
        return False
    except Exception as e:
        print(f"\n❌ Unexpected error while downloading '{filename}': {e}")
        return False

    print(f"🎉 File '{filename}' downloaded successfully.")
    return True
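Each checkpoint is fetched with a single call; a sketch with a placeholder share link (the real links are listed in main.py):

# Sketch: downloading one checkpoint; the share URL here is a placeholder.
from download_utils import download_yandex_file

ok = download_yandex_file(
    public_file_url="https://disk.yandex.ru/d/XXXXXXXXXXXX",  # hypothetical link
    destination_path="data",
    filename="pc_encoder.pth",
)
if not ok:
    raise SystemExit("download failed")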
backend/inference_utils.py
ADDED
@@ -0,0 +1,355 @@
# app/inference_utils.py

import base64
import tempfile
import uuid
import zipfile
from io import BytesIO
from pathlib import Path
import datetime
from typing import Callable

import numpy as np
import torch
from easydict import EasyDict as edict
from PIL import Image
from sklearn.metrics.pairwise import cosine_similarity
from torch.utils.data import DataLoader

from cad_retrieval_utils.augmentations import build_img_transforms
from cad_retrieval_utils.datasets import (InferenceImageDataset,
                                          InferenceMeshDataset,
                                          InferenceTextDataset)
from cad_retrieval_utils.evaluation import (get_inference_embeddings_image,
                                            get_inference_embeddings_mesh,
                                            get_inference_embeddings_text)
from cad_retrieval_utils.inference import (load_image_encoder, load_pc_encoder,
                                           load_text_encoder)
from cad_retrieval_utils.models import (ImageEncoder, InferencePcEncoder,
                                        InferenceTextEncoder)
from cad_retrieval_utils.utils import init_environment, load_config

CONFIG: edict = None
IMG_TRANSFORM = None
PC_ENCODER: InferencePcEncoder = None
IMG_ENCODER: ImageEncoder = None
TEXT_ENCODER: InferenceTextEncoder = None
DATASET_CACHE = {}
TOP_K_MATCHES = 5


def load_models_and_config(config_path: str, model_paths: dict) -> None:
    # This function is unchanged
    global CONFIG, IMG_TRANSFORM, PC_ENCODER, IMG_ENCODER, TEXT_ENCODER
    print("🚀 Loading configuration and models...")
    if CONFIG is not None:
        print(" Models are already loaded.")
        return
    try:
        CONFIG = load_config(config_path)
        CONFIG.paths.model_spec = model_paths
        init_environment(CONFIG)
        PC_ENCODER = load_pc_encoder(CONFIG.paths.model_spec, CONFIG)
        IMG_ENCODER = load_image_encoder(CONFIG.paths.model_spec, CONFIG)
        TEXT_ENCODER = load_text_encoder(CONFIG.paths.model_spec, CONFIG)
        IMG_TRANSFORM = build_img_transforms(CONFIG.img_size)
        print("✅ All models loaded into memory.")
    except Exception as e:
        print(f"🔥 Critical error while loading models: {e}")
        raise


@torch.no_grad()
def get_embedding_for_single_item(modality: str, content_bytes: bytes) -> np.ndarray:
    # This function is unchanged
    if modality == "image":
        image = Image.open(BytesIO(content_bytes)).convert("RGB")
        tensor = IMG_TRANSFORM(image).unsqueeze(0).to(CONFIG.device)
        emb = IMG_ENCODER.encode_image(tensor, normalize=True)
        return emb.cpu().numpy()
    if modality == "text":
        text = content_bytes.decode("utf-8")
        emb = TEXT_ENCODER.encode_text([text], normalize=True)
        return emb.cpu().numpy()
    if modality == "mesh":
        with tempfile.NamedTemporaryFile(suffix=".stl", delete=True) as tmp:
            tmp.write(content_bytes)
            tmp.flush()
            dataset = InferenceMeshDataset([tmp.name], CONFIG.npoints, CONFIG.seed)
            tensor = dataset[0].unsqueeze(0).to(CONFIG.device)
            emb = PC_ENCODER.encode_pc(tensor, normalize=True)
            return emb.cpu().numpy()
    raise ValueError(f"Unknown modality: {modality}")


def process_uploaded_zip(
    zip_file_bytes: bytes,
    original_filename: str,
    update_status: Callable[[str, int], None]
) -> dict:
    """
    Main entry point for processing a ZIP archive, with callbacks for status updates.
    """
    dataset_id = str(uuid.uuid4())
    print(f"⚙️ Started processing a new dataset: {original_filename} (ID: {dataset_id})")
    update_status("Starting", 0)

    with tempfile.TemporaryDirectory() as tmpdir:
        tmp_path = Path(tmpdir)
        zip_path = tmp_path / "data.zip"
        zip_path.write_bytes(zip_file_bytes)

        update_status("Unpacking Files", 5)
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            zip_ref.extractall(tmp_path)
        print(f" 🗂️ Archive extracted to {tmpdir}")

        update_status("Preparing Data", 10)
        image_paths = sorted(list(tmp_path.glob("**/*.png")))
        text_paths = sorted(list(tmp_path.glob("**/*.txt")))
        mesh_paths = sorted(list(tmp_path.glob("**/*.stl")))

        image_ds = InferenceImageDataset([str(p) for p in image_paths], IMG_TRANSFORM)
        text_ds = InferenceTextDataset([str(p) for p in text_paths])
        mesh_ds = InferenceMeshDataset([str(p) for p in mesh_paths], CONFIG.npoints, CONFIG.seed)

        image_loader = DataLoader(image_ds, batch_size=CONFIG.infer_img_batch_size, shuffle=False)
        text_loader = DataLoader(text_ds, batch_size=CONFIG.infer_text_batch_size, shuffle=False)
        mesh_loader = DataLoader(mesh_ds, batch_size=CONFIG.infer_pc_batch_size, shuffle=False)

        print(" 🧠 Computing embeddings...")
        update_status("Processing Images", 15)
        image_embs = get_inference_embeddings_image(IMG_ENCODER, image_loader, CONFIG)

        update_status("Processing Texts", 50)
        text_embs = get_inference_embeddings_text(TEXT_ENCODER, text_loader, CONFIG)

        update_status("Processing 3D Models", 55)
        mesh_embs = get_inference_embeddings_mesh(PC_ENCODER, mesh_loader, CONFIG)
        print(" ✅ Embeddings computed.")

        update_status("Caching Data", 90)
        image_names = [p.name for p in image_paths]
        text_names = [p.name for p in text_paths]
        mesh_names = [p.name for p in mesh_paths]

        image_items = [{"id": f"image_{i}", "name": name, "content": base64.b64encode(p.read_bytes()).decode('utf-8')} for i, (p, name) in enumerate(zip(image_paths, image_names))]
        text_items = [{"id": f"text_{i}", "name": name, "content": p.read_text()} for i, (p, name) in enumerate(zip(text_paths, text_names))]
        mesh_items = [{"id": f"mesh_{i}", "name": name, "content": base64.b64encode(p.read_bytes()).decode('utf-8')} for i, (p, name) in enumerate(zip(mesh_paths, mesh_names))]

        dataset_data = {"images": image_items, "texts": text_items, "meshes": mesh_items}

        DATASET_CACHE[dataset_id] = {
            "data": dataset_data,
            "embeddings": {
                "image": (image_names, image_embs),
                "text": (text_names, text_embs),
                "mesh": (mesh_names, mesh_embs)
            }
        }
        print(f" 💾 Dataset {dataset_id} saved to cache.")

        print(" ⚖️ Computing the full similarity matrix...")
        update_status("Building Matrix", 95)
        full_comparison = {"images": [], "texts": [], "meshes": []}

        all_embeddings = {
            "image": (image_names, image_embs),
            "text": (text_names, text_embs),
            "mesh": (mesh_names, mesh_embs)
        }

        for source_modality, (source_names, source_embs) in all_embeddings.items():
            for i, source_name in enumerate(source_names):
                source_emb = source_embs[i:i+1]
                matches = {}
                for target_modality, (target_names, target_embs) in all_embeddings.items():
                    if not target_names: continue
                    sims = cosine_similarity(source_emb, target_embs).flatten()

                    if source_modality == target_modality:
                        sims[i] = -1

                    top_indices = np.argsort(sims)[::-1][:TOP_K_MATCHES]
                    matches[target_modality] = [
                        {"item": target_names[j], "confidence": float(sims[j])} for j in top_indices if sims[j] > -1
                    ]

                key_name = "meshes" if source_modality == "mesh" else source_modality + 's'
                full_comparison[key_name].append({"source": source_name, "matches": matches})

        print(" ✅ Similarity matrix ready.")

        final_response = {
            "id": dataset_id,
            "name": original_filename,
            "uploadDate": datetime.datetime.utcnow().isoformat() + "Z",
            "data": dataset_data,
            "processingState": "processed",
            "processingProgress": 100,
            "fullComparison": full_comparison
        }

        print(f"✅ Finished processing dataset {dataset_id}.")
        return final_response


def process_shared_dataset_directory(directory_path: Path, embeddings_path: Path, dataset_id: str, dataset_name: str) -> dict:
    # This function is unchanged
    print(f"⚙️ Started processing the shared dataset: {dataset_name} (ID: {dataset_id})")
    print(" 📂 Scanning data files...")
    image_paths = sorted(list(directory_path.glob("**/*.png")))
    text_paths = sorted(list(directory_path.glob("**/*.txt")))
    mesh_paths = sorted(list(directory_path.glob("**/*.stl")))
    if not any([image_paths, text_paths, mesh_paths]):
        print(f"⚠️ No files found in the shared dataset directory '{directory_path}'.")
        return None
    print(f" ✅ Found: {len(image_paths)} images, {len(text_paths)} texts, {len(mesh_paths)} models.")
    print(" 🧠 Indexing pre-computed embeddings...")
    all_embedding_paths = list(embeddings_path.glob("**/*.npy"))
    embedding_map = {p.stem: p for p in all_embedding_paths}
    print(f" ✅ Found {len(embedding_map)} embedding files.")
    def load_embeddings_for_paths(data_paths: list[Path]):
        names = []
        embs_list = []
        for data_path in data_paths:
            file_stem = data_path.stem
            if file_stem in embedding_map:
                embedding_path = embedding_map[file_stem]
                try:
                    emb = np.load(embedding_path)
                    embs_list.append(emb)
                    names.append(data_path.name)
                except Exception as e:
                    print(f" ⚠️ Failed to load or parse the embedding for {data_path.name}: {e}")
            else:
                print(f" ⚠️ Warning: no matching embedding found for {data_path.name}")
        return names, np.array(embs_list) if embs_list else np.array([])
    print(" 🚚 Loading and matching embeddings...")
    image_names, image_embs = load_embeddings_for_paths(image_paths)
    text_names, text_embs = load_embeddings_for_paths(text_paths)
    mesh_names, mesh_embs = load_embeddings_for_paths(mesh_paths)
    print(" ✅ Embeddings for the shared dataset loaded.")
    static_root = Path("static")
    image_items = [{"id": f"image_{i}", "name": p.name, "content": None, "contentUrl": f"/{p.relative_to(static_root)}"} for i, p in enumerate(image_paths)]
    text_items = [{"id": f"text_{i}", "name": p.name, "content": None, "contentUrl": f"/{p.relative_to(static_root)}"} for i, p in enumerate(text_paths)]
    mesh_items = [{"id": f"mesh_{i}", "name": p.name, "content": None, "contentUrl": f"/{p.relative_to(static_root)}"} for i, p in enumerate(mesh_paths)]
    dataset_data = {"images": image_items, "texts": text_items, "meshes": mesh_items}
    DATASET_CACHE[dataset_id] = {"data": dataset_data, "embeddings": {"image": (image_names, image_embs), "text": (text_names, text_embs), "mesh": (mesh_names, mesh_embs)}}
    print(f" 💾 Embeddings for shared dataset {dataset_id} saved to cache.")
    print(" ⚖️ Computing the full similarity matrix for the shared dataset...")
    full_comparison = {"images": [], "texts": [], "meshes": []}
    all_embeddings = {"image": (image_names, image_embs), "text": (text_names, text_embs), "mesh": (mesh_names, mesh_embs)}
    for source_modality, (source_names, source_embs) in all_embeddings.items():
        if len(source_names) == 0: continue
        for i, source_name in enumerate(source_names):
            source_emb = source_embs[i:i+1]
            matches = {}
            for target_modality, (target_names, target_embs) in all_embeddings.items():
                if len(target_names) == 0: continue
                sims = cosine_similarity(source_emb, target_embs).flatten()
                if source_modality == target_modality:
                    sims[i] = -1
                top_indices = np.argsort(sims)[::-1][:TOP_K_MATCHES]
                matches[target_modality] = [{"item": target_names[j], "confidence": float(sims[j])} for j in top_indices if sims[j] > -1]
            key_name = "meshes" if source_modality == "mesh" else source_modality + 's'
            full_comparison[key_name].append({"source": source_name, "matches": matches})
    print(" ✅ Similarity matrix for the shared dataset ready.")
    try:
        creation_time = datetime.datetime.fromtimestamp(directory_path.stat().st_ctime)
    except Exception:
        creation_time = datetime.datetime.utcnow()
    final_response = {"id": dataset_id, "name": dataset_name, "uploadDate": creation_time.isoformat() + "Z", "data": dataset_data, "processingState": "processed", "processingProgress": 100, "fullComparison": full_comparison, "isShared": True}
    print(f"✅ Finished processing shared dataset {dataset_id}.")
    return final_response


def find_matches_for_item(modality: str, content_base64: str, dataset_id: str) -> dict:
    # This function is unchanged
    print(f"🔍 Searching for matches for an item ({modality}) in dataset {dataset_id}...")
    if dataset_id not in DATASET_CACHE:
        raise ValueError(f"Dataset with ID {dataset_id} was not found in the cache.")
    content_bytes = base64.b64decode(content_base64)
    source_emb = get_embedding_for_single_item(modality, content_bytes)
    cached_dataset = DATASET_CACHE[dataset_id]
    results = {}
    for target_modality, (target_names, target_embs) in cached_dataset["embeddings"].items():
        key_name = "meshes" if target_modality == "mesh" else target_modality + 's'
        if not target_names: continue
        sims = cosine_similarity(source_emb, target_embs).flatten()
        top_indices = np.argsort(sims)[::-1][:TOP_K_MATCHES]
        target_items_map = {item['name']: item for item in cached_dataset['data'][key_name]}
        matches = []
        for j in top_indices:
            item_name = target_names[j]
            if item_name in target_items_map:
                matches.append({"item": target_items_map[item_name], "confidence": float(sims[j])})
        results[key_name] = matches
    print(" ✅ Search complete.")
    return {"results": results}


def cache_local_dataset(dataset: dict) -> None:
    """
    Receives a full dataset object from the frontend, computes embeddings,
    and loads it into the in-memory cache.
    """
    dataset_id = dataset.get('id')
    if not dataset_id:
        print("⚠️ Attempted to cache a dataset without an ID.")
        return

    if dataset_id in DATASET_CACHE:
        print(f"✅ Dataset {dataset_id} is already in the backend cache. Skipping re-hydration.")
        return

    print(f"🧠 Re-hydrating backend cache for local dataset ID: {dataset_id}")

    try:
        all_embeddings = {}
        all_names = {}

        # The content comes in different formats (data URL for images, text for text, etc.)
        # We need to decode it before sending to the embedding function.
        def get_bytes_from_content(content_str: str, modality: str) -> bytes:
            if modality in ['image', 'mesh']:
                # Handle data URLs (e.g., "data:image/png;base64,...") or raw base64
                if ',' in content_str:
                    header, encoded = content_str.split(',', 1)
                    return base64.b64decode(encoded)
                else:
                    return base64.b64decode(content_str)
            else:  # text
                return content_str.encode('utf-8')

        for modality_plural, items in dataset.get('data', {}).items():
            modality_singular = "mesh" if modality_plural == "meshes" else modality_plural[:-1]

            names = []
            embs_list = []

            print(f" ⚙️ Processing {len(items)} items for modality: {modality_singular}")

            for item in items:
                item_content = item.get('content')
                if not item_content:
                    continue

                content_bytes = get_bytes_from_content(item_content, modality_singular)
                embedding = get_embedding_for_single_item(modality_singular, content_bytes)

                embs_list.append(embedding[0])  # get_embedding returns shape (1, D)
                names.append(item.get('name'))

            all_names[modality_singular] = names
            all_embeddings[modality_singular] = np.array(embs_list) if embs_list else np.array([])

        # Structure the cache entry exactly like process_uploaded_zip does
        DATASET_CACHE[dataset_id] = {
            "data": dataset.get('data'),
            "embeddings": {
                mod: (all_names[mod], all_embeddings[mod]) for mod in all_embeddings
            }
        }
        print(f" ✅ Successfully cached {dataset_id} with embeddings.")

    except Exception as e:
        print(f"🔥 CRITICAL ERROR while re-hydrating cache for {dataset_id}: {e}")
        import traceback
        traceback.print_exc()
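All three matching paths above reduce to the same ranking: cosine similarity of L2-normalized embeddings, the self-match masked with -1, and an argsort for the top-k. The same math in isolation, on random unit-norm vectors:

# Sketch: the top-k cosine ranking used above, on random unit-norm embeddings.
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

rng = np.random.default_rng(0)
embs = rng.normal(size=(6, 1280))
embs /= np.linalg.norm(embs, axis=1, keepdims=True)

i = 0                                            # source item index
sims = cosine_similarity(embs[i:i+1], embs).flatten()
sims[i] = -1                                     # mask the self-match
top = np.argsort(sims)[::-1][:5]                 # TOP_K_MATCHES = 5
print([(int(j), float(sims[j])) for j in top])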
backend/main.py
ADDED
@@ -0,0 +1,278 @@
# app/main.py

import os
import asyncio
from pathlib import Path
import zipfile
import io
import requests
import uuid
from fastapi import FastAPI, UploadFile, File, HTTPException, BackgroundTasks
from fastapi.staticfiles import StaticFiles
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from typing import List, Dict, Any

# Import the inference utilities
from inference_utils import (
    load_models_and_config,
    process_uploaded_zip,
    find_matches_for_item,
    process_shared_dataset_directory,
    cache_local_dataset,
)
# Import the download helper
from download_utils import download_yandex_file

# --- Initialization ---
app = FastAPI()

# Allow CORS
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# --- Global caches ---
SHARED_DATASET_FULL_DATA = {}
SHARED_DATASET_ID = "shared_dataset_1"
PROCESSING_STATUS = {}  # NEW: For tracking progress

# --- Helper Functions ---

def download_and_unzip_yandex_archive(public_url: str, destination_dir: Path, description: str):
    # This function is unchanged
    print(f"--- 📥 Checking for {description} ---")
    if destination_dir.exists() and any(destination_dir.iterdir()):
        print(f"✅ {description} already exists in '{destination_dir}'. Skipping download.")
        return True
    print(f"⏳ {description} not found. Starting download from Yandex.Disk...")
    destination_dir.mkdir(parents=True, exist_ok=True)
    if "YOUR_" in public_url or "ВАША_" in public_url:
        print(f"🔥 WARNING: Placeholder URL detected for {description}. Download skipped.")
        return False
    try:
        api_url = "https://cloud-api.yandex.net/v1/disk/public/resources/download"
        params = {'public_key': public_url}
        response = requests.get(api_url, params=params)
        response.raise_for_status()
        download_url = response.json().get('href')
        if not download_url:
            raise RuntimeError(f"Could not retrieve download URL for {description} from Yandex.Disk API.")
        print(f" 🔗 Got download link. Fetching ZIP archive for {description}...")
        zip_response = requests.get(download_url, stream=True)
        zip_response.raise_for_status()
        zip_in_memory = io.BytesIO(zip_response.content)
        print(f" 🗂️ Unzipping archive for {description}...")
        with zipfile.ZipFile(zip_in_memory, 'r') as zip_ref:
            zip_ref.extractall(destination_dir)
        print(f"🎉 {description} successfully downloaded and extracted to '{destination_dir}'.")
        return True
    except Exception as e:
        print(f"🔥 CRITICAL ERROR downloading or unzipping {description}: {e}")
        return False

# --- NEW: Background Processing Wrapper ---
def background_process_zip(zip_bytes: bytes, original_filename: str, job_id: str):
    """Wrapper function to run processing and update status."""
    def update_status(stage: str, progress: int):
        """Callback to update the global status dictionary."""
        print(f"Job {job_id}: {stage} - {progress}%")
        PROCESSING_STATUS[job_id] = {"stage": stage, "progress": progress, "status": "processing"}

    try:
        processed_data = process_uploaded_zip(
            zip_bytes, original_filename, update_status
        )
        PROCESSING_STATUS[job_id] = {
            "status": "complete",
            "result": processed_data
        }
    except Exception as e:
        import traceback
        traceback.print_exc()
        PROCESSING_STATUS[job_id] = {
            "status": "error",
            "message": f"An error occurred during processing: {e}"
        }

class SingleMatchRequest(BaseModel):
    modality: str
    content: str
    dataset_id: str

# --- MODIFIED: process-dataset endpoint ---
class ProcessDatasetResponse(BaseModel):
    job_id: str

class DataItemModel(BaseModel):
    id: str
    name: str
    content: str | None = None  # Frontend sends content as string (base64 or text)
    contentUrl: str | None = None

class DatasetDataModel(BaseModel):
    images: List[DataItemModel]
    texts: List[DataItemModel]
    meshes: List[DataItemModel]

class LocalDatasetModel(BaseModel):
    id: str
    name: str
    data: DatasetDataModel
    # We only need the core data for re-hydration, other fields are optional
    # Use 'Any' for complex fields we don't need to strictly validate here
    fullComparison: Dict[str, Any] | None = None

@app.post("/api/cache-local-dataset")
async def cache_local_dataset_endpoint(dataset: LocalDatasetModel):
    """
    Receives a local dataset from the frontend to re-hydrate the server's in-memory cache.
    """
    try:
        # Pydantic's .dict() is deprecated, use .model_dump()
        dataset_dict = dataset.model_dump()
        await asyncio.to_thread(cache_local_dataset, dataset_dict)
        return {"status": "cached", "id": dataset.id}
    except Exception as e:
        import traceback
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=f"Failed to cache dataset: {e}")

# --- Startup Event ---
@app.on_event("startup")
def startup_event():
    # This function is unchanged
    SHARED_DATASET_DIR = Path("static/shared_dataset")
    SHARED_EMBEDDINGS_DIR = Path("static/shared_embeddings")

    SHARED_DATASET_ZIP_URL = "https://disk.yandex.ru/d/G9C3_FGGzSLAXw"
    SHARED_EMBEDDINGS_ZIP_URL = "https://disk.yandex.ru/d/aVTX6n2pc0hrCw"
    dataset_ready = download_and_unzip_yandex_archive(SHARED_DATASET_ZIP_URL, SHARED_DATASET_DIR, "shared dataset files")
    embeddings_ready = download_and_unzip_yandex_archive(SHARED_EMBEDDINGS_ZIP_URL, SHARED_EMBEDDINGS_DIR, "pre-computed embeddings")
    DATA_DIR = Path("data/")
    MODEL_URLS = {
        "text_proj.pth": "https://disk.yandex.ru/d/uMH1ls0nYM4txw",
        "text_encoder.pth": "https://disk.yandex.ru/d/R0BBLPXj828OhA",
        "moe.pth": "https://disk.yandex.ru/d/vDfuIPziuO45wg",
        "pc_encoder.pth": "https://disk.yandex.ru/d/03Ps2TMcWAKkww",
    }
    print("--- 📥 Checking and loading models ---")
    DATA_DIR.mkdir(parents=True, exist_ok=True)
    all_models_present = True
    for filename, url in MODEL_URLS.items():
        destination_file = DATA_DIR / filename
        if not destination_file.exists():
            print(f"⏳ Model '{filename}' not found. Starting download...")
            if "ВАША_ССЫЛКА" not in url:
                success = download_yandex_file(public_file_url=url, destination_path=str(DATA_DIR), filename=filename)
                if not success:
                    all_models_present = False
                    print(f"🔥 Critical error: failed to download model '{filename}'.")
            else:
                all_models_present = False
                print(f"🔥 WARNING: Skipped downloading '{filename}' because the link is a placeholder.")
        else:
            print(f"✅ Model '{filename}' already exists. Skipping download.")
    if not all_models_present:
        raise RuntimeError("Failed to download all required models. The application cannot start.")
    print("--- ✅ All models are ready to use ---")
    model_paths = {"text_proj": str(DATA_DIR / "text_proj.pth"), "text_encoder": str(DATA_DIR / "text_encoder.pth"), "moe": str(DATA_DIR / "moe.pth"), "pc_encoder": str(DATA_DIR / "pc_encoder.pth")}
    config_path = "cad_retrieval_utils/configs/config.py"
    try:
        load_models_and_config(config_path=config_path, model_paths=model_paths)
        print("✅ All models loaded into memory.")
    except Exception as e:
        print(f"🔥 Error while loading models: {e}")
        import traceback
        traceback.print_exc()
        raise RuntimeError("Model loading failed; the application cannot start.") from e
    if dataset_ready and embeddings_ready:
        print("--- 🧠 Loading pre-computed embeddings for shared dataset ---")
        try:
            full_data = process_shared_dataset_directory(directory_path=SHARED_DATASET_DIR, embeddings_path=SHARED_EMBEDDINGS_DIR, dataset_id=SHARED_DATASET_ID, dataset_name="Cloud Multi-Modal Dataset")
            if full_data:
                SHARED_DATASET_FULL_DATA[SHARED_DATASET_ID] = full_data
                print("--- ✅ Shared dataset processed and cached successfully. ---")
            else:
                print("--- ⚠️ Shared dataset processing returned no data. Caching skipped. ---")
        except Exception as e:
            print(f"🔥 CRITICAL ERROR processing shared dataset: {e}")
            import traceback
            traceback.print_exc()
    else:
        print("--- ⚠️ Shared dataset or embeddings not available. Processing skipped. ---")


# --- API Endpoints ---
@app.get("/api/shared-dataset-metadata")
async def get_shared_dataset_metadata():
    # This function is unchanged
    metadata_list = []
    for dataset_id, full_data in SHARED_DATASET_FULL_DATA.items():
        metadata = {"id": full_data["id"], "name": full_data["name"], "uploadDate": full_data["uploadDate"], "processingState": full_data["processingState"], "itemCounts": {"images": len(full_data["data"]["images"]), "texts": len(full_data["data"]["texts"]), "meshes": len(full_data["data"]["meshes"])}, "isShared": True}
        metadata_list.append(metadata)
    return metadata_list

@app.get("/api/shared-dataset")
async def get_shared_dataset(id: str):
    # This function is unchanged
    dataset = SHARED_DATASET_FULL_DATA.get(id)
    if not dataset:
        raise HTTPException(status_code=404, detail=f"Shared dataset with id '{id}' not found.")
    return dataset

@app.post("/api/process-dataset", response_model=ProcessDatasetResponse)
async def process_dataset_endpoint(
    background_tasks: BackgroundTasks, file: UploadFile = File(...)
):
    if not file.filename or not file.filename.endswith('.zip'):
        raise HTTPException(status_code=400, detail="A ZIP archive is required.")

    zip_bytes = await file.read()
    job_id = str(uuid.uuid4())
    PROCESSING_STATUS[job_id] = {"status": "starting", "stage": "Queued", "progress": 0}

    background_tasks.add_task(
        background_process_zip, zip_bytes, file.filename, job_id
    )
    return {"job_id": job_id}

# --- NEW: processing-status endpoint ---
class StatusResponse(BaseModel):
    status: str
    stage: str | None = None
    progress: int | None = None
    message: str | None = None
    result: dict | None = None

@app.get("/api/processing-status/{job_id}", response_model=StatusResponse)
async def get_processing_status(job_id: str):
    """Poll this endpoint to get the status of a processing job."""
    status = PROCESSING_STATUS.get(job_id)
    if not status:
        raise HTTPException(status_code=404, detail="Job ID not found.")
    return status


@app.post("/api/find-matches")
async def find_matches_endpoint(request: SingleMatchRequest):
    # This function is unchanged
    try:
        match_results = await asyncio.to_thread(
            find_matches_for_item, request.modality, request.content, request.dataset_id
        )
        source_item_data = {"id": "source_item", "name": "Source Item", "content": request.content}
        final_response = {"sourceItem": source_item_data, "sourceModality": request.modality, **match_results}
        return final_response
    except ValueError as ve:
        raise HTTPException(status_code=404, detail=str(ve))
    except Exception as e:
        import traceback
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=f"Error while searching for matches: {e}")

app.mount("/", StaticFiles(directory="static", html=True), name="static")
|
backend/requirements.txt
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
--extra-index-url https://download.pytorch.org/whl/cpu
|
| 2 |
+
|
| 3 |
+
# Framework
|
| 4 |
+
fastapi
|
| 5 |
+
uvicorn[standard]
|
| 6 |
+
python-multipart
|
| 7 |
+
|
| 8 |
+
# ML & Data Science
|
| 9 |
+
easydict
|
| 10 |
+
matplotlib
|
| 11 |
+
ninja
|
| 12 |
+
numpy
|
| 13 |
+
open_clip_torch
|
| 14 |
+
pandas
|
| 15 |
+
Pillow
|
| 16 |
+
PyYAML
|
| 17 |
+
scikit-learn
|
| 18 |
+
scipy
|
| 19 |
+
seaborn
|
| 20 |
+
termcolor
|
| 21 |
+
timm
|
| 22 |
+
torch
|
| 23 |
+
torchaudio
|
| 24 |
+
torchvision
|
| 25 |
+
tqdm
|
| 26 |
+
trimesh
|
| 27 |
+
umap-learn
|
| 28 |
+
|
| 29 |
+
# Other
|
| 30 |
+
requests
|
| 31 |
+
wandb
|
frontend/.DS_Store
ADDED
Binary file (6.15 kB).
frontend/App.tsx
ADDED
|
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import React, { useState, useMemo, useEffect, useCallback, lazy, Suspense } from 'react';
|
| 3 |
+
import { DatasetManager } from './components/DatasetManager';
|
| 4 |
+
import type { Dataset, DatasetMetadata } from './types';
|
| 5 |
+
import * as db from './services/dbService';
|
| 6 |
+
import * as apiService from './services/apiService';
|
| 7 |
+
import { Spinner } from './components/common/Spinner';
|
| 8 |
+
|
| 9 |
+
const ComparisonTool = lazy(() => import('./components/ComparisonTool').then(module => ({ default: module.ComparisonTool })));
|
| 10 |
+
|
| 11 |
+
type View = 'manager' | 'comparison';
|
| 12 |
+
|
| 13 |
+
const App: React.FC = () => {
|
| 14 |
+
const [datasets, setDatasets] = useState<DatasetMetadata[]>([]);
|
| 15 |
+
const [selectedDatasetId, setSelectedDatasetId] = useState<string | null>(null);
|
| 16 |
+
const [activeDataset, setActiveDataset] = useState<Dataset | null>(null);
|
| 17 |
+
const [view, setView] = useState<View>('manager');
|
| 18 |
+
const [isLoading, setIsLoading] = useState(true);
|
| 19 |
+
const [isNavigating, setIsNavigating] = useState(false);
|
| 20 |
+
const [error, setError] = useState<string | null>(null);
|
| 21 |
+
|
| 22 |
+
useEffect(() => {
|
| 23 |
+
const loadInitialData = async () => {
|
| 24 |
+
try {
|
| 25 |
+
const localMeta = await db.getAllDatasetMetadata();
|
| 26 |
+
let sharedMeta: DatasetMetadata[] = [];
|
| 27 |
+
try {
|
| 28 |
+
sharedMeta = await apiService.getSharedDatasetMetadata();
|
| 29 |
+
} catch (e) {
|
| 30 |
+
console.error("Could not load shared datasets, continuing with local.", e);
|
| 31 |
+
setError("Could not load cloud datasets. The backend service may be unavailable. Local datasets are still accessible.");
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
const allMeta = [...sharedMeta, ...localMeta];
|
| 35 |
+
|
| 36 |
+
setDatasets(allMeta);
|
| 37 |
+
|
| 38 |
+
if (allMeta.length > 0) {
|
| 39 |
+
// Select the most recent dataset by default
|
| 40 |
+
const sortedMeta = [...allMeta].sort((a, b) => new Date(b.uploadDate).getTime() - new Date(a.uploadDate).getTime());
|
| 41 |
+
setSelectedDatasetId(sortedMeta[0].id);
|
| 42 |
+
}
|
| 43 |
+
} catch (error) {
|
| 44 |
+
console.error("Failed to load initial data", error);
|
| 45 |
+
setError("A critical error occurred while loading local datasets.");
|
| 46 |
+
} finally {
|
| 47 |
+
setIsLoading(false);
|
| 48 |
+
}
|
| 49 |
+
};
|
| 50 |
+
loadInitialData();
|
| 51 |
+
}, []);
|
| 52 |
+
|
| 53 |
+
const addDataset = async (newDataset: Dataset) => {
|
| 54 |
+
await db.addDataset(newDataset);
|
| 55 |
+
const localMeta = await db.getAllDatasetMetadata();
|
| 56 |
+
const sharedMeta = datasets.filter(d => d.isShared); // Keep existing shared meta
|
| 57 |
+
setDatasets([...sharedMeta, ...localMeta]);
|
| 58 |
+
setSelectedDatasetId(newDataset.id);
|
| 59 |
+
};
|
| 60 |
+
|
| 61 |
+
const deleteDataset = async (id: string) => {
|
| 62 |
+
await db.deleteDataset(id);
|
| 63 |
+
setDatasets(prevDatasets => {
|
| 64 |
+
const newDatasets = prevDatasets.filter(d => d.id !== id);
|
| 65 |
+
if (selectedDatasetId === id) {
|
| 66 |
+
const sortedMeta = [...newDatasets].sort((a, b) => new Date(b.uploadDate).getTime() - new Date(a.uploadDate).getTime());
|
| 67 |
+
setSelectedDatasetId(sortedMeta.length > 0 ? sortedMeta[0].id : null);
|
| 68 |
+
}
|
| 69 |
+
return newDatasets;
|
| 70 |
+
});
|
| 71 |
+
};
|
| 72 |
+
|
| 73 |
+
const renameDataset = async (id: string, newName: string) => {
|
| 74 |
+
await db.renameDataset(id, newName);
|
| 75 |
+
setDatasets(prev => prev.map(d => d.id === id ? { ...d, name: newName } : d));
|
| 76 |
+
};
|
| 77 |
+
|
| 78 |
+
const processedDatasets = useMemo(() => {
|
| 79 |
+
return datasets.filter(d => d.processingState === 'processed');
|
| 80 |
+
}, [datasets]);
|
| 81 |
+
|
| 82 |
+
const getFullDataset = async (id: string): Promise<Dataset | null> => {
|
| 83 |
+
const meta = datasets.find(d => d.id === id);
|
| 84 |
+
if (!meta) return null;
|
| 85 |
+
|
| 86 |
+
if (meta.isShared) {
|
| 87 |
+
return apiService.getSharedDataset(id);
|
| 88 |
+
} else {
|
| 89 |
+
return db.getDataset(id);
|
| 90 |
+
}
|
| 91 |
+
};
|
| 92 |
+
|
| 93 |
+
  const handleOpenComparisonTool = useCallback(async () => {
    if (!selectedDatasetId) return;
    const selectedMeta = datasets.find(d => d.id === selectedDatasetId);
    if (!selectedMeta || selectedMeta.processingState !== 'processed') return;

    setView('comparison');
    setActiveDataset(null);
    setIsNavigating(true);

    try {
      const fullDataset = await getFullDataset(selectedDatasetId);
      if (!fullDataset) {
        throw new Error(`Failed to load dataset ${selectedDatasetId}.`);
      }

      // *** NEW LOGIC ***
      // If it's a local dataset, ensure it's in the backend's cache before proceeding.
      if (!fullDataset.isShared) {
        console.log("Local dataset selected. Ensuring it's cached on the backend...");
        await apiService.ensureDatasetInCache(fullDataset);
        console.log("Backend cache confirmed.");
      }

      setActiveDataset(fullDataset);

    } catch (error) {
      console.error("Error preparing comparison tool:", error);
      alert(`Error: Could not load the selected dataset. ${error instanceof Error ? error.message : ''}`);
      setView('manager'); // Go back on error
    } finally {
      setIsNavigating(false);
    }
  }, [selectedDatasetId, datasets]);

  const handleDatasetChange = useCallback(async (newId: string) => {
    setSelectedDatasetId(newId);

    setActiveDataset(null);
    setIsNavigating(true);

    try {
      const fullDataset = await getFullDataset(newId);
      if (!fullDataset) {
        throw new Error(`Failed to load dataset ${newId}.`);
      }

      // Also ensure cache is hydrated when switching datasets inside the tool
      if (!fullDataset.isShared) {
        await apiService.ensureDatasetInCache(fullDataset);
      }

      setActiveDataset(fullDataset);

    } catch (error) {
      console.error(`Error switching dataset to ${newId}:`, error);
      setActiveDataset(null);
    } finally {
      setIsNavigating(false);
    }
  }, [datasets]); // getFullDataset closes over `datasets`, so list it to avoid a stale closure


  const mainContent = () => {
    if (isLoading) {
      return <div className="flex justify-center items-center h-64"><Spinner /><span>Loading Datasets...</span></div>;
    }

    const errorBanner = error ? (
      <div className="bg-red-800/50 border border-red-600 text-red-200 px-4 py-3 rounded-lg mb-4" role="alert">
        <p>
          <strong className="font-bold">Cloud Connection Error:</strong> {error}
        </p>
      </div>
    ) : null;

    if (view === 'manager') {
      return (
        <DatasetManager
          datasets={datasets}
          selectedDatasetId={selectedDatasetId}
          onSelectDataset={setSelectedDatasetId}
          onAddDataset={addDataset}
          onDeleteDataset={deleteDataset}
          onRenameDataset={renameDataset}
          onOpenComparisonTool={handleOpenComparisonTool}
          onGetFullDataset={getFullDataset}
          errorBanner={errorBanner}
        />
      );
    }

    if (view === 'comparison') {
      const fallbackUI = <div className="flex justify-center items-center h-64"><Spinner /><span>Loading Comparison Tool...</span></div>;
      if (isNavigating || !activeDataset) {
        return fallbackUI;
      }
      return (
        <Suspense fallback={fallbackUI}>
          <ComparisonTool
            key={activeDataset.id}
            dataset={activeDataset}
            allDatasets={processedDatasets}
            onDatasetChange={handleDatasetChange}
            onBack={() => { setView('manager'); setActiveDataset(null); }}
          />
        </Suspense>
      );
    }
    return null;
  };

  return (
    <div className="min-h-screen bg-gray-900 text-gray-200 font-sans">
      <header className="bg-gray-800/50 backdrop-blur-sm border-b border-gray-700 sticky top-0 z-20">
        <div className="container mx-auto px-4 sm:px-6 lg:px-8 py-4 flex items-center justify-between">
          <h1 className="text-2xl font-bold text-cyan-400">
            Cross-Modal Object Comparison Tool
          </h1>
        </div>
      </header>
      <main className="container mx-auto px-4 sm:px-6 lg:px-8 py-8">
        {mainContent()}
      </main>
    </div>
  );
};

export default App;
frontend/components/ComparisonTool.tsx
ADDED
@@ -0,0 +1,558 @@

import React, { useState, useMemo, useCallback, useEffect, useRef, lazy, Suspense } from 'react';
import type { Dataset, Modality, DataItem, SingleComparisonResult, DatasetMetadata } from '../types';
import { findTopMatches } from '../services/apiService';
import { findTopMatchesFromLocal } from '../services/comparisonService';
import { downloadJson } from '../services/fileService';
import { FullscreenViewer } from './common/FullscreenViewer';
import { Spinner } from './common/Spinner';
import { getItemContent } from '../services/sharedDatasetService';

const MeshViewer = lazy(() => import('./common/MeshViewer').then(module => ({ default: module.MeshViewer })));

interface ItemCardBaseProps {
  modality: Modality;
  isSource?: boolean;
  confidence?: number;
  onView: (item: DataItem, modality: Modality) => void;
  onClick?: () => void;
  className?: string;
}

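// Card wrapper that lazily fetches an item's content (text/mesh) from its
// contentUrl before rendering, showing a spinner placeholder meanwhile.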
const LazyItemCard: React.FC<ItemCardBaseProps & { item: DataItem }> = ({ item, ...props }) => {
  const [loadedItem, setLoadedItem] = useState<DataItem>(item);
  const [isLoading, setIsLoading] = useState(!item.content && !!item.contentUrl);

  useEffect(() => {
    let isMounted = true;
    // Images are loaded by the browser directly via `contentUrl`, so we only fetch for text/mesh.
    if (!item.content && item.contentUrl && props.modality !== 'image') {
      setIsLoading(true);
      getItemContent(item.contentUrl)
        .then(content => {
          if (isMounted) {
            setLoadedItem({ ...item, content });
            setIsLoading(false);
          }
        })
        .catch(err => {
          console.error("Failed to load item content", err);
          if (isMounted) setIsLoading(false);
        });
    } else {
      setLoadedItem(item);
      setIsLoading(false);
    }
    return () => { isMounted = false; }
  }, [item, props.modality]);

  if (isLoading) {
    return (
      <div className={`bg-gray-800 rounded-lg shadow-md border border-gray-700 flex items-center justify-center aspect-[4/3] ${props.className}`}>
        <Spinner />
      </div>
    );
  }

  return <ItemCard {...props} item={loadedItem} />;
};


const ItemCard: React.FC<ItemCardBaseProps & { item: DataItem }> = ({ item, modality, isSource, confidence, onView, onClick }) => {
  const isText = modality === 'text';

  // Use contentUrl for images directly, fallback to loaded content if available.
  const imageUrl = modality === 'image' ? (item.contentUrl || (item.content as string)) : null;

  const content = useMemo(() => {
    switch (modality) {
      case 'image':
        if (!imageUrl) return null;
        return (
          <div className="relative pt-[75%]">
            <div className="absolute inset-0">
              <img src={imageUrl} alt={item.name} className="object-cover w-full h-full rounded-md" />
            </div>
          </div>
        );
      case 'text':
        if (typeof item.content !== 'string') return null;
        return <div className="h-full overflow-y-auto"><p className="text-xs text-gray-300 p-3">{item.content}</p></div>;
      case 'mesh':
        return (
          <div className="relative pt-[75%]">
            <div className="absolute inset-0 bg-gray-700 rounded-md overflow-hidden">
              <Suspense fallback={<div className="w-full h-full flex items-center justify-center"><Spinner /></div>}>
                <MeshViewer stlContent={item.content || item.contentUrl} interactive={false} />
              </Suspense>
            </div>
          </div>
        );
    }
  }, [item, modality, imageUrl]);

  return (
    <div
      className={`bg-gray-800 rounded-lg shadow-md border flex flex-col h-full transition-shadow hover:shadow-cyan-500/20 ${isSource ? 'border-cyan-500' : 'border-gray-700'}`}
      onClick={() => onClick ? onClick() : onView(item, modality)}
    >
      <div className={`flex-grow ${isText ? 'h-32' : ''}`}>{content}</div>
      <div className="p-2 text-xs bg-gray-800/50 rounded-b-lg">
        <p className="font-semibold truncate text-white">{item.name}</p>
        {confidence !== undefined && <p className="text-cyan-300">Confidence: {confidence.toFixed(4)}</p>}
      </div>
    </div>
  );
};

const pluralToSingular = (plural: string): Modality => {
  if (plural === 'meshes') return 'mesh';
  return plural.slice(0, -1) as Modality;
}

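// Shows the source item alongside the top-3 matches for every other modality.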
const ResultsDisplay: React.FC<{
  results: SingleComparisonResult;
  onViewItem: (item: DataItem, modality: Modality) => void;
}> = ({ results, onViewItem }) => {
  const sourcePluralModality = results.sourceModality === 'mesh' ? 'meshes' : `${results.sourceModality}s`;

  return (
    <div className="mt-6 p-4 bg-gray-800/50 border border-gray-700 rounded-lg">
      <h3 className="text-xl font-bold mb-4">Comparison Results</h3>
      <div className="flex flex-col md:flex-row gap-6 items-start">
        {/* Source Item */}
        <div className="w-full md:w-1/4 flex-shrink-0">
          <h4 className="font-semibold mb-2 text-center text-cyan-400">Source Item</h4>
          <div className="cursor-pointer">
            <LazyItemCard item={results.sourceItem} modality={results.sourceModality} isSource onView={onViewItem}/>
          </div>
        </div>
        {/* Matches */}
        <div className="w-full md:w-3/4 flex flex-col gap-6">
          {Object.entries(results.results)
            .filter(([pluralModality, matches]) => pluralModality !== sourcePluralModality && matches && matches.length > 0)
            .map(([pluralModality, matches]) => (
              <div key={pluralModality}>
                <h4 className="font-semibold mb-2 capitalize text-indigo-400">{pluralModality} Matches</h4>
                <div className="grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-3 gap-3">
                  {(matches || []).slice(0, 3).map(match => (
                    <div key={match.item.id} className="cursor-pointer">
                      <LazyItemCard
                        item={match.item}
                        modality={pluralToSingular(pluralModality)}
                        confidence={match.confidence}
                        onView={onViewItem}
                      />
                    </div>
                  ))}
                </div>
              </div>
            ))}
        </div>
      </div>
    </div>
  );
};

interface ComparisonToolProps {
  dataset: Dataset;
  allDatasets: DatasetMetadata[]; // Use metadata for the dropdown
  onDatasetChange: (id: string) => void;
  onBack: () => void;
}

const FileUploader: React.FC<{
  onFileSelect: (file: File) => void | Promise<void>;
  accept: string;
  modality: Modality;
  clear: () => void;
}> = ({ onFileSelect, accept, modality, clear }) => {
  const [file, setFile] = useState<File | null>(null);
  const inputRef = React.useRef<HTMLInputElement>(null);

  const handleFileChange = (e: React.ChangeEvent<HTMLInputElement>) => {
    const selectedFile = e.target.files?.[0];
    if (selectedFile) {
      setFile(selectedFile);
      onFileSelect(selectedFile);
    }
  };

  const handleClear = () => {
    setFile(null);
    if(inputRef.current) inputRef.current.value = "";
    clear();
  }

  return (
    <div className="w-full text-center">
      <input type="file" ref={inputRef} accept={accept} onChange={handleFileChange} className="hidden" />
      <button
        onClick={() => inputRef.current?.click()}
        className="w-full bg-gray-700 hover:bg-gray-600 text-white font-semibold py-3 px-4 rounded-lg transition-colors text-center flex items-center justify-center gap-2"
      >
        <svg xmlns="http://www.w3.org/2000/svg" className="h-5 w-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2}>
          <path strokeLinecap="round" strokeLinejoin="round" d="M4 16v1a3 3 0 003 3h10a3 3 0 003-3v-1m-4-8l-4-4m0 0L8 8m4-4v12" />
        </svg>
        <span>{`Upload a ${modality} file`}</span>
      </button>
      {file && (
        <div className="mt-2 text-sm">
          <p className="text-gray-300 truncate">Selected: {file.name}</p>
          <button onClick={handleClear} className="text-cyan-400 hover:text-cyan-300 underline" title="Clear selection">
            Clear
          </button>
        </div>
      )}
    </div>
  );
};

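// Main comparison view: the source item comes either from the dataset grid
// (matched locally) or from a fresh upload (matched via the backend).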
export const ComparisonTool: React.FC<ComparisonToolProps> = ({ dataset, allDatasets, onDatasetChange, onBack }) => {
  const [activeTab, setActiveTab] = useState<Modality>('image');
  const [selectedItem, setSelectedItem] = useState<DataItem | null>(null);
  const [newItem, setNewItem] = useState<{file?: File, text?: string} | null>(null);
  const [newItemPreview, setNewItemPreview] = useState<string | null>(null);
  const [newItemMeshPreviewContent, setNewItemMeshPreviewContent] = useState<ArrayBuffer | null>(null);
  const [comparisonResult, setComparisonResult] = useState<SingleComparisonResult | null>(null);
  const [isComparing, setIsComparing] = useState(false);
  const [searchTerm, setSearchTerm] = useState('');
  const [viewingItem, setViewingItem] = useState<{ item: DataItem; modality: Modality } | null>(null);
  const resultsRef = useRef<HTMLDivElement>(null);

  const MAX_ITEMS_TO_DISPLAY = 30;

  useEffect(() => {
    // Cleanup object URLs to prevent memory leaks
    return () => {
      if(newItemPreview && newItemPreview.startsWith('blob:')) {
        URL.revokeObjectURL(newItemPreview);
      }
    }
  }, [newItemPreview]);

  useEffect(() => {
    // Scroll to results when they appear
    if (comparisonResult && resultsRef.current) {
      // A small delay to ensure the element is rendered and painted before scrolling
      const timer = setTimeout(() => {
        resultsRef.current?.scrollIntoView({ behavior: 'smooth', block: 'start' });
      }, 100);
      return () => clearTimeout(timer);
    }
  }, [comparisonResult]);

  const handleItemSelect = async (item: DataItem) => {
    setSelectedItem(item);
    setNewItem(null);
    setNewItemPreview(null);
    setNewItemMeshPreviewContent(null);

    // Optimistic UI update to show the newly selected source item immediately
    setComparisonResult({
      sourceItem: item,
      sourceModality: activeTab,
      results: {}, // No matches yet, will be populated below
    });

    let itemWithContent = item;
    // For cloud items, the content is not loaded yet. We must fetch it.
    // The individual card will show a spinner, so we don't need a global one.
    if (!item.content && item.contentUrl) {
      try {
        const content = await getItemContent(item.contentUrl);
        itemWithContent = { ...item, content };
      } catch (e) {
        console.error("Failed to lazy-load content for comparison:", e);
        alert("Could not load item content from the server for comparison.");
        setComparisonResult(null); // Clear results on error
        setSelectedItem(null);
        return;
      }
    }

    // Use fast, local search for existing dataset items and update the results
    const results = findTopMatchesFromLocal(itemWithContent, activeTab, dataset);
    setComparisonResult(results);
  };

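  // Search with a newly uploaded file or typed text: read the content into
  // memory, show it optimistically as the source card, then request matches
  // from the backend.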
  const handleNewItemSearch = async () => {
    if (!newItem) return;

    setSelectedItem(null); // Deselect grid item
    setIsComparing(true);
    setComparisonResult(null); // Clear previous results

    try {
      let content: string | ArrayBuffer;
      let name: string;

      if (newItem.file) {
        name = newItem.file.name;
        if (activeTab === 'image') {
          content = await new Promise((resolve, reject) => {
            const reader = new FileReader();
            reader.onload = () => resolve(reader.result as string);
            reader.onerror = reject;
            reader.readAsDataURL(newItem.file);
          });
        } else if (activeTab === 'mesh') {
          content = await new Promise((resolve, reject) => {
            const reader = new FileReader();
            reader.onload = () => resolve(reader.result as ArrayBuffer);
            reader.onerror = reject;
            reader.readAsArrayBuffer(newItem.file);
          });
        } else { // text file
          content = await new Promise((resolve, reject) => {
            const reader = new FileReader();
            reader.onload = () => resolve(reader.result as string);
            reader.onerror = reject;
            reader.readAsText(newItem.file);
          });
        }
      } else if (newItem.text) {
        name = 'Custom Text Input';
        content = newItem.text;
      } else {
        throw new Error("No new item content found");
      }

      const sourceItem: DataItem = { id: `new_${Date.now()}`, name, content };

      // Optimistic UI: Show the source item while waiting for matches.
      setComparisonResult({
        sourceItem,
        sourceModality: activeTab,
        results: {},
      });

      const results = await findTopMatches(sourceItem, activeTab, dataset.id);
      setComparisonResult(results);

    } catch (error) {
      console.error("Failed to find matches for new item:", error);
      alert(`Error finding matches: ${error instanceof Error ? error.message : String(error)}`);
      setComparisonResult(null); // Clear on error
    } finally {
      setIsComparing(false);
    }
  }

  const handleFullComparison = () => {
    if (dataset.fullComparison) {
      downloadJson(dataset.fullComparison, `${dataset.name}-full-comparison.json`);
    } else {
      alert("Full comparison data is not available for this dataset.");
    }
  };

  const clearNewItem = useCallback(() => {
    setNewItem(null);
    if(newItemPreview && newItemPreview.startsWith('blob:')) URL.revokeObjectURL(newItemPreview);
    setNewItemPreview(null);
    setNewItemMeshPreviewContent(null);
  }, [newItemPreview]);

  const handleFileSelected = async (file: File) => {
    setNewItem({ file });
    setSelectedItem(null);
    if(newItemPreview && newItemPreview.startsWith('blob:')) URL.revokeObjectURL(newItemPreview);
    setNewItemPreview(null);
    setNewItemMeshPreviewContent(null);

    if(activeTab === 'image') {
      setNewItemPreview(URL.createObjectURL(file));
    } else if (activeTab === 'mesh') {
      try {
        const buffer = await file.arrayBuffer();
        setNewItemMeshPreviewContent(buffer);
      } catch (error) {
        console.error("Error reading STL file for preview:", error);
        alert("Could not read the file for preview.");
      }
    }
  }

  const handleTabChange = (mod: Modality) => {
    setActiveTab(mod);
    setComparisonResult(null);
    setSelectedItem(null);
    clearNewItem();
  }

  const pluralKey = activeTab === 'mesh' ? 'meshes' : `${activeTab}s`;
  const items = dataset.data[pluralKey as keyof typeof dataset.data];

  const filteredItems = useMemo(() => {
    if (!searchTerm) return items;
    return items.filter(item => item.name.toLowerCase().includes(searchTerm.toLowerCase()));
  }, [items, searchTerm]);

  const displayedItems = useMemo(() => {
    return filteredItems.slice(0, MAX_ITEMS_TO_DISPLAY);
  }, [filteredItems]);

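  // Layout: tab bar per modality, "new item" uploader with live preview,
  // dataset grid for picking an existing source, and the results section.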
  return (
    <>
      <div className="space-y-8">
        <div className="flex flex-col sm:flex-row items-start sm:items-center justify-between gap-4">
          <div className="flex items-center gap-4">
            <h2 className="text-3xl font-bold tracking-tight">Comparison Tool:</h2>
            <select
              value={dataset.id}
              onChange={(e) => onDatasetChange(e.target.value)}
              className="bg-gray-700 border border-gray-600 rounded-md px-3 py-2 text-white text-lg focus:ring-2 focus:ring-cyan-500 focus:outline-none"
            >
              {allDatasets.map(d => <option key={d.id} value={d.id}>{d.name}</option>)}
            </select>
          </div>
          <button onClick={onBack} className="bg-gray-600 hover:bg-gray-500 text-white font-bold py-2 px-4 rounded-lg transition-colors">
            ← Back to Manager
          </button>
        </div>

        <div className="border-b border-gray-600">
          <nav className="-mb-px flex space-x-8">
            {(['image', 'text', 'mesh'] as Modality[]).map(mod => (
              <button key={mod} onClick={() => handleTabChange(mod)}
                className={`${activeTab === mod ? 'border-cyan-400 text-cyan-400' : 'border-transparent text-gray-400 hover:text-gray-200'}
                whitespace-nowrap py-3 px-1 border-b-2 font-medium text-sm capitalize transition-colors`}>
                {mod === 'mesh' ? 'meshes' : `${mod}s`}
              </button>
            ))}
          </nav>
        </div>

        {/* Single Element Search */}
        <div className="bg-gray-800/50 p-6 rounded-lg border border-gray-700 space-y-6">
          <div>
            <h3 className="text-2xl font-bold text-center mb-6">Search with New Item</h3>
            <div className="flex flex-col md:flex-row items-start justify-center gap-8 w-full max-w-4xl mx-auto">
              {/* Left side: Uploader Controls */}
              <div className="w-full md:w-1/2 flex flex-col justify-start items-center space-y-4">
                {activeTab === 'image' && (
                  <div className="w-full max-w-sm">
                    <FileUploader onFileSelect={handleFileSelected} accept="image/png, image/jpeg" modality="image" clear={clearNewItem} />
                  </div>
                )}
                {activeTab === 'mesh' && (
                  <div className="w-full max-w-sm">
                    <FileUploader onFileSelect={handleFileSelected} accept=".stl" modality="mesh" clear={clearNewItem} />
                  </div>
                )}
                {activeTab === 'text' && (
                  <div className="space-y-3 h-full w-full max-w-sm flex flex-col">
                    <textarea
                      value={newItem?.text ?? ''}
                      onChange={(e) => { setNewItem({ text: e.target.value }); setSelectedItem(null); }}
                      placeholder="Type or paste text here..."
                      className="w-full h-28 bg-gray-700 border border-gray-600 rounded-md p-2 text-white focus:ring-2 focus:ring-cyan-500 focus:outline-none resize-none"
                    />
                    <div className="text-center text-gray-400 text-sm">or</div>
                    <FileUploader onFileSelect={handleFileSelected} accept=".txt" modality="text" clear={clearNewItem} />
                  </div>
                )}

                <button onClick={handleNewItemSearch} disabled={!newItem || isComparing} className="bg-cyan-500 hover:bg-cyan-600 text-white font-bold py-3 px-8 rounded-lg transition-colors flex items-center justify-center disabled:bg-gray-600 disabled:cursor-not-allowed text-base w-full max-w-sm">
                  {isComparing ? <><Spinner /> Searching...</> : 'Find Matches for New Item'}
                </button>
              </div>

              {/* Right side: Preview */}
              <div className="w-full md:w-1/2 flex flex-col items-center">
                <div className="w-full max-w-sm aspect-[4/3] bg-gray-900/50 rounded-md flex items-center justify-center relative overflow-hidden border border-gray-700">
                  {activeTab === 'image' && newItemPreview ? (
                    <img src={newItemPreview} alt="Preview" className="absolute inset-0 w-full h-full object-cover"/>
                  ) : activeTab === 'mesh' && newItemMeshPreviewContent ? (
                    <div className="absolute inset-0">
                      <Suspense fallback={<div className="w-full h-full flex items-center justify-center"><Spinner /></div>}>
                        <MeshViewer stlContent={newItemMeshPreviewContent} interactive={true} />
                      </Suspense>
                    </div>
                  ) : (
                    <div className="text-gray-500 italic p-4 text-center">
                      {activeTab === 'image' && 'Image preview will appear here'}
                      {activeTab === 'mesh' && '3D model preview will appear here'}
                      {activeTab === 'text' && 'Enter text or upload a TXT file to search'}
                    </div>
                  )}
                </div>
              </div>
            </div>
          </div>

          <div className="border-t border-gray-700 pt-6">
            <div className="flex flex-col sm:flex-row justify-between items-center mb-4 gap-4">
              <h3 className="text-2xl font-bold">...or Select from Dataset</h3>
              <input
                type="text"
                placeholder="Search items by name..."
                value={searchTerm}
                onChange={(e) => setSearchTerm(e.target.value)}
                className="bg-gray-700 border border-gray-600 rounded-md px-3 py-2 w-full sm:w-64 text-white focus:ring-2 focus:ring-cyan-500 focus:outline-none"
              />
            </div>
            <div className="max-h-96 overflow-y-auto p-2 bg-gray-900/50 rounded-md">
              <div className="grid grid-cols-2 sm:grid-cols-3 md:grid-cols-4 lg:grid-cols-6 gap-4">
                {displayedItems.map(item => (
                  <div key={item.id}
                    className={`rounded-lg overflow-hidden border-2 ${selectedItem?.id === item.id ? 'border-cyan-500 ring-2 ring-cyan-500' : 'border-transparent'}`}>
                    <LazyItemCard
                      item={item}
                      modality={activeTab}
                      onView={(viewedItem, mod) => setViewingItem({item: viewedItem, modality: mod})}
                      onClick={() => handleItemSelect(item)}
                      className="cursor-pointer"
                    />
                  </div>
                ))}
              </div>
              {filteredItems.length > displayedItems.length && (
                <p className="text-center text-gray-500 p-4 text-sm">
                  Showing first {MAX_ITEMS_TO_DISPLAY} of {filteredItems.length} items.
                  {searchTerm ? " Refine your search." : " Use search to narrow results."}
                </p>
              )}
              {filteredItems.length === 0 && <p className="text-center text-gray-500 p-8">No items found matching your search.</p>}
            </div>
          </div>

          {isComparing && !comparisonResult && (
            <div className="mt-6 p-4 text-center">
              <div className="flex justify-center items-center gap-2 text-lg">
                <Spinner />
                <span>Finding matches...</span>
              </div>
            </div>
          )}
          <div ref={resultsRef}>
            {comparisonResult && <ResultsDisplay results={comparisonResult} onViewItem={(item, modality) => setViewingItem({item, modality})}/>}
          </div>
        </div>

        {/* Full Dataset Comparison */}
        <div className="bg-gray-800/50 p-6 rounded-lg border border-gray-700">
          <h3 className="text-2xl font-bold mb-4">Full Dataset Comparison</h3>
          <p className="text-gray-400 mb-4">
            The full comparison results for this dataset have been pre-computed. Click the button to download them as a JSON file instantly.
          </p>
          <button
            onClick={handleFullComparison}
            disabled={!dataset.fullComparison}
            className="bg-indigo-500 hover:bg-indigo-600 text-white font-bold py-3 px-6 rounded-lg transition-colors w-full sm:w-auto flex items-center justify-center gap-2 disabled:bg-gray-600 disabled:cursor-not-allowed"
          >
            Download Full Comparison JSON
          </button>
        </div>
      </div>
      {viewingItem && (
        <FullscreenViewer
          item={viewingItem.item}
          modality={viewingItem.modality}
          onClose={() => setViewingItem(null)}
        />
      )}
    </>
  );
};
frontend/components/DatasetManager.tsx
ADDED
@@ -0,0 +1,460 @@
import React, { useState, useRef, useEffect, useMemo } from 'react';
import type { Dataset, DatasetMetadata } from '../types';
import { DatasetViewer } from './DatasetViewer';
import { Modal } from './common/Modal';
import { Spinner } from './common/Spinner';
import { ProgressBar } from './common/ProgressBar';
import { startDatasetProcessing, getProcessingStatus } from '../services/apiService';
import { downloadCsv } from '../services/fileService';

interface DatasetManagerProps {
  datasets: DatasetMetadata[];
  selectedDatasetId: string | null;
  onSelectDataset: (id: string | null) => void;
  onAddDataset: (dataset: Dataset) => Promise<void>;
  onDeleteDataset: (id: string) => Promise<void>;
  onRenameDataset: (id: string, newName: string) => Promise<void>;
  onOpenComparisonTool: () => void;
  onGetFullDataset: (id: string) => Promise<Dataset | null>;
  errorBanner?: React.ReactNode | null;
}

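// Animates the displayed progress toward the latest reported value with an
// ease-out curve, so polled jumps render as smooth motion.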
const useSmoothProgress = (targetProgress: number, duration: number = 6000) => {
  const [displayedProgress, setDisplayedProgress] = useState(targetProgress);
  // FIX: Initialize useRef with null to satisfy TypeScript's requirement for an initial value when a generic type is provided.
  const animationFrameRef = useRef<number | null>(null);
  const startProgressRef = useRef<number>(targetProgress);
  // The value is set in useEffect before it's ever used, so initializing with 0 is safe.
  const startTimeRef = useRef<number>(0);

  useEffect(() => {
    startProgressRef.current = displayedProgress;
    // FIX: Explicitly call `window.performance.now()` to avoid potential scope resolution issues with build tools which could be causing the cryptic error.
    startTimeRef.current = window.performance.now();

    const animate = (currentTime: number) => {
      const elapsedTime = currentTime - startTimeRef.current;
      const progressFraction = Math.min(elapsedTime / duration, 1);

      // Ease-out quadratic easing function
      const easedFraction = progressFraction * (2 - progressFraction);

      const newProgress = startProgressRef.current + (targetProgress - startProgressRef.current) * easedFraction;
      setDisplayedProgress(newProgress);

      if (progressFraction < 1) {
        animationFrameRef.current = requestAnimationFrame(animate);
      }
    };

    if (animationFrameRef.current) {
      cancelAnimationFrame(animationFrameRef.current);
    }
    animationFrameRef.current = requestAnimationFrame(animate);

    return () => {
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current);
      }
    };
  }, [targetProgress, duration]);

  return displayedProgress;
};


const ThreeDotsIcon = () => (
  <svg xmlns="http://www.w3.org/2000/svg" className="h-5 w-5" viewBox="0 0 20 20" fill="currentColor">
    <path d="M10 6a2 2 0 110-4 2 2 0 010 4zM10 12a2 2 0 110-4 2 2 0 010 4zM10 18a2 2 0 110-4 2 2 0 010 4z" />
  </svg>
);

const CloudIcon = () => (
  <svg xmlns="http://www.w3.org/2000/svg" className="h-5 w-5 text-cyan-400" viewBox="0 0 20 20" fill="currentColor">
    <path d="M5.5 16a3.5 3.5 0 01-.369-6.98 4 4 0 117.753-1.977A4.5 4.5 0 1113.5 16h-8z" />
  </svg>
);


const CheckCircleIcon = () => (
  <svg xmlns="http://www.w3.org/2000/svg" className="h-5 w-5 text-green-400" viewBox="0 0 20 20" fill="currentColor">
    <path fillRule="evenodd" d="M10 18a8 8 0 100-16 8 8 0 000 16zm3.707-9.293a1 1 0 00-1.414-1.414L9 10.586 7.707 9.293a1 1 0 00-1.414 1.414l2 2a1 1 0 001.414 0l4-4z" clipRule="evenodd" />
  </svg>
);

const ExclamationCircleIcon = () => (
  <svg xmlns="http://www.w3.org/2000/svg" className="h-5 w-5 text-red-400" viewBox="0 0 20 20" fill="currentColor">
    <path fillRule="evenodd" d="M10 18a8 8 0 100-16 8 8 0 000 16zm-1-9a1 1 0 112 0v4a1 1 0 11-2 0V9zm1-4a1 1 0 100 2 1 1 0 000-2z" clipRule="evenodd" />
  </svg>
);


interface ProcessingStatus {
  id: string; // This will be the job_id from the backend
  name: string;
  stage: string;
  progress: number;
  uploadDate: Date;
  error?: string;
}

const StatusDisplay: React.FC<{dataset: DatasetMetadata | { stage: string; progress: number; error?: string } }> = ({ dataset }) => {
  if ('stage' in dataset) { // It's a ProcessingStatus object
    if (dataset.error) {
      return <div className="flex items-center gap-2 text-red-400 truncate" title={dataset.error}> <ExclamationCircleIcon /> Error </div>;
    }
    return <ProgressBar progress={dataset.progress} label={dataset.stage} />;
  }

  switch (dataset.processingState) {
    case 'processing':
      return <ProgressBar progress={100} label="Processing..." />;
    case 'processed':
      return <div className="flex items-center gap-2 text-green-400"> <CheckCircleIcon /> Completed </div>;
    case 'error':
      return <div className="flex items-center gap-2 text-red-400"> <ExclamationCircleIcon /> Error </div>;
    default:
      return <span className="text-gray-400">Pending</span>;
  }
}

const ProcessingRow: React.FC<{status: ProcessingStatus}> = ({ status }) => {
  const displayedProgress = useSmoothProgress(status.progress);

  return (
    <ul className="divide-y divide-gray-700">
      <li className="grid grid-cols-12 gap-4 p-4 items-center bg-gray-700/30">
        <div className="col-span-3 font-medium truncate">{status.name}</div>
        <div className="col-span-3 text-gray-400">---</div>
        <div className="col-span-2 text-sm text-gray-400">{status.uploadDate.toLocaleString()}</div>
        <div className="col-span-3 text-sm">
          <StatusDisplay dataset={{...status, progress: displayedProgress}} />
        </div>
        <div className="col-span-1 flex justify-end"></div>
      </li>
    </ul>
  );
}

export const DatasetManager: React.FC<DatasetManagerProps> = ({
  datasets,
  selectedDatasetId,
  onSelectDataset,
  onAddDataset,
  onDeleteDataset,
  onRenameDataset,
  onOpenComparisonTool,
  onGetFullDataset,
  errorBanner = null,
}) => {
  const [isViewerOpen, setViewerOpen] = useState(false);
  const [isRenameModalOpen, setRenameModalOpen] = useState(false);
  const [activeDropdown, setActiveDropdown] = useState<string | null>(null);
  const [datasetToEdit, setDatasetToEdit] = useState<DatasetMetadata | null>(null);
  const [fullDatasetForViewer, setFullDatasetForViewer] = useState<Dataset | null>(null);
  const [isViewerLoading, setIsViewerLoading] = useState(false);
  const [newDatasetName, setNewDatasetName] = useState('');
  const [processingStatus, setProcessingStatus] = useState<ProcessingStatus | null>(null);
  const pollingIntervalRef = useRef<number | null>(null);
  const fileInputRef = useRef<HTMLInputElement>(null);

  useEffect(() => {
    return () => {
      if (pollingIntervalRef.current) {
        clearInterval(pollingIntervalRef.current);
      }
    };
  }, []);

  const handleUploadClick = () => {
    fileInputRef.current?.click();
  };

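  // Upload flow: send the .zip to the backend, then poll the job status every
  // 2s until it completes, errors, or the component unmounts.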
  const handleFileSelect = async (event: React.ChangeEvent<HTMLInputElement>) => {
    const file = event.target.files?.[0];
    if (!file) return;

    if (pollingIntervalRef.current) clearInterval(pollingIntervalRef.current);

    const tempStatus: ProcessingStatus = {
      name: file.name,
      progress: 0,
      stage: 'Uploading...',
      id: `processing-${Date.now()}`,
      uploadDate: new Date()
    };
    setProcessingStatus(tempStatus);

    try {
      const jobId = await startDatasetProcessing(file);

      setProcessingStatus(prev => prev ? { ...prev, id: jobId, stage: 'Queued', progress: 5 } : null);

      pollingIntervalRef.current = window.setInterval(async () => {
        try {
          const status = await getProcessingStatus(jobId);

          if (status.status === 'processing') {
            setProcessingStatus(prev => {
              if (prev?.id === jobId) {
                return {
                  ...prev,
                  stage: status.stage || 'Processing...',
                  progress: status.progress || prev.progress,
                };
              }
              return prev;
            });
          } else if (status.status === 'complete') {
            if (pollingIntervalRef.current) clearInterval(pollingIntervalRef.current);
            if (status.result) {
              await onAddDataset(status.result);
            }
            setProcessingStatus(null);
          } else if (status.status === 'error') {
            if (pollingIntervalRef.current) clearInterval(pollingIntervalRef.current);
            setProcessingStatus(prev => prev?.id === jobId ? {
              ...prev,
              stage: 'Error',
              progress: 100,
              error: status.message || 'An unknown error occurred.'
            } : prev);
          }

        } catch (pollError: any) {
          console.error("Polling error:", pollError);
          if (pollingIntervalRef.current) clearInterval(pollingIntervalRef.current);
          setProcessingStatus(prev => prev?.id === jobId ? {
            ...prev,
            stage: 'Error',
            progress: 100,
            error: `Failed to get status: ${pollError.message}`
          } : prev);
        }
      }, 2000);

    } catch (error: any) {
      console.error("Failed to start dataset processing:", error);
      setProcessingStatus({
        ...tempStatus,
        stage: 'Error',
        progress: 100,
        error: error.message,
      });
    } finally {
      if (fileInputRef.current) {
        fileInputRef.current.value = '';
      }
    }
  };

  const handleRename = () => {
    if (datasetToEdit && newDatasetName.trim()) {
      onRenameDataset(datasetToEdit.id, newDatasetName.trim());
      setRenameModalOpen(false);
      setDatasetToEdit(null);
      setNewDatasetName('');
    }
  };

  const openRenameModal = (dataset: DatasetMetadata) => {
    setDatasetToEdit(dataset);
    setNewDatasetName(dataset.name);
    setRenameModalOpen(true);
    setActiveDropdown(null);
  };

  const openViewer = (datasetMeta: DatasetMetadata) => {
    setViewerOpen(true);
    setFullDatasetForViewer(null);
    setIsViewerLoading(true);
    setActiveDropdown(null);

    onGetFullDataset(datasetMeta.id)
      .then(fullDataset => {
        if (fullDataset) {
          setFullDatasetForViewer(fullDataset);
        } else {
          alert("Could not load dataset details.");
          setViewerOpen(false);
        }
      })
      .catch(e => {
        console.error("Failed to fetch dataset for viewer:", e);
        alert(`Error: Could not load dataset. ${e instanceof Error ? e.message : ''}`);
        setViewerOpen(false);
      })
      .finally(() => {
        setIsViewerLoading(false);
      });
  };

  const handleDelete = (id: string) => {
    if (window.confirm('Are you sure you want to delete this dataset?')) {
      onDeleteDataset(id);
    }
    setActiveDropdown(null);
  }

  const handleDownloadAnalysis = (datasetMeta: DatasetMetadata) => {
    if (datasetMeta.processingState !== 'processed') {
      alert('Dataset is not processed yet.');
      return;
    }

    onGetFullDataset(datasetMeta.id).then(fullDataset => {
      if (fullDataset && fullDataset.fullComparison) {
        downloadCsv(fullDataset, `${fullDataset.name}-analysis.csv`);
      } else {
        alert('Full comparison data not found for this dataset.');
      }
    }).catch(err => {
      console.error('Failed to get dataset for download:', err);
      alert('Could not load dataset to download analysis.');
    });

    setActiveDropdown(null);
  };

  const isComparisonToolDisabled = () => {
    if (!selectedDatasetId) return true;
    const selected = datasets.find(d => d.id === selectedDatasetId);
    return !selected || selected.processingState !== 'processed';
  }

  const { sharedDatasets, localDatasets } = useMemo(() => {
    const shared: DatasetMetadata[] = [];
    const local: DatasetMetadata[] = [];
    datasets.forEach(d => {
      if (d.isShared) {
        shared.push(d);
      } else {
        local.push(d);
      }
    });
    local.sort((a, b) => new Date(b.uploadDate).getTime() - new Date(a.uploadDate).getTime());
    return { sharedDatasets: shared, localDatasets: local };
  }, [datasets]);

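  // Renders one table section (cloud or local) of dataset rows with a
  // per-row actions dropdown.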
const renderDatasetList = (list: DatasetMetadata[], title: string) => (
|
| 340 |
+
<>
|
| 341 |
+
{list.length > 0 && <h3 className="px-4 pt-4 pb-2 text-lg font-semibold text-gray-300">{title}</h3>}
|
| 342 |
+
<ul className="divide-y divide-gray-700">
|
| 343 |
+
{list.map(dataset => (
|
| 344 |
+
<li
|
| 345 |
+
key={dataset.id}
|
| 346 |
+
onClick={() => onSelectDataset(dataset.id)}
|
| 347 |
+
className={`grid grid-cols-12 gap-4 p-4 items-center cursor-pointer transition-colors ${
|
| 348 |
+
selectedDatasetId === dataset.id ? 'bg-indigo-600/30' : 'hover:bg-gray-700/50'
|
| 349 |
+
}`}
|
| 350 |
+
>
|
| 351 |
+
<div className="col-span-3 font-medium flex items-center gap-3">
|
| 352 |
+
{dataset.isShared && <CloudIcon />}
|
| 353 |
+
<span className="truncate">{dataset.name}</span>
|
| 354 |
+
</div>
|
| 355 |
+
<div className="col-span-3">{dataset.itemCounts.images} Images, {dataset.itemCounts.texts} Texts, {dataset.itemCounts.meshes} Meshes</div>
|
| 356 |
+
<div className="col-span-2 text-sm text-gray-400">{new Date(dataset.uploadDate).toLocaleString()}</div>
|
| 357 |
+
<div className="col-span-3 text-sm">
|
| 358 |
+
<StatusDisplay dataset={dataset} />
|
| 359 |
+
</div>
|
| 360 |
+
<div className="col-span-1 flex justify-end relative">
|
| 361 |
+
<button onClick={(e) => { e.stopPropagation(); setActiveDropdown(activeDropdown === dataset.id ? null : dataset.id)}} className="p-2 rounded-full hover:bg-gray-600">
|
| 362 |
+
<ThreeDotsIcon />
|
| 363 |
+
</button>
|
| 364 |
+
{activeDropdown === dataset.id && (
|
| 365 |
+
<div className="absolute top-full right-0 mt-2 w-48 bg-gray-800 border border-gray-600 rounded-md shadow-lg z-10">
|
| 366 |
+
<ul className="py-1">
|
| 367 |
+
<li className="px-4 py-2 hover:bg-gray-700 cursor-pointer" onClick={(e) => { e.stopPropagation(); openViewer(dataset); }}>View</li>
|
| 368 |
+
{dataset.processingState === 'processed' && (
|
| 369 |
+
<li className="px-4 py-2 hover:bg-gray-700 cursor-pointer" onClick={(e) => { e.stopPropagation(); handleDownloadAnalysis(dataset); }}>Download Analysis (.csv)</li>
|
| 370 |
+
)}
|
| 371 |
+
{!dataset.isShared && (
|
| 372 |
+
<>
|
| 373 |
+
<li className="px-4 py-2 hover:bg-gray-700 cursor-pointer" onClick={(e) => { e.stopPropagation(); openRenameModal(dataset); }}>Rename</li>
|
| 374 |
+
<li className="px-4 py-2 hover:bg-gray-700 text-red-400 cursor-pointer" onClick={(e) => { e.stopPropagation(); handleDelete(dataset.id); }}>Delete</li>
|
| 375 |
+
</>
|
| 376 |
+
)}
|
| 377 |
+
</ul>
|
| 378 |
+
</div>
|
| 379 |
+
)}
|
| 380 |
+
</div>
|
| 381 |
+
</li>
|
| 382 |
+
))}
|
| 383 |
+
</ul>
|
| 384 |
+
</>
|
| 385 |
+
);
|
| 386 |
+
|
| 387 |
+
return (
|
| 388 |
+
<div className="space-y-6">
|
| 389 |
+
{errorBanner}
|
| 390 |
+
<div className="flex justify-between items-center">
|
| 391 |
+
<h2 className="text-3xl font-bold tracking-tight">Dataset Manager</h2>
|
| 392 |
+
<div className="flex items-center gap-4">
|
| 393 |
+
<input
|
| 394 |
+
type="file"
|
| 395 |
+
ref={fileInputRef}
|
| 396 |
+
onChange={handleFileSelect}
|
| 397 |
+
accept=".zip"
|
| 398 |
+
className="hidden"
|
| 399 |
+
/>
|
| 400 |
+
<button
|
| 401 |
+
onClick={handleUploadClick}
|
| 402 |
+
disabled={!!processingStatus && !processingStatus.error}
|
| 403 |
+
className="bg-cyan-500 hover:bg-cyan-600 text-white font-bold py-2 px-4 rounded-lg transition-colors flex items-center justify-center w-48 disabled:bg-gray-600 disabled:cursor-not-allowed"
|
| 404 |
+
>
|
| 405 |
+
{processingStatus && !processingStatus.error ? <><Spinner /> In Progress...</> : 'Upload Dataset (.zip)'}
|
| 406 |
+
</button>
|
| 407 |
+
<button
|
| 408 |
+
onClick={onOpenComparisonTool}
|
| 409 |
+
disabled={isComparisonToolDisabled()}
|
| 410 |
+
className="bg-indigo-500 hover:bg-indigo-600 text-white font-bold py-2 px-4 rounded-lg transition-colors disabled:bg-gray-600 disabled:cursor-not-allowed"
|
| 411 |
+
>
|
| 412 |
+
Open Comparison Tool
|
| 413 |
+
</button>
|
| 414 |
+
</div>
|
| 415 |
+
</div>
|
| 416 |
+
|
| 417 |
+
<div className="bg-gray-800/50 rounded-lg border border-gray-700 shadow-lg">
|
| 418 |
+
<div className="grid grid-cols-12 gap-4 p-4 font-semibold text-gray-400 border-b border-gray-700">
|
| 419 |
+
<div className="col-span-3">Name</div>
|
| 420 |
+
<div className="col-span-3">Items</div>
|
| 421 |
+
<div className="col-span-2">Upload Date</div>
|
| 422 |
+
<div className="col-span-3">Status</div>
|
| 423 |
+
<div className="col-span-1 text-right">Actions</div>
|
| 424 |
+
</div>
|
| 425 |
+
|
| 426 |
+
{processingStatus && <ProcessingRow status={processingStatus} />}
|
| 427 |
+
|
| 428 |
+
{renderDatasetList(sharedDatasets, "Cloud Datasets")}
|
| 429 |
+
{renderDatasetList(localDatasets, "Local Datasets")}
|
| 430 |
+
|
| 431 |
+
{datasets.length === 0 && !processingStatus && <div className="text-center p-8 text-gray-500">No datasets found. Upload a new one to get started.</div>}
|
| 432 |
+
</div>
|
| 433 |
+
|
| 434 |
+
{isViewerOpen && (
|
| 435 |
+
<DatasetViewer
|
| 436 |
+
dataset={fullDatasetForViewer}
|
| 437 |
+
isLoading={isViewerLoading}
|
| 438 |
+
onClose={() => setViewerOpen(false)}
|
| 439 |
+
/>
|
| 440 |
+
)}
|
| 441 |
+
|
| 442 |
+
{isRenameModalOpen && datasetToEdit && (
|
| 443 |
+
<Modal title="Rename Dataset" onClose={() => setRenameModalOpen(false)}>
|
| 444 |
+
<div className="space-y-4">
|
| 445 |
+
<input
|
| 446 |
+
type="text"
|
| 447 |
+
value={newDatasetName}
|
| 448 |
+
onChange={(e) => setNewDatasetName(e.target.value)}
|
| 449 |
+
className="w-full bg-gray-700 border border-gray-600 rounded-md px-3 py-2 text-white focus:ring-2 focus:ring-cyan-500 focus:outline-none"
|
| 450 |
+
/>
|
| 451 |
+
<div className="flex justify-end gap-2">
|
| 452 |
+
<button onClick={() => setRenameModalOpen(false)} className="px-4 py-2 rounded-md bg-gray-600 hover:bg-gray-500 transition-colors">Cancel</button>
|
| 453 |
+
<button onClick={handleRename} className="px-4 py-2 rounded-md bg-cyan-500 hover:bg-cyan-600 text-white transition-colors">Save</button>
|
| 454 |
+
</div>
|
| 455 |
+
</div>
|
| 456 |
+
</Modal>
|
| 457 |
+
)}
|
| 458 |
+
</div>
|
| 459 |
+
);
|
| 460 |
+
};
|
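
Note on the Upload button above: it is disabled whenever `processingStatus` is set and carries no `error`, so a failed run re-enables uploading. A minimal sketch of the status shape this expression assumes (the names here are inferred from usage, not taken from the app's `types.ts`):

// Hypothetical shape inferred from the usage above; not the actual declaration.
interface ProcessingStatus {
  progress: number;   // 0..100, rendered by <ProcessingRow />
  error?: string;     // once set, the Upload button becomes clickable again
}

// Mirrors the disabled expression used in the button above.
const isUploadDisabled = (s: ProcessingStatus | null): boolean => !!s && !s.error;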
frontend/components/DatasetViewer.tsx
ADDED
@@ -0,0 +1,220 @@

import React, { useState, useEffect, lazy, Suspense } from 'react';
import type { Dataset, Modality, DataItem } from '../types';
import { Modal } from './common/Modal';
import { FullscreenViewer } from './common/FullscreenViewer';
import { getItemContent } from '../services/sharedDatasetService';
import { Spinner } from './common/Spinner';

const MeshViewer = lazy(() => import('./common/MeshViewer').then(module => ({ default: module.MeshViewer })));

interface DatasetViewerProps {
  dataset: Dataset | null;
  isLoading: boolean;
  onClose: () => void;
}

const ITEMS_PER_PAGE = 16;

const LazyItem: React.FC<{
  item: DataItem;
  modality: Modality;
  onClick: () => void;
}> = ({ item, modality, onClick }) => {
  const [loadedItem, setLoadedItem] = useState<DataItem>(item);
  const [isLoading, setIsLoading] = useState(!item.content && !!item.contentUrl);
  const [error, setError] = useState(false);

  useEffect(() => {
    let isMounted = true;
    // Images are loaded by the browser via `contentUrl`, so we only fetch for text/mesh.
    if (!item.content && item.contentUrl && modality !== 'image') {
      setIsLoading(true);
      setError(false);
      getItemContent(item.contentUrl)
        .then(content => {
          if (isMounted) {
            setLoadedItem({ ...item, content });
          }
        })
        .catch(err => {
          console.error("Failed to load item content", err);
          if (isMounted) setError(true);
        })
        .finally(() => {
          if (isMounted) setIsLoading(false);
        });
    } else {
      setLoadedItem(item);
      setIsLoading(false);
    }
    return () => { isMounted = false; };
  }, [item, modality]);

  const renderContent = () => {
    if (isLoading) {
      return <div className="flex items-center justify-center h-full"><Spinner /></div>;
    }
    if (error) {
      return <div className="flex items-center justify-center h-full text-xs text-red-400 p-2 text-center">Load Failed</div>;
    }

    const content = loadedItem.content;
    const name = loadedItem.name;
    const imageUrl = modality === 'image' ? (loadedItem.contentUrl || content as string) : null;

    switch (modality) {
      case 'image':
        return (
          <div className="group relative pt-[75%]">
            <div className="absolute inset-0">
              <img src={imageUrl || undefined} alt={name} className="object-cover w-full h-full rounded-lg shadow-md" />
              <div className="absolute bottom-0 left-0 w-full bg-black/50 text-white text-xs p-1 rounded-b-lg truncate">{name}</div>
            </div>
          </div>
        );
      case 'text':
        return (
          <div className="bg-gray-700 p-3 rounded-lg h-full flex flex-col min-h-[120px]">
            <h4 className="font-bold text-sm text-cyan-300 mb-1 truncate">{name}</h4>
            <p className="text-xs text-gray-300 flex-grow break-words">{(content as string)?.substring(0, 100)}...</p>
          </div>
        );
      case 'mesh':
        return (
          <div className="relative pt-[75%]">
            <div className="absolute inset-0 bg-gray-700 rounded-lg overflow-hidden">
              <Suspense fallback={<div className="w-full h-full flex items-center justify-center"><Spinner /></div>}>
                <MeshViewer stlContent={content || loadedItem.contentUrl} interactive={false} />
              </Suspense>
            </div>
            <div className="absolute bottom-0 left-0 w-full bg-black/50 text-white text-xs p-1 rounded-b-lg truncate pointer-events-none">{name}</div>
          </div>
        );
    }
  };

  return (
    <div onClick={onClick} className="cursor-pointer h-full">
      {renderContent()}
    </div>
  );
};


const ModalityViewer: React.FC<{ items: DataItem[], modality: Modality, onViewItem: (item: DataItem, modality: Modality) => void }> = ({ items, modality, onViewItem }) => {
  const [visibleCount, setVisibleCount] = useState(ITEMS_PER_PAGE);

  const visibleItems = items.slice(0, visibleCount);

  return (
    <div>
      <div className="grid grid-cols-1 sm:grid-cols-2 md:grid-cols-3 lg:grid-cols-4 gap-4">
        {visibleItems.map(item => (
          <LazyItem
            key={item.id}
            item={item}
            modality={modality}
            onClick={() => onViewItem(item, modality)}
          />
        ))}
      </div>
      {visibleCount < items.length && (
        <div className="text-center mt-6">
          <button
            onClick={() => setVisibleCount(c => c + ITEMS_PER_PAGE)}
            className="bg-gray-600 hover:bg-gray-500 text-white font-semibold py-2 px-6 rounded-lg transition-colors"
          >
            Load More
          </button>
        </div>
      )}
    </div>
  );
};

export const DatasetViewer: React.FC<DatasetViewerProps> = ({ dataset, isLoading, onClose }) => {
  const [activeTab, setActiveTab] = useState<Modality>('image');
  const [viewingItem, setViewingItem] = useState<{item: DataItem, modality: Modality} | null>(null);

  const handleViewItem = async (item: DataItem, modality: Modality) => {
    let itemWithContent = item;
    // If content is not loaded (e.g., text/mesh from a shared dataset), load it now for the fullscreen viewer
    if (!item.content && item.contentUrl) {
      try {
        const content = await getItemContent(item.contentUrl);
        itemWithContent = { ...item, content };
      } catch (e) {
        console.error("Failed to load content for viewer:", e);
        alert("Could not load item content.");
        return;
      }
    }
    setViewingItem({item: itemWithContent, modality});
  };

  const handleCloseViewer = () => {
    setViewingItem(null);
  };

  const renderBody = () => {
    if (isLoading || !dataset) {
      return (
        <div className="flex justify-center items-center h-96">
          <Spinner />
          <span className="ml-2">Loading dataset...</span>
        </div>
      );
    }

    const tabs: { name: string, modality: Modality }[] = [
      { name: 'Images', modality: 'image' },
      { name: 'Texts', modality: 'text' },
      { name: '3D Models', modality: 'mesh' },
    ];

    const getItems = (mod: Modality) => {
      const key = mod === 'mesh' ? 'meshes' : `${mod}s`;
      return dataset.data[key as keyof typeof dataset.data] || [];
    };

    return (
      <>
        <div className="border-b border-gray-600">
          <nav className="-mb-px flex space-x-8" aria-label="Tabs">
            {tabs.map(tab => {
              const items = getItems(tab.modality);
              return (
                <button
                  key={tab.name}
                  onClick={() => setActiveTab(tab.modality)}
                  className={`${
                    activeTab === tab.modality
                      ? 'border-cyan-400 text-cyan-400'
                      : 'border-transparent text-gray-400 hover:text-gray-200 hover:border-gray-400'
                  } whitespace-nowrap py-4 px-1 border-b-2 font-medium text-sm transition-colors`}
                >
                  {tab.name} ({items.length})
                </button>
              );
            })}
          </nav>
        </div>
        <div className="py-6">
          <ModalityViewer items={getItems(activeTab)} modality={activeTab} onViewItem={handleViewItem} />
        </div>
      </>
    );
  }

  return (
    <>
      <Modal title={`Viewing Dataset: ${dataset?.name || 'Loading...'}`} onClose={onClose} size="5xl">
        {renderBody()}
      </Modal>
      {viewingItem && (
        <FullscreenViewer item={viewingItem.item} modality={viewingItem.modality} onClose={handleCloseViewer} />
      )}
    </>
  );
};
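
A note on `getItems` in the viewer above: it maps a modality to the matching plural key of `dataset.data`, with 'mesh' as the irregular case. A small sketch of that resolution, assuming the data object exposes `images`, `texts`, and `meshes` arrays (as the mapping implies):

// Assumed key layout, inferred from the mapping above.
type Mod = 'image' | 'text' | 'mesh';
const keyFor = (mod: Mod): 'images' | 'texts' | 'meshes' =>
  (mod === 'mesh' ? 'meshes' : `${mod}s`) as 'images' | 'texts' | 'meshes';

keyFor('image'); // 'images'
keyFor('mesh');  // 'meshes', not 'meshs'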
frontend/components/common/FullscreenViewer.tsx
ADDED
@@ -0,0 +1,81 @@

import React, { lazy, Suspense } from 'react';
import type { DataItem, Modality } from '../../types';
import { Spinner } from './Spinner';

const MeshViewer = lazy(() => import('./MeshViewer').then(module => ({ default: module.MeshViewer })));

interface FullscreenViewerProps {
  item: DataItem;
  modality: Modality;
  onClose: () => void;
}

export const FullscreenViewer: React.FC<FullscreenViewerProps> = ({ item, modality, onClose }) => {
  const renderContent = () => {
    switch (modality) {
      case 'image':
        if (typeof item.content !== 'string') return null;
        return <img src={item.content} alt={item.name} className="max-w-full max-h-full object-contain" />;
      case 'text':
        if (typeof item.content !== 'string') return null;
        return (
          <div className="bg-gray-900 p-6 rounded-lg text-left h-full w-full overflow-y-auto">
            <pre className="text-sm whitespace-pre-wrap break-words text-gray-200">
              {item.content}
            </pre>
          </div>
        );
      case 'mesh':
        return (
          <div className="h-full w-full bg-gray-900 rounded-lg">
            <Suspense fallback={<div className="w-full h-full flex items-center justify-center"><Spinner /></div>}>
              <MeshViewer stlContent={item.content} interactive={true} />
            </Suspense>
          </div>
        );
      default:
        return null;
    }
  };

  return (
    <div
      className="fixed inset-0 bg-black bg-opacity-80 flex justify-center items-center z-50 p-4 animate-fade-in"
      onClick={onClose}
    >
      <div
        className="relative w-full h-full max-w-6xl max-h-[90vh] flex items-center justify-center"
        onClick={(e) => e.stopPropagation()}
      >
        <div className="absolute top-0 right-0 m-4 z-10">
          <button onClick={onClose} className="text-white bg-black/50 rounded-full p-2 hover:bg-black/80 transition-colors">
            <svg xmlns="http://www.w3.org/2000/svg" className="h-8 w-8" fill="none" viewBox="0 0 24 24" stroke="currentColor">
              <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M6 18L18 6M6 6l12 12" />
            </svg>
          </button>
        </div>

        <div className="w-full h-full flex flex-col items-center justify-center">
          {renderContent()}
        </div>

        <div className="absolute bottom-0 left-0 right-0 p-4 text-center bg-black/50 text-white rounded-b-lg pointer-events-none">
          <p className="font-bold">{item.name}</p>
        </div>
      </div>
    </div>
  );
};

// Add fade-in animation to tailwind config or a style tag if not using a config
const style = document.createElement('style');
style.innerHTML = `
@keyframes fade-in {
  from { opacity: 0; }
  to { opacity: 1; }
}
.animate-fade-in {
  animation: fade-in 0.2s ease-out;
}`;
document.head.appendChild(style);
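
The module-level style injection at the bottom runs exactly once, when this file is first imported. As the comment notes, the same animation could instead be registered through the Tailwind Play CDN loaded in index.html; a sketch of that alternative (illustrative only, not part of this commit):

// Assumes the Play CDN's global `tailwind` object is already on the page.
(window as any).tailwind.config = {
  theme: {
    extend: {
      keyframes: { 'fade-in': { from: { opacity: '0' }, to: { opacity: '1' } } },
      animation: { 'fade-in': 'fade-in 0.2s ease-out' },
    },
  },
};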
frontend/components/common/MeshViewer.tsx
ADDED
@@ -0,0 +1,207 @@

import React, { useRef, useEffect, useState } from 'react';
import * as THREE from 'three';
import { OrbitControls } from 'three/examples/jsm/controls/OrbitControls';
import { STLLoader } from 'three/examples/jsm/loaders/STLLoader';
import { Spinner } from './Spinner';
import { getItemContent } from '../../services/sharedDatasetService';

interface MeshViewerProps {
  stlContent: string | ArrayBuffer | null;
  interactive?: boolean;
}

export const MeshViewer: React.FC<MeshViewerProps> = ({ stlContent, interactive = true }) => {
  const mountRef = useRef<HTMLDivElement>(null);
  const [isVisible, setIsVisible] = useState(false);
  const [isLoading, setIsLoading] = useState(true);
  const [loadedContent, setLoadedContent] = useState<ArrayBuffer | null>(null);

  useEffect(() => {
    const node = mountRef.current;
    if (!node) return;

    const observer = new IntersectionObserver(
      ([entry]) => {
        if (entry.isIntersecting) {
          setIsVisible(true);
          observer.disconnect(); // Load only once
        }
      },
      {
        rootMargin: '400px', // Load when it's 400px away from the viewport
      }
    );

    observer.observe(node);

    return () => {
      observer.disconnect();
    };
  }, []);

  useEffect(() => {
    if (!isVisible || !stlContent) return;

    let isCancelled = false;

    const fetchAndSetContent = async () => {
      setIsLoading(true);
      try {
        let content: ArrayBuffer | null = null;
        if (typeof stlContent === 'string') {
          // It's a URL, fetch it.
          content = await getItemContent(stlContent) as ArrayBuffer;
        } else if (stlContent instanceof ArrayBuffer) {
          // It's already an ArrayBuffer.
          content = stlContent;
        }
        if (!isCancelled) {
          setLoadedContent(content);
        }
      } catch (error) {
        console.error("Failed to fetch mesh content:", error);
        if (!isCancelled) setIsLoading(false); // Stop loading on error
      }
    };

    fetchAndSetContent();

    return () => {
      isCancelled = true;
    }
  }, [stlContent, isVisible]);

  useEffect(() => {
    const mountNode = mountRef.current;
    if (!mountNode || !loadedContent) {
      setIsLoading(!!stlContent);
      return;
    }

    setIsLoading(true);

    let animationFrameId: number;
    let renderer: THREE.WebGLRenderer;
    let controls: OrbitControls | null = null;
    let scene: THREE.Scene;
    let handleResize: () => void;

    const init = () => {
      scene = new THREE.Scene();
      scene.background = new THREE.Color(0x27272a);

      const camera = new THREE.PerspectiveCamera(35, mountNode.clientWidth / mountNode.clientHeight, 0.1, 1000);
      camera.position.set(2.5, 2.5, 2.5);

      renderer = new THREE.WebGLRenderer({ antialias: true, powerPreference: 'low-power' });
      renderer.setSize(mountNode.clientWidth, mountNode.clientHeight);
      renderer.setPixelRatio(window.devicePixelRatio);
      mountNode.appendChild(renderer.domElement);

      scene.add(new THREE.AmbientLight(0xffffff, Math.PI / 1.5));
      const directionalLight = new THREE.DirectionalLight(0xffffff, 2);
      directionalLight.position.set(5, 5, 5);
      scene.add(directionalLight);

      if (interactive) {
        controls = new OrbitControls(camera, renderer.domElement);
        controls.enableDamping = true;
      }

      const loader = new STLLoader();
      const geometry = loader.parse(loadedContent);
      const material = new THREE.MeshStandardMaterial({ color: '#00ffff', flatShading: false, metalness: 0.2, roughness: 0.5 });
      const mesh = new THREE.Mesh(geometry, material);

      const box = new THREE.Box3().setFromObject(mesh);
      const center = box.getCenter(new THREE.Vector3());
      mesh.position.sub(center);
      scene.add(mesh);

      const size = box.getSize(new THREE.Vector3());
      const maxDim = Math.max(size.x, size.y, size.z);
      const fov = camera.fov * (Math.PI / 180);
      let cameraZ = Math.abs(maxDim / 2 / Math.tan(fov / 2));
      cameraZ *= 1.5;
      camera.position.z = camera.position.y = camera.position.x = cameraZ;
      const minZ = box.min.z;
      const cameraToFarEdge = (minZ < 0) ? -minZ + cameraZ : cameraZ - minZ;
      camera.far = cameraToFarEdge * 3;
      camera.updateProjectionMatrix();

      if (controls) {
        controls.target.copy(mesh.position);
        controls.update();
      } else {
        camera.lookAt(mesh.position);
      }

      handleResize = () => {
        if (!mountNode || !renderer) return;
        camera.aspect = mountNode.clientWidth / mountNode.clientHeight;
        camera.updateProjectionMatrix();
        renderer.setSize(mountNode.clientWidth, mountNode.clientHeight);
      };
      window.addEventListener('resize', handleResize);

      setIsLoading(false);

      const animate = () => {
        animationFrameId = requestAnimationFrame(animate);
        if (controls) controls.update();
        if (!interactive) {
          mesh.rotation.y += 0.005;
        }
        if (renderer) renderer.render(scene, camera);
      };
      animate();
    };

    const timeoutId = setTimeout(init, 10); // Short delay to allow DOM to update

    return () => {
      clearTimeout(timeoutId);
      cancelAnimationFrame(animationFrameId);
      if (handleResize) {
        window.removeEventListener('resize', handleResize);
      }

      if (scene) {
        scene.traverse(object => {
          if (object instanceof THREE.Mesh) {
            if (object.geometry) object.geometry.dispose();
            if (object.material) {
              if (Array.isArray(object.material)) {
                object.material.forEach(material => material.dispose());
              } else {
                object.material.dispose();
              }
            }
          }
        });
      }

      controls?.dispose();

      if (renderer) {
        renderer.dispose();
        if (mountNode && renderer.domElement.parentNode === mountNode) {
          mountNode.removeChild(renderer.domElement);
        }
      }
    };
  }, [loadedContent, interactive]);

  return (
    <div className="w-full h-full relative bg-gray-700">
      {isLoading && (
        <div className="absolute inset-0 flex items-center justify-center z-10">
          <div className="w-8 h-8">
            <Spinner />
          </div>
        </div>
      )}
      <div ref={mountRef} className={`w-full h-full transition-opacity duration-300 ${isLoading ? 'opacity-0' : 'opacity-100'}`} />
    </div>
  );
};
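
The camera-fit step in init() is plain trigonometry: for a perspective camera with vertical field of view `fov`, an object of extent `maxDim` centered at the origin just fills the frame at distance (maxDim / 2) / tan(fov / 2); the 1.5 multiplier adds margin. The same calculation isolated as a helper (a sketch for clarity, not part of this commit):

import * as THREE from 'three';

// Distance at which a bounding box of size maxDim fits the camera's vertical FOV.
function fitDistance(camera: THREE.PerspectiveCamera, box: THREE.Box3, margin = 1.5): number {
  const size = box.getSize(new THREE.Vector3());
  const maxDim = Math.max(size.x, size.y, size.z);
  const fovRad = THREE.MathUtils.degToRad(camera.fov); // camera.fov is in degrees
  return margin * (maxDim / 2) / Math.tan(fovRad / 2);
}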
frontend/components/common/Modal.tsx
ADDED
@@ -0,0 +1,47 @@

import React from 'react';

interface ModalProps {
  title: string;
  onClose: () => void;
  children: React.ReactNode;
  size?: 'sm' | 'md' | 'lg' | 'xl' | '2xl' | '3xl' | '4xl' | '5xl';
}

const sizeClasses = {
  sm: 'max-w-sm',
  md: 'max-w-md',
  lg: 'max-w-lg',
  xl: 'max-w-xl',
  '2xl': 'max-w-2xl',
  '3xl': 'max-w-3xl',
  '4xl': 'max-w-4xl',
  '5xl': 'max-w-5xl',
};


export const Modal: React.FC<ModalProps> = ({ title, onClose, children, size = 'md' }) => {
  return (
    <div
      className="fixed inset-0 bg-black bg-opacity-75 flex justify-center items-center z-50 p-4"
      onClick={onClose}
    >
      <div
        className={`bg-gray-800 rounded-xl shadow-2xl w-full ${sizeClasses[size]} flex flex-col max-h-[90vh]`}
        onClick={(e) => e.stopPropagation()}
      >
        <div className="flex justify-between items-center p-4 border-b border-gray-700">
          <h2 className="text-xl font-bold text-white">{title}</h2>
          <button onClick={onClose} className="text-gray-400 hover:text-white">
            <svg xmlns="http://www.w3.org/2000/svg" className="h-6 w-6" fill="none" viewBox="0 0 24 24" stroke="currentColor">
              <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M6 18L18 6M6 6l12 12" />
            </svg>
          </button>
        </div>
        <div className="p-6 overflow-y-auto">
          {children}
        </div>
      </div>
    </div>
  );
};
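
Modal is used by composition throughout the frontend (see DatasetManager and DatasetViewer above); an illustrative call site (ConfirmDialog and its props are hypothetical):

import React from 'react';
import { Modal } from './Modal';

// Clicking the backdrop or the X button both invoke onClose;
// clicks inside the panel are stopped from propagating.
const ConfirmDialog: React.FC<{ onClose: () => void }> = ({ onClose }) => (
  <Modal title="Delete dataset?" size="sm" onClose={onClose}>
    <p className="text-gray-300">This action cannot be undone.</p>
  </Modal>
);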
frontend/components/common/ProgressBar.tsx
ADDED
@@ -0,0 +1,23 @@

import React from 'react';

interface ProgressBarProps {
  progress: number;
  label?: string;
}

export const ProgressBar: React.FC<ProgressBarProps> = ({ progress, label }) => {
  const safeProgress = Math.max(0, Math.min(100, progress));

  return (
    <div className="flex items-center gap-3">
      {label && <span className="text-xs text-gray-300 min-w-[8ch] text-right">{label}</span>}
      <div className="w-full bg-gray-600 rounded-full h-2.5">
        <div
          className="bg-cyan-500 h-2.5 rounded-full transition-all duration-300 ease-out"
          style={{ width: `${safeProgress}%` }}
        ></div>
      </div>
      <span className="text-xs text-gray-300 min-w-[4ch] text-right">{Math.round(safeProgress)}%</span>
    </div>
  );
};
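
Because `progress` is clamped to [0, 100] before rendering, callers can pass a raw ratio scaled by 100 without guarding against over- or undershoot; for example (an illustrative caller):

import React from 'react';
import { ProgressBar } from './ProgressBar';

// 7 of 20 items processed renders as a 35% bar; values outside 0..100 are clamped.
const Example: React.FC = () => <ProgressBar progress={(7 / 20) * 100} label="Analysis" />;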
frontend/components/common/Spinner.tsx
ADDED
@@ -0,0 +1,9 @@

import React from 'react';

export const Spinner: React.FC = () => (
  <svg className="animate-spin -ml-1 mr-3 h-5 w-5 text-white" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24">
    <circle className="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" strokeWidth="4"></circle>
    <path className="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path>
  </svg>
);
frontend/index.html
ADDED
@@ -0,0 +1,26 @@

<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <link rel="icon" type="image/svg+xml" href="/vite.svg" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Cross-Modal Object Comparison</title>
    <script src="https://cdn.tailwindcss.com"></script>
    <script type="importmap">
      {
        "imports": {
          "react/": "https://aistudiocdn.com/react@^19.2.0/",
          "react": "https://aistudiocdn.com/react@^19.2.0",
          "react-dom/": "https://aistudiocdn.com/react-dom@^19.2.0/",
          "three": "https://esm.sh/three@0.166.1",
          "three/": "https://esm.sh/three@0.166.1/",
          "jszip": "https://esm.sh/jszip@3.10.1"
        }
      }
    </script>
  </head>
  <body class="bg-gray-900 text-white">
    <div id="root"></div>
    <script type="module" src="/index.tsx"></script>
  </body>
</html>
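
The import map above is what lets the browser resolve bare module specifiers at runtime without a bundler; the trailing-slash entries cover subpath imports. (The `three` and `jszip` URLs were mangled by email obfuscation in this view and are restored here from the pinned versions in package-lock.json.) For example, in any module loaded by this page:

// 'three' resolves to https://esm.sh/three@0.166.1 via the import map.
import * as THREE from 'three';
// The 'three/' entry handles subpaths such as the STL loader used by MeshViewer:
import { STLLoader } from 'three/examples/jsm/loaders/STLLoader';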
frontend/index.tsx
ADDED
@@ -0,0 +1,16 @@

import React from 'react';
import ReactDOM from 'react-dom/client';
import App from './App';

const rootElement = document.getElementById('root');
if (!rootElement) {
  throw new Error("Could not find root element to mount to");
}

const root = ReactDOM.createRoot(rootElement);
root.render(
  <React.StrictMode>
    <App />
  </React.StrictMode>
);
frontend/metadata.json
ADDED
@@ -0,0 +1,5 @@

{
  "name": "Cross-Modal Object Comparison Tool",
  "description": "An application to manage datasets of 3D models, images, and text descriptions, and compare objects across these modalities. It features a dataset manager, a content viewer, and tools for both single-item and full-dataset comparison, with placeholder logic for future AI model integration.",
  "requestFramePermissions": []
}
frontend/package-lock.json
ADDED
@@ -0,0 +1,1874 @@

{
  "name": "cross-modal-object-comparison-tool",
  "version": "0.0.0",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "cross-modal-object-comparison-tool",
      "version": "0.0.0",
      "dependencies": {
        "jszip": "3.10.1",
        "react": "^19.2.0",
        "react-dom": "^19.2.0",
        "three": "0.166.1"
      },
      "devDependencies": {
        "@types/node": "^22.14.0",
        "@vitejs/plugin-react": "^5.0.0",
        "typescript": "~5.8.2",
        "vite": "^6.2.0"
      }
    },
    "node_modules/@babel/code-frame": {
      "version": "7.27.1",
      "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz",
      "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "@babel/helper-validator-identifier": "^7.27.1",
        "js-tokens": "^4.0.0",
        "picocolors": "^1.1.1"
      },
      "engines": {
        "node": ">=6.9.0"
      }
    },
    "node_modules/@babel/compat-data": {
      "version": "7.28.5",
      "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.5.tgz",
      "integrity": "sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA==",
      "dev": true,
      "license": "MIT",
      "engines": {
        "node": ">=6.9.0"
      }
    },
    "node_modules/@babel/core": {
      "version": "7.28.5",
      "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.5.tgz",
      "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==",
      "dev": true,
      "license": "MIT",
      "peer": true,
      "dependencies": {
        "@babel/code-frame": "^7.27.1",
        "@babel/generator": "^7.28.5",
        "@babel/helper-compilation-targets": "^7.27.2",
        "@babel/helper-module-transforms": "^7.28.3",
        "@babel/helpers": "^7.28.4",
        "@babel/parser": "^7.28.5",
        "@babel/template": "^7.27.2",
        "@babel/traverse": "^7.28.5",
        "@babel/types": "^7.28.5",
        "@jridgewell/remapping": "^2.3.5",
        "convert-source-map": "^2.0.0",
        "debug": "^4.1.0",
        "gensync": "^1.0.0-beta.2",
        "json5": "^2.2.3",
        "semver": "^6.3.1"
      },
      "engines": {
        "node": ">=6.9.0"
      },
      "funding": {
        "type": "opencollective",
        "url": "https://opencollective.com/babel"
      }
    },
    "node_modules/@babel/generator": {
      "version": "7.28.5",
      "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz",
      "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "@babel/parser": "^7.28.5",
        "@babel/types": "^7.28.5",
        "@jridgewell/gen-mapping": "^0.3.12",
        "@jridgewell/trace-mapping": "^0.3.28",
        "jsesc": "^3.0.2"
      },
      "engines": {
        "node": ">=6.9.0"
      }
    },
    "node_modules/@babel/helper-compilation-targets": {
      "version": "7.27.2",
      "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz",
      "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "@babel/compat-data": "^7.27.2",
        "@babel/helper-validator-option": "^7.27.1",
        "browserslist": "^4.24.0",
        "lru-cache": "^5.1.1",
        "semver": "^6.3.1"
      },
      "engines": {
        "node": ">=6.9.0"
      }
    },
    "node_modules/@babel/helper-globals": {
      "version": "7.28.0",
      "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz",
      "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==",
      "dev": true,
      "license": "MIT",
      "engines": {
        "node": ">=6.9.0"
      }
    },
    "node_modules/@babel/helper-module-imports": {
      "version": "7.27.1",
      "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz",
      "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "@babel/traverse": "^7.27.1",
        "@babel/types": "^7.27.1"
      },
      "engines": {
        "node": ">=6.9.0"
      }
    },
    "node_modules/@babel/helper-module-transforms": {
      "version": "7.28.3",
      "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz",
      "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "@babel/helper-module-imports": "^7.27.1",
        "@babel/helper-validator-identifier": "^7.27.1",
        "@babel/traverse": "^7.28.3"
      },
      "engines": {
        "node": ">=6.9.0"
      },
      "peerDependencies": {
        "@babel/core": "^7.0.0"
      }
    },
    "node_modules/@babel/helper-plugin-utils": {
      "version": "7.27.1",
      "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz",
      "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==",
      "dev": true,
      "license": "MIT",
      "engines": {
        "node": ">=6.9.0"
      }
    },
    "node_modules/@babel/helper-string-parser": {
      "version": "7.27.1",
      "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz",
      "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==",
      "dev": true,
      "license": "MIT",
      "engines": {
        "node": ">=6.9.0"
      }
    },
    "node_modules/@babel/helper-validator-identifier": {
      "version": "7.28.5",
      "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz",
      "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==",
      "dev": true,
      "license": "MIT",
      "engines": {
        "node": ">=6.9.0"
      }
    },
    "node_modules/@babel/helper-validator-option": {
      "version": "7.27.1",
      "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz",
      "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==",
      "dev": true,
      "license": "MIT",
      "engines": {
        "node": ">=6.9.0"
      }
    },
    "node_modules/@babel/helpers": {
      "version": "7.28.4",
      "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz",
      "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "@babel/template": "^7.27.2",
        "@babel/types": "^7.28.4"
      },
      "engines": {
        "node": ">=6.9.0"
      }
    },
    "node_modules/@babel/parser": {
      "version": "7.28.5",
      "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz",
      "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "@babel/types": "^7.28.5"
      },
      "bin": {
        "parser": "bin/babel-parser.js"
      },
      "engines": {
        "node": ">=6.0.0"
      }
    },
    "node_modules/@babel/plugin-transform-react-jsx-self": {
      "version": "7.27.1",
      "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz",
      "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "@babel/helper-plugin-utils": "^7.27.1"
      },
      "engines": {
        "node": ">=6.9.0"
      },
      "peerDependencies": {
        "@babel/core": "^7.0.0-0"
      }
    },
    "node_modules/@babel/plugin-transform-react-jsx-source": {
      "version": "7.27.1",
      "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz",
      "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "@babel/helper-plugin-utils": "^7.27.1"
      },
      "engines": {
        "node": ">=6.9.0"
      },
      "peerDependencies": {
        "@babel/core": "^7.0.0-0"
      }
    },
    "node_modules/@babel/template": {
      "version": "7.27.2",
      "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz",
      "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "@babel/code-frame": "^7.27.1",
        "@babel/parser": "^7.27.2",
        "@babel/types": "^7.27.1"
      },
      "engines": {
        "node": ">=6.9.0"
      }
    },
    "node_modules/@babel/traverse": {
      "version": "7.28.5",
      "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz",
      "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "@babel/code-frame": "^7.27.1",
        "@babel/generator": "^7.28.5",
        "@babel/helper-globals": "^7.28.0",
        "@babel/parser": "^7.28.5",
        "@babel/template": "^7.27.2",
        "@babel/types": "^7.28.5",
        "debug": "^4.3.1"
      },
      "engines": {
        "node": ">=6.9.0"
      }
    },
    "node_modules/@babel/types": {
      "version": "7.28.5",
      "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz",
      "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "@babel/helper-string-parser": "^7.27.1",
        "@babel/helper-validator-identifier": "^7.28.5"
      },
      "engines": {
        "node": ">=6.9.0"
      }
    },
    "node_modules/@esbuild/aix-ppc64": {
      "version": "0.25.12",
      "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.12.tgz",
      "integrity": "sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==",
      "cpu": [
        "ppc64"
      ],
      "dev": true,
      "license": "MIT",
      "optional": true,
      "os": [
        "aix"
      ],
      "engines": {
        "node": ">=18"
      }
    },
    "node_modules/@esbuild/android-arm": {
      "version": "0.25.12",
      "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.12.tgz",
      "integrity": "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==",
      "cpu": [
        "arm"
      ],
      "dev": true,
      "license": "MIT",
      "optional": true,
      "os": [
        "android"
      ],
      "engines": {
        "node": ">=18"
      }
    },
    "node_modules/@esbuild/android-arm64": {
      "version": "0.25.12",
      "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.12.tgz",
      "integrity": "sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==",
      "cpu": [
        "arm64"
      ],
      "dev": true,
      "license": "MIT",
      "optional": true,
      "os": [
        "android"
      ],
      "engines": {
        "node": ">=18"
      }
    },
    "node_modules/@esbuild/android-x64": {
      "version": "0.25.12",
      "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.12.tgz",
      "integrity": "sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==",
      "cpu": [
        "x64"
      ],
      "dev": true,
      "license": "MIT",
      "optional": true,
      "os": [
        "android"
      ],
      "engines": {
        "node": ">=18"
      }
    },
    "node_modules/@esbuild/darwin-arm64": {
      "version": "0.25.12",
      "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.12.tgz",
      "integrity": "sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==",
      "cpu": [
        "arm64"
      ],
      "dev": true,
      "license": "MIT",
      "optional": true,
      "os": [
        "darwin"
      ],
      "engines": {
        "node": ">=18"
      }
    },
    "node_modules/@esbuild/darwin-x64": {
      "version": "0.25.12",
      "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.12.tgz",
      "integrity": "sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==",
      "cpu": [
        "x64"
      ],
      "dev": true,
      "license": "MIT",
      "optional": true,
      "os": [
        "darwin"
      ],
      "engines": {
        "node": ">=18"
      }
    },
    "node_modules/@esbuild/freebsd-arm64": {
      "version": "0.25.12",
      "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.12.tgz",
      "integrity": "sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==",
      "cpu": [
        "arm64"
      ],
      "dev": true,
      "license": "MIT",
      "optional": true,
      "os": [
        "freebsd"
      ],
      "engines": {
        "node": ">=18"
      }
    },
    "node_modules/@esbuild/freebsd-x64": {
      "version": "0.25.12",
      "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.12.tgz",
      "integrity": "sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==",
      "cpu": [
        "x64"
      ],
      "dev": true,
      "license": "MIT",
      "optional": true,
      "os": [
        "freebsd"
      ],
      "engines": {
        "node": ">=18"
      }
    },
    "node_modules/@esbuild/linux-arm": {
      "version": "0.25.12",
      "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.12.tgz",
      "integrity": "sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==",
      "cpu": [
        "arm"
      ],
      "dev": true,
      "license": "MIT",
      "optional": true,
      "os": [
        "linux"
      ],
      "engines": {
        "node": ">=18"
      }
    },
    "node_modules/@esbuild/linux-arm64": {
      "version": "0.25.12",
      "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.12.tgz",
      "integrity": "sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==",
      "cpu": [
        "arm64"
      ],
      "dev": true,
      "license": "MIT",
      "optional": true,
      "os": [
        "linux"
      ],
      "engines": {
        "node": ">=18"
      }
    },
    "node_modules/@esbuild/linux-ia32": {
      "version": "0.25.12",
      "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.12.tgz",
      "integrity": "sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==",
|
| 480 |
+
"cpu": [
|
| 481 |
+
"ia32"
|
| 482 |
+
],
|
| 483 |
+
"dev": true,
|
| 484 |
+
"license": "MIT",
|
| 485 |
+
"optional": true,
|
| 486 |
+
"os": [
|
| 487 |
+
"linux"
|
| 488 |
+
],
|
| 489 |
+
"engines": {
|
| 490 |
+
"node": ">=18"
|
| 491 |
+
}
|
| 492 |
+
},
|
| 493 |
+
"node_modules/@esbuild/linux-loong64": {
|
| 494 |
+
"version": "0.25.12",
|
| 495 |
+
"resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.12.tgz",
|
| 496 |
+
"integrity": "sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==",
|
| 497 |
+
"cpu": [
|
| 498 |
+
"loong64"
|
| 499 |
+
],
|
| 500 |
+
"dev": true,
|
| 501 |
+
"license": "MIT",
|
| 502 |
+
"optional": true,
|
| 503 |
+
"os": [
|
| 504 |
+
"linux"
|
| 505 |
+
],
|
| 506 |
+
"engines": {
|
| 507 |
+
"node": ">=18"
|
| 508 |
+
}
|
| 509 |
+
},
|
| 510 |
+
"node_modules/@esbuild/linux-mips64el": {
|
| 511 |
+
"version": "0.25.12",
|
| 512 |
+
"resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.12.tgz",
|
| 513 |
+
"integrity": "sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==",
|
| 514 |
+
"cpu": [
|
| 515 |
+
"mips64el"
|
| 516 |
+
],
|
| 517 |
+
"dev": true,
|
| 518 |
+
"license": "MIT",
|
| 519 |
+
"optional": true,
|
| 520 |
+
"os": [
|
| 521 |
+
"linux"
|
| 522 |
+
],
|
| 523 |
+
"engines": {
|
| 524 |
+
"node": ">=18"
|
| 525 |
+
}
|
| 526 |
+
},
|
| 527 |
+
"node_modules/@esbuild/linux-ppc64": {
|
| 528 |
+
"version": "0.25.12",
|
| 529 |
+
"resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.12.tgz",
|
| 530 |
+
"integrity": "sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==",
|
| 531 |
+
"cpu": [
|
| 532 |
+
"ppc64"
|
| 533 |
+
],
|
| 534 |
+
"dev": true,
|
| 535 |
+
"license": "MIT",
|
| 536 |
+
"optional": true,
|
| 537 |
+
"os": [
|
| 538 |
+
"linux"
|
| 539 |
+
],
|
| 540 |
+
"engines": {
|
| 541 |
+
"node": ">=18"
|
| 542 |
+
}
|
| 543 |
+
},
|
| 544 |
+
"node_modules/@esbuild/linux-riscv64": {
|
| 545 |
+
"version": "0.25.12",
|
| 546 |
+
"resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.12.tgz",
|
| 547 |
+
"integrity": "sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==",
|
| 548 |
+
"cpu": [
|
| 549 |
+
"riscv64"
|
| 550 |
+
],
|
| 551 |
+
"dev": true,
|
| 552 |
+
"license": "MIT",
|
| 553 |
+
"optional": true,
|
| 554 |
+
"os": [
|
| 555 |
+
"linux"
|
| 556 |
+
],
|
| 557 |
+
"engines": {
|
| 558 |
+
"node": ">=18"
|
| 559 |
+
}
|
| 560 |
+
},
|
| 561 |
+
"node_modules/@esbuild/linux-s390x": {
|
| 562 |
+
"version": "0.25.12",
|
| 563 |
+
"resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.12.tgz",
|
| 564 |
+
"integrity": "sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==",
|
| 565 |
+
"cpu": [
|
| 566 |
+
"s390x"
|
| 567 |
+
],
|
| 568 |
+
"dev": true,
|
| 569 |
+
"license": "MIT",
|
| 570 |
+
"optional": true,
|
| 571 |
+
"os": [
|
| 572 |
+
"linux"
|
| 573 |
+
],
|
| 574 |
+
"engines": {
|
| 575 |
+
"node": ">=18"
|
| 576 |
+
}
|
| 577 |
+
},
|
| 578 |
+
"node_modules/@esbuild/linux-x64": {
|
| 579 |
+
"version": "0.25.12",
|
| 580 |
+
"resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.12.tgz",
|
| 581 |
+
"integrity": "sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==",
|
| 582 |
+
"cpu": [
|
| 583 |
+
"x64"
|
| 584 |
+
],
|
| 585 |
+
"dev": true,
|
| 586 |
+
"license": "MIT",
|
| 587 |
+
"optional": true,
|
| 588 |
+
"os": [
|
| 589 |
+
"linux"
|
| 590 |
+
],
|
| 591 |
+
"engines": {
|
| 592 |
+
"node": ">=18"
|
| 593 |
+
}
|
| 594 |
+
},
|
| 595 |
+
"node_modules/@esbuild/netbsd-arm64": {
|
| 596 |
+
"version": "0.25.12",
|
| 597 |
+
"resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.12.tgz",
|
| 598 |
+
"integrity": "sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==",
|
| 599 |
+
"cpu": [
|
| 600 |
+
"arm64"
|
| 601 |
+
],
|
| 602 |
+
"dev": true,
|
| 603 |
+
"license": "MIT",
|
| 604 |
+
"optional": true,
|
| 605 |
+
"os": [
|
| 606 |
+
"netbsd"
|
| 607 |
+
],
|
| 608 |
+
"engines": {
|
| 609 |
+
"node": ">=18"
|
| 610 |
+
}
|
| 611 |
+
},
|
| 612 |
+
"node_modules/@esbuild/netbsd-x64": {
|
| 613 |
+
"version": "0.25.12",
|
| 614 |
+
"resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.12.tgz",
|
| 615 |
+
"integrity": "sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==",
|
| 616 |
+
"cpu": [
|
| 617 |
+
"x64"
|
| 618 |
+
],
|
| 619 |
+
"dev": true,
|
| 620 |
+
"license": "MIT",
|
| 621 |
+
"optional": true,
|
| 622 |
+
"os": [
|
| 623 |
+
"netbsd"
|
| 624 |
+
],
|
| 625 |
+
"engines": {
|
| 626 |
+
"node": ">=18"
|
| 627 |
+
}
|
| 628 |
+
},
|
| 629 |
+
"node_modules/@esbuild/openbsd-arm64": {
|
| 630 |
+
"version": "0.25.12",
|
| 631 |
+
"resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.12.tgz",
|
| 632 |
+
"integrity": "sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==",
|
| 633 |
+
"cpu": [
|
| 634 |
+
"arm64"
|
| 635 |
+
],
|
| 636 |
+
"dev": true,
|
| 637 |
+
"license": "MIT",
|
| 638 |
+
"optional": true,
|
| 639 |
+
"os": [
|
| 640 |
+
"openbsd"
|
| 641 |
+
],
|
| 642 |
+
"engines": {
|
| 643 |
+
"node": ">=18"
|
| 644 |
+
}
|
| 645 |
+
},
|
| 646 |
+
"node_modules/@esbuild/openbsd-x64": {
|
| 647 |
+
"version": "0.25.12",
|
| 648 |
+
"resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.12.tgz",
|
| 649 |
+
"integrity": "sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==",
|
| 650 |
+
"cpu": [
|
| 651 |
+
"x64"
|
| 652 |
+
],
|
| 653 |
+
"dev": true,
|
| 654 |
+
"license": "MIT",
|
| 655 |
+
"optional": true,
|
| 656 |
+
"os": [
|
| 657 |
+
"openbsd"
|
| 658 |
+
],
|
| 659 |
+
"engines": {
|
| 660 |
+
"node": ">=18"
|
| 661 |
+
}
|
| 662 |
+
},
|
| 663 |
+
"node_modules/@esbuild/openharmony-arm64": {
|
| 664 |
+
"version": "0.25.12",
|
| 665 |
+
"resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.12.tgz",
|
| 666 |
+
"integrity": "sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==",
|
| 667 |
+
"cpu": [
|
| 668 |
+
"arm64"
|
| 669 |
+
],
|
| 670 |
+
"dev": true,
|
| 671 |
+
"license": "MIT",
|
| 672 |
+
"optional": true,
|
| 673 |
+
"os": [
|
| 674 |
+
"openharmony"
|
| 675 |
+
],
|
| 676 |
+
"engines": {
|
| 677 |
+
"node": ">=18"
|
| 678 |
+
}
|
| 679 |
+
},
|
| 680 |
+
"node_modules/@esbuild/sunos-x64": {
|
| 681 |
+
"version": "0.25.12",
|
| 682 |
+
"resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.12.tgz",
|
| 683 |
+
"integrity": "sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==",
|
| 684 |
+
"cpu": [
|
| 685 |
+
"x64"
|
| 686 |
+
],
|
| 687 |
+
"dev": true,
|
| 688 |
+
"license": "MIT",
|
| 689 |
+
"optional": true,
|
| 690 |
+
"os": [
|
| 691 |
+
"sunos"
|
| 692 |
+
],
|
| 693 |
+
"engines": {
|
| 694 |
+
"node": ">=18"
|
| 695 |
+
}
|
| 696 |
+
},
|
| 697 |
+
"node_modules/@esbuild/win32-arm64": {
|
| 698 |
+
"version": "0.25.12",
|
| 699 |
+
"resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.12.tgz",
|
| 700 |
+
"integrity": "sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==",
|
| 701 |
+
"cpu": [
|
| 702 |
+
"arm64"
|
| 703 |
+
],
|
| 704 |
+
"dev": true,
|
| 705 |
+
"license": "MIT",
|
| 706 |
+
"optional": true,
|
| 707 |
+
"os": [
|
| 708 |
+
"win32"
|
| 709 |
+
],
|
| 710 |
+
"engines": {
|
| 711 |
+
"node": ">=18"
|
| 712 |
+
}
|
| 713 |
+
},
|
| 714 |
+
"node_modules/@esbuild/win32-ia32": {
|
| 715 |
+
"version": "0.25.12",
|
| 716 |
+
"resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.12.tgz",
|
| 717 |
+
"integrity": "sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==",
|
| 718 |
+
"cpu": [
|
| 719 |
+
"ia32"
|
| 720 |
+
],
|
| 721 |
+
"dev": true,
|
| 722 |
+
"license": "MIT",
|
| 723 |
+
"optional": true,
|
| 724 |
+
"os": [
|
| 725 |
+
"win32"
|
| 726 |
+
],
|
| 727 |
+
"engines": {
|
| 728 |
+
"node": ">=18"
|
| 729 |
+
}
|
| 730 |
+
},
|
| 731 |
+
"node_modules/@esbuild/win32-x64": {
|
| 732 |
+
"version": "0.25.12",
|
| 733 |
+
"resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.12.tgz",
|
| 734 |
+
"integrity": "sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==",
|
| 735 |
+
"cpu": [
|
| 736 |
+
"x64"
|
| 737 |
+
],
|
| 738 |
+
"dev": true,
|
| 739 |
+
"license": "MIT",
|
| 740 |
+
"optional": true,
|
| 741 |
+
"os": [
|
| 742 |
+
"win32"
|
| 743 |
+
],
|
| 744 |
+
"engines": {
|
| 745 |
+
"node": ">=18"
|
| 746 |
+
}
|
| 747 |
+
},
|
| 748 |
+
"node_modules/@jridgewell/gen-mapping": {
|
| 749 |
+
"version": "0.3.13",
|
| 750 |
+
"resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz",
|
| 751 |
+
"integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==",
|
| 752 |
+
"dev": true,
|
| 753 |
+
"license": "MIT",
|
| 754 |
+
"dependencies": {
|
| 755 |
+
"@jridgewell/sourcemap-codec": "^1.5.0",
|
| 756 |
+
"@jridgewell/trace-mapping": "^0.3.24"
|
| 757 |
+
}
|
| 758 |
+
},
|
| 759 |
+
"node_modules/@jridgewell/remapping": {
|
| 760 |
+
"version": "2.3.5",
|
| 761 |
+
"resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz",
|
| 762 |
+
"integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==",
|
| 763 |
+
"dev": true,
|
| 764 |
+
"license": "MIT",
|
| 765 |
+
"dependencies": {
|
| 766 |
+
"@jridgewell/gen-mapping": "^0.3.5",
|
| 767 |
+
"@jridgewell/trace-mapping": "^0.3.24"
|
| 768 |
+
}
|
| 769 |
+
},
|
| 770 |
+
"node_modules/@jridgewell/resolve-uri": {
|
| 771 |
+
"version": "3.1.2",
|
| 772 |
+
"resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
|
| 773 |
+
"integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
|
| 774 |
+
"dev": true,
|
| 775 |
+
"license": "MIT",
|
| 776 |
+
"engines": {
|
| 777 |
+
"node": ">=6.0.0"
|
| 778 |
+
}
|
| 779 |
+
},
|
| 780 |
+
"node_modules/@jridgewell/sourcemap-codec": {
|
| 781 |
+
"version": "1.5.5",
|
| 782 |
+
"resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz",
|
| 783 |
+
"integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==",
|
| 784 |
+
"dev": true,
|
| 785 |
+
"license": "MIT"
|
| 786 |
+
},
|
| 787 |
+
"node_modules/@jridgewell/trace-mapping": {
|
| 788 |
+
"version": "0.3.31",
|
| 789 |
+
"resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz",
|
| 790 |
+
"integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==",
|
| 791 |
+
"dev": true,
|
| 792 |
+
"license": "MIT",
|
| 793 |
+
"dependencies": {
|
| 794 |
+
"@jridgewell/resolve-uri": "^3.1.0",
|
| 795 |
+
"@jridgewell/sourcemap-codec": "^1.4.14"
|
| 796 |
+
}
|
| 797 |
+
},
|
| 798 |
+
"node_modules/@rolldown/pluginutils": {
|
| 799 |
+
"version": "1.0.0-beta.43",
|
| 800 |
+
"resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.43.tgz",
|
| 801 |
+
"integrity": "sha512-5Uxg7fQUCmfhax7FJke2+8B6cqgeUJUD9o2uXIKXhD+mG0mL6NObmVoi9wXEU1tY89mZKgAYA6fTbftx3q2ZPQ==",
|
| 802 |
+
"dev": true,
|
| 803 |
+
"license": "MIT"
|
| 804 |
+
},
|
| 805 |
+
"node_modules/@rollup/rollup-android-arm-eabi": {
|
| 806 |
+
"version": "4.52.5",
|
| 807 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.52.5.tgz",
|
| 808 |
+
"integrity": "sha512-8c1vW4ocv3UOMp9K+gToY5zL2XiiVw3k7f1ksf4yO1FlDFQ1C2u72iACFnSOceJFsWskc2WZNqeRhFRPzv+wtQ==",
|
| 809 |
+
"cpu": [
|
| 810 |
+
"arm"
|
| 811 |
+
],
|
| 812 |
+
"dev": true,
|
| 813 |
+
"license": "MIT",
|
| 814 |
+
"optional": true,
|
| 815 |
+
"os": [
|
| 816 |
+
"android"
|
| 817 |
+
]
|
| 818 |
+
},
|
| 819 |
+
"node_modules/@rollup/rollup-android-arm64": {
|
| 820 |
+
"version": "4.52.5",
|
| 821 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.52.5.tgz",
|
| 822 |
+
"integrity": "sha512-mQGfsIEFcu21mvqkEKKu2dYmtuSZOBMmAl5CFlPGLY94Vlcm+zWApK7F/eocsNzp8tKmbeBP8yXyAbx0XHsFNA==",
|
| 823 |
+
"cpu": [
|
| 824 |
+
"arm64"
|
| 825 |
+
],
|
| 826 |
+
"dev": true,
|
| 827 |
+
"license": "MIT",
|
| 828 |
+
"optional": true,
|
| 829 |
+
"os": [
|
| 830 |
+
"android"
|
| 831 |
+
]
|
| 832 |
+
},
|
| 833 |
+
"node_modules/@rollup/rollup-darwin-arm64": {
|
| 834 |
+
"version": "4.52.5",
|
| 835 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.52.5.tgz",
|
| 836 |
+
"integrity": "sha512-takF3CR71mCAGA+v794QUZ0b6ZSrgJkArC+gUiG6LB6TQty9T0Mqh3m2ImRBOxS2IeYBo4lKWIieSvnEk2OQWA==",
|
| 837 |
+
"cpu": [
|
| 838 |
+
"arm64"
|
| 839 |
+
],
|
| 840 |
+
"dev": true,
|
| 841 |
+
"license": "MIT",
|
| 842 |
+
"optional": true,
|
| 843 |
+
"os": [
|
| 844 |
+
"darwin"
|
| 845 |
+
]
|
| 846 |
+
},
|
| 847 |
+
"node_modules/@rollup/rollup-darwin-x64": {
|
| 848 |
+
"version": "4.52.5",
|
| 849 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.52.5.tgz",
|
| 850 |
+
"integrity": "sha512-W901Pla8Ya95WpxDn//VF9K9u2JbocwV/v75TE0YIHNTbhqUTv9w4VuQ9MaWlNOkkEfFwkdNhXgcLqPSmHy0fA==",
|
| 851 |
+
"cpu": [
|
| 852 |
+
"x64"
|
| 853 |
+
],
|
| 854 |
+
"dev": true,
|
| 855 |
+
"license": "MIT",
|
| 856 |
+
"optional": true,
|
| 857 |
+
"os": [
|
| 858 |
+
"darwin"
|
| 859 |
+
]
|
| 860 |
+
},
|
| 861 |
+
"node_modules/@rollup/rollup-freebsd-arm64": {
|
| 862 |
+
"version": "4.52.5",
|
| 863 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.52.5.tgz",
|
| 864 |
+
"integrity": "sha512-QofO7i7JycsYOWxe0GFqhLmF6l1TqBswJMvICnRUjqCx8b47MTo46W8AoeQwiokAx3zVryVnxtBMcGcnX12LvA==",
|
| 865 |
+
"cpu": [
|
| 866 |
+
"arm64"
|
| 867 |
+
],
|
| 868 |
+
"dev": true,
|
| 869 |
+
"license": "MIT",
|
| 870 |
+
"optional": true,
|
| 871 |
+
"os": [
|
| 872 |
+
"freebsd"
|
| 873 |
+
]
|
| 874 |
+
},
|
| 875 |
+
"node_modules/@rollup/rollup-freebsd-x64": {
|
| 876 |
+
"version": "4.52.5",
|
| 877 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.52.5.tgz",
|
| 878 |
+
"integrity": "sha512-jr21b/99ew8ujZubPo9skbrItHEIE50WdV86cdSoRkKtmWa+DDr6fu2c/xyRT0F/WazZpam6kk7IHBerSL7LDQ==",
|
| 879 |
+
"cpu": [
|
| 880 |
+
"x64"
|
| 881 |
+
],
|
| 882 |
+
"dev": true,
|
| 883 |
+
"license": "MIT",
|
| 884 |
+
"optional": true,
|
| 885 |
+
"os": [
|
| 886 |
+
"freebsd"
|
| 887 |
+
]
|
| 888 |
+
},
|
| 889 |
+
"node_modules/@rollup/rollup-linux-arm-gnueabihf": {
|
| 890 |
+
"version": "4.52.5",
|
| 891 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.52.5.tgz",
|
| 892 |
+
"integrity": "sha512-PsNAbcyv9CcecAUagQefwX8fQn9LQ4nZkpDboBOttmyffnInRy8R8dSg6hxxl2Re5QhHBf6FYIDhIj5v982ATQ==",
|
| 893 |
+
"cpu": [
|
| 894 |
+
"arm"
|
| 895 |
+
],
|
| 896 |
+
"dev": true,
|
| 897 |
+
"license": "MIT",
|
| 898 |
+
"optional": true,
|
| 899 |
+
"os": [
|
| 900 |
+
"linux"
|
| 901 |
+
]
|
| 902 |
+
},
|
| 903 |
+
"node_modules/@rollup/rollup-linux-arm-musleabihf": {
|
| 904 |
+
"version": "4.52.5",
|
| 905 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.52.5.tgz",
|
| 906 |
+
"integrity": "sha512-Fw4tysRutyQc/wwkmcyoqFtJhh0u31K+Q6jYjeicsGJJ7bbEq8LwPWV/w0cnzOqR2m694/Af6hpFayLJZkG2VQ==",
|
| 907 |
+
"cpu": [
|
| 908 |
+
"arm"
|
| 909 |
+
],
|
| 910 |
+
"dev": true,
|
| 911 |
+
"license": "MIT",
|
| 912 |
+
"optional": true,
|
| 913 |
+
"os": [
|
| 914 |
+
"linux"
|
| 915 |
+
]
|
| 916 |
+
},
|
| 917 |
+
"node_modules/@rollup/rollup-linux-arm64-gnu": {
|
| 918 |
+
"version": "4.52.5",
|
| 919 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.52.5.tgz",
|
| 920 |
+
"integrity": "sha512-a+3wVnAYdQClOTlyapKmyI6BLPAFYs0JM8HRpgYZQO02rMR09ZcV9LbQB+NL6sljzG38869YqThrRnfPMCDtZg==",
|
| 921 |
+
"cpu": [
|
| 922 |
+
"arm64"
|
| 923 |
+
],
|
| 924 |
+
"dev": true,
|
| 925 |
+
"license": "MIT",
|
| 926 |
+
"optional": true,
|
| 927 |
+
"os": [
|
| 928 |
+
"linux"
|
| 929 |
+
]
|
| 930 |
+
},
|
| 931 |
+
"node_modules/@rollup/rollup-linux-arm64-musl": {
|
| 932 |
+
"version": "4.52.5",
|
| 933 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.52.5.tgz",
|
| 934 |
+
"integrity": "sha512-AvttBOMwO9Pcuuf7m9PkC1PUIKsfaAJ4AYhy944qeTJgQOqJYJ9oVl2nYgY7Rk0mkbsuOpCAYSs6wLYB2Xiw0Q==",
|
| 935 |
+
"cpu": [
|
| 936 |
+
"arm64"
|
| 937 |
+
],
|
| 938 |
+
"dev": true,
|
| 939 |
+
"license": "MIT",
|
| 940 |
+
"optional": true,
|
| 941 |
+
"os": [
|
| 942 |
+
"linux"
|
| 943 |
+
]
|
| 944 |
+
},
|
| 945 |
+
"node_modules/@rollup/rollup-linux-loong64-gnu": {
|
| 946 |
+
"version": "4.52.5",
|
| 947 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.52.5.tgz",
|
| 948 |
+
"integrity": "sha512-DkDk8pmXQV2wVrF6oq5tONK6UHLz/XcEVow4JTTerdeV1uqPeHxwcg7aFsfnSm9L+OO8WJsWotKM2JJPMWrQtA==",
|
| 949 |
+
"cpu": [
|
| 950 |
+
"loong64"
|
| 951 |
+
],
|
| 952 |
+
"dev": true,
|
| 953 |
+
"license": "MIT",
|
| 954 |
+
"optional": true,
|
| 955 |
+
"os": [
|
| 956 |
+
"linux"
|
| 957 |
+
]
|
| 958 |
+
},
|
| 959 |
+
"node_modules/@rollup/rollup-linux-ppc64-gnu": {
|
| 960 |
+
"version": "4.52.5",
|
| 961 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.52.5.tgz",
|
| 962 |
+
"integrity": "sha512-W/b9ZN/U9+hPQVvlGwjzi+Wy4xdoH2I8EjaCkMvzpI7wJUs8sWJ03Rq96jRnHkSrcHTpQe8h5Tg3ZzUPGauvAw==",
|
| 963 |
+
"cpu": [
|
| 964 |
+
"ppc64"
|
| 965 |
+
],
|
| 966 |
+
"dev": true,
|
| 967 |
+
"license": "MIT",
|
| 968 |
+
"optional": true,
|
| 969 |
+
"os": [
|
| 970 |
+
"linux"
|
| 971 |
+
]
|
| 972 |
+
},
|
| 973 |
+
"node_modules/@rollup/rollup-linux-riscv64-gnu": {
|
| 974 |
+
"version": "4.52.5",
|
| 975 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.52.5.tgz",
|
| 976 |
+
"integrity": "sha512-sjQLr9BW7R/ZiXnQiWPkErNfLMkkWIoCz7YMn27HldKsADEKa5WYdobaa1hmN6slu9oWQbB6/jFpJ+P2IkVrmw==",
|
| 977 |
+
"cpu": [
|
| 978 |
+
"riscv64"
|
| 979 |
+
],
|
| 980 |
+
"dev": true,
|
| 981 |
+
"license": "MIT",
|
| 982 |
+
"optional": true,
|
| 983 |
+
"os": [
|
| 984 |
+
"linux"
|
| 985 |
+
]
|
| 986 |
+
},
|
| 987 |
+
"node_modules/@rollup/rollup-linux-riscv64-musl": {
|
| 988 |
+
"version": "4.52.5",
|
| 989 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.52.5.tgz",
|
| 990 |
+
"integrity": "sha512-hq3jU/kGyjXWTvAh2awn8oHroCbrPm8JqM7RUpKjalIRWWXE01CQOf/tUNWNHjmbMHg/hmNCwc/Pz3k1T/j/Lg==",
|
| 991 |
+
"cpu": [
|
| 992 |
+
"riscv64"
|
| 993 |
+
],
|
| 994 |
+
"dev": true,
|
| 995 |
+
"license": "MIT",
|
| 996 |
+
"optional": true,
|
| 997 |
+
"os": [
|
| 998 |
+
"linux"
|
| 999 |
+
]
|
| 1000 |
+
},
|
| 1001 |
+
"node_modules/@rollup/rollup-linux-s390x-gnu": {
|
| 1002 |
+
"version": "4.52.5",
|
| 1003 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.52.5.tgz",
|
| 1004 |
+
"integrity": "sha512-gn8kHOrku8D4NGHMK1Y7NA7INQTRdVOntt1OCYypZPRt6skGbddska44K8iocdpxHTMMNui5oH4elPH4QOLrFQ==",
|
| 1005 |
+
"cpu": [
|
| 1006 |
+
"s390x"
|
| 1007 |
+
],
|
| 1008 |
+
"dev": true,
|
| 1009 |
+
"license": "MIT",
|
| 1010 |
+
"optional": true,
|
| 1011 |
+
"os": [
|
| 1012 |
+
"linux"
|
| 1013 |
+
]
|
| 1014 |
+
},
|
| 1015 |
+
"node_modules/@rollup/rollup-linux-x64-gnu": {
|
| 1016 |
+
"version": "4.52.5",
|
| 1017 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.52.5.tgz",
|
| 1018 |
+
"integrity": "sha512-hXGLYpdhiNElzN770+H2nlx+jRog8TyynpTVzdlc6bndktjKWyZyiCsuDAlpd+j+W+WNqfcyAWz9HxxIGfZm1Q==",
|
| 1019 |
+
"cpu": [
|
| 1020 |
+
"x64"
|
| 1021 |
+
],
|
| 1022 |
+
"dev": true,
|
| 1023 |
+
"license": "MIT",
|
| 1024 |
+
"optional": true,
|
| 1025 |
+
"os": [
|
| 1026 |
+
"linux"
|
| 1027 |
+
]
|
| 1028 |
+
},
|
| 1029 |
+
"node_modules/@rollup/rollup-linux-x64-musl": {
|
| 1030 |
+
"version": "4.52.5",
|
| 1031 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.52.5.tgz",
|
| 1032 |
+
"integrity": "sha512-arCGIcuNKjBoKAXD+y7XomR9gY6Mw7HnFBv5Rw7wQRvwYLR7gBAgV7Mb2QTyjXfTveBNFAtPt46/36vV9STLNg==",
|
| 1033 |
+
"cpu": [
|
| 1034 |
+
"x64"
|
| 1035 |
+
],
|
| 1036 |
+
"dev": true,
|
| 1037 |
+
"license": "MIT",
|
| 1038 |
+
"optional": true,
|
| 1039 |
+
"os": [
|
| 1040 |
+
"linux"
|
| 1041 |
+
]
|
| 1042 |
+
},
|
| 1043 |
+
"node_modules/@rollup/rollup-openharmony-arm64": {
|
| 1044 |
+
"version": "4.52.5",
|
| 1045 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.52.5.tgz",
|
| 1046 |
+
"integrity": "sha512-QoFqB6+/9Rly/RiPjaomPLmR/13cgkIGfA40LHly9zcH1S0bN2HVFYk3a1eAyHQyjs3ZJYlXvIGtcCs5tko9Cw==",
|
| 1047 |
+
"cpu": [
|
| 1048 |
+
"arm64"
|
| 1049 |
+
],
|
| 1050 |
+
"dev": true,
|
| 1051 |
+
"license": "MIT",
|
| 1052 |
+
"optional": true,
|
| 1053 |
+
"os": [
|
| 1054 |
+
"openharmony"
|
| 1055 |
+
]
|
| 1056 |
+
},
|
| 1057 |
+
"node_modules/@rollup/rollup-win32-arm64-msvc": {
|
| 1058 |
+
"version": "4.52.5",
|
| 1059 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.52.5.tgz",
|
| 1060 |
+
"integrity": "sha512-w0cDWVR6MlTstla1cIfOGyl8+qb93FlAVutcor14Gf5Md5ap5ySfQ7R9S/NjNaMLSFdUnKGEasmVnu3lCMqB7w==",
|
| 1061 |
+
"cpu": [
|
| 1062 |
+
"arm64"
|
| 1063 |
+
],
|
| 1064 |
+
"dev": true,
|
| 1065 |
+
"license": "MIT",
|
| 1066 |
+
"optional": true,
|
| 1067 |
+
"os": [
|
| 1068 |
+
"win32"
|
| 1069 |
+
]
|
| 1070 |
+
},
|
| 1071 |
+
"node_modules/@rollup/rollup-win32-ia32-msvc": {
|
| 1072 |
+
"version": "4.52.5",
|
| 1073 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.52.5.tgz",
|
| 1074 |
+
"integrity": "sha512-Aufdpzp7DpOTULJCuvzqcItSGDH73pF3ko/f+ckJhxQyHtp67rHw3HMNxoIdDMUITJESNE6a8uh4Lo4SLouOUg==",
|
| 1075 |
+
"cpu": [
|
| 1076 |
+
"ia32"
|
| 1077 |
+
],
|
| 1078 |
+
"dev": true,
|
| 1079 |
+
"license": "MIT",
|
| 1080 |
+
"optional": true,
|
| 1081 |
+
"os": [
|
| 1082 |
+
"win32"
|
| 1083 |
+
]
|
| 1084 |
+
},
|
| 1085 |
+
"node_modules/@rollup/rollup-win32-x64-gnu": {
|
| 1086 |
+
"version": "4.52.5",
|
| 1087 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.52.5.tgz",
|
| 1088 |
+
"integrity": "sha512-UGBUGPFp1vkj6p8wCRraqNhqwX/4kNQPS57BCFc8wYh0g94iVIW33wJtQAx3G7vrjjNtRaxiMUylM0ktp/TRSQ==",
|
| 1089 |
+
"cpu": [
|
| 1090 |
+
"x64"
|
| 1091 |
+
],
|
| 1092 |
+
"dev": true,
|
| 1093 |
+
"license": "MIT",
|
| 1094 |
+
"optional": true,
|
| 1095 |
+
"os": [
|
| 1096 |
+
"win32"
|
| 1097 |
+
]
|
| 1098 |
+
},
|
| 1099 |
+
"node_modules/@rollup/rollup-win32-x64-msvc": {
|
| 1100 |
+
"version": "4.52.5",
|
| 1101 |
+
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.52.5.tgz",
|
| 1102 |
+
"integrity": "sha512-TAcgQh2sSkykPRWLrdyy2AiceMckNf5loITqXxFI5VuQjS5tSuw3WlwdN8qv8vzjLAUTvYaH/mVjSFpbkFbpTg==",
|
| 1103 |
+
"cpu": [
|
| 1104 |
+
"x64"
|
| 1105 |
+
],
|
| 1106 |
+
"dev": true,
|
| 1107 |
+
"license": "MIT",
|
| 1108 |
+
"optional": true,
|
| 1109 |
+
"os": [
|
| 1110 |
+
"win32"
|
| 1111 |
+
]
|
| 1112 |
+
},
|
| 1113 |
+
"node_modules/@types/babel__core": {
|
| 1114 |
+
"version": "7.20.5",
|
| 1115 |
+
"resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
|
| 1116 |
+
"integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==",
|
| 1117 |
+
"dev": true,
|
| 1118 |
+
"license": "MIT",
|
| 1119 |
+
"dependencies": {
|
| 1120 |
+
"@babel/parser": "^7.20.7",
|
| 1121 |
+
"@babel/types": "^7.20.7",
|
| 1122 |
+
"@types/babel__generator": "*",
|
| 1123 |
+
"@types/babel__template": "*",
|
| 1124 |
+
"@types/babel__traverse": "*"
|
| 1125 |
+
}
|
| 1126 |
+
},
|
| 1127 |
+
"node_modules/@types/babel__generator": {
|
| 1128 |
+
"version": "7.27.0",
|
| 1129 |
+
"resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz",
|
| 1130 |
+
"integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==",
|
| 1131 |
+
"dev": true,
|
| 1132 |
+
"license": "MIT",
|
| 1133 |
+
"dependencies": {
|
| 1134 |
+
"@babel/types": "^7.0.0"
|
| 1135 |
+
}
|
| 1136 |
+
},
|
| 1137 |
+
"node_modules/@types/babel__template": {
|
| 1138 |
+
"version": "7.4.4",
|
| 1139 |
+
"resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz",
|
| 1140 |
+
"integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==",
|
| 1141 |
+
"dev": true,
|
| 1142 |
+
"license": "MIT",
|
| 1143 |
+
"dependencies": {
|
| 1144 |
+
"@babel/parser": "^7.1.0",
|
| 1145 |
+
"@babel/types": "^7.0.0"
|
| 1146 |
+
}
|
| 1147 |
+
},
|
| 1148 |
+
"node_modules/@types/babel__traverse": {
|
| 1149 |
+
"version": "7.28.0",
|
| 1150 |
+
"resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz",
|
| 1151 |
+
"integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==",
|
| 1152 |
+
"dev": true,
|
| 1153 |
+
"license": "MIT",
|
| 1154 |
+
"dependencies": {
|
| 1155 |
+
"@babel/types": "^7.28.2"
|
| 1156 |
+
}
|
| 1157 |
+
},
|
| 1158 |
+
"node_modules/@types/estree": {
|
| 1159 |
+
"version": "1.0.8",
|
| 1160 |
+
"resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz",
|
| 1161 |
+
"integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==",
|
| 1162 |
+
"dev": true,
|
| 1163 |
+
"license": "MIT"
|
| 1164 |
+
},
|
| 1165 |
+
"node_modules/@types/node": {
|
| 1166 |
+
"version": "22.19.0",
|
| 1167 |
+
"resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.0.tgz",
|
| 1168 |
+
"integrity": "sha512-xpr/lmLPQEj+TUnHmR+Ab91/glhJvsqcjB+yY0Ix9GO70H6Lb4FHH5GeqdOE5btAx7eIMwuHkp4H2MSkLcqWbA==",
|
| 1169 |
+
"dev": true,
|
| 1170 |
+
"license": "MIT",
|
| 1171 |
+
"peer": true,
|
| 1172 |
+
"dependencies": {
|
| 1173 |
+
"undici-types": "~6.21.0"
|
| 1174 |
+
}
|
| 1175 |
+
},
|
| 1176 |
+
"node_modules/@vitejs/plugin-react": {
|
| 1177 |
+
"version": "5.1.0",
|
| 1178 |
+
"resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-5.1.0.tgz",
|
| 1179 |
+
"integrity": "sha512-4LuWrg7EKWgQaMJfnN+wcmbAW+VSsCmqGohftWjuct47bv8uE4n/nPpq4XjJPsxgq00GGG5J8dvBczp8uxScew==",
|
| 1180 |
+
"dev": true,
|
| 1181 |
+
"license": "MIT",
|
| 1182 |
+
"dependencies": {
|
| 1183 |
+
"@babel/core": "^7.28.4",
|
| 1184 |
+
"@babel/plugin-transform-react-jsx-self": "^7.27.1",
|
| 1185 |
+
"@babel/plugin-transform-react-jsx-source": "^7.27.1",
|
| 1186 |
+
"@rolldown/pluginutils": "1.0.0-beta.43",
|
| 1187 |
+
"@types/babel__core": "^7.20.5",
|
| 1188 |
+
"react-refresh": "^0.18.0"
|
| 1189 |
+
},
|
| 1190 |
+
"engines": {
|
| 1191 |
+
"node": "^20.19.0 || >=22.12.0"
|
| 1192 |
+
},
|
| 1193 |
+
"peerDependencies": {
|
| 1194 |
+
"vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0"
|
| 1195 |
+
}
|
| 1196 |
+
},
|
| 1197 |
+
"node_modules/baseline-browser-mapping": {
|
| 1198 |
+
"version": "2.8.23",
|
| 1199 |
+
"resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.8.23.tgz",
|
| 1200 |
+
"integrity": "sha512-616V5YX4bepJFzNyOfce5Fa8fDJMfoxzOIzDCZwaGL8MKVpFrXqfNUoIpRn9YMI5pXf/VKgzjB4htFMsFKKdiQ==",
|
| 1201 |
+
"dev": true,
|
| 1202 |
+
"license": "Apache-2.0",
|
| 1203 |
+
"bin": {
|
| 1204 |
+
"baseline-browser-mapping": "dist/cli.js"
|
| 1205 |
+
}
|
| 1206 |
+
},
|
| 1207 |
+
"node_modules/browserslist": {
|
| 1208 |
+
"version": "4.27.0",
|
| 1209 |
+
"resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.27.0.tgz",
|
| 1210 |
+
"integrity": "sha512-AXVQwdhot1eqLihwasPElhX2tAZiBjWdJ9i/Zcj2S6QYIjkx62OKSfnobkriB81C3l4w0rVy3Nt4jaTBltYEpw==",
|
| 1211 |
+
"dev": true,
|
| 1212 |
+
"funding": [
|
| 1213 |
+
{
|
| 1214 |
+
"type": "opencollective",
|
| 1215 |
+
"url": "https://opencollective.com/browserslist"
|
| 1216 |
+
},
|
| 1217 |
+
{
|
| 1218 |
+
"type": "tidelift",
|
| 1219 |
+
"url": "https://tidelift.com/funding/github/npm/browserslist"
|
| 1220 |
+
},
|
| 1221 |
+
{
|
| 1222 |
+
"type": "github",
|
| 1223 |
+
"url": "https://github.com/sponsors/ai"
|
| 1224 |
+
}
|
| 1225 |
+
],
|
| 1226 |
+
"license": "MIT",
|
| 1227 |
+
"peer": true,
|
| 1228 |
+
"dependencies": {
|
| 1229 |
+
"baseline-browser-mapping": "^2.8.19",
|
| 1230 |
+
"caniuse-lite": "^1.0.30001751",
|
| 1231 |
+
"electron-to-chromium": "^1.5.238",
|
| 1232 |
+
"node-releases": "^2.0.26",
|
| 1233 |
+
"update-browserslist-db": "^1.1.4"
|
| 1234 |
+
},
|
| 1235 |
+
"bin": {
|
| 1236 |
+
"browserslist": "cli.js"
|
| 1237 |
+
},
|
| 1238 |
+
"engines": {
|
| 1239 |
+
"node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
|
| 1240 |
+
}
|
| 1241 |
+
},
|
| 1242 |
+
"node_modules/caniuse-lite": {
|
| 1243 |
+
"version": "1.0.30001753",
|
| 1244 |
+
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001753.tgz",
|
| 1245 |
+
"integrity": "sha512-Bj5H35MD/ebaOV4iDLqPEtiliTN29qkGtEHCwawWn4cYm+bPJM2NsaP30vtZcnERClMzp52J4+aw2UNbK4o+zw==",
|
| 1246 |
+
"dev": true,
|
| 1247 |
+
"funding": [
|
| 1248 |
+
{
|
| 1249 |
+
"type": "opencollective",
|
| 1250 |
+
"url": "https://opencollective.com/browserslist"
|
| 1251 |
+
},
|
| 1252 |
+
{
|
| 1253 |
+
"type": "tidelift",
|
| 1254 |
+
"url": "https://tidelift.com/funding/github/npm/caniuse-lite"
|
| 1255 |
+
},
|
| 1256 |
+
{
|
| 1257 |
+
"type": "github",
|
| 1258 |
+
"url": "https://github.com/sponsors/ai"
|
| 1259 |
+
}
|
| 1260 |
+
],
|
| 1261 |
+
"license": "CC-BY-4.0"
|
| 1262 |
+
},
|
| 1263 |
+
"node_modules/convert-source-map": {
|
| 1264 |
+
"version": "2.0.0",
|
| 1265 |
+
"resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
|
| 1266 |
+
"integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==",
|
| 1267 |
+
"dev": true,
|
| 1268 |
+
"license": "MIT"
|
| 1269 |
+
},
|
| 1270 |
+
"node_modules/core-util-is": {
|
| 1271 |
+
"version": "1.0.3",
|
| 1272 |
+
"resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz",
|
| 1273 |
+
"integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==",
|
| 1274 |
+
"license": "MIT"
|
| 1275 |
+
},
|
| 1276 |
+
"node_modules/debug": {
|
| 1277 |
+
"version": "4.4.3",
|
| 1278 |
+
"resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
|
| 1279 |
+
"integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
|
| 1280 |
+
"dev": true,
|
| 1281 |
+
"license": "MIT",
|
| 1282 |
+
"dependencies": {
|
| 1283 |
+
"ms": "^2.1.3"
|
| 1284 |
+
},
|
| 1285 |
+
"engines": {
|
| 1286 |
+
"node": ">=6.0"
|
| 1287 |
+
},
|
| 1288 |
+
"peerDependenciesMeta": {
|
| 1289 |
+
"supports-color": {
|
| 1290 |
+
"optional": true
|
| 1291 |
+
}
|
| 1292 |
+
}
|
| 1293 |
+
},
|
| 1294 |
+
"node_modules/electron-to-chromium": {
|
| 1295 |
+
"version": "1.5.244",
|
| 1296 |
+
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.244.tgz",
|
| 1297 |
+
"integrity": "sha512-OszpBN7xZX4vWMPJwB9illkN/znA8M36GQqQxi6MNy9axWxhOfJyZZJtSLQCpEFLHP2xK33BiWx9aIuIEXVCcw==",
|
| 1298 |
+
"dev": true,
|
| 1299 |
+
"license": "ISC"
|
| 1300 |
+
},
|
| 1301 |
+
"node_modules/esbuild": {
|
| 1302 |
+
"version": "0.25.12",
|
| 1303 |
+
"resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.12.tgz",
|
| 1304 |
+
"integrity": "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg==",
|
| 1305 |
+
"dev": true,
|
| 1306 |
+
"hasInstallScript": true,
|
| 1307 |
+
"license": "MIT",
|
| 1308 |
+
"bin": {
|
| 1309 |
+
"esbuild": "bin/esbuild"
|
| 1310 |
+
},
|
| 1311 |
+
"engines": {
|
| 1312 |
+
"node": ">=18"
|
| 1313 |
+
},
|
| 1314 |
+
"optionalDependencies": {
|
| 1315 |
+
"@esbuild/aix-ppc64": "0.25.12",
|
| 1316 |
+
"@esbuild/android-arm": "0.25.12",
|
| 1317 |
+
"@esbuild/android-arm64": "0.25.12",
|
| 1318 |
+
"@esbuild/android-x64": "0.25.12",
|
| 1319 |
+
"@esbuild/darwin-arm64": "0.25.12",
|
| 1320 |
+
"@esbuild/darwin-x64": "0.25.12",
|
| 1321 |
+
"@esbuild/freebsd-arm64": "0.25.12",
|
| 1322 |
+
"@esbuild/freebsd-x64": "0.25.12",
|
| 1323 |
+
"@esbuild/linux-arm": "0.25.12",
|
| 1324 |
+
"@esbuild/linux-arm64": "0.25.12",
|
| 1325 |
+
"@esbuild/linux-ia32": "0.25.12",
|
| 1326 |
+
"@esbuild/linux-loong64": "0.25.12",
|
| 1327 |
+
"@esbuild/linux-mips64el": "0.25.12",
|
| 1328 |
+
"@esbuild/linux-ppc64": "0.25.12",
|
| 1329 |
+
"@esbuild/linux-riscv64": "0.25.12",
|
| 1330 |
+
"@esbuild/linux-s390x": "0.25.12",
|
| 1331 |
+
"@esbuild/linux-x64": "0.25.12",
|
| 1332 |
+
"@esbuild/netbsd-arm64": "0.25.12",
|
| 1333 |
+
"@esbuild/netbsd-x64": "0.25.12",
|
| 1334 |
+
"@esbuild/openbsd-arm64": "0.25.12",
|
| 1335 |
+
"@esbuild/openbsd-x64": "0.25.12",
|
| 1336 |
+
"@esbuild/openharmony-arm64": "0.25.12",
|
| 1337 |
+
"@esbuild/sunos-x64": "0.25.12",
|
| 1338 |
+
"@esbuild/win32-arm64": "0.25.12",
|
| 1339 |
+
"@esbuild/win32-ia32": "0.25.12",
|
| 1340 |
+
"@esbuild/win32-x64": "0.25.12"
|
| 1341 |
+
}
|
| 1342 |
+
},
|
| 1343 |
+
"node_modules/escalade": {
|
| 1344 |
+
"version": "3.2.0",
|
| 1345 |
+
"resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
|
| 1346 |
+
"integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
|
| 1347 |
+
"dev": true,
|
| 1348 |
+
"license": "MIT",
|
| 1349 |
+
"engines": {
|
| 1350 |
+
"node": ">=6"
|
| 1351 |
+
}
|
| 1352 |
+
},
|
| 1353 |
+
"node_modules/fdir": {
|
| 1354 |
+
"version": "6.5.0",
|
| 1355 |
+
"resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz",
|
| 1356 |
+
"integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==",
|
| 1357 |
+
"dev": true,
|
| 1358 |
+
"license": "MIT",
|
| 1359 |
+
"engines": {
|
| 1360 |
+
"node": ">=12.0.0"
|
| 1361 |
+
},
|
| 1362 |
+
"peerDependencies": {
|
| 1363 |
+
"picomatch": "^3 || ^4"
|
| 1364 |
+
},
|
| 1365 |
+
"peerDependenciesMeta": {
|
| 1366 |
+
"picomatch": {
|
| 1367 |
+
"optional": true
|
| 1368 |
+
}
|
| 1369 |
+
}
|
| 1370 |
+
},
|
| 1371 |
+
"node_modules/fsevents": {
|
| 1372 |
+
"version": "2.3.3",
|
| 1373 |
+
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
|
| 1374 |
+
"integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
|
| 1375 |
+
"dev": true,
|
| 1376 |
+
"hasInstallScript": true,
|
| 1377 |
+
"license": "MIT",
|
| 1378 |
+
"optional": true,
|
| 1379 |
+
"os": [
|
| 1380 |
+
"darwin"
|
| 1381 |
+
],
|
| 1382 |
+
"engines": {
|
| 1383 |
+
"node": "^8.16.0 || ^10.6.0 || >=11.0.0"
|
| 1384 |
+
}
|
| 1385 |
+
},
|
| 1386 |
+
"node_modules/gensync": {
|
| 1387 |
+
"version": "1.0.0-beta.2",
|
| 1388 |
+
"resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
|
| 1389 |
+
"integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
|
| 1390 |
+
"dev": true,
|
| 1391 |
+
"license": "MIT",
|
| 1392 |
+
"engines": {
|
| 1393 |
+
"node": ">=6.9.0"
|
| 1394 |
+
}
|
| 1395 |
+
},
|
| 1396 |
+
"node_modules/immediate": {
|
| 1397 |
+
"version": "3.0.6",
|
| 1398 |
+
"resolved": "https://registry.npmjs.org/immediate/-/immediate-3.0.6.tgz",
|
| 1399 |
+
"integrity": "sha512-XXOFtyqDjNDAQxVfYxuF7g9Il/IbWmmlQg2MYKOH8ExIT1qg6xc4zyS3HaEEATgs1btfzxq15ciUiY7gjSXRGQ==",
|
| 1400 |
+
"license": "MIT"
|
| 1401 |
+
},
|
| 1402 |
+
"node_modules/inherits": {
|
| 1403 |
+
"version": "2.0.4",
|
| 1404 |
+
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
|
| 1405 |
+
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
|
| 1406 |
+
"license": "ISC"
|
| 1407 |
+
},
|
| 1408 |
+
"node_modules/isarray": {
|
| 1409 |
+
"version": "1.0.0",
|
| 1410 |
+
"resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
|
| 1411 |
+
"integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==",
|
| 1412 |
+
"license": "MIT"
|
| 1413 |
+
},
|
| 1414 |
+
"node_modules/js-tokens": {
|
| 1415 |
+
"version": "4.0.0",
|
| 1416 |
+
"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
|
| 1417 |
+
"integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
|
| 1418 |
+
"dev": true,
|
| 1419 |
+
"license": "MIT"
|
| 1420 |
+
},
|
| 1421 |
+
"node_modules/jsesc": {
|
| 1422 |
+
"version": "3.1.0",
|
| 1423 |
+
"resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz",
|
| 1424 |
+
"integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==",
|
| 1425 |
+
"dev": true,
|
| 1426 |
+
"license": "MIT",
|
| 1427 |
+
"bin": {
|
| 1428 |
+
"jsesc": "bin/jsesc"
|
| 1429 |
+
},
|
| 1430 |
+
"engines": {
|
| 1431 |
+
"node": ">=6"
|
| 1432 |
+
}
|
| 1433 |
+
},
|
| 1434 |
+
"node_modules/json5": {
|
| 1435 |
+
"version": "2.2.3",
|
| 1436 |
+
"resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
|
| 1437 |
+
"integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
|
| 1438 |
+
"dev": true,
|
| 1439 |
+
"license": "MIT",
|
| 1440 |
+
"bin": {
|
| 1441 |
+
"json5": "lib/cli.js"
|
| 1442 |
+
},
|
| 1443 |
+
"engines": {
|
| 1444 |
+
"node": ">=6"
|
| 1445 |
+
}
|
| 1446 |
+
},
|
| 1447 |
+
"node_modules/jszip": {
|
| 1448 |
+
"version": "3.10.1",
|
| 1449 |
+
"resolved": "https://registry.npmjs.org/jszip/-/jszip-3.10.1.tgz",
|
| 1450 |
+
"integrity": "sha512-xXDvecyTpGLrqFrvkrUSoxxfJI5AH7U8zxxtVclpsUtMCq4JQ290LY8AW5c7Ggnr/Y/oK+bQMbqK2qmtk3pN4g==",
|
| 1451 |
+
"license": "(MIT OR GPL-3.0-or-later)",
|
| 1452 |
+
"dependencies": {
|
| 1453 |
+
"lie": "~3.3.0",
|
| 1454 |
+
"pako": "~1.0.2",
|
| 1455 |
+
"readable-stream": "~2.3.6",
|
| 1456 |
+
"setimmediate": "^1.0.5"
|
| 1457 |
+
}
|
| 1458 |
+
},
|
| 1459 |
+
"node_modules/lie": {
|
| 1460 |
+
"version": "3.3.0",
|
| 1461 |
+
"resolved": "https://registry.npmjs.org/lie/-/lie-3.3.0.tgz",
|
| 1462 |
+
"integrity": "sha512-UaiMJzeWRlEujzAuw5LokY1L5ecNQYZKfmyZ9L7wDHb/p5etKaxXhohBcrw0EYby+G/NA52vRSN4N39dxHAIwQ==",
|
| 1463 |
+
"license": "MIT",
|
| 1464 |
+
"dependencies": {
|
| 1465 |
+
"immediate": "~3.0.5"
|
| 1466 |
+
}
|
| 1467 |
+
},
|
| 1468 |
+
"node_modules/lru-cache": {
|
| 1469 |
+
"version": "5.1.1",
|
| 1470 |
+
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
|
| 1471 |
+
"integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
|
| 1472 |
+
"dev": true,
|
| 1473 |
+
"license": "ISC",
|
| 1474 |
+
"dependencies": {
|
| 1475 |
+
"yallist": "^3.0.2"
|
| 1476 |
+
}
|
| 1477 |
+
},
|
| 1478 |
+
"node_modules/ms": {
|
| 1479 |
+
"version": "2.1.3",
|
| 1480 |
+
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
|
| 1481 |
+
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
|
| 1482 |
+
"dev": true,
|
| 1483 |
+
"license": "MIT"
|
| 1484 |
+
},
|
| 1485 |
+
"node_modules/nanoid": {
|
| 1486 |
+
"version": "3.3.11",
|
| 1487 |
+
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz",
|
| 1488 |
+
"integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==",
|
| 1489 |
+
"dev": true,
|
| 1490 |
+
"funding": [
|
| 1491 |
+
{
|
| 1492 |
+
"type": "github",
|
| 1493 |
+
"url": "https://github.com/sponsors/ai"
|
| 1494 |
+
}
|
| 1495 |
+
],
|
| 1496 |
+
"license": "MIT",
|
| 1497 |
+
"bin": {
|
| 1498 |
+
"nanoid": "bin/nanoid.cjs"
|
| 1499 |
+
},
|
| 1500 |
+
"engines": {
|
| 1501 |
+
"node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
|
| 1502 |
+
}
|
| 1503 |
+
},
|
| 1504 |
+
"node_modules/node-releases": {
|
| 1505 |
+
"version": "2.0.27",
|
| 1506 |
+
"resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz",
|
| 1507 |
+
"integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==",
|
| 1508 |
+
"dev": true,
|
| 1509 |
+
"license": "MIT"
|
| 1510 |
+
},
|
| 1511 |
+
"node_modules/pako": {
|
| 1512 |
+
"version": "1.0.11",
|
| 1513 |
+
"resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz",
|
| 1514 |
+
"integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==",
|
| 1515 |
+
"license": "(MIT AND Zlib)"
|
| 1516 |
+
},
|
| 1517 |
+
"node_modules/picocolors": {
|
| 1518 |
+
"version": "1.1.1",
|
| 1519 |
+
"resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
|
| 1520 |
+
"integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
|
| 1521 |
+
"dev": true,
|
| 1522 |
+
"license": "ISC"
|
| 1523 |
+
},
|
| 1524 |
+
"node_modules/picomatch": {
|
| 1525 |
+
"version": "4.0.3",
|
| 1526 |
+
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
|
| 1527 |
+
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
|
| 1528 |
+
"dev": true,
|
| 1529 |
+
"license": "MIT",
|
| 1530 |
+
"peer": true,
|
| 1531 |
+
"engines": {
|
| 1532 |
+
"node": ">=12"
|
| 1533 |
+
},
|
| 1534 |
+
"funding": {
|
| 1535 |
+
"url": "https://github.com/sponsors/jonschlinkert"
|
| 1536 |
+
}
|
| 1537 |
+
},
|
| 1538 |
+
"node_modules/postcss": {
|
| 1539 |
+
"version": "8.5.6",
|
| 1540 |
+
"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz",
|
| 1541 |
+
"integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==",
|
| 1542 |
+
"dev": true,
|
| 1543 |
+
"funding": [
|
| 1544 |
+
{
|
| 1545 |
+
"type": "opencollective",
|
| 1546 |
+
"url": "https://opencollective.com/postcss/"
|
| 1547 |
+
},
|
| 1548 |
+
{
|
| 1549 |
+
"type": "tidelift",
|
| 1550 |
+
"url": "https://tidelift.com/funding/github/npm/postcss"
|
| 1551 |
+
},
|
| 1552 |
+
{
|
| 1553 |
+
"type": "github",
|
| 1554 |
+
"url": "https://github.com/sponsors/ai"
|
| 1555 |
+
}
|
| 1556 |
+
],
|
| 1557 |
+
"license": "MIT",
|
| 1558 |
+
"dependencies": {
|
| 1559 |
+
"nanoid": "^3.3.11",
|
| 1560 |
+
"picocolors": "^1.1.1",
|
| 1561 |
+
"source-map-js": "^1.2.1"
|
| 1562 |
+
},
|
| 1563 |
+
"engines": {
|
| 1564 |
+
"node": "^10 || ^12 || >=14"
|
| 1565 |
+
}
|
| 1566 |
+
},
|
| 1567 |
+
"node_modules/process-nextick-args": {
|
| 1568 |
+
"version": "2.0.1",
|
| 1569 |
+
"resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
|
| 1570 |
+
"integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==",
|
| 1571 |
+
"license": "MIT"
|
| 1572 |
+
},
|
| 1573 |
+
"node_modules/react": {
|
| 1574 |
+
"version": "19.2.0",
|
| 1575 |
+
"resolved": "https://registry.npmjs.org/react/-/react-19.2.0.tgz",
|
| 1576 |
+
"integrity": "sha512-tmbWg6W31tQLeB5cdIBOicJDJRR2KzXsV7uSK9iNfLWQ5bIZfxuPEHp7M8wiHyHnn0DD1i7w3Zmin0FtkrwoCQ==",
|
| 1577 |
+
"license": "MIT",
|
| 1578 |
+
"peer": true,
|
| 1579 |
+
"engines": {
|
| 1580 |
+
"node": ">=0.10.0"
|
| 1581 |
+
}
|
| 1582 |
+
},
|
| 1583 |
+
"node_modules/react-dom": {
|
| 1584 |
+
"version": "19.2.0",
|
| 1585 |
+
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.0.tgz",
|
| 1586 |
+
"integrity": "sha512-UlbRu4cAiGaIewkPyiRGJk0imDN2T3JjieT6spoL2UeSf5od4n5LB/mQ4ejmxhCFT1tYe8IvaFulzynWovsEFQ==",
|
| 1587 |
+
"license": "MIT",
|
| 1588 |
+
"dependencies": {
|
| 1589 |
+
"scheduler": "^0.27.0"
|
| 1590 |
+
},
|
| 1591 |
+
"peerDependencies": {
|
| 1592 |
+
"react": "^19.2.0"
|
| 1593 |
+
}
|
| 1594 |
+
},
|
| 1595 |
+
"node_modules/react-refresh": {
|
| 1596 |
+
"version": "0.18.0",
|
| 1597 |
+
"resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.18.0.tgz",
|
| 1598 |
+
"integrity": "sha512-QgT5//D3jfjJb6Gsjxv0Slpj23ip+HtOpnNgnb2S5zU3CB26G/IDPGoy4RJB42wzFE46DRsstbW6tKHoKbhAxw==",
|
| 1599 |
+
"dev": true,
|
| 1600 |
+
"license": "MIT",
|
| 1601 |
+
"engines": {
|
| 1602 |
+
"node": ">=0.10.0"
|
| 1603 |
+
}
|
| 1604 |
+
},
|
| 1605 |
+
"node_modules/readable-stream": {
|
| 1606 |
+
"version": "2.3.8",
|
| 1607 |
+
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz",
|
| 1608 |
+
"integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==",
|
| 1609 |
+
"license": "MIT",
|
| 1610 |
+
"dependencies": {
|
| 1611 |
+
"core-util-is": "~1.0.0",
|
| 1612 |
+
"inherits": "~2.0.3",
|
| 1613 |
+
"isarray": "~1.0.0",
|
| 1614 |
+
"process-nextick-args": "~2.0.0",
|
| 1615 |
+
"safe-buffer": "~5.1.1",
|
| 1616 |
+
"string_decoder": "~1.1.1",
|
| 1617 |
+
"util-deprecate": "~1.0.1"
|
| 1618 |
+
}
|
| 1619 |
+
},
|
| 1620 |
+
"node_modules/rollup": {
|
| 1621 |
+
"version": "4.52.5",
|
| 1622 |
+
"resolved": "https://registry.npmjs.org/rollup/-/rollup-4.52.5.tgz",
|
| 1623 |
+
"integrity": "sha512-3GuObel8h7Kqdjt0gxkEzaifHTqLVW56Y/bjN7PSQtkKr0w3V/QYSdt6QWYtd7A1xUtYQigtdUfgj1RvWVtorw==",
|
| 1624 |
+
"dev": true,
|
| 1625 |
+
"license": "MIT",
|
| 1626 |
+
"dependencies": {
|
| 1627 |
+
"@types/estree": "1.0.8"
|
| 1628 |
+
},
|
| 1629 |
+
"bin": {
|
| 1630 |
+
"rollup": "dist/bin/rollup"
|
| 1631 |
+
},
|
| 1632 |
+
"engines": {
|
| 1633 |
+
"node": ">=18.0.0",
|
| 1634 |
+
"npm": ">=8.0.0"
|
| 1635 |
+
},
|
| 1636 |
+
"optionalDependencies": {
|
| 1637 |
+
"@rollup/rollup-android-arm-eabi": "4.52.5",
|
| 1638 |
+
"@rollup/rollup-android-arm64": "4.52.5",
|
| 1639 |
+
"@rollup/rollup-darwin-arm64": "4.52.5",
|
| 1640 |
+
"@rollup/rollup-darwin-x64": "4.52.5",
|
| 1641 |
+
"@rollup/rollup-freebsd-arm64": "4.52.5",
|
| 1642 |
+
"@rollup/rollup-freebsd-x64": "4.52.5",
|
| 1643 |
+
"@rollup/rollup-linux-arm-gnueabihf": "4.52.5",
|
| 1644 |
+
"@rollup/rollup-linux-arm-musleabihf": "4.52.5",
|
| 1645 |
+
"@rollup/rollup-linux-arm64-gnu": "4.52.5",
|
| 1646 |
+
"@rollup/rollup-linux-arm64-musl": "4.52.5",
|
| 1647 |
+
"@rollup/rollup-linux-loong64-gnu": "4.52.5",
|
| 1648 |
+
"@rollup/rollup-linux-ppc64-gnu": "4.52.5",
|
| 1649 |
+
"@rollup/rollup-linux-riscv64-gnu": "4.52.5",
|
| 1650 |
+
"@rollup/rollup-linux-riscv64-musl": "4.52.5",
|
| 1651 |
+
"@rollup/rollup-linux-s390x-gnu": "4.52.5",
|
| 1652 |
+
"@rollup/rollup-linux-x64-gnu": "4.52.5",
|
| 1653 |
+
"@rollup/rollup-linux-x64-musl": "4.52.5",
|
| 1654 |
+
"@rollup/rollup-openharmony-arm64": "4.52.5",
|
| 1655 |
+
"@rollup/rollup-win32-arm64-msvc": "4.52.5",
|
| 1656 |
+
"@rollup/rollup-win32-ia32-msvc": "4.52.5",
|
| 1657 |
+
"@rollup/rollup-win32-x64-gnu": "4.52.5",
|
| 1658 |
+
"@rollup/rollup-win32-x64-msvc": "4.52.5",
|
| 1659 |
+
"fsevents": "~2.3.2"
|
| 1660 |
+
}
|
| 1661 |
+
},
|
| 1662 |
+
"node_modules/safe-buffer": {
|
| 1663 |
+
"version": "5.1.2",
|
| 1664 |
+
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
|
| 1665 |
+
"integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
|
| 1666 |
+
"license": "MIT"
|
| 1667 |
+
},
|
| 1668 |
+
"node_modules/scheduler": {
|
| 1669 |
+
"version": "0.27.0",
|
| 1670 |
+
"resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz",
|
| 1671 |
+
"integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==",
|
| 1672 |
+
"license": "MIT"
|
| 1673 |
+
},
|
| 1674 |
+
"node_modules/semver": {
|
| 1675 |
+
"version": "6.3.1",
|
| 1676 |
+
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
|
| 1677 |
+
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
|
| 1678 |
+
"dev": true,
|
| 1679 |
+
"license": "ISC",
|
| 1680 |
+
"bin": {
|
| 1681 |
+
"semver": "bin/semver.js"
|
| 1682 |
+
}
|
| 1683 |
+
},
|
| 1684 |
+
"node_modules/setimmediate": {
|
| 1685 |
+
"version": "1.0.5",
|
| 1686 |
+
"resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz",
|
| 1687 |
+
"integrity": "sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA==",
|
| 1688 |
+
"license": "MIT"
|
| 1689 |
+
},
|
| 1690 |
+
"node_modules/source-map-js": {
|
| 1691 |
+
"version": "1.2.1",
|
| 1692 |
+
"resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
|
| 1693 |
+
"integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
|
| 1694 |
+
"dev": true,
|
| 1695 |
+
"license": "BSD-3-Clause",
|
| 1696 |
+
"engines": {
|
| 1697 |
+
"node": ">=0.10.0"
|
| 1698 |
+
}
|
| 1699 |
+
},
|
| 1700 |
+
"node_modules/string_decoder": {
|
| 1701 |
+
"version": "1.1.1",
|
| 1702 |
+
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
|
| 1703 |
+
"integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
|
| 1704 |
+
"license": "MIT",
|
| 1705 |
+
"dependencies": {
|
| 1706 |
+
"safe-buffer": "~5.1.0"
|
| 1707 |
+
}
|
| 1708 |
+
},
|
| 1709 |
+
"node_modules/three": {
|
| 1710 |
+
"version": "0.166.1",
|
| 1711 |
+
"resolved": "https://registry.npmjs.org/three/-/three-0.166.1.tgz",
|
| 1712 |
+
"integrity": "sha512-LtuafkKHHzm61AQA1be2MAYIw1IjmhOUxhBa0prrLpEMWbV7ijvxCRHjSgHPGp2493wLBzwKV46tA9nivLEgKg==",
|
| 1713 |
+
"license": "MIT"
|
| 1714 |
+
},
|
| 1715 |
+
"node_modules/tinyglobby": {
|
| 1716 |
+
"version": "0.2.15",
|
| 1717 |
+
"resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz",
|
| 1718 |
+
"integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==",
|
| 1719 |
+
"dev": true,
|
| 1720 |
+
"license": "MIT",
|
| 1721 |
+
"dependencies": {
|
| 1722 |
+
"fdir": "^6.5.0",
|
| 1723 |
+
"picomatch": "^4.0.3"
|
| 1724 |
+
},
|
| 1725 |
+
"engines": {
|
| 1726 |
+
"node": ">=12.0.0"
|
| 1727 |
+
},
|
| 1728 |
+
"funding": {
|
| 1729 |
+
"url": "https://github.com/sponsors/SuperchupuDev"
|
| 1730 |
+
}
|
| 1731 |
+
},
|
| 1732 |
+
"node_modules/typescript": {
|
| 1733 |
+
"version": "5.8.3",
|
| 1734 |
+
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.3.tgz",
|
| 1735 |
+
"integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==",
|
| 1736 |
+
"dev": true,
|
| 1737 |
+
"license": "Apache-2.0",
|
| 1738 |
+
"bin": {
|
| 1739 |
+
"tsc": "bin/tsc",
|
| 1740 |
+
"tsserver": "bin/tsserver"
|
| 1741 |
+
},
|
| 1742 |
+
"engines": {
|
| 1743 |
+
"node": ">=14.17"
|
| 1744 |
+
}
|
| 1745 |
+
},
|
| 1746 |
+
"node_modules/undici-types": {
|
| 1747 |
+
"version": "6.21.0",
|
| 1748 |
+
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz",
|
| 1749 |
+
"integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==",
|
| 1750 |
+
"dev": true,
|
| 1751 |
+
"license": "MIT"
|
| 1752 |
+
},
|
| 1753 |
+
"node_modules/update-browserslist-db": {
|
| 1754 |
+
"version": "1.1.4",
|
| 1755 |
+
"resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.4.tgz",
|
| 1756 |
+
"integrity": "sha512-q0SPT4xyU84saUX+tomz1WLkxUbuaJnR1xWt17M7fJtEJigJeWUNGUqrauFXsHnqev9y9JTRGwk13tFBuKby4A==",
|
| 1757 |
+
"dev": true,
|
| 1758 |
+
"funding": [
|
| 1759 |
+
{
|
| 1760 |
+
"type": "opencollective",
|
| 1761 |
+
"url": "https://opencollective.com/browserslist"
|
| 1762 |
+
},
|
| 1763 |
+
{
|
| 1764 |
+
"type": "tidelift",
|
| 1765 |
+
"url": "https://tidelift.com/funding/github/npm/browserslist"
|
| 1766 |
+
},
|
| 1767 |
+
{
|
| 1768 |
+
"type": "github",
|
| 1769 |
+
"url": "https://github.com/sponsors/ai"
|
| 1770 |
+
}
|
| 1771 |
+
],
|
| 1772 |
+
"license": "MIT",
|
| 1773 |
+
"dependencies": {
|
| 1774 |
+
"escalade": "^3.2.0",
|
| 1775 |
+
"picocolors": "^1.1.1"
|
| 1776 |
+
},
|
| 1777 |
+
"bin": {
|
| 1778 |
+
"update-browserslist-db": "cli.js"
|
| 1779 |
+
},
|
| 1780 |
+
"peerDependencies": {
|
| 1781 |
+
"browserslist": ">= 4.21.0"
|
| 1782 |
+
}
|
| 1783 |
+
},
|
| 1784 |
+
"node_modules/util-deprecate": {
|
| 1785 |
+
"version": "1.0.2",
|
| 1786 |
+
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
|
| 1787 |
+
"integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==",
|
| 1788 |
+
"license": "MIT"
|
| 1789 |
+
},
|
| 1790 |
+
"node_modules/vite": {
|
| 1791 |
+
"version": "6.4.1",
|
| 1792 |
+
"resolved": "https://registry.npmjs.org/vite/-/vite-6.4.1.tgz",
|
| 1793 |
+
"integrity": "sha512-+Oxm7q9hDoLMyJOYfUYBuHQo+dkAloi33apOPP56pzj+vsdJDzr+j1NISE5pyaAuKL4A3UD34qd0lx5+kfKp2g==",
|
| 1794 |
+
"dev": true,
|
| 1795 |
+
"license": "MIT",
|
| 1796 |
+
"peer": true,
|
| 1797 |
+
"dependencies": {
|
| 1798 |
+
"esbuild": "^0.25.0",
|
| 1799 |
+
"fdir": "^6.4.4",
|
| 1800 |
+
"picomatch": "^4.0.2",
|
| 1801 |
+
"postcss": "^8.5.3",
|
| 1802 |
+
"rollup": "^4.34.9",
|
| 1803 |
+
"tinyglobby": "^0.2.13"
|
| 1804 |
+
},
|
| 1805 |
+
"bin": {
|
| 1806 |
+
"vite": "bin/vite.js"
|
| 1807 |
+
},
|
| 1808 |
+
"engines": {
|
| 1809 |
+
"node": "^18.0.0 || ^20.0.0 || >=22.0.0"
|
| 1810 |
+
},
|
| 1811 |
+
"funding": {
|
| 1812 |
+
"url": "https://github.com/vitejs/vite?sponsor=1"
|
| 1813 |
+
},
|
| 1814 |
+
"optionalDependencies": {
|
| 1815 |
+
"fsevents": "~2.3.3"
|
| 1816 |
+
},
|
| 1817 |
+
"peerDependencies": {
|
| 1818 |
+
"@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0",
|
| 1819 |
+
"jiti": ">=1.21.0",
|
| 1820 |
+
"less": "*",
|
| 1821 |
+
"lightningcss": "^1.21.0",
|
| 1822 |
+
"sass": "*",
|
| 1823 |
+
"sass-embedded": "*",
|
| 1824 |
+
"stylus": "*",
|
| 1825 |
+
"sugarss": "*",
|
| 1826 |
+
"terser": "^5.16.0",
|
| 1827 |
+
"tsx": "^4.8.1",
|
| 1828 |
+
"yaml": "^2.4.2"
|
| 1829 |
+
},
|
| 1830 |
+
"peerDependenciesMeta": {
|
| 1831 |
+
"@types/node": {
|
| 1832 |
+
"optional": true
|
| 1833 |
+
},
|
| 1834 |
+
"jiti": {
|
| 1835 |
+
"optional": true
|
| 1836 |
+
},
|
| 1837 |
+
"less": {
|
| 1838 |
+
"optional": true
|
| 1839 |
+
},
|
| 1840 |
+
"lightningcss": {
|
| 1841 |
+
"optional": true
|
| 1842 |
+
},
|
| 1843 |
+
"sass": {
|
| 1844 |
+
"optional": true
|
| 1845 |
+
},
|
| 1846 |
+
"sass-embedded": {
|
| 1847 |
+
"optional": true
|
| 1848 |
+
},
|
| 1849 |
+
"stylus": {
|
| 1850 |
+
"optional": true
|
| 1851 |
+
},
|
| 1852 |
+
"sugarss": {
|
| 1853 |
+
"optional": true
|
| 1854 |
+
},
|
| 1855 |
+
"terser": {
|
| 1856 |
+
"optional": true
|
| 1857 |
+
},
|
| 1858 |
+
"tsx": {
|
| 1859 |
+
"optional": true
|
| 1860 |
+
},
|
| 1861 |
+
"yaml": {
|
| 1862 |
+
"optional": true
|
| 1863 |
+
}
|
| 1864 |
+
}
|
| 1865 |
+
},
|
| 1866 |
+
"node_modules/yallist": {
|
| 1867 |
+
"version": "3.1.1",
|
| 1868 |
+
"resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
|
| 1869 |
+
"integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==",
|
| 1870 |
+
"dev": true,
|
| 1871 |
+
"license": "ISC"
|
| 1872 |
+
}
|
| 1873 |
+
}
|
| 1874 |
+
}
|
frontend/package.json
ADDED
@@ -0,0 +1,23 @@
{
  "name": "cross-modal-object-comparison-tool",
  "private": true,
  "version": "0.0.0",
  "type": "module",
  "scripts": {
    "dev": "vite",
    "build": "vite build",
    "preview": "vite preview"
  },
  "dependencies": {
    "react": "^19.2.0",
    "react-dom": "^19.2.0",
    "three": "0.166.1",
    "jszip": "3.10.1"
  },
  "devDependencies": {
    "@types/node": "^22.14.0",
    "@vitejs/plugin-react": "^5.0.0",
    "typescript": "~5.8.2",
    "vite": "^6.2.0"
  }
}
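The `dev`/`build`/`preview` scripts above assume a standard Vite project configuration. A minimal sketch of the kind of vite.config.ts that usually accompanies this setup is shown below; the file itself and the proxy target (a FastAPI backend on http://localhost:8000, as the comments in apiService.ts suggest for local development) are assumptions for illustration, not contents of this commit.

// Hypothetical vite.config.ts sketch — not included in this diff.
import { defineConfig } from 'vite';
import react from '@vitejs/plugin-react';

export default defineConfig({
  plugins: [react()],
  server: {
    // In development, forward API calls to the backend so the relative
    // '/api/...' paths used by apiService.ts keep working (assumed port 8000).
    proxy: {
      '/api': 'http://localhost:8000',
    },
  },
});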
frontend/services/apiService.ts
ADDED
@@ -0,0 +1,265 @@
import type { Dataset, DataItem, Modality, SingleComparisonResult, DatasetMetadata } from '../types';

// Define the base URL for your backend API.
// For local development, it might be 'http://localhost:8000'.
// When deployed on Hugging Face Spaces, it will be a relative path '/'.
const API_BASE_URL = '/';

/**
 * A helper function to handle API errors.
 */
const handleApiError = async (response: Response) => {
  if (!response.ok) {
    // Read the body exactly once: a fetch Response body can only be consumed
    // a single time, so calling response.json() and then falling back to
    // response.text() in the catch would throw "body stream already read".
    const rawBody = await response.text();
    let errorMessage = `HTTP error! status: ${response.status}`;
    try {
      const errorData = JSON.parse(rawBody);
      errorMessage = errorData.detail || JSON.stringify(errorData);
    } catch (e) {
      // The response was not JSON.
      errorMessage = rawBody || errorMessage;
    }
    throw new Error(errorMessage);
  }
  return response.json();
};

// Helper to correctly encode unicode strings to base64, which is required by the backend.
const unicodeToBase64 = (str: string) => {
  return btoa(
    encodeURIComponent(str).replace(/%([0-9A-F]{2})/g, (match, p1) => {
      return String.fromCharCode(parseInt(p1, 16));
    })
  );
};

const contentToBase64 = (content: string | ArrayBuffer, modality: Modality): Promise<string> => {
  return new Promise((resolve, reject) => {
    if (modality === 'text') {
      try {
        // Use the helper for proper unicode support.
        resolve(unicodeToBase64(content as string));
      } catch (error) {
        console.error("Failed to Base64 encode text content:", error);
        reject(new Error("Failed to encode text. Ensure it doesn't contain unsupported characters."));
      }
    } else if (typeof content === 'string') {
      // For images, content is a data URL; strip the "data:...;base64," prefix if present.
      const parts = content.split(',');
      resolve(parts.length > 1 ? parts[1] : content);
    } else if (content instanceof ArrayBuffer) {
      // For meshes: convert the raw bytes to a binary string, then base64-encode.
      const bytes = new Uint8Array(content);
      let binary = '';
      for (let i = 0; i < bytes.byteLength; i++) {
        binary += String.fromCharCode(bytes[i]);
      }
      resolve(btoa(binary));
    } else {
      reject(new Error('Unsupported content type for base64 conversion.'));
    }
  });
};
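
// Illustrative examples (added for this write-up, not part of the original file):
// unicodeToBase64 percent-encodes the string to UTF-8 bytes before btoa(), so
// non-Latin text survives the round trip to the backend, e.g.:
//   unicodeToBase64('Привет')  // -> '0J/RgNC40LLQtdGC'
// contentToBase64 strips the data-URL prefix from images and base64-encodes
// raw mesh bytes (the 'image' modality literal is assumed here):
//   await contentToBase64('data:image/png;base64,iVBOR...', 'image')  // -> 'iVBOR...'
//   await contentToBase64(new Uint8Array([77, 101]).buffer, 'mesh')   // -> 'TWU='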

/**
 * Post-processes data received from the backend to ensure correct frontend rendering.
 * - Converts raw Base64 image strings to Data URLs.
 * - Converts raw Base64 mesh strings to ArrayBuffers.
 */
const postProcessApiData = <T extends { content: string | ArrayBuffer }>(item: T, modality: 'images' | 'texts' | 'meshes'): T => {
  if (modality === 'images' && typeof item.content === 'string' && !item.content.startsWith('data:')) {
    item.content = `data:image/png;base64,${item.content}`;
  }
  if (modality === 'meshes' && typeof item.content === 'string') {
    const binaryString = atob(item.content);
    const len = binaryString.length;
    const bytes = new Uint8Array(len);
    for (let i = 0; i < len; i++) {
      bytes[i] = binaryString.charCodeAt(i);
    }
    item.content = bytes.buffer;
  }
  return item;
};

/**
 * Starts the dataset processing on the backend by uploading a .zip file.
 * @param file The .zip file to upload.
 * @returns A promise that resolves with a job ID for polling the status.
 */
export const startDatasetProcessing = async (file: File): Promise<string> => {
  const formData = new FormData();
  formData.append('file', file);

  const response = await fetch(`${API_BASE_URL}api/process-dataset`, {
    method: 'POST',
    body: formData,
  });

  const { job_id } = await handleApiError(response);
  if (!job_id) {
    throw new Error("API did not return a job ID.");
  }
  return job_id;
};

interface ProcessingStatus {
  status: 'starting' | 'processing' | 'complete' | 'error';
  stage?: string;
  progress?: number;
  message?: string;
  result?: Dataset;
}

/**
 * Polls the backend for the status of a dataset processing job.
 * @param jobId The ID of the job to check.
 * @returns A promise that resolves with the current status.
 */
export const getProcessingStatus = async (jobId: string): Promise<ProcessingStatus> => {
  const response = await fetch(`${API_BASE_URL}api/processing-status/${jobId}`);
  const status: ProcessingStatus = await handleApiError(response);

  // If the job is complete, post-process the resulting dataset data.
  if (status.status === 'complete' && status.result) {
    const processedDataset = status.result;
    // The backend returns a string for the date; convert it to a Date object.
    if (processedDataset.uploadDate && typeof processedDataset.uploadDate === 'string') {
      processedDataset.uploadDate = new Date(processedDataset.uploadDate);
    }

    // Ensure all data items have the correct format for frontend rendering.
    if (processedDataset.data) {
      if (processedDataset.data.images) {
        processedDataset.data.images = processedDataset.data.images.map((item: DataItem) => postProcessApiData(item, 'images'));
      }
      if (processedDataset.data.meshes) {
        processedDataset.data.meshes = processedDataset.data.meshes.map((item: DataItem) => postProcessApiData(item, 'meshes'));
      }
    }
    status.result = processedDataset;
  }

  return status;
};

/**
 * Sends a local dataset to the backend to populate its in-memory cache.
 * This is crucial for making comparisons after a page reload.
 * @param dataset The full local dataset object from IndexedDB.
 */
export const ensureDatasetInCache = async (dataset: Dataset): Promise<void> => {
  // The backend expects content as base64 or raw text, but mesh content is an ArrayBuffer.
  // Convert it before sending; images are already data URLs (strings).
  const payload = {
    ...dataset,
    data: {
      ...dataset.data,
      meshes: await Promise.all(dataset.data.meshes.map(async (mesh) => {
        if (mesh.content instanceof ArrayBuffer) {
          return { ...mesh, content: await contentToBase64(mesh.content, 'mesh') };
        }
        return mesh;
      })),
    }
  };

  const response = await fetch(`${API_BASE_URL}api/cache-local-dataset`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  });

  await handleApiError(response);
};

/**
 * Finds the top matches for a single item by querying the backend.
 * @param sourceItem The item to find matches for.
 * @param sourceModality The modality of the source item.
 * @param datasetId The ID of the dataset to search within.
 * @returns A promise that resolves with the comparison results.
 */
export const findTopMatches = async (
  sourceItem: DataItem,
  sourceModality: Modality,
  datasetId: string
): Promise<SingleComparisonResult> => {
  const contentAsBase64 = await contentToBase64(sourceItem.content, sourceModality);

  const requestBody = {
    modality: sourceModality,
    content: contentAsBase64,
    dataset_id: datasetId,
  };

  const response = await fetch(`${API_BASE_URL}api/find-matches`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(requestBody),
  });

  const result: SingleComparisonResult = await handleApiError(response);

  // The API returns a representation of the source item with raw base64.
  // Replace it with the original source item, which is already in the correct format for rendering.
  result.sourceItem = sourceItem;

  // Post-process all returned match items to ensure they render correctly.
  for (const key of Object.keys(result.results)) {
    const modalityKey = key as 'images' | 'texts' | 'meshes';
    const matches = result.results[modalityKey];
    if (matches) {
      matches.forEach(match => {
        postProcessApiData(match.item, modalityKey);
      });
    }
  }

  return result;
};

// --- Service functions for SHARED datasets ---

/**
 * Returns the metadata for all available shared datasets by querying the backend.
 */
export const getSharedDatasetMetadata = async (): Promise<DatasetMetadata[]> => {
  try {
    const response = await fetch(`${API_BASE_URL}api/shared-dataset-metadata`);
    const metadataList = await handleApiError(response);

    // The backend returns strings for dates; convert them to Date objects.
    return metadataList.map((meta: any) => ({
      ...meta,
      uploadDate: new Date(meta.uploadDate),
    }));
  } catch (error) {
    console.error("Failed to fetch shared dataset metadata:", error);
    // Re-throw the error so the UI layer can handle it.
    throw error;
  }
};

/**
 * Returns the full data structure for a specific shared dataset from the backend.
 * The content for each item remains null; only the URLs are provided.
 */
export const getSharedDataset = async (id: string): Promise<Dataset | null> => {
  try {
    const response = await fetch(`${API_BASE_URL}api/shared-dataset?id=${id}`);
    const dataset = await handleApiError(response);

    // Convert the date string from the API to a Date object.
    dataset.uploadDate = new Date(dataset.uploadDate);

    return dataset;
  } catch (error) {
    console.error(`Failed to fetch shared dataset with id ${id}:`, error);
    // Re-throw the error so the UI layer can handle it.
    throw error;
  }
};
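
Taken together, the service functions above form a three-step flow: upload a dataset archive, poll the processing job until it finishes, then run cross-modal queries against the cached dataset. A minimal sketch of how a UI component might drive that flow follows; the `id` and `data.texts` fields on Dataset and the 'text' modality literal are assumptions inferred from how the types are used in this file, not definitions shown in this commit.

// Illustrative usage sketch — not part of the commit.
import { startDatasetProcessing, getProcessingStatus, findTopMatches } from './apiService';

async function uploadAndQuery(zipFile: File): Promise<void> {
  // 1. Upload the archive and obtain a job ID.
  const jobId = await startDatasetProcessing(zipFile);

  // 2. Poll every 2 seconds until the backend reports completion or failure.
  let status = await getProcessingStatus(jobId);
  while (status.status === 'starting' || status.status === 'processing') {
    await new Promise((resolve) => setTimeout(resolve, 2000));
    status = await getProcessingStatus(jobId);
  }
  if (status.status !== 'complete' || !status.result) {
    throw new Error(status.message ?? 'Dataset processing failed.');
  }

  // 3. Query the processed dataset, here using its first text item as the source.
  const dataset = status.result;
  const query = dataset.data.texts[0];          // assumed field, see note above
  const comparison = await findTopMatches(query, 'text', dataset.id);
  console.log(comparison.results);
}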