Lyyy3 committed
Commit ada9628 · verified · 1 Parent(s): 1fd6f80

Upload 54 files

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.

Files changed (50)
  1. .gitattributes +3 -0
  2. Anchor.py +42 -0
  3. LICENSE +21 -0
  4. README.md +124 -3
  5. data_preprocess/Anchor_mean_w_h.py +29 -0
  6. data_preprocess/Move_img.py +19 -0
  7. data_preprocess/copy_files.py +22 -0
  8. data_preprocess/data_augmentation/img/PCA.py +71 -0
  9. data_preprocess/data_augmentation/img/gaussian_noise.py +36 -0
  10. data_preprocess/data_augmentation/img/horizontal_flip.py +29 -0
  11. data_preprocess/data_augmentation/img/rotate_image.py +25 -0
  12. data_preprocess/data_augmentation/img/vertical_flip.py +29 -0
  13. data_preprocess/data_augmentation/label/horizontal_flip.py +93 -0
  14. data_preprocess/data_augmentation/label/original_img.py +94 -0
  15. data_preprocess/data_augmentation/label/rotate_180.py +95 -0
  16. data_preprocess/data_augmentation/label/rotate_270.py +100 -0
  17. data_preprocess/data_augmentation/label/rotate_90.py +102 -0
  18. data_preprocess/data_augmentation/label/vertical_flip.py +93 -0
  19. data_preprocess/rearrangement.py +33 -0
  20. data_preprocess/resize_to_416.py +39 -0
  21. data_preprocess/segment_img.py +20 -0
  22. data_preprocess/train_test_split.py +54 -0
  23. detected_img/complex/000035r.png +3 -0
  24. detected_img/multi-object/multi-object.png +3 -0
  25. detected_img/simple/000009r.png +3 -0
  26. draw_function.py +71 -0
  27. draw_single_box.py +76 -0
  28. evaluate_acc.py +152 -0
  29. grasp_detect_multibox.py +111 -0
  30. grasp_detect_singlebox.py +100 -0
  31. log/training_logs.txt +0 -0
  32. model.py +884 -0
  33. model_config.py +176 -0
  34. picture/RAGT.pdf +0 -0
  35. picture/RAGT.png +3 -0
  36. picture/RARA.pdf +0 -0
  37. picture/RARA.png +3 -0
  38. picture/RAST.pdf +0 -0
  39. picture/RAST.png +3 -0
  40. picture/annotation.pdf +3 -0
  41. picture/annotation.png +3 -0
  42. picture/dataset.pdf +3 -0
  43. picture/dataset.png +3 -0
  44. picture/detected-multi-obj.pdf +3 -0
  45. picture/detected-multi-obj.png +3 -0
  46. picture/detected-single-obj.pdf +0 -0
  47. picture/detected-single-obj.png +3 -0
  48. pretrained_weights/mobilevit_s.pt +3 -0
  49. similarity.py +76 -0
  50. to_yolo_dataset.py +99 -0
.gitattributes CHANGED
@@ -57,3 +57,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+picture/annotation.pdf filter=lfs diff=lfs merge=lfs -text
+picture/dataset.pdf filter=lfs diff=lfs merge=lfs -text
+picture/detected-multi-obj.pdf filter=lfs diff=lfs merge=lfs -text
Anchor.py ADDED
@@ -0,0 +1,42 @@
import numpy as np


anchor_thetas = [x*0.2094 for x in range(15)]
# anchor width
anchor_w = 85.72
# anchor height
anchor_h = 19.15
# number of anchors per grid cell
num_anchors = 3
# number of downsampling steps before the output layer
times_of_down_sampling = 5
# input image size
img_size = 416
# keeps the angle offset from being exactly 0
Anchor_eps = 0.000001


field_of_grid_cell = 2 ** times_of_down_sampling
num_grid_cell = int(img_size / field_of_grid_cell)
theta_margin = 180 / num_anchors


# if __name__ == '__main__':
#     print(field_of_grid_cell)
#     print(anchor_thetas)
#     print(3//0.2094)
#     a = np.arange(16).reshape((4, 4))
#     print(33.2%16.4)
#     a = np.arange(10*26*26*15*6).reshape((10, 26, 26, 15, 6))
#     b = []
#     for i in a:
#         b.append(i)
#     # b = np.array(b)
#     print(type(b[0]))
#     # print(b.shape)
#     c = np.arange(16).reshape((4, 4))
#     d = []
#     for i in c:
#         d.append(i)
#     print(d)
#     print(np.array(d))
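A note on how these constants fit together: with `times_of_down_sampling = 5`, each grid cell covers 32x32 pixels of the 416x416 input, giving a 13x13 grid, and `theta_margin = 180 / num_anchors` splits the 0-180 degree orientation range into `num_anchors` rotation-anchor bins. The sketch below is for illustration only and is not part of the upload; the actual target assignment used for training lives in `model.py`, so treat this binning rule as an assumption.

    # Illustrative sketch only: assigns a grasp angle to a rotation-anchor bin
    # of width theta_margin, as implied by the constants in Anchor.py.
    # The real assignment used by the RAGT/RARA/RAST models is implemented in model.py.
    from Anchor import num_anchors, theta_margin, Anchor_eps

    def assign_angle_bin(theta_deg):
        """Return (anchor index, offset inside the bin) for an angle in degrees."""
        theta_deg = theta_deg % 180                        # grasp angles repeat every 180 degrees
        bin_index = int(theta_deg // theta_margin)         # which rotation anchor the angle falls into
        bin_index = min(bin_index, num_anchors - 1)        # guard against edge cases near 180 degrees
        offset = theta_deg - bin_index * theta_margin + Anchor_eps  # non-zero offset from the bin start
        return bin_index, offset

    if __name__ == '__main__':
        for theta in (10.0, 75.0, 170.0):
            print(theta, assign_angle_bin(theta))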
LICENSE ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023 kimitlte

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
README.md CHANGED
@@ -1,3 +1,124 @@
- ---
- license: apache-2.0
- ---

This is the repository for the **NBMOD** dataset (Noisy Background Multi-Object Dataset for grasp detection) and the code of the paper [***NBMOD: Find It and Grasp It in Noisy Background***](https://arxiv.org/abs/2306.10265).

# Introduction
We propose a dataset called **NBMOD** (Noisy Background Multi-Object Dataset for grasp detection) consisting of **31,500 RGB-D images**, composed of three subsets: the Simple background Single-object Subset (**SSS**), the Noisy background Single-object Subset (**NSS**), and the Multi-Object grasp detection Subset (**MOS**). The SSS subset contains 13,500 images, the NSS subset 13,000 images, and the MOS subset 5,000 images.

Unlike the well-known Cornell dataset, the backgrounds in NBMOD are no longer simple whiteboards. The NSS and MOS subsets contain a substantial number of images with noise, i.e. interfering objects unrelated to the targets of grasp detection. Moreover, in the MOS subset each image contains multiple target objects for grasp detection, which closely resembles real-world working environments.

To enhance grasp detection, we propose a novel mechanism called **RAM** (Rotation Anchor Mechanism) and design three detection network architectures: **RARA** (network with Rotation Anchor and Region Attention), **RAST** (network with Rotation Anchor and Semi Transformer), and **RAGT** (network with Rotation Anchor and Global Transformer). These architectures aim to improve the accuracy and robustness of grasp detection by combining rotation-anchor-based methods with attention mechanisms.

Some samples of NBMOD are shown in the following figure:

![image](picture/dataset.png)

Annotations of some samples in NBMOD are illustrated in the following figure:

![image](picture/annotation.png)


# Model Architectures
The architectures of the RAST, RARA, and RAGT models are depicted in the following figures:

**RAST:**

![image](picture/RAST.png)

**RARA:**

![image](picture/RARA.png)

**RAGT:**

![image](picture/RAGT.png)


# Detection Results
Detection results on SSS and NSS:

![image](picture/detected-single-obj.png)

Detection results on MOS:

![image](picture/detected-multi-obj.png)


# Requirements
Our experimental setup is as follows:

    python                     3.9.7
    torch.version.cuda         11.3
    torch                      1.12.1+cu113
    torchaudio                 0.12.1+cpu
    torchdata                  0.6.0
    torchinfo                  1.7.2
    torchstat                  0.0.7
    torchsummary               1.5.1
    torchvision                0.13.1+cu113
    torchviz                   0.0.2
    tornado                    6.2
    tqdm                       4.65.0
    thop                       0.1.1-2209072238
    tensorboard                2.9.1
    tensorboard-data-server    0.6.1
    tensorboard-plugin-wit     1.8.1
    tensorboardx               2.5.1
    opencv-contrib-python      4.7.0.72
    opencv-python              4.7.0.72

    CUDA Version               11.2


# Download NBMOD and Model Weights
Currently, the open-source code available is for the RAGT-3/3 model. You can modify the variable `num_anchors` in the `Anchor.py` file to change the number of anchors in each grid cell. The code for the RAST and RARA models will be uploaded soon.

The NBMOD dataset is available at [NBMOD](https://pan.baidu.com/s/1kHtTKYkqFciJpfiMkEENaQ); the extraction password is 6666.

The model weights are available at [Weights](https://pan.baidu.com/s/18tAB5Yuu0yAJiyQvjE2vJw); the extraction password is 6666.

If you are unable to download the NBMOD dataset and model weights from the links above, you can try the following links instead: [NBMOD](https://drive.google.com/drive/folders/1zresLaQZc3DEP2m_Eo0w_kxT_6cHx9vN?usp=sharing) and [Weights](https://drive.google.com/drive/folders/1VKq7kK126RB0kNulP9JeY6soFN9sRp5O?usp=sharing).


# Training & Testing
The images in NBMOD have a resolution of 640x480, and the label files are in XML format.

If you want to use our code, you can follow our training and testing process:

1) The 640x480 images are padded with zeros to 640x640 and then resized to 416x416. You can use the `resize_to_416.py` file in the `\data_preprocess\` directory to complete this step.

2) Use the `original_img.py` file in the `\data_preprocess\data_augmentation\label\` directory to parse the coordinates of the oriented bounding boxes from the XML files into TXT files. In each TXT file the coordinates form an n x 5 array, where n is the number of annotated bounding boxes in the image and 5 corresponds to the five parameters of the five-dimensional grasp representation.

For example:

    x1 y1 w1 h1 theta1
    x2 y2 w2 h2 theta2
    x3 y3 w3 h3 theta3
    ......
    xn yn wn hn thetan
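To make the format concrete, here is a minimal loading sketch (not part of the repository). The file name is a placeholder, and it assumes the angle is stored in radians, as written by `original_img.py` and read back by `evaluate_acc.py`:

    import numpy as np

    # Load an n x 5 label file: each row is (x, y, w, h, theta), with theta in radians.
    labels = np.loadtxt('000001r.txt').reshape(-1, 5)  # reshape keeps a single-row file two-dimensional
    for x, y, w, h, theta in labels:
        print(f'center=({x:.1f}, {y:.1f}) size=({w:.1f}, {h:.1f}) angle={np.degrees(theta):.1f} deg')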
During the experiment, to accelerate training, we performed data augmentation before training rather than augmenting the data on the fly. Under the `\data_preprocess\data_augmentation` directory there are additional code files for data augmentation, which transform both the images and the coordinates. If you need to perform data augmentation, you can refer to these Python programs.

The `rearrangement.py` file in the `\data_preprocess\` directory renumbers the augmented data after the augmentation process.

3) To start training, modify the paths of the dataset's images and labels in the `train_grasp.py` file. You can also set your desired batch size, loss function weights, and number of training epochs. Once these modifications are done, run `train_grasp.py` to begin training.

In the experiments we used the AdamW optimizer with its default parameters. You can modify `train_grasp.py` to implement a more fine-grained training strategy according to your requirements.

4) The `evaluate_acc.py` file is used to test the accuracy of the model. After modifying the paths of the test dataset's images and labels, as well as the model's weight path, you can run the file to evaluate the accuracy of the model (a prediction is counted as correct when its Jaccard index with a ground-truth box is at least 0.25 and the angle difference is at most 30 degrees; see `evaluate_acc.py`).

Please ensure that the test dataset's image and label formats remain consistent with those of the training dataset.

The `draw_single_box.py` script visualizes the detection results by drawing only the bounding box with the highest confidence, while `grasp_detect_multibox.py` visualizes all predicted boxes with confidence scores above a specified threshold.


# Citation
A paper explaining NBMOD and our models is available on [arXiv](https://arxiv.org/abs/2306.10265).

If you use this library or find the documentation useful for your research, please consider citing:

    @article{cao2023nbmod,
      title={NBMOD: Find It and Grasp It in Noisy Background},
      author={Cao, Boyuan and Zhou, Xinyu and Guo, Congmin and Zhang, Baohua and Liu, Yuchen and Tan, Qianqiu},
      journal={arXiv preprint arXiv:2306.10265},
      year={2023}
    }
data_preprocess/Anchor_mean_w_h.py ADDED
@@ -0,0 +1,29 @@
import os
import numpy as np


def compute_column_mean(folder_path):
    column_sum_3 = 0
    column_sum_4 = 0
    total_rows = 0

    for file_name in os.listdir(folder_path):
        if file_name.endswith('.txt'):
            file_path = os.path.join(folder_path, file_name)
            data = np.loadtxt(file_path)

            column_sum_3 += np.sum(data[:, 2])  # sum of column 3 (box width)
            column_sum_4 += np.sum(data[:, 3])  # sum of column 4 (box height)
            total_rows += data.shape[0]  # accumulate the row count

    mean_3 = column_sum_3 / total_rows
    mean_4 = column_sum_4 / total_rows

    return mean_3, mean_4


if __name__ == "__main__":
    folder_path = r"J:\experiment_data\1 origin\label"
    mean_3, mean_4 = compute_column_mean(folder_path)
    print("Mean of column 3: ", mean_3)
    print("Mean of column 4: ", mean_4)
data_preprocess/Move_img.py ADDED
@@ -0,0 +1,19 @@
import os
import shutil

def copy_png_files(src_folder, dest_folder):
    if not os.path.exists(dest_folder):
        os.makedirs(dest_folder)

    for file_name in os.listdir(src_folder):
        if file_name.endswith(".png"):
            src_file_path = os.path.join(src_folder, file_name)
            dest_file_path = os.path.join(dest_folder, file_name)
            shutil.copy(src_file_path, dest_file_path)
            print(f'Copied {src_file_path} to {dest_file_path}')

if __name__ == "__main__":
    source_folder = r'D:\Fruit_rd\img\a_bunch_of_bananas'
    destination_folder = r'J:\data1-RGB\img\a_bunch_of_bananas'

    copy_png_files(source_folder, destination_folder)
data_preprocess/copy_files.py ADDED
@@ -0,0 +1,22 @@
import os
import shutil


def copy_files(src_folder, dest_folder):
    if not os.path.exists(dest_folder):
        os.makedirs(dest_folder)

    for file_name in os.listdir(src_folder):
        print(file_name)
        src_file_path = os.path.join(src_folder, file_name)
        dest_file_path = os.path.join(dest_folder, file_name)

        if os.path.isfile(src_file_path):
            shutil.copy2(src_file_path, dest_file_path)


if __name__ == "__main__":
    src_folder = r"J:\experiment_data\8 r270\label"  # replace with the source folder path
    dest_folder = r"J:\experiment_data\Train_Augmented_data\label"  # replace with the destination folder path

    copy_files(src_folder, dest_folder)
data_preprocess/data_augmentation/img/PCA.py ADDED
@@ -0,0 +1,71 @@
import os
import cv2
import numpy as np
from numpy import linalg
import random
from PIL import Image
from sklearn.decomposition import PCA


def pca_color_augmentation(image_array):
    '''
    Image augmentation: PCA jitter
    :param image_array: input image array
    :return img2: image array augmented with PCA jitter
    '''
    assert image_array.dtype == 'uint8'
    assert image_array.ndim == 3
    # the input image is expected to be a three-channel array of shape (w, h, 3)

    img1 = image_array.astype('float32') / 255.0
    # compute the mean and standard deviation over the R, G, B channels
    mean = img1.mean(axis=0).mean(axis=0)
    std = img1.reshape((-1, 3)).std()  # note: img1.std(axis=0).std(axis=0) must not be used here

    # standardize the image per channel (zero mean, unit variance)
    img1 = (img1 - mean) / (std)

    # flatten the image into three long columns, one per channel
    img1 = img1.reshape((-1, 3))

    # PCA on the flattened matrix
    # covariance matrix
    cov = np.cov(img1, rowvar=False)
    # eigenvalues and eigenvectors of the covariance matrix
    eigValue, eigVector = linalg.eig(cov)

    # jitter coefficients (drawn from a normal distribution with mean 0 and std 0.08)
    rand = np.array([random.normalvariate(0, 0.08) for i in range(3)])
    jitter = np.dot(eigVector, eigValue * rand)

    jitter = (jitter * 255).astype(np.int32)[np.newaxis, np.newaxis, :]

    img2 = np.clip(image_array + jitter, 0, 255)

    return img2


def process_images(input_folder, output_folder, alpha_std=0.1, seed=None):
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    file_list = [f for f in os.listdir(input_folder) if f.endswith(".png")]

    for file_name in file_list:
        input_image_path = os.path.join(input_folder, file_name)
        output_image_path = os.path.join(output_folder, file_name)

        image = cv2.imread(input_image_path)
        augmented_image = pca_color_augmentation(image)

        cv2.imwrite(output_image_path, augmented_image)
        print(output_image_path)


if __name__ == "__main__":
    input_folder = r'J:\experiment_data\1 origin\img'
    output_folder = r'J:\experiment_data\3 PCA_illumination\img'
    alpha_std = 0.1  # adjust the strength of the illumination change as needed
    seed = 42  # set to any integer to change the random seed, or to None to use a random seed

    process_images(input_folder, output_folder, alpha_std, seed)
data_preprocess/data_augmentation/img/gaussian_noise.py ADDED
@@ -0,0 +1,36 @@
import os
import cv2
import numpy as np


def add_gaussian_noise(image, mean=0, sigma=25):
    height, width, channels = image.shape
    noise = np.random.normal(mean, sigma, (height, width, channels))
    noisy_image = image + noise
    noisy_image = np.clip(noisy_image, 0, 255).astype(np.uint8)
    return noisy_image


def add_noise_to_folder(input_folder, output_folder, mean=0, sigma=25):
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    file_list = [f for f in os.listdir(input_folder) if f.endswith(".png")]

    for file_name in file_list:
        input_image_path = os.path.join(input_folder, file_name)
        output_image_path = os.path.join(output_folder, file_name)

        image = cv2.imread(input_image_path)
        noisy_image = add_gaussian_noise(image, mean, sigma)

        cv2.imwrite(output_image_path, noisy_image)


if __name__ == "__main__":
    input_folder = r'J:\experiment_data\0 train_test_split\train\img'
    output_folder = r'J:\experiment_data\2 Gs_noise\img'
    mean = 0
    sigma = 25  # adjust the noise strength as needed

    add_noise_to_folder(input_folder, output_folder, mean, sigma)
data_preprocess/data_augmentation/img/horizontal_flip.py ADDED
@@ -0,0 +1,29 @@
import os
import cv2


def horizontal_flip(image):
    return cv2.flip(image, 1)


def process_images(input_folder, output_folder):
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    file_list = [f for f in os.listdir(input_folder) if f.endswith(".png")]

    for file_name in file_list:
        input_image_path = os.path.join(input_folder, file_name)
        output_image_path = os.path.join(output_folder, file_name)

        image = cv2.imread(input_image_path)
        flipped_image = horizontal_flip(image)

        cv2.imwrite(output_image_path, flipped_image)


if __name__ == "__main__":
    input_folder = r'J:\experiment_data\1 origin\img'
    output_folder = r'J:\experiment_data\4 horizontal\img'

    process_images(input_folder, output_folder)
data_preprocess/data_augmentation/img/rotate_image.py ADDED
@@ -0,0 +1,25 @@
import os
from PIL import Image


def rotate_images(input_folder, output_folder):
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    file_list = [f for f in os.listdir(input_folder) if f.endswith(".png")]

    for file_name in file_list:
        print(file_name)
        src_image_path = os.path.join(input_folder, file_name)
        dest_image_path = os.path.join(output_folder, file_name)

        img = Image.open(src_image_path)
        rotated_img = img.rotate(-270)  # rotate 270 degrees clockwise (PIL rotates counter-clockwise, hence the negative angle)
        rotated_img.save(dest_image_path)


if __name__ == "__main__":
    input_folder = r'J:\experiment_data\1 origin\img'
    output_folder = r'J:\experiment_data\8 r270\img'

    rotate_images(input_folder, output_folder)
data_preprocess/data_augmentation/img/vertical_flip.py ADDED
@@ -0,0 +1,29 @@
import os
import cv2


def vertical_flip(image):
    return cv2.flip(image, 0)


def process_images(input_folder, output_folder):
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    file_list = [f for f in os.listdir(input_folder) if f.endswith(".png")]

    for file_name in file_list:
        input_image_path = os.path.join(input_folder, file_name)
        output_image_path = os.path.join(output_folder, file_name)

        image = cv2.imread(input_image_path)
        flipped_image = vertical_flip(image)

        cv2.imwrite(output_image_path, flipped_image)


if __name__ == "__main__":
    input_folder = r'J:\experiment_data\1 origin\img'
    output_folder = r'J:\experiment_data\5 vertical\img'

    process_images(input_folder, output_folder)
data_preprocess/data_augmentation/label/horizontal_flip.py ADDED
@@ -0,0 +1,93 @@
import os
import os.path
import xml.etree.ElementTree as ET
import glob
import numpy as np


def center_to_vertice(x, y, w, h, angle):  # convert grasp parameters to the four corner points of the grasp rectangle for display
    theta = angle
    vertice = np.zeros((4, 2))
    vertice[0] = (x - w / 2 * np.cos(theta) + h / 2 * np.sin(theta), y - w / 2 * np.sin(theta) - h / 2 * np.cos(theta))
    vertice[1] = (x + w / 2 * np.cos(theta) + h / 2 * np.sin(theta), y + w / 2 * np.sin(theta) - h / 2 * np.cos(theta))
    vertice[2] = (x + w / 2 * np.cos(theta) - h / 2 * np.sin(theta), y + w / 2 * np.sin(theta) + h / 2 * np.cos(theta))
    vertice[3] = (x - w / 2 * np.cos(theta) - h / 2 * np.sin(theta), y - w / 2 * np.sin(theta) + h / 2 * np.cos(theta))
    for i in range(0, 2):
        for j in range(0, 4):
            vertice[j][i] = round(vertice[j][i], 3)
    return vertice


def xml_to_txt(xml_path, txt_path):
    os.chdir(xml_path)  # change the working directory to the xml folder
    annotations = os.listdir('.')  # list the files and folders under the path
    annotations = glob.glob(str(annotations) + '*.xml')  # collect the matching xml file paths
    fileList = os.listdir(xml_path)
    print(fileList)
    k = 0
    # enumerate() pairs each item of an iterable (list, tuple, string, ...) with its index
    for i, file in enumerate(annotations):
        in_file = open(file)  # open the xml file
        tree = ET.parse(in_file)  # parse the xml file with ElementTree
        root = tree.getroot()  # get the root node of the tree
        # filename = root.find('filename').text
        # s = filename[2:]
        # s = s.zfill(5)
        # file_save = s + '.txt'
        # file_save = filename + '.txt'
        file_save = fileList[k][:-4] + '.txt'
        k = k + 1
        print(file_save)
        file_txt = os.path.join(txt_path, file_save)
        f_w = open(file_txt, 'w')
        for obj2 in root.iter('object'):
            # current = list()
            # class_num = class_names.index(name)
            xmlbox1 = obj2.find('robndbox')
            x = xmlbox1.find('cx').text
            y = xmlbox1.find('cy').text
            width = xmlbox1.find('w').text
            height = xmlbox1.find('h').text
            angle = xmlbox1.find('angle').text
            x = float(x)
            x = x * 0.65
            y = float(y)
            y = y * 0.65
            width = float(width)
            width = width * 0.65
            height = float(height)
            height = height * 0.65
            angle = float(angle)
            if height > width:
                exchange = width
                width = height
                height = exchange
                angle = angle + 3.1415926/2
                if angle >= 3.1415926:
                    angle = angle - 3.1415926
            angle = 3.1415926 - angle  # horizontal (left-right) flip
            x = 416 - x  # horizontal (left-right) flip
            # angle = np.tan(angle)
            # if angle<0:
            #     angle = max(-15,angle)
            # if angle>=0:
            #     angle = min(15,angle)
            f_w.write(str(x) + ' ' + str(y) + ' ' + str(width) + ' ' + str(height) + ' ' + str(angle) + '\n')

            # f_w.write(str(bbox[0][0]) + ' ' + str(bbox[0][1]) + '\n' +
            #           str(bbox[1][0]) + ' ' + str(bbox[1][1]) + '\n' +
            #           str(bbox[2][0]) + ' ' + str(bbox[2][1]) + '\n' +
            #           str(bbox[3][0]) + ' ' + str(bbox[3][1]) + '\n')


if __name__ == '__main__':
    xml_path = r'J:\experiment_data\0 train_test_split\train\label'  # path to the xml files
    txt_path = r'J:\experiment_data\4 horizontal\label'  # path to the txt files

    os.chdir(xml_path)  # change the working directory to the xml folder
    annotations = os.listdir('.')  # list the files and folders under the path
    annotations = glob.glob(str(annotations) + '*.xml')  # collect the matching xml file paths
    # for i, element in enumerate(annotations):
    #     print(i, element)

    xml_to_txt(xml_path, txt_path)
data_preprocess/data_augmentation/label/original_img.py ADDED
@@ -0,0 +1,94 @@
import os
import os.path
import xml.etree.ElementTree as ET
import glob
import numpy as np


def center_to_vertice(x, y, w, h, angle):  # convert grasp parameters to the four corner points of the grasp rectangle for display
    theta = angle
    vertice = np.zeros((4, 2))
    vertice[0] = (x - w / 2 * np.cos(theta) + h / 2 * np.sin(theta), y - w / 2 * np.sin(theta) - h / 2 * np.cos(theta))
    vertice[1] = (x + w / 2 * np.cos(theta) + h / 2 * np.sin(theta), y + w / 2 * np.sin(theta) - h / 2 * np.cos(theta))
    vertice[2] = (x + w / 2 * np.cos(theta) - h / 2 * np.sin(theta), y + w / 2 * np.sin(theta) + h / 2 * np.cos(theta))
    vertice[3] = (x - w / 2 * np.cos(theta) - h / 2 * np.sin(theta), y - w / 2 * np.sin(theta) + h / 2 * np.cos(theta))
    for i in range(0, 2):
        for j in range(0, 4):
            vertice[j][i] = round(vertice[j][i], 3)
    return vertice


def xml_to_txt(xml_path, txt_path):
    os.chdir(xml_path)  # change the working directory to the xml folder
    annotations = os.listdir('.')  # list the files and folders under the path
    annotations = glob.glob(str(annotations) + '*.xml')  # collect the matching xml file paths
    fileList = os.listdir(xml_path)
    print(fileList)
    k = 0
    # enumerate() pairs each item of an iterable (list, tuple, string, ...) with its index
    for i, file in enumerate(annotations):
        in_file = open(file)  # open the xml file
        tree = ET.parse(in_file)  # parse the xml file with ElementTree
        root = tree.getroot()  # get the root node of the tree
        # filename = root.find('filename').text
        # s = filename
        # s = filename[2:]
        # s = s.zfill(6)
        # file_save = s + '.txt'
        # file_save = filename + '.txt'
        file_save = fileList[k][:-4] + '.txt'
        k = k + 1
        print(file_save)
        file_txt = os.path.join(txt_path, file_save)
        f_w = open(file_txt, 'w')
        for obj2 in root.iter('object'):
            # current = list()
            # class_num = class_names.index(name)
            xmlbox1 = obj2.find('robndbox')
            x = xmlbox1.find('cx').text
            y = xmlbox1.find('cy').text
            width = xmlbox1.find('w').text
            height = xmlbox1.find('h').text
            angle = xmlbox1.find('angle').text
            x = float(x)
            x = x * 0.65
            y = float(y)
            y = y * 0.65
            width = float(width)
            width = width * 0.65
            height = float(height)
            height = height * 0.65
            angle = float(angle)
            if height > width:
                exchange = width
                width = height
                height = exchange
                angle = angle + 3.1415926/2
                if angle >= 3.1415926:
                    angle = angle - 3.1415926

            # angle = np.tan(angle)
            # if angle<0:
            #     angle = max(-15,angle)
            # if angle>=0:
            #     angle = min(15,angle)

            f_w.write(str(x) + ' ' + str(y) + ' ' + str(width) + ' ' + str(height) + ' ' + str(angle) + '\n')

            # f_w.write(str(bbox[0][0]) + ' ' + str(bbox[0][1]) + '\n' +
            #           str(bbox[1][0]) + ' ' + str(bbox[1][1]) + '\n' +
            #           str(bbox[2][0]) + ' ' + str(bbox[2][1]) + '\n' +
            #           str(bbox[3][0]) + ' ' + str(bbox[3][1]) + '\n')


if __name__ == '__main__':
    xml_path = r'J:\experiment_data\0 train_test_split\test\single-complex\label'  # path to the xml files
    txt_path = r'J:\experiment_data\0.1 test\single-complex\label'  # path to the txt files

    os.chdir(xml_path)  # change the working directory to the xml folder
    annotations = os.listdir('.')  # list the files and folders under the path
    annotations = glob.glob(str(annotations) + '*.xml')  # collect the matching xml file paths
    # for i, element in enumerate(annotations):
    #     print(i, element)

    xml_to_txt(xml_path, txt_path)
data_preprocess/data_augmentation/label/rotate_180.py ADDED
@@ -0,0 +1,95 @@
import os
import os.path
import xml.etree.ElementTree as ET
import glob
import numpy as np


def center_to_vertice(x, y, w, h, angle):  # convert grasp parameters to the four corner points of the grasp rectangle for display
    theta = angle
    vertice = np.zeros((4, 2))
    vertice[0] = (x - w / 2 * np.cos(theta) + h / 2 * np.sin(theta), y - w / 2 * np.sin(theta) - h / 2 * np.cos(theta))
    vertice[1] = (x + w / 2 * np.cos(theta) + h / 2 * np.sin(theta), y + w / 2 * np.sin(theta) - h / 2 * np.cos(theta))
    vertice[2] = (x + w / 2 * np.cos(theta) - h / 2 * np.sin(theta), y + w / 2 * np.sin(theta) + h / 2 * np.cos(theta))
    vertice[3] = (x - w / 2 * np.cos(theta) - h / 2 * np.sin(theta), y - w / 2 * np.sin(theta) + h / 2 * np.cos(theta))
    for i in range(0, 2):
        for j in range(0, 4):
            vertice[j][i] = round(vertice[j][i], 3)
    return vertice


def xml_to_txt(xml_path, txt_path):
    os.chdir(xml_path)  # change the working directory to the xml folder
    annotations = os.listdir('.')  # list the files and folders under the path
    annotations = glob.glob(str(annotations) + '*.xml')  # collect the matching xml file paths
    fileList = os.listdir(xml_path)
    print(fileList)
    k = 0
    # enumerate() pairs each item of an iterable (list, tuple, string, ...) with its index
    for i, file in enumerate(annotations):
        in_file = open(file)  # open the xml file
        tree = ET.parse(in_file)  # parse the xml file with ElementTree
        root = tree.getroot()  # get the root node of the tree
        # filename = root.find('filename').text
        # s = filename[2:]
        # s = s.zfill(5)
        # file_save = s + '.txt'
        # file_save = filename + '.txt'
        file_save = fileList[k][:-4] + '.txt'
        k = k + 1
        print(file_save)
        file_txt = os.path.join(txt_path, file_save)
        f_w = open(file_txt, 'w')
        for obj2 in root.iter('object'):
            # current = list()
            # class_num = class_names.index(name)
            xmlbox1 = obj2.find('robndbox')
            x = xmlbox1.find('cx').text
            y = xmlbox1.find('cy').text
            width = xmlbox1.find('w').text
            height = xmlbox1.find('h').text
            angle = xmlbox1.find('angle').text
            x = float(x)
            x = x * 0.65
            y = float(y)
            y = y * 0.65
            width = float(width)
            width = width * 0.65
            height = float(height)
            height = height * 0.65
            angle = float(angle)
            if height > width:
                exchange = width
                width = height
                height = exchange
                angle = angle + 3.1415926/2
                if angle >= 3.1415926:
                    angle = angle - 3.1415926

            x = 416 - x  # point symmetry about the image center (180-degree rotation)
            y = 416 - y  # point symmetry about the image center (180-degree rotation)
            # angle = np.tan(angle)
            # if angle<0:
            #     angle = max(-15,angle)
            # if angle>=0:
            #     angle = min(15,angle)

            f_w.write(str(x) + ' ' + str(y) + ' ' + str(width) + ' ' + str(height) + ' ' + str(angle) + '\n')

            # f_w.write(str(bbox[0][0]) + ' ' + str(bbox[0][1]) + '\n' +
            #           str(bbox[1][0]) + ' ' + str(bbox[1][1]) + '\n' +
            #           str(bbox[2][0]) + ' ' + str(bbox[2][1]) + '\n' +
            #           str(bbox[3][0]) + ' ' + str(bbox[3][1]) + '\n')


if __name__ == "__main__":
    xml_path = r'J:\experiment_data\0 train_test_split\train\label'  # path to the xml files
    txt_path = r'J:\experiment_data\6 r180\label'  # path to the txt files

    os.chdir(xml_path)  # change the working directory to the xml folder
    annotations = os.listdir('.')  # list the files and folders under the path
    annotations = glob.glob(str(annotations) + '*.xml')  # collect the matching xml file paths
    # for i, element in enumerate(annotations):
    #     print(i, element)

    xml_to_txt(xml_path, txt_path)
data_preprocess/data_augmentation/label/rotate_270.py ADDED
@@ -0,0 +1,100 @@
import os
import os.path
import xml.etree.ElementTree as ET
import glob
import numpy as np


def center_to_vertice(x, y, w, h, angle):  # convert grasp parameters to the four corner points of the grasp rectangle for display
    theta = angle
    vertice = np.zeros((4, 2))
    vertice[0] = (x - w / 2 * np.cos(theta) + h / 2 * np.sin(theta), y - w / 2 * np.sin(theta) - h / 2 * np.cos(theta))
    vertice[1] = (x + w / 2 * np.cos(theta) + h / 2 * np.sin(theta), y + w / 2 * np.sin(theta) - h / 2 * np.cos(theta))
    vertice[2] = (x + w / 2 * np.cos(theta) - h / 2 * np.sin(theta), y + w / 2 * np.sin(theta) + h / 2 * np.cos(theta))
    vertice[3] = (x - w / 2 * np.cos(theta) - h / 2 * np.sin(theta), y - w / 2 * np.sin(theta) + h / 2 * np.cos(theta))
    for i in range(0, 2):
        for j in range(0, 4):
            vertice[j][i] = round(vertice[j][i], 3)
    return vertice


def xml_to_txt(xml_path, txt_path):
    os.chdir(xml_path)  # change the working directory to the xml folder
    annotations = os.listdir('.')  # list the files and folders under the path
    annotations = glob.glob(str(annotations) + '*.xml')  # collect the matching xml file paths
    fileList = os.listdir(xml_path)
    print(fileList)
    k = 0
    # enumerate() pairs each item of an iterable (list, tuple, string, ...) with its index
    for i, file in enumerate(annotations):
        in_file = open(file)  # open the xml file
        tree = ET.parse(in_file)  # parse the xml file with ElementTree
        root = tree.getroot()  # get the root node of the tree
        # filename = root.find('filename').text
        # s = filename[2:]
        # s = s.zfill(5)
        # file_save = s + '.txt'
        # file_save = filename + '.txt'
        file_save = fileList[k][:-4] + '.txt'
        k = k + 1
        print(file_save)
        file_txt = os.path.join(txt_path, file_save)
        f_w = open(file_txt, 'w')
        for obj2 in root.iter('object'):
            # current = list()
            # class_num = class_names.index(name)
            xmlbox1 = obj2.find('robndbox')
            x = xmlbox1.find('cx').text
            y = xmlbox1.find('cy').text
            width = xmlbox1.find('w').text
            height = xmlbox1.find('h').text
            angle = xmlbox1.find('angle').text
            x = float(x)
            x = x * 0.65
            y = float(y)
            y = y * 0.65
            width = float(width)
            width = width * 0.65
            height = float(height)
            height = height * 0.65
            angle = float(angle)
            if height > width:
                exchange = width
                width = height
                height = exchange
                angle = angle + 3.1415926/2
                if angle >= 3.1415926:
                    angle = angle - 3.1415926

            exchange = x
            x = y
            y = 416 - exchange  # rotate 270 degrees clockwise
            angle = angle + 3.1415926 * 1.5
            while angle >= 3.1415926:
                angle = angle - 3.1415926

            # angle = np.tan(angle)
            # if angle<0:
            #     angle = max(-15,angle)
            # if angle>=0:
            #     angle = min(15,angle)

            f_w.write(str(x) + ' ' + str(y) + ' ' + str(width) + ' ' + str(height) + ' ' + str(angle) + '\n')

            # f_w.write(str(bbox[0][0]) + ' ' + str(bbox[0][1]) + '\n' +
            #           str(bbox[1][0]) + ' ' + str(bbox[1][1]) + '\n' +
            #           str(bbox[2][0]) + ' ' + str(bbox[2][1]) + '\n' +
            #           str(bbox[3][0]) + ' ' + str(bbox[3][1]) + '\n')


if __name__ == "__main__":
    xml_path = r'J:\experiment_data\0 train_test_split\train\label'  # path to the xml files
    txt_path = r'J:\experiment_data\8 r270\label'  # path to the txt files

    os.chdir(xml_path)  # change the working directory to the xml folder
    annotations = os.listdir('.')  # list the files and folders under the path
    annotations = glob.glob(str(annotations) + '*.xml')  # collect the matching xml file paths
    # for i, element in enumerate(annotations):
    #     print(i, element)

    xml_to_txt(xml_path, txt_path)
data_preprocess/data_augmentation/label/rotate_90.py ADDED
@@ -0,0 +1,102 @@
import os
import os.path
import xml.etree.ElementTree as ET
import glob
import numpy as np


def center_to_vertice(x, y, w, h, angle):  # convert grasp parameters to the four corner points of the grasp rectangle for display
    theta = angle
    vertice = np.zeros((4, 2))
    vertice[0] = (x - w / 2 * np.cos(theta) + h / 2 * np.sin(theta), y - w / 2 * np.sin(theta) - h / 2 * np.cos(theta))
    vertice[1] = (x + w / 2 * np.cos(theta) + h / 2 * np.sin(theta), y + w / 2 * np.sin(theta) - h / 2 * np.cos(theta))
    vertice[2] = (x + w / 2 * np.cos(theta) - h / 2 * np.sin(theta), y + w / 2 * np.sin(theta) + h / 2 * np.cos(theta))
    vertice[3] = (x - w / 2 * np.cos(theta) - h / 2 * np.sin(theta), y - w / 2 * np.sin(theta) + h / 2 * np.cos(theta))
    for i in range(0, 2):
        for j in range(0, 4):
            vertice[j][i] = round(vertice[j][i], 3)
    return vertice


def xml_to_txt(xml_path, txt_path):
    os.chdir(xml_path)  # change the working directory to the xml folder
    annotations = os.listdir('.')  # list the files and folders under the path
    annotations = glob.glob(str(annotations) + '*.xml')  # collect the matching xml file paths
    fileList = os.listdir(xml_path)
    print(fileList)
    k = 0
    # enumerate() pairs each item of an iterable (list, tuple, string, ...) with its index
    for i, file in enumerate(annotations):
        in_file = open(file)  # open the xml file
        tree = ET.parse(in_file)  # parse the xml file with ElementTree
        root = tree.getroot()  # get the root node of the tree
        # filename = root.find('filename').text
        # s = filename[2:]
        # s = s.zfill(6)
        # file_save = s + '.txt'
        # file_save = filename + '.txt'
        file_save = fileList[k][:-4] + '.txt'
        k = k + 1
        print(file_save)
        file_txt = os.path.join(txt_path, file_save)
        f_w = open(file_txt, 'w')
        for obj2 in root.iter('object'):
            # current = list()
            # class_num = class_names.index(name)
            xmlbox1 = obj2.find('robndbox')
            x = xmlbox1.find('cx').text
            y = xmlbox1.find('cy').text
            width = xmlbox1.find('w').text
            height = xmlbox1.find('h').text
            angle = xmlbox1.find('angle').text
            x = float(x)
            x = x * 0.65
            y = float(y)
            y = y * 0.65
            width = float(width)
            width = width * 0.65
            height = float(height)
            height = height * 0.65
            angle = float(angle)
            if height > width:
                exchange = width
                width = height
                height = exchange
                angle = angle + 3.1415926/2
                if angle >= 3.1415926:
                    angle = angle - 3.1415926

            # rotate 90 degrees clockwise
            exchange = y
            y = x
            x = 416 - exchange
            angle = angle + 3.1415926/2
            while angle >= 3.1415926:
                angle = angle - 3.1415926

            # angle = np.tan(angle)
            # if angle<0:
            #     angle = max(-15,angle)
            # if angle>=0:
            #     angle = min(15,angle)

            f_w.write(str(x) + ' ' + str(y) + ' ' + str(width) + ' ' + str(height) + ' ' + str(angle) + '\n')

            # f_w.write(str(bbox[0][0]) + ' ' + str(bbox[0][1]) + '\n' +
            #           str(bbox[1][0]) + ' ' + str(bbox[1][1]) + '\n' +
            #           str(bbox[2][0]) + ' ' + str(bbox[2][1]) + '\n' +
            #           str(bbox[3][0]) + ' ' + str(bbox[3][1]) + '\n')


if __name__ == '__main__':
    xml_path = r'J:\experiment_data\0 train_test_split\train\label'  # path to the xml files
    txt_path = r'J:\experiment_data\7 r90\label'  # path to the txt files

    os.chdir(xml_path)  # change the working directory to the xml folder
    annotations = os.listdir('.')  # list the files and folders under the path
    annotations = glob.glob(str(annotations) + '*.xml')  # collect the matching xml file paths
    # for i, element in enumerate(annotations):
    #     print(i, element)

    xml_to_txt(xml_path, txt_path)
data_preprocess/data_augmentation/label/vertical_flip.py ADDED
@@ -0,0 +1,93 @@
import os
import os.path
import xml.etree.ElementTree as ET
import glob
import numpy as np


def center_to_vertice(x, y, w, h, angle):  # convert grasp parameters to the four corner points of the grasp rectangle for display
    theta = angle
    vertice = np.zeros((4, 2))
    vertice[0] = (x - w / 2 * np.cos(theta) + h / 2 * np.sin(theta), y - w / 2 * np.sin(theta) - h / 2 * np.cos(theta))
    vertice[1] = (x + w / 2 * np.cos(theta) + h / 2 * np.sin(theta), y + w / 2 * np.sin(theta) - h / 2 * np.cos(theta))
    vertice[2] = (x + w / 2 * np.cos(theta) - h / 2 * np.sin(theta), y + w / 2 * np.sin(theta) + h / 2 * np.cos(theta))
    vertice[3] = (x - w / 2 * np.cos(theta) - h / 2 * np.sin(theta), y - w / 2 * np.sin(theta) + h / 2 * np.cos(theta))
    for i in range(0, 2):
        for j in range(0, 4):
            vertice[j][i] = round(vertice[j][i], 3)
    return vertice


def xml_to_txt(xml_path, txt_path):
    os.chdir(xml_path)  # change the working directory to the xml folder
    annotations = os.listdir('.')  # list the files and folders under the path
    annotations = glob.glob(str(annotations) + '*.xml')  # collect the matching xml file paths
    fileList = os.listdir(xml_path)
    print(fileList)
    k = 0
    # enumerate() pairs each item of an iterable (list, tuple, string, ...) with its index
    for i, file in enumerate(annotations):
        in_file = open(file)  # open the xml file
        tree = ET.parse(in_file)  # parse the xml file with ElementTree
        root = tree.getroot()  # get the root node of the tree
        # filename = root.find('filename').text
        # s = filename[2:]
        # s = s.zfill(6)
        # file_save = s + '.txt'
        # file_save = filename + '.txt'
        file_save = fileList[k][:-4] + '.txt'
        k = k + 1
        print(file_save)
        file_txt = os.path.join(txt_path, file_save)
        f_w = open(file_txt, 'w')
        for obj2 in root.iter('object'):
            # current = list()
            # class_num = class_names.index(name)
            xmlbox1 = obj2.find('robndbox')
            x = xmlbox1.find('cx').text
            y = xmlbox1.find('cy').text
            width = xmlbox1.find('w').text
            height = xmlbox1.find('h').text
            angle = xmlbox1.find('angle').text
            x = float(x)
            x = x * 0.65
            y = float(y)
            y = y * 0.65
            width = float(width)
            width = width * 0.65
            height = float(height)
            height = height * 0.65
            angle = float(angle)
            if height > width:
                exchange = width
                width = height
                height = exchange
                angle = angle + 3.1415926/2
                if angle >= 3.1415926:
                    angle = angle - 3.1415926
            angle = 3.1415926 - angle  # vertical (top-bottom) flip
            y = 416 - y  # vertical (top-bottom) flip
            # angle = np.tan(angle)
            # if angle<0:
            #     angle = max(-15,angle)
            # if angle>=0:
            #     angle = min(15,angle)
            f_w.write(str(x) + ' ' + str(y) + ' ' + str(width) + ' ' + str(height) + ' ' + str(angle) + '\n')

            # f_w.write(str(bbox[0][0]) + ' ' + str(bbox[0][1]) + '\n' +
            #           str(bbox[1][0]) + ' ' + str(bbox[1][1]) + '\n' +
            #           str(bbox[2][0]) + ' ' + str(bbox[2][1]) + '\n' +
            #           str(bbox[3][0]) + ' ' + str(bbox[3][1]) + '\n')


if __name__ == '__main__':
    xml_path = r'J:\experiment_data\0 train_test_split\train\label'  # path to the xml files
    txt_path = r'J:\experiment_data\5 vertical\label'  # path to the txt files

    os.chdir(xml_path)  # change the working directory to the xml folder
    annotations = os.listdir('.')  # list the files and folders under the path
    annotations = glob.glob(str(annotations) + '*.xml')  # collect the matching xml file paths
    # for i, element in enumerate(annotations):
    #     print(i, element)

    xml_to_txt(xml_path, txt_path)
data_preprocess/rearrangement.py ADDED
@@ -0,0 +1,33 @@
import os


# initial settings
path = r'J:\experiment_data\8 r270\label'

start_num = 178501  # starting number for the new file names


file_type = "png" if path[-3:] == 'img' else "txt"


# list all files in the directory
fileList = os.listdir(path)


n = 0
for i in fileList:

    # the old file name (path + file name)
    oldname = path + os.sep + fileList[n]  # os.sep inserts the OS path separator

    s = str(start_num)
    s = s.zfill(7)

    newname = path + os.sep + s + "r." + file_type

    os.rename(oldname, newname)  # rename the file with os.rename
    print(oldname, '======>', newname)

    start_num = start_num + 1
    n = n + 1
data_preprocess/resize_to_416.py ADDED
@@ -0,0 +1,39 @@
import os
import cv2


def pad_and_resize_image(image):
    # pad the bottom of the 640x480 image with black pixels to make it 640x640
    padded_image = cv2.copyMakeBorder(image, 0, 160, 0, 0, cv2.BORDER_CONSTANT, value=0)

    # resize the image to 416x416
    resized_image = cv2.resize(padded_image, (416, 416), interpolation=cv2.INTER_AREA)

    return resized_image


def process_images(src_folder, dest_folder):
    if not os.path.exists(dest_folder):
        os.makedirs(dest_folder)

    for file_name in os.listdir(src_folder):
        if file_name.endswith(".png"):
            src_file_path = os.path.join(src_folder, file_name)
            dest_file_path = os.path.join(dest_folder, file_name)

            # read the image
            image = cv2.imread(src_file_path)

            # process the image with pad_and_resize_image
            processed_image = pad_and_resize_image(image)

            # save the processed image
            cv2.imwrite(dest_file_path, processed_image)
            print(f'Processed and saved {src_file_path} to {dest_file_path}')


if __name__ == "__main__":
    source_folder = r'D:\cornell_data\img'
    destination_folder = r'J:\cornell_dataset\img'

    process_images(source_folder, destination_folder)
data_preprocess/segment_img.py ADDED
@@ -0,0 +1,20 @@
import cv2
import numpy as np


def partition_img(img_dir, img_name):
    img_path = img_dir + '\\' + img_name
    img = cv2.imread(img_path, -1)
    print(img.shape)
    h, w, _ = img.shape
    win_w, win_h = w//4, h//4
    k = 1
    for i in range(4):
        for j in range(4):
            sub_img = img[win_h*i:win_h*(i+1), win_w*j:win_w*(j+1), :]
            cv2.imwrite(img_dir + '\\' + str(k) + '.png', sub_img)
            k = k + 1


if __name__ == '__main__':
    partition_img(r'C:\Users\CBY\Desktop\manuscript\picture', 'jige.png')
data_preprocess/train_test_split.py ADDED
@@ -0,0 +1,54 @@
import os
import shutil
import random


def split_dataset(image_folder, label_folder,
                  train_image_folder, train_label_folder, test_image_folder, test_label_folder, ratio, seed):
    if not os.path.exists(train_image_folder):
        os.makedirs(train_image_folder)

    if not os.path.exists(train_label_folder):
        os.makedirs(train_label_folder)

    if not os.path.exists(test_image_folder):
        os.makedirs(test_image_folder)

    if not os.path.exists(test_label_folder):
        os.makedirs(test_label_folder)

    file_list = [f for f in os.listdir(image_folder) if f.endswith(".png")]

    random.seed(seed)
    random.shuffle(file_list)

    train_size = int(len(file_list) * ratio)

    for idx, file_name in enumerate(file_list):
        src_image_path = os.path.join(image_folder, file_name)
        src_label_path = os.path.join(label_folder, file_name.replace(".png", ".xml"))

        if idx < train_size:
            dest_image_path = os.path.join(train_image_folder, file_name)
            dest_label_path = os.path.join(train_label_folder, file_name.replace(".png", ".xml"))
        else:
            dest_image_path = os.path.join(test_image_folder, file_name)
            dest_label_path = os.path.join(test_label_folder, file_name.replace(".png", ".xml"))

        shutil.copy(src_image_path, dest_image_path)
        shutil.copy(src_label_path, dest_label_path)


if __name__ == "__main__":
    images_folder = r'J:\data_resized_to_416\img\single-complex'
    labels_folder = r'J:\data_resized_to_416\label\single-complex'
    train_images_folder = r'J:\experiment_data\train_test_split\train\img\single-complex'
    train_labels_folder = r'J:\experiment_data\train_test_split\train\label\single-complex'
    test_images_folder = r'J:\experiment_data\train_test_split\test\img\single-complex'
    test_labels_folder = r'J:\experiment_data\train_test_split\test\label\single-complex'

    split_ratio = 1 - 500 / 13000  # fraction of the data used for training
    random_seed = 42  # random seed

    split_dataset(images_folder, labels_folder,
                  train_images_folder, train_labels_folder, test_images_folder, test_labels_folder, split_ratio, random_seed)
detected_img/complex/000035r.png ADDED

Git LFS Details

  • SHA256: 4e8f5d79a5879336d55bc76e265529ad0dd862d79aff87e30b0006be6a481cf4
  • Pointer size: 131 Bytes
  • Size of remote file: 178 kB
detected_img/multi-object/multi-object.png ADDED

Git LFS Details

  • SHA256: fdaf84c3814bd4b811272d8c7f9c0ee6e68f13a9abcf64e0bc4e1f6f3237ede4
  • Pointer size: 131 Bytes
  • Size of remote file: 184 kB
detected_img/simple/000009r.png ADDED

Git LFS Details

  • SHA256: b124daa15c7f7e159c312036dba0d2980974d0cb2be79b7a2b9d0e0ca4c20605
  • Pointer size: 131 Bytes
  • Size of remote file: 114 kB
draw_function.py ADDED
@@ -0,0 +1,71 @@
import cv2
from PIL import Image
import numpy as np


def draw_function(picture_path, save_dir, model, transform):
    for i in picture_path:
        # img = cv2.imread(i)
        img = Image.open(i)
        img = img.convert('RGB')
        img1 = img.copy()
        img1 = np.array(img1)
        # img = cv2.resize(img,(400,300))
        img = transform(img)
        img = img.unsqueeze(dim=0)
        # img = img/255
        # img = img.reshape((1,300,400,3))

        # out1, out2, out3, out4, out5 = model.predict(img)
        predict_grasp = model(img)
        x = predict_grasp[1].item()  # x: the first predicted box parameter
        y = predict_grasp[2].item()  # y: the second predicted box parameter
        w = predict_grasp[3].item()  # w: the third predicted box parameter
        h = predict_grasp[4].item()  # h: the fourth predicted box parameter
        theta = predict_grasp[5].item()  # theta: the fifth predicted box parameter
        center = (x, y)
        size = (w, h)
        angle = theta
        box = cv2.boxPoints((center, size, angle))
        box = np.int64(box)
        # predict_grasp = predict_grasp.cpu().detach().numpy()
        # vertice = np.zeros((4, 2))
        # x = predict_grasp[1]
        # y = predict_grasp[2]
        # w = predict_grasp[3]
        # h = predict_grasp[4]
        # theta = predict_grasp[5] / 180 * 3.1415927
        # vertice[0][0] = x - w / 2 * np.cos(theta) + h / 2 * np.sin(theta)
        # vertice[0][1] = y - w / 2 * np.sin(theta) - h / 2 * np.cos(theta)
        # vertice[1][0] = x + w / 2 * np.cos(theta) + h / 2 * np.sin(theta)
        # vertice[1][1] = y + w / 2 * np.sin(theta) - h / 2 * np.cos(theta)
        # vertice[2][0] = x + w / 2 * np.cos(theta) - h / 2 * np.sin(theta)
        # vertice[2][1] = y + w / 2 * np.sin(theta) + h / 2 * np.cos(theta)
        # vertice[3][0] = x - w / 2 * np.cos(theta) - h / 2 * np.sin(theta)
        # vertice[3][1] = y - w / 2 * np.sin(theta) + h / 2 * np.cos(theta)
        # p1 = (int(vertice[0][0]), int(vertice[0][1]))
        # p2 = (int(vertice[1][0]), int(vertice[1][1]))
        # p3 = (int(vertice[2][0]), int(vertice[2][1]))
        # p4 = (int(vertice[3][0]), int(vertice[3][1]))

        point_color1 = (255, 255, 0)  # BGR
        point_color2 = (255, 0, 255)  # BGR
        # point_color1 = (255, 255, 0)  # BGR
        # point_color2 = (255, 255, 0)  # BGR

        thickness = 2
        lineType = 4
        # img_p = k.numpy()
        # img = img.reshape((300,400,3))
        img_p = cv2.cvtColor(img1, cv2.COLOR_RGB2BGR)
        # img_p = img1

        cv2.line(img_p, box[0], box[3], point_color1, thickness, lineType)
        cv2.line(img_p, box[3], box[2], point_color2, thickness, lineType)
        cv2.line(img_p, box[2], box[1], point_color1, thickness, lineType)
        cv2.line(img_p, box[1], box[0], point_color2, thickness, lineType)

        picture_name = i.split('\\')[-1]
        save_path = save_dir + '\\' + picture_name
        cv2.imwrite(save_path, img_p)
draw_single_box.py ADDED
@@ -0,0 +1,76 @@
import glob

import numpy as np
import torch
from torch import nn
from model import mobile_vit_small
from Anchor import *
import cv2
from torchvision import transforms
from grasp_detect_singlebox import DetectSingleImage
from draw_function import draw_function


def draw_one_box(img, coordinate):
    # center = (cx, cy)
    # size = (w, h)
    # angle = theta
    center = (coordinate[1].item(), coordinate[2].item())
    size = (coordinate[3].item(), coordinate[4].item())
    angle = coordinate[5].item()
    box = cv2.boxPoints((center, size, angle))
    box = np.int64(box)
    # print(box)
    # Font = cv2.FONT_HERSHEY_SIMPLEX
    # cv2.putText(img, 'c: ' + str(round(coordinate[0].item(), 3)), (box[3][0], box[3][1]), Font, 0.5, (0, 0, 255), 1)
    cv2.drawContours(img, [box], -1, (0, 255, 0), 2)

    cv2.imshow("Image", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


def draw_pictures(imgs_path, save_dir):
    for i in imgs_path:
        img = cv2.imread(i)
        img2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img2 = transform(img2).unsqueeze(dim=0).to(device)
        box = inference_single_image(img2)
        center = (box[1].item(), box[2].item())
        size = (box[3].item(), box[4].item())
        angle = box[5].item()
        box = cv2.boxPoints((center, size, angle))
        box = np.int64(box)
        cv2.drawContours(img, [box], -1, (0, 255, 0), 2)
        cv2.imwrite(save_dir + '\\' + i.split('\\')[-1], img)


if __name__ == '__main__':
    # path to the model weights
    weights_path = r'weights\epoch6_loss_8.045684943666645.pth'

    # path to the image folder
    imgs_path = glob.glob(r'J:\experiment_data\0.1 test\single-complex\img\*.png')

    # save directory
    save_dir = r'detected_img\complex'

    # select the device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    device = 'cpu'
    transform = transforms.Compose([
        transforms.ToTensor(),
    ])

    inference_single_image = DetectSingleImage(device=device, weights_path=weights_path)

    draw_function(picture_path=imgs_path, save_dir=save_dir, model=inference_single_image, transform=transform)

    # draw_pictures(imgs_path=imgs_path, save_dir=save_dir)

    # print('confidence:', box[0].data.item())

    # draw_one_box(img,
    #              confidence.data.item(),
    #              cx.data.item(), cy.data.item(), w.data.item(), h.data.item(), theta.data.item())
evaluate_acc.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+ from shapely.geometry import Polygon
4
+ import glob
5
+ import torch
6
+ from torch import nn
7
+ import torchvision
8
+ from PIL import Image
9
+ from grasp_detect_singlebox import *
10
+
11
+
12
+ # 图像预处理方式
13
+ transform = torchvision.transforms.Compose([
14
+ transforms.ToTensor(),
15
+ # transforms.Resize((300, 400))
16
+ ])
17
+
18
+
19
+ # 五维抓取坐标转四点坐标
20
+ def grasp_to_point(predict_grasp, radian=False): # 输入抓取框的五维表示
21
+ # vertice = np.zeros((4, 2)) # 生成一个4*2的数组用于保存四个点的八个坐标值
22
+ x = predict_grasp[0].item() # x取第一个预测值
23
+ y = predict_grasp[1].item() # y取第二个预测值
24
+ w = predict_grasp[2].item() # w取第三个预测值
25
+ h = predict_grasp[3].item() # h取第四个预测值
26
+ theta = predict_grasp[4].item() # theta取第五个预测值
27
+ center = (x, y)
28
+ size = (w, h)
29
+ if radian:
30
+ angle = theta / 3.1415927 * 180
31
+ else:
32
+ angle = theta
33
+ box = cv2.boxPoints((center, size, angle))
34
+
35
+ return box
36
+
37
+
38
+ # 计算jaccard指数
39
+ def intersection(g, p): # 输入标签的四点坐标和预测的四点坐标
40
+ g = np.asarray(g)
41
+ p = np.asarray(p)
42
+ g = Polygon(g[:8].reshape((4, 2)))
43
+ p = Polygon(p[:8].reshape((4, 2)))
44
+ if not g.is_valid or not p.is_valid:
45
+ return 0
46
+ inter = Polygon(g).intersection(Polygon(p)).area
47
+ union = g.area + p.area - inter
48
+ if union == 0:
49
+ return 0
50
+ else:
51
+ return inter / union
52
+
53
+
54
+ # Check whether a single predicted grasp is valid against a single ground-truth grasp
55
+ def judge_availabel(predict_grasp, ground_truth): # inputs: predicted and ground-truth five-dimensional grasps; returns 1 if valid, 0 otherwise
56
+ predict_point = grasp_to_point(predict_grasp) # predicted grasp to four corner points
57
+ ground_truth_point = grasp_to_point(ground_truth, radian=True) # ground-truth grasp to four corner points
58
+ jaccard = intersection(ground_truth_point, predict_point) # Jaccard index of the two rectangles
59
+ theta_predict = predict_grasp[-1].data.item() # predicted angle (degrees)
60
+ theta_ground_truth = ground_truth[-1] / 3.1415927 * 180 # ground-truth angle converted to degrees
61
+
62
+ # The following maps the predicted and ground-truth angles into [0, 180) degrees
63
+ if theta_predict >= 180:
64
+ theta_predict -= 180
65
+ if theta_ground_truth >= 180:
66
+ theta_ground_truth -= 180
67
+ if theta_predict < 0:
68
+ theta_predict += 180
69
+ if theta_ground_truth < 0:
70
+ theta_ground_truth += 180
71
+ # criterion 1
72
+ distance_of_theta1 = abs(theta_predict - theta_ground_truth)
73
+ # The following maps the angles into the range -pi/2 to +pi/2
74
+ if theta_predict > 90:
75
+ theta_predict -= 180
76
+ if theta_ground_truth > 90:
77
+ theta_ground_truth -= 180
78
+ # criterion 2
79
+ distance_of_theta2 = abs(theta_predict - theta_ground_truth)
80
+ # combined criterion
81
+ distance_of_theta = min(distance_of_theta1, distance_of_theta2) # angle difference
82
+
83
+ if jaccard >= 0.25 and distance_of_theta <= 30: # conditions for a valid grasp
84
+ available = 1
85
+ else:
86
+ available = 0
87
+ return available
88
+
89
+
90
+ # Check whether the detection on one image yields a valid grasp
91
+ def judge_picture(picture_path, text_path): # inputs: image path, label path; returns 1 if valid, 0 otherwise
92
+ img = Image.open(picture_path) # load the image to be predicted
93
+ img = img.convert('RGB')
94
+ img = transform(img)
95
+ img = img.unsqueeze(dim=0)
96
+ img = img.to(device)
97
+ predict_grasp = inference_single_image(img) # predict the five-dimensional grasp
98
+ predict_grasp = predict_grasp[1:]
99
+ # predict_grasp = predict_grasp.cpu().detach().numpy()
100
+ # print(predict_grasp)
101
+ # print(predict_grasp[0].detach().numpy())
102
+ ground_truth = np.loadtxt(text_path) # load the label file
103
+ flag = 0 # reset the flag
104
+ for i in range(len(ground_truth)): # iterate over every labelled grasp
105
+ if judge_availabel(predict_grasp, ground_truth[i]) == 1:
106
+ flag = 1
107
+ break
108
+ return flag
109
+
110
+
111
+ # Compute the detection accuracy
112
+ def evaluate_grasp(picture_dir_path, text_dir_path): # inputs: image folder path, label folder path
113
+ text_path_s = glob.glob(text_dir_path + '\\' + '*.txt') # collect all label file paths
114
+ text_path_s.sort(key=lambda x: x.split('\\')[-1].split('.txt')[0]) # sort by file name
115
+ img_path_s = glob.glob(picture_dir_path + '\\' + '*.png') # collect all image file paths
116
+ img_path_s.sort(key=lambda x: x.split('\\')[-1].split('.png')[0]) # sort by file name
117
+ yes = 0
118
+ total = 0
119
+ for i in range(len(text_path_s)):
120
+ available = judge_picture(img_path_s[i], text_path_s[i]) # check whether a valid grasp was detected for this image
121
+ if available == 1:
122
+ yes = yes + 1
123
+ total = total + 1
124
+ # print(img_path_s[i][-9:]+':Right') #输出该图片检测正确的信息
125
+ else:
126
+ print(img_path_s[i].split('\\')[-1] + ':False') # report this image as incorrectly detected
127
+ total = total + 1
128
+ print('Total images evaluated: '+str(total))
129
+ print('Valid grasps detected: '+str(yes))
130
+ print('Accuracy:', yes/total)
131
+ return yes / total
132
+
133
+
134
+ if __name__ == '__main__':
135
+ # Weights file path & test image/label folder paths
136
+ weights_path = r'weights\epoch6_loss_8.045684943666645.pth'
137
+
138
+ picture_dir_path = r'J:\experiment_data\0.1 test\single-simple\img'
139
+ text_dir_path = r'J:\experiment_data\0.1 test\single-simple\label'
140
+
141
+ # Select the evaluation device
142
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
143
+
144
+ # Whether multi-GPU (DataParallel) training was used
145
+ multi_GPU = False
146
+
147
+ # Build the model
148
+ inference_single_image = DetectSingleImage(device=device, weights_path=weights_path)
149
+
150
+ # Evaluate the model
151
+ evaluate_grasp(picture_dir_path, text_dir_path)
152
+
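For reference, the validity criterion used in judge_availabel (Jaccard index of at least 0.25 and angle difference of at most 30 degrees) can be reproduced in isolation. A small sketch with two hand-made grasps; the rect_corners helper and all numeric values are illustrative, not taken from the dataset:

import cv2
import numpy as np
from shapely.geometry import Polygon

def rect_corners(cx, cy, w, h, angle_deg):
    # same conversion as grasp_to_point: five-dimensional grasp -> four corner points
    return cv2.boxPoints(((cx, cy), (w, h), angle_deg))

pred = rect_corners(200, 200, 80, 20, 10)    # predicted grasp (angle in degrees)
gt = rect_corners(205, 198, 85, 19, 25)      # ground-truth grasp

p, g = Polygon(pred), Polygon(gt)
inter = p.intersection(g).area
jaccard = inter / (p.area + g.area - inter)
angle_diff = abs(10 - 25)

print(jaccard >= 0.25 and angle_diff <= 30)  # True -> counted as a valid grasp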
grasp_detect_multibox.py ADDED
@@ -0,0 +1,111 @@
1
+ import numpy as np
2
+ import torch
3
+ from torch import nn
4
+ from model import get_model
5
+ from Anchor import *
6
+ import cv2
7
+ from torchvision import transforms
8
+ import torch.nn.functional as F
9
+
10
+
11
+ class DetectMultiImage(nn.Module):
12
+ def __init__(self, device, weights_path, multi_gpu=False, weights=True):
13
+ super(DetectMultiImage, self).__init__()
14
+ self.net = get_model().to(device)
15
+ if multi_gpu:
16
+ self.net = nn.DataParallel(self.net)
17
+ if weights:
18
+ self.net.load_state_dict(torch.load(weights_path))
19
+ self.net.eval()
20
+
21
+ def get_index_and_bias(self, output, confidence_threshold):
22
+ N, C, H, W = output.shape
23
+ # N C H W ----> N H W C
24
+ output = output.permute(0, 2, 3, 1)
25
+ # N H W C ----> N H W num_anchors 6
26
+ output = output.reshape(N, H, W, num_anchors, -1)
27
+ # keep every box whose confidence exceeds confidence_threshold; mask_obj shape: N H W num_anchors
28
+ mask_obj = (F.sigmoid(output[..., 0]) >= confidence_threshold)
29
+ # indices where mask_obj is True; index shape: (number of True entries) x 4 (the N, H, W, num_anchors indices)
30
+ index = mask_obj.nonzero()
31
+ # offsets confidence, tx, ty, tw, th, t_theta; bias shape: (number of True entries) x 6 (the six box attributes)
32
+ bias = output[mask_obj]
33
+ return index, bias
34
+
35
+ def get_coordinate(self, index, bias):
36
+ # below, confidence, cx, cy, w, h, theta each have one entry per True element of the mask
37
+ confidence = torch.sigmoid(bias[:, 0])
38
+ # cx = index * field + sigmoid(bias) * field
39
+ cx = (index[:, 2] + torch.sigmoid(bias[:, 1])) * field_of_grid_cell
40
+ cy = (index[:, 1] + torch.sigmoid(bias[:, 2])) * field_of_grid_cell
41
+ w = anchor_w * torch.exp(bias[:, 3])
42
+ h = anchor_h * torch.exp(bias[:, 4])
43
+ # theta is already converted to degrees here
44
+ theta = (index[:, 3] + torch.sigmoid(bias[:, 5])) * theta_margin
45
+ return confidence, cx, cy, w, h, theta
46
+
47
+ def forward(self, input, confidence_threshold):
48
+ output = self.net(input)
49
+ index, bias = self.get_index_and_bias(output, confidence_threshold)
50
+ confidence, cx, cy, w, h, theta = self.get_coordinate(index, bias)
51
+ # returned shape: (number of True entries) x 6
52
+ return torch.cat([confidence.unsqueeze(1),
53
+ cx.unsqueeze(1), cy.unsqueeze(1), w.unsqueeze(1), h.unsqueeze(1), theta.unsqueeze(1)], dim=1)
54
+
55
+
56
+ # def draw_multi_box(img, box_coordinates):
57
+ # for i in range(box_coordinates.shape[0]):
58
+ # center = (box_coordinates[i, 1].item(), box_coordinates[i, 2].item())
59
+ # size = (box_coordinates[i, 3].item(), box_coordinates[i, 4].item())
60
+ # angle = box_coordinates[i, 5].item()
61
+ # box = cv2.boxPoints((center, size, angle))
62
+ # box = np.int64(box)
63
+ # cv2.drawContours(img, [box], -1, (0, 255, 0), 2)
64
+ # cv2.imshow("Image", img)
65
+ # cv2.waitKey(0)
66
+ # cv2.destroyAllWindows()
67
+
68
+
69
+ def draw_multi_box(img, box_coordinates):
70
+ point_color1 = (255, 255, 0) # BGR
71
+ point_color2 = (255, 0, 255) # BGR
72
+ thickness = 2
73
+ lineType = 4
74
+ for i in range(box_coordinates.shape[0]):
75
+ center = (box_coordinates[i, 1].item(), box_coordinates[i, 2].item())
76
+ size = (box_coordinates[i, 3].item(), box_coordinates[i, 4].item())
77
+ angle = box_coordinates[i, 5].item()
78
+ box = cv2.boxPoints((center, size, angle))
79
+ box = np.int64(box)
80
+ cv2.line(img, box[0], box[3], point_color1, thickness, lineType)
81
+ cv2.line(img, box[3], box[2], point_color2, thickness, lineType)
82
+ cv2.line(img, box[2], box[1], point_color1, thickness, lineType)
83
+ cv2.line(img, box[1], box[0], point_color2, thickness, lineType)
84
+ cv2.imshow("Image", img)
85
+ cv2.waitKey(0)
86
+ cv2.destroyAllWindows()
87
+
88
+
89
+ if __name__ == '__main__':
90
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
91
+
92
+ weights_path = 'weights/epoch6_loss_8.045684943666645.pth'
93
+
94
+ img = cv2.imread(r'J:\experiment_data\MOS\img\002243r.png')
95
+
96
+ transform = transforms.Compose([
97
+ transforms.ToTensor(),
98
+ ])
99
+
100
+ inference_multi_image = DetectMultiImage(device=device, weights_path=weights_path)
101
+
102
+ img2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # convert BGR to RGB
103
+ img2 = transform(img2).unsqueeze(dim=0).to(device)
104
+
105
+ boxes = inference_multi_image(img2, 0.9999)
106
+
107
+ print(boxes.shape)
108
+ print(boxes[:, 0].data[:5])
109
+
110
+ draw_multi_box(img, boxes.data) # the img passed here is in OpenCV's BGR format
111
+
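The comments in get_coordinate describe the YOLO-style decoding: cell index plus a sigmoid offset, scaled by the cell's receptive field, and anchor size scaled by the exponential of the predicted value. A minimal numeric sketch of that decoding; the constants below (stride, a_w, a_h, t_margin) are illustrative placeholders for the values defined in Anchor.py:

import torch

stride, a_w, a_h, t_margin = 32.0, 80.0, 20.0, 60.0      # illustrative stand-ins for field_of_grid_cell, anchor_w, anchor_h, theta_margin
grid_y, grid_x, anchor_idx = 10, 7, 1                     # cell row, cell column, angle-anchor index
tx, ty, tw, th, t_theta = torch.tensor([0.2, -0.3, 0.1, 0.05, 0.4])

cx = (grid_x + torch.sigmoid(tx)) * stride                # x lands inside column 7 of the grid
cy = (grid_y + torch.sigmoid(ty)) * stride                # y lands inside row 10 of the grid
w = a_w * torch.exp(tw)                                   # width scales the anchor width
h = a_h * torch.exp(th)                                   # height scales the anchor height
theta = (anchor_idx + torch.sigmoid(t_theta)) * t_margin  # angle in degrees
print(cx.item(), cy.item(), w.item(), h.item(), theta.item())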
grasp_detect_singlebox.py ADDED
@@ -0,0 +1,100 @@
1
+ import numpy as np
2
+ import torch
3
+ from torch import nn
4
+ from model import get_model
5
+ from Anchor import *
6
+ import cv2
7
+ from torchvision import transforms
8
+
9
+
10
+ class DetectSingleImage(nn.Module):
11
+ def __init__(self, device, weights_path, multi_gpu=False, weights=True):
12
+ super(DetectSingleImage, self).__init__()
13
+ self.net = get_model().to(device)
14
+ if multi_gpu:
15
+ self.net = nn.DataParallel(self.net)
16
+ if weights:
17
+ self.net.load_state_dict(torch.load(weights_path))
18
+ print('Weights loaded')
19
+ self.net.eval()
20
+
21
+ def get_index_and_bias(self, output):
22
+ N, C, H, W = output.shape
23
+ # N C H W ----> N H W C
24
+ output = output.permute(0, 2, 3, 1)
25
+ # N H W C ----> N H W num_anchors 6
26
+ output = output.reshape(N, H, W, num_anchors, -1)
27
+ # keep only the box with the highest confidence
28
+ select_box = torch.max(output[..., 0])
29
+ # mask_obj shape: N H W num_anchors; only the most confident box is selected
30
+ mask_obj = (output[..., 0] == select_box)
31
+ # indices where mask_obj is True; take the first one, since several boxes may share the same maximum confidence; index: the N, H, W, num_anchors indices
32
+ index = mask_obj.nonzero()[0]
33
+ # index = mask_obj.nonzero()
34
+ # offsets: confidence, tx, ty, tw, th, t_theta
35
+ # bias = output[index[0]][index[1]][index[2]][index[3]]
36
+ bias = output[mask_obj][0]
37
+ return index, bias
38
+
39
+ def get_coordinate(self, index, bias):
40
+ confidence = torch.sigmoid(bias[0])
41
+ cx = (index[2] + torch.sigmoid(bias[1])) * field_of_grid_cell
42
+ cy = (index[1] + torch.sigmoid(bias[2])) * field_of_grid_cell
43
+ w = anchor_w * torch.exp(bias[3])
44
+ h = anchor_h * torch.exp(bias[4])
45
+ theta = (index[3] + torch.sigmoid(bias[5])) * theta_margin
46
+ return confidence, cx, cy, w, h, theta
47
+
48
+ def forward(self, input):
49
+ output = self.net(input)
50
+ index, bias = self.get_index_and_bias(output)
51
+ confidence, cx, cy, w, h, theta = self.get_coordinate(index, bias)
52
+ return torch.cat([confidence.unsqueeze(0),
53
+ cx.unsqueeze(0), cy.unsqueeze(0), w.unsqueeze(0), h.unsqueeze(0), theta.unsqueeze(0)], dim=0)
54
+
55
+
56
+ def draw_one_box(img, coordinate):
57
+ # center = (cx, cy)
58
+ # size = (w, h)
59
+ # angle = theta
60
+ center = (coordinate[1].item(), coordinate[2].item())
61
+ size = (coordinate[3].item(), coordinate[4].item())
62
+ angle = coordinate[5].item()
63
+ box = cv2.boxPoints((center, size, angle))
64
+ box = np.int64(box)
65
+ # print(box)
66
+ # Font = cv2.FONT_HERSHEY_SIMPLEX
67
+ # cv2.putText(img, 'c: ' + str(round(coordinate[0].item(), 3)), (box[3][0], box[3][1]), Font, 0.5, (0, 0, 255), 1)
68
+ cv2.drawContours(img, [box], -1, (0, 255, 0), 2)
69
+
70
+ cv2.imshow("Image", img)
71
+ cv2.waitKey(0)
72
+ cv2.destroyAllWindows()
73
+
74
+
75
+ if __name__ == '__main__':
76
+ weights_path = r'weights\Feature_Concat\epoch12_loss_424.52915453940744.pth'
77
+
78
+ img = cv2.imread(r'J:\experiment_data\0.1 test\single-complex\img\000009r.png')
79
+
80
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
81
+
82
+ transform = transforms.Compose([
83
+ transforms.ToTensor(),
84
+ ])
85
+
86
+ inference_single_image = DetectSingleImage(device=device, weights_path=weights_path)
87
+
88
+ # img = np.random.randn(416, 416, 3).astype(np.float32)
89
+ img2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
90
+ img2 = transform(img2).unsqueeze(dim=0).to(device)
91
+
92
+ box = inference_single_image(img2)
93
+ print(box.shape)
94
+ print('Confidence:', box[0].data.item())
95
+
96
+ draw_one_box(img, box)
97
+
98
+ # draw_one_box(img,
99
+ # confidence.data.item(),
100
+ # cx.data.item(), cy.data.item(), w.data.item(), h.data.item(), theta.data.item())
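A self-contained sketch of the selection step in DetectSingleImage.get_index_and_bias, run on a random tensor so it needs no trained weights (num_anchors and the 13x13 grid below are illustrative):

import torch

num_anchors = 3
output = torch.randn(1, num_anchors * 6, 13, 13)           # raw network output: N, C, H, W
N, C, H, W = output.shape
output = output.permute(0, 2, 3, 1).reshape(N, H, W, num_anchors, -1)

best = torch.max(output[..., 0])                            # highest raw confidence
mask = (output[..., 0] == best)                             # boolean mask over N, H, W, num_anchors
index = mask.nonzero()[0]                                   # first matching (n, h, w, anchor) index
bias = output[mask][0]                                      # its six raw offsets
print(index.tolist(), bias.shape)                           # e.g. [0, 5, 8, 2] and torch.Size([6])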
log/training_logs.txt ADDED
File without changes
model.py ADDED
@@ -0,0 +1,884 @@
1
+ """
2
+ original code from apple:
3
+ https://github.com/apple/ml-cvnets/blob/main/cvnets/models/classification/mobilevit.py
4
+ """
5
+
6
+ from typing import Optional, Tuple, Union, Dict
7
+ import math
8
+ import torch
9
+ import torch.nn as nn
10
+ from torch import Tensor
11
+ from torch.nn import functional as F
12
+
13
+ from transformer import TransformerEncoder
14
+ from model_config import get_config
15
+
16
+ from torchinfo import summary
17
+ from thop import profile
18
+
19
+ from Anchor import num_anchors
20
+
21
+
22
+ def make_divisible(
23
+ v: Union[float, int],
24
+ divisor: Optional[int] = 8,
25
+ min_value: Optional[Union[float, int]] = None,
26
+ ) -> Union[float, int]:
27
+ """
28
+ This function is taken from the original tf repo.
29
+ It ensures that all layers have a channel number that is divisible by 8
30
+ It can be seen here:
31
+ https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
32
+ :param v:
33
+ :param divisor:
34
+ :param min_value:
35
+ :return:
36
+ """
37
+ if min_value is None:
38
+ min_value = divisor
39
+ new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
40
+ # Make sure that round down does not go down by more than 10%.
41
+ if new_v < 0.9 * v:
42
+ new_v += divisor
43
+ return new_v
44
+
45
+
46
+ class ConvLayer(nn.Module):
47
+ """
48
+ Applies a 2D convolution over an input
49
+
50
+ Args:
51
+ in_channels (int): :math:`C_{in}` from an expected input of size :math:`(N, C_{in}, H_{in}, W_{in})`
52
+ out_channels (int): :math:`C_{out}` from an expected output of size :math:`(N, C_{out}, H_{out}, W_{out})`
53
+ kernel_size (Union[int, Tuple[int, int]]): Kernel size for convolution.
54
+ stride (Union[int, Tuple[int, int]]): Stride for convolution. Default: 1
55
+ groups (Optional[int]): Number of groups in convolution. Default: 1
56
+ bias (Optional[bool]): Use bias. Default: ``False``
57
+ use_norm (Optional[bool]): Use normalization layer after convolution. Default: ``True``
58
+ use_act (Optional[bool]): Use activation layer after convolution (or convolution and normalization).
59
+ Default: ``True``
60
+
61
+ Shape:
62
+ - Input: :math:`(N, C_{in}, H_{in}, W_{in})`
63
+ - Output: :math:`(N, C_{out}, H_{out}, W_{out})`
64
+
65
+ .. note::
66
+ For depth-wise convolution, `groups=C_{in}=C_{out}`.
67
+ """
68
+
69
+ def __init__(
70
+ self,
71
+ in_channels: int,
72
+ out_channels: int,
73
+ kernel_size: Union[int, Tuple[int, int]],
74
+ stride: Optional[Union[int, Tuple[int, int]]] = 1,
75
+ groups: Optional[int] = 1,
76
+ bias: Optional[bool] = False,
77
+ use_norm: Optional[bool] = True,
78
+ use_act: Optional[bool] = True,
79
+ ) -> None:
80
+ super().__init__()
81
+
82
+ if isinstance(kernel_size, int):
83
+ kernel_size = (kernel_size, kernel_size)
84
+
85
+ if isinstance(stride, int):
86
+ stride = (stride, stride)
87
+
88
+ assert isinstance(kernel_size, Tuple)
89
+ assert isinstance(stride, Tuple)
90
+
91
+ padding = (
92
+ int((kernel_size[0] - 1) / 2),
93
+ int((kernel_size[1] - 1) / 2),
94
+ )
95
+
96
+ block = nn.Sequential()
97
+
98
+ conv_layer = nn.Conv2d(
99
+ in_channels=in_channels,
100
+ out_channels=out_channels,
101
+ kernel_size=kernel_size,
102
+ stride=stride,
103
+ groups=groups,
104
+ padding=padding,
105
+ bias=bias
106
+ )
107
+
108
+ block.add_module(name="conv", module=conv_layer)
109
+
110
+ if use_norm:
111
+ norm_layer = nn.BatchNorm2d(num_features=out_channels, momentum=0.1)
112
+ block.add_module(name="norm", module=norm_layer)
113
+
114
+ if use_act:
115
+ act_layer = nn.SiLU()
116
+ block.add_module(name="act", module=act_layer)
117
+
118
+ self.block = block
119
+
120
+ def forward(self, x: Tensor) -> Tensor:
121
+ return self.block(x)
122
+
123
+
124
+ class InvertedResidual(nn.Module):
125
+ """
126
+ This class implements the inverted residual block, as described in `MobileNetv2 <https://arxiv.org/abs/1801.04381>`_ paper
127
+
128
+ Args:
129
+ in_channels (int): :math:`C_{in}` from an expected input of size :math:`(N, C_{in}, H_{in}, W_{in})`
130
+ out_channels (int): :math:`C_{out}` from an expected output of size :math:`(N, C_{out}, H_{out}, W_{out})`
131
+ stride (int): Use convolutions with a stride. Default: 1
132
+ expand_ratio (Union[int, float]): Expand the input channels by this factor in depth-wise conv
133
+ skip_connection (Optional[bool]): Use skip-connection. Default: True
134
+
135
+ Shape:
136
+ - Input: :math:`(N, C_{in}, H_{in}, W_{in})`
137
+ - Output: :math:`(N, C_{out}, H_{out}, W_{out})`
138
+
139
+ .. note::
140
+ If `in_channels != out_channels` and `stride > 1`, we set `skip_connection=False`
141
+
142
+ """
143
+
144
+ def __init__(
145
+ self,
146
+ in_channels: int,
147
+ out_channels: int,
148
+ stride: int,
149
+ expand_ratio: Union[int, float],
150
+ skip_connection: Optional[bool] = True,
151
+ ) -> None:
152
+ assert stride in [1, 2]
153
+ hidden_dim = make_divisible(int(round(in_channels * expand_ratio)), 8)
154
+
155
+ super().__init__()
156
+
157
+ block = nn.Sequential()
158
+ if expand_ratio != 1:
159
+ block.add_module(
160
+ name="exp_1x1",
161
+ module=ConvLayer(
162
+ in_channels=in_channels,
163
+ out_channels=hidden_dim,
164
+ kernel_size=1
165
+ ),
166
+ )
167
+
168
+ block.add_module(
169
+ name="conv_3x3",
170
+ module=ConvLayer(
171
+ in_channels=hidden_dim,
172
+ out_channels=hidden_dim,
173
+ stride=stride,
174
+ kernel_size=3,
175
+ groups=hidden_dim
176
+ ),
177
+ )
178
+
179
+ block.add_module(
180
+ name="red_1x1",
181
+ module=ConvLayer(
182
+ in_channels=hidden_dim,
183
+ out_channels=out_channels,
184
+ kernel_size=1,
185
+ use_act=False,
186
+ use_norm=True,
187
+ ),
188
+ )
189
+
190
+ self.block = block
191
+ self.in_channels = in_channels
192
+ self.out_channels = out_channels
193
+ self.exp = expand_ratio
194
+ self.stride = stride
195
+ self.use_res_connect = (
196
+ self.stride == 1 and in_channels == out_channels and skip_connection
197
+ )
198
+
199
+ def forward(self, x: Tensor, *args, **kwargs) -> Tensor:
200
+ if self.use_res_connect:
201
+ return x + self.block(x)
202
+ else:
203
+ return self.block(x)
204
+
205
+
206
+ class MobileViTBlock(nn.Module):
207
+ """
208
+ This class defines the `MobileViT block <https://arxiv.org/abs/2110.02178?context=cs.LG>`_
209
+
210
+ Args:
211
+ opts: command line arguments
212
+ in_channels (int): :math:`C_{in}` from an expected input of size :math:`(N, C_{in}, H, W)`
213
+ transformer_dim (int): Input dimension to the transformer unit
214
+ ffn_dim (int): Dimension of the FFN block
215
+ n_transformer_blocks (int): Number of transformer blocks. Default: 2
216
+ head_dim (int): Head dimension in the multi-head attention. Default: 32
217
+ attn_dropout (float): Dropout in multi-head attention. Default: 0.0
218
+ dropout (float): Dropout rate. Default: 0.0
219
+ ffn_dropout (float): Dropout between FFN layers in transformer. Default: 0.0
220
+ patch_h (int): Patch height for unfolding operation. Default: 8
221
+ patch_w (int): Patch width for unfolding operation. Default: 8
222
+ transformer_norm_layer (Optional[str]): Normalization layer in the transformer block. Default: layer_norm
223
+ conv_ksize (int): Kernel size to learn local representations in MobileViT block. Default: 3
224
+ no_fusion (Optional[bool]): Do not combine the input and output feature maps. Default: False
225
+ """
226
+
227
+ def __init__(
228
+ self,
229
+ in_channels: int,
230
+ transformer_dim: int,
231
+ ffn_dim: int,
232
+ n_transformer_blocks: int = 2,
233
+ head_dim: int = 32,
234
+ attn_dropout: float = 0.0,
235
+ dropout: float = 0.0,
236
+ ffn_dropout: float = 0.0,
237
+ patch_h: int = 8,
238
+ patch_w: int = 8,
239
+ conv_ksize: Optional[int] = 3,
240
+ *args,
241
+ **kwargs
242
+ ) -> None:
243
+ super().__init__()
244
+
245
+ conv_3x3_in = ConvLayer(
246
+ in_channels=in_channels,
247
+ out_channels=in_channels,
248
+ kernel_size=conv_ksize,
249
+ stride=1
250
+ )
251
+ conv_1x1_in = ConvLayer(
252
+ in_channels=in_channels,
253
+ out_channels=transformer_dim,
254
+ kernel_size=1,
255
+ stride=1,
256
+ use_norm=False,
257
+ use_act=False
258
+ )
259
+
260
+ conv_1x1_out = ConvLayer(
261
+ in_channels=transformer_dim,
262
+ out_channels=in_channels,
263
+ kernel_size=1,
264
+ stride=1
265
+ )
266
+ conv_3x3_out = ConvLayer(
267
+ in_channels=2 * in_channels,
268
+ out_channels=in_channels,
269
+ kernel_size=conv_ksize,
270
+ stride=1
271
+ )
272
+
273
+ self.local_rep = nn.Sequential()
274
+ self.local_rep.add_module(name="conv_3x3", module=conv_3x3_in)
275
+ self.local_rep.add_module(name="conv_1x1", module=conv_1x1_in)
276
+
277
+ assert transformer_dim % head_dim == 0
278
+ num_heads = transformer_dim // head_dim
279
+
280
+ global_rep = [
281
+ TransformerEncoder(
282
+ embed_dim=transformer_dim,
283
+ ffn_latent_dim=ffn_dim,
284
+ num_heads=num_heads,
285
+ attn_dropout=attn_dropout,
286
+ dropout=dropout,
287
+ ffn_dropout=ffn_dropout
288
+ )
289
+ for _ in range(n_transformer_blocks)
290
+ ]
291
+ global_rep.append(nn.LayerNorm(transformer_dim))
292
+ self.global_rep = nn.Sequential(*global_rep)
293
+
294
+ self.conv_proj = conv_1x1_out
295
+ self.fusion = conv_3x3_out
296
+
297
+ self.patch_h = patch_h
298
+ self.patch_w = patch_w
299
+ self.patch_area = self.patch_w * self.patch_h
300
+
301
+ self.cnn_in_dim = in_channels
302
+ self.cnn_out_dim = transformer_dim
303
+ self.n_heads = num_heads
304
+ self.ffn_dim = ffn_dim
305
+ self.dropout = dropout
306
+ self.attn_dropout = attn_dropout
307
+ self.ffn_dropout = ffn_dropout
308
+ self.n_blocks = n_transformer_blocks
309
+ self.conv_ksize = conv_ksize
310
+
311
+ def unfolding(self, x: Tensor) -> Tuple[Tensor, Dict]:
312
+ patch_w, patch_h = self.patch_w, self.patch_h
313
+ patch_area = patch_w * patch_h
314
+ batch_size, in_channels, orig_h, orig_w = x.shape
315
+
316
+ new_h = int(math.ceil(orig_h / self.patch_h) * self.patch_h)
317
+ new_w = int(math.ceil(orig_w / self.patch_w) * self.patch_w)
318
+
319
+ interpolate = False
320
+ if new_w != orig_w or new_h != orig_h:
321
+ # Note: Padding can be done, but then it needs to be handled in attention function.
322
+ x = F.interpolate(x, size=(new_h, new_w), mode="bilinear", align_corners=False)
323
+ interpolate = True
324
+
325
+ # number of patches along width and height
326
+ num_patch_w = new_w // patch_w # n_w
327
+ num_patch_h = new_h // patch_h # n_h
328
+ num_patches = num_patch_h * num_patch_w # N
329
+
330
+ # [B, C, H, W] -> [B * C * n_h, p_h, n_w, p_w]
331
+ x = x.reshape(batch_size * in_channels * num_patch_h, patch_h, num_patch_w, patch_w)
332
+ # [B * C * n_h, p_h, n_w, p_w] -> [B * C * n_h, n_w, p_h, p_w]
333
+ x = x.transpose(1, 2)
334
+ # [B * C * n_h, n_w, p_h, p_w] -> [B, C, N, P] where P = p_h * p_w and N = n_h * n_w
335
+ x = x.reshape(batch_size, in_channels, num_patches, patch_area)
336
+ # [B, C, N, P] -> [B, P, N, C]
337
+ x = x.transpose(1, 3)
338
+ # [B, P, N, C] -> [BP, N, C]
339
+ x = x.reshape(batch_size * patch_area, num_patches, -1)
340
+
341
+ info_dict = {
342
+ "orig_size": (orig_h, orig_w),
343
+ "batch_size": batch_size,
344
+ "interpolate": interpolate,
345
+ "total_patches": num_patches,
346
+ "num_patches_w": num_patch_w,
347
+ "num_patches_h": num_patch_h,
348
+ }
349
+
350
+ return x, info_dict
351
+
352
+ def folding(self, x: Tensor, info_dict: Dict) -> Tensor:
353
+ n_dim = x.dim()
354
+ assert n_dim == 3, "Tensor should be of shape BPxNxC. Got: {}".format(
355
+ x.shape
356
+ )
357
+ # [BP, N, C] --> [B, P, N, C]
358
+ x = x.contiguous().view(
359
+ info_dict["batch_size"], self.patch_area, info_dict["total_patches"], -1
360
+ )
361
+
362
+ batch_size, pixels, num_patches, channels = x.size()
363
+ num_patch_h = info_dict["num_patches_h"]
364
+ num_patch_w = info_dict["num_patches_w"]
365
+
366
+ # [B, P, N, C] -> [B, C, N, P]
367
+ x = x.transpose(1, 3)
368
+ # [B, C, N, P] -> [B*C*n_h, n_w, p_h, p_w]
369
+ x = x.reshape(batch_size * channels * num_patch_h, num_patch_w, self.patch_h, self.patch_w)
370
+ # [B*C*n_h, n_w, p_h, p_w] -> [B*C*n_h, p_h, n_w, p_w]
371
+ x = x.transpose(1, 2)
372
+ # [B*C*n_h, p_h, n_w, p_w] -> [B, C, H, W]
373
+ x = x.reshape(batch_size, channels, num_patch_h * self.patch_h, num_patch_w * self.patch_w)
374
+ if info_dict["interpolate"]:
375
+ x = F.interpolate(
376
+ x,
377
+ size=info_dict["orig_size"],
378
+ mode="bilinear",
379
+ align_corners=False,
380
+ )
381
+ return x
382
+
383
+ def forward(self, x: Tensor) -> Tensor:
384
+ res = x
385
+
386
+ fm = self.local_rep(x)
387
+
388
+ # convert feature map to patches
389
+ patches, info_dict = self.unfolding(fm)
390
+
391
+ # learn global representations
392
+ for transformer_layer in self.global_rep:
393
+ patches = transformer_layer(patches)
394
+
395
+ # [B x Patch x Patches x C] -> [B x C x Patches x Patch]
396
+ fm = self.folding(x=patches, info_dict=info_dict)
397
+
398
+ fm = self.conv_proj(fm)
399
+
400
+ fm = self.fusion(torch.cat((res, fm), dim=1))
401
+ return fm
402
+
403
+
404
+ # Fuse the features from the 4th downsampling stage with those from the 5th downsampling stage
405
+ # The model is modified to predict from a single output branch
406
+ # S variant: the 4th downsampling stage outputs 128 channels, the 5th outputs 160 channels
407
+ class ResidualConvBlock(nn.Module):
408
+ def __init__(self,
409
+ in_channels: int,
410
+ middle_channels: int,
411
+ ) -> None:
412
+ super().__init__()
413
+ self.res_conv_block = nn.Sequential(
414
+ nn.Conv2d(in_channels=in_channels, out_channels=middle_channels,
415
+ kernel_size=(3, 3), stride=(1, 1), padding=1,
416
+ bias=False),
417
+ nn.BatchNorm2d(num_features=middle_channels),
418
+ nn.SiLU(),
419
+ nn.Conv2d(in_channels=middle_channels, out_channels=in_channels,
420
+ kernel_size=(3, 3), stride=(1, 1), padding=1,
421
+ bias=False),
422
+ nn.BatchNorm2d(num_features=in_channels),
423
+ )
424
+
425
+ def forward(self, x: Tensor) -> Tensor:
426
+ x = F.silu(self.res_conv_block(x) + x)
427
+ return x
428
+
429
+
430
+ class ConvSet(nn.Module):
431
+ def __init__(self,
432
+ in_channels: int,
433
+ middle_channels: int,
434
+ out_channels: int,
435
+ ) -> None:
436
+ super(ConvSet, self).__init__()
437
+ self.convset = nn.Sequential()
438
+ self.convset.add_module(name='res_conv_block1',
439
+ module=ResidualConvBlock(in_channels=in_channels, middle_channels=middle_channels))
440
+ self.convset.add_module(name='res_conv_block2',
441
+ module=ResidualConvBlock(in_channels=in_channels, middle_channels=middle_channels))
442
+ self.convset.add_module(name='res_conv_block3',
443
+ module=ResidualConvBlock(in_channels=in_channels, middle_channels=middle_channels))
444
+ self.convset.add_module(name='res_conv_block4',
445
+ module=ResidualConvBlock(in_channels=in_channels, middle_channels=middle_channels))
446
+ self.convset.add_module(name='conv_3x3',
447
+ module=nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
448
+ kernel_size=(3, 3), stride=(1, 1), padding=1,
449
+ bias=False))
450
+ self.convset.add_module(name='bn', module=nn.BatchNorm2d(num_features=out_channels))
451
+ self.convset.add_module(name='SiLU', module=nn.SiLU())
452
+
453
+ def forward(self, x: Tensor) -> Tensor:
454
+ x = self.convset(x)
455
+ return x
456
+
457
+
458
+ class ConvDownSampling(nn.Module):
459
+ def __init__(self,
460
+ in_channels: int,
461
+ out_channels: int,
462
+ ) -> None:
463
+ super().__init__()
464
+ self.convdownsampling = nn.Sequential()
465
+ # 3x3 convolution
466
+ self.convdownsampling.add_module(name='conv1_3x3', module=nn.Conv2d(in_channels=in_channels,
467
+ out_channels=2*in_channels,
468
+ kernel_size=(3, 3),
469
+ stride=(2, 2),
470
+ padding=1,
471
+ bias=False))
472
+ self.convdownsampling.add_module(name='bn1', module=nn.BatchNorm2d(num_features=2*in_channels))
473
+ self.convdownsampling.add_module(name='SiLU1', module=nn.SiLU())
474
+ # 3x3 convolution
475
+ self.convdownsampling.add_module(name='conv2_3x3', module=nn.Conv2d(in_channels=2*in_channels,
476
+ out_channels=out_channels,
477
+ kernel_size=(3, 3),
478
+ stride=(1, 1),
479
+ padding=1,
480
+ bias=False))
481
+ self.convdownsampling.add_module(name='bn2', module=nn.BatchNorm2d(num_features=out_channels))
482
+ self.convdownsampling.add_module(name='SiLU2', module=nn.SiLU())
483
+
484
+ def forward(self, x: Tensor) -> Tensor:
485
+ x = self.convdownsampling(x)
486
+ return x
487
+
488
+
489
+ class PositionalEncoding(nn.Module):
490
+ def __init__(self,
491
+ d_model: int,
492
+ max_len: int,
493
+ ) -> None:
494
+ super(PositionalEncoding, self).__init__()
495
+ pe = torch.zeros(max_len, d_model)
496
+ position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
497
+ div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
498
+ pe[:, 0::2] = torch.sin(position * div_term)
499
+ pe[:, 1::2] = torch.cos(position * div_term)
500
+ # # pe: [max_len, d_model] -> [1, max_len, d_model] -> [max_len, 1, d_model]
501
+ # pe = pe.unsqueeze(0).transpose(0, 1)
502
+ # pe: [max_len, d_model] -> [1, max_len, d_model]
503
+ pe = pe.unsqueeze(0)
504
+ # pe.requires_grad = False
505
+ self.register_buffer('pe', pe)
506
+
507
+ def forward(self, x: Tensor) -> Tensor:
508
+ # # x: [max_len, N, d_model]
509
+ # return x + self.pe[:x.size(0), :]
510
+ # x: [N, max_len, d_model]
511
+ # broadcast: pe: [1, max_len, d_model] -> [N, max_len, d_model]
512
+ return x + self.pe[:, :x.size(1), :]
513
+
514
+
515
+ class GlobalTransformerEncoder(nn.Module):
516
+ def __init__(self,
517
+ # in_channels: int,
518
+ d_model: int,
519
+ n_head: int,
520
+ ffn_dim: int,
521
+ encoder_layers: int,
522
+ ) -> None:
523
+ super().__init__()
524
+
525
+ # if in_channels != d_model:
526
+ # # 调整维度到d_model
527
+ # self.conv1_1x1 = nn.Conv2d(in_channels=in_channels, out_channels=d_model,
528
+ # kernel_size=(1, 1), stride=(1, 1), padding=0)
529
+ # # 调整维度到in_channels
530
+ # self.conv2_1x1 = nn.Conv2d(in_channels=d_model, out_channels=in_channels,
531
+ # kernel_size=(1, 1), stride=(1, 1), padding=0)
532
+
533
+ # Positional encoding
534
+ self.position_encoding = PositionalEncoding(d_model=d_model, max_len=169)
535
+ # Transformer encoder for the global representation
536
+ self.transformer_encoder_layer = nn.TransformerEncoderLayer(d_model=d_model, nhead=n_head,
537
+ dim_feedforward=ffn_dim,
538
+ batch_first=True,
539
+ norm_first=True)
540
+ self.transformer_encoder = nn.TransformerEncoder(self.transformer_encoder_layer,
541
+ num_layers=encoder_layers, norm=nn.LayerNorm(d_model))
542
+
543
+ def sequentialize(self, x: Tensor) -> Tuple[Tensor, dict]:
544
+ N, C, H, W = x.shape
545
+ # [N C H W] -> [N H W C]
546
+ x = x.permute(0, 2, 3, 1)
547
+ # [N H W C] -> [N H*W C]
548
+ x = x.reshape(N, H*W, C)
549
+ shape_dict = {
550
+ 'origin_N': N,
551
+ 'origin_C': C,
552
+ 'origin_H': H,
553
+ 'origin_W': W,
554
+ }
555
+ return x, shape_dict
556
+
557
+ def unsequentialize(self, x: Tensor, dim_dict: dict) -> Tensor:
558
+ # [N H*W C] -> [N H W C]
559
+ x = x.contiguous().view(dim_dict['origin_N'], dim_dict['origin_H'], dim_dict['origin_W'], dim_dict['origin_C'])
560
+ # [N H W C] -> [N C H W]
561
+ x = x.permute(0, 3, 1, 2)
562
+ return x
563
+
564
+ def forward(self, x: Tensor) -> Tensor:
565
+ # x = self.conv1_1x1(x)
566
+ x, shape_dict = self.sequentialize(x)
567
+ x = self.position_encoding(x)
568
+ x = self.transformer_encoder(x)
569
+ x = self.unsequentialize(x, dim_dict=shape_dict)
570
+ # x = self.conv2_1x1(x)
571
+ # x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
572
+ return x
573
+
574
+
575
+ # class TransposeConv(nn.Module):
576
+ # def __init__(self,
577
+ # in_channels,
578
+ # out_channels
579
+ # ):
580
+ # super(TransposeConv, self).__init__()
581
+ # self.transpose_conv = nn.Sequential()
582
+ # self.transpose_conv.add_module(name='conv_3x3', module=nn.Conv2d(in_channels=in_channels,
583
+ # out_channels=in_channels,
584
+ # kernel_size=(3, 3), stride=(1, 1), padding=1,
585
+ # bias=False))
586
+ # self.transpose_conv.add_module(name='bn1', module=nn.BatchNorm2d(in_channels))
587
+ # self.transpose_conv.add_module(name='SiLU1', module=nn.SiLU())
588
+ # self.transpose_conv.add_module(name='conv_1x1', module=nn.Conv2d(in_channels=in_channels,
589
+ # out_channels=out_channels,
590
+ # kernel_size=(1, 1), stride=(1, 1), padding=0,
591
+ # bias=False))
592
+ # self.transpose_conv.add_module(name='bn2', module=nn.BatchNorm2d(out_channels))
593
+ # self.transpose_conv.add_module(name='SiLU2', module=nn.SiLU())
594
+ # self.transpose_conv.add_module(name='transpose_conv', module=nn.ConvTranspose2d(in_channels=out_channels,
595
+ # out_channels=out_channels,
596
+ # kernel_size=(2, 2),
597
+ # stride=(2, 2), padding=(0, 0),
598
+ # output_padding=(0, 0)))
599
+ # self.transpose_conv.add_module(name='bn3', module=nn.BatchNorm2d(out_channels))
600
+ # self.transpose_conv.add_module(name='SiLU3', module=nn.SiLU())
601
+
602
+ # def forward(self, x):
603
+ # x = self.transpose_conv(x)
604
+ # return x
605
+
606
+
607
+ # class SpatialPyramidPoolingFast(nn.Module):
608
+ # def __init__(
609
+ # self,
610
+ # in_channels
611
+ # ):
612
+ # super(SpatialPyramidPoolingFast, self).__init__()
613
+ # # 降维1*1卷积
614
+ # self.conv1_1_1 = nn.Conv2d(in_channels=in_channels, out_channels=in_channels//4,
615
+ # kernel_size=(1, 1), stride=(1, 1),
616
+ # bias=False)
617
+ # self.bn1 = nn.BatchNorm2d(num_features=in_channels//4)
618
+ # self.SiLU1 = nn.SiLU()
619
+ # # 最大池化
620
+ # self.MaxPool_5_5 = nn.MaxPool2d(kernel_size=(5, 5), stride=(1, 1), padding=2)
621
+ # self.MaxPool1_3_3 = nn.MaxPool2d(kernel_size=(3, 3), stride=(1, 1), padding=1)
622
+ # self.MaxPool2_3_3 = nn.MaxPool2d(kernel_size=(3, 3), stride=(1, 1), padding=1)
623
+ # # 特征融合
624
+ # self.conv2_1_1 = nn.Conv2d(in_channels=in_channels, out_channels=in_channels,
625
+ # kernel_size=(1, 1), stride=(1, 1),
626
+ # bias=False)
627
+ # self.bn2 = nn.BatchNorm2d(num_features=in_channels)
628
+ # self.SiLU2 = nn.SiLU()
629
+
630
+ # def forward(self, x):
631
+ # x1 = self.conv1_1_1(x)
632
+ # x1 = self.bn1(x1)
633
+ # x1 = self.SiLU1(x1)
634
+ # x2 = self.MaxPool_5_5(x1)
635
+ # x3 = self.MaxPool1_3_3(x2)
636
+ # x4 = self.MaxPool2_3_3(x3)
637
+ # x = torch.concat([x1, x2, x3, x4], dim=1)
638
+ # x = self.conv2_1_1(x)
639
+ # x = self.bn2(x)
640
+ # x = self.SiLU2(x)
641
+ # return x
642
+
643
+
644
+ class Detector(nn.Module):
645
+ def __init__(self,
646
+ in_channels: int,
647
+ middle_channels: int,
648
+ out_channels: int,
649
+ ) -> None:
650
+ super(Detector, self).__init__()
651
+ self.detector = nn.Sequential()
652
+ self.detector.add_module(name='res_conv_block1',
653
+ module=ResidualConvBlock(in_channels=in_channels, middle_channels=middle_channels))
654
+ self.detector.add_module(name='res_conv_block2',
655
+ module=ResidualConvBlock(in_channels=in_channels, middle_channels=middle_channels))
656
+ self.detector.add_module(name='res_conv_block3',
657
+ module=ResidualConvBlock(in_channels=in_channels, middle_channels=middle_channels))
658
+ self.detector.add_module(name='res_conv_block4',
659
+ module=ResidualConvBlock(in_channels=in_channels, middle_channels=middle_channels))
660
+ self.detector.add_module(name='res_conv_block5',
661
+ module=ResidualConvBlock(in_channels=in_channels, middle_channels=middle_channels))
662
+ # output layer
663
+ self.detector.add_module(name='conv2_1x1', module=nn.Conv2d(in_channels=in_channels,
664
+ out_channels=out_channels,
665
+ kernel_size=(1, 1), stride=(1, 1), padding=0,
666
+ bias=True))
667
+
668
+ def forward(self, x: Tensor) -> Tensor:
669
+ x = self.detector(x)
670
+ return x
671
+
672
+
673
+ class MobileViT(nn.Module):
674
+ """
675
+ This class implements the `MobileViT architecture <https://arxiv.org/abs/2110.02178?context=cs.LG>`_
676
+ """
677
+ def __init__(self, model_cfg: Dict, num_classes: int = 1000):
678
+ super().__init__()
679
+
680
+ image_channels = 3
681
+ out_channels = 16
682
+
683
+ self.conv_1 = ConvLayer(
684
+ in_channels=image_channels,
685
+ out_channels=out_channels,
686
+ kernel_size=3,
687
+ stride=2
688
+ )
689
+
690
+ self.layer_1, out_channels = self._make_layer(input_channel=out_channels, cfg=model_cfg["layer1"])
691
+ self.layer_2, out_channels = self._make_layer(input_channel=out_channels, cfg=model_cfg["layer2"])
692
+ self.layer_3, out_channels = self._make_layer(input_channel=out_channels, cfg=model_cfg["layer3"])
693
+ self.layer_4, out_channels = self._make_layer(input_channel=out_channels, cfg=model_cfg["layer4"])
694
+ self.layer_5, out_channels = self._make_layer(input_channel=out_channels, cfg=model_cfg["layer5"])
695
+
696
+ # exp_channels = min(model_cfg["last_layer_exp_factor"] * out_channels, 960)
697
+ # self.conv_1x1_exp = ConvLayer(
698
+ # in_channels=out_channels,
699
+ # out_channels=exp_channels,
700
+ # kernel_size=1
701
+ # )
702
+ #
703
+ # self.classifier = nn.Sequential()
704
+ # self.classifier.add_module(name="global_pool", module=nn.AdaptiveAvgPool2d(1))
705
+ # self.classifier.add_module(name="flatten", module=nn.Flatten())
706
+ # if 0.0 < model_cfg["cls_dropout"] < 1.0:
707
+ # self.classifier.add_module(name="dropout", module=nn.Dropout(p=model_cfg["cls_dropout"]))
708
+ # self.classifier.add_module(name="fc", module=nn.Linear(in_features=exp_channels, out_features=num_classes))
709
+
710
+ # Feature-fusion and detection modules
711
+ self.conv_downsampling1 = ConvDownSampling(in_channels=96, out_channels=128)
712
+ self.convset1 = ConvSet(in_channels=256, middle_channels=128, out_channels=120)
713
+ self.conv_downsampling2 = ConvDownSampling(in_channels=120, out_channels=160)
714
+ self.convset2 = ConvSet(in_channels=320, middle_channels=160, out_channels=240)
715
+ self.transformer_encoder = GlobalTransformerEncoder(d_model=240, n_head=4, ffn_dim=480, encoder_layers=4)
716
+ self.detector = Detector(in_channels=240, middle_channels=120, out_channels=num_anchors*6)
717
+
718
+ # weight init
719
+ self.apply(self.init_parameters)
720
+
721
+ def _make_layer(self, input_channel, cfg: Dict) -> Tuple[nn.Sequential, int]:
722
+ block_type = cfg.get("block_type", "mobilevit")
723
+ if block_type.lower() == "mobilevit":
724
+ return self._make_mit_layer(input_channel=input_channel, cfg=cfg)
725
+ else:
726
+ return self._make_mobilenet_layer(input_channel=input_channel, cfg=cfg)
727
+
728
+ @staticmethod
729
+ def _make_mobilenet_layer(input_channel: int, cfg: Dict) -> Tuple[nn.Sequential, int]:
730
+ output_channels = cfg.get("out_channels")
731
+ num_blocks = cfg.get("num_blocks", 2)
732
+ expand_ratio = cfg.get("expand_ratio", 4)
733
+ block = []
734
+
735
+ for i in range(num_blocks):
736
+ stride = cfg.get("stride", 1) if i == 0 else 1
737
+
738
+ layer = InvertedResidual(
739
+ in_channels=input_channel,
740
+ out_channels=output_channels,
741
+ stride=stride,
742
+ expand_ratio=expand_ratio
743
+ )
744
+ block.append(layer)
745
+ input_channel = output_channels
746
+
747
+ return nn.Sequential(*block), input_channel
748
+
749
+ @staticmethod
750
+ def _make_mit_layer(input_channel: int, cfg: Dict) -> Tuple[nn.Sequential, int]:
751
+ stride = cfg.get("stride", 1)
752
+ block = []
753
+
754
+ if stride == 2:
755
+ layer = InvertedResidual(
756
+ in_channels=input_channel,
757
+ out_channels=cfg.get("out_channels"),
758
+ stride=stride,
759
+ expand_ratio=cfg.get("mv_expand_ratio", 4)
760
+ )
761
+
762
+ block.append(layer)
763
+ input_channel = cfg.get("out_channels")
764
+
765
+ transformer_dim = cfg["transformer_channels"]
766
+ ffn_dim = cfg.get("ffn_dim")
767
+ num_heads = cfg.get("num_heads", 4)
768
+ head_dim = transformer_dim // num_heads
769
+
770
+ if transformer_dim % head_dim != 0:
771
+ raise ValueError("Transformer input dimension should be divisible by head dimension. "
772
+ "Got {} and {}.".format(transformer_dim, head_dim))
773
+
774
+ block.append(MobileViTBlock(
775
+ in_channels=input_channel,
776
+ transformer_dim=transformer_dim,
777
+ ffn_dim=ffn_dim,
778
+ n_transformer_blocks=cfg.get("transformer_blocks", 1),
779
+ patch_h=cfg.get("patch_h", 2),
780
+ patch_w=cfg.get("patch_w", 2),
781
+ dropout=cfg.get("dropout", 0.1),
782
+ ffn_dropout=cfg.get("ffn_dropout", 0.0),
783
+ attn_dropout=cfg.get("attn_dropout", 0.1),
784
+ head_dim=head_dim,
785
+ conv_ksize=3
786
+ ))
787
+
788
+ return nn.Sequential(*block), input_channel
789
+
790
+ @staticmethod
791
+ def init_parameters(m):
792
+ if isinstance(m, nn.Conv2d):
793
+ if m.weight is not None:
794
+ nn.init.kaiming_normal_(m.weight, mode="fan_out")
795
+ if m.bias is not None:
796
+ nn.init.zeros_(m.bias)
797
+ elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
798
+ if m.weight is not None:
799
+ nn.init.ones_(m.weight)
800
+ if m.bias is not None:
801
+ nn.init.zeros_(m.bias)
802
+ elif isinstance(m, (nn.Linear,)):
803
+ if m.weight is not None:
804
+ nn.init.trunc_normal_(m.weight, mean=0.0, std=0.02)
805
+ if m.bias is not None:
806
+ nn.init.zeros_(m.bias)
807
+ else:
808
+ pass
809
+
810
+ def forward(self, x: Tensor) -> Tensor:
811
+ x = self.conv_1(x)
812
+ x = self.layer_1(x)
813
+ x = self.layer_2(x)
814
+
815
+ x = self.layer_3(x) # downsampling rate 8, output size: 52x52x96
816
+ x1 = self.layer_4(x) # downsampling rate 16, output size: 26x26x128
817
+ x2 = self.layer_5(x1) # downsampling rate 32, output size: 13x13x160
818
+
819
+ x = self.conv_downsampling1(x)
820
+ x = torch.concat([x, x1], dim=1)
821
+ x = self.convset1(x)
822
+ x = self.conv_downsampling2(x)
823
+ x = torch.concat([x, x2], dim=1)
824
+ x = self.convset2(x)
825
+ x = self.transformer_encoder(x)
826
+ x = self.detector(x) # to extract the penultimate layer's feature map, comment out this line and use the code below
827
+ # for i, module in enumerate(self.detector.detector):
828
+ # x = module(x)
829
+ #
830
+ # if i == 4:
831
+ # # print(module)
832
+ # break
833
+ return x
834
+
835
+
836
+ def mobile_vit_xx_small(num_classes: int = 1000):
837
+ # pretrain weight link
838
+ # https://docs-assets.developer.apple.com/ml-research/models/cvnets/classification/mobilevit_xxs.pt
839
+ config = get_config("xx_small")
840
+ m = MobileViT(config, num_classes=num_classes)
841
+ return m
842
+
843
+
844
+ def mobile_vit_x_small(num_classes: int = 1000):
845
+ # pretrain weight link
846
+ # https://docs-assets.developer.apple.com/ml-research/models/cvnets/classification/mobilevit_xs.pt
847
+ config = get_config("x_small")
848
+ m = MobileViT(config, num_classes=num_classes)
849
+ return m
850
+
851
+
852
+ def mobile_vit_small(num_classes: int = 1000):
853
+ # pretrain weight link
854
+ # https://docs-assets.developer.apple.com/ml-research/models/cvnets/classification/mobilevit_s.pt
855
+ config = get_config("small")
856
+ m = MobileViT(config, num_classes=num_classes)
857
+ return m
858
+
859
+
860
+ def get_model():
861
+ model = mobile_vit_small()
862
+ return model
863
+
864
+
865
+ if __name__ == '__main__':
866
+ def count_parameters(model):
867
+ return sum(p.numel() for p in model.parameters() if p.requires_grad)
868
+
869
+ model = get_model()
870
+ x = torch.randn(1, 3, 416, 416)
871
+ params = count_parameters(model)
872
+ flops, parameters = profile(model, inputs=(x,))
873
+ total_params = sum(p.numel() for p in model.parameters())
874
+ print(x.shape)
875
+ print(model(x).shape)
876
+ print(summary(model, input_size=(1, 3, 416, 416)))
877
+ print(f"Total Params: {total_params / 1e6:.2f} M")
878
+ print(f"Total trainable Params: {params / 1e6:.2f} M")
879
+ print(f"FLOPs: {flops / 1e9:.2f} GFLOPs")
880
+
881
+ # print(model(x)[1].shape)
882
+ # print(model)
883
+
884
+
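A minimal sketch of the sinusoidal table built by PositionalEncoding and of how it is broadcast over a batch in forward; d_model and max_len below are illustrative and much smaller than the values used in the model:

import math
import torch

d_model, max_len = 8, 4                         # illustrative sizes
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)    # even channels: sine
pe[:, 1::2] = torch.cos(position * div_term)    # odd channels: cosine
pe = pe.unsqueeze(0)                            # [1, max_len, d_model], broadcast over the batch

x = torch.zeros(2, 3, d_model)                  # a batch of two 3-token sequences
print((x + pe[:, :x.size(1), :]).shape)         # torch.Size([2, 3, 8])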
model_config.py ADDED
@@ -0,0 +1,176 @@
1
+ def get_config(mode: str = "xx_small") -> dict:
2
+ if mode == "xx_small":
3
+ mv2_exp_mult = 2
4
+ config = {
5
+ "layer1": {
6
+ "out_channels": 16,
7
+ "expand_ratio": mv2_exp_mult,
8
+ "num_blocks": 1,
9
+ "stride": 1,
10
+ "block_type": "mv2",
11
+ },
12
+ "layer2": {
13
+ "out_channels": 24,
14
+ "expand_ratio": mv2_exp_mult,
15
+ "num_blocks": 3,
16
+ "stride": 2,
17
+ "block_type": "mv2",
18
+ },
19
+ "layer3": { # 28x28
20
+ "out_channels": 48,
21
+ "transformer_channels": 64,
22
+ "ffn_dim": 128,
23
+ "transformer_blocks": 2,
24
+ "patch_h": 2, # 8,
25
+ "patch_w": 2, # 8,
26
+ "stride": 2,
27
+ "mv_expand_ratio": mv2_exp_mult,
28
+ "num_heads": 4,
29
+ "block_type": "mobilevit",
30
+ },
31
+ "layer4": { # 14x14
32
+ "out_channels": 64,
33
+ "transformer_channels": 80,
34
+ "ffn_dim": 160,
35
+ "transformer_blocks": 4,
36
+ "patch_h": 2, # 4,
37
+ "patch_w": 2, # 4,
38
+ "stride": 2,
39
+ "mv_expand_ratio": mv2_exp_mult,
40
+ "num_heads": 4,
41
+ "block_type": "mobilevit",
42
+ },
43
+ "layer5": { # 7x7
44
+ "out_channels": 80,
45
+ "transformer_channels": 96,
46
+ "ffn_dim": 192,
47
+ "transformer_blocks": 3,
48
+ "patch_h": 2,
49
+ "patch_w": 2,
50
+ "stride": 2,
51
+ "mv_expand_ratio": mv2_exp_mult,
52
+ "num_heads": 4,
53
+ "block_type": "mobilevit",
54
+ },
55
+ "last_layer_exp_factor": 4,
56
+ "cls_dropout": 0.1
57
+ }
58
+ elif mode == "x_small":
59
+ mv2_exp_mult = 4
60
+ config = {
61
+ "layer1": {
62
+ "out_channels": 32,
63
+ "expand_ratio": mv2_exp_mult,
64
+ "num_blocks": 1,
65
+ "stride": 1,
66
+ "block_type": "mv2",
67
+ },
68
+ "layer2": {
69
+ "out_channels": 48,
70
+ "expand_ratio": mv2_exp_mult,
71
+ "num_blocks": 3,
72
+ "stride": 2,
73
+ "block_type": "mv2",
74
+ },
75
+ "layer3": { # 28x28
76
+ "out_channels": 64,
77
+ "transformer_channels": 96,
78
+ "ffn_dim": 192,
79
+ "transformer_blocks": 2,
80
+ "patch_h": 2,
81
+ "patch_w": 2,
82
+ "stride": 2,
83
+ "mv_expand_ratio": mv2_exp_mult,
84
+ "num_heads": 4,
85
+ "block_type": "mobilevit",
86
+ },
87
+ "layer4": { # 14x14
88
+ "out_channels": 80,
89
+ "transformer_channels": 120,
90
+ "ffn_dim": 240,
91
+ "transformer_blocks": 4,
92
+ "patch_h": 2,
93
+ "patch_w": 2,
94
+ "stride": 2,
95
+ "mv_expand_ratio": mv2_exp_mult,
96
+ "num_heads": 4,
97
+ "block_type": "mobilevit",
98
+ },
99
+ "layer5": { # 7x7
100
+ "out_channels": 96,
101
+ "transformer_channels": 144,
102
+ "ffn_dim": 288,
103
+ "transformer_blocks": 3,
104
+ "patch_h": 2,
105
+ "patch_w": 2,
106
+ "stride": 2,
107
+ "mv_expand_ratio": mv2_exp_mult,
108
+ "num_heads": 4,
109
+ "block_type": "mobilevit",
110
+ },
111
+ "last_layer_exp_factor": 4,
112
+ "cls_dropout": 0.1
113
+ }
114
+ elif mode == "small":
115
+ mv2_exp_mult = 4
116
+ config = {
117
+ "layer1": {
118
+ "out_channels": 32,
119
+ "expand_ratio": mv2_exp_mult,
120
+ "num_blocks": 1,
121
+ "stride": 1,
122
+ "block_type": "mv2",
123
+ },
124
+ "layer2": {
125
+ "out_channels": 64,
126
+ "expand_ratio": mv2_exp_mult,
127
+ "num_blocks": 3,
128
+ "stride": 2,
129
+ "block_type": "mv2",
130
+ },
131
+ "layer3": { # 28x28
132
+ "out_channels": 96,
133
+ "transformer_channels": 144,
134
+ "ffn_dim": 288,
135
+ "transformer_blocks": 2,
136
+ "patch_h": 2,
137
+ "patch_w": 2,
138
+ "stride": 2,
139
+ "mv_expand_ratio": mv2_exp_mult,
140
+ "num_heads": 4,
141
+ "block_type": "mobilevit",
142
+ },
143
+ "layer4": { # 14x14
144
+ "out_channels": 128,
145
+ "transformer_channels": 192,
146
+ "ffn_dim": 384,
147
+ "transformer_blocks": 4,
148
+ "patch_h": 2,
149
+ "patch_w": 2,
150
+ "stride": 2,
151
+ "mv_expand_ratio": mv2_exp_mult,
152
+ "num_heads": 4,
153
+ "block_type": "mobilevit",
154
+ },
155
+ "layer5": { # 7x7
156
+ "out_channels": 160,
157
+ "transformer_channels": 240,
158
+ "ffn_dim": 480,
159
+ "transformer_blocks": 3,
160
+ "patch_h": 2,
161
+ "patch_w": 2,
162
+ "stride": 2,
163
+ "mv_expand_ratio": mv2_exp_mult,
164
+ "num_heads": 4,
165
+ "block_type": "mobilevit",
166
+ },
167
+ "last_layer_exp_factor": 4,
168
+ "cls_dropout": 0.1
169
+ }
170
+ else:
171
+ raise NotImplementedError
172
+
173
+ for k in ["layer1", "layer2", "layer3", "layer4", "layer5"]:
174
+ config[k].update({"dropout": 0.1, "ffn_dropout": 0.0, "attn_dropout": 0.0})
175
+
176
+ return config
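Quick usage sketch: get_config returns a plain dictionary, so the layer settings consumed by MobileViT._make_layer can be inspected directly.

from model_config import get_config

cfg = get_config("small")
print(cfg["layer5"]["out_channels"])           # 160
print(cfg["layer5"]["transformer_channels"])   # 240
print(cfg["layer5"]["ffn_dim"])                # 480
print(cfg["layer3"]["dropout"])                # 0.1, added by the update loop at the end of get_config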
picture/RAGT.pdf ADDED
Binary file (32.4 kB). View file
 
picture/RAGT.png ADDED

Git LFS Details

  • SHA256: 4ea58184a982e13ac9a47dda7d31d77c6d917ebfbf4fb182717f227290117a28
  • Pointer size: 130 Bytes
  • Size of remote file: 83.7 kB
picture/RARA.pdf ADDED
Binary file (33.3 kB). View file
 
picture/RARA.png ADDED

Git LFS Details

  • SHA256: f3cca629185e2634af84054fcb5c9f414828953b564aacb567aede0015f3db4a
  • Pointer size: 131 Bytes
  • Size of remote file: 111 kB
picture/RAST.pdf ADDED
Binary file (60 kB). View file
 
picture/RAST.png ADDED

Git LFS Details

  • SHA256: 96d3ca73b57c29067c10ed34f4ec813d3fbcc22539a573e3812b15d78315f470
  • Pointer size: 131 Bytes
  • Size of remote file: 118 kB
picture/annotation.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:95c55ae4482808842c8ed21462f0e2aaf732ecf7781c9ba1453bfd0e43e0240e
3
+ size 232548
picture/annotation.png ADDED

Git LFS Details

  • SHA256: 002da410bf1781d78789479fce3a48761631218e98b6638ef1686fee6a2cc2c1
  • Pointer size: 132 Bytes
  • Size of remote file: 1.4 MB
picture/dataset.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:10fb99585cd813e2167117047243c4715801c4a7cf093f4cf72d48ca1176ab83
3
+ size 514676
picture/dataset.png ADDED

Git LFS Details

  • SHA256: 2852713dc0b09b43dcfb00cd71ba75e0feb7914585070e3c37b07d4d9cc6bf81
  • Pointer size: 132 Bytes
  • Size of remote file: 2.81 MB
picture/detected-multi-obj.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1af8550c71fee1a235dc483971d946ef6963fb0c7e38874c5b7f91232aa5534a
3
+ size 107762
picture/detected-multi-obj.png ADDED

Git LFS Details

  • SHA256: def1e07f889bc288574fcfe0417e567978821e24490448ce18aa02a6c4ab105e
  • Pointer size: 131 Bytes
  • Size of remote file: 906 kB
picture/detected-single-obj.pdf ADDED
Binary file (75.3 kB). View file
 
picture/detected-single-obj.png ADDED

Git LFS Details

  • SHA256: b15b34465635253ecb8eceadb3ec1e61fbaff4f3d30abda26ec6346112864310
  • Pointer size: 131 Bytes
  • Size of remote file: 429 kB
pretrained_weights/mobilevit_s.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:96355cfd384c528756eef9b56030dccbcfebaaa0534a8187810cba668f8b085d
3
+ size 22492254
similarity.py ADDED
@@ -0,0 +1,76 @@
1
+ import torch
2
+ from torch import nn
3
+ from model import get_model
4
+ import glob
5
+ import os
6
+ import torch.nn.functional as F
7
+ from torchvision import transforms
8
+ import cv2
9
+ import numpy as np
10
+
11
+
12
+ transform = transforms.Compose([
13
+ transforms.ToTensor(),
14
+ ])
15
+
16
+
17
+ def cosine_similarity(v, M):
18
+ # norm of vector v
19
+ v_norm = np.linalg.norm(v)
20
+
21
+ # norm of each column of matrix M
22
+ M_norm = np.linalg.norm(M, axis=0)
23
+
24
+ # dot product of v with each column of M
25
+ dot_product = np.dot(v, M)
26
+
27
+ # cosine similarity
28
+ similarity = dot_product / (v_norm * M_norm)
29
+
30
+ return similarity
31
+
32
+
33
+ if __name__ == '__main__':
34
+ weights_path = 'weights/epoch6_loss_8.045684943666645.pth'
35
+ img_dir = r'J:\experiment_data\0.1 test\test_img'
36
+ target_img_index = 500
37
+
38
+ img_path = glob.glob(img_dir + os.sep + '*.png')
39
+ model = get_model()
40
+ model.load_state_dict(torch.load(weights_path))
41
+ model.eval()
42
+
43
+ vectors = []
44
+ for i in img_path:
45
+ print(i)
46
+ img = cv2.imread(i, -1)
47
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
48
+ img = transform(img).unsqueeze(0)
49
+ vector = model(img)
50
+ vector = vector.squeeze().reshape(-1).detach().numpy()
51
+ vector = vector.tolist()
52
+ vectors.append(vector)
53
+ vectors = np.array(vectors, dtype=np.float32).transpose()
54
+ print(f'Feature matrix shape:\n {vectors.shape}')
55
+
56
+ target_img = cv2.imread(img_path[target_img_index], -1)
57
+ target_img = cv2.cvtColor(target_img, cv2.COLOR_BGR2RGB)
58
+ target_img = transform(target_img).unsqueeze(0)
59
+ target_vector = model(target_img)
60
+ target_vector = target_vector.squeeze().reshape(1, -1).detach().numpy()
61
+ target_vector = target_vector.astype(np.float32)
62
+
63
+ cos_similarity = cosine_similarity(target_vector, vectors)
64
+ sorted_indices = np.argsort(-cos_similarity)
65
+ v_sorted = np.take(cos_similarity, sorted_indices)
66
+ # print(f'相似度向量:\n {v_sorted}')
67
+ # print(f'相似度序号向量:\n {sorted_indices}')
68
+ print(f'Sorted index array shape:\n {sorted_indices.shape}')
69
+
70
+ print(f'Top-10 similarities:\n {v_sorted[0, :10]}')
71
+ print(f'Top-10 most similar images:\n {sorted_indices[0, :10]}')
72
+
73
+ print(f'Bottom-10 similarities:\n {v_sorted[0, -10:]}')
74
+ print(f'Bottom-10 least similar images:\n {sorted_indices[0, -10:]}')
75
+
76
+
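A tiny numeric check of cosine_similarity: a query vector compared against one parallel and one orthogonal column (values are illustrative):

import numpy as np

v = np.array([[1.0, 0.0]])                 # query vector, shape (1, 2)
M = np.array([[2.0, 0.0],
              [0.0, 3.0]])                 # columns: (2, 0) and (0, 3)

v_norm = np.linalg.norm(v)
M_norm = np.linalg.norm(M, axis=0)
similarity = np.dot(v, M) / (v_norm * M_norm)
print(similarity)                          # [[1. 0.]] -> identical direction first, orthogonal second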
to_yolo_dataset.py ADDED
@@ -0,0 +1,99 @@
1
+ from torch.utils.data import Dataset
2
+ import numpy as np
3
+ from Anchor import anchor_w, anchor_h, theta_margin, Anchor_eps, num_anchors, num_grid_cell, field_of_grid_cell, anchor_thetas
4
+ from PIL import Image
5
+ from torchvision import transforms
6
+ import glob
7
+ from torch.utils.data import DataLoader
8
+ import pandas as pd
9
+ import math
10
+
11
+
12
+ def get_one_label(label_path):
13
+ # Each label row is ordered x, y, w, h, theta
14
+ # Each grid cell of the output corresponds to a field_of_grid_cell x field_of_grid_cell region of the input image
15
+ labels = np.loadtxt(label_path)
16
+ # labels = pd.read_csv(label_path, header=None, sep=' ').to_numpy()
17
+ # each box in tensor stores its targets in the order: confidence, tx, ty, tw, th, t_theta
18
+ tensor = np.zeros((num_grid_cell, num_grid_cell, num_anchors, 6))
19
+ for box in labels:
20
+ x = int(box[0] // field_of_grid_cell)
21
+ bx = (box[0] % field_of_grid_cell) / field_of_grid_cell
22
+ # bx = sigmoid(tx)
23
+ tx = math.log((bx + Anchor_eps) / (1 - bx))
24
+ y = int(box[1] // field_of_grid_cell)
25
+ by = (box[1] % field_of_grid_cell) / field_of_grid_cell
26
+ # by = sigmoid(ty)
27
+ ty = math.log((by + Anchor_eps) / (1 - by))
28
+ bw = box[2] / anchor_w
29
+ # bw = exp(tw)
30
+ tw = math.log(bw + Anchor_eps)
31
+ bh = box[3] / anchor_h
32
+ # bh = exp(th)
33
+ th = math.log(bh + Anchor_eps)
34
+ # theta is in radians here and must be converted to degrees
35
+ while box[4] >= 3.1415927:
36
+ box[4] -= 3.1415927
37
+ theta = box[4] / 3.1415927 * 180
38
+ theta_anchor_match = int(theta // theta_margin)
39
+ b_theta = (theta % theta_margin) / theta_margin
40
+ # b_theta = sigmoid(t_theta)
41
+ t_theta = math.log((b_theta + Anchor_eps) / (1 - b_theta))
42
+ # write the targets into the tensor
43
+ tensor[y][x][theta_anchor_match][0] = 1
44
+ tensor[y][x][theta_anchor_match][1] = tx
45
+ tensor[y][x][theta_anchor_match][2] = ty
46
+ tensor[y][x][theta_anchor_match][3] = tw
47
+ tensor[y][x][theta_anchor_match][4] = th
48
+ tensor[y][x][theta_anchor_match][5] = t_theta
49
+
50
+ tensor = tensor.astype(np.float32)
51
+ return tensor
52
+
53
+
54
+ def get_label(label_path):
55
+ labels = []
56
+ for i in label_path:
57
+ label = get_one_label(i)
58
+ labels.append(label)
59
+ return np.array(labels)
60
+
61
+
62
+ transform = transforms.Compose([
63
+ transforms.ToTensor()
64
+ ])
65
+
66
+
67
+ class YoloDataset(Dataset):
68
+ def __init__(self, img_path, label_path):
69
+ self.img_path = img_path
70
+ self.label_path = label_path
71
+
72
+ def __getitem__(self, index):
73
+ x = self.img_path[index]
74
+ x = Image.open(x)
75
+ x = x.convert('RGB')
76
+ x = transform(x)
77
+ y = self.label_path[index]
78
+ y = get_one_label(y)
79
+ return x, y
80
+
81
+ def __len__(self):
82
+ return len(self.img_path)
83
+
84
+
85
+ if __name__ == '__main__':
86
+ img_path = glob.glob(r'data\train_data\img\*.png')
87
+ label_path = glob.glob(r'data\train_data\label\*.txt')
88
+
89
+ dataset = YoloDataset(img_path, label_path)
90
+ dataloader = DataLoader(
91
+ dataset,
92
+ batch_size=4,
93
+ shuffle=False
94
+ )
95
+ img, label = next(iter(dataloader))
96
+ print(img.shape)
97
+ print(label.shape)
98
+ print(label[0][int(195.579085//32)][int(160.0963//32)])
99
+ # 160.0963 195.579085
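A minimal round-trip sketch of the encoding in get_one_label: the stored offsets recover the original box when pushed back through the sigmoid/exp decoding used in grasp_detect_singlebox.py. The sigmoid helper and the constants below are illustrative placeholders for the Anchor.py values:

import math

field, a_w, eps = 32, 80.0, 1e-6           # illustrative stand-ins for field_of_grid_cell, anchor_w, Anchor_eps
x, w = 195.5, 90.0                         # one labelled grasp centre x and width, in pixels

def sigmoid(t):
    return 1.0 / (1.0 + math.exp(-t))

# encode, as in get_one_label
cell_x, bx = int(x // field), (x % field) / field
tx = math.log((bx + eps) / (1 - bx))
tw = math.log(w / a_w + eps)

# decode, as in grasp_detect_singlebox.get_coordinate
x_rec = (cell_x + sigmoid(tx)) * field
w_rec = a_w * math.exp(tw)
print(round(x_rec, 3), round(w_rec, 3))    # 195.5 90.0 (up to the tiny eps)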