Commit a3281f6 · 1 Parent(s): b9cc655
Chris committed

Successfully generating results_dict!

Browse files:
- .gitignore +4 -0
- =1.12 +14 -14
- app.py +13 -6
- calculate_masks.py +0 -2
- calculate_measures.py +74 -45
- requirements.txt +1 -0
.gitignore CHANGED

```diff
@@ -4,3 +4,7 @@ output
 share
 input_img.jpg
 input_img.jpg
+app.py
+input_img.jpg
+requirements.txt
+__pycache__
```
=1.12 CHANGED

```diff
@@ -1,14 +1,14 @@
-Requirement already satisfied: xtcocotools in
-Requirement already satisfied:
-Requirement already satisfied: setuptools>=18.0 in
-Requirement already satisfied:
-Requirement already satisfied: matplotlib>=2.1.0 in
-Requirement already satisfied:
-Requirement already satisfied: fonttools>=4.22.0 in
-Requirement already satisfied:
-Requirement already satisfied: packaging>=20.0 in
-Requirement already satisfied:
-Requirement already satisfied:
-Requirement already satisfied:
-Requirement already satisfied: pillow>=
-Requirement already satisfied: six>=1.5 in
+Requirement already satisfied: xtcocotools in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (1.14.3)
+Requirement already satisfied: cython>=0.27.3 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from xtcocotools) (3.0.7)
+Requirement already satisfied: setuptools>=18.0 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from xtcocotools) (60.2.0)
+Requirement already satisfied: numpy>=1.20.0 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from xtcocotools) (1.26.3)
+Requirement already satisfied: matplotlib>=2.1.0 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from xtcocotools) (3.8.2)
+Requirement already satisfied: pyparsing>=2.3.1 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from matplotlib>=2.1.0->xtcocotools) (3.1.1)
+Requirement already satisfied: fonttools>=4.22.0 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from matplotlib>=2.1.0->xtcocotools) (4.47.0)
+Requirement already satisfied: cycler>=0.10 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from matplotlib>=2.1.0->xtcocotools) (0.12.1)
+Requirement already satisfied: packaging>=20.0 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from matplotlib>=2.1.0->xtcocotools) (23.2)
+Requirement already satisfied: python-dateutil>=2.7 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from matplotlib>=2.1.0->xtcocotools) (2.8.2)
+Requirement already satisfied: contourpy>=1.0.1 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from matplotlib>=2.1.0->xtcocotools) (1.2.0)
+Requirement already satisfied: kiwisolver>=1.3.1 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from matplotlib>=2.1.0->xtcocotools) (1.4.5)
+Requirement already satisfied: pillow>=8 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from matplotlib>=2.1.0->xtcocotools) (10.2.0)
+Requirement already satisfied: six>=1.5 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from python-dateutil>=2.7->matplotlib>=2.1.0->xtcocotools) (1.16.0)
```
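A file named `=1.12` is the classic by-product of an unquoted version specifier: in a POSIX shell, `pip install xtcocotools>=1.12` parses as `pip install xtcocotools` with stdout redirected into a file literally named `=1.12`, which matches the captured pip output above. A minimal sketch of a redirect-proof install, assuming pip is driven from Python (the package name is taken from the captured output):

```python
import subprocess
import sys

# The requirement is a single argv element, so no shell ever sees the ">="
# and nothing gets redirected into a file named "=1.12".
subprocess.run(
    [sys.executable, "-m", "pip", "install", "xtcocotools>=1.12"],
    check=True,
)
```

From an interactive shell, quoting works too: `pip install 'xtcocotools>=1.12'`.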
app.py CHANGED

```diff
@@ -11,15 +11,22 @@ os.system("pip install 'mmpose'")
 
 import gradio as gr
 
-def generate_output(
+def generate_output(front_img_path, side_img_path):
     # TODO: These file names will need to be unique in case of multiple requests at once, and they will need to be deleted after the function is done.
-    front_keypoint_result = predict_pose(
-    side_keypoint_result = predict_pose(
+    front_keypoint_result = predict_pose(front_img_path, "front.jpg")
+    side_keypoint_result = predict_pose(side_img_path, "side.jpg")
 
-
-
+    # Should we create the image separately? Seems weird to get it as a result from this only to use it in something else below.
+    front_image = front_keypoint_result[0]
+    side_image = side_keypoint_result[0]
 
-
+    front_keypoint_data = front_keypoint_result[1]
+    side_keypoint_data = side_keypoint_result[1]
+
+    front_seg_mask = calculate_seg_mask(front_img_path)
+    side_rcnn_mask = calculate_seg_mask(side_img_path)
+
+    measures = calculate_all_measures(front_image, side_image, front_keypoint_data, side_keypoint_data, front_seg_mask, side_rcnn_mask)
 
     return (front_keypoint_result[0], front_keypoint_result[1], side_keypoint_result[0], side_keypoint_result[1])
 
```
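`generate_output` now takes the two image paths directly and computes `measures`, though the return tuple is unchanged, so the measures are not yet surfaced in the UI. The `gr.Interface` wiring is not part of this commit; a minimal sketch of how it might look, with all component choices assumed:

```python
# Hypothetical wiring; the commit does not show the actual Interface call.
demo = gr.Interface(
    fn=generate_output,
    inputs=[
        gr.Image(type="filepath", label="Front view"),
        gr.Image(type="filepath", label="Side view"),
    ],
    outputs=[
        gr.Image(label="Front keypoints"),
        gr.JSON(label="Front keypoint data"),
        gr.Image(label="Side keypoints"),
        gr.JSON(label="Side keypoint data"),
    ],
)
demo.launch()
```

`type="filepath"` matches the new signature, since `generate_output` forwards the paths straight to `predict_pose` and `calculate_seg_mask`.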
calculate_masks.py CHANGED

```diff
@@ -3,8 +3,6 @@ from transformers import SegformerImageProcessor, AutoModelForSemanticSegmentation
 import torch.nn as nn
 
 def calculate_seg_mask(image):
-    image = Image.open(image).convert("RGB")
-
     processor = SegformerImageProcessor.from_pretrained("mattmdjaga/segformer_b2_clothes")
     model = AutoModelForSemanticSegmentation.from_pretrained("mattmdjaga/segformer_b2_clothes")
 
```
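With the `Image.open` call removed, `calculate_seg_mask` presumably expects an already-decoded image (transformers image processors such as `SegformerImageProcessor` accept PIL images, arrays, or tensors, not file paths), while app.py still passes `front_img_path`. A sketch of the decoded-image calling convention, assuming that is the intent:

```python
from PIL import Image

from calculate_masks import calculate_seg_mask

# Decode first, since calculate_seg_mask no longer opens the file itself.
img = Image.open("input_img.jpg").convert("RGB")
seg_mask = calculate_seg_mask(img)
```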
calculate_measures.py CHANGED

```diff
@@ -1,4 +1,5 @@
 import numpy as np
+import pandas as pd
 
 def get_width(mask, keypoint_y):
     pred_np = mask.numpy()
@@ -10,9 +11,35 @@
 
     return last_index-first_index
 
-def calculate_all_measures(front_keypoints, side_keypoints, front_seg_mask, side
+def calculate_bbox(original_image, keypoint_1, keypoint_2):
+    # Determine bounding box coordinates
+    min_x = 0
+    min_y = int(min(keypoint_1[1], keypoint_2[1]))
+    max_y = int(max(keypoint_1[1], keypoint_2[1]))
+    # Set max_x to be the width of the image
+    max_x = original_image.size[0]
+    return [min_x, max_x, min_y, max_y]
+
+def get_volume_result(mask_of_interest, original_image, max_x, min_x, max_y, min_y):
+    # thr = mask_of_interest[mask_of_interest > 0].mean()
+    thr = 0
+    if min_x < 0 or max_x > mask_of_interest.shape[1] or min_y < 0 or max_y > mask_of_interest.shape[0]:
+        print("Warning: Invalid indices for slicing.")
+    if min_x < max_x and min_y < max_y:
+        region_inside_bounding_box = mask_of_interest[min_y:max_y, min_x:max_x] > thr
+        return region_inside_bounding_box.sum()
+    else:
+        print("Invalid bounding box indices.")
+        return None
+
+def calculate_all_measures(front_image, side_image, front_keypoint_data, side_keypoint_data, front_seg_mask, side_rcnn_mask):
     results_dict = {}
 
+    front_keypoints = front_keypoint_data['keypoints']
+    front_keypoint_names = front_keypoint_data['keypoint_names']
+    side_keypoints = side_keypoint_data['keypoints']
+    side_keypoint_names = side_keypoint_data['keypoint_names']
+
     # calculate the body length
     # pick the longer from the two ankle keypoints on y coordinate
     side_body_length = side_keypoints[15][1] if side_keypoints[15][1] > side_keypoints[16][1] else side_keypoints[16][1]
@@ -56,49 +83,51 @@ def calculate_all_measures(front_keypoints, side_keypoints, front_seg_mask, side
     results_dict['waist_width'] = get_width(front_seg_mask, waist_y)
 
     # Calculate bounding box for thigh
-
-
-#
-
-
-#
-
-
-#
-
-
-
-
-#
-
-
-
-
-
-#
-#
-#
-#
-
-
-
-
-
+    right_knee = side_keypoints[side_keypoint_names.index("right_knee")]
+    right_hip = side_keypoints[side_keypoint_names.index("right_hip")]
+    # Calculate bounding box for torso
+    right_shoulder = side_keypoints[side_keypoint_names.index("right_shoulder")]
+
+    # Replace keypoints, keypoint_names, combined_mask, and original_image with your actual data
+    thigh_bbox = calculate_bbox(side_image, right_knee, right_hip)
+    torso_bbox = calculate_bbox(side_image, right_hip, right_shoulder)
+    # Calculate midpoint coordinates
+    torso_midpoint = [0, (right_hip[1] + right_shoulder[1]) / 2]
+    lower_torso_bbox = calculate_bbox(side_image, right_hip, torso_midpoint)
+    upper_torso_bbox = calculate_bbox(side_image, torso_midpoint, right_shoulder)
+
+    # Replace keypoints, keypoint_names, combined_mask, and original_image with your actual data
+    thigh_area = get_volume_result(side_rcnn_mask, side_image, thigh_bbox[1], thigh_bbox[0], thigh_bbox[3], thigh_bbox[2]) # Thigh volume
+    torso_area = get_volume_result(side_rcnn_mask, side_image, torso_bbox[1], torso_bbox[0], torso_bbox[3], torso_bbox[2]) # Torso volume
+    lower_torso_area = get_volume_result(side_rcnn_mask, side_image, lower_torso_bbox[1], lower_torso_bbox[0], lower_torso_bbox[3], lower_torso_bbox[2]) # Lower torso volume
+    upper_torso_area = get_volume_result(side_rcnn_mask, side_image, upper_torso_bbox[1], upper_torso_bbox[0], upper_torso_bbox[3], upper_torso_bbox[2]) # Upper torso volume
+    full_side_body_area = (side_rcnn_mask > 0).sum()
+    # print(f"Thigh area: {thigh_area}")
+    # print(f"Torso area: {torso_area}")
+    # print(f"Lower torso area: {lower_torso_area}")
+    # print(f"Upper torso area: {upper_torso_area}")
+    results_dict['thigh_area'] = thigh_area
+    results_dict['torso_area'] = torso_area
+    results_dict['lower_torso_area'] = lower_torso_area
+    results_dict['upper_torso_area'] = upper_torso_area
+    results_dict['full_side_body_area'] = full_side_body_area
 
     # # calculate ratios
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    results_dict['thigh_normalised'] = thigh_area / side_body_length
+    results_dict['torso_normalised'] = torso_area / side_body_length
+    results_dict['thigh_to_torso_ratio_normalised'] = results_dict['thigh_normalised'] / results_dict['torso_normalised']
+    results_dict['thigh_to_torso_ratio'] = thigh_area / torso_area
+    results_dict['upper_torso_normalised'] = upper_torso_area / side_body_length
+    results_dict['lower_torso_normalised'] = lower_torso_area / side_body_length
+    results_dict['upper_to_lower_torso_normalised_ratio'] = results_dict['upper_torso_normalised'] / results_dict['lower_torso_normalised']
+    results_dict['upper_to_lower_torso_ratio'] = upper_torso_area / lower_torso_area
+    results_dict['shoulder_to_hip_ratio'] = results_dict['shoulder_width'] / results_dict['hip_width']
+    results_dict['shoulder_to_waist_ratio'] = results_dict['shoulder_width'] / results_dict['waist_width']
+    results_dict['waist_to_hip_ratio'] = results_dict['waist_width'] / results_dict['hip_width']
+    results_dict['thigh_to_body_ratio'] = thigh_area / full_side_body_area
+    results_dict['upper_torso_to_body_ratio'] = upper_torso_area / full_side_body_area
+    results_dict['upper_torso_to_body_ratio'] = upper_torso_area / full_side_body_area
+
+    results_df = pd.DataFrame(results_dict)
+
+    return results_dict
```
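Despite the name, `get_volume_result` counts mask pixels in a full-width horizontal band between the two keypoints' y coordinates, so each "volume" is really a 2-D area. Also note that `pd.DataFrame(results_dict)` can raise `ValueError: If using all scalar values, you must pass an index` when every value is a scalar (`pd.DataFrame([results_dict])` sidesteps that), and `results_df` is currently unused since the function returns `results_dict`. A toy run of the new helpers, assuming the module imports as `calculate_measures`; the mask, image size, and keypoints below are made up for illustration:

```python
import torch
from PIL import Image

from calculate_measures import calculate_bbox, get_volume_result

# Synthetic 8x6 segmentation mask with a 6x3 block of "body" pixels.
mask = torch.zeros(8, 6)
mask[1:7, 2:5] = 1

img = Image.new("RGB", (6, 8))  # PIL size is (width, height)
right_hip = (3, 2.0)            # (x, y); y grows downward in image coords
right_knee = (3, 5.0)

# Full-width band between the hip and knee rows: [min_x, max_x, min_y, max_y]
bbox = calculate_bbox(img, right_knee, right_hip)

# Mask pixels inside the band: 3 rows (y=2..4) x 3 body columns = 9.
area = get_volume_result(mask, img, bbox[1], bbox[0], bbox[3], bbox[2])
print(bbox, int(area))  # [0, 6, 2, 5] 9
```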
requirements.txt CHANGED

```diff
@@ -18,3 +18,4 @@ thop~=0.1.1-2209072238
 timm~=0.9.2
 super-gradients~=3.2.0
 openmim
+transformers
```