Chayanat committed on
Commit 8f509db · verified · 1 Parent(s): ca4ead4
Files changed (1)
  1. app.py +434 -96
app.py CHANGED
@@ -1,6 +1,12 @@
 import numpy as np
 import gradio as gr
 import cv2

 from models.HybridGNet2IGSC import Hybrid
 from utils.utils import scipy_to_torch_sparse, genMatrixesLungsHeart
@@ -49,6 +55,80 @@ def getMasks(landmarks, h, w):
     return RL_mask, LL_mask, H_mask


 def calculate_image_tilt(landmarks):
     """Calculate image tilt angle based on lung symmetry"""
     RL = landmarks[0:44]  # Right lung
@@ -70,6 +150,85 @@ def calculate_image_tilt(landmarks):

     return angle_deg, rl_top, ll_top


 def rotate_points(points, angle_deg, center):
     """Rotate points around a center by given angle"""
     angle_rad = np.radians(-angle_deg)  # Negative to correct the tilt
@@ -301,27 +460,88 @@ def validate_landmarks_consistency(landmarks, original_landmarks, threshold=0.05
         LL = landmarks[44:94]
         H = landmarks[94:]

         rl_center_x = np.mean(RL[:, 0])
         ll_center_x = np.mean(LL[:, 0])
         h_center_x = np.mean(H[:, 0])

-        # Heart should be between lung centers
-        if not (min(rl_center_x, ll_center_x) <= h_center_x <= max(rl_center_x, ll_center_x)):
-            print("Warning: Heart position validation failed")
-            return False
-
-        # Check if total change is reasonable
         total_change = np.mean(np.linalg.norm(landmarks - original_landmarks, axis=1))
         relative_change = total_change / np.mean(np.linalg.norm(original_landmarks, axis=1))

-        if relative_change > threshold:
-            print(f"Warning: Landmarks changed by {relative_change:.3f}, exceeds threshold {threshold}")
-            return False
-
-        return True

     except Exception as e:
-        print(f"Error in landmark validation: {e}")
         return False

 def calculate_ctr_robust(landmarks, corrected_landmarks=None):
@@ -420,85 +640,140 @@ def calculate_ctr_robust(landmarks, corrected_landmarks=None):
     }


-def detect_image_rotation_advanced(img):
-    """Enhanced rotation detection using multiple methods"""
     try:
         angles = []

-        # Method 1: Edge-based detection with focus on spine/mediastinum
-        edges = cv2.Canny((img * 255).astype(np.uint8), 50, 150)
         h, w = img.shape

-        # Focus on central region where spine should be
-        spine_region = edges[h//4:3*h//4, w//3:2*w//3]

-        # Find strong vertical lines (spine alignment)
-        lines = cv2.HoughLines(spine_region, 1, np.pi/180, threshold=50)
-        if lines is not None:
-            for line in lines[:5]:  # Top 5 lines
-                rho, theta = line[0]
-                angle = np.degrees(theta) - 90
-                if abs(angle) < 30:  # Near vertical lines
-                    angles.append(angle)

-        # Method 2: Chest boundary detection
-        # Find chest outline using contours
         contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
         if contours:
-            # Get largest contour (chest boundary)
-            largest_contour = max(contours, key=cv2.contourArea)

-            # Fit ellipse to chest boundary
-            if len(largest_contour) >= 5:
-                ellipse = cv2.fitEllipse(largest_contour)
-                chest_angle = ellipse[2] - 90  # Convert to rotation angle
-                if abs(chest_angle) < 45:
-                    angles.append(chest_angle)
-
-        # Method 3: Template-based symmetry detection
-        # Check left-right symmetry
-        left_half = img[:, :w//2]
-        right_half = np.fliplr(img[:, w//2:])
-
-        # Try different rotation angles to find best symmetry
-        best_angle = 0
-        best_correlation = 0

-        for test_angle in range(-15, 16, 2):
-            if test_angle == 0:
-                test_left = left_half
-            else:
-                center = (left_half.shape[1]//2, left_half.shape[0]//2)
-                rotation_matrix = cv2.getRotationMatrix2D(center, test_angle, 1.0)
-                test_left = cv2.warpAffine(left_half, rotation_matrix,
-                                           (left_half.shape[1], left_half.shape[0]))

-            # Calculate correlation
-            correlation = cv2.matchTemplate(test_left, right_half, cv2.TM_CCOEFF_NORMED).max()
-            if correlation > best_correlation:
-                best_correlation = correlation
-                best_angle = test_angle

-        if best_correlation > 0.3:  # Good symmetry found
-            angles.append(best_angle)

-        # Combine all methods
-        if angles:
-            # Remove outliers using IQR
             angles = np.array(angles)
-            Q1, Q3 = np.percentile(angles, [25, 75])
-            IQR = Q3 - Q1
-            filtered_angles = angles[(angles >= Q1 - 1.5*IQR) & (angles <= Q3 + 1.5*IQR)]

-            if len(filtered_angles) > 0:
-                final_angle = np.median(filtered_angles)
-                return final_angle if abs(final_angle) > 1 else 0

-        return 0

     except Exception as e:
         print(f"Error in advanced rotation detection: {e}")
-        return 0

 def rotate_image(img, angle):
     """Rotate image by given angle"""
@@ -541,18 +816,35 @@ def segment(input_img):
         original_img = cv2.imread(input_img, 0) / 255.0
         original_shape = original_img.shape[:2]

-        # Step 1: Enhanced rotation detection (re-enabled)
-        detected_rotation = detect_image_rotation_advanced(original_img)
         was_rotated = False
         processing_img = original_img

-        # Step 2: Rotate image if significant rotation detected
-        if abs(detected_rotation) > 3:
             processing_img, actual_rotation = rotate_image(original_img, -detected_rotation)
             was_rotated = True
-            print(f"Applied rotation correction: {detected_rotation:.1f}°")
         else:
             actual_rotation = 0

         # Step 3: Preprocess the image
         img, (h, w, padding) = preprocess(processing_img)
@@ -574,50 +866,96 @@ def segment(input_img):
         # Step 7: Convert output to int
         output = output.astype('int')

-        # Step 8: Draw results on original image
-        outseg, corrected_data = drawOnTop(original_img, output, original_shape)

     except Exception as e:
         print(f"Error in segmentation: {e}")
-        # Return a basic error response
-        return None, None, 0, f"Error: {str(e)}"

     seg_to_save = (outseg.copy() * 255).astype('uint8')
     cv2.imwrite("tmp/overlap_segmentation.png", cv2.cvtColor(seg_to_save, cv2.COLOR_RGB2BGR))

-    # Step 9: Robust CTR calculation
-    ctr_result = calculate_ctr_robust(output, corrected_data)
     ctr_value = ctr_result['ctr']
-    tilt_angle = ctr_result['tilt_angle']

-    # Enhanced interpretation with quality indicators
    interpretation_parts = []

-    # CTR interpretation
    if ctr_value < 0.5:
-        base_interpretation = "Normal"
    elif 0.50 <= ctr_value <= 0.55:
-        base_interpretation = "Mild Cardiomegaly (CTR 50-55%)"
    elif 0.56 <= ctr_value <= 0.60:
-        base_interpretation = "Moderate Cardiomegaly (CTR 56-60%)"
    elif ctr_value > 0.60:
-        base_interpretation = "Severe Cardiomegaly (CTR > 60%)"
    else:
-        base_interpretation = "Cardiomegaly"

    interpretation_parts.append(base_interpretation)

-    # Add quality indicators
    if was_rotated:
-        interpretation_parts.append(f"Image rotation corrected ({detected_rotation:.1f}°)")

    if ctr_result['correction_applied']:
-        interpretation_parts.append(f"Anatomical tilt corrected ({tilt_angle:.1f}°)")
-    elif tilt_angle > 3:
-        interpretation_parts.append(f"Residual tilt detected ({tilt_angle:.1f}°)")

-    # Add confidence indicator
-    interpretation_parts.append(f"Confidence: {ctr_result['confidence']}")

    final_interpretation = " | ".join(interpretation_parts)

 
 import numpy as np
 import gradio as gr
 import cv2
+from sklearn.cluster import DBSCAN
+from scipy import ndimage
+from skimage.feature import hog
+from skimage import filters
+import warnings
+warnings.filterwarnings('ignore')

 from models.HybridGNet2IGSC import Hybrid
 from utils.utils import scipy_to_torch_sparse, genMatrixesLungsHeart
 
     return RL_mask, LL_mask, H_mask


+def assess_image_quality(img):
+    """Assess image quality to drive adaptive thresholding"""
+    try:
+        # 1. Contrast assessment
+        contrast = np.std(img)
+
+        # 2. Noise assessment (using Laplacian variance)
+        laplacian_var = cv2.Laplacian(img, cv2.CV_64F).var()
+
+        # 3. Edge density
+        edges = cv2.Canny((img * 255).astype(np.uint8), 50, 150)
+        edge_density = np.sum(edges > 0) / edges.size
+
+        # 4. Brightness distribution
+        brightness_std = np.std(img)
+
+        # Normalize scores (0-1)
+        contrast_score = min(contrast / 0.3, 1.0)
+        noise_score = min(laplacian_var / 500, 1.0)
+        edge_score = min(edge_density / 0.1, 1.0)
+        brightness_score = min(brightness_std / 0.25, 1.0)
+
+        # Overall quality score
+        quality = (contrast_score * 0.3 + noise_score * 0.3 +
+                   edge_score * 0.25 + brightness_score * 0.15)
+
+        return {
+            'overall': min(quality, 1.0),
+            'contrast': contrast_score,
+            'noise': noise_score,
+            'edge_density': edge_score,
+            'brightness': brightness_score
+        }
+    except Exception as e:
+        print(f"Error in quality assessment: {e}")
+        return {'overall': 0.5, 'contrast': 0.5, 'noise': 0.5,
+                'edge_density': 0.5, 'brightness': 0.5}
+
+def adaptive_thresholding(quality_scores, image_characteristics):
+    """Set correction thresholds adaptively, based on image quality"""
+    base_rotation_threshold = 3.0
+    base_tilt_threshold = 2.0
+
+    quality = quality_scores['overall']
+
+    # Adjust according to image quality
+    if quality < 0.4:  # low quality
+        rotation_multiplier = 2.0
+        tilt_multiplier = 2.0
+    elif quality < 0.6:  # medium quality
+        rotation_multiplier = 1.5
+        tilt_multiplier = 1.5
+    elif quality > 0.8:  # high quality
+        rotation_multiplier = 0.8
+        tilt_multiplier = 0.8
+    else:  # good quality
+        rotation_multiplier = 1.0
+        tilt_multiplier = 1.0
+
+    # Adjust for other image characteristics
+    if image_characteristics.get('has_medical_devices', False):
+        rotation_multiplier *= 1.3
+        tilt_multiplier *= 1.3
+
+    if image_characteristics.get('patient_age', 'adult') == 'pediatric':
+        rotation_multiplier *= 1.2
+        tilt_multiplier *= 1.2
+
+    return {
+        'rotation_threshold': base_rotation_threshold * rotation_multiplier,
+        'tilt_threshold': base_tilt_threshold * tilt_multiplier,
+        'confidence_threshold': 0.7 if quality > 0.6 else 0.5
+    }
+
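A minimal sketch of how these two helpers compose, assuming a grayscale image scaled to [0, 1] like the one segment() loads; the synthetic array is only illustrative:

# Illustrative only: exercise the quality assessment and the adaptive thresholds.
demo_img = np.clip(np.random.normal(0.5, 0.15, (512, 512)), 0, 1)
scores = assess_image_quality(demo_img)
limits = adaptive_thresholding(scores, {'has_medical_devices': False, 'patient_age': 'adult'})
print(scores['overall'], limits['rotation_threshold'], limits['tilt_threshold'])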
 def calculate_image_tilt(landmarks):
     """Calculate image tilt angle based on lung symmetry"""
     RL = landmarks[0:44]  # Right lung

     return angle_deg, rl_top, ll_top

+def detect_vertical_alignment(img):
+    """Detect tilt in the vertical direction (e.g., spine alignment)"""
+    try:
+        h, w = img.shape
+
+        # Focus on central vertical region (spine area)
+        spine_region = img[:, w//3:2*w//3]
+
+        # Apply edge detection
+        edges = cv2.Canny((spine_region * 255).astype(np.uint8), 30, 100)
+
+        # Find vertical lines using Hough Transform
+        lines = cv2.HoughLines(edges, 1, np.pi/180, threshold=int(h*0.3))
+
+        if lines is not None:
+            vertical_angles = []
+            for line in lines:
+                rho, theta = line[0]
+                angle = np.degrees(theta) - 90  # Convert to rotation angle
+
+                # Filter for near-vertical lines
+                if abs(angle) < 20:
+                    vertical_angles.append(angle)
+
+            if vertical_angles:
+                # Remove outliers and get median
+                angles_arr = np.array(vertical_angles)
+                Q1, Q3 = np.percentile(angles_arr, [25, 75])
+                IQR = Q3 - Q1
+                filtered = angles_arr[(angles_arr >= Q1 - 1.5*IQR) &
+                                      (angles_arr <= Q3 + 1.5*IQR)]
+
+                if len(filtered) > 0:
+                    return np.median(filtered)
+
+        return 0
+    except Exception as e:
+        print(f"Error in vertical alignment detection: {e}")
+        return 0
+
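A quick sanity check on a synthetic frame; note that, as written, the abs(np.degrees(theta) - 90) < 20 filter keeps Hough lines that run close to horizontal, so the toy input below uses a nearly horizontal bright band tilted by about 8°:

# Illustrative only: a 400x900 frame with a bright band tilted ~8 degrees from horizontal.
toy = np.zeros((400, 900), dtype=np.float32)
cv2.line(toy, (0, 150), (899, 276), color=1.0, thickness=9)  # slope ~ tan(8 deg)
print(detect_vertical_alignment(toy))  # expect a value near 8 on this input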
+def ml_based_tilt_detection(img):
+    """Use HOG features and pattern recognition to detect tilt"""
+    try:
+        # Resize for consistent processing
+        resized = cv2.resize(img, (256, 256))
+
+        # Extract HOG features
+        features = hog(resized, orientations=9, pixels_per_cell=(16, 16),
+                       cells_per_block=(2, 2), block_norm='L2-Hys')
+
+        # Test multiple rotation angles
+        best_angle = 0
+        best_score = 0
+
+        for test_angle in range(-20, 21, 2):
+            # Rotate image
+            center = (128, 128)
+            rotation_matrix = cv2.getRotationMatrix2D(center, test_angle, 1.0)
+            rotated = cv2.warpAffine(resized, rotation_matrix, (256, 256))
+
+            # Extract features from rotated image
+            rotated_features = hog(rotated, orientations=9, pixels_per_cell=(16, 16),
+                                   cells_per_block=(2, 2), block_norm='L2-Hys')
+
+            # Calculate symmetry score (simplified)
+            left_features = rotated_features[:len(rotated_features)//2]
+            right_features = rotated_features[len(rotated_features)//2:]
+
+            # Correlation as symmetry measure
+            correlation = np.corrcoef(left_features, right_features)[0, 1]
+            if not np.isnan(correlation) and correlation > best_score:
+                best_score = correlation
+                best_angle = test_angle
+
+        return best_angle if best_score > 0.3 else 0
+    except Exception as e:
+        print(f"Error in ML-based detection: {e}")
+        return 0
+
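Both detectors return a single angle in degrees (0 when nothing reliable is found), which is how detect_image_rotation_advanced below consumes them; a minimal sketch, assuming original_img is the [0, 1] grayscale film loaded in segment():

# Illustrative only: combine the standalone detectors the way Methods 4 and 5 do.
angles, weights = [], []
v = detect_vertical_alignment(original_img)
if abs(v) > 1:
    angles.append(v)
    weights.append(0.7)
m = ml_based_tilt_detection(original_img)
if abs(m) > 1:
    angles.append(m)
    weights.append(0.6)
if angles:
    print(np.average(angles, weights=weights))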
 def rotate_points(points, angle_deg, center):
     """Rotate points around a center by given angle"""
     angle_rad = np.radians(-angle_deg)  # Negative to correct the tilt

         LL = landmarks[44:94]
         H = landmarks[94:]

+        original_RL = original_landmarks[0:44]
+        original_LL = original_landmarks[44:94]
+        original_H = original_landmarks[94:]
+
+        validation_results = {
+            'heart_position': False,
+            'lung_symmetry': False,
+            'relative_change': False,
+            'anatomical_ratios': False,
+            'outlier_detection': False
+        }
+
+        # 1. Heart position validation (enhanced)
         rl_center_x = np.mean(RL[:, 0])
         ll_center_x = np.mean(LL[:, 0])
         h_center_x = np.mean(H[:, 0])

+        # Heart should be between lung centers with some tolerance
+        lung_span = abs(rl_center_x - ll_center_x)
+        heart_deviation = min(abs(h_center_x - rl_center_x), abs(h_center_x - ll_center_x))
+
+        if heart_deviation < lung_span * 0.6:  # Heart within 60% of lung span
+            validation_results['heart_position'] = True
+
+        # 2. Lung symmetry validation
+        rl_centroid = np.mean(RL, axis=0)
+        ll_centroid = np.mean(LL, axis=0)
+
+        # Check if lungs are reasonably symmetric
+        lung_distance = np.linalg.norm(rl_centroid - ll_centroid)
+        original_lung_distance = np.linalg.norm(np.mean(original_RL, axis=0) - np.mean(original_LL, axis=0))
+
+        symmetry_change = abs(lung_distance - original_lung_distance) / original_lung_distance
+        if symmetry_change < 0.15:  # Less than 15% change in lung symmetry
+            validation_results['lung_symmetry'] = True
+
+        # 3. Relative change validation (enhanced)
         total_change = np.mean(np.linalg.norm(landmarks - original_landmarks, axis=1))
         relative_change = total_change / np.mean(np.linalg.norm(original_landmarks, axis=1))

+        if relative_change < threshold:
+            validation_results['relative_change'] = True
+
+        # 4. Anatomical ratios validation
+        # Heart width to lung span ratio should remain reasonable
+        heart_width = np.max(H[:, 0]) - np.min(H[:, 0])
+        lung_span_total = max(np.max(RL[:, 0]), np.max(LL[:, 0])) - min(np.min(RL[:, 0]), np.min(LL[:, 0]))
+
+        original_heart_width = np.max(original_H[:, 0]) - np.min(original_H[:, 0])
+        original_lung_span = max(np.max(original_RL[:, 0]), np.max(original_LL[:, 0])) - min(np.min(original_RL[:, 0]), np.min(original_LL[:, 0]))
+
+        current_ratio = heart_width / lung_span_total if lung_span_total > 0 else 0
+        original_ratio = original_heart_width / original_lung_span if original_lung_span > 0 else 0
+
+        ratio_change = abs(current_ratio - original_ratio) / original_ratio if original_ratio > 0 else 0
+        if ratio_change < 0.1:  # Less than 10% change in anatomical ratios
+            validation_results['anatomical_ratios'] = True
+
+        # 5. Outlier detection using DBSCAN
+        try:
+            all_points = landmarks.reshape(-1, 2)
+            clustering = DBSCAN(eps=20, min_samples=3).fit(all_points)
+            outlier_ratio = np.sum(clustering.labels_ == -1) / len(clustering.labels_)

+            if outlier_ratio < 0.05:  # Less than 5% outliers
+                validation_results['outlier_detection'] = True
+        except:
+            validation_results['outlier_detection'] = True  # Default to True if clustering fails
+
+        # Overall validation score
+        validation_score = sum(validation_results.values()) / len(validation_results)
+
+        # Log detailed results
+        if validation_score < 0.8:
+            failed_checks = [k for k, v in validation_results.items() if not v]
+            print(f"Validation warnings - Failed checks: {failed_checks}")
+            print(f"Validation score: {validation_score:.2f}")
+
+        return validation_score >= 0.6  # Require at least 60% of checks to pass

     except Exception as e:
+        print(f"Error in enhanced landmark validation: {e}")
         return False
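A minimal sketch of the checks in action, using synthetic point sets (44/50/24 points for right lung, left lung, heart) rather than real model output:

# Illustrative only: small perturbations should pass, a grossly scattered result should fail.
rng = np.random.default_rng(0)
RL_pts = rng.uniform([40, 50], [90, 200], (44, 2))
LL_pts = rng.uniform([130, 50], [180, 200], (50, 2))
H_pts = rng.uniform([85, 120], [135, 190], (24, 2))
original = np.vstack([RL_pts, LL_pts, H_pts])
nudged = original + rng.normal(0, 0.5, original.shape)
scattered = original + rng.normal(0, 100, original.shape)
print(validate_landmarks_consistency(nudged, original))     # expected: True
print(validate_landmarks_consistency(scattered, original))  # expected: False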
 
 def calculate_ctr_robust(landmarks, corrected_landmarks=None):

     }


+def detect_image_rotation_advanced(img, quality_scores=None):
+    """Enhanced rotation detection using multiple methods with quality adaptation"""
     try:
         angles = []
+        confidence_scores = []

         h, w = img.shape

+        # Adaptive parameters based on image quality
+        if quality_scores and quality_scores['overall'] < 0.5:
+            edge_threshold_low, edge_threshold_high = 30, 100
+            hough_threshold_factor = 0.2
+            correlation_threshold = 0.25
+        else:
+            edge_threshold_low, edge_threshold_high = 50, 150
+            hough_threshold_factor = 0.3
+            correlation_threshold = 0.35

+        # Method 1: Multi-region edge detection
+        edges = cv2.Canny((img * 255).astype(np.uint8), edge_threshold_low, edge_threshold_high)
+
+        # Focus on multiple regions
+        regions = {
+            'spine': edges[h//4:3*h//4, w//3:2*w//3],
+            'left_lung': edges[h//6:5*h//6, w//6:w//2],
+            'right_lung': edges[h//6:5*h//6, w//2:5*w//6]
+        }
+
+        for region_name, region in regions.items():
+            lines = cv2.HoughLines(region, 1, np.pi/180,
+                                   threshold=int(region.shape[0] * hough_threshold_factor))
+            if lines is not None:
+                region_angles = []
+                for line in lines[:3]:  # Top 3 lines per region
+                    rho, theta = line[0]
+                    angle = np.degrees(theta) - 90
+                    if abs(angle) < 25:  # Near vertical lines
+                        region_angles.append(angle)
+
+                if region_angles:
+                    region_angle = np.median(region_angles)
+                    angles.append(region_angle)
+                    confidence_scores.append(len(region_angles) / 3.0)

+        # Method 2: Enhanced chest boundary detection
         contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
         if contours:
+            # Get top 2 largest contours
+            sorted_contours = sorted(contours, key=cv2.contourArea, reverse=True)[:2]

+            for contour in sorted_contours:
+                if len(contour) >= 5 and cv2.contourArea(contour) > h*w*0.01:
+                    try:
+                        ellipse = cv2.fitEllipse(contour)
+                        chest_angle = ellipse[2] - 90
+                        if abs(chest_angle) < 30:
+                            angles.append(chest_angle)
+                            confidence_scores.append(cv2.contourArea(contour) / (h*w))
+                    except:
+                        continue

+        # Method 3: Multi-scale template matching
+        for scale in [1.0, 0.8, 0.6]:
+            scaled_h, scaled_w = int(h * scale), int(w * scale)
+            if scaled_h < 100 or scaled_w < 100:
+                continue
+
+            # matchTemplate requires 8-bit or 32-bit float input, so cast before resizing
+            scaled_img = cv2.resize(img.astype(np.float32), (scaled_w, scaled_h))
+            left_half = scaled_img[:, :scaled_w//2]
+            right_half = np.fliplr(scaled_img[:, scaled_w//2:])

+            best_angle = 0
+            best_correlation = 0
+
+            for test_angle in range(-18, 19, 3):
+                if test_angle == 0:
+                    test_left = left_half
+                else:
+                    center = (left_half.shape[1]//2, left_half.shape[0]//2)
+                    rotation_matrix = cv2.getRotationMatrix2D(center, test_angle, 1.0)
+                    test_left = cv2.warpAffine(left_half, rotation_matrix,
+                                               (left_half.shape[1], left_half.shape[0]))
+
+                try:
+                    correlation = cv2.matchTemplate(test_left, right_half, cv2.TM_CCOEFF_NORMED).max()
+                    if correlation > best_correlation:
+                        best_correlation = correlation
+                        best_angle = test_angle
+                except:
+                    continue
+
+            if best_correlation > correlation_threshold:
+                angles.append(best_angle)
+                confidence_scores.append(best_correlation * scale)  # Weight by scale
+
+        # Method 4: Vertical alignment detection
+        vertical_angle = detect_vertical_alignment(img)
+        if abs(vertical_angle) > 1:
+            angles.append(vertical_angle)
+            confidence_scores.append(0.7)

+        # Method 5: ML-based detection
+        ml_angle = ml_based_tilt_detection(img)
+        if abs(ml_angle) > 1:
+            angles.append(ml_angle)
+            confidence_scores.append(0.6)

+        # Combine all methods with weighted averaging
+        if angles and confidence_scores:
             angles = np.array(angles)
+            confidence_scores = np.array(confidence_scores)

+            # Remove extreme outliers (more than 2.5 standard deviations)
+            if len(angles) > 2:
+                z_scores = np.abs((angles - np.mean(angles)) / np.std(angles))
+                mask = z_scores < 2.5
+                angles = angles[mask]
+                confidence_scores = confidence_scores[mask]
+
+            if len(angles) > 0:
+                # Weighted average
+                weights = confidence_scores / np.sum(confidence_scores)
+                final_angle = np.average(angles, weights=weights)
+                overall_confidence = np.mean(confidence_scores)
+
+                # Only return angle if confidence is sufficient and angle is significant
+                if overall_confidence > 0.3 and abs(final_angle) > 1.5:
+                    return final_angle, overall_confidence

+        return 0, 0

     except Exception as e:
         print(f"Error in advanced rotation detection: {e}")
+        return 0, 0
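The weighted combination at the end can be illustrated in isolation with made-up per-method estimates; a low-confidence outlier contributes little to the result:

# Illustrative only: the aggregation used in the final step of the detector.
angles = np.array([4.2, 3.5, 12.0, 3.9])               # per-method angle estimates (degrees)
confidence_scores = np.array([0.67, 0.45, 0.05, 0.7])  # matching confidences
weights = confidence_scores / np.sum(confidence_scores)
final_angle = np.average(angles, weights=weights)      # ~4.1; the 12.0 estimate is down-weighted
overall_confidence = np.mean(confidence_scores)        # ~0.47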
 
 def rotate_image(img, angle):
     """Rotate image by given angle"""

         original_img = cv2.imread(input_img, 0) / 255.0
         original_shape = original_img.shape[:2]

+        # Step 1: Assess image quality
+        quality_scores = assess_image_quality(original_img)
+        print(f"Image quality assessment: {quality_scores['overall']:.2f}")
+
+        # Step 2: Detect medical devices or special characteristics
+        image_characteristics = {
+            'has_medical_devices': False,  # Could be enhanced with device detection
+            'patient_age': 'adult',        # Could be inferred from image characteristics
+            'image_type': 'standard'       # PA, AP, lateral, etc.
+        }
+
+        # Step 3: Get adaptive thresholds
+        thresholds = adaptive_thresholding(quality_scores, image_characteristics)
+
+        # Step 4: Enhanced rotation detection with quality adaptation
+        detected_rotation, rotation_confidence = detect_image_rotation_advanced(original_img, quality_scores)
         was_rotated = False
         processing_img = original_img

+        # Step 5: Rotate image if significant rotation detected (adaptive threshold)
+        rotation_threshold = thresholds['rotation_threshold']
+        if abs(detected_rotation) > rotation_threshold and rotation_confidence > 0.4:
             processing_img, actual_rotation = rotate_image(original_img, -detected_rotation)
             was_rotated = True
+            print(f"Applied rotation correction: {detected_rotation:.1f}° (confidence: {rotation_confidence:.2f})")
         else:
             actual_rotation = 0
+            if abs(detected_rotation) > 1:
+                print(f"Rotation detected ({detected_rotation:.1f}°) but below threshold or low confidence")

         # Step 3: Preprocess the image
         img, (h, w, padding) = preprocess(processing_img)

         # Step 7: Convert output to int
         output = output.astype('int')

+        # Step 8: Enhanced landmark correction with adaptive thresholds
+        tilt_angle, rl_top, ll_top = calculate_image_tilt(output)
+        tilt_threshold = thresholds['tilt_threshold']
+
+        # Apply tilt correction with enhanced validation
+        corrected_data = None
+        if abs(tilt_angle) > tilt_threshold:
+            image_center = np.array([original_shape[1]/2, original_shape[0]/2])
+
+            # Test correction
+            RL_test = rotate_points(output[0:44], tilt_angle, image_center)
+            LL_test = rotate_points(output[44:94], tilt_angle, image_center)
+            H_test = rotate_points(output[94:], tilt_angle, image_center)
+
+            corrected_landmarks = np.vstack([RL_test, LL_test, H_test])
+
+            # Enhanced validation with adaptive threshold
+            validation_threshold = 0.08 if quality_scores['overall'] < 0.5 else 0.05
+            if validate_landmarks_consistency(corrected_landmarks, output, validation_threshold):
+                corrected_data = (RL_test, LL_test, H_test, tilt_angle)
+                print(f"Anatomical tilt correction applied: {tilt_angle:.1f}°")
+            else:
+                print(f"Tilt correction validation failed, using original landmarks")
+
+        # Step 9: Draw results on original image
+        outseg, final_corrected_data = drawOnTop(original_img, output, original_shape)
+
+        # Use corrected data if available
+        if corrected_data is not None:
+            final_corrected_data = corrected_data

     except Exception as e:
         print(f"Error in segmentation: {e}")
+        # Return a basic error response with quality info
+        error_msg = f"Error: {str(e)}"
+        if 'quality_scores' in locals():
+            error_msg += f" | Image Quality: {quality_scores['overall']:.2f}"
+        return None, None, 0, error_msg

     seg_to_save = (outseg.copy() * 255).astype('uint8')
     cv2.imwrite("tmp/overlap_segmentation.png", cv2.cvtColor(seg_to_save, cv2.COLOR_RGB2BGR))

+    # Step 10: Enhanced CTR calculation with quality metrics
+    ctr_result = calculate_ctr_robust(output, final_corrected_data)
     ctr_value = ctr_result['ctr']
+    actual_tilt_angle = ctr_result['tilt_angle']

+    # Enhanced interpretation with comprehensive quality indicators
     interpretation_parts = []

+    # CTR interpretation with confidence adjustment
+    confidence_modifier = ""
+    if quality_scores['overall'] < 0.4:
+        confidence_modifier = " (Low Image Quality)"
+    elif ctr_result['confidence'] == 'Low':
+        confidence_modifier = " (Uncertain)"
+
     if ctr_value < 0.5:
+        base_interpretation = f"Normal{confidence_modifier}"
     elif 0.50 <= ctr_value <= 0.55:
+        base_interpretation = f"Mild Cardiomegaly (CTR 50-55%){confidence_modifier}"
     elif 0.56 <= ctr_value <= 0.60:
+        base_interpretation = f"Moderate Cardiomegaly (CTR 56-60%){confidence_modifier}"
     elif ctr_value > 0.60:
+        base_interpretation = f"Severe Cardiomegaly (CTR > 60%){confidence_modifier}"
     else:
+        base_interpretation = f"Cardiomegaly{confidence_modifier}"

     interpretation_parts.append(base_interpretation)

+    # Add processing quality indicators
+    quality_info = []
     if was_rotated:
+        quality_info.append(f"Rotation: {detected_rotation:.1f}° (conf: {rotation_confidence:.2f})")

     if ctr_result['correction_applied']:
+        quality_info.append(f"Tilt: {actual_tilt_angle:.1f}°")
+    elif actual_tilt_angle > tilt_threshold:
+        quality_info.append(f"Residual tilt: {actual_tilt_angle:.1f}°")
+
+    # Add image quality score
+    quality_info.append(f"Quality: {quality_scores['overall']:.2f}")
+    quality_info.append(f"Confidence: {ctr_result['confidence']}")
+
+    if quality_info:
+        interpretation_parts.append(" | ".join(quality_info))

+    # Add method variance warning if needed
+    if ctr_result['method_variance'] > 0.03:
+        interpretation_parts.append(f"⚠️ Method variance: {ctr_result['method_variance']:.3f}")

     final_interpretation = " | ".join(interpretation_parts)
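For reference, a standalone sketch of the banding above; CTR values that fall between 0.55 and 0.56 (e.g. 0.555) drop through to the bare label:

# Illustrative only: the same CTR bands, applied to a few sample values.
for ctr_value in (0.43, 0.52, 0.555, 0.57, 0.65):
    if ctr_value < 0.5:
        label = "Normal"
    elif 0.50 <= ctr_value <= 0.55:
        label = "Mild Cardiomegaly (CTR 50-55%)"
    elif 0.56 <= ctr_value <= 0.60:
        label = "Moderate Cardiomegaly (CTR 56-60%)"
    elif ctr_value > 0.60:
        label = "Severe Cardiomegaly (CTR > 60%)"
    else:
        label = "Cardiomegaly"
    print(ctr_value, label)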