#!/usr/bin/env python3
"""
saggital_ICC.py
Recompute ICC analysis for sagittal measurements only using the 2 CSV files.
Each column represents data from one rater, comparing all 5 columns for ICC.
"""
import pandas as pd # type: ignore
import numpy as np # type: ignore
import matplotlib.pyplot as plt # type: ignore
from scipy import stats # type: ignore
from scipy.stats import f # type: ignore
import argparse
import sys
from pathlib import Path
ID_LIKE = {"case", "case_id", "id", "subject", "subject_id", "uid", "study", "study_id"}
def detect_rater_columns(df: pd.DataFrame, min_unique: int = 3):
"""Detect rater columns."""
rater_like = [c for c in df.columns if str(c).strip().lower().startswith("rater")]
if len(rater_like) >= 2:
return rater_like
num_cols = df.select_dtypes(include=[np.number]).columns.tolist()
keep = []
for c in num_cols:
cl = str(c).strip().lower()
if cl in ID_LIKE:
continue
if df[c].nunique(dropna=True) >= min_unique:
keep.append(c)
return keep
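# Hedged illustrative check (not part of the original pipeline): a toy frame exercising the
# 'rater*' name rule; the numeric/non-ID fallback is only used when no such columns exist.
# The helper name and values below are invented for the demo.
def _demo_detect_rater_columns():
    """Run detect_rater_columns on a synthetic DataFrame (demo only)."""
    toy = pd.DataFrame({
        "case_id": [1, 2, 3, 4],
        "Rater_A": [10.0, 12.5, 14.0, 9.5],
        "Rater_B": [11.0, 12.0, 13.5, 10.0],
        "notes": ["a", "b", "c", "d"],
    })
    print(detect_rater_columns(toy))  # expected: ['Rater_A', 'Rater_B'] via the name rule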
def icc2k_absolute(y: np.ndarray):
"""
Compute ICC(2,k): two-way random-effects, absolute-agreement, average of k raters.
Returns ICC(2,k) and mean-square terms.
"""
y = np.asarray(y, float)
n, k = y.shape
mean_targets = y.mean(axis=1, keepdims=True)
mean_raters = y.mean(axis=0, keepdims=True)
grand_mean = y.mean()
SSR = k * np.sum((mean_targets - grand_mean)**2)
SSC = n * np.sum((mean_raters - grand_mean)**2)
SSE = np.sum((y - mean_targets - mean_raters + grand_mean)**2)
dfR, dfC = n - 1, k - 1
dfE = (n - 1) * (k - 1)
MSR = SSR / dfR if dfR > 0 else np.nan
MSC = SSC / dfC if dfC > 0 else np.nan
MSE = SSE / dfE if dfE > 0 else np.nan
numerator = MSR - MSE
denominator = MSR + (MSC - MSE) / n
icc2k = numerator / denominator if denominator != 0 else np.nan
return icc2k, MSR, MSC, MSE
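# Minimal synthetic sketch (data invented for illustration): raters that differ only by a
# constant offset are perfectly consistent, yet ICC(2,k) stays below 1 because it is an
# absolute-agreement index and penalises the systematic rater bias.
def _demo_icc2k_absolute():
    """Show the absolute-agreement behaviour of icc2k_absolute on offset-only data (demo only)."""
    true_scores = np.array([10.0, 20.0, 30.0, 40.0, 50.0])
    offsets = np.array([0.0, 5.0, 10.0])                # systematic per-rater bias, no noise
    ratings = true_scores[:, None] + offsets[None, :]   # 5 cases x 3 raters
    icc, _, _, _ = icc2k_absolute(ratings)
    print(f"[demo] ICC(2,k) with offset-only disagreement: {icc:.3f} (roughly 0.97, below 1)")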
def bootstrap_icc2k(y, n_boot=5000, seed=42):
"""Bootstrap ICC(2,k) confidence intervals."""
rng = np.random.default_rng(seed)
n, _ = y.shape
boots = []
for _ in range(n_boot):
idx = rng.integers(0, n, size=n)
icc, _, _, _ = icc2k_absolute(y[idx, :])
boots.append(icc)
boots = np.asarray(boots)
lo, hi = np.nanpercentile(boots, [2.5, 97.5])
return float(np.nanmean(boots)), float(lo), float(hi), boots
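# Usage sketch for the bootstrap interval on synthetic noisy ratings (all values invented;
# this demo helper is not part of the original script).
def _demo_bootstrap_icc2k(n_cases=30, k_raters=5, noise_sd=3.0):
    """Point estimate plus percentile bootstrap 95% CI for ICC(2,k) on simulated data (demo only)."""
    rng = np.random.default_rng(0)
    true_scores = rng.uniform(10, 60, size=(n_cases, 1))
    ratings = true_scores + rng.normal(0.0, noise_sd, size=(n_cases, k_raters))
    point, _, _, _ = icc2k_absolute(ratings)
    _, lo, hi, _ = bootstrap_icc2k(ratings, n_boot=1000)
    print(f"[demo] ICC(2,k) = {point:.3f}, bootstrap 95% CI ~ [{lo:.3f}, {hi:.3f}]")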
def format_pm(mean, sd, decimals=1):
"""Format mean ± SD."""
if np.isnan(mean) or np.isnan(sd):
return "NA"
f = f"{{:.{decimals}f}} ± {{:.{decimals}f}}"
return f.format(mean, sd)
def detect_cobb_series(df: pd.DataFrame) -> pd.Series:
"""Detect Cobb angle column in test data."""
cobb_cols = [c for c in df.columns if "cobb" in str(c).lower()]
if cobb_cols:
s = pd.to_numeric(df[cobb_cols[0]], errors="coerce") # type: ignore
return s
num_cols = df.select_dtypes(include=[np.number]).columns.tolist()
if not num_cols:
raise ValueError("No numeric columns found for Cobb angles.")
return df[num_cols[0]]
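# Tiny demo of the Cobb-column heuristic on an invented two-row frame (demo only; the
# column names are assumptions made for illustration).
def _demo_detect_cobb_series():
    toy = pd.DataFrame({"patient": ["p1", "p2"], "thoracic_cobb_deg": ["32.5", "41.0"]})
    print(detect_cobb_series(toy).tolist())  # expected: [32.5, 41.0], coerced to float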
def fmt(x, dec=1):
return f"{x:.{dec}f}"
def create_test_cobb_summary(csv_path, outdir=".", decimals=1):
"""Create summary statistics for test dataset with single-observer Cobb angles."""
csv_path = Path(csv_path)
outdir = Path(outdir)
outdir.mkdir(parents=True, exist_ok=True)
if not csv_path.exists():
print(f"[ERROR] CSV not found: {csv_path}")
return
df = pd.read_csv(csv_path)
try:
s = detect_cobb_series(df)
except Exception as e:
print(f"[ERROR] {e}")
print(f"Columns seen: {list(df.columns)}")
return
x = pd.to_numeric(s, errors="coerce").dropna().to_numpy() # type: ignore
n = x.size
if n == 0:
print("[ERROR] No valid numeric Cobb values found.")
return
mean = float(np.mean(x)) # type: ignore
sd = float(np.std(x, ddof=1)) if n > 1 else float("nan") # type: ignore
median = float(np.median(x))
q1, q3 = [float(np.percentile(x, p)) for p in (25, 75)]
iqr = q3 - q1
xmin = float(np.min(x))
xmax = float(np.max(x))
print("\n=== Single-Observer Thoracic Cobb Summary (Test Set) ===")
print(f"n = {n}")
print(f"Mean ± SD: {fmt(mean, decimals)} ± {fmt(sd, decimals)} deg")
print(f"Median [IQR]: {fmt(median, decimals)} [{fmt(q1, decimals)}{fmt(q3, decimals)}] deg")
print(f"Range: {fmt(xmin, decimals)}{fmt(xmax, decimals)} deg")
print("=========================================================")
out_csv = outdir / "test_cobb_summary.csv"
pd.DataFrame([{
"n": n,
"mean": mean,
"sd": sd,
"median": median,
"q1": q1,
"q3": q3,
"iqr": iqr,
"min": xmin,
"max": xmax
}]).to_csv(out_csv, index=False)
print(f"[OK] Saved: {out_csv}")
return mean, sd, median, q1, q3, iqr, xmin, xmax, n
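# Hedged example run of the test-set summary on a throwaway CSV written to a temp folder;
# the column name 'thoracic_cobb' and the values are assumptions made for the demo.
def _demo_create_test_cobb_summary():
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        demo_csv = Path(tmp) / "demo_test_cobb.csv"
        pd.DataFrame({"thoracic_cobb": [28.5, 41.0, 35.2, 50.3, 33.1]}).to_csv(demo_csv, index=False)
        create_test_cobb_summary(demo_csv, outdir=tmp)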
def calculate_icc_2_1(data):
"""Calculate ICC(2,1)."""
n_subjects, n_raters = data.shape
subject_means = np.mean(data, axis=1)
rater_means = np.mean(data, axis=0)
grand_mean = np.mean(data)
SS_between_subjects = n_raters * np.sum((subject_means - grand_mean) ** 2)
SS_between_raters = n_subjects * np.sum((rater_means - grand_mean) ** 2)
SS_error = 0
for i in range(n_subjects):
for j in range(n_raters):
SS_error += (data[i, j] - subject_means[i] - rater_means[j] + grand_mean) ** 2
MS_between_subjects = SS_between_subjects / (n_subjects - 1)
MS_between_raters = SS_between_raters / (n_raters - 1)
MS_error = SS_error / ((n_subjects - 1) * (n_raters - 1))
    # ICC(2,1): two-way random effects, absolute agreement, single rater (Shrout & Fleiss, 1979).
    # The denominator must include the between-rater variance term; without it the
    # expression reduces to the consistency form ICC(3,1).
    icc_numerator = MS_between_subjects - MS_error
    icc_denominator = (MS_between_subjects
                       + (n_raters - 1) * MS_error
                       + n_raters * (MS_between_raters - MS_error) / n_subjects)
    icc_value = icc_numerator / icc_denominator
    f_stat = MS_between_subjects / MS_error
    df1 = n_subjects - 1
    df2 = (n_subjects - 1) * (n_raters - 1)
    p_value = 1 - f.cdf(f_stat, df1, df2)
    # Approximate F-based 95% CI (exact for the consistency form; the degrees of freedom
    # are swapped for the upper bound, as in Shrout & Fleiss). Used here as a pragmatic
    # approximation for ICC(2,1).
    alpha = 0.05
    f_lower = f_stat / f.ppf(1 - alpha / 2, df1, df2)
    f_upper = f_stat * f.ppf(1 - alpha / 2, df2, df1)
    ci_lower = max(0, (f_lower - 1) / (f_lower + n_raters - 1))
    ci_upper = min(1, (f_upper - 1) / (f_upper + n_raters - 1))
    return icc_value, f_stat, p_value, (ci_lower, ci_upper)
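# Sanity-check sketch on simulated ratings (demo only): with the absolute-agreement
# denominator above, the single-rater ICC(2,1) and the averaged ICC(2,k) should agree
# via the Spearman-Brown step-up formula k*ICC / (1 + (k-1)*ICC). All data are invented.
def _demo_icc_2_1_vs_icc_2_k():
    rng = np.random.default_rng(1)
    ratings = rng.uniform(20, 60, size=(25, 1)) + rng.normal(0.0, 4.0, size=(25, 5))
    icc1, _, _, _ = calculate_icc_2_1(ratings)
    icc_k, _, _, _ = icc2k_absolute(ratings)
    stepped_up = 5 * icc1 / (1 + 4 * icc1)
    print(f"[demo] ICC(2,1)={icc1:.3f}, ICC(2,k)={icc_k:.3f}, Spearman-Brown={stepped_up:.3f}")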
def create_comprehensive_summary(csv_path, outdir=".", decimals=1, n_boot=5000):
"""Create summary statistics including bootstrap ICC."""
csv_path = Path(csv_path)
outdir = Path(outdir)
outdir.mkdir(parents=True, exist_ok=True)
if not csv_path.exists():
print(f"[ERROR] CSV not found: {csv_path}")
return
df = pd.read_csv(csv_path, sep='\t', header=None) # type: ignore
raters = list(range(df.shape[1]))
y = df.to_numpy(float)
n, k = y.shape
rater_means = np.nanmean(y, axis=0)
rater_sds = np.nanstd(y, axis=0, ddof=1)
per_case_sd = np.nanstd(y, axis=1, ddof=1)
across_mean = float(np.nanmean(per_case_sd))
across_sd = float(np.nanstd(per_case_sd, ddof=1))
grand_mean = float(np.nanmean(y))
icc2k, MSR, MSC, MSE = icc2k_absolute(y)
_, lo, hi, boots = bootstrap_icc2k(y, n_boot=n_boot)
print("\n=== Five-Observer Thoracic Cobb Summary (Development Set) ===")
print(f"Detected raters (k={k}): {raters}")
for i, (m, s) in enumerate(zip(rater_means, rater_sds)):
print(f"Rater {i+1:>8d}: {m:.{decimals}f} ± {s:.{decimals}f} deg")
print(f"Across-rater SD (per-case): mean ± SD = {across_mean:.{decimals}f} ± {across_sd:.{decimals}f} deg")
print(f"Grand mean across all ratings: {grand_mean:.{decimals}f} deg")
print(f"ICC(2,k) absolute agreement (bootstrap 95% CI): {icc2k:.3f} [{lo:.3f}, {hi:.3f}]")
print("==============================================================")
rows = []
for i, (m, s) in enumerate(zip(rater_means, rater_sds)):
rows.append({"measure": "rater_mean_sd", "rater": f"Rater_{i+1}", "mean": m, "sd": s})
rows.append({"measure": "across_rater_sd_mean", "rater": "NA", "mean": across_mean, "sd": across_sd})
rows.append({"measure": "grand_mean", "rater": "NA", "mean": grand_mean, "sd": np.nan})
rows.append({"measure": "icc2k", "rater": "NA", "mean": icc2k, "sd": np.nan})
rows.append({"measure": "icc2k_ci_lo", "rater": "NA", "mean": lo, "sd": np.nan})
rows.append({"measure": "icc2k_ci_hi", "rater": "NA", "mean": hi, "sd": np.nan})
pd.DataFrame(rows).to_csv(outdir / "dev_cobb_summary.csv", index=False)
print(f"[OK] Saved summaries in {outdir.resolve()}")
return icc2k, lo, hi
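# Illustrative invocation on a synthetic development file (demo only). The expected input
# layout is the one read above: tab-separated, no header, one column per rater and one row
# per case. The file name and values here are invented.
def _demo_create_comprehensive_summary():
    import tempfile
    rng = np.random.default_rng(2)
    ratings = rng.uniform(20, 60, size=(20, 1)) + rng.normal(0.0, 3.0, size=(20, 5))
    with tempfile.TemporaryDirectory() as tmp:
        demo_csv = Path(tmp) / "demo_dev_cobb.csv"
        pd.DataFrame(ratings).to_csv(demo_csv, sep="\t", header=False, index=False)
        create_comprehensive_summary(demo_csv, outdir=tmp, n_boot=200)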
def create_sagittal_icc_plot():
"""Create ICC plot for sagittal measurements only"""
csv_files = {
'../cobb_angles/dev_cobb.csv': 'Sagittal Thoracic'
}
results = {}
for filename, display_name in csv_files.items():
try:
df = pd.read_csv(filename, sep='\t', header=None) # type: ignore
data = df.values
print(f"\n{display_name} Data Shape: {data.shape}")
print(f"Data preview:\n{data[:5]}")
icc_value, f_stat, p_value, ci = calculate_icc_2_1(data)
results[display_name] = {
'icc': icc_value,
'f_stat': f_stat,
'p_value': p_value,
'ci_lower': ci[0],
'ci_upper': ci[1],
'n_subjects': data.shape[0],
'n_raters': data.shape[1]
}
print(f"{display_name}: ICC = {icc_value:.4f}, CI = [{ci[0]:.3f}, {ci[1]:.3f}]")
print(f"F-statistic = {f_stat:.4f}, p-value = {p_value:.4f}")
except Exception as e:
print(f"Error processing {filename}: {e}")
continue
if not results:
print("No data processed successfully.")
return
fig1, ax1 = plt.subplots(1, 1, figsize=(4, 8)) # type: ignore
names = list(results.keys())
icc_values = [results[name]['icc'] for name in names]
ci_lowers = [results[name]['ci_lower'] for name in names]
ci_uppers = [results[name]['ci_upper'] for name in names]
colors = ['#2E86AB', '#A23B72']
bars = ax1.bar(names, icc_values, color=colors, alpha=0.8, width=0.3,
edgecolor='black', linewidth=1)
ax1.errorbar(names, icc_values,
yerr=[np.array(icc_values) - np.array(ci_lowers),
np.array(ci_uppers) - np.array(icc_values)],
fmt='none', color='red', capsize=5, capthick=2)
for i, (bar, value) in enumerate(zip(bars, icc_values)):
ax1.text(bar.get_x() + bar.get_width()/2, value + 0.02,
f'{value:.3f}', ha='center', va='bottom',
fontweight='bold', fontsize=12)
ax1.set_ylabel('ICC Value', fontsize=12, fontweight='bold')
ax1.set_title('Intraclass Correlation Coefficients\nSagittal Thoracic Measurements',
fontsize=14, fontweight='bold')
ax1.set_ylim(0, 1.1)
ax1.grid(True, alpha=0.3, linestyle='--')
ax1.set_axisbelow(True)
ax1.tick_params(axis='x', rotation=0)
plt.tight_layout()
plt.show()
fig2, ax2 = plt.subplots(1, 1, figsize=(4, 8)) # type: ignore
ax2.axis('off')
table_data = []
for name in names:
result = results[name]
table_data.append([
name,
f"{result['icc']:.4f}",
f"[{result['ci_lower']:.3f}, {result['ci_upper']:.3f}]",
f"{result['p_value']:.4f}",
f"{result['n_subjects']}x{result['n_raters']}"
])
table = ax2.table(cellText=table_data,
colLabels=['Measurement Type', 'ICC(2,1)', '95% CI', 'p-value', 'Dimensions'],
cellLoc='center',
loc='center',
bbox=[0, 0, 1, 1])
table.auto_set_font_size(False)
table.set_fontsize(10)
table.scale(1, 2)
for i in range(len(table_data[0])):
table[(0, i)].set_facecolor('#4CAF50')
table[(0, i)].set_text_props(weight='bold', color='white')
for i in range(1, len(table_data) + 1):
for j in range(len(table_data[0])):
table[(i, j)].set_facecolor('#F5F5F5' if i % 2 == 0 else 'white')
ax2.set_title('ICC Analysis Results - Sagittal Thoracic', fontsize=14, fontweight='bold', pad=20)
plt.tight_layout()
plt.show()
if results:
fig3, ax3 = plt.subplots(1, 1, figsize=(6, 6)) # type: ignore
first_name = list(results.keys())[0]
for filename, display_name in csv_files.items():
if display_name == first_name:
df = pd.read_csv(filename, sep='\t', header=None) # type: ignore
data = df.values
break
if data.shape[1] >= 2:
means = np.mean(data, axis=1) # type: ignore
differences = []
for i in range(data.shape[0]):
subject_ratings = data[i, :]
subject_mean = np.mean(subject_ratings) # type: ignore
mean_abs_diff = np.mean(np.abs(subject_ratings - subject_mean)) # type: ignore
differences.append(mean_abs_diff)
differences = np.array(differences)
mean_diff = np.mean(differences) # type: ignore
std_diff = np.std(differences) # type: ignore
upper_limit = mean_diff + 1.96 * std_diff
lower_limit = mean_diff - 1.96 * std_diff
ax3.scatter(means, differences, alpha=0.7, s=50, color='#2E86AB')
ax3.axhline(y=mean_diff, color='red', linestyle='-', linewidth=2)
ax3.axhline(y=upper_limit, color='red', linestyle='--', linewidth=1)
ax3.axhline(y=lower_limit, color='red', linestyle='--', linewidth=1)
ax3.set_xlabel('Mean Thoracic Cobb Angle of All Five Raters (deg)', fontsize=12, fontweight='bold')
ax3.set_ylabel('Mean Absolute Difference from Average', fontsize=12, fontweight='bold')
ax3.set_title('Inter-Rater Variability Plot\nSagittal Thoracic', fontsize=14, fontweight='bold')
ax3.grid(True, alpha=0.3, linestyle='--')
ax3.text(0.05, 0.95, f'Limits of Agreement:\n{lower_limit:.2f} to {upper_limit:.2f}',
transform=ax3.transAxes, fontsize=10, verticalalignment='top',
bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8))
plt.tight_layout()
            # Ensure the output directory exists before writing the figure and results CSV.
            Path('../ICC_results').mkdir(parents=True, exist_ok=True)
            plt.savefig('../ICC_results/sagittal_inter_rater_variability.png', dpi=300,  # type: ignore
                        bbox_inches='tight', facecolor='white', edgecolor='none')
plt.show()
    # Reuse the per-case statistics computed for the variability plot above rather than
    # re-reading the CSV and recomputing them.
    bland_altman_values = {}
    if results and data.shape[1] >= 2:
        bland_altman_values = {
            'Mean_Difference': round(mean_diff, 2),
            'Upper_Limit': round(upper_limit, 2),
            'Lower_Limit': round(lower_limit, 2),
            'Std_Difference': round(std_diff, 2)
        }
results_df = pd.DataFrame([
{
'Measurement_Type': name,
'ICC_2_1': round(results[name]['icc'], 2),
'CI_Lower': round(results[name]['ci_lower'], 2),
'CI_Upper': round(results[name]['ci_upper'], 2),
'F_Statistic': round(results[name]['f_stat'], 2),
'P_Value': round(results[name]['p_value'], 2),
'N_Subjects': results[name]['n_subjects'],
'N_Raters': results[name]['n_raters'],
'Bland_Altman_Mean_Diff': bland_altman_values.get('Mean_Difference', ''),
'Bland_Altman_Upper_Limit': bland_altman_values.get('Upper_Limit', ''),
'Bland_Altman_Lower_Limit': bland_altman_values.get('Lower_Limit', ''),
'Bland_Altman_Std_Diff': bland_altman_values.get('Std_Difference', '')
}
for name in names
])
results_df.to_csv('../ICC_results/sagittal_icc_results.csv', index=False)
print(f"\nResults saved to '../ICC_results/sagittal_icc_results.csv'")
print(f"Inter-Rater Variability Plot saved as '../ICC_results/sagittal_inter_rater_variability.png'")
print(f"\n=== SAGITTAL THORACIC ICC ANALYSIS SUMMARY ===")
for name in names:
result = results[name]
icc = result['icc']
ci_lower = result['ci_lower']
ci_upper = result['ci_upper']
print(f"{name}: ICC = {icc:.4f}")
print(f" 95% CI: [{ci_lower:.3f}, {ci_upper:.3f}]")
print("\n" + "="*60)
print("SUMMARY WITH BOOTSTRAP ICC(2,k)")
print("="*60)
create_comprehensive_summary(
csv_path='../cobb_angles/dev_cobb.csv',
outdir='../ICC_results',
decimals=1,
n_boot=5000
)
print("\n" + "="*60)
print("TEST DATASET ANALYSIS (SINGLE-OBSERVER COBB ANGLES)")
print("="*60)
create_test_cobb_summary(
csv_path='../cobb_angles/test_cobb.csv',
outdir='../ICC_results',
decimals=1
)
if __name__ == "__main__":
create_sagittal_icc_plot()