#!/usr/bin/env python3
"""
Pearson correlation analysis for muscle fat vs Cobb angles
"""
import pandas as pd # type: ignore
import numpy as np # type: ignore
from scipy.stats import pearsonr # type: ignore
from pathlib import Path
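# NOTE: the relative paths below are resolved from the current working directory,
# which is assumed to be the scripts/ folder this file lives in.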
dev_fat_file = "../fatty_data/dev_fat.csv"
dev_cobb_file = "../cobb_angles/dev_cobb.csv"
dev_output_dir = "../pearson_correlation/dev_cobb_corr"
# Model prediction data
dev_model_pred_fat_file = "../fatty_data/model_pred_dev.csv"
dev_model_pred_output_dir = "../pearson_correlation/dev_model_cobb_corr"
test_fat_file = "../fatty_data/test_fat.csv"
test_cobb_file = "../cobb_angles/test_cobb.csv"
test_output_dir = "../pearson_correlation/test_cobb_corr"
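# Muscle compartments analyzed; each name is expected to appear in the fatty CSVs
# as a '<muscle>_fat_pct' column (see calculate_correlations below).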
muscle_names = [
    "psoas", "quadratus_lumborum", "paraspinal", "latissimus_dorsi",
    "iliacus", "rectus_femoris", "vastus", "rhomboid", "trapezius"
]
def load_dev_data():
    """Load development dataset (100-120)."""
    print("Loading development dataset...")
    fat_df = pd.read_csv(dev_fat_file)
    print(f"Fatty data loaded: {len(fat_df)} cases")
    cobb_df = pd.read_csv(dev_cobb_file, sep='\t', header=None) # type: ignore
    print(f"Cobb data loaded: {len(cobb_df)} cases")
    # Drop the 'Mean ± SD' summary row and any non-numeric case IDs
    fat_df = fat_df[fat_df['case_id'] != 'Mean ± SD'].copy()
    fat_df = fat_df[pd.to_numeric(fat_df['case_id'], errors='coerce').notna()] # type: ignore
    fat_df['case_id'] = fat_df['case_id'].astype(int)
    n_cases = min(len(cobb_df), len(fat_df))
    print(f"Using {n_cases} cases for development correlation analysis")
    # Dev Cobb file is tab-separated with several values per case; use the per-case mean
    cobb_values = cobb_df.iloc[:n_cases].values
    cobb_aligned = np.mean(cobb_values, axis=1) # type: ignore
    fat_aligned = fat_df.iloc[:n_cases]
    print(f"Cobb angles range: {cobb_aligned.min():.1f} to {cobb_aligned.max():.1f}")
    return cobb_aligned, fat_aligned, n_cases
def load_dev_model_pred_data():
    """Load development dataset with model predictions (100-120)."""
    print("Loading development dataset with model predictions...")
    fat_df = pd.read_csv(dev_model_pred_fat_file)
    print(f"Model prediction fatty data loaded: {len(fat_df)} cases")
    cobb_df = pd.read_csv(dev_cobb_file, sep='\t', header=None) # type: ignore
    print(f"Cobb data loaded: {len(cobb_df)} cases")
    # Drop the 'Mean ± SD' summary row and any non-numeric case IDs
    fat_df = fat_df[fat_df['case_id'] != 'Mean ± SD'].copy()
    fat_df = fat_df[pd.to_numeric(fat_df['case_id'], errors='coerce').notna()] # type: ignore
    fat_df['case_id'] = fat_df['case_id'].astype(int)
    n_cases = min(len(cobb_df), len(fat_df))
    print(f"Using {n_cases} cases for model prediction correlation analysis")
    # Same tab-separated Cobb file as the manual dev set; average per case
    cobb_values = cobb_df.iloc[:n_cases].values
    cobb_aligned = np.mean(cobb_values, axis=1) # type: ignore
    fat_aligned = fat_df.iloc[:n_cases]
    print(f"Cobb angles range: {cobb_aligned.min():.1f} to {cobb_aligned.max():.1f}")
    return cobb_aligned, fat_aligned, n_cases
def load_test_data():
    """Load test dataset (251-500)."""
    print("Loading test dataset...")
    fat_df = pd.read_csv(test_fat_file)
    print(f"Fatty data loaded: {len(fat_df)} cases")
    # Test Cobb file holds a single angle per line
    cobb_df = pd.read_csv(test_cobb_file, header=None, names=['cobb_angle']) # type: ignore
    print(f"Cobb data loaded: {len(cobb_df)} cases")
    # Drop the 'Mean ± SD' summary row and any non-numeric case IDs
    fat_df = fat_df[fat_df['case_id'] != 'Mean ± SD'].copy()
    fat_df = fat_df[pd.to_numeric(fat_df['case_id'], errors='coerce').notna()] # type: ignore
    fat_df['case_id'] = fat_df['case_id'].astype(int)
    n_cases = min(len(cobb_df), len(fat_df))
    print(f"Using {n_cases} cases for test correlation analysis")
    cobb_aligned = cobb_df.iloc[:n_cases]['cobb_angle'].values
    fat_aligned = fat_df.iloc[:n_cases]
    print(f"Cobb angles range: {cobb_aligned.min():.1f} to {cobb_aligned.max():.1f}")
    return cobb_aligned, fat_aligned, n_cases
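# NOTE: all three loaders align fatty rows to Cobb rows purely by position
# (iloc[:n_cases]); this assumes both files list cases in the same order.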
def calculate_correlations(cobb_angles, fat_data, dataset_name):
    """Calculate Pearson correlations between Cobb angles and fatty percentages."""
    print(f"\nCalculating correlations for {dataset_name} dataset...")
    results = []
    for muscle in muscle_names:
        fat_col = f"{muscle}_fat_pct"
        if fat_col in fat_data.columns:
            fat_percentages = pd.to_numeric(fat_data[fat_col], errors='coerce').values # type: ignore
            valid_indices = ~np.isnan(fat_percentages) # type: ignore
            # pearsonr needs at least two paired values
            if np.sum(valid_indices) < 2:
                print(f"Warning: Not enough valid data for {muscle}")
                continue
            cobb_filtered = cobb_angles[valid_indices]
            fat_filtered = fat_percentages[valid_indices]
            correlation, p_value = pearsonr(cobb_filtered, fat_filtered) # type: ignore
            results.append({
                'Muscle': muscle,
                'Correlation': round(correlation, 4),
                'P_Value': round(p_value, 4),
                'N_Cases': len(cobb_filtered)
            })
            print(f"{muscle}: r = {correlation:.4f}, p = {p_value:.4f}")
        else:
            print(f"Warning: Column {fat_col} not found in fatty data")
    return results
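# Each entry of `results` is a dict of the form
#   {'Muscle': 'psoas', 'Correlation': 0.1234, 'P_Value': 0.0456, 'N_Cases': 20}
# (numbers shown are illustrative only); save_results writes these rows to CSV.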
def save_results(results, output_dir, dataset_name):
    """Save correlation results to CSV."""
    output_dir.mkdir(parents=True, exist_ok=True)
    results_df = pd.DataFrame(results)
    output_file = output_dir / "fatty_atrophy_thoracic_correlations.csv"
    results_df.to_csv(output_file, index=False)
    print(f"\nResults saved to: {output_file}")
    print(f"\n=== {dataset_name.upper()} CORRELATION ANALYSIS SUMMARY ===")
    print(f"Total muscles analyzed: {len(results)}")
    print(f"Cases used: {results[0]['N_Cases'] if results else 'N/A'}")
    if results:
        strongest_positive = max(results, key=lambda x: x['Correlation'])
        strongest_negative = min(results, key=lambda x: x['Correlation'])
        print(f"\nStrongest positive correlation: {strongest_positive['Muscle']} (r = {strongest_positive['Correlation']})")
        print(f"Strongest negative correlation: {strongest_negative['Muscle']} (r = {strongest_negative['Correlation']})")
        significant = [r for r in results if r['P_Value'] < 0.05]
        print(f"Significant correlations (p < 0.05): {len(significant)}/{len(results)}")
def main():
    """Main function to run correlation analysis for both datasets."""
    print("=== PEARSON CORRELATION ANALYSIS ===")
    print("Cobb angles vs Fatty percentages")
    print("="*50)
    try:
        print("\n" + "="*50)
        print("DEVELOPMENT DATASET ANALYSIS (100-120) - MANUAL LABELS")
        print("="*50)
        cobb_dev, fat_dev, n_dev = load_dev_data()
        results_dev = calculate_correlations(cobb_dev, fat_dev, "Development")
        save_results(results_dev, Path(dev_output_dir), "Development")
        print("\n" + "="*50)
        print("DEVELOPMENT DATASET ANALYSIS (100-120) - MODEL PREDICTIONS")
        print("="*50)
        cobb_dev_model, fat_dev_model, n_dev_model = load_dev_model_pred_data()
        results_dev_model = calculate_correlations(cobb_dev_model, fat_dev_model, "Development Model Predictions")
        save_results(results_dev_model, Path(dev_model_pred_output_dir), "Development Model Predictions")
        print("\n" + "="*50)
        print("TEST DATASET ANALYSIS (251-500)")
        print("="*50)
        cobb_test, fat_test, n_test = load_test_data()
        results_test = calculate_correlations(cobb_test, fat_test, "Test")
        save_results(results_test, Path(test_output_dir), "Test")
        print("\n" + "="*50)
        print("ANALYSIS COMPLETE")
        print("="*50)
        print(f"Development dataset (manual): {n_dev} cases analyzed")
        print(f"Development dataset (model): {n_dev_model} cases analyzed")
        print(f"Test dataset: {n_test} cases analyzed")
        print("Results saved to:")
        print(f" - {dev_output_dir}/fatty_atrophy_thoracic_correlations.csv")
        print(f" - {dev_model_pred_output_dir}/fatty_atrophy_thoracic_correlations.csv")
        print(f" - {test_output_dir}/fatty_atrophy_thoracic_correlations.csv")
    except Exception as e:
        print(f"Error during analysis: {e}")
        return False
    return True
if __name__ == "__main__":
    main()
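# Usage sketch (assuming the input CSVs above are in place):
#   cd scripts && python pearson_analysis.py
# Each dataset writes fatty_atrophy_thoracic_correlations.csv to its output directory.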