Rajhuggingface4253 committed on
Commit 4542c1a · verified · 1 Parent(s): e9c235a

Update app.py

Files changed (1)
  1. app.py +7 -1
app.py CHANGED
@@ -40,6 +40,12 @@ from neuttsair.neutts import NeuTTSAir
 
 # Explicitly use CPU as per Dockerfile and Hugging Face free tier compatibility
 DEVICE = "cpu"
+N_THREADS = os.cpu_count() or 4
+os.environ['OMP_NUM_THREADS'] = str(N_THREADS) # OpenMP (PyTorch/other parallel libraries)
+os.environ['MKL_NUM_THREADS'] = str(N_THREADS) # Intel MKL (used by PyTorch on Intel/AMD CPUs)
+os.environ['NUMEXPR_NUM_THREADS'] = str(N_THREADS) # For NumPy/NumExpr
+torch.set_num_threads(N_THREADS) # Explicit PyTorch core setting
+logger.info(f"⚙️ PyTorch configured to use {N_THREADS} CPU threads for max parallelism.")
 
 # ONNX Configuration
 USE_ONNX = True and ONNX_AVAILABLE # Auto-disable if ONNX not available
@@ -47,7 +53,7 @@ ONNX_MODEL_DIR = "onnx_models"
 os.makedirs(ONNX_MODEL_DIR, exist_ok=True)
 
 # Configure Max Workers for concurrent synthesis threads
-MAX_WORKERS = min(4, (os.cpu_count() or 2))
+MAX_WORKERS = min(4, N_THREADS)
 tts_executor = ThreadPoolExecutor(max_workers=MAX_WORKERS)
 SAMPLE_RATE = 24000
 
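For reference, a minimal standalone sketch of the CPU-threading setup this commit applies: the thread-count environment variables are set before torch is imported so the OpenMP/MKL runtimes can pick them up at initialization, then the thread pool is capped at min(4, N_THREADS). The logger and NeuTTSAir pieces of app.py are omitted, and synthesize() is a hypothetical stand-in for the real synthesis call.

import os
from concurrent.futures import ThreadPoolExecutor

N_THREADS = os.cpu_count() or 4
os.environ["OMP_NUM_THREADS"] = str(N_THREADS)      # OpenMP thread pool size
os.environ["MKL_NUM_THREADS"] = str(N_THREADS)      # Intel MKL (PyTorch CPU backend)
os.environ["NUMEXPR_NUM_THREADS"] = str(N_THREADS)  # NumExpr/NumPy helpers

import torch  # imported after the env vars so the threading runtimes can see them

torch.set_num_threads(N_THREADS)  # intra-op parallelism for PyTorch CPU ops

# Cap concurrent synthesis jobs; each job already parallelizes internally,
# so a small pool avoids oversubscribing the CPU.
MAX_WORKERS = min(4, N_THREADS)
tts_executor = ThreadPoolExecutor(max_workers=MAX_WORKERS)

def synthesize(text: str) -> str:
    """Hypothetical stand-in for the real TTS call in app.py."""
    return f"synthesized: {text}"

if __name__ == "__main__":
    futures = [tts_executor.submit(synthesize, t) for t in ["hello", "world"]]
    for f in futures:
        print(f.result())
    tts_executor.shutdown()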