Update app.py
app.py CHANGED
@@ -52,6 +52,59 @@ def device_info():
     except subprocess.CalledProcessError as e:
         print(f"Command failed: {e}")
 
+from gpustat import GPUStatCollection
+
+def update_gpu_status():
+    if not torch.cuda.is_available():
+        return "No Nvidia Device"
+    try:
+        gpu_stats = GPUStatCollection.new_query()
+        for gpu in gpu_stats:
+            # Assuming you want to monitor the first GPU, index 0
+            gpu_id = gpu.index
+            gpu_name = gpu.name
+            gpu_utilization = gpu.utilization
+            memory_used = gpu.memory_used
+            memory_total = gpu.memory_total
+            memory_utilization = (memory_used / memory_total) * 100
+            gpu_status = f"> **GPU** {gpu_id}: {gpu_name}, Utilization: {gpu_utilization}%, **Memory Used**: {memory_used}MB, **Memory Total**: {memory_total}MB, **Memory Utilization**: {memory_utilization:.2f}%"
+        return gpu_status
+
+    except Exception as e:
+        print(f"Error getting GPU stats: {e}")
+        return torch_update_gpu_status()
+
+def torch_update_gpu_status():
+    if torch.cuda.is_available():
+        gpu_info = torch.cuda.get_device_name(0)
+        gpu_memory = torch.cuda.mem_get_info(0)
+        total_memory = gpu_memory[1] / (1024 * 1024)
+        free_memory = gpu_memory[0] / (1024 * 1024)
+        used_memory = (gpu_memory[1] - gpu_memory[0]) / (1024 * 1024)
+
+        gpu_status = f"> **GPU**: {gpu_info} **Free Memory**: {free_memory:.2f} MB **Total Memory**: {total_memory:.2f} MB **Used Memory**: {used_memory:.2f} MB"
+    else:
+        gpu_status = "No GPU available"
+    return gpu_status
+
+def update_cpu_status():
+    import datetime
+    current_time = datetime.datetime.now().time()
+    time_str = current_time.strftime("%H:%M:%S")
+
+    cpu_percent = psutil.cpu_percent()
+    cpu_status = f"> **CPU Usage**: {cpu_percent}% {time_str}"
+    return cpu_status
+
+def update_status():
+    gpu_status = update_gpu_status()
+    cpu_status = update_cpu_status()
+    sys_status = gpu_status + "\n" + cpu_status
+    return sys_status
+
+def refresh_status():
+    return update_status()
+
 @gpu_decorator(duration=SPACES_GPU_DURATION)
 def transcribe(inputs, model, language, batch_size, chunk_length_s, stride_length_s, task, timestamp_mode, progress=gr.Progress(track_tqdm=True)):
     try:

@@ -387,6 +440,10 @@ with demo:
         interface_list=[file_transcribe, video_transcribe, yt_transcribe],
         tab_names=["Audio", "Video", "YouTube"]
     )
-
+    with gr.Group():
+        sys_status_output = gr.Markdown(label="System Status", interactive=False)
+        refresh_button = gr.Button("Refresh System Status")
+        refresh_button.click(refresh_status, None, sys_status_output)
+
 if __name__ == "__main__":
     demo.queue().launch(ssr_mode=False)
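Since the new helpers return plain strings and touch no Gradio state, the first hunk can be sanity-checked without rebuilding the Space. A minimal sketch, assuming it runs from the repo root (note that importing app executes app.py's module-level setup, though `demo.queue().launch()` stays behind the `__main__` guard):

```python
# smoke_test.py - hypothetical, not part of this commit.
# Importing app runs app.py's top-level setup (model loading etc.),
# but launch() is guarded by __name__ == "__main__".
from app import update_status

# Prints two Markdown blockquote lines, e.g.:
#   > **GPU** 0: <device name>, Utilization: <n>%, ...
#   > **CPU Usage**: <n>% <HH:MM:SS>
print(update_status())
```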
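As committed, the status panel starts empty and only updates when the button is clicked. If the Space's Gradio version ships `gr.Timer` (added around Gradio 4.40, which this Space may or may not pin), the same callback could also run on page load and on a polling interval. A standalone sketch of that wiring, under that version assumption, reusing `refresh_status` from this commit:

```python
# status_poll.py - hypothetical; timer-driven refresh of the same callback.
# Requires Gradio >= 4.40 for gr.Timer.
import gradio as gr
from app import refresh_status  # helper added in this commit

with gr.Blocks() as status_demo:
    sys_status_output = gr.Markdown()
    status_timer = gr.Timer(10)  # fire every 10 seconds
    status_timer.tick(refresh_status, None, sys_status_output)
    status_demo.load(refresh_status, None, sys_status_output)  # fill on page load

if __name__ == "__main__":
    status_demo.launch()
```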