Spaces:
Runtime error
Runtime error
| import io | |
| import globales | |
| import espacios | |
| import herramientas | |
| import gradio_client | |
| from huggingface_hub import InferenceClient | |
| import conexion_firebase | |
| import random | |
| import time | |
def genera_platillo_gpu(platillo):
    """Generate a dish image on a randomly chosen GPU-backed Gradio Space.

    Builds the prompt from the global prefix plus *platillo*, calls the
    Space's predict endpoint and, on success, deducts the configured GPU
    seconds from the HF budget.

    Args:
        platillo: Dish name appended to the global prompt prefix.

    Returns:
        The first element of the Space's prediction result on success,
        or a string beginning with "Error 500:" when anything fails
        (this function never raises).
    """
    prompt = globales.previo + platillo
    print("Platillo enviado:", platillo)
    try:
        # Each entry in espacios.espacio carries the Space id plus the
        # static kwargs that particular endpoint expects.
        selected_space_config = random.choice(espacios.espacio)
        client = gradio_client.Client(
            selected_space_config['id'], hf_token=globales.llave
        )
        kwargs = selected_space_config['static_kwargs']
        result = client.predict(
            **kwargs,
            prompt=prompt,
            width=786,
            height=568,
        )
        # GPU runs consume paid HF seconds; account for the cost.
        herramientas.restaSegundosGPU(globales.work_cost)
        print("Platillo generado:", platillo)
        return result[0]
    except Exception as e:
        # Generic fallback image removed: the backend supplies it now.
        print("Excepción:", e)
        return f"Error 500: {e}"
def genera_platillo_inference(platillo):
    """Generate a dish image through the Hugging Face InferenceClient.

    Reads the current model and the remaining hf-inference credits from
    Firebase, selects the primary or backup provider accordingly, runs
    text-to-image and deducts the inference cost on success.

    Args:
        platillo: Dish name appended to the global prompt prefix.

    Returns:
        An ``io.BytesIO`` containing the generated PNG on success, or a
        string beginning with "Error:" when inference fails. A gateway
        timeout additionally persists the backup model so future calls
        avoid the timed-out one.
    """
    modelo = conexion_firebase.obtenDato('nowme', 'huggingface', 'modelo_actual')
    print("Modelo:", modelo)

    # Choose the inference provider based on the remaining credits.
    creditos_restantes_inference = conexion_firebase.obtenDato(
        'nowme', 'huggingface', 'hfInference'
    )
    if creditos_restantes_inference > 0:
        provedor_seleccionado = globales.proveedor
    else:
        provedor_seleccionado = globales.proveedor_back

    prompt = globales.previo + platillo
    print("Platillo enviado:", platillo)

    client = InferenceClient(
        provider=provedor_seleccionado,
        api_key=globales.llave,
    )

    try:
        image = client.text_to_image(
            prompt,
            model=modelo,
            width=786,
            height=568,
            num_inference_steps=16,
        )
        # Inference runs consume paid seconds; account for the cost.
        herramientas.restaSegundosInference(globales.inference_cost)
    except Exception as e:
        print("Excepción:", e)
        if "Gateway Time-out" in str(e):
            print("GATEWAY TIME-OUT 💀")
            # Persist the backup model so subsequent calls skip the
            # timed-out one.
            herramientas.modificaModeloActual(globales.inferencia_backup)
        return f"Error: {e}"

    # Serialize the PIL image into an in-memory PNG for the caller.
    img_io = io.BytesIO()
    image.save(img_io, "PNG")
    img_io.seek(0)
    print("Platillo generado:", platillo)
    return img_io