Update app.py
app.py CHANGED
```diff
@@ -38,9 +38,9 @@ except ImportError: PIL_TESSERACT_AVAILABLE = False; print("WARNING: Pillow or P
 try: import whisper; WHISPER_AVAILABLE = True
 except ImportError: WHISPER_AVAILABLE = False; print("WARNING: OpenAI Whisper not found, Audio Transcription tool will be disabled.")
 
-# Google GenAI
-from google.genai.types import HarmCategory, HarmBlockThreshold
-from google.ai import generativelanguage as glm
+# Google GenAI SDK types
+from google.genai.types import HarmCategory, HarmBlockThreshold
+from google.ai import generativelanguage as glm
 
 # LangChain
 from langchain_core.messages import HumanMessage, AIMessage, SystemMessage, ToolMessage
```
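The context lines above show the pattern this file uses for optional dependencies: attempt the import, set a module-level availability flag, and warn instead of crashing. Below is a minimal, generic sketch of that pattern; the `optional_import` helper is an illustrative name, not part of the commit.

```python
import importlib
from types import ModuleType
from typing import Optional, Tuple

def optional_import(module_name: str, feature: str) -> Tuple[Optional[ModuleType], bool]:
    """Try to import a module; return (module, available) instead of raising."""
    try:
        module = importlib.import_module(module_name)
        return module, True
    except ImportError:
        print(f"WARNING: {module_name} not found, {feature} will be disabled.")
        return None, False

# Mirrors `try: import whisper; WHISPER_AVAILABLE = True` from the hunk above.
whisper, WHISPER_AVAILABLE = optional_import("whisper", "Audio Transcription tool")
```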
```diff
@@ -78,42 +78,46 @@ google_genai_client: Optional[google_genai_sdk.Client] = None
 try:
     from langgraph.graph import StateGraph, END
     try:
-        langgraph.prebuilt
+        from langgraph.prebuilt import ToolNode # Try modern ToolNode first
         LG_ToolExecutor_Class = ToolNode
         print("Using langgraph.prebuilt.ToolNode for LangGraph tool execution.")
     except ImportError:
         try:
-            from langgraph.prebuilt import ToolExecutor
+            from langgraph.prebuilt import ToolExecutor # Fallback to older ToolExecutor
             LG_ToolExecutor_Class = ToolExecutor
             print("Using langgraph.prebuilt.ToolExecutor (fallback) for LangGraph tool execution.")
         except ImportError as e_lg_exec_inner:
             print(f"Failed to import ToolNode and ToolExecutor from langgraph.prebuilt: {e_lg_exec_inner}")
             LG_ToolExecutor_Class = None
 
-    if LG_ToolExecutor_Class is not None:
-        # ToolInvocation might still be an issue depending on langgraph version
+    if LG_ToolExecutor_Class is not None: # Only proceed if a tool executor class was found
         try:
             from langgraph.prebuilt import ToolInvocation as LGToolInvocationActual
         except ImportError:
-            try:
-                from langgraph.tools import ToolInvocation as LGToolInvocationActual
-                print("Imported ToolInvocation from langgraph.tools")
+            try:
+                from langgraph.tools import ToolInvocation as LGToolInvocationActual # Older path
+                print("Imported ToolInvocation from langgraph.tools (older path).")
             except ImportError as e_ti:
-                print(f"WARNING: Could not import ToolInvocation from langgraph.prebuilt or langgraph.tools: {e_ti}")
+                print(f"WARNING: Could not import ToolInvocation from langgraph.prebuilt or langgraph.tools: {e_ti}. LangGraph may not function fully if ToolExecutor_Class requires it.")
                 LGToolInvocationActual = None # type: ignore
-
-
-
-
+
+        # Other essential LangGraph imports
+        from langgraph.graph.message import add_messages as lg_add_messages
+        from langgraph.checkpoint.memory import MemorySaver as LGMemorySaver
+
+        # Set LANGGRAPH_FLAVOR_AVAILABLE only if all critical components for your setup are loaded.
+        # If LGToolInvocationActual is critical for your chosen LG_ToolExecutor_Class, check it here.
+        if LGToolInvocationActual is not None or type(LG_ToolExecutor_Class).__name__ == 'ToolNode': # ToolNode might not need separate ToolInvocation
             LANGGRAPH_FLAVOR_AVAILABLE = True
             LG_StateGraph, LG_END, LG_ToolInvocation, add_messages, MemorySaver_Class = \
-                StateGraph, END, LGToolInvocationActual, lg_add_messages, LGMemorySaver
-            print("Successfully imported LangGraph components.")
+                StateGraph, END, LGToolInvocationActual, lg_add_messages, LGMemorySaver # type: ignore
+            print("Successfully imported essential LangGraph components.")
         else:
-            LANGGRAPH_FLAVOR_AVAILABLE = False
+            LANGGRAPH_FLAVOR_AVAILABLE = False
             LG_StateGraph, LG_END, add_messages, MemorySaver_Class = (None,) * 4 # type: ignore
-            print(f"WARNING: LangGraph ToolInvocation not found. LangGraph agent
-
+            print(f"WARNING: LangGraph ToolInvocation not found and may be required. LangGraph agent functionality might be limited or disabled.")
+
+    else: # No LG_ToolExecutor_Class found
         LANGGRAPH_FLAVOR_AVAILABLE = False
         LG_StateGraph, LG_END, LG_ToolInvocation, add_messages, MemorySaver_Class = (None,) * 5 # type: ignore
         print(f"WARNING: No suitable LangGraph tool executor (ToolNode/ToolExecutor) found. LangGraph agent will be disabled.")
```
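The hunk above chases the tool-executor class across LangGraph versions: prefer `ToolNode`, fall back to `ToolExecutor`, and only enable the LangGraph path if something was found. A stripped-down sketch of that version-probing idea follows; the `resolve_first` helper is an illustrative name and the code works whether or not `langgraph` is installed.

```python
import importlib
from typing import Any, Optional, Sequence, Tuple

def resolve_first(candidates: Sequence[Tuple[str, str]]) -> Tuple[Optional[Any], Optional[str]]:
    """Return the first importable `module:attribute` pair from `candidates`."""
    for module_name, attr in candidates:
        try:
            module = importlib.import_module(module_name)
            return getattr(module, attr), f"{module_name}.{attr}"
        except (ImportError, AttributeError):
            continue
    return None, None

# Same probing order as the diff: prefer ToolNode, then the older ToolExecutor.
executor_cls, origin = resolve_first([
    ("langgraph.prebuilt", "ToolNode"),
    ("langgraph.prebuilt", "ToolExecutor"),
])
LANGGRAPH_AVAILABLE = executor_cls is not None
print(f"Tool executor: {origin or 'none found; LangGraph agent disabled'}")
```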
```diff
@@ -121,7 +125,7 @@ try:
 except ImportError as e: # Catch import error for StateGraph, END itself
     LANGGRAPH_FLAVOR_AVAILABLE = False
     LG_StateGraph, LG_ToolExecutor_Class, LG_END, LG_ToolInvocation, add_messages, MemorySaver_Class = (None,) * 6
-    print(f"WARNING: Core LangGraph components (StateGraph, END) not found or import error: {e}. LangGraph agent will be disabled.")
+    print(f"WARNING: Core LangGraph components (like StateGraph, END) not found or import error: {e}. LangGraph agent will be disabled.")
 
 
 # --- Constants ---
```
```diff
@@ -396,9 +400,9 @@ def initialize_agent_and_tools(force_reinit=False):
             model=GEMINI_MODEL_NAME,
             google_api_key=GOOGLE_API_KEY,
             temperature=0.0,
-            # safety_settings
+            # safety_settings is removed to use model defaults.
             timeout=120,
-            convert_system_message_to_human=False # Explicitly
+            convert_system_message_to_human=False # Explicitly False
         )
         logger.info(f"LangChain LLM (Planner) initialized: {GEMINI_MODEL_NAME} (Using default safety settings, convert_system_message_to_human=False)")
     except Exception as e:
```
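The hunk only shows the keyword arguments of the planner constructor; the class itself is off-screen. The sketch below assumes the planner is `langchain_google_genai.ChatGoogleGenerativeAI`, and the model name and key handling are placeholders for illustration only.

```python
# Hedged sketch of the planner initialization around this hunk; the class name
# and GEMINI_MODEL_NAME / GOOGLE_API_KEY values are assumptions, not from the diff.
import os

GEMINI_MODEL_NAME = "gemini-1.5-flash"        # placeholder model name
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")  # expected to be provided as a secret

try:
    from langchain_google_genai import ChatGoogleGenerativeAI

    llm = ChatGoogleGenerativeAI(
        model=GEMINI_MODEL_NAME,
        google_api_key=GOOGLE_API_KEY,
        temperature=0.0,                        # deterministic planning
        timeout=120,                            # generous timeout for long tool plans
        convert_system_message_to_human=False,  # keep the SystemMessage as-is, per the diff
        # safety_settings intentionally omitted so the model defaults apply
    )
except Exception as exc:  # missing package, bad key, etc.
    llm = None
    print(f"Planner LLM init failed: {exc}")
```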
```diff
@@ -417,27 +421,30 @@ def initialize_agent_and_tools(force_reinit=False):
     except Exception as e: logger.warning(f"PythonREPLTool init failed: {e}")
     logger.info(f"Final tools list for agent: {[t.name for t in TOOLS]}")
 
-    if LANGGRAPH_FLAVOR_AVAILABLE and all([LG_StateGraph, LG_ToolExecutor_Class, LG_END, LLM_INSTANCE,
+    if LANGGRAPH_FLAVOR_AVAILABLE and all([LG_StateGraph, LG_ToolExecutor_Class, LG_END, LLM_INSTANCE, add_messages]): # LG_ToolInvocation removed from check as it's problematic
         if not LANGGRAPH_MEMORY_SAVER and MemorySaver_Class: LANGGRAPH_MEMORY_SAVER = MemorySaver_Class(); logger.info("LangGraph MemorySaver initialized.")
         try:
             logger.info(f"Attempting LangGraph init (Tool Executor type: {LG_ToolExecutor_Class.__name__ if LG_ToolExecutor_Class else 'None'})")
             _TypedDict = getattr(__import__('typing_extensions'), 'TypedDict', dict)
             class AgentState(_TypedDict): input: str; messages: Annotated[List[Any], add_messages]
 
-
-                tools="\n".join([f"- {t.name}: {t.description}" for t in TOOLS]),
+            base_system_prompt_content = LANGGRAPH_PROMPT_TEMPLATE_STR.format(
+                tools="\n".join([f"- {t.name}: {t.description}" for t in TOOLS]),
+                input="{current_task_input_placeholder}"
             )
+
             def agent_node(state: AgentState):
-
-                system_message_content =
+                current_task_actual_input = state.get('input', '')
+                system_message_content = base_system_prompt_content.replace("{current_task_input_placeholder}", current_task_actual_input)
 
                 messages_for_llm = [SystemMessage(content=system_message_content)]
                 messages_for_llm.extend(state.get('messages', []))
 
                 logger.debug(f"LangGraph agent_node - messages_for_llm: {messages_for_llm}")
-
-
-
+                # Check the content of the first message (SystemMessage)
+                if not messages_for_llm or not (isinstance(messages_for_llm[0], SystemMessage) and messages_for_llm[0].content and messages_for_llm[0].content.strip()):
+                    logger.error("LLM call would fail in agent_node: First message (SystemMessage) is empty or not valid.")
+                    return {"messages": [AIMessage(content="[ERROR] Agent node: Initial SystemMessage content is empty or invalid.")]}
 
                 bound_llm = LLM_INSTANCE.bind_tools(TOOLS) # type: ignore
                 response = bound_llm.invoke(messages_for_llm)
```
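The `agent_node` change above formats the prompt template once (with the tool list) at graph-build time, splices the per-task input in at call time, and refuses to call the LLM if the resulting system message would be empty. A plain-Python sketch of that flow follows; `PROMPT_TEMPLATE` and `fake_tools` are stand-ins, not the app's real template or tools.

```python
PROMPT_TEMPLATE = (
    "You are a planner. You can use these tools:\n{tools}\n\n"
    "Current task: {input}"
)

fake_tools = [("web_search", "Search the web."), ("python_repl", "Run Python code.")]

# Format once at build time; keep a placeholder for the per-task input.
base_prompt = PROMPT_TEMPLATE.format(
    tools="\n".join(f"- {name}: {desc}" for name, desc in fake_tools),
    input="{current_task_input_placeholder}",
)

def build_system_prompt(task_input: str) -> str:
    """Substitute the actual task input at call time, as agent_node does."""
    prompt = base_prompt.replace("{current_task_input_placeholder}", task_input)
    if not prompt.strip():
        raise ValueError("System prompt is empty; refusing to call the LLM.")
    return prompt

print(build_system_prompt("Summarize the attached PDF."))
```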
```diff
@@ -446,10 +453,10 @@ def initialize_agent_and_tools(force_reinit=False):
             if not LG_ToolExecutor_Class: raise ValueError("LG_ToolExecutor_Class is None for LangGraph.")
             tool_executor_instance_lg = LG_ToolExecutor_Class(tools=TOOLS)
 
-
-            def tool_node(state: AgentState):
+            def tool_node(state: AgentState): # This custom function invokes tools from AIMessage
                 last_msg = state['messages'][-1] if state.get('messages') and isinstance(state['messages'][-1], AIMessage) else None
                 if not last_msg or not last_msg.tool_calls: return {"messages": []}
+
                 tool_results = []
                 for tc in last_msg.tool_calls:
                     name, args, tc_id = tc.get('name'), tc.get('args'), tc.get('id')
```
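`tool_node` only runs tools when the last message is an AI message that actually requested them; otherwise it returns an empty update. Below is a library-free sketch of that guard and of the `name`/`args`/`id` fields the loop pulls out of each tool call; the dict-based messages are stand-ins for the LangChain message objects used in the app.

```python
from typing import Any, Dict, List

def pending_tool_calls(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Return the tool calls requested by the last AI message, if any."""
    if not messages:
        return []
    last = messages[-1]
    if last.get("role") != "ai":          # stand-in for isinstance(msg, AIMessage)
        return []
    return last.get("tool_calls") or []   # each call: {"name", "args", "id"}

history = [
    {"role": "human", "content": "What is 2 + 3?"},
    {"role": "ai", "content": "", "tool_calls": [
        {"name": "calculator", "args": {"expression": "2 + 3"}, "id": "call_1"},
    ]},
]
for tc in pending_tool_calls(history):
    print(tc["name"], tc["args"], tc["id"])
```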
```diff
@@ -459,18 +466,30 @@
                         continue
                     try:
                         logger.info(f"LG Tool Invoking: '{name}' with {args} (ID: {tc_id})")
-                        if not
-
-
+                        # ToolNode expects a ToolInvocation if it's not being used as a direct runnable node for the whole tool step
+                        # However, if LG_ToolExecutor_Class is ToolNode, it's designed to take the list of tool_calls directly.
+                        # The issue was that LG_ToolInvocation itself might not be importable.
+                        # If LG_ToolExecutor_Class is ToolNode, it should handle the invocation from tool_calls.
+                        # Let's simplify and pass the tool_call directly if it's ToolNode
+                        if LG_ToolInvocation and type(LG_ToolExecutor_Class).__name__ != 'ToolNode':
+                            tool_invocation_obj_lg = LG_ToolInvocation(tool=name, tool_input=args)
+                            output_lg = tool_executor_instance_lg.invoke(tool_invocation_obj_lg) # type: ignore
+                        else: # Assume tool_executor_instance_lg (e.g. ToolNode) can handle the raw tool_call dict
+                            output_lg = tool_executor_instance_lg.invoke(tc) # type: ignore
+
                         tool_results.append(ToolMessage(content=str(output_lg), tool_call_id=tc_id, name=name))
                     except Exception as e_tool_node_lg:
                         logger.error(f"LG Tool Error ('{name}'): {e_tool_node_lg}", exc_info=True)
                         tool_results.append(ToolMessage(content=f"Error for tool {name}: {str(e_tool_node_lg)}", tool_call_id=tc_id, name=name))
                 return {"messages": tool_results}
 
+
             workflow_lg = LG_StateGraph(AgentState) # type: ignore
             workflow_lg.add_node("agent", agent_node)
-
+            # If LG_ToolExecutor_Class is ToolNode, tool_executor_instance_lg is a runnable node.
+            # workflow_lg.add_node("tools", tool_executor_instance_lg) # This is the more standard LangGraph way for ToolNode
+            workflow_lg.add_node("tools", tool_node) # Keeping custom for now due to ToolInvocation import issues
+
             workflow_lg.set_entry_point("agent")
             def should_continue_lg(state: AgentState): return "tools" if state['messages'][-1].tool_calls else LG_END
             workflow_lg.add_conditional_edges("agent", should_continue_lg, {"tools": "tools", LG_END: LG_END}) # type: ignore
```
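The wiring above (`agent` node, conditional edge, `tools` node, back to `agent`) encodes a loop: call the model, execute any requested tools, feed the results back, and stop when no more tools are requested. A plain-Python rendering of that control flow is sketched below, with toy `agent_step`/`run_tool` functions standing in for the LLM and the tool executor.

```python
from typing import Any, Dict, List

def agent_step(messages: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Toy agent: request the calculator once, then answer."""
    if not any(m.get("role") == "tool" for m in messages):
        return {"role": "ai", "content": "", "tool_calls": [
            {"name": "calculator", "args": {"expression": "2 + 3"}, "id": "call_1"},
        ]}
    return {"role": "ai", "content": "The answer is 5.", "tool_calls": []}

def run_tool(call: Dict[str, Any]) -> Dict[str, Any]:
    result = eval(call["args"]["expression"], {"__builtins__": {}})  # toy tool only
    return {"role": "tool", "content": str(result), "tool_call_id": call["id"]}

messages: List[Dict[str, Any]] = [{"role": "human", "content": "What is 2 + 3?"}]
while True:
    ai_msg = agent_step(messages)          # "agent" node
    messages.append(ai_msg)
    if not ai_msg["tool_calls"]:           # should_continue_lg routes to END
        break
    for call in ai_msg["tool_calls"]:      # "tools" node
        messages.append(run_tool(call))
print(messages[-1]["content"])
```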
```diff
@@ -541,6 +560,10 @@ def get_agent_response(prompt: str, task_id: Optional[str]=None, thread_id: Opti
                 last_message_in_history = final_state_lg_get['messages'][-1]
                 if hasattr(last_message_in_history, 'content') and isinstance(last_message_in_history.content, str):
                     return f"[INFO] Fallback to last message content: {str(last_message_in_history.content)}"
+                elif hasattr(last_message_in_history, 'content') and isinstance(last_message_in_history.content, list):
+                    return f"[INFO] Fallback to last message (list content): {str(last_message_in_history.content)[:150]}"
+                elif isinstance(last_message_in_history, ToolMessage):
+                    return f"[INFO] Fallback to ToolMessage content: {str(last_message_in_history.content)[:150]}"
                 else:
                     logger.error(f"LangGraph: Could not extract string content from the very last message: {last_message_in_history}")
                     return "[ERROR] LangGraph: Could not extract final answer from messages."
```
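The new `elif` branches make the fallback tolerant of messages whose `content` is a list (multimodal parts) or that are ToolMessages rather than plain AI text. A small helper in the same spirit, using duck typing over a hypothetical message object rather than the real LangChain classes:

```python
from dataclasses import dataclass
from typing import Any, Optional

@dataclass
class FakeMessage:          # stand-in for a LangChain message object
    content: Any

def extract_final_text(message: Any, limit: int = 150) -> Optional[str]:
    """Pull a printable answer out of whatever the last message happens to be."""
    content = getattr(message, "content", None)
    if isinstance(content, str) and content.strip():
        return content
    if isinstance(content, list):            # e.g. multimodal content parts
        return str(content)[:limit]
    if content is not None:                  # anything else that carries content
        return str(content)[:limit]
    return None                              # caller logs an error and gives up

print(extract_final_text(FakeMessage("Final answer: 42")))
print(extract_final_text(FakeMessage([{"type": "text", "text": "part"}])))
```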
```diff
@@ -658,12 +681,12 @@ with gr.Blocks(css=".gradio-container {max-width:1280px !important;margin:auto !
     demo.load(update_ui_on_load_fn_within_context, [], [agent_status_display, missing_secrets_display])
 
 if __name__ == "__main__":
-    logger.info(f"Application starting up (v7 -
+    logger.info(f"Application starting up (v7 - Final SafetySettings and Contents Fix)...")
     if not PYPDF2_AVAILABLE: logger.warning("PyPDF2 (PDF tool) NOT AVAILABLE.")
     if not PIL_TESSERACT_AVAILABLE: logger.warning("Pillow/Pytesseract (OCR tool) NOT AVAILABLE.")
     if not WHISPER_AVAILABLE: logger.warning("Whisper (Audio tool) NOT AVAILABLE.")
     if LANGGRAPH_FLAVOR_AVAILABLE: logger.info(f"Core LangGraph components (StateGraph, END, {type(LG_ToolExecutor_Class).__name__ if LG_ToolExecutor_Class else 'FailedExecutor'}) loaded.")
-    else: logger.warning("Core LangGraph FAILED import or essential component (ToolExecutor/Node) missing. ReAct fallback. Check requirements & Space build logs.")
+    else: logger.warning("Core LangGraph FAILED import or essential component (ToolExecutor/Node/Invocation) missing. ReAct fallback. Check requirements & Space build logs.")
 
     missing_vars_startup_list_global.clear()
     if not GOOGLE_API_KEY: missing_vars_startup_list_global.append("GOOGLE_API_KEY")
```
```diff
@@ -686,4 +709,4 @@ if __name__ == "__main__":
 
     logger.info(f"Space ID: {os.getenv('SPACE_ID', 'Not Set')}")
     logger.info("Gradio Interface launching...")
-    demo.queue().launch(debug=os.getenv("GRADIO_DEBUG","false").lower()=="true", share=False, max_threads=20)
+    demo.queue().launch(debug=os.getenv("GRADIO_DEBUG","false").lower()=="true", share=False, max_threads=20)
```
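The launch line parses a debug flag from the environment before starting the queued Gradio app. Below is a minimal stand-alone version of that pattern; the `env_flag` helper and the `gr.Interface` demo are illustrative only, since the real app builds a `gr.Blocks` UI.

```python
import os
import gradio as gr

def env_flag(name: str, default: str = "false") -> bool:
    """Interpret an environment variable as a boolean, like GRADIO_DEBUG above."""
    return os.getenv(name, default).strip().lower() in {"1", "true", "yes"}

demo = gr.Interface(fn=lambda text: text.upper(), inputs="text", outputs="text")

if __name__ == "__main__":
    # queue() enables request queuing; debug mirrors the GRADIO_DEBUG handling in the diff.
    demo.queue().launch(debug=env_flag("GRADIO_DEBUG"), share=False, max_threads=20)
```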