# Set up the LLM clients used by the graph nodes
# and by the tools they call.
from dotenv import load_dotenv
from openai import OpenAI  # only needed if the Hugging Face Llama client below is re-enabled
from langchain_openai import ChatOpenAI
from langchain_google_genai import ChatGoogleGenerativeAI  # only needed if the Gemini model below is re-enabled
import os

# Load API keys from the local .env file.
load_dotenv()

openai_api_key = os.getenv("OPENAI_API_KEY")
# gemini_api_key = os.getenv("GOOGLE_API_KEY")
# llama4_api_key = os.getenv("HUGGINGFACE_API_TOKEN")

# Optional: Llama 4 served through the Hugging Face OpenAI-compatible router.
# llama_client = OpenAI(
#     base_url="https://router.huggingface.co/v1",
#     api_key=llama4_api_key,
# )

# Audio-capable model for voice/audio inputs.
openai_audio_model = ChatOpenAI(
    model="gpt-4o-audio-preview",
    # reasoning_effort="minimal",
    temperature=0,
    max_tokens=None,
    api_key=openai_api_key,
)

# General-purpose text model.
openai_model = ChatOpenAI(
    model="gpt-4o",
    # reasoning_effort="minimal",
    temperature=0,
    max_tokens=None,
    api_key=openai_api_key,
)

# Optional: Gemini as an alternative text model.
# gemini_model = ChatGoogleGenerativeAI(
#     model="gemini-2.5-flash",
#     temperature=0,
#     max_tokens=None,
#     google_api_key=gemini_api_key,
# )
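
# --- Illustrative usage sketch (an assumption, not part of the original wiring) ---
# A minimal example of how one of the chat models above might back a graph node.
# The node name, state shape, and the example prompt are hypothetical.
from langchain_core.messages import HumanMessage


def answer_node(state: dict) -> dict:
    """Hypothetical node: pass the accumulated messages to the text model."""
    response = openai_model.invoke(state["messages"])
    return {"messages": state["messages"] + [response]}


if __name__ == "__main__":
    # Quick smoke test of the sketch above (requires OPENAI_API_KEY to be set).
    result = answer_node({"messages": [HumanMessage(content="Hello!")]})
    print(result["messages"][-1].content)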