semmyk commited on
Commit
a50f7ce
·
1 Parent(s): dc56f4d

v0.2.8.6: Baseline 03 - fix require einops for nomic - update Gradio embed components, add queue - update README - attempt fixing GenAI role:assistant with modify_history_in_place()

Browse files
Files changed (4) hide show
  1. README.md +6 -4
  2. app.py +36 -12
  3. app_gradio_lightrag.py +54 -3
  4. requirements.txt +1 -0
README.md CHANGED
@@ -27,7 +27,7 @@ owner: research-semmyk
27
  #---
28
 
29
  #short_description: PDF & HTML parser to markdown
30
- version: 0.2.0
31
  readme: README.md
32
  requires-python: ">=3.12"
33
  #dependencies: []
@@ -96,13 +96,15 @@ gradio app_gradio_lightrag.py --demo-name=gradio_ui
96
  - For Colab: install requirements and run the app cell.
97
 
98
  ## Usage
99
- - Select your data folder (default: `dataset/data/docs`)
100
- - Choose LLM backend (OpenAI or Ollama). GenAI has a bug yielding error: role: 'assistant' instead of 'user' when updating history.
101
  - Activate the RAG constructor
102
  - Click 'Index Documents' to build the KG entities
103
  - Click 'Query' to get answers
104
  - Enter your query and select query mode
105
- - Click 'Show Knowledge Graph' to visualise the KG
 
 
106
 
107
  ## Notes
108
  - Only markdown files are supported for ingestion (images in `/images` subfolder are ignored for now). <br>NB: other formats will be enabled later: pdf, txt, html...
 
27
  #---
28
 
29
  #short_description: PDF & HTML parser to markdown
30
+ version: 0.2.8.6
31
  readme: README.md
32
  requires-python: ">=3.12"
33
  #dependencies: []
 
96
  - For Colab: install requirements and run the app cell.
97
 
98
  ## Usage
99
+ - Browse/Select your data folder (default: `dataset/data/docs`)
100
+ - Choose LLM backend (OpenAI or Ollama). [fix: GenAI has a bug yielding error: role: 'assistant' instead of 'user' when updating history].
101
  - Activate the RAG constructor
102
  - Click 'Index Documents' to build the KG entities
103
  - Click 'Query' to get answers
104
  - Enter your query and select query mode
105
+ - Click 'Show Knowledge Graph' to visualise the KG
106
+
107
+ NB: If using HuggingFace, log in first before browsing/selecting/uploading files and setting LLM parameters.
108
 
109
  ## Notes
110
  - Only markdown files are supported for ingestion (images in `/images` subfolder are ignored for now). <br>NB: other formats will be enabled later: pdf, txt, html...
app.py CHANGED
@@ -89,7 +89,7 @@ def gradio_ui(app_logic: LightRAGApp):
89
  with gr.Row():
90
  llm_backend_cb = gr.Radio(["OpenAI", "Ollama", "GenAI"], value="OpenAI", label="LLM Backend: OpenAI, Local or GenAI")
91
  llm_model_name_tb = gr.Textbox(value=os.getenv("LLM_MODEL", "openai/gpt-oss-120b"), label="LLM Model Name", show_copy_button=True) #.split('/')[1], label="LLM Model Name") "meta-llama/Llama-4-Maverick-17B-128E-Instruct")), #image-Text-to-Text #"openai/gpt-oss-120b",
92
- with gr.Row():
93
  with gr.Row(): #elem_classes="password-box"):
94
  #openai_key_tb = gr.Textbox(value=os.getenv("OPENAI_API_KEY", "jan-ai"), label="OpenAI API Key",
95
  # type="password", elem_classes="password-box", container=False, interactive=True, info="OpenAI API Key") #, show_copy_button=True)
@@ -98,15 +98,16 @@ def gradio_ui(app_logic: LightRAGApp):
98
  toggle_btn_openai_key = gr.Button(
99
  value="👁️", # Initial eye icon
100
  elem_classes="icon-button", size="sm") #, min_width=50)
101
- with gr.Row():
102
  openai_baseurl_tb = gr.Textbox(value=os.getenv("OPENAI_API_BASE", "https://router.huggingface.co/v1"), label="OpenAI baseurl", show_copy_button=True)
103
  ollama_host_tb = gr.Textbox(value=os.getenv("OLLAMA_HOST", "http://localhost:1234/v1"), label="Ollama Host", show_copy_button=True)
104
  #ollama_host_tb = gr.Textbox(value=os.getenv("OPENAI_API_EMBED_BASE", ""), label="Ollama Host")
105
- with gr.Row():
106
- openai_baseurl_embed_tb = gr.Textbox(value=os.getenv("OPENAI_API_EMBED_BASE", "http://localhost:1234/v1"), label="LLM Embed baseurl", show_copy_button=True)
107
- llm_model_embed_tb = gr.Textbox(value=os.getenv("LLM_MODEL_EMBED","text-embedding-bge-m3"), label="LLM Embedding Model", show_copy_button=True) #.split('/')[1], label="Embedding Model")
108
  with gr.Row():
109
- embed_backend_dd = gr.Dropdown(choices=["Transformer", "Provider"], value="Provider", label="Embedding Type")
 
 
 
 
110
  with gr.Row(): #elem_classes="password-box"):
111
  openai_key_embed_tb = gr.Textbox(value=os.getenv("OPENAI_API_KEY_EMBED", "jan-ai"), label="LLM API Key Embed", #lm-studio
112
  type="password", elem_classes="password-box", container=False, interactive=True, info="LLM API Key Embed") #, show_copy_button=True)
@@ -114,7 +115,7 @@ def gradio_ui(app_logic: LightRAGApp):
114
  value="👁️", # Initial eye icon
115
  elem_classes="icon-button", size="sm") #, min_width=50)
116
  #openai_key_embed_tb = gr.Textbox(value=os.getenv("OPENAI_API_KEY_EMBED", "jan-ai"), label="OpenAI API Key Embed", type="password", show_copy_button=True) #("OLLAMA_API_KEY", ""), label="OpenAI API Key Embed", type="password")
117
-
118
  # Step 1: Section 2
119
  with gr.Row():
120
  with gr.Column():
@@ -130,7 +131,7 @@ def gradio_ui(app_logic: LightRAGApp):
130
  working_dir_reset_cb = gr.Checkbox(value=False, label="Reset working files?")
131
  with gr.Accordion("🤗 HuggingFace Client Control", open=True): #, open=False):
132
  # HuggingFace controls
133
- hf_login_logout_btn = gr.LoginButton(value="Sign in to HuggingFace 🤗", logout_value="Logout of HF: ({}) 🤗", variant="huggingface")
134
 
135
  gr.Markdown("---") #gr.HTML("<hr>")
136
 
@@ -191,7 +192,7 @@ def gradio_ui(app_logic: LightRAGApp):
191
  openai_baseurl_tb: gr.update(value=os.getenv("OPENAI_API_BASE", "https://router.huggingface.co/v1")),
192
  ollama_host_tb: gr.update(value=os.getenv("OLLAMA_HOST", "http://localhost:1234/v1")), #"http://localhost:11434"
193
  openai_baseurl_embed_tb: gr.update(value=os.getenv("OPENAI_API_EMBED_BASE", "http://localhost:1234/v1")), #"http://localhost:1234/v1/embeddings"
194
- llm_model_embed_tb: gr.update(value=os.getenv("LLM_MODEL_EMBED","nomic-embed-text")),
195
  openai_key_embed_tb: gr.update(value=os.getenv("OPENAI_API_KEY_EMBED", "jan-ai"))
196
  }
197
  elif llm_backend == "GenAI":
@@ -201,7 +202,7 @@ def gradio_ui(app_logic: LightRAGApp):
201
  openai_baseurl_tb: gr.update(value=os.getenv("GEMINI_API_BASE", "https://generativelanguage.googleapis.com/v1beta/openai/"), label="GenAI baaseurl"),
202
  ollama_host_tb: gr.update(value=os.getenv("OLLAMA_HOST", "http://localhost:11434")), #"http://localhost:1234/v1"
203
  openai_baseurl_embed_tb: gr.update(value=os.getenv("OPENAI_API_EMBED_BASE", "http://localhost:1234/v1")), #"http://localhost:1234/v1/embeddings"
204
- llm_model_embed_tb: gr.update(value=os.getenv("LLM_MODEL_EMBED","all-MiniLM-L6-v2")),
205
  openai_key_embed_tb: gr.update(value=os.getenv("OPENAI_API_KEY_EMBED", "jan-ai"))
206
  }
207
  elif llm_backend == "OpenAI":
@@ -215,6 +216,24 @@ def gradio_ui(app_logic: LightRAGApp):
215
  openai_key_embed_tb: gr.update(value=os.getenv("OPENAI_API_KEY_EMBED", ""))
216
  }
217
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
218
  # Change Handling: Update password reveal state - reusable function for toggling password visibility
219
  def toggle_password(current_state):
220
  """ Change state
@@ -235,8 +254,9 @@ def gradio_ui(app_logic: LightRAGApp):
235
  # Update gr.State values on HF login change.
236
  def custom_do_logout(openai_key, oauth_token: gr.OAuthToken | None=None,):
237
  #''' ##SMY: TO DELETE
 
238
  try:
239
- if oauth_token:
240
  st_openai_key_get= update_state_stored_value(oauth_token.token) ##SMY: currently not used optimally
241
  except AttributeError:
242
  st_openai_key_get= get_login_token(openai_key) #(openai_key_tb)
@@ -265,6 +285,9 @@ def gradio_ui(app_logic: LightRAGApp):
265
  llm_backend_cb.change(show_progress="hidden", fn=update_ollama, inputs=llm_backend_cb, #inputs=None,
266
  outputs=[llm_model_name_tb, openai_key_tb, openai_baseurl_tb, ollama_host_tb, openai_baseurl_embed_tb, llm_model_embed_tb, openai_key_embed_tb])
267
 
 
 
 
268
  ### Button handlers
269
 
270
  #hf_login_logout_btn.click(update_state_stored_value, inputs=openai_key_tb, outputs=st_openai_key)
@@ -340,7 +363,8 @@ if __name__ == "__main__":
340
  # Instantiate LightRAG and launch Gradio
341
  try:
342
  app_logic = LightRAGApp()
343
- gradio_ui(app_logic).launch() #(server_port=7866)
 
344
  except Exception as e:
345
  print(f"An error occurred: {e}")
346
  finally:
 
89
  with gr.Row():
90
  llm_backend_cb = gr.Radio(["OpenAI", "Ollama", "GenAI"], value="OpenAI", label="LLM Backend: OpenAI, Local or GenAI")
91
  llm_model_name_tb = gr.Textbox(value=os.getenv("LLM_MODEL", "openai/gpt-oss-120b"), label="LLM Model Name", show_copy_button=True) #.split('/')[1], label="LLM Model Name") "meta-llama/Llama-4-Maverick-17B-128E-Instruct")), #image-Text-to-Text #"openai/gpt-oss-120b",
92
+ #with gr.Row():
93
  with gr.Row(): #elem_classes="password-box"):
94
  #openai_key_tb = gr.Textbox(value=os.getenv("OPENAI_API_KEY", "jan-ai"), label="OpenAI API Key",
95
  # type="password", elem_classes="password-box", container=False, interactive=True, info="OpenAI API Key") #, show_copy_button=True)
 
98
  toggle_btn_openai_key = gr.Button(
99
  value="👁️", # Initial eye icon
100
  elem_classes="icon-button", size="sm") #, min_width=50)
101
+ with gr.Row():
102
  openai_baseurl_tb = gr.Textbox(value=os.getenv("OPENAI_API_BASE", "https://router.huggingface.co/v1"), label="OpenAI baseurl", show_copy_button=True)
103
  ollama_host_tb = gr.Textbox(value=os.getenv("OLLAMA_HOST", "http://localhost:1234/v1"), label="Ollama Host", show_copy_button=True)
104
  #ollama_host_tb = gr.Textbox(value=os.getenv("OPENAI_API_EMBED_BASE", ""), label="Ollama Host")
 
 
 
105
  with gr.Row():
106
+ embed_backend_dd = gr.Dropdown(choices=["Transformer", "Provider"], value="Transformer", label="Embedding Type")
107
+ openai_baseurl_embed_tb = gr.Textbox(placeholder=os.getenv("OPENAI_API_EMBED_BASE", "http://localhost:1234/v1"), label="LLM Embed baseurl", show_copy_button=True)
108
+
109
+ with gr.Row():
110
+ llm_model_embed_tb = gr.Textbox(placeholder=os.getenv("LLM_MODEL_EMBED","text-embedding-bge-m3"), label="LLM Embedding Model", show_copy_button=True) #.split('/')[1], label="Embedding Model")
111
  with gr.Row(): #elem_classes="password-box"):
112
  openai_key_embed_tb = gr.Textbox(value=os.getenv("OPENAI_API_KEY_EMBED", "jan-ai"), label="LLM API Key Embed", #lm-studio
113
  type="password", elem_classes="password-box", container=False, interactive=True, info="LLM API Key Embed") #, show_copy_button=True)
 
115
  value="👁️", # Initial eye icon
116
  elem_classes="icon-button", size="sm") #, min_width=50)
117
  #openai_key_embed_tb = gr.Textbox(value=os.getenv("OPENAI_API_KEY_EMBED", "jan-ai"), label="OpenAI API Key Embed", type="password", show_copy_button=True) #("OLLAMA_API_KEY", ""), label="OpenAI API Key Embed", type="password")
118
+
119
  # Step 1: Section 2
120
  with gr.Row():
121
  with gr.Column():
 
131
  working_dir_reset_cb = gr.Checkbox(value=False, label="Reset working files?")
132
  with gr.Accordion("🤗 HuggingFace Client Control", open=True): #, open=False):
133
  # HuggingFace controls
134
+ hf_login_logout_btn = gr.LoginButton( variant="huggingface", value="Sign in to HuggingFace 🤗", logout_value="Logout of HF: ({}) 🤗 \n [NB: check LLM settings & reload files (upload folder)]",)
135
 
136
  gr.Markdown("---") #gr.HTML("<hr>")
137
 
 
192
  openai_baseurl_tb: gr.update(value=os.getenv("OPENAI_API_BASE", "https://router.huggingface.co/v1")),
193
  ollama_host_tb: gr.update(value=os.getenv("OLLAMA_HOST", "http://localhost:1234/v1")), #"http://localhost:11434"
194
  openai_baseurl_embed_tb: gr.update(value=os.getenv("OPENAI_API_EMBED_BASE", "http://localhost:1234/v1")), #"http://localhost:1234/v1/embeddings"
195
+ llm_model_embed_tb: gr.update(value=os.getenv("LLM_MODEL_EMBED","nomic-embed-text")), #"nomic-ai/nomic-embed-text-v1.5"
196
  openai_key_embed_tb: gr.update(value=os.getenv("OPENAI_API_KEY_EMBED", "jan-ai"))
197
  }
198
  elif llm_backend == "GenAI":
 
202
  openai_baseurl_tb: gr.update(value=os.getenv("GEMINI_API_BASE", "https://generativelanguage.googleapis.com/v1beta/openai/"), label="GenAI baaseurl"),
203
  ollama_host_tb: gr.update(value=os.getenv("OLLAMA_HOST", "http://localhost:11434")), #"http://localhost:1234/v1"
204
  openai_baseurl_embed_tb: gr.update(value=os.getenv("OPENAI_API_EMBED_BASE", "http://localhost:1234/v1")), #"http://localhost:1234/v1/embeddings"
205
+ llm_model_embed_tb: gr.update(value=os.getenv("LLM_MODEL_EMBED", "nomic-ai/nomic-embed-text-v1.5")), #"all-MiniLM-L6-v2")),
206
  openai_key_embed_tb: gr.update(value=os.getenv("OPENAI_API_KEY_EMBED", "jan-ai"))
207
  }
208
  elif llm_backend == "OpenAI":
 
216
  openai_key_embed_tb: gr.update(value=os.getenv("OPENAI_API_KEY_EMBED", ""))
217
  }
218
 
219
+ # Change Handling: update Ollama
220
+ def update_embedding_backend(embedding_backend):
221
+ """ Update LLM settings fields with ollama values"""
222
+ # Get model name excluding the model provider: # llm_model_name.rpartition("/")[-1]
223
+
224
+ if embedding_backend == "Provider":
225
+ return {
226
+ openai_baseurl_embed_tb: gr.update(value=os.getenv("OPENAI_API_EMBED_BASE", "http://localhost:1234/v1")), #"http://localhost:1234/v1/embeddings"
227
+ llm_model_embed_tb: gr.update(value=os.getenv("LLM_MODEL_EMBED","nomic-embed-text")),
228
+ openai_key_embed_tb: gr.update(value=os.getenv("OPENAI_API_KEY_EMBED", "jan-ai"))
229
+ }
230
+ elif embedding_backend == "Transformer":
231
+ return {
232
+ openai_baseurl_embed_tb: gr.update(value=None, placeholder=os.getenv("OPENAI_API_EMBED_BASE", "http://localhost:1234/v1")), #"http://localhost:1234/v1/embeddings"
233
+ llm_model_embed_tb: gr.update(value=None, placeholder=os.getenv("LLM_MODEL_EMBED", "nomic-ai/nomic-embed-text-v1.5")), #(value="all-MiniLM-L6-v2"),
234
+ openai_key_embed_tb: gr.update(value=None, placeholder="jan-ai")
235
+ }
236
+
237
  # Change Handling: Update password reveal state - reusable function for toggling password visibility
238
  def toggle_password(current_state):
239
  """ Change state
 
254
  # Update gr.State values on HF login change.
255
  def custom_do_logout(openai_key, oauth_token: gr.OAuthToken | None=None,):
256
  #''' ##SMY: TO DELETE
257
+ st_openai_key_get = os.getenv("OPENAI_API_KEY", default="") #"" ##SMY: # UnboundLocalError: not catching
258
  try:
259
+ if oauth_token or oauth_token is not None: ##SMY: hack: is not None!
260
  st_openai_key_get= update_state_stored_value(oauth_token.token) ##SMY: currently not used optimally
261
  except AttributeError:
262
  st_openai_key_get= get_login_token(openai_key) #(openai_key_tb)
 
285
  llm_backend_cb.change(show_progress="hidden", fn=update_ollama, inputs=llm_backend_cb, #inputs=None,
286
  outputs=[llm_model_name_tb, openai_key_tb, openai_baseurl_tb, ollama_host_tb, openai_baseurl_embed_tb, llm_model_embed_tb, openai_key_embed_tb])
287
 
288
+ embed_backend_dd.change(show_progress="hidden", fn=update_embedding_backend, inputs=embed_backend_dd,
289
+ outputs=[openai_baseurl_embed_tb,llm_model_embed_tb, openai_key_embed_tb],)
290
+
291
  ### Button handlers
292
 
293
  #hf_login_logout_btn.click(update_state_stored_value, inputs=openai_key_tb, outputs=st_openai_key)
 
363
  # Instantiate LightRAG and launch Gradio
364
  try:
365
  app_logic = LightRAGApp()
366
+ # Launch Gradio with queue enable to enable >60s timeout
367
+ gradio_ui(app_logic).queue().launch() #(server_port=7866)
368
  except Exception as e:
369
  print(f"An error occurred: {e}")
370
  finally:
app_gradio_lightrag.py CHANGED
@@ -361,13 +361,63 @@ class LightRAGApp:
361
  **kwargs) -> Union[str, types.Content]:
362
  """ Create GenAI client and complete a prompt """
363
  # https://github.com/googleapis/python-genai/tree/main
364
-
365
  # 1. Combine prompts: system prompt, history, and user prompt
366
  if not history_messages or history_messages is None:
367
  history_messages = []
368
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
369
  # prepare message
370
- #messages: list[dict[str, Any]] = []
371
  messages: list[types.Content] = []
372
 
373
  if system_prompt: ##See system_instruction
@@ -375,6 +425,7 @@ class LightRAGApp:
375
  new_user_content = types.Content(role="user", parts=[types.Part.from_text(text=prompt)])
376
  history_messages.append(new_user_content)
377
 
 
378
  logger.debug(f"Sending messages to Gemini: Model: {self.llm_model_name.rpartition('/')[-1]} \n~ Message: {prompt}")
379
  logger_kg.log(level=20, msg=f"Sending messages to Gemini: Model: {self.llm_model_name.rpartition('/')[-1]} \n~ Message: {prompt}")
380
 
@@ -396,7 +447,7 @@ class LightRAGApp:
396
  #automatic_function_calling=types.AutomaticFunctionCallingConfig(disable=False),
397
  system_instruction=["You are an expert in Knowledge graph.",
398
  "You are well versed in entities, relations, objects and ontology reasoning",
399
- "Your mission/task is to create/construct knowledge Graph"], #system_prompt,
400
  )
401
  )
402
  ## GenAI keeps giving pydantic error relating to 'role': 'assistant' #wierd
 
361
  **kwargs) -> Union[str, types.Content]:
362
  """ Create GenAI client and complete a prompt """
363
  # https://github.com/googleapis/python-genai/tree/main
364
+
365
  # 1. Combine prompts: system prompt, history, and user prompt
366
  if not history_messages or history_messages is None:
367
  history_messages = []
368
 
369
+ ## SMY: role mapping: attempting to map assistant with user #role:assistant pydantic error
370
+ ''' ##SMY working code: disused. See modify_history_in_place()
371
+ ## SMY: new dictionary by unpacking (**) the existing message dictionary, explicitly set "role" key to "user"
372
+ history_messages1 = [{**message, 'role': 'user'} if message.get('role', ) == 'assistant' else message
373
+ for message in history_messages]
374
+ for message in history_messages1: print(f"history len {len(history_messages1)} : \n {'\n'.join(f'{key}: {value[:25]}' for key, value in message.items())}") if isinstance(message, dict) else print(f"\n {str(message)[:25]}")
375
+ '''
376
+ '''
377
+ #idiomatic way to handle an in-place mutation within a list comprehension
378
+ #setattr(m, 'role', 'user') mutates the object in-place by changing its role attribute
379
+ #(..., m)[1] tricks returning the mutated object m after the setattr operation is completed, necessary for the list comprehension to work correctly
380
+ history_messages2 = [
381
+ (lambda m: (setattr(m, 'role', 'user'), m)[1] if message.get('role', ) == 'assistant' else m)(message)
382
+ for message in history_messages ]
383
+ for message in history_messages2: print(f"history len {len(history_messages2)} : \n {'\n'.join(f'{key}: {value[:25]}' for key, value in message.items())}") if isinstance(message, dict) else print(f"\n {str(message)[:25]}")
384
+ '''
385
+ ## one-liner to change all 'model' roles to 'user': List comprehensions are not intended for in-place mutations
386
+ #[(setattr(message, 'role', 'user'), message) for message in history_messages if hasattr(message, 'role') and message.role == 'assistant']
387
+
388
+
389
+ def modify_history_in_place(history_messages):
390
+ """
391
+ Modifies the history_messages list in-place, converting 'assistant' roles to 'user'.
392
+
393
+ Args:
394
+ history_messages: A list that may contain a mix of dicts and GenAI Content objects.
395
+ """
396
+ #history_messages_dict = history_messages #debug
397
+ ## enumerating to avoid potential "off-by-one" errors
398
+ for index, message in enumerate(history_messages):
399
+ # Handle the custom GenAI Content object using its API
400
+ if hasattr(message, 'to_dict'):
401
+ msg_dict = message.to_dict()
402
+ #history_messages[index] = {**msg_dict, 'role': 'user'} if msg_dict.get(key='role', default_value='user') == 'assistant' else msg_dict
403
+
404
+ ## SMY: {'role': []'user', 'model'], 'content': 'content_text'}
405
+ role = 'user' if msg_dict.get('role', 'user') == 'assistant' else msg_dict.get('role', 'user') #msg_dict.get(key='role', default_value='user')
406
+ parts = [types.Part.from_text(text=msg_dict.get('content', ''))]
407
+ history_messages[index] = types.Content(role=role, parts=parts)
408
+ # Handle standard Python dictionaries
409
+ elif isinstance(message, dict):
410
+ #history_messages[index] = {**message, 'role': 'user'} if message.get('role') == 'assistant' else message
411
+
412
+ role = 'user' if message.get('role', 'user') == 'assistant' else message.get('role', 'user')
413
+ parts = [types.Part.from_text(text=message.get('content', ''))]
414
+ history_messages[index] = types.Content(role=role, parts=parts)
415
+ ##debug
416
+ #for message in history_messages: print(f"history len {len(history_messages)} : \n {'\n'.join(f'{key}: {value[:50]}' for key, value in message.items())}") if isinstance(message, dict) else print(f"\n {str(message)[:50]}")
417
+ modify_history_in_place(history_messages)
418
+
419
+
420
  # prepare message
 
421
  messages: list[types.Content] = []
422
 
423
  if system_prompt: ##See system_instruction
 
425
  new_user_content = types.Content(role="user", parts=[types.Part.from_text(text=prompt)])
426
  history_messages.append(new_user_content)
427
 
428
+
429
  logger.debug(f"Sending messages to Gemini: Model: {self.llm_model_name.rpartition('/')[-1]} \n~ Message: {prompt}")
430
  logger_kg.log(level=20, msg=f"Sending messages to Gemini: Model: {self.llm_model_name.rpartition('/')[-1]} \n~ Message: {prompt}")
431
 
 
447
  #automatic_function_calling=types.AutomaticFunctionCallingConfig(disable=False),
448
  system_instruction=["You are an expert in Knowledge graph.",
449
  "You are well versed in entities, relations, objects and ontology reasoning",
450
+ "Your mission/task is to create/construct knowledge Graph, otherwise, query the Knowledge Graph when instructed"], #system_prompt,
451
  )
452
  )
453
  ## GenAI keeps giving pydantic error relating to 'role': 'assistant' #wierd
requirements.txt CHANGED
@@ -14,6 +14,7 @@ nest_asyncio>=1.6.0 ##SMY: #HF Spaces modulenotfounderror: No module name
14
  pyvis>=0.3.1 ##SMY: >=0.3.0 --> 0.3.2
15
  networkx>=3.4.2 ##SMY: 3.2.1 (3.4.2)
16
  sentence-transformers ##SMY:
 
17
  hf_xet ##SMY: #HF Xet Storage downloader
18
 
19
  plotly>=6.0.1 ##SMY: 6.0.1
 
14
  pyvis>=0.3.1 ##SMY: >=0.3.0 --> 0.3.2
15
  networkx>=3.4.2 ##SMY: 3.2.1 (3.4.2)
16
  sentence-transformers ##SMY:
17
+ einops>=0.8.1 ##SMY: sentence-transformer on HF Space - nomic-ai
18
  hf_xet ##SMY: #HF Xet Storage downloader
19
 
20
  plotly>=6.0.1 ##SMY: 6.0.1