Kennethdotse committed on
Commit 91fe1e0 · 1 Parent(s): 513c401

updated imports

Files changed (2)
  1. app.py +2 -15
  2. requirements.txt +2 -1
app.py CHANGED
@@ -11,10 +11,9 @@ from pathlib import Path
 from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain_community.vectorstores import Chroma
 from langchain_community.document_loaders.csv_loader import CSVLoader
-from langchain_community.document_loaders.pdf_loader import PyPDFLoader
+from langchain_community.document_loaders import PyPDFLoader
 from langchain_community.document_loaders.dataframe import DataFrameLoader
-from langchain.text_splitter import CharacterTextSplitter
-from huggingface_hub import HfApi
+from langchain_text_splitters import CharacterTextSplitter
 
 
 # ---------- Configuration ----------
@@ -28,17 +27,6 @@ _pipe = None
 _rag_vectorstore = None
 _embeddings = None
 
-HF_TOKEN = os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACEHUB_API_TOKEN")
-if not HF_TOKEN:
-    print("Error: no Hugging Face token found. Set HF_TOKEN or HUGGINGFACEHUB_API_TOKEN as an environment variable or Space secret.")
-    sys.exit(1)
-else:
-    try:
-        HfApi().whoami(token=HF_TOKEN)
-        print("Hugging Face token OK")
-    except Exception as e:
-        print("Invalid Hugging Face token:", e)
-        sys.exit(1)
 
 # ---------- Lazy initialization helpers ----------
 def _init_pipeline():
@@ -68,7 +56,6 @@ def _init_pipeline():
         model=MODEL_ID,
         device_map=model_kwargs.get("device_map"),
         torch_dtype=model_kwargs.get("torch_dtype"),
-        use_auth_token=HF_TOKEN,
         **({} if "quantization_config" not in model_kwargs else {"quantization_config": model_kwargs["quantization_config"]}),
     )
     try:
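The new import paths follow the current split of LangChain into langchain-community and langchain-text-splitters. As a rough sketch of how the updated loader and splitter imports fit together with the embeddings and vector-store imports that app.py already uses (the PDF filename, chunk sizes, and embedding model name below are illustrative assumptions, not values from this repo):

import os
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma

# Load a PDF into LangChain Document objects (filename is a placeholder).
docs = PyPDFLoader("example.pdf").load()

# Split the pages into overlapping character chunks.
splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
chunks = splitter.split_documents(docs)

# Embed the chunks and build an in-memory Chroma store (model name is an assumption).
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vectorstore = Chroma.from_documents(chunks, embedding=embeddings)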
requirements.txt CHANGED
@@ -11,4 +11,5 @@ sentence-transformers
 pypdf
 bitsandbytes
 accelerate
-huggingface-hub
+huggingface-hub
+langchain_text_splitters
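Note that huggingface-hub stays in requirements.txt even though app.py no longer imports HfApi directly; transformers and sentence-transformers still rely on it to download models. If an explicit token check is ever needed again, a minimal sketch along the lines of the removed block (assuming the token is supplied via HF_TOKEN or HUGGINGFACEHUB_API_TOKEN) would be:

import os
from huggingface_hub import HfApi

token = os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACEHUB_API_TOKEN")
if token:
    # whoami() raises if the token is invalid or expired.
    HfApi().whoami(token=token)
    print("Hugging Face token OK")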