ffreemt committed · 41e9e77 · parent: d017960
app.py CHANGED
@@ -132,18 +132,6 @@ def bot(chat_history, **kwargs):
     return chat_history
 
 
-def bot_stream(chat_history, **kwargs):
-    try:
-        message = chat_history[-1][0]
-    except Exception as exc:
-        logger.error(f"{chat_history=}: {exc}")
-        raise gr.Error(f"{chat_history=}")
-    # yield chat_history
-    for elm in model.chat_stream(tokenizer, message, chat_history, **kwargs):
-        chat_history[-1] = [message, elm]
-        yield chat_history
-
-
 SYSTEM_PROMPT = "You are a helpful assistant."
 MAX_MAX_NEW_TOKENS = 1024
 MAX_NEW_TOKENS = 128
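For reference, the bot_stream removed here (and re-added inside gr.Blocks below) follows Gradio's streaming contract: a generator bound to a Chatbot output re-renders the component on every yield. A minimal sketch of that contract, with a hypothetical fake_stream standing in for model.chat_stream:

def fake_stream(chat_history):
    # chat_history is a list of [user, assistant] pairs, as gr.Chatbot expects
    message = chat_history[-1][0]
    reply = ""
    for token in ("Hel", "lo ", "world"):  # stand-in for real token streaming
        reply += token
        chat_history[-1] = [message, reply]
        yield chat_history  # Gradio re-renders the Chatbot after each yield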
@@ -158,7 +146,7 @@ class Config:
     top_p: float = 0.9
 
 
-stats_default = SimpleNamespace(llm=
+stats_default = SimpleNamespace(llm=model, system_prompt=SYSTEM_PROMPT, config=Config())
 
 theme = gr.themes.Soft(text_size="sm")
 with gr.Blocks(
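The new stats_default bundles everything one session needs into a single mutable object. A minimal sketch of the pattern, using the names from the diff (model, SYSTEM_PROMPT, and Config come from app.py):

from types import SimpleNamespace

import gradio as gr

stats_default = SimpleNamespace(llm=model, system_prompt=SYSTEM_PROMPT, config=Config())

with gr.Blocks() as block:
    # gr.State keeps a per-session copy of its default value, so later
    # mutations of stats.value stay local to one chat session
    stats = gr.State(stats_default)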
@@ -167,10 +155,22 @@ with gr.Blocks(
     css=css,
 ) as block:
     stats = gr.State(stats_default)
-
-
-
-
+    if not torch.cuda.is_available():
+        raise gr.Error("GPU not available, cant run. Turn on GPU and restart")
+
+    model_ = stats.value.llm
+    config = stats.value.config
+    model_.generation_config.update(**asdict(config))
+    def bot_stream(chat_history):
+        try:
+            message = chat_history[-1][0]
+        except Exception as exc:
+            logger.error(f"{chat_history=}: {exc}")
+            raise gr.Error(f"{chat_history=}")
+        # yield chat_history
+        for elm in model.chat_stream(tokenizer, message, chat_history):
+            chat_history[-1] = [message, elm]
+            yield chat_history
 
     with gr.Accordion("🎈 Info", open=False):
         gr.Markdown(
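The asdict round-trip in the added lines is a common way to push a dataclass of sampling knobs into a transformers generation config. A minimal sketch, with illustrative field names (only top_p = 0.9 is visible in the Config hunk above):

from dataclasses import asdict, dataclass

@dataclass
class Config:
    max_new_tokens: int = 128  # assumed fields; only top_p appears in this diff
    temperature: float = 0.7
    top_p: float = 0.9

# GenerationConfig.update(**kwargs) overwrites matching attributes, so the
# dataclass maps onto it field-for-field via asdict()
model.generation_config.update(**asdict(Config()))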
@@ -212,7 +212,7 @@ with gr.Blocks(
         queue=True,
         show_progress="full",
         # api_name=None,
-    ).then(
+    ).then(bot_stream, chatbot, chatbot, queue=True)
     submit_click_event = submit.click(
         # fn=lambda x, y: ("",) + user(x, y)[1:],  # clear msg
         fn=user,  # clear msg
@@ -221,7 +221,7 @@ with gr.Blocks(
         queue=True,
         show_progress="full",
         # api_name=None,
-    ).then(
+    ).then(bot_stream, chatbot, chatbot, queue=True)
     stop.click(
         fn=None,
         inputs=None,
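Both edited lines complete the same pattern: a quick handler (user) records the message, then a chained .then() streams the reply. A minimal sketch of the full wiring, assuming the usual msg textbox name (chatbot, submit, stop, user, and submit_click_event appear in the diff):

msg_submit_event = msg.submit(
    fn=user, inputs=[msg, chatbot], outputs=[msg, chatbot], queue=True
).then(bot_stream, chatbot, chatbot, queue=True)

submit_click_event = submit.click(
    fn=user, inputs=[msg, chatbot], outputs=[msg, chatbot], queue=True
).then(bot_stream, chatbot, chatbot, queue=True)

# stop.click(..., cancels=[...]) aborts both in-flight chains, which is the
# usual role of a fn=None stop button
stop.click(fn=None, inputs=None, outputs=None,
           cancels=[msg_submit_event, submit_click_event])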