Update app.py
app.py (CHANGED)
@@ -16,8 +16,8 @@ from presets import *
 #chatbot = hugchat.ChatBot(cookie_path="cookies.json")
 
 #Alternativ mit beliebigen Modellen:
-
-base_model = "EleutherAI/gpt-neo-1.3B"
+base_model = "project-baize/baize-v2-7b"
+#base_model = "EleutherAI/gpt-neo-1.3B"
 tokenizer,model,device = load_tokenizer_and_model(base_model)
 
 
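The commit switches the default checkpoint from EleutherAI/gpt-neo-1.3B to project-baize/baize-v2-7b and keeps the old one as a commented-out fallback (the German comment above, `#Alternativ mit beliebigen Modellen:`, means "Alternatively, with any model:"). `load_tokenizer_and_model` is defined elsewhere in this Space; a minimal sketch of what it plausibly does, assuming it wraps plain `transformers` loading (names, dtype, and device logic here are assumptions, not the repo's actual code):

```python
# Hypothetical sketch of a load_tokenizer_and_model helper; the real
# implementation lives elsewhere in this repo and may differ.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def load_tokenizer_and_model(base_model: str):
    tokenizer = AutoTokenizer.from_pretrained(base_model)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = AutoModelForCausalLM.from_pretrained(
        base_model,
        # half precision on GPU is a common choice for a 7B model
        torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    ).to(device)
    model.eval()
    return tokenizer, model, device
```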
@@ -31,17 +31,17 @@ def predict(text,
             max_length_tokens,
             max_context_length_tokens,):
     if text=="":
-        yield chatbotGr,history,"
+        yield chatbotGr,history,"Testo vuoto."
         return
     try:
         model
     except:
-        yield [[text,"
+        yield [[text,"Nessun modello trovato"]],[],"Nessun modello trovato"
         return
 
     inputs = generate_prompt_with_history(text,history,tokenizer,max_length=max_context_length_tokens)
     if inputs is None:
-        yield chatbotGr,history,"Input
+        yield chatbotGr,history,"Input troppo lungo."
         return
     else:
         prompt,inputs=inputs
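`predict` is a Gradio generator callback: it yields `(chatbot, history, status)` tuples so the UI can update incrementally, and it returns early with an Italian status string when the input is empty ("Testo vuoto." = "Empty text."), no model is loaded ("Nessun modello trovato" = "No model found"), or the prompt no longer fits the context window ("Input troppo lungo." = "Input too long."). A stripped-down sketch of that control flow; the helper names come from the diff, the leading parameters are guesses from the truncated signature, and the narrowed `except NameError` is a suggested tightening of the original bare `except:`:

```python
# Sketch of the predict generator's early-exit pattern. chatbotGr, history,
# tokenizer, model and generate_prompt_with_history come from app.py.
def predict(text, chatbotGr, history, top_p, temperature,
            max_length_tokens, max_context_length_tokens):
    if text == "":
        yield chatbotGr, history, "Testo vuoto."   # empty input: report and stop
        return
    try:
        model                                      # probe that the global exists
    except NameError:                              # bare `except:` in the original
        yield [[text, "Nessun modello trovato"]], [], "Nessun modello trovato"
        return
    inputs = generate_prompt_with_history(
        text, history, tokenizer, max_length=max_context_length_tokens)
    if inputs is None:                             # prompt exceeds the context window
        yield chatbotGr, history, "Input troppo lungo."
        return
    prompt, inputs = inputs
    # ...token generation streams further (chatbot, history, status) yields...
```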
@@ -104,9 +104,9 @@ with open("custom.css", "r", encoding="utf-8") as f:
 with gr.Blocks(theme=small_and_beautiful_theme) as demo:
     history = gr.State([])
     user_question = gr.State("")
-    gr.Markdown("
+    gr.Markdown("Scegli cosa vuoi provare")
     with gr.Tabs():
-        with gr.TabItem("
+        with gr.TabItem("Chat"):
             with gr.Row():
                 gr.HTML(title)
                 status_display = gr.Markdown("Erfolg", elem_id="status_display")
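The new labels are Italian: "Scegli cosa vuoi provare" means "Choose what you want to try", while the status line "Erfolg" is German for "Success". `history` and `user_question` are `gr.State` values: invisible, per-session components that are passed into and returned from event callbacks rather than rendered. A minimal sketch of that state pattern, with illustrative component names (not the app's own wiring):

```python
import gradio as gr

# Minimal sketch of the gr.State pattern: state components hold per-session
# values that flow through callbacks as extra inputs and outputs.
with gr.Blocks() as demo:
    history = gr.State([])                 # per-session chat history
    box = gr.Textbox(show_label=False)
    status = gr.Markdown("Erfolg")         # "Success"

    def remember(text, hist):
        hist = hist + [text]               # copy instead of mutating in place
        return f"{len(hist)} messages stored", hist

    box.submit(remember, [box, history], [status, history])
```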
@@ -166,15 +166,15 @@ with gr.Blocks(theme=small_and_beautiful_theme) as demo:
         )
         gr.Markdown(description)
 
-        with gr.TabItem("
+        with gr.TabItem("Traduzioni"):
             with gr.Row():
                 gr.Textbox(
-                    show_label=False, placeholder="
+                    show_label=False, placeholder="In costruzione ..."
                 ).style(container=False)
-        with gr.TabItem("
+        with gr.TabItem("Generazione di codice"):
             with gr.Row():
                 gr.Textbox(
-                    show_label=False, placeholder="
+                    show_label=False, placeholder="In costruzione ..."
                 ).style(container=False)
 
     predict_args = dict(
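The two new tabs, "Traduzioni" ("Translations") and "Generazione di codice" ("Code generation"), are stubs whose textboxes only show "In costruzione ..." ("Under construction ..."); `.style(container=False)` is the Gradio 3.x styling API. One possible hardening, not in the diff, is to mark the unwired textbox non-interactive so users cannot type into a field that goes nowhere:

```python
# Sketch of one "under construction" tab; interactive=False is a suggested
# tweak (not in the commit) to disable input on the unwired textbox.
with gr.TabItem("Traduzioni"):                      # "Translations"
    with gr.Row():
        gr.Textbox(
            show_label=False,
            placeholder="In costruzione ...",       # "Under construction ..."
            interactive=False,
        ).style(container=False)
```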
@@ -215,6 +215,6 @@ with gr.Blocks(theme=small_and_beautiful_theme) as demo:
     )
     emptyBtn.click(**reset_args)
 
-    demo.title = "
+    demo.title = "Chat"
     #demo.queue(concurrency_count=1).launch(share=True)
     demo.queue(concurrency_count=1).launch(debug=True)
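`demo.queue(concurrency_count=1)` (the Gradio 3.x signature) serializes requests so only one generation runs at a time against the single loaded model, and `launch(debug=True)` blocks and surfaces tracebacks in the log; the commented-out `share=True` variant would additionally create a temporary public URL. The launch configuration, annotated:

```python
# Gradio 3.x launch configuration as used in this commit.
demo.queue(concurrency_count=1)      # one worker: requests run strictly in turn
demo.launch(debug=True)              # blocking launch, tracebacks in the log
# demo.launch(share=True)            # alternative: also expose a temporary public URL
```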