mbwolff committed
Commit beead2c
1 Parent(s): 2241248

Trying something else

Files changed (2)
  1. app.py +19 -15
  2. appOLD.py +20 -0
app.py CHANGED
@@ -1,20 +1,24 @@
  import gradio as gr
- # from huggingface_hub import InferenceClient
- import random
  from transformers import pipeline
 
- def random_response(message, history):
-     return random.choice(["Yes", "No"])
-
- def eqa(message, history):
-     if len(history) >= 1 and history[-1]['role'] == 'assistant' and history[-1]['content'] == 'Okay':
-         pipe = pipeline(model="mbwolff/distilbert-base-uncased-finetuned-squad")
-         return pipe(question=message, context=history[-2]['content'])['answer']
-     else:
-         return 'Okay'
-
- gr.ChatInterface(
-     # fn=random_response,
-     fn=eqa,
-     type="messages"
- ).launch()
+ # Load the question answering model
+ question_answerer = pipeline("question-answering", model="mbwolff/distilbert-base-uncased-finetuned-squad")
+
+ def answer_question(question, context):
+     result = question_answerer(question=question, context=context)
+     return result["answer"]
+
+ # Create the Gradio interface
+ iface = gr.Interface(
+     fn=answer_question,
+     inputs=[
+         gr.inputs.Textbox(lines=7, placeholder="Enter your question here..."),
+         gr.inputs.Textbox(lines=7, placeholder="Enter the context here..."),
+     ],
+     outputs="text",
+     title="Question Answering Chatbot",
+     description="Ask questions about the provided context."
+ )
+
+ # Launch the interface
+ iface.launch()
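
Note: the new app.py builds its inputs with gr.inputs.Textbox, a namespace that Gradio deprecated in 3.x and removed in 4.x, so the Space will only start if it pins an older Gradio release. A minimal sketch of the same interface under the assumption of Gradio 4.x (the only change from the committed code is swapping gr.inputs.Textbox for the top-level gr.Textbox):

import gradio as gr
from transformers import pipeline

# Same QA checkpoint as in the commit.
question_answerer = pipeline(
    "question-answering",
    model="mbwolff/distilbert-base-uncased-finetuned-squad",
)

def answer_question(question, context):
    # The QA pipeline returns a dict with "answer", "score", "start", "end".
    return question_answerer(question=question, context=context)["answer"]

# Gradio 4.x: components are top-level classes, so gr.inputs.Textbox
# becomes gr.Textbox; the rest of the Interface call is unchanged.
iface = gr.Interface(
    fn=answer_question,
    inputs=[
        gr.Textbox(lines=7, placeholder="Enter your question here..."),
        gr.Textbox(lines=7, placeholder="Enter the context here..."),
    ],
    outputs="text",
    title="Question Answering Chatbot",
    description="Ask questions about the provided context.",
)

iface.launch()

With two unlabeled Textbox inputs, Gradio maps them positionally to the question and context parameters of answer_question, so no further wiring is needed.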
appOLD.py ADDED
@@ -0,0 +1,20 @@
+ import gradio as gr
+ # from huggingface_hub import InferenceClient
+ import random
+ from transformers import pipeline
+
+ def random_response(message, history):
+     return random.choice(["Yes", "No"])
+
+ def eqa(message, history):
+     if len(history) >= 1 and history[-1]['role'] == 'assistant' and history[-1]['content'] == 'Okay':
+         pipe = pipeline(model="mbwolff/distilbert-base-uncased-finetuned-squad")
+         return pipe(question=message, context=history[-2]['content'])['answer']
+     else:
+         return 'Okay'
+
+ gr.ChatInterface(
+     # fn=random_response,
+     fn=eqa,
+     type="messages"
+ ).launch()
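
For reference, the retired eqa callback implements a two-turn protocol: the first user message is treated as the context and acknowledged with 'Okay', and the next message is answered against that stored context. A rough usage sketch of that flow, assuming eqa from appOLD.py above is in scope; the history dicts mirror the messages format that gr.ChatInterface(type="messages") passes to the callback, and the context string is an invented example:

# Drive eqa outside the Gradio UI with a hand-built messages-format history.
history = []

# Turn 1: the user pastes the context; eqa replies 'Okay' and both turns are appended.
context = "The Eiffel Tower was completed in 1889 and stands in Paris."  # invented example
reply = eqa(context, history)  # -> 'Okay'
history += [
    {"role": "user", "content": context},
    {"role": "assistant", "content": reply},
]

# Turn 2: the last assistant turn is 'Okay', so eqa loads the QA pipeline
# and extracts the answer span from history[-2] (the pasted context).
print(eqa("When was the Eiffel Tower completed?", history))  # prints the extracted span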