update quickstart
README.md
@@ -104,8 +104,8 @@ with open(example_path, "r") as f:
     full_prompt = f.read()
 answer = full_prompt.split('Answer:')[-1].strip()
 prompt_without_answer = full_prompt[:-len(answer)]
-print("Prompt
-print("
+print("Prompt:", prompt_without_answer)
+print("Groundtruth:", answer)
 
 # Inference
 inputs = tokenizer(prompt_without_answer, return_tensors="pt")
@@ -118,7 +118,7 @@ outputs = model.generate(
 )
 
 # Print the answer
-print("
+print("Generated answer:", tokenizer.decode(outputs[0][input_ids.shape[-1]:]))
 ```
 
 ## Responsible AI Considerations
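For context, the updated snippet assembles into the end-to-end flow below. This is a minimal sketch, not the README's verbatim quickstart: the checkpoint name, example path, and generation arguments are placeholders (the hunks do not show lines 112-117), and the `input_ids = inputs["input_ids"]` binding that the new decode line depends on is assumed to come from the unshown lines.

```python
# Minimal sketch of the updated quickstart flow.
# Checkpoint name, file path, and generate() settings are assumptions;
# substitute the values from the actual README.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "your-org/your-model"  # placeholder checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

example_path = "example.txt"  # placeholder: a prompt file ending in "Answer: ..."
with open(example_path, "r") as f:
    full_prompt = f.read()

# Take the ground-truth answer after the final "Answer:" marker, then keep
# everything up to that answer as the prompt fed to the model.
answer = full_prompt.split('Answer:')[-1].strip()
prompt_without_answer = full_prompt[:-len(answer)]
print("Prompt:", prompt_without_answer)
print("Groundtruth:", answer)

# Inference
inputs = tokenizer(prompt_without_answer, return_tensors="pt")
input_ids = inputs["input_ids"]  # assumed binding used by the decode step
outputs = model.generate(input_ids, max_new_tokens=64)  # assumed settings

# Slice off the prompt tokens so only the newly generated answer is decoded
print("Generated answer:", tokenizer.decode(outputs[0][input_ids.shape[-1]:]))
```

The slice `outputs[0][input_ids.shape[-1]:]` matters because `generate` returns the prompt tokens followed by the new tokens; skipping the first `input_ids.shape[-1]` positions prints only the model's answer rather than echoing the prompt.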