Update README.md
Browse files
README.md
CHANGED
|
@@ -123,7 +123,7 @@ def extract_important_sentences(text):
|
|
| 123 |
paragraph = ' '.join([' '.join(sentence) for sentence in top_sentences])
|
| 124 |
return paragraph
|
| 125 |
|
| 126 |
-
def summarize(text, max_tokens
|
| 127 |
|
| 128 |
config = PeftConfig.from_pretrained("Nevidu/LexBartLo_2")
|
| 129 |
|
|
@@ -141,6 +141,8 @@ def summarize(text, max_tokens, model_type):
|
|
| 141 |
outputs = model.generate(input_ids=input_ids, max_new_tokens=max_tokens, do_sample=True, top_p=0.9)
|
| 142 |
summary = tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0]
|
| 143 |
return summary
|
|
|
|
|
|
|
| 144 |
```
|
| 145 |
|
| 146 |
### Training Procedure
|
|
|
|
| 123 |
paragraph = ' '.join([' '.join(sentence) for sentence in top_sentences])
|
| 124 |
return paragraph
|
| 125 |
|
| 126 |
+
def summarize(text, max_tokens):
|
| 127 |
|
| 128 |
config = PeftConfig.from_pretrained("Nevidu/LexBartLo_2")
|
| 129 |
|
|
|
|
| 141 |
outputs = model.generate(input_ids=input_ids, max_new_tokens=max_tokens, do_sample=True, top_p=0.9)
|
| 142 |
summary = tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0]
|
| 143 |
return summary
|
| 144 |
+
|
| 145 |
+
summary = summarize(text, max_tokens)
|
| 146 |
```
|
| 147 |
|
| 148 |
### Training Procedure
|