Update README.md
README.md CHANGED
@@ -61,7 +61,7 @@ batch = next(iter(dataloader))
 output_ids = model.generate(
     pixel_values=batch['images'],
     max_length=512,
-    num_beams=
+    num_beams=1,
     bad_words_ids=[[tokenizer.convert_tokens_to_ids('[NF]')], [tokenizer.convert_tokens_to_ids('[NI]')]],
 )
 findings, impression = model.split_and_decode_sections(output_ids, tokenizer)
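For context, this first call generates the findings and impression sections in one pass: banning the [NF] and [NI] special tokens (which appear to mark an absent findings/impression section) forces both sections to be written, and the commit pins the previously unfinished num_beams argument to 1, i.e. greedy search. A minimal sketch of the same call with the token lookups named, assuming the model, tokenizer, and dataloader set up earlier in the README; the sanity check is an illustrative addition, not part of the repo:

import torch

batch = next(iter(dataloader))  # as in the surrounding README code

# Ids of the special tokens that mark an empty section. Note that
# convert_tokens_to_ids silently returns the unknown-token id for tokens
# missing from the vocabulary, so a quick check avoids banning the wrong id.
nf_id = tokenizer.convert_tokens_to_ids('[NF]')
ni_id = tokenizer.convert_tokens_to_ids('[NI]')
assert tokenizer.unk_token_id not in (nf_id, ni_id)

output_ids = model.generate(
    pixel_values=batch['images'],
    max_length=512,
    num_beams=1,  # greedy decoding; raise for beam search
    bad_words_ids=[[nf_id], [ni_id]],  # list of banned token-id sequences
)
findings, impression = model.split_and_decode_sections(output_ids, tokenizer)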
@@ -73,7 +73,7 @@ findings, impression = model.split_and_decode_sections(output_ids, tokenizer)
 output_ids = model.generate(
     pixel_values=batch['images'],
     max_length=512,
-    num_beams=
+    num_beams=1,
     bad_words_ids=[[tokenizer.convert_tokens_to_ids('[NF]')]],
     eos_token_id=tokenizer.sep_token_id,
 )
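This variant decodes the findings section only: setting eos_token_id to the separator token ends generation at the findings/impression boundary, and banning [NF] keeps the section non-empty. The same call as a commented sketch, under the assumptions above:

output_ids = model.generate(
    pixel_values=batch['images'],
    max_length=512,
    num_beams=1,                          # greedy decoding, as pinned by this commit
    bad_words_ids=[[tokenizer.convert_tokens_to_ids('[NF]')]],
    eos_token_id=tokenizer.sep_token_id,  # stop at the section separator
)
findings, _ = model.split_and_decode_sections(output_ids, tokenizer)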
@@ -86,7 +86,7 @@ findings, _ = model.split_and_decode_sections(output_ids, tokenizer)
 output_ids = model.generate(
     pixel_values=batch['images'],
     max_length=512,
-    num_beams=
+    num_beams=1,
     bad_words_ids=[[tokenizer.convert_tokens_to_ids('[NI]')]],
     input_ids=torch.tensor([[tokenizer.bos_token_id, tokenizer.convert_tokens_to_ids('[NF]'), tokenizer.sep_token_id]]*mbatch_size, device=device, dtype=torch.long),
 )
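The last variant generates an impression alone: the decoder is primed with the prompt [BOS] [NF] [SEP], which records the findings section as absent, and [NI] is banned so an impression is always produced. A sketch with the prompt assembled separately, assuming mbatch_size and device from earlier in the README:

import torch

# Decoder prompt [BOS] [NF] [SEP]: an absent findings section followed by the
# separator, so generation continues directly with the impression section.
prompt = torch.tensor(
    [[tokenizer.bos_token_id,
      tokenizer.convert_tokens_to_ids('[NF]'),
      tokenizer.sep_token_id]] * mbatch_size,
    device=device,
    dtype=torch.long,
)

output_ids = model.generate(
    pixel_values=batch['images'],
    max_length=512,
    num_beams=1,
    bad_words_ids=[[tokenizer.convert_tokens_to_ids('[NI]')]],
    input_ids=prompt,  # passed through as the decoder prompt, as in the README
)
_, impression = model.split_and_decode_sections(output_ids, tokenizer)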