Modified Downstream Usage Code
Browse files
README.md CHANGED

@@ -91,7 +91,7 @@ print(response['choices'][0]['text'].strip())
 # USING ON GPU MACHINE
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
-from peft import PeftModel, PeftConfig
+# from peft import PeftModel, PeftConfig
 
 model_name = "pavankumarbalijepalli/phi2-sqlcoder"
 
@@ -101,7 +101,10 @@ model = AutoModelForCausalLM.from_pretrained(
     device_map="auto"
 )
 
+prompt = ""
+
 tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+tokenizer.pad_token = tokenizer.eos_token
 inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
 inputs.to('cuda')
 