import os
import tempfile

import pytest
import torch
from torch.testing import assert_close
from transformers import AutoModelForCausalLM

from peft import get_peft_model
from peft.peft_model import PeftModel
from peft.tuners.adaption_prompt import AdaptionPromptConfig
from peft.utils import infer_device
from peft.utils.other import prepare_model_for_kbit_training
from peft.utils.save_and_load import get_peft_model_state_dict


MODELS_TO_TEST = [
    "hf-internal-testing/tiny-random-gpt2",
    "trl-internal-testing/tiny-random-LlamaForCausalLM",
    "hf-internal-testing/tiny-random-MistralForCausalLM",
]


class TestAdaptionPrompt:
    """
    Tests for the AdaptionPrompt model.

    Some of these tests were adapted from `test_peft_model.py` (which has since been refactored), but since the test
    checkpoints for Llama have not been checked in to `hf-internal-testing`, we keep them separate for now.
    """

    transformers_class = AutoModelForCausalLM
    torch_device = infer_device()

    @pytest.mark.parametrize("model_id", MODELS_TO_TEST)
    def test_attributes(self, model_id):
        model = self.transformers_class.from_pretrained(model_id)
        config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4)
        model = get_peft_model(model, config)

        assert hasattr(model, "save_pretrained")
        assert hasattr(model, "from_pretrained")
        assert hasattr(model, "push_to_hub")

    @pytest.mark.parametrize("model_id", MODELS_TO_TEST)
    def test_prepare_for_training(self, model_id):
        model = self.transformers_class.from_pretrained(model_id)
        config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4, task_type="CAUSAL_LM")
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)
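
        # Without `prepare_model_for_kbit_training` or `enable_input_require_grads`, the embedding
        # output is not expected to require grad.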
        dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device)
        dummy_output = model.get_input_embeddings()(dummy_input)

        assert not dummy_output.requires_grad

    @pytest.mark.parametrize("model_id", MODELS_TO_TEST)
    def test_prepare_for_int8_training(self, model_id):
        model = self.transformers_class.from_pretrained(model_id)
        model = prepare_model_for_kbit_training(model)
        model = model.to(self.torch_device)
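
        # `prepare_model_for_kbit_training` is expected to freeze all base model parameters.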
        for param in model.parameters():
            assert not param.requires_grad

        config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4, task_type="CAUSAL_LM")
        model = get_peft_model(model, config)
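
        # Make the embedding outputs require grad so that gradients can flow back to the adapter
        # weights through the frozen base model; fall back to a forward hook on older transformers versions.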
        if hasattr(model, "enable_input_require_grads"):
            model.enable_input_require_grads()
        else:

            def make_inputs_require_grad(module, input, output):
                output.requires_grad_(True)

            model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

        dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device)
        dummy_output = model.get_input_embeddings()(dummy_input)

        assert dummy_output.requires_grad

    @pytest.mark.parametrize("model_id", MODELS_TO_TEST)
    def test_save_pretrained_regression(self, model_id):
        seed = 420
        torch.manual_seed(seed)
        model = self.transformers_class.from_pretrained(model_id)
        config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)
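
        # Saving with `safe_serialization=False` writes the legacy pickle checkpoint
        # (`adapter_model.bin`) instead of a safetensors file.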
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model.save_pretrained(tmp_dirname, safe_serialization=False)

            torch.manual_seed(seed)
            model_from_pretrained = self.transformers_class.from_pretrained(model_id)
            model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname)

            # check if the state dicts are equal
            state_dict = get_peft_model_state_dict(model)
            state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained)

            # check if same keys
            assert state_dict.keys() == state_dict_from_pretrained.keys()

            # check that the number of saved parameters is 4 -- 2 layers of (prompt tokens and gate)
            assert len(state_dict) == 4

            # check if tensors are equal
            for key in state_dict.keys():
                assert torch.allclose(
                    state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device)
                )

            # check if `adapter_model.bin` is present
            assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.bin"))

            # check if `adapter_config.json` is present
            assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json"))

            # check if `model.safetensors` is not present
            assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors"))

            # check if `config.json` is not present
            assert not os.path.exists(os.path.join(tmp_dirname, "config.json"))

    @pytest.mark.parametrize("model_id", MODELS_TO_TEST)
    def test_save_pretrained(self, model_id):
        seed = 420
        torch.manual_seed(seed)
        model = self.transformers_class.from_pretrained(model_id)
        config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)
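
        # The default `save_pretrained` uses safe serialization, so `adapter_model.safetensors`
        # is expected on disk.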
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model.save_pretrained(tmp_dirname)

            torch.manual_seed(seed)
            model_from_pretrained = self.transformers_class.from_pretrained(model_id)
            model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname)

            # check if the state dicts are equal
            state_dict = get_peft_model_state_dict(model)
            state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained)

            # check if same keys
            assert state_dict.keys() == state_dict_from_pretrained.keys()

            # check that the number of saved parameters is 4 -- 2 layers of (prompt tokens and gate)
            assert len(state_dict) == 4

            # check if tensors are equal
            for key in state_dict.keys():
                assert torch.allclose(
                    state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device)
                )

            # check if `adapter_model.safetensors` is present
            assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.safetensors"))

            # check if `adapter_config.json` is present
            assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json"))

            # check if `model.safetensors` is not present
            assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors"))

            # check if `config.json` is not present
            assert not os.path.exists(os.path.join(tmp_dirname, "config.json"))

    @pytest.mark.parametrize("model_id", MODELS_TO_TEST)
    def test_save_pretrained_selected_adapters(self, model_id):
        seed = 420
        torch.manual_seed(seed)
        model = self.transformers_class.from_pretrained(model_id)
        config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)
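
        # Register a second adapter so that saving and reloading also covers the multi-adapter case.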
        new_adapter_config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
        model.add_adapter("new_adapter", new_adapter_config)

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model.save_pretrained(tmp_dirname)

            torch.manual_seed(seed)
            model_from_pretrained = self.transformers_class.from_pretrained(model_id)
            model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname)

            model_from_pretrained.load_adapter(tmp_dirname, "new_adapter")

            # check if the state dicts are equal
            state_dict = get_peft_model_state_dict(model)
            state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained)

            # check if same keys
            assert state_dict.keys() == state_dict_from_pretrained.keys()

            # check that the number of saved parameters is 4 -- 2 layers of (prompt tokens and gate)
            assert len(state_dict) == 4

            # check if tensors are equal
            for key in state_dict.keys():
                assert torch.allclose(
                    state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device)
                )

            # check if `adapter_model.safetensors` is present
            assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.safetensors"))

            # check if `adapter_config.json` is present
            assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json"))

            # check if `model.safetensors` is not present
            assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors"))

            # check if `config.json` is not present
            assert not os.path.exists(os.path.join(tmp_dirname, "config.json"))

    @pytest.mark.parametrize("model_id", MODELS_TO_TEST)
    def test_generate(self, model_id):
        model = self.transformers_class.from_pretrained(model_id)
        config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
        attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
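
        # check if `generate` works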
        _ = model.generate(input_ids=input_ids, attention_mask=attention_mask)
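
        # check if `generate` works when the input ids are passed positionally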
        _ = model.generate(input_ids, attention_mask=attention_mask)

    @pytest.mark.parametrize("model_id", MODELS_TO_TEST)
    def test_sequence_adapter_ops(self, model_id):
        """Test sequence of adapter operations."""
        # Test input data.
        input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
        target_ids = torch.LongTensor([[0, 0, 0], [0, 0, 0]]).to(self.torch_device)
        attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)

        # Create the original model and record its output as a reference.
        original = self.transformers_class.from_pretrained(model_id)
        original = original.to(self.torch_device)
        original_before = original(input_ids=input_ids, attention_mask=attention_mask)

        # Get the AdaptionPrompt model.
        adapted = get_peft_model(
            original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
        )
        adapted = adapted.to(self.torch_device)
        default_before = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
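
        # Test zero-init: the adaption gates start at zero, so the freshly wrapped model should
        # produce exactly the same logits as the base model.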
        assert_close(original_before.logits, default_before.logits, rtol=0, atol=0)

        # Single fine-tuning step on the "default" adapter.
        optimizer = torch.optim.SGD(adapted.parameters(), lr=1)
        optimizer.zero_grad()
        default_before.loss.backward()
        optimizer.step()

        # Test that the output changed.
        default_after = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
        assert not torch.allclose(default_before.logits, default_after.logits)

        with adapted.disable_adapter():
            # With adapters disabled, the output should match the original model again.
            default_disabled = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
            assert_close(original_before.logits, default_disabled.logits, rtol=0, atol=0)

        # Add a new adapter and test zero-init again.
        adapted.add_adapter("adapter 1", AdaptionPromptConfig(adapter_layers=2, adapter_len=8, task_type="CAUSAL_LM"))

        adapter_1_before = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
        assert_close(original_before.logits, adapter_1_before.logits, rtol=0, atol=0)

        # Single fine-tuning step on "adapter 1".
        optimizer = torch.optim.SGD(adapted.parameters(), lr=1)
        optimizer.zero_grad()
        adapter_1_before.loss.backward()
        optimizer.step()

        # Test that the "adapter 1" output changed.
        adapter_1_after = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
        assert not torch.allclose(adapter_1_before.logits, adapter_1_after.logits)
        assert not torch.allclose(original_before.logits, adapter_1_after.logits)
        assert not torch.allclose(default_after.logits, adapter_1_after.logits)

        with adapted.disable_adapter():
            # With adapters disabled, the output should match the original model again.
            adapter_1_disabled = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
            assert_close(original_before.logits, adapter_1_disabled.logits, rtol=0, atol=0)

        # Set the adapter back to "default".
        adapted.set_adapter("default")

        # The output should match the trained "default" output, not the original or "adapter 1" output.
        default_after_set = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
        assert_close(default_after.logits, default_after_set.logits, rtol=0, atol=0)
        assert not torch.allclose(original_before.logits, default_after_set.logits)
        assert not torch.allclose(adapter_1_after.logits, default_after_set.logits)

    @pytest.mark.parametrize("model_id", MODELS_TO_TEST)
    def test_add_and_set_while_disabled(self, model_id):
        """Test that adding and setting adapters while disabled works as intended."""
        # Test input data.
        input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
        target_ids = torch.LongTensor([[0, 0, 0], [0, 0, 0]]).to(self.torch_device)
        attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)

        # Create the original model and record its output as a reference.
        original = self.transformers_class.from_pretrained(model_id)
        original = original.to(self.torch_device)
        original_before = original(input_ids=input_ids, attention_mask=attention_mask)

        # Get the AdaptionPrompt model.
        adapted = get_peft_model(
            original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
        )
        adapted = adapted.to(self.torch_device)
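
        # Adding an adapter while adapters are disabled should still register it, and thanks to
        # zero-init it leaves the outputs unchanged.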
        with adapted.disable_adapter():
            adapted.add_adapter(
                "adapter 1", AdaptionPromptConfig(adapter_layers=2, adapter_len=8, task_type="CAUSAL_LM")
            )

        # Test zero-init.
        adapter_1_before = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
        assert_close(original_before.logits, adapter_1_before.logits, rtol=0, atol=0)

        # Single fine-tuning step on "adapter 1".
        optimizer = torch.optim.SGD(adapted.parameters(), lr=1)
        optimizer.zero_grad()
        adapter_1_before.loss.backward()
        optimizer.step()

        # Test that the output changed.
        adapter_1_after = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
        assert not torch.allclose(original_before.logits, adapter_1_after.logits)

        # Setting an adapter inside the disabled context should still take effect afterwards.
        adapted.set_adapter("default")
        with adapted.disable_adapter():
            adapted.set_adapter("adapter 1")

        # The output should match the trained "adapter 1" output.
        adapter_1_after_set = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
        assert_close(adapter_1_after.logits, adapter_1_after_set.logits, rtol=0, atol=0)

    @pytest.mark.parametrize("model_id", MODELS_TO_TEST)
    def test_use_cache(self, model_id):
        """Test that AdaptionPrompt generates the same output with use_cache=True as with use_cache=False."""
        torch.manual_seed(0)
        input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
        original = self.transformers_class.from_pretrained(model_id, use_cache=False)
        adapted = get_peft_model(
            original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
        )
        adapted = adapted.to(self.torch_device)
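
        # Reference generation without the KV cache.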
        expected = adapted.generate(input_ids=input_ids, max_length=8)

        # Set use_cache = True and generate the output again; it should be identical.
        adapted.base_model.config.use_cache = True
        actual = adapted.generate(input_ids=input_ids, max_length=8)
        assert_close(expected, actual, rtol=0, atol=0)

    @pytest.mark.parametrize("model_id", MODELS_TO_TEST)
    def test_bf16_inference(self, model_id):
        """Test that AdaptionPrompt works when the base model is loaded in bfloat16."""
        if self.torch_device == "mps":
            return pytest.skip("Skipping bf16 test on MPS")

        input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
        original = self.transformers_class.from_pretrained(model_id, torch_dtype=torch.bfloat16)
        adapted = get_peft_model(
            original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
        )
        adapted = adapted.to(self.torch_device)
        adapted.generate(input_ids=input_ids)

    @pytest.mark.xfail(reason="currently this fails because scores are zeroed out", raises=AssertionError)
    @pytest.mark.parametrize("model_id", MODELS_TO_TEST)
    def test_disable_adapter(self, model_id):
        model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
        dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device)
        output_before = model(dummy_input).logits

        config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4, task_type="CAUSAL_LM")
        model = get_peft_model(model, config).to(self.torch_device)
        output_peft = model(dummy_input).logits
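
        # The adaption gates start at zero, so the scores are currently zeroed out and the PEFT
        # output matches the base output; hence the xfail on this assert.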
        assert not torch.allclose(output_before, output_peft)

        with model.disable_adapter():
            output_peft_disabled = model(dummy_input).logits
            assert torch.allclose(output_before, output_peft_disabled)