import os
import shutil
import warnings

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, BitsAndBytesConfig

from blip3o.model import *
from blip3o.constants import DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from blip3o.train.train import smart_tokenizer_and_embedding_resize


def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False,
                          device_map="auto", device="cuda", use_flash_attn=False, **kwargs):
    kwargs = {"device_map": device_map, **kwargs}

    # Pin every module to a single explicit device when not using the default CUDA setup.
    if device != "cuda":
        kwargs['device_map'] = {"": device}

    if load_8bit:
        kwargs['load_in_8bit'] = True
    elif load_4bit:
        kwargs['load_in_4bit'] = True
        kwargs['quantization_config'] = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.float16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type='nf4'
        )
    else:
        # Without quantization, load the weights directly in fp16.
        kwargs['torch_dtype'] = torch.float16

    if use_flash_attn:
        kwargs['attn_implementation'] = 'flash_attention_2'

    tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
    # Pass the assembled kwargs through so the quantization, dtype, device-map, and
    # attention settings above actually take effect; a hardcoded
    # `.to('cuda:0')` would discard them all and fails outright for
    # bitsandbytes-quantized models, which cannot be moved after loading.
    model = blip3oQwenForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)

    image_processor = None

    # Register the multimodal special tokens the checkpoint was trained with, then
    # grow the embedding matrix to cover any newly added token ids.
    mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
    mm_use_im_patch_token = getattr(model.config, "mm_use_im_patch_token", True)
    if mm_use_im_patch_token:
        tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
    if mm_use_im_start_end:
        tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
    model.resize_token_embeddings(len(tokenizer))

    # Fall back to a 2048-token context when the config does not specify one.
    if hasattr(model.config, "max_sequence_length"):
        context_len = model.config.max_sequence_length
    else:
        context_len = 2048

    return tokenizer, model, context_len
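

# ------------------------------------------------------------------------------
# Minimal usage sketch. The checkpoint path and model name below are placeholders,
# not real model IDs; substitute a local BLIP3-o checkpoint. 4-bit loading assumes
# bitsandbytes is installed and a CUDA device is available.
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    tokenizer, model, context_len = load_pretrained_model(
        model_path="path/to/blip3o-checkpoint",  # placeholder path
        model_base=None,
        model_name="blip3o-qwen",                # placeholder name
        load_4bit=True,  # NF4 quantization via the BitsAndBytesConfig above
    )
    print(f"context length: {context_len}, vocab size: {len(tokenizer)}")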