Upload folder using huggingface_hub
README.md CHANGED

@@ -76,6 +76,7 @@ We also welcome you to experience the InternVL2 series models in our [online dem
 > Please use transformers==4.37.2 to ensure the model works normally.
 
 ```python
+import math
 import numpy as np
 import torch
 import torchvision.transforms as T
@@ -163,6 +164,32 @@ def load_image(image_file, input_size=448, max_num=6):
     return pixel_values
 
 
+def split_model(model_name):
+    device_map = {}
+    world_size = torch.cuda.device_count()
+    num_layers = {'InternVL2-8B': 32, 'InternVL2-26B': 48,
+                  'InternVL2-40B': 60, 'InternVL2-Llama3-76B': 80}[model_name]
+    # Since the first GPU will be used for ViT, treat it as half a GPU.
+    num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5))
+    num_layers_per_gpu = [num_layers_per_gpu] * world_size
+    num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5)
+    layer_cnt = 0
+    for i, num_layer in enumerate(num_layers_per_gpu):
+        for j in range(num_layer):
+            device_map[f'language_model.model.layers.{layer_cnt}'] = i
+            layer_cnt += 1
+    device_map['vision_model'] = 0
+    device_map['mlp1'] = 0
+    device_map['language_model.model.tok_embeddings'] = 0
+    device_map['language_model.model.embed_tokens'] = 0
+    device_map['language_model.output'] = 0
+    device_map['language_model.model.norm'] = 0
+    device_map['language_model.lm_head'] = 0
+    device_map[f'language_model.model.layers.{num_layers - 1}'] = 0
+
+    return device_map
+
+
 path = 'OpenGVLab/InternVL2-26B'
 # If you have an 80G A100 GPU, you can put the entire model on a single GPU.
 model = AutoModel.from_pretrained(
@@ -170,16 +197,15 @@ model = AutoModel.from_pretrained(
     torch_dtype=torch.bfloat16,
     low_cpu_mem_usage=True,
     trust_remote_code=True).eval().cuda()
-# Otherwise, you need to set device_map
-#
-#
+# Otherwise, you need to set device_map to use multiple GPUs for inference.
+# device_map = split_model('InternVL2-26B')
+# print(device_map)
 # model = AutoModel.from_pretrained(
 #     path,
 #     torch_dtype=torch.bfloat16,
 #     low_cpu_mem_usage=True,
 #     trust_remote_code=True,
-#     device_map=
-
+#     device_map=device_map).eval()
 tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
 # set the max number of tiles in `max_num`
 pixel_values = load_image('./examples/image1.jpg', max_num=6).to(torch.bfloat16).cuda()
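Not from the commit itself: a minimal sketch of the per-GPU layer arithmetic inside the new `split_model` helper above, with `world_size` passed in as an argument (instead of reading `torch.cuda.device_count()`) so it runs on a CPU-only machine. The helper in the diff additionally pins the vision tower, the `mlp1` projector, the embeddings, norm, output head, and the last LLM layer to GPU 0.

```python
import math

def sketch_layer_split(num_layers: int, world_size: int) -> list:
    """Re-derive split_model's per-GPU LLM layer counts (illustration only)."""
    # GPU 0 also hosts the ViT, so it is treated as half a GPU.
    per_gpu = math.ceil(num_layers / (world_size - 0.5))
    layers_per_gpu = [per_gpu] * world_size
    layers_per_gpu[0] = math.ceil(layers_per_gpu[0] * 0.5)
    return layers_per_gpu

# InternVL2-26B has 48 LLM layers in split_model's lookup table.
print(sketch_layer_split(48, 2))  # [16, 32]        -> 16 + 32 = 48 layers
print(sketch_layer_split(48, 4))  # [7, 14, 14, 14] -> 49 slots for 48 layers
```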
@@ -323,6 +349,10 @@ print(f'User: {question}')
 print(f'Assistant: {response}')
 ```
 
+## Finetune
+
+SWIFT from the ModelScope community supports fine-tuning (image/video) of InternVL; please check [this link](https://github.com/modelscope/swift/blob/main/docs/source_en/Multi-Modal/internvl-best-practice.md) for more details.
+
 ## Deployment
 
 ### LMDeploy
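The hunk above only carries the `## Deployment` / `### LMDeploy` headings as diff context, so the LMDeploy instructions themselves are not visible here. As a rough sketch, assuming LMDeploy's `pipeline` API and an illustrative session length and image path (neither taken from this diff), serving the checkpoint could look like:

```python
from lmdeploy import pipeline, TurbomindEngineConfig
from lmdeploy.vl import load_image

# Model ID from this README; session_len and the image path are assumptions.
model = 'OpenGVLab/InternVL2-26B'
pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))

image = load_image('./examples/image1.jpg')
response = pipe(('describe this image', image))
print(response.text)
```

The `(prompt, image)` tuple is LMDeploy's vision-language calling convention; the README's own Deployment section is the reference for the recommended configuration.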
@@ -581,6 +611,10 @@ InternVL 2.0 是一个多模态大语言模型系列,包含各种规模的模
 
 示例代码请[点击这里](#quick-start)。
 
+## 微调
+
+来自ModelScope社区的SWIFT已经支持对InternVL进行微调(图像/视频),详情请查看[此链接](https://github.com/modelscope/swift/blob/main/docs/source_en/Multi-Modal/internvl-best-practice.md)。
+
 ## 部署
 
 ### LMDeploy