{
  "aligner_config": {
    "cls": "MlpProjector",
    "model_type": "aligner",
    "params": {
      "depth": 2,
      "input_dim": 1024,
      "n_embed": 2048,
      "projector_type": "mlp_gelu"
    }
  },
  "architectures": [
    "MultiModalityCausalLM"
  ],
  "language_config": {
    "hidden_size": 2048,
    "intermediate_size": 5632,
    "max_position_embeddings": 16384,
    "model_type": "llama",
    "num_attention_heads": 16,
    "num_hidden_layers": 24,
    "num_key_value_heads": 16,
    "torch_dtype": "float16",
    "vocab_size": 102400
  },
  "model_type": "multi_modality",
  "torch_dtype": "float16",
  "transformers_version": "4.38.2",
  "vision_config": {
    "cls": "CLIPVisionTower",
    "model_type": "vision",
    "params": {
      "image_size": 384,
      "model_name": "siglip_large_patch16_384",
      "pixel_mean": [
        0.5,
        0.5,
        0.5
      ],
      "pixel_std": [
        0.5,
        0.5,
        0.5
      ],
      "select_feature": "same",
      "select_layer": -1
    }
  }
}
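The `model_type` of `"multi_modality"` is not a built-in `transformers` architecture, so loading the full `MultiModalityCausalLM` typically relies on the custom modeling code shipped with the checkpoint (e.g. via `trust_remote_code=True`). As a minimal, library-agnostic sketch, the file can simply be read as JSON to pull out the fields downstream code usually needs; the `"config.json"` path below is a placeholder, not part of the config itself:

```python
import json

# Placeholder path; substitute the directory containing this config.json.
with open("config.json") as f:
    cfg = json.load(f)

# Fields that downstream code typically needs:
hidden_size = cfg["language_config"]["hidden_size"]          # 2048
vision_model = cfg["vision_config"]["params"]["model_name"]  # "siglip_large_patch16_384"
image_size = cfg["vision_config"]["params"]["image_size"]    # 384
aligner_in = cfg["aligner_config"]["params"]["input_dim"]    # 1024
aligner_out = cfg["aligner_config"]["params"]["n_embed"]     # 2048

print(hidden_size, vision_model, image_size, aligner_in, aligner_out)
```

Note that the aligner's `n_embed` (2048) matches `language_config.hidden_size`: the 2-layer GELU MLP projector maps the 1024-dimensional SigLIP vision features into the LLaMA-style language model's embedding space.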