{
"architectures": [
"SiglipForImageClassification"
],
"id2label": {
"0": "calling",
"1": "clapping",
"2": "cycling",
"3": "dancing",
"4": "drinking",
"5": "eating",
"6": "fighting",
"7": "hugging",
"8": "laughing",
"9": "listening_to_music",
"10": "running",
"11": "sitting",
"12": "sleeping",
"13": "texting",
"14": "using_laptop"
},
"initializer_factor": 1.0,
"label2id": {
"calling": 0,
"clapping": 1,
"cycling": 2,
"dancing": 3,
"drinking": 4,
"eating": 5,
"fighting": 6,
"hugging": 7,
"laughing": 8,
"listening_to_music": 9,
"running": 10,
"sitting": 11,
"sleeping": 12,
"texting": 13,
"using_laptop": 14
},
"model_type": "siglip",
"problem_type": "single_label_classification",
"text_config": {
"attention_dropout": 0.0,
"hidden_act": "gelu_pytorch_tanh",
"hidden_size": 768,
"intermediate_size": 3072,
"layer_norm_eps": 1e-06,
"max_position_embeddings": 64,
"model_type": "siglip_text_model",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"projection_size": 768,
"torch_dtype": "float32",
"vocab_size": 256000
},
"torch_dtype": "float32",
"transformers_version": "4.50.3",
"vision_config": {
"attention_dropout": 0.0,
"hidden_act": "gelu_pytorch_tanh",
"hidden_size": 768,
"image_size": 224,
"intermediate_size": 3072,
"layer_norm_eps": 1e-06,
"model_type": "siglip_vision_model",
"num_attention_heads": 12,
"num_channels": 3,
"num_hidden_layers": 12,
"patch_size": 16,
"torch_dtype": "float32"
}
}
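
For context on how this config is consumed: a minimal inference sketch, assuming the config ships inside a checkpoint on the Hub. The repo ID and image path below are placeholders, not values taken from this file. The 15-way classification head maps logits back to the action labels through `id2label`.

```python
from PIL import Image
import torch
from transformers import AutoImageProcessor, SiglipForImageClassification

# Placeholder repo ID; substitute the actual checkpoint this config belongs to.
model_id = "prithivMLmods/<checkpoint-name>"

processor = AutoImageProcessor.from_pretrained(model_id)
model = SiglipForImageClassification.from_pretrained(model_id)  # reads this config
model.eval()

# Placeholder image path. The processor resizes inputs to image_size=224, which
# the vision tower splits into (224 / 16)^2 = 196 patches (patch_size=16).
image = Image.open("example.jpg").convert("RGB")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 15): one logit per action label

predicted_id = logits.argmax(-1).item()
print(model.config.id2label[predicted_id])  # e.g. "dancing"
```

Note that `problem_type` is `"single_label_classification"`, so each image is scored against exactly one of the 15 actions; the argmax above reflects that setup.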