Add model
- open_clip_config.json +1 -29
- open_clip_model.safetensors +1 -1
- open_clip_pytorch_model.bin +1 -1
open_clip_config.json
CHANGED
@@ -1,33 +1,5 @@
 {
-  "model_cfg": {
-    "embed_dim": 1024,
-    "init_logit_bias": -10,
-    "custom_text": true,
-    "vision_cfg": {
-      "image_size": 256,
-      "timm_model_name": "vit_large_patch16_siglip_256",
-      "timm_model_pretrained": false,
-      "timm_pool": "map",
-      "timm_proj": "none"
-    },
-    "text_cfg": {
-      "context_length": 64,
-      "vocab_size": 32000,
-      "tokenizer_kwargs": {
-        "clean": "canonicalize"
-      },
-      "width": 1024,
-      "heads": 16,
-      "layers": 24,
-      "no_causal_mask": true,
-      "proj_bias": true,
-      "pool_type": "last",
-      "norm_kwargs": {
-        "eps": 1e-06
-      },
-      "hf_tokenizer_name": "timm/ViT-L-16-SigLIP-256"
-    }
-  },
+  "model_cfg": null,
   "preprocess_cfg": {
     "mean": [
       0.5,
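The removed model_cfg block still documents the architecture: a timm vit_large_patch16_siglip_256 vision tower with 256x256 inputs, a 24-layer custom text tower with a 64-token context, 1024-d shared embeddings, and a SigLIP-style logit bias. The new file sets "model_cfg" to null, which suggests the architecture is meant to be resolved from open_clip's built-in model configs rather than from this file. A minimal usage sketch follows; it assumes this repo corresponds to timm/ViT-L-16-SigLIP-256 on the Hub (the hf_tokenizer_name in the old config) and that open_clip_torch and Pillow are installed.

```python
import torch
import open_clip
from PIL import Image

REPO = "hf-hub:timm/ViT-L-16-SigLIP-256"  # assumed Hub id, taken from hf_tokenizer_name

model, preprocess = open_clip.create_model_from_pretrained(REPO)
tokenizer = open_clip.get_tokenizer(REPO)
model.eval()

# 256x256 inputs and a 64-token context, per the removed model_cfg.
image = preprocess(Image.new("RGB", (256, 256))).unsqueeze(0)
text = tokenizer(["a diagram", "a dog", "a cat"])

with torch.no_grad():
    image_features = torch.nn.functional.normalize(model.encode_image(image), dim=-1)
    text_features = torch.nn.functional.normalize(model.encode_text(text), dim=-1)
    # SigLIP scores each image/text pair with a sigmoid instead of a softmax.
    probs = torch.sigmoid(
        image_features @ text_features.T * model.logit_scale.exp() + model.logit_bias
    )
print(probs)
```

The logit_bias term here corresponds to the "init_logit_bias": -10 entry in the removed config; SigLIP's pairwise sigmoid loss scores each pair independently rather than normalizing across captions.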
open_clip_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:4fae9655a086d121f4c83ec5787e56f174a0c5dd4fd87c480262725eb2a6bbaf
 size 2608671232
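The weight files are tracked through Git LFS, so the repo itself only stores a small pointer: the spec version, the sha256 oid of the blob, and its size in bytes. A quick integrity check against this pointer, assuming the safetensors file has already been downloaded (the local path is hypothetical):

```python
import hashlib
import os

# Values copied from the LFS pointer above; the local path is hypothetical.
PATH = "open_clip_model.safetensors"
EXPECTED_OID = "4fae9655a086d121f4c83ec5787e56f174a0c5dd4fd87c480262725eb2a6bbaf"
EXPECTED_SIZE = 2608671232

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so the ~2.6 GB blob never sits in memory at once."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

assert os.path.getsize(PATH) == EXPECTED_SIZE, "size does not match the pointer"
assert sha256_of(PATH) == EXPECTED_OID, "sha256 does not match the pointer"
print("pointer matches local file")
```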
open_clip_pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:2721f97b8bce38abed1e96506101c63e62afc50b2cfaf8a27913176e43e9cabd
 size 2608847582
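The .bin file carries the same weights in PyTorch's pickle format, hence the slightly larger size. A sketch for inspecting it locally, assuming a downloaded copy and a torch version that accepts the weights_only argument; open_clip checkpoints are typically a plain state_dict:

```python
import torch

# Hypothetical local copy of the downloaded checkpoint.
state_dict = torch.load(
    "open_clip_pytorch_model.bin", map_location="cpu", weights_only=True
)
print(f"{len(state_dict)} tensors")
print(sorted(state_dict)[:5])  # typically visual.* and text.* parameter names
```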