{
  "_name_or_path": "jinaai/jina-bert-v2-qk-post-norm",
  "architectures": [
    "JinaBertForMaskedLM"
  ],
  "attention_probs_dropout_prob": 0.0,
  "attn_implementation": "torch",
  "auto_map": {
    "AutoConfig": "jinaai/jina-bert-v2-qk-post-norm--configuration_bert.JinaBertConfig",
    "AutoModel": "jinaai/jina-bert-v2-qk-post-norm--modeling_bert.JinaBertModel",
    "AutoModelForMaskedLM": "jinaai/jina-bert-v2-qk-post-norm--modeling_bert.JinaBertForMaskedLM",
    "AutoModelForSequenceClassification": "jinaai/jina-bert-v2-qk-post-norm--modeling_bert.JinaBertForSequenceClassification"
  },
  "classifier_dropout": null,
  "emb_pooler": "mean",
  "feed_forward_type": "geglu",
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.0,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 8192,
  "model_max_length": 8192,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "position_embedding_type": "alibi",
  "torch_dtype": "float16",
  "transformers_version": "4.35.2",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 61056
}
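
Because `auto_map` routes the `Auto*` classes to custom modeling code hosted in the jinaai/jina-bert-v2-qk-post-norm repository, loading this config through `transformers` requires `trust_remote_code=True`. The sketch below is a minimal, hedged example of loading and running the model; the mean-pooling step mirrors the `"emb_pooler": "mean"` setting but is an illustrative reimplementation, not the repo's own encoding helper.

```python
import torch
from transformers import AutoModel, AutoTokenizer

model_id = "jinaai/jina-bert-v2-qk-post-norm"

# auto_map points AutoModel at JinaBertModel in the repo's custom
# modeling_bert.py, so remote code must be trusted explicitly.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModel.from_pretrained(
    model_id,
    trust_remote_code=True,
    torch_dtype=torch.float16,  # matches "torch_dtype": "float16"
)
model.eval()

# ALiBi position biases ("position_embedding_type": "alibi") let the
# model handle sequences up to max_position_embeddings (8192 tokens).
inputs = tokenizer(
    "An example sentence.",
    return_tensors="pt",
    truncation=True,
    max_length=8192,
)

with torch.no_grad():
    outputs = model(**inputs)

# Mean-pool token embeddings, masking out padding tokens. This is an
# assumed illustration of "emb_pooler": "mean", not the official API.
mask = inputs["attention_mask"].unsqueeze(-1).to(outputs.last_hidden_state.dtype)
embedding = (outputs.last_hidden_state * mask).sum(dim=1) / mask.sum(dim=1)
print(embedding.shape)  # (1, 768), i.e. (batch, hidden_size)
```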