chore-metadata
#28 · opened by bwang0911
Files changed:
- .gitattributes +0 -1
- LICENSE +0 -52
- README.md +16 -40
- config.json +7 -55
- configuration_jina_embeddings_v4.py +1 -3
- custom_st.py +10 -24
- modeling_jina_embeddings_v4.py +23 -27
- qwen2_5_vl.py +2 -2
- results.json +0 -1
.gitattributes
CHANGED
@@ -33,4 +33,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
-*.json filter=lfs diff=lfs merge=lfs -text
LICENSE
DELETED
@@ -1,52 +0,0 @@
-Qwen RESEARCH LICENSE AGREEMENT
-
-Qwen RESEARCH LICENSE AGREEMENT Release Date: September 19, 2024
-
-By clicking to agree or by using or distributing any portion or element of the Qwen Materials, you will be deemed to have recognized and accepted the content of this Agreement, which is effective immediately.
-
-1. Definitions
-a. This Qwen RESEARCH LICENSE AGREEMENT (this "Agreement") shall mean the terms and conditions for use, reproduction, distribution and modification of the Materials as defined by this Agreement.
-b. "We" (or "Us") shall mean Alibaba Cloud.
-c. "You" (or "Your") shall mean a natural person or legal entity exercising the rights granted by this Agreement and/or using the Materials for any purpose and in any field of use.
-d. "Third Parties" shall mean individuals or legal entities that are not under common control with us or you.
-e. "Qwen" shall mean the large language models, and software and algorithms, consisting of trained model weights, parameters (including optimizer states), machine-learning model code, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by us.
-f. "Materials" shall mean, collectively, Alibaba Cloud's proprietary Qwen and Documentation (and any portion thereof) made available under this Agreement.
-g. "Source" form shall mean the preferred form for making modifications, including but not limited to model source code, documentation source, and configuration files.
-h. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
-i. "Non-Commercial" shall mean for research or evaluation purposes only.
-
-2. Grant of Rights
-a. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Alibaba Cloud's intellectual property or other rights owned by us embodied in the Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Materials FOR NON-COMMERCIAL PURPOSES ONLY.
-b. If you are commercially using the Materials, you shall request a license from us.
-
-3. Redistribution
-You may distribute copies or make the Materials, or derivative works thereof, available as part of a product or service that contains any of them, with or without modifications, and in Source or Object form, provided that you meet the following conditions:
-a. You shall give any other recipients of the Materials or derivative works a copy of this Agreement;
-b. You shall cause any modified files to carry prominent notices stating that you changed the files;
-c. You shall retain in all copies of the Materials that you distribute the following attribution notices within a "Notice" text file distributed as a part of such copies: "Qwen is licensed under the Qwen RESEARCH LICENSE AGREEMENT, Copyright (c) Alibaba Cloud. All Rights Reserved."; and
-d. You may add your own copyright statement to your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of your modifications, or for any such derivative works as a whole, provided your use, reproduction, and distribution of the work otherwise complies with the terms and conditions of this Agreement.
-
-4. Rules of use
-a. The Materials may be subject to export controls or restrictions in China, the United States or other countries or regions. You shall comply with applicable laws and regulations in your use of the Materials.
-b. If you use the Materials or any outputs or results therefrom to create, train, fine-tune, or improve an AI model that is distributed or made available, you shall prominently display “Built with Qwen” or “Improved using Qwen” in the related product documentation.
-
-5. Intellectual Property
-a. We retain ownership of all intellectual property rights in and to the Materials and derivatives made by or for us. Conditioned upon compliance with the terms and conditions of this Agreement, with respect to any derivative works and modifications of the Materials that are made by you, you are and will be the owner of such derivative works and modifications.
-b. No trademark license is granted to use the trade names, trademarks, service marks, or product names of us, except as required to fulfill notice requirements under this Agreement or as required for reasonable and customary use in describing and redistributing the Materials.
-c. If you commence a lawsuit or other proceedings (including a cross-claim or counterclaim in a lawsuit) against us or any entity alleging that the Materials or any output therefrom, or any part of the foregoing, infringe any intellectual property or other right owned or licensable by you, then all licenses granted to you under this Agreement shall terminate as of the date such lawsuit or other proceeding is commenced or brought.
-6. Disclaimer of Warranty and Limitation of Liability
-a. We are not obligated to support, update, provide training for, or develop any further version of the Qwen Materials or to grant any license thereto.
-b. THE MATERIALS ARE PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. WE MAKE NO WARRANTY AND ASSUME NO RESPONSIBILITY FOR THE SAFETY OR STABILITY OF THE MATERIALS AND ANY OUTPUT THEREFROM.
-c. IN NO EVENT SHALL WE BE LIABLE TO YOU FOR ANY DAMAGES, INCLUDING, BUT NOT LIMITED TO ANY DIRECT, OR INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM YOUR USE OR INABILITY TO USE THE MATERIALS OR ANY OUTPUT OF IT, NO MATTER HOW IT’S CAUSED.
-d. You will defend, indemnify and hold harmless us from and against any claim by any third party arising out of or related to your use or distribution of the Materials.
-
-7. Survival and Termination.
-a. The term of this Agreement shall commence upon your acceptance of this Agreement or access to the Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein.
-b. We may terminate this Agreement if you breach any of the terms or conditions of this Agreement. Upon termination of this Agreement, you must delete and cease use of the Materials. Sections 6 and 8 shall survive the termination of this Agreement.
-
-8. Governing Law and Jurisdiction.
-a. This Agreement and any dispute arising out of or relating to it will be governed by the laws of China, without regard to conflict of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement.
-b. The People's Courts in Hangzhou City shall have exclusive jurisdiction over any dispute arising out of this Agreement.
-9. Other Terms and Conditions.
-a. Any arrangements, understandings, or agreements regarding the Material not stated herein are separate from and independent of the terms and conditions of this Agreement. You shall request a separate license from us, if you use the Materials in ways not expressly agreed to in this Agreement.
-b. We shall not be bound by any additional or different terms or conditions communicated by you unless expressly agreed.
README.md
CHANGED
@@ -1,20 +1,6 @@
 ---
 tags:
 - vidore
-- colpali
-- multimodal-embedding
-- multilingual-embedding
-- Text-to-Visual Document (T→VD) retrieval
-- feature-extraction
-- sentence-similarity
-- mteb
-- sentence-transformers
-- vllm
-language:
-- multilingual
-inference: false
-library_name: transformers
-pipeline_tag: visual-document-retrieval
 ---
 <br><br>
 
@@ -27,21 +13,26 @@ pipeline_tag: visual-document-retrieval
 <b>The embedding model trained by <a href="https://jina.ai/"><b>Jina AI</b></a>.</b>
 </p>
 
-
+<p align="center">
+<b>Jina Embeddings v4: Universal Embeddings for Multimodal Multilingual Retrieval</b>
+</p>
+
 
+## Quick Start
 
-[
+[Blog](https://jina.ai/news/) | [Technical Report](https://arxiv.org/abs/2506.18902) | [API](https://jina.ai/embeddings)
 
 
 ## Intended Usage & Model Info
-`jina-embeddings-v4` is a
-The model is
+`jina-embeddings-v4` is a multilingual, multimodal embedding model designed for unified representation of text and images.
+The model is specialized for complex document retrieval, including visually rich documents with charts, tables, and illustrations.
+Embeddings produced by `jina-embeddings-v4` serve as the backbone for neural information retrieval and multimodal GenAI applications.
 
 
-Built on [Qwen/Qwen2.5-VL-3B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-3B-Instruct), `jina-embeddings-v4` features:
+Built based on [Qwen/Qwen2.5-VL-3B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-3B-Instruct), `jina-embeddings-v4` has the following features:
 
 - **Unified embeddings** for text, images, and visual documents, supporting both dense (single-vector) and late-interaction (multi-vector) retrieval.
-- **Multilingual support** (
+- **Multilingual support** (20+ languages) and compatibility with a wide range of domains, including technical and visually complex documents.
 - **Task-specific adapters** for retrieval, text matching, and code-related tasks, which can be selected at inference time.
 - **Flexible embedding size**: dense embeddings are 2048 dimensions by default but can be truncated to as low as 128 with minimal performance loss.
 
@@ -62,9 +53,9 @@ Summary of features:
 
 
 
-## Training
+## Training, Data, Parameters
 
-Please refer to our [technical report of jina-embeddings-v4](https://arxiv.org/abs/2506.18902) for
+Please refer to our [technical report of jina-embeddings-v4](https://arxiv.org/abs/2506.18902) for the model and training details.
 
 
 ## Usage
@@ -153,7 +144,7 @@ from transformers import AutoModel
 import torch
 
 # Initialize the model
-model = AutoModel.from_pretrained("jinaai/jina-embeddings-v4", trust_remote_code=True
+model = AutoModel.from_pretrained("jinaai/jina-embeddings-v4", trust_remote_code=True)
 
 model.to("cuda")
 
@@ -320,21 +311,6 @@ code_embeddings = model.encode(
 ```
 </details>
 
-<details>
-<summary>via <a href="https://github.com/vllm-project/vllm">vLLM</a></summary>
-
-We provide separate model versions for each task (`retrieval`, `text-matching`, `code`) where specific adapter is merged into the base `Qwen2.5-VL` weights.
-This modification enables native compatibility with vLLM.
-
-Instructions and usage examples for each task are available in their respective directories:
-- [jina-embeddings-v4-vllm-retrieval](https://huggingface.co/jinaai/jina-embeddings-v4-vllm-retrieval)
-- [jina-embeddings-v4-vllm-text-matching](https://huggingface.co/jinaai/jina-embeddings-v4-vllm-text-matching)
-- [jina-embeddings-v4-vllm-code](https://huggingface.co/jinaai/jina-embeddings-v4-vllm-code)
-
-Please refer to the directory that matches your task for more details.
-
-</details>
-
 
 ## Jina-VDR
 Alongside `jina-embeddings-v4`, we’re releasing [Jina VDR](https://github.com/jina-ai/jina-vdr), a multilingual, multi-domain benchmark for visual document retrieval. The task collection can be viewed [here](https://huggingface.co/collections/jinaai/jinavdr-visual-document-retrieval-684831c022c53b21c313b449), and evaluation instructions can be found [here](https://github.com/jina-ai/jina-vdr).
@@ -342,8 +318,8 @@ Alongside `jina-embeddings-v4`, we’re releasing [Jina VDR](https://github.com/
 
 ## License
 
-This model
-
+This model is licensed to download and run under [CC BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/deed.en). It is available for commercial use via the [Jina Embeddings API](https://jina.ai/embeddings/), [AWS](https://longdogechallenge.com/), [Azure](https://longdogechallenge.com/), and [GCP](https://longdogechallenge.com/). To download for commercial use, please [contact us](https://jina.ai/contact-sales).
+
 
 ## Contact
 
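The README hunks above restore the truncated `from_pretrained` call and keep the bullets on task adapters and truncatable dense embeddings. Below is a minimal usage sketch consistent with those bullets and with the `encode_text` parameters visible later in this diff; the query text, task name, and truncation value are illustrative, not taken from the model card.

```python
from transformers import AutoModel

# Load the model with its remote code (the call the README hunk fixes).
model = AutoModel.from_pretrained("jinaai/jina-embeddings-v4", trust_remote_code=True)
model.to("cuda")

# Dense (single-vector) retrieval embeddings, truncated to 128 dimensions.
embeddings = model.encode_text(
    texts=["What is visual document retrieval?"],
    task="retrieval",      # one of the task_names listed in config.json
    prompt_name="query",   # default prompt per the encode_text code in this PR
    truncate_dim=128,      # Matryoshka truncation; see matryoshka_dims in config.json
)
```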
config.json
CHANGED
@@ -35,74 +35,26 @@
 "single_vector_pool_strategy": "mean",
 "sliding_window": 32768,
 "tie_word_embeddings": true,
-"text_config": {
-"attention_dropout": 0.0,
-"bos_token_id": 151643,
-"eos_token_id": 151645,
-"hidden_act": "silu",
-"hidden_size": 2048,
-"image_token_id": null,
-"initializer_range": 0.02,
-"intermediate_size": 11008,
-"max_position_embeddings": 128000,
-"max_window_layers": 70,
-"model_type": "qwen2_5_vl_text",
-"num_attention_heads": 16,
-"num_hidden_layers": 36,
-"num_key_value_heads": 2,
-"rms_norm_eps": 1e-06,
-"rope_scaling": {
-"mrope_section": [
-16,
-24,
-24
-],
-"rope_type": "default",
-"type": "default"
-},
-"rope_theta": 1000000.0,
-"sliding_window": null,
-"tie_word_embeddings": true,
-"torch_dtype": "bfloat16",
-"use_cache": true,
-"use_sliding_window": false,
-"vocab_size": 151936
-},
 "torch_dtype": "bfloat16",
 "transformers_version": "4.52.0",
 "use_cache": true,
 "use_sliding_window": false,
 "video_token_id": 151656,
 "vision_config": {
-"depth": 32,
-"fullatt_block_indexes": [
-7,
-15,
-23,
-31
-],
-"hidden_act": "silu",
 "hidden_size": 1280,
-"in_channels": 3,
 "in_chans": 3,
-"initializer_range": 0.02,
-"intermediate_size": 3420,
 "model_type": "qwen2_5_vl",
-"num_heads": 16,
 "out_hidden_size": 2048,
-"patch_size": 14,
-"spatial_merge_size": 2,
 "spatial_patch_size": 14,
-"temporal_patch_size": 2,
 "tokens_per_second": 2,
-"torch_dtype": "bfloat16"
-"window_size": 112
+"torch_dtype": "bfloat16"
 },
-"task_names": ["retrieval", "text-matching", "code"],
-"matryoshka_dims": [128, 256, 512, 1024, 2048],
-"_attn_implementation": "flash_attention_2",
-"truncate_dim": null,
 "vision_end_token_id": 151653,
 "vision_start_token_id": 151652,
-"vision_token_id": 151654
+"vision_token_id": 151654,
+"vocab_size": 151936,
+"truncate_dim": null,
+"task_names": ["retrieval", "text-matching", "code"],
+"matryoshka_dims": [128, 256, 512, 1024, 2048],
+"_attn_implementation": "flash_attention_2"
 }
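After this change the adapter and truncation metadata (`task_names`, `matryoshka_dims`, `truncate_dim`, `_attn_implementation`) sits at the top level of `config.json` rather than next to the removed `text_config` block. A hedged sketch of how those keys surface on the loaded config object, assuming the repository's `auto_map` resolves to `JinaEmbeddingsV4Config`:

```python
from transformers import AutoConfig

# Load the remote-code config; top-level keys become plain attributes.
config = AutoConfig.from_pretrained("jinaai/jina-embeddings-v4", trust_remote_code=True)

print(config.task_names)       # ["retrieval", "text-matching", "code"]
print(config.matryoshka_dims)  # [128, 256, 512, 1024, 2048]
print(config.truncate_dim)     # None -> full 2048-dim dense embeddings by default
```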
configuration_jina_embeddings_v4.py
CHANGED
@@ -2,7 +2,6 @@ from transformers.models.qwen2_5_vl import Qwen2_5_VLConfig
 
 from typing import Optional
 
-
 class JinaEmbeddingsV4Config(Qwen2_5_VLConfig):
 """
 Configuration for the JinaEmbeddingsV4 model.
@@ -13,11 +12,10 @@ class JinaEmbeddingsV4Config(Qwen2_5_VLConfig):
 single_vector_pool_strategy: str = "mean",
 multi_vector_projector_dim: int = 128,
 pretrained_peft_model_name_or_path: Optional[str] = None,
-verbosity: int = 1,
 **kwargs,
 ):
 super().__init__(**kwargs)
 self.single_vector_pool_strategy = single_vector_pool_strategy
 self.multi_vector_projector_dim = multi_vector_projector_dim
 self.pretrained_peft_model_name_or_path = pretrained_peft_model_name_or_path
-
+
custom_st.py
CHANGED
@@ -1,5 +1,3 @@
-import json
-import os
 from io import BytesIO
 from pathlib import Path
 from typing import Any, Dict, List, Literal, Optional, Union
@@ -74,15 +72,14 @@ class Transformer(nn.Module):
 response = requests.get(clean_text)
 texts[i] = Image.open(BytesIO(response.content)).convert("RGB")
 image_indices.append(i)
-
+elif Path(clean_text).is_file():
 try:
-
-
-image_indices.append(i)
-else:
-text_indices.append(i)
+texts[i] = Image.open(clean_text).convert("RGB")
+image_indices.append(i)
 except Exception as e:
 text_indices.append(i)
+else:
+text_indices.append(i)
 elif isinstance(text, Image.Image):
 image_indices.append(i)
 else:
@@ -106,10 +103,7 @@ class Transformer(nn.Module):
 return encoding
 
 def forward(
-self,
-features: Dict[str, torch.Tensor],
-task: Optional[str] = None,
-truncate_dim: Optional[int] = None,
+self, features: Dict[str, torch.Tensor], task: Optional[str] = None, truncate_dim: Optional[int] = None
 ) -> Dict[str, torch.Tensor]:
 self.model.eval()
 
@@ -143,10 +137,8 @@ class Transformer(nn.Module):
 **text_batch, task_label=task
 ).single_vec_emb
 if truncate_dim:
-text_embeddings = text_embeddings[:, :truncate_dim]
-text_embeddings = torch.nn.functional.normalize(
-text_embeddings, p=2, dim=-1
-)
+text_embeddings = text_embeddings[:, : truncate_dim]
+text_embeddings = torch.nn.functional.normalize(text_embeddings, p=2, dim=-1)
 for i, embedding in enumerate(text_embeddings):
 all_embeddings.append((text_indices[i], embedding))
 
@@ -163,10 +155,8 @@ class Transformer(nn.Module):
 **image_batch, task_label=task
 ).single_vec_emb
 if truncate_dim:
-img_embeddings = img_embeddings[:, :truncate_dim]
-img_embeddings = torch.nn.functional.normalize(
-img_embeddings, p=2, dim=-1
-)
+img_embeddings = img_embeddings[:, : truncate_dim]
+img_embeddings = torch.nn.functional.normalize(img_embeddings, p=2, dim=-1)
 
 for i, embedding in enumerate(img_embeddings):
 all_embeddings.append((image_indices[i], embedding))
@@ -179,7 +169,3 @@ class Transformer(nn.Module):
 features["sentence_embedding"] = combined_embeddings
 
 return features
-
-@classmethod
-def load(cls, input_path: str) -> "Transformer":
-return cls(model_name_or_path=input_path)
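The `custom_st.py` hunks add a local-file branch to the input routing and collapse the truncate-then-normalize steps onto single lines. A standalone sketch of the routing logic those hunks describe; the helper name and return shape are illustrative, not part of the repository API:

```python
from io import BytesIO
from pathlib import Path

import requests
from PIL import Image


def route_inputs(items):
    """Split mixed inputs into texts and images, mirroring the PR's routing rules."""
    texts, images = [], []
    for item in items:
        if isinstance(item, Image.Image):
            images.append(item)
        elif isinstance(item, str) and item.startswith(("http://", "https://")):
            # URL strings are downloaded and treated as images.
            response = requests.get(item)
            images.append(Image.open(BytesIO(response.content)).convert("RGB"))
        elif isinstance(item, str) and Path(item).is_file():
            # Strings pointing at an existing file are opened with PIL.
            try:
                images.append(Image.open(item).convert("RGB"))
            except Exception:
                texts.append(item)  # unreadable file -> fall back to text
        else:
            texts.append(item)  # everything else is plain text
    return texts, images
```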
modeling_jina_embeddings_v4.py
CHANGED
@@ -37,7 +37,7 @@ class JinaEmbeddingsV4Processor(Qwen2_5_VLProcessor):
 def __init__(self, *args, **kwargs) -> None:
 Qwen2_5_VLProcessor.__init__(self, *args, **kwargs)
 self.assistant_prefix_len = 58
-self.text_max_length =
+self.text_max_length = 8192
 
 def process_images(
 self,
@@ -146,7 +146,6 @@ class JinaEmbeddingsV4Model(Qwen2_5_VLForConditionalGeneration):
 self.name_or_path, trust_remote_code=True, use_fast=True
 )
 self.multi_vector_projector_dim = config.multi_vector_projector_dim
-self.verbosity = config.verbosity
 self._task = None
 
 @property
@@ -243,6 +242,7 @@ class JinaEmbeddingsV4Model(Qwen2_5_VLForConditionalGeneration):
 pooled_output = masked_hidden_states.sum(dim=1) / image_mask.sum(
 dim=1, keepdim=True
 )
+
 else: # got query text
 pooled_output = torch.sum(
 hidden_states * attention_mask.unsqueeze(-1), dim=1
@@ -332,12 +332,10 @@ class JinaEmbeddingsV4Model(Qwen2_5_VLForConditionalGeneration):
 collate_fn=processor_fn,
 )
 if return_multivector and len(data) > 1:
-assert
-not return_numpy
-), "`return_numpy` is not supported when `return_multivector=True` and more than one data is encoded"
+assert not return_numpy, "`return_numpy` is not supported when `return_multivector=True` and more than one data is encoded"
 results = []
 self.eval()
-for batch in tqdm(dataloader, desc=desc
+for batch in tqdm(dataloader, desc=desc):
 with torch.no_grad():
 batch = {k: v.to(self.device) for k, v in batch.items()}
 with torch.autocast(
@@ -348,12 +346,10 @@ class JinaEmbeddingsV4Model(Qwen2_5_VLForConditionalGeneration):
 embeddings = embeddings.single_vec_emb
 if truncate_dim is not None:
 embeddings = embeddings[:, :truncate_dim]
-embeddings = torch.nn.functional.normalize(
-embeddings, p=2, dim=-1
-)
+embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=-1)
 else:
 embeddings = embeddings.multi_vec_emb
-
+
 if return_multivector and not return_numpy:
 valid_tokens = batch["attention_mask"].bool()
 embeddings = [
@@ -417,7 +413,7 @@ class JinaEmbeddingsV4Model(Qwen2_5_VLForConditionalGeneration):
 self,
 texts: Union[str, List[str]],
 task: Optional[str] = None,
-max_length: int =
+max_length: int = 8192,
 batch_size: int = 8,
 return_multivector: bool = False,
 return_numpy: bool = False,
@@ -440,9 +436,7 @@ class JinaEmbeddingsV4Model(Qwen2_5_VLForConditionalGeneration):
 List of text embeddings as tensors or numpy arrays when encoding multiple texts, or single text embedding as tensor when encoding a single text
 """
 prompt_name = prompt_name or "query"
-encode_kwargs = self._validate_encoding_params(
-truncate_dim=truncate_dim, prompt_name=prompt_name
-)
+encode_kwargs = self._validate_encoding_params(truncate_dim=truncate_dim, prompt_name=prompt_name)
 
 task = self._validate_task(task)
 
@@ -457,11 +451,9 @@ class JinaEmbeddingsV4Model(Qwen2_5_VLForConditionalGeneration):
 # If return_multivector is True and encoding multiple texts, ignore return_numpy
 if return_multivector and return_list and len(texts) > 1:
 if return_numpy:
-print(
-"Warning: `return_numpy` is ignored when `return_multivector=True` and `len(texts) > 1`"
-)
+print("Warning: `return_numpy` is ignored when `return_multivector=True` and `len(texts) > 1`")
 return_numpy = False
-
+
 if isinstance(texts, str):
 texts = [texts]
 
@@ -476,7 +468,7 @@ class JinaEmbeddingsV4Model(Qwen2_5_VLForConditionalGeneration):
 **encode_kwargs,
 )
 
-return embeddings if return_list else embeddings[0]
+return embeddings if return_list else embeddings[0]
 
 def _load_images_if_needed(
 self, images: List[Union[str, Image.Image]]
@@ -523,21 +515,19 @@ class JinaEmbeddingsV4Model(Qwen2_5_VLForConditionalGeneration):
 )
 encode_kwargs = self._validate_encoding_params(truncate_dim=truncate_dim)
 task = self._validate_task(task)
-
+
 return_list = isinstance(images, list)
 
 # If return_multivector is True and encoding multiple images, ignore return_numpy
 if return_multivector and return_list and len(images) > 1:
 if return_numpy:
-print(
-"Warning: `return_numpy` is ignored when `return_multivector=True` and `len(images) > 1`"
-)
+print("Warning: `return_numpy` is ignored when `return_multivector=True` and `len(images) > 1`")
 return_numpy = False
 
 # Convert single image to list
 if isinstance(images, (str, Image.Image)):
 images = [images]
-
+
 images = self._load_images_if_needed(images)
 embeddings = self._process_batches(
 data=images,
@@ -598,12 +588,18 @@ class JinaEmbeddingsV4Model(Qwen2_5_VLForConditionalGeneration):
 config=lora_config,
 )
 
-
+@property
+def task(self):
 return self.model.task
 
-
+@task.setter
+def task(self, value):
 self.model.task = value
 
-peft_model.
+peft_model.task = property(task.fget, task.fset)
+peft_model.__class__.task = property(
+lambda self: self.model.task,
+lambda self, value: setattr(self.model, "task", value),
+)
 
 return peft_model
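The last modeling hunk exposes `task` on the PEFT-wrapped model by installing a property on the wrapper's class that forwards reads and writes to the inner model. A minimal, self-contained illustration of that delegation pattern; the class names are stand-ins, not the repository's PEFT classes:

```python
class Inner:
    """Stand-in for the wrapped model, which owns the real task attribute."""
    def __init__(self):
        self.task = None


class Wrapper:
    """Stand-in for the PEFT wrapper that holds the inner model as self.model."""
    def __init__(self, model):
        self.model = model


wrapper = Wrapper(Inner())

# Properties are resolved on the type, not the instance, which is why the PR
# assigns to peft_model.__class__ rather than only to peft_model itself.
Wrapper.task = property(
    lambda self: self.model.task,
    lambda self, value: setattr(self.model, "task", value),
)

wrapper.task = "retrieval"              # forwarded to the inner model
assert wrapper.model.task == "retrieval"
```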
qwen2_5_vl.py
CHANGED
@@ -345,8 +345,8 @@ from transformers.utils import auto_docstring, can_return_tuple, is_torch_flex_a
 
 
 if is_flash_attn_available():
-from
-
+from transformers.modeling_flash_attention_utils import apply_rotary_emb, flash_attn_varlen_func
+
 
 if is_flash_attn_available():
 from transformers.modeling_flash_attention_utils import _flash_attention_forward
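The `qwen2_5_vl.py` hunk repairs an import that had been cut off inside the `is_flash_attn_available()` guard. A hedged sketch of the general guarded-import pattern at work here, using the `flash_attn` package directly; the fallback and helper name are illustrative:

```python
# Import optional flash-attention symbols only when the package is installed,
# so the module still imports cleanly on machines without flash-attn.
try:
    from flash_attn import flash_attn_varlen_func  # optional dependency
except ImportError:
    flash_attn_varlen_func = None


def has_flash_attention() -> bool:
    # Plays the same role as the is_flash_attn_available() guard in the hunk above.
    return flash_attn_varlen_func is not None
```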
results.json
CHANGED
@@ -578,5 +578,4 @@
 "naucs_at_100_max": null,
 "naucs_at_100_std": null,
 "naucs_at_100_diff1": null
-}
 }