# Copyright (c) OpenMMLab. All rights reserved.
from transformers import PretrainedConfig


class HformerConfig(PretrainedConfig):
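    """Configuration for the H-Former, a Q-Former-style module bridging a
    visual encoder and a language model. Setting ``_auto_class`` registers
    this config with ``AutoConfig`` when saved via ``save_pretrained``, so
    it can be reloaded with ``trust_remote_code=True``.

    Args:
        num_query_token (int): Number of learnable query tokens.
        visual_hidden_size (int): Hidden size of the visual encoder features.
        llm_hidden_size (int): Hidden size expected by the language model.
        cross_attention_freq (int): A cross-attention layer is inserted every
            ``cross_attention_freq`` BERT layers (presumably following
            BLIP-2's Q-Former design).
        bert (str): Name of the BERT checkpoint backing the H-Former.
        bias (bool): Whether the projection layer uses a bias term.
        qformer_pth (str, optional): Path to pretrained Q-Former weights.
    """
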
    model_type = 'hformer'
    _auto_class = 'AutoConfig'

    def __init__(
        self,
        num_query_token=32,
        visual_hidden_size=4096,
        llm_hidden_size=768,
        cross_attention_freq=2,
        bert='bert-base-uncased',
        bias=True,
        qformer_pth=None,
        **kwargs,
    ):
        self.num_query_token = num_query_token
        self.visual_hidden_size = visual_hidden_size
        self.llm_hidden_size = llm_hidden_size
        self.bias = bias
        self.bert = bert
        self.cross_attention_freq = cross_attention_freq
        self.qformer_pth = qformer_pth
        super().__init__(**kwargs)
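

if __name__ == '__main__':
    # Minimal usage sketch (illustrative; not part of the original module).
    # Instantiates the config, overriding two defaults, and prints the
    # serialized form via PretrainedConfig.to_json_string(). Any checkpoint
    # path supplied as `qformer_pth` here would be a hypothetical example.
    config = HformerConfig(num_query_token=32, cross_attention_freq=2)
    print(config.to_json_string())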