```json
{
  "model_type": "gpt_bigcode",
  "quantization": "q4f32_1",
  "model_config": {
    "n_embd": 6144,
    "n_inner": 24576,
    "n_head": 48,
    "n_layer": 40,
    "n_positions": 8192,
    "layer_norm_epsilon": 1e-05,
    "vocab_size": 49152,
    "context_window_size": 8192,
    "prefill_chunk_size": 8192,
    "tensor_parallel_shards": 1
  },
  "vocab_size": 49152,
  "context_window_size": 8192,
  "sliding_window_size": -1,
  "prefill_chunk_size": 8192,
  "attention_sink_size": -1,
  "tensor_parallel_shards": 1,
  "max_batch_size": 80,
  "mean_gen_len": 128,
  "max_gen_len": 512,
  "shift_fill_factor": 0.3,
  "temperature": 0.7,
  "repetition_penalty": 1.0,
  "top_p": 0.95,
  "conv_template": "LM",
  "pad_token_id": 0,
  "bos_token_id": 0,
  "eos_token_id": 0,
  "tokenizer_files": [
    "tokenizer.json",
    "vocab.json",
    "merges.txt",
    "tokenizer_config.json"
  ],
  "version": "0.1.0"
}
```
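A config like this can be consumed programmatically before handing it to the runtime. The sketch below is a minimal, hypothetical validator, not part of the MLC LLM API: it loads an `mlc-chat-config.json` from disk, checks a few invariants the fields above imply (the embedding width divides evenly by the head count, the prefill chunk fits in the context window, the top-level and nested vocabulary sizes agree), and prints the resolved per-head dimension. The file path and the function name `validate_chat_config` are assumptions for illustration.

```python
import json
from pathlib import Path


def validate_chat_config(path: str) -> dict:
    """Hypothetical helper (not an MLC LLM API): sanity-check an
    mlc-chat-config.json like the one shown above."""
    config = json.loads(Path(path).read_text())
    model = config["model_config"]

    # gpt_bigcode splits the embedding evenly across attention heads:
    # 6144 / 48 = 128 dims per head in this config.
    assert model["n_embd"] % model["n_head"] == 0, "n_embd must divide by n_head"

    # Prefill chunks cannot exceed the context window (both 8192 here).
    assert config["prefill_chunk_size"] <= config["context_window_size"]

    # The top-level vocab_size mirrors model_config.vocab_size, so the
    # tokenizer files listed in the config cover the same vocabulary.
    assert config["vocab_size"] == model["vocab_size"]

    head_dim = model["n_embd"] // model["n_head"]
    print(f"{config['model_type']} ({config['quantization']}): "
          f"{model['n_layer']} layers, head_dim={head_dim}")
    return config


if __name__ == "__main__":
    validate_chat_config("mlc-chat-config.json")
```

For the values shown above, this prints `gpt_bigcode (q4f32_1): 40 layers, head_dim=128`; the same checks would catch a hand-edited config whose architecture fields have drifted out of sync.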