{ "architectures": [ "GPTModel" ], "model_type": "gpt", "vocab_size": 32000, "n_layer": 6, "n_head": 8, "n_embd": 512, "block_size": 1024, "dropout": 0.1, "bias": true, "torch_dtype": "float32", "transformers_version": "4.0.0", "openllm_version": "0.1.0", "training_steps": 10000, "model_size": "small" }