{
  "metadata": {
| "total_size": 140737488355328, | |
| "format": "safetensors", | |
| "model_name": "DeepXR/Helion-2.5-Rnd", | |
| "version": "2.5.0-rnd", | |
| "precision": "bfloat16", | |
| "parameters": "70B", | |
| "total_shards": 96, | |
| "created_at": "2025-01-30T00:00:00Z", | |
| "sha256_checksums_available": true, | |
| "shard_size_avg": "1.46GB", | |
| "note": "SafeTensors shards will be available soon. Model weights distributed across 96 files for optimal loading." | |
  },
  "weight_map": {
    "model.embed_tokens.weight": "model-00001-of-00096.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00002-of-00096.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00002-of-00096.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00003-of-00096.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00003-of-00096.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00004-of-00096.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00004-of-00096.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00005-of-00096.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00005-of-00096.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00005-of-00096.safetensors",
    "model.layers.1.self_attn.q_proj.weight": "model-00006-of-00096.safetensors",
    "model.layers.1.self_attn.k_proj.weight": "model-00006-of-00096.safetensors",
    "model.layers.1.self_attn.v_proj.weight": "model-00007-of-00096.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00007-of-00096.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00008-of-00096.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00008-of-00096.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00009-of-00096.safetensors",
    "model.layers.31.self_attn.q_proj.weight": "model-00092-of-00096.safetensors",
    "model.layers.31.self_attn.k_proj.weight": "model-00093-of-00096.safetensors",
    "model.layers.31.self_attn.v_proj.weight": "model-00093-of-00096.safetensors",
    "model.layers.31.self_attn.o_proj.weight": "model-00094-of-00096.safetensors",
    "model.layers.31.mlp.gate_proj.weight": "model-00094-of-00096.safetensors",
    "model.layers.31.mlp.up_proj.weight": "model-00095-of-00096.safetensors",
    "model.layers.31.mlp.down_proj.weight": "model-00095-of-00096.safetensors",
    "model.layers.31.input_layernorm.weight": "model-00096-of-00096.safetensors",
    "model.layers.31.post_attention_layernorm.weight": "model-00096-of-00096.safetensors",
    "model.norm.weight": "model-00096-of-00096.safetensors",
    "lm_head.weight": "model-00096-of-00096.safetensors"
  },
| "safetensors_info": { | |
| "description": "SafeTensors format provides secure, fast, and zero-copy tensor serialization", | |
| "total_shards": 96, | |
| "parameters": "70B", | |
| "shard_distribution": "Each transformer layer distributed across ~3 shards for balanced loading", | |
| "benefits": [ | |
| "No arbitrary code execution during loading", | |
| "Lazy loading support for memory efficiency", | |
| "Fast deserialization without pickle", | |
| "Tensor metadata validation", | |
| "Cross-platform compatibility", | |
| "Memory-mapped file support", | |
| "Parallel loading across multiple GPUs" | |
| ], | |
| "verification": "Each file will include SHA256 checksum for integrity verification", | |
| "status": "In preparation - 96 shards to be released soon" | |
| }, | |
| "shard_structure": { | |
| "embedding_layer": { | |
| "shards": ["model-00001-of-00096.safetensors"], | |
| "size_estimate": "~1.0GB" | |
| }, | |
| "transformer_layers": { | |
| "layers_per_shard": "~0.33", | |
| "shards_range": "model-00002-of-00096 to model-00095-of-00096", | |
| "size_estimate": "~1.5GB per shard" | |
| }, | |
| "output_layer": { | |
| "shards": ["model-00096-of-00096.safetensors"], | |
| "includes": ["model.norm.weight", "lm_head.weight"], | |
| "size_estimate": "~1.0GB" | |
| } | |
| } | |
| } |
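
The weight_map above follows the standard sharded-safetensors index pattern: each tensor name maps to the shard file that stores it, so a loader only has to open the shards it actually needs. Below is a minimal sketch of how such an index is typically consumed once the shards are published, assuming this index is saved as model.safetensors.index.json alongside the shards; the directory name and the load_tensor helper are illustrative, not part of this repository.

import json
from pathlib import Path

from safetensors import safe_open  # requires the safetensors package; framework="pt" also needs torch

MODEL_DIR = Path("Helion-2.5-Rnd")                       # illustrative local download directory
INDEX_FILE = MODEL_DIR / "model.safetensors.index.json"  # assumed file name for the index shown above


def load_tensor(name: str, device: str = "cpu"):
    """Resolve a tensor name through weight_map, then lazily read just that tensor from its shard."""
    index = json.loads(INDEX_FILE.read_text())
    shard = index["weight_map"][name]                    # e.g. "model-00001-of-00096.safetensors"
    with safe_open(str(MODEL_DIR / shard), framework="pt", device=device) as f:
        return f.get_tensor(name)                        # only this tensor is materialized in memory


if __name__ == "__main__":
    emb = load_tensor("model.embed_tokens.weight")
    print(emb.shape, emb.dtype)

Because safe_open memory-maps the shard, reading a single tensor this way avoids deserializing the whole ~1.5 GB file, which is the lazy-loading benefit listed in safetensors_info.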
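
The metadata also states that each shard will ship with a SHA-256 checksum. A hedged sketch of an integrity check follows, assuming the digests are eventually published as a sha256sum-style manifest ("&lt;hexdigest&gt;  &lt;filename&gt;" per line) named checksums.sha256; that file name and layout are assumptions, only the availability of per-file SHA-256 digests is stated above.

import hashlib
from pathlib import Path

MODEL_DIR = Path("Helion-2.5-Rnd")         # illustrative local download directory
MANIFEST = MODEL_DIR / "checksums.sha256"  # hypothetical manifest name, not confirmed by this index


def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream a shard through SHA-256 in 1 MiB chunks so ~1.5 GB files never sit fully in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


def verify_shards() -> None:
    """Compare each shard's computed digest against the expected value from the manifest."""
    for line in MANIFEST.read_text().splitlines():
        if not line.strip():
            continue
        expected, filename = line.split()
        status = "OK" if sha256_of(MODEL_DIR / filename) == expected else "MISMATCH"
        print(f"{filename}: {status}")


if __name__ == "__main__":
    verify_shards()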