Update model.safetensors.index.json

#5
by AlexGall - opened
Files changed (1) hide show
  1. model.safetensors.index.json +109 -0
model.safetensors.index.json CHANGED
@@ -1,5 +1,114 @@
1
  {
2
  "metadata": {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  "total_size": 140737488355328,
4
  "format": "safetensors",
5
  "model_name": "DeepXR/Helion-2.5-Rnd",
 
1
  {
2
  "metadata": {
3
+ "total_size": 128849018880,
4
+ "format": "safetensors",
5
+ "model_name": "DeepXR/Helion-2.5-Rnd",
6
+ "version": "2.5.0-rnd",
7
+ "precision": "float16",
8
+ "parameters": "70B",
9
+ "total_shards": 82,
10
+ "created_at": "2025-01-30T00:00:00Z",
11
+ "sha256_checksums_available": true,
12
+ "shard_size_bytes": 1686376448,
13
+ "shard_size_gb": 1.57,
14
+ "note": "Model weights distributed across 82 SafeTensors shards (shard_01 to shard_82), each approximately 1.57GB in FP16 precision."
15
+ },
16
+ "weight_map": {
17
+ "model.embed_tokens.weight": "shard_01.safetensors",
18
+ "model.layers.0.self_attn.q_proj.weight": "shard_02.safetensors",
19
+ "model.layers.0.self_attn.k_proj.weight": "shard_02.safetensors",
20
+ "model.layers.0.self_attn.v_proj.weight": "shard_03.safetensors",
21
+ "model.layers.0.self_attn.o_proj.weight": "shard_03.safetensors",
22
+ "model.layers.0.mlp.gate_proj.weight": "shard_04.safetensors",
23
+ "model.layers.0.mlp.up_proj.weight": "shard_04.safetensors",
24
+ "model.layers.0.mlp.down_proj.weight": "shard_05.safetensors",
25
+ "model.layers.0.input_layernorm.weight": "shard_05.safetensors",
26
+ "model.layers.0.post_attention_layernorm.weight": "shard_05.safetensors",
27
+ "model.layers.1.self_attn.q_proj.weight": "shard_06.safetensors",
28
+ "model.layers.1.self_attn.k_proj.weight": "shard_06.safetensors",
29
+ "model.layers.1.self_attn.v_proj.weight": "shard_07.safetensors",
30
+ "model.layers.1.self_attn.o_proj.weight": "shard_07.safetensors",
31
+ "model.layers.1.mlp.gate_proj.weight": "shard_08.safetensors",
32
+ "model.layers.1.mlp.up_proj.weight": "shard_08.safetensors",
33
+ "model.layers.1.mlp.down_proj.weight": "shard_09.safetensors",
34
+ "model.layers.2.self_attn.q_proj.weight": "shard_10.safetensors",
35
+ "model.layers.2.self_attn.k_proj.weight": "shard_10.safetensors",
36
+ "model.layers.2.self_attn.v_proj.weight": "shard_11.safetensors",
37
+ "model.layers.2.self_attn.o_proj.weight": "shard_11.safetensors",
38
+ "model.layers.2.mlp.gate_proj.weight": "shard_12.safetensors",
39
+ "model.layers.2.mlp.up_proj.weight": "shard_12.safetensors",
40
+ "model.layers.2.mlp.down_proj.weight": "shard_13.safetensors",
41
+ "model.layers.3.self_attn.q_proj.weight": "shard_14.safetensors",
42
+ "model.layers.3.self_attn.k_proj.weight": "shard_14.safetensors",
43
+ "model.layers.3.self_attn.v_proj.weight": "shard_15.safetensors",
44
+ "model.layers.3.self_attn.o_proj.weight": "shard_15.safetensors",
45
+ "model.layers.3.mlp.gate_proj.weight": "shard_16.safetensors",
46
+ "model.layers.3.mlp.up_proj.weight": "shard_16.safetensors",
47
+ "model.layers.3.mlp.down_proj.weight": "shard_17.safetensors",
48
+ "model.layers.29.self_attn.q_proj.weight": "shard_74.safetensors",
49
+ "model.layers.29.self_attn.k_proj.weight": "shard_74.safetensors",
50
+ "model.layers.29.self_attn.v_proj.weight": "shard_75.safetensors",
51
+ "model.layers.29.self_attn.o_proj.weight": "shard_75.safetensors",
52
+ "model.layers.29.mlp.gate_proj.weight": "shard_76.safetensors",
53
+ "model.layers.29.mlp.up_proj.weight": "shard_76.safetensors",
54
+ "model.layers.29.mlp.down_proj.weight": "shard_77.safetensors",
55
+ "model.layers.30.self_attn.q_proj.weight": "shard_78.safetensors",
56
+ "model.layers.30.self_attn.k_proj.weight": "shard_78.safetensors",
57
+ "model.layers.30.self_attn.v_proj.weight": "shard_79.safetensors",
58
+ "model.layers.30.self_attn.o_proj.weight": "shard_79.safetensors",
59
+ "model.layers.30.mlp.gate_proj.weight": "shard_80.safetensors",
60
+ "model.layers.30.mlp.up_proj.weight": "shard_80.safetensors",
61
+ "model.layers.30.mlp.down_proj.weight": "shard_81.safetensors",
62
+ "model.layers.31.self_attn.q_proj.weight": "shard_81.safetensors",
63
+ "model.layers.31.self_attn.k_proj.weight": "shard_81.safetensors",
64
+ "model.layers.31.self_attn.v_proj.weight": "shard_82.safetensors",
65
+ "model.layers.31.self_attn.o_proj.weight": "shard_82.safetensors",
66
+ "model.layers.31.mlp.gate_proj.weight": "shard_82.safetensors",
67
+ "model.layers.31.mlp.up_proj.weight": "shard_82.safetensors",
68
+ "model.layers.31.mlp.down_proj.weight": "shard_82.safetensors",
69
+ "model.layers.31.input_layernorm.weight": "shard_82.safetensors",
70
+ "model.layers.31.post_attention_layernorm.weight": "shard_82.safetensors",
71
+ "model.norm.weight": "shard_82.safetensors",
72
+ "lm_head.weight": "shard_82.safetensors"
73
+ },
74
+ "safetensors_info": {
75
+ "description": "SafeTensors format provides secure, fast, and zero-copy tensor serialization",
76
+ "total_shards": 82,
77
+ "shard_naming": "shard_01.safetensors through shard_82.safetensors",
78
+ "parameters": "70B",
79
+ "precision": "float16",
80
+ "shard_size_gb": 1.57,
81
+ "total_size_gb": 120.0,
82
+ "shard_distribution": "Each transformer layer distributed across ~2.5 shards for balanced loading",
83
+ "benefits": [
84
+ "No arbitrary code execution during loading",
85
+ "Lazy loading support for memory efficiency",
86
+ "Fast deserialization without pickle",
87
+ "Tensor metadata validation",
88
+ "Cross-platform compatibility",
89
+ "Memory-mapped file support",
90
+ "Parallel loading across multiple GPUs"
91
+ ],
92
+ "verification": "Each file includes SHA256 checksum for integrity verification",
93
+ "status": "released"
94
+ },
95
+ "shard_structure": {
96
+ "embedding_layer": {
97
+ "shards": ["shard_01.safetensors"],
98
+ "size_gb": 1.57
99
+ },
100
+ "transformer_layers": {
101
+ "layers_per_shard": "~0.39",
102
+ "shards_range": "shard_02.safetensors to shard_81.safetensors",
103
+ "size_per_shard_gb": 1.57
104
+ },
105
+ "output_layer": {
106
+ "shards": ["shard_82.safetensors"],
107
+ "includes": ["model.norm.weight", "lm_head.weight"],
108
+ "size_gb": 1.57
109
+ }
110
+ }
111
+ } {
112
  "total_size": 140737488355328,
113
  "format": "safetensors",
114
  "model_name": "DeepXR/Helion-2.5-Rnd",