Trouter-Library committed
Commit ab0856f · verified · 1 parent: 8d9d189

Update model.safetensors.index.json

Files changed (1): model.safetensors.index.json (+16 -9)
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size": 128849018880,
+    "total_size": 140323266560,
     "format": "safetensors",
     "model_name": "DeepXR/Helion-2.5-Rnd",
     "version": "2.5.0-rnd",
@@ -9,9 +9,10 @@
     "total_shards": 83,
     "created_at": "2025-01-30T00:00:00Z",
     "sha256_checksums_available": true,
-    "shard_size_bytes": 1686376448,
-    "shard_size_gb": 1.57,
-    "note": "Model weights distributed across 83 SafeTensors shards (shard_00 to shard_82), each approximately 1.57GB in FP16 precision."
+    "shard_size_bytes": 1690280320,
+    "shard_size_gb": 1.69,
+    "shard_size_gib": 1.57,
+    "note": "Model weights distributed across 83 SafeTensors shards (shard_00 to shard_82), each 1.69GB (1.57GiB) in FP16 precision."
   },
   "weight_map": {
     "model.embed_tokens.weight": "shard_00.safetensors",
@@ -77,8 +78,11 @@
     "shard_naming": "shard_00.safetensors through shard_82.safetensors",
     "parameters": "70B",
     "precision": "float16",
-    "shard_size_gb": 1.57,
-    "total_size_gb": 130.31,
+    "shard_size_gb": 1.69,
+    "shard_size_gib": 1.57,
+    "total_size_gb": 140.27,
+    "total_size_gib": 130.71,
+    "size_note": "1.69GB (decimal) = 1.57GiB (binary). File managers show GiB, imports show GB.",
     "shard_distribution": "Each transformer layer distributed across ~2.6 shards for balanced loading",
     "benefits": [
       "No arbitrary code execution during loading",
@@ -95,17 +99,20 @@
   "shard_structure": {
     "embedding_layer": {
       "shards": ["shard_00.safetensors"],
-      "size_gb": 1.57
+      "size_gb": 1.69,
+      "size_gib": 1.57
     },
     "transformer_layers": {
       "layers_per_shard": "~0.39",
       "shards_range": "shard_01.safetensors to shard_81.safetensors",
-      "size_per_shard_gb": 1.57
+      "size_per_shard_gb": 1.69,
+      "size_per_shard_gib": 1.57
     },
     "output_layer": {
       "shards": ["shard_82.safetensors"],
       "includes": ["model.norm.weight", "lm_head.weight"],
-      "size_gb": 1.57
+      "size_gb": 1.69,
+      "size_gib": 1.57
     }
   }
 }
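
The size_note added in this commit turns on the decimal-vs-binary unit split. A minimal Python sketch of the arithmetic, using shard_size_bytes and total_size exactly as they appear in the updated index (the constants come from the diff above; nothing else is assumed):

    # Reproduce the GB/GiB figures in the updated index.
    shard_size_bytes = 1_690_280_320      # "shard_size_bytes" from this commit
    total_size_bytes = 140_323_266_560    # "total_size" from this commit

    print(shard_size_bytes / 10**9)   # ~1.69   -> "shard_size_gb"
    print(shard_size_bytes / 2**30)   # ~1.57   -> "shard_size_gib"
    print(total_size_bytes / 10**9)   # ~140.32, vs. 140.27 recorded in the index
    print(total_size_bytes / 2**30)   # ~130.69, vs. 130.71 recorded in the index

There is a small residual gap between total_size and the rounded total_size_gb/total_size_gib fields (and between total_size and 83 × shard_size_bytes); shards are rarely exactly uniform, so the per-shard figure reads as nominal.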
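Since weight_map is the part of the index that loaders actually consult (tensor name → shard file), a short sketch of how such a lookup works may help. safe_open is the real safetensors API; the helper name, the relative paths, and the framework choice are illustrative assumptions, not code from this repo:

    import json
    from safetensors import safe_open

    # Hypothetical helper: resolve a tensor to its shard via weight_map,
    # then read only that tensor from the shard file.
    def load_tensor(index_path, tensor_name):
        with open(index_path) as f:
            index = json.load(f)
        shard_file = index["weight_map"][tensor_name]  # e.g. "shard_00.safetensors"
        with safe_open(shard_file, framework="np") as shard:
            return shard.get_tensor(tensor_name)

    # Per the weight_map above, the embedding lives in shard_00:
    emb = load_tensor("model.safetensors.index.json", "model.embed_tokens.weight")

Reading tensor-by-tensor like this reflects the benefits listed in the index: the loader opens only the shard it needs, and nothing is deserialized through pickle, so no arbitrary code runs during loading.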