Trouter-Library committed
Commit 2bad6cc · verified · 1 Parent(s): 7082d77

Update model.safetensors.index.json

Files changed (1):
  model.safetensors.index.json (+1 -44)
model.safetensors.index.json CHANGED
@@ -12,7 +12,7 @@
     "shard_size_bytes": 1690280320,
     "shard_size_gb": 1.69,
     "shard_size_gib": 1.57,
-    "note": "Model weights distributed across 83 SafeTensors shards (shard_00 to shard_82), each 1.69GB (1.57GiB) in FP16 precision."
+    "note": "Model weights distributed across 83 SafeTensors shards in FP16 precision"
   },
   "weight_map": {
     "model.embed_tokens.weight": "shard_00.safetensors",
@@ -71,49 +71,6 @@
     "model.layers.31.post_attention_layernorm.weight": "shard_82.safetensors",
     "model.norm.weight": "shard_82.safetensors",
     "lm_head.weight": "shard_82.safetensors"
-  },
-  "safetensors_info": {
-    "description": "SafeTensors format provides secure, fast, and zero-copy tensor serialization",
-    "total_shards": 83,
-    "shard_naming": "shard_00.safetensors through shard_82.safetensors",
-    "parameters": "70B",
-    "precision": "float16",
-    "shard_size_gb": 1.69,
-    "shard_size_gib": 1.57,
-    "total_size_gb": 140.27,
-    "total_size_gib": 130.71,
-    "size_note": "1.69GB (decimal) = 1.57GiB (binary). File managers show GiB, imports show GB.",
-    "shard_distribution": "Each transformer layer distributed across ~2.6 shards for balanced loading",
-    "benefits": [
-      "No arbitrary code execution during loading",
-      "Lazy loading support for memory efficiency",
-      "Fast deserialization without pickle",
-      "Tensor metadata validation",
-      "Cross-platform compatibility",
-      "Memory-mapped file support",
-      "Parallel loading across multiple GPUs"
-    ],
-    "verification": "Each file includes SHA256 checksum for integrity verification",
-    "status": "released"
-  },
-  "shard_structure": {
-    "embedding_layer": {
-      "shards": ["shard_00.safetensors"],
-      "size_gb": 1.69,
-      "size_gib": 1.57
-    },
-    "transformer_layers": {
-      "layers_per_shard": "~0.39",
-      "shards_range": "shard_01.safetensors to shard_81.safetensors",
-      "size_per_shard_gb": 1.69,
-      "size_per_shard_gib": 1.57
-    },
-    "output_layer": {
-      "shards": ["shard_82.safetensors"],
-      "includes": ["model.norm.weight", "lm_head.weight"],
-      "size_gb": 1.69,
-      "size_gib": 1.57
-    }
   }
 } {
   "total_size": 140737488355328,
 