waterhorse1 committed on
Commit
77a1397
·
1 Parent(s): 7569f89
Files changed (3) hide show
  1. config.json +1 -1
  2. model.safetensors +2 -2
  3. quantize_config.json +2 -2
config.json CHANGED
@@ -28,7 +28,7 @@
28
  "vocab_size": 125696,
29
  "quantization_config": {
30
  "bits": 4,
31
- "group_size": 128,
32
  "damp_percent": 0.1,
33
  "desc_act": true,
34
  "sym": true,
 
28
  "vocab_size": 125696,
29
  "quantization_config": {
30
  "bits": 4,
31
+ "group_size": 32,
32
  "damp_percent": 0.1,
33
  "desc_act": true,
34
  "sym": true,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4c9740867ddd687b30025fd9e23140578e04cd29600bf15d58b66ef65575c52f
3
- size 9135747280
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d35828544b57eb5658ba85808499d4e3acf679070df7bc332d462014506f5b5b
3
+ size 9874563608
quantize_config.json CHANGED
@@ -1,6 +1,6 @@
1
  {
2
  "bits": 4,
3
- "group_size": 128,
4
  "damp_percent": 0.1,
5
  "desc_act": true,
6
  "static_groups": false,
@@ -8,4 +8,4 @@
8
  "true_sequential": true,
9
  "model_name_or_path": "Baichuan2-7B-Chat-gptq",
10
  "model_file_base_name": "gptq_model-4bit-128g"
11
- }
 
1
  {
2
  "bits": 4,
3
+ "group_size": 32,
4
  "damp_percent": 0.1,
5
  "desc_act": true,
6
  "static_groups": false,
 
8
  "true_sequential": true,
9
  "model_name_or_path": "Baichuan2-7B-Chat-gptq",
10
  "model_file_base_name": "gptq_model-4bit-128g"
11
+ }