Training in progress, epoch 2

Files changed:
- model.safetensors +1 -1
- run-0/checkpoint-1070/config.json +25 -0
- run-0/checkpoint-1070/model.safetensors +3 -0
- run-0/checkpoint-1070/optimizer.pt +3 -0
- run-0/checkpoint-1070/rng_state.pth +3 -0
- run-0/checkpoint-1070/scheduler.pt +3 -0
- run-0/checkpoint-1070/special_tokens_map.json +7 -0
- run-0/checkpoint-1070/tokenizer.json +0 -0
- run-0/checkpoint-1070/tokenizer_config.json +55 -0
- run-0/checkpoint-1070/trainer_state.json +58 -0
- run-0/checkpoint-1070/training_args.bin +3 -0
- run-0/checkpoint-1070/vocab.txt +0 -0
- run-0/checkpoint-1605/config.json +25 -0
- run-0/checkpoint-1605/model.safetensors +3 -0
- run-0/checkpoint-1605/optimizer.pt +3 -0
- run-0/checkpoint-1605/rng_state.pth +3 -0
- run-0/checkpoint-1605/scheduler.pt +3 -0
- run-0/checkpoint-1605/special_tokens_map.json +7 -0
- run-0/checkpoint-1605/tokenizer.json +0 -0
- run-0/checkpoint-1605/tokenizer_config.json +55 -0
- run-0/checkpoint-1605/trainer_state.json +74 -0
- run-0/checkpoint-1605/training_args.bin +3 -0
- run-0/checkpoint-1605/vocab.txt +0 -0
- run-1/checkpoint-2138/config.json +25 -0
- run-1/checkpoint-2138/model.safetensors +3 -0
- run-1/checkpoint-2138/optimizer.pt +3 -0
- run-1/checkpoint-2138/rng_state.pth +3 -0
- run-1/checkpoint-2138/scheduler.pt +3 -0
- run-1/checkpoint-2138/special_tokens_map.json +7 -0
- run-1/checkpoint-2138/tokenizer.json +0 -0
- run-1/checkpoint-2138/tokenizer_config.json +55 -0
- run-1/checkpoint-2138/trainer_state.json +63 -0
- run-1/checkpoint-2138/training_args.bin +3 -0
- run-1/checkpoint-2138/vocab.txt +0 -0
- run-1/checkpoint-4276/config.json +25 -0
- run-1/checkpoint-4276/model.safetensors +3 -0
- run-1/checkpoint-4276/optimizer.pt +3 -0
- run-1/checkpoint-4276/rng_state.pth +3 -0
- run-1/checkpoint-4276/scheduler.pt +3 -0
- run-1/checkpoint-4276/special_tokens_map.json +7 -0
- run-1/checkpoint-4276/tokenizer.json +0 -0
- run-1/checkpoint-4276/tokenizer_config.json +55 -0
- run-1/checkpoint-4276/trainer_state.json +100 -0
- run-1/checkpoint-4276/training_args.bin +3 -0
- run-1/checkpoint-4276/vocab.txt +0 -0
- runs/Mar04_02-28-38_78dd22f13cdf/events.out.tfevents.1709519746.78dd22f13cdf.294.2 +2 -2
- runs/Mar04_02-28-38_78dd22f13cdf/events.out.tfevents.1709519889.78dd22f13cdf.294.3 +3 -0
- training_args.bin +1 -1
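
Note: the run-0/ and run-1/ checkpoints in this commit come from a hyperparameter search — each trainer_state.json below records "is_hyper_param_search": true together with per-trial "trial_params" (learning_rate, num_train_epochs, per_device_train_batch_size, seed). As a minimal sketch of how such run-N/checkpoint-M directories are typically produced with the transformers Trainer, assuming an Optuna backend, the GLUE CoLA dataset, and illustrative (not the actual) search ranges:

import numpy as np
import evaluate
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

dataset = load_dataset("glue", "cola")
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
encoded = dataset.map(lambda b: tokenizer(b["sentence"], truncation=True), batched=True)
metric = evaluate.load("glue", "cola")

def model_init():
    # A fresh model per trial, matching the config.json files in this commit.
    return AutoModelForSequenceClassification.from_pretrained(
        "distilbert-base-uncased", num_labels=2)

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    return metric.compute(predictions=np.argmax(logits, axis=-1), references=labels)

args = TrainingArguments("distilbert-base-uncased-finetuned-cola",
                         evaluation_strategy="epoch", save_strategy="epoch")
trainer = Trainer(model_init=model_init, args=args,
                  train_dataset=encoded["train"], eval_dataset=encoded["validation"],
                  tokenizer=tokenizer, compute_metrics=compute_metrics)

# Each trial writes checkpoints under run-<n>/checkpoint-<step>, as in this commit.
best = trainer.hyperparameter_search(
    hp_space=lambda trial: {  # illustrative ranges only
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "num_train_epochs": trial.suggest_int("num_train_epochs", 2, 5),
        "per_device_train_batch_size": trial.suggest_categorical(
            "per_device_train_batch_size", [4, 16, 32]),
        "seed": trial.suggest_int("seed", 1, 40),
    },
    backend="optuna", direction="maximize", n_trials=5)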
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:61a9f0d1597105b2ae2c4068c002b554e7fab84818ec9eb58143a840cbff4794
 size 267832560
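
These entries are Git LFS pointer files, not the binaries themselves: the repository stores only the sha256 oid and byte size, and the weights live in LFS storage. A small sketch (plain Python, hypothetical local path) of checking a downloaded file against the pointer's oid:

import hashlib

def lfs_oid(path, chunk=1 << 20):
    # Return the sha256 hex digest that Git LFS records as the pointer's oid.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

# Hypothetical local copy of the file referenced by the pointer above.
assert lfs_oid("model.safetensors") == "61a9f0d1597105b2ae2c4068c002b554e7fab84818ec9eb58143a840cbff4794"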
run-0/checkpoint-1070/config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "distilbert-base-uncased",
+  "activation": "gelu",
+  "architectures": [
+    "DistilBertForSequenceClassification"
+  ],
+  "attention_dropout": 0.1,
+  "dim": 768,
+  "dropout": 0.1,
+  "hidden_dim": 3072,
+  "initializer_range": 0.02,
+  "max_position_embeddings": 512,
+  "model_type": "distilbert",
+  "n_heads": 12,
+  "n_layers": 6,
+  "pad_token_id": 0,
+  "problem_type": "single_label_classification",
+  "qa_dropout": 0.1,
+  "seq_classif_dropout": 0.2,
+  "sinusoidal_pos_embds": false,
+  "tie_weights_": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.38.2",
+  "vocab_size": 30522
+}
run-0/checkpoint-1070/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0b83b744bd34547e593e587e47993f9deff931003bb95b350c7af99a302309e
+size 267832560

run-0/checkpoint-1070/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9cd73d3b70d45a4e56ef2f8c4255ee4612a2e3d321b98349093187b8934e7010
+size 535727290

run-0/checkpoint-1070/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:add3dd6d8b9f04b2da0b21b3141bd6c35e192c09abf0ae08675ad972ac1c8384
+size 14244

run-0/checkpoint-1070/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89a69ebdcb9cbad7bddba9d2f7a2c470c830a7cf12d53a5b8cd958b53b2ab6cf
+size 1064

run-0/checkpoint-1070/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}

run-0/checkpoint-1070/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
run-0/checkpoint-1070/tokenizer_config.json
ADDED
@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "DistilBertTokenizer",
+  "unk_token": "[UNK]"
+}
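
The tokenizer_config.json, special_tokens_map.json, tokenizer.json, and vocab.txt saved in each checkpoint are enough to rebuild the tokenizer. A minimal sketch, assuming the checkpoint directory has been downloaded locally:

from transformers import AutoTokenizer

# Hypothetical local path to the checkpoint directory from this commit.
tok = AutoTokenizer.from_pretrained("run-0/checkpoint-1070")
print(tok("This sentence is grammatical.", truncation=True, max_length=512))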
run-0/checkpoint-1070/trainer_state.json
ADDED
@@ -0,0 +1,58 @@
+{
+  "best_metric": 0.49357249556059846,
+  "best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-0/checkpoint-1070",
+  "epoch": 2.0,
+  "eval_steps": 500,
+  "global_step": 1070,
+  "is_hyper_param_search": true,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.93,
+      "grad_norm": 4.838195323944092,
+      "learning_rate": 2.542737154981418e-05,
+      "loss": 0.5194,
+      "step": 500
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 0.49024975299835205,
+      "eval_matthews_correlation": 0.43262718467553657,
+      "eval_runtime": 2.2476,
+      "eval_samples_per_second": 464.059,
+      "eval_steps_per_second": 29.365,
+      "step": 535
+    },
+    {
+      "epoch": 1.87,
+      "grad_norm": 5.74407434463501,
+      "learning_rate": 1.3921773563472922e-05,
+      "loss": 0.3182,
+      "step": 1000
+    },
+    {
+      "epoch": 2.0,
+      "eval_loss": 0.5491473078727722,
+      "eval_matthews_correlation": 0.49357249556059846,
+      "eval_runtime": 1.7076,
+      "eval_samples_per_second": 610.785,
+      "eval_steps_per_second": 38.65,
+      "step": 1070
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 1605,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "total_flos": 86031883468536.0,
+  "train_batch_size": 16,
+  "trial_name": null,
+  "trial_params": {
+    "learning_rate": 3.693296953615544e-05,
+    "num_train_epochs": 3,
+    "per_device_train_batch_size": 16,
+    "seed": 9
+  }
+}
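
The eval_matthews_correlation values logged above are the standard CoLA metric: the Matthews correlation coefficient between predicted and gold acceptability labels. A small sketch of how such a value is computed, with illustrative labels rather than the actual validation outputs:

from sklearn.metrics import matthews_corrcoef

# y_true: gold CoLA labels (0 = unacceptable, 1 = acceptable); y_pred: model predictions.
y_true = [1, 0, 1, 1, 0]   # illustrative values only
y_pred = [1, 0, 0, 1, 0]
print(matthews_corrcoef(y_true, y_pred))  # the quantity logged as eval_matthews_correlation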
run-0/checkpoint-1070/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3addc0a08604c4a6c2673cf97006fa11946224a2547ad12bbf16d166666cf45b
+size 4984

run-0/checkpoint-1070/vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.
run-0/checkpoint-1605/config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "distilbert-base-uncased",
+  "activation": "gelu",
+  "architectures": [
+    "DistilBertForSequenceClassification"
+  ],
+  "attention_dropout": 0.1,
+  "dim": 768,
+  "dropout": 0.1,
+  "hidden_dim": 3072,
+  "initializer_range": 0.02,
+  "max_position_embeddings": 512,
+  "model_type": "distilbert",
+  "n_heads": 12,
+  "n_layers": 6,
+  "pad_token_id": 0,
+  "problem_type": "single_label_classification",
+  "qa_dropout": 0.1,
+  "seq_classif_dropout": 0.2,
+  "sinusoidal_pos_embds": false,
+  "tie_weights_": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.38.2",
+  "vocab_size": 30522
+}
run-0/checkpoint-1605/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4bdaf7943b482689de3f0a447349d070a284586c5f04194b5fe1d8e80b01b0e
+size 267832560

run-0/checkpoint-1605/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e007842fbddf48cd55037a20ff83918cc3f87db180cc83d12596992737cd5665
+size 535727290

run-0/checkpoint-1605/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:077934fbf5a5d9d1e451ef6fa416a906729b6234eb30b1dc44ec361bd9ba1274
+size 14244

run-0/checkpoint-1605/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7dc3857d9eb77942f26f91c9977d164c41ad477679e8f359e4d4de0d4942451c
+size 1064

run-0/checkpoint-1605/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}

run-0/checkpoint-1605/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
run-0/checkpoint-1605/tokenizer_config.json
ADDED
@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "DistilBertTokenizer",
+  "unk_token": "[UNK]"
+}
run-0/checkpoint-1605/trainer_state.json
ADDED
@@ -0,0 +1,74 @@
+{
+  "best_metric": 0.5039642659976749,
+  "best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-0/checkpoint-1605",
+  "epoch": 3.0,
+  "eval_steps": 500,
+  "global_step": 1605,
+  "is_hyper_param_search": true,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.93,
+      "grad_norm": 4.838195323944092,
+      "learning_rate": 2.542737154981418e-05,
+      "loss": 0.5194,
+      "step": 500
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 0.49024975299835205,
+      "eval_matthews_correlation": 0.43262718467553657,
+      "eval_runtime": 2.2476,
+      "eval_samples_per_second": 464.059,
+      "eval_steps_per_second": 29.365,
+      "step": 535
+    },
+    {
+      "epoch": 1.87,
+      "grad_norm": 5.74407434463501,
+      "learning_rate": 1.3921773563472922e-05,
+      "loss": 0.3182,
+      "step": 1000
+    },
+    {
+      "epoch": 2.0,
+      "eval_loss": 0.5491473078727722,
+      "eval_matthews_correlation": 0.49357249556059846,
+      "eval_runtime": 1.7076,
+      "eval_samples_per_second": 610.785,
+      "eval_steps_per_second": 38.65,
+      "step": 1070
+    },
+    {
+      "epoch": 2.8,
+      "grad_norm": 18.21695899963379,
+      "learning_rate": 2.416175577131664e-06,
+      "loss": 0.1905,
+      "step": 1500
+    },
+    {
+      "epoch": 3.0,
+      "eval_loss": 0.7505267858505249,
+      "eval_matthews_correlation": 0.5039642659976749,
+      "eval_runtime": 0.7556,
+      "eval_samples_per_second": 1380.405,
+      "eval_steps_per_second": 87.351,
+      "step": 1605
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 1605,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "total_flos": 128908113493284.0,
+  "train_batch_size": 16,
+  "trial_name": null,
+  "trial_params": {
+    "learning_rate": 3.693296953615544e-05,
+    "num_train_epochs": 3,
+    "per_device_train_batch_size": 16,
+    "seed": 9
+  }
+}
run-0/checkpoint-1605/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3addc0a08604c4a6c2673cf97006fa11946224a2547ad12bbf16d166666cf45b
+size 4984

run-0/checkpoint-1605/vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.
run-1/checkpoint-2138/config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "distilbert-base-uncased",
+  "activation": "gelu",
+  "architectures": [
+    "DistilBertForSequenceClassification"
+  ],
+  "attention_dropout": 0.1,
+  "dim": 768,
+  "dropout": 0.1,
+  "hidden_dim": 3072,
+  "initializer_range": 0.02,
+  "max_position_embeddings": 512,
+  "model_type": "distilbert",
+  "n_heads": 12,
+  "n_layers": 6,
+  "pad_token_id": 0,
+  "problem_type": "single_label_classification",
+  "qa_dropout": 0.1,
+  "seq_classif_dropout": 0.2,
+  "sinusoidal_pos_embds": false,
+  "tie_weights_": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.38.2",
+  "vocab_size": 30522
+}
run-1/checkpoint-2138/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0d46842c11f7003319cc5a53597fcf099c5f2d69350478889e45c7bfa9f5d68
+size 267832560

run-1/checkpoint-2138/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6553afa883babb9bfc4d8aa0f55d2ad1f8a2ae31e9f11804d55282de8db5cf1
+size 535727290

run-1/checkpoint-2138/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a22d1498697868d4fd51fedc2d243dfeb0c202fd49e378d2e958096437df1b02
+size 14244

run-1/checkpoint-2138/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abec49b72941ac05681ffdea503b4983b2576df3a7538bc1f229b8bd74e024dc
+size 1064

run-1/checkpoint-2138/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}

run-1/checkpoint-2138/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
run-1/checkpoint-2138/tokenizer_config.json
ADDED
@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "DistilBertTokenizer",
+  "unk_token": "[UNK]"
+}
run-1/checkpoint-2138/trainer_state.json
ADDED
@@ -0,0 +1,63 @@
+{
+  "best_metric": 0.15833456940979748,
+  "best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-1/checkpoint-2138",
+  "epoch": 1.0,
+  "eval_steps": 500,
+  "global_step": 2138,
+  "is_hyper_param_search": true,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.23,
+      "grad_norm": 2.2052836418151855,
+      "learning_rate": 5.527461334920752e-05,
+      "loss": 0.6278,
+      "step": 500
+    },
+    {
+      "epoch": 0.47,
+      "grad_norm": 1.5090547800064087,
+      "learning_rate": 5.1842260309639244e-05,
+      "loss": 0.6192,
+      "step": 1000
+    },
+    {
+      "epoch": 0.7,
+      "grad_norm": 7.86550760269165,
+      "learning_rate": 4.840990727007098e-05,
+      "loss": 0.6103,
+      "step": 1500
+    },
+    {
+      "epoch": 0.94,
+      "grad_norm": 2.385833740234375,
+      "learning_rate": 4.49775542305027e-05,
+      "loss": 0.5852,
+      "step": 2000
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 0.6027922034263611,
+      "eval_matthews_correlation": 0.15833456940979748,
+      "eval_runtime": 0.7524,
+      "eval_samples_per_second": 1386.172,
+      "eval_steps_per_second": 87.716,
+      "step": 2138
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 8552,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 4,
+  "save_steps": 500,
+  "total_flos": 32633550639216.0,
+  "train_batch_size": 4,
+  "trial_name": null,
+  "trial_params": {
+    "learning_rate": 5.87069663887758e-05,
+    "num_train_epochs": 4,
+    "per_device_train_batch_size": 4,
+    "seed": 11
+  }
+}
run-1/checkpoint-2138/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0aee351340990176090794d80423bc66843abb9d835babac4ae2c2cf2a474913
+size 4984

run-1/checkpoint-2138/vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.
run-1/checkpoint-4276/config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "distilbert-base-uncased",
+  "activation": "gelu",
+  "architectures": [
+    "DistilBertForSequenceClassification"
+  ],
+  "attention_dropout": 0.1,
+  "dim": 768,
+  "dropout": 0.1,
+  "hidden_dim": 3072,
+  "initializer_range": 0.02,
+  "max_position_embeddings": 512,
+  "model_type": "distilbert",
+  "n_heads": 12,
+  "n_layers": 6,
+  "pad_token_id": 0,
+  "problem_type": "single_label_classification",
+  "qa_dropout": 0.1,
+  "seq_classif_dropout": 0.2,
+  "sinusoidal_pos_embds": false,
+  "tie_weights_": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.38.2",
+  "vocab_size": 30522
+}
run-1/checkpoint-4276/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61a9f0d1597105b2ae2c4068c002b554e7fab84818ec9eb58143a840cbff4794
+size 267832560

run-1/checkpoint-4276/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83b7f500638b0c8d1d418712f5fd073a635afc3ca31fd2acdedc668af0877953
+size 535727290

run-1/checkpoint-4276/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f61a246d0a6581c82fe4876987da0fa2112a41aa062420f2d0b74fe07b53d3b1
+size 14244

run-1/checkpoint-4276/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a06709d34c11373eed98ab1a6efdbf7e4f4ea58d126782bc9a4ccfbf9395bc3f
+size 1064

run-1/checkpoint-4276/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}

run-1/checkpoint-4276/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
run-1/checkpoint-4276/tokenizer_config.json
ADDED
@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "DistilBertTokenizer",
+  "unk_token": "[UNK]"
+}
run-1/checkpoint-4276/trainer_state.json
ADDED
@@ -0,0 +1,100 @@
+{
+  "best_metric": 0.35221451726824415,
+  "best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-1/checkpoint-4276",
+  "epoch": 2.0,
+  "eval_steps": 500,
+  "global_step": 4276,
+  "is_hyper_param_search": true,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.23,
+      "grad_norm": 2.2052836418151855,
+      "learning_rate": 5.527461334920752e-05,
+      "loss": 0.6278,
+      "step": 500
+    },
+    {
+      "epoch": 0.47,
+      "grad_norm": 1.5090547800064087,
+      "learning_rate": 5.1842260309639244e-05,
+      "loss": 0.6192,
+      "step": 1000
+    },
+    {
+      "epoch": 0.7,
+      "grad_norm": 7.86550760269165,
+      "learning_rate": 4.840990727007098e-05,
+      "loss": 0.6103,
+      "step": 1500
+    },
+    {
+      "epoch": 0.94,
+      "grad_norm": 2.385833740234375,
+      "learning_rate": 4.49775542305027e-05,
+      "loss": 0.5852,
+      "step": 2000
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 0.6027922034263611,
+      "eval_matthews_correlation": 0.15833456940979748,
+      "eval_runtime": 0.7524,
+      "eval_samples_per_second": 1386.172,
+      "eval_steps_per_second": 87.716,
+      "step": 2138
+    },
+    {
+      "epoch": 1.17,
+      "grad_norm": 23.48023796081543,
+      "learning_rate": 4.154520119093442e-05,
+      "loss": 0.5672,
+      "step": 2500
+    },
+    {
+      "epoch": 1.4,
+      "grad_norm": 34.38399124145508,
+      "learning_rate": 3.811284815136614e-05,
+      "loss": 0.5388,
+      "step": 3000
+    },
+    {
+      "epoch": 1.64,
+      "grad_norm": 15.397554397583008,
+      "learning_rate": 3.468049511179787e-05,
+      "loss": 0.5675,
+      "step": 3500
+    },
+    {
+      "epoch": 1.87,
+      "grad_norm": 4.596796035766602,
+      "learning_rate": 3.124814207222959e-05,
+      "loss": 0.5658,
+      "step": 4000
+    },
+    {
+      "epoch": 2.0,
+      "eval_loss": 0.7189152836799622,
+      "eval_matthews_correlation": 0.35221451726824415,
+      "eval_runtime": 0.816,
+      "eval_samples_per_second": 1278.13,
+      "eval_steps_per_second": 80.879,
+      "step": 4276
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 8552,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 4,
+  "save_steps": 500,
+  "total_flos": 65223117962472.0,
+  "train_batch_size": 4,
+  "trial_name": null,
+  "trial_params": {
+    "learning_rate": 5.87069663887758e-05,
+    "num_train_epochs": 4,
+    "per_device_train_batch_size": 4,
+    "seed": 11
+  }
+}
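
With the trainer_state.json files above in place, the trials can be compared directly from disk. A short sketch, assuming the run-*/checkpoint-* directories are checked out locally:

import json
from pathlib import Path

# Pick the latest checkpoint of each run and report its best Matthews correlation.
for run in sorted(Path(".").glob("run-*")):
    ckpt = max(run.glob("checkpoint-*"), key=lambda p: int(p.name.split("-")[1]))
    state = json.loads((ckpt / "trainer_state.json").read_text())
    print(run.name, ckpt.name, state["best_metric"], state["trial_params"])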
run-1/checkpoint-4276/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0aee351340990176090794d80423bc66843abb9d835babac4ae2c2cf2a474913
+size 4984

run-1/checkpoint-4276/vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.
runs/Mar04_02-28-38_78dd22f13cdf/events.out.tfevents.1709519746.78dd22f13cdf.294.2
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:1df6d7618d4451d701aa47e48ba84ce991f3dea55a7ac2a3ae57393c67b8e45d
+size 6552

runs/Mar04_02-28-38_78dd22f13cdf/events.out.tfevents.1709519889.78dd22f13cdf.294.3
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ebf73eb11ca7057d9a3688f6d5c8d1d05856b56c17a50dc07887e367083dec33
+size 7128

training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:0aee351340990176090794d80423bc66843abb9d835babac4ae2c2cf2a474913
 size 4984