{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9994372537985369,
"eval_steps": 500,
"global_step": 444,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.022509848058525603,
"grad_norm": 2.8050291538238525,
"learning_rate": 2.222222222222222e-06,
"loss": 6.1587,
"step": 10
},
{
"epoch": 0.04501969611705121,
"grad_norm": 1.1963684558868408,
"learning_rate": 4.444444444444444e-06,
"loss": 5.506,
"step": 20
},
{
"epoch": 0.06752954417557681,
"grad_norm": 0.5479600429534912,
"learning_rate": 6.666666666666667e-06,
"loss": 4.7216,
"step": 30
},
{
"epoch": 0.09003939223410241,
"grad_norm": 0.39808419346809387,
"learning_rate": 8.888888888888888e-06,
"loss": 4.3317,
"step": 40
},
{
"epoch": 0.11254924029262803,
"grad_norm": 0.3332847058773041,
"learning_rate": 9.87468671679198e-06,
"loss": 4.1643,
"step": 50
},
{
"epoch": 0.13505908835115363,
"grad_norm": 0.37553367018699646,
"learning_rate": 9.62406015037594e-06,
"loss": 4.2239,
"step": 60
},
{
"epoch": 0.15756893640967923,
"grad_norm": 0.34793761372566223,
"learning_rate": 9.3734335839599e-06,
"loss": 4.0662,
"step": 70
},
{
"epoch": 0.18007878446820483,
"grad_norm": 0.3539280593395233,
"learning_rate": 9.12280701754386e-06,
"loss": 4.1014,
"step": 80
},
{
"epoch": 0.20258863252673046,
"grad_norm": 0.31421107053756714,
"learning_rate": 8.87218045112782e-06,
"loss": 3.9875,
"step": 90
},
{
"epoch": 0.22509848058525606,
"grad_norm": 0.3437129259109497,
"learning_rate": 8.62155388471178e-06,
"loss": 3.956,
"step": 100
},
{
"epoch": 0.24760832864378166,
"grad_norm": 0.34449219703674316,
"learning_rate": 8.370927318295739e-06,
"loss": 4.1064,
"step": 110
},
{
"epoch": 0.27011817670230726,
"grad_norm": 0.3443593978881836,
"learning_rate": 8.1203007518797e-06,
"loss": 3.964,
"step": 120
},
{
"epoch": 0.2926280247608329,
"grad_norm": 0.3369867503643036,
"learning_rate": 7.86967418546366e-06,
"loss": 3.9857,
"step": 130
},
{
"epoch": 0.31513787281935846,
"grad_norm": 0.37080493569374084,
"learning_rate": 7.61904761904762e-06,
"loss": 3.961,
"step": 140
},
{
"epoch": 0.3376477208778841,
"grad_norm": 0.2911035418510437,
"learning_rate": 7.368421052631579e-06,
"loss": 3.8308,
"step": 150
},
{
"epoch": 0.36015756893640966,
"grad_norm": 0.29406118392944336,
"learning_rate": 7.117794486215539e-06,
"loss": 3.911,
"step": 160
},
{
"epoch": 0.3826674169949353,
"grad_norm": 0.3039765954017639,
"learning_rate": 6.867167919799499e-06,
"loss": 3.8607,
"step": 170
},
{
"epoch": 0.4051772650534609,
"grad_norm": 0.31193187832832336,
"learning_rate": 6.616541353383459e-06,
"loss": 3.9159,
"step": 180
},
{
"epoch": 0.4276871131119865,
"grad_norm": 0.30840519070625305,
"learning_rate": 6.365914786967419e-06,
"loss": 3.8885,
"step": 190
},
{
"epoch": 0.4501969611705121,
"grad_norm": 0.32841718196868896,
"learning_rate": 6.115288220551378e-06,
"loss": 3.8233,
"step": 200
},
{
"epoch": 0.4727068092290377,
"grad_norm": 0.2887263596057892,
"learning_rate": 5.864661654135339e-06,
"loss": 3.879,
"step": 210
},
{
"epoch": 0.4952166572875633,
"grad_norm": 0.3095759451389313,
"learning_rate": 5.6140350877192985e-06,
"loss": 3.823,
"step": 220
},
{
"epoch": 0.5177265053460889,
"grad_norm": 0.3127283453941345,
"learning_rate": 5.363408521303258e-06,
"loss": 3.8775,
"step": 230
},
{
"epoch": 0.5402363534046145,
"grad_norm": 0.31057149171829224,
"learning_rate": 5.112781954887218e-06,
"loss": 3.7333,
"step": 240
},
{
"epoch": 0.5627462014631401,
"grad_norm": 0.3335554897785187,
"learning_rate": 4.862155388471178e-06,
"loss": 3.906,
"step": 250
},
{
"epoch": 0.5852560495216658,
"grad_norm": 0.2978381812572479,
"learning_rate": 4.611528822055138e-06,
"loss": 3.8683,
"step": 260
},
{
"epoch": 0.6077658975801913,
"grad_norm": 0.29987016320228577,
"learning_rate": 4.360902255639098e-06,
"loss": 3.8687,
"step": 270
},
{
"epoch": 0.6302757456387169,
"grad_norm": 0.274650514125824,
"learning_rate": 4.110275689223058e-06,
"loss": 3.6737,
"step": 280
},
{
"epoch": 0.6527855936972425,
"grad_norm": 0.2877696454524994,
"learning_rate": 3.859649122807018e-06,
"loss": 3.8675,
"step": 290
},
{
"epoch": 0.6752954417557682,
"grad_norm": 0.34041789174079895,
"learning_rate": 3.6090225563909775e-06,
"loss": 3.8597,
"step": 300
},
{
"epoch": 0.6978052898142938,
"grad_norm": 0.2784375548362732,
"learning_rate": 3.3583959899749375e-06,
"loss": 3.7454,
"step": 310
},
{
"epoch": 0.7203151378728193,
"grad_norm": 0.2802869379520416,
"learning_rate": 3.107769423558897e-06,
"loss": 3.6879,
"step": 320
},
{
"epoch": 0.7428249859313449,
"grad_norm": 0.3532256782054901,
"learning_rate": 2.8571428571428573e-06,
"loss": 3.7785,
"step": 330
},
{
"epoch": 0.7653348339898706,
"grad_norm": 0.2827741205692291,
"learning_rate": 2.606516290726817e-06,
"loss": 3.6453,
"step": 340
},
{
"epoch": 0.7878446820483962,
"grad_norm": 0.30722615122795105,
"learning_rate": 2.355889724310777e-06,
"loss": 3.6902,
"step": 350
},
{
"epoch": 0.8103545301069218,
"grad_norm": 0.29311564564704895,
"learning_rate": 2.105263157894737e-06,
"loss": 3.7355,
"step": 360
},
{
"epoch": 0.8328643781654473,
"grad_norm": 0.2665780484676361,
"learning_rate": 1.8546365914786967e-06,
"loss": 3.7245,
"step": 370
},
{
"epoch": 0.855374226223973,
"grad_norm": 0.2625872790813446,
"learning_rate": 1.6040100250626568e-06,
"loss": 3.6712,
"step": 380
},
{
"epoch": 0.8778840742824986,
"grad_norm": 0.30255696177482605,
"learning_rate": 1.3533834586466167e-06,
"loss": 3.6844,
"step": 390
},
{
"epoch": 0.9003939223410242,
"grad_norm": 0.29376691579818726,
"learning_rate": 1.1027568922305765e-06,
"loss": 3.665,
"step": 400
},
{
"epoch": 0.9229037703995498,
"grad_norm": 0.28853243589401245,
"learning_rate": 8.521303258145364e-07,
"loss": 3.6584,
"step": 410
},
{
"epoch": 0.9454136184580754,
"grad_norm": 0.32992106676101685,
"learning_rate": 6.015037593984962e-07,
"loss": 3.7337,
"step": 420
},
{
"epoch": 0.967923466516601,
"grad_norm": 0.3192669749259949,
"learning_rate": 3.5087719298245616e-07,
"loss": 3.591,
"step": 430
},
{
"epoch": 0.9904333145751266,
"grad_norm": 0.30559441447257996,
"learning_rate": 1.0025062656641605e-07,
"loss": 3.7753,
"step": 440
}
],
"logging_steps": 10,
"max_steps": 444,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 0,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.61753997427422e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}