From e6455067830cdcf8f08e66ffe46188295a78bb1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=92=8B=E5=AD=90=E8=88=AA?= <32448880+zihangJiang@users.noreply.github.com> Date: Sat, 16 Nov 2019 15:28:05 +0800 Subject: [PATCH 1/2] Update optim.py Fix weight decay in param_optimizer to agree with the original (Hugging Face's) implementation --- optim.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/optim.py b/optim.py index 5d2e9c3..64d9772 100644 --- a/optim.py +++ b/optim.py @@ -151,8 +151,8 @@ def optim4GPU(cfg, model): param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'gamma', 'beta'] optimizer_grouped_parameters = [ - {'params': [p for n, p in param_optimizer if n not in no_decay], 'weight_decay_rate': 0.01}, - {'params': [p for n, p in param_optimizer if n in no_decay], 'weight_decay_rate': 0.0}] + {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, + {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}] return BertAdam(optimizer_grouped_parameters, lr=cfg.lr, warmup=cfg.warmup, From 53d7b1bb600a86bc947700a938c9e92c5bf1d47e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=92=8B=E5=AD=90=E8=88=AA?= <32448880+zihangJiang@users.noreply.github.com> Date: Sat, 16 Nov 2019 15:38:18 +0800 Subject: [PATCH 2/2] Update optim.py Fix weight decay in param_optimizer to agree with the original (Hugging Face's) implementation. (The current implementation seems to apply weight decay to all parameters, since "n not in no_decay" is always True.) 
--- optim.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/optim.py b/optim.py index 64d9772..f4a74b8 100644 --- a/optim.py +++ b/optim.py @@ -151,8 +151,8 @@ def optim4GPU(cfg, model): param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'gamma', 'beta'] optimizer_grouped_parameters = [ - {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, - {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}] + {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.01}, + {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0}] return BertAdam(optimizer_grouped_parameters, lr=cfg.lr, warmup=cfg.warmup,