utils.py
import os
import random
import torch
import numpy as np
from torch.optim import AdamW
from transformers import get_linear_schedule_with_warmup


def prompt_direct_inferring_emotion(config, context, target):
    """Build an emotion-inference prompt from a conversation context and a target utterance."""
    prompt = (
        f"Given the conversation: {context}. "
        + config.instruct.format(target=target)
        + " Choose from: {}.".format(", ".join(config.label_list))
    )
    return prompt
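
# A minimal usage sketch with hypothetical values: `config.instruct` is assumed to be a
# template string with a `{target}` placeholder and `config.label_list` a list of emotion
# labels; neither is defined in this file, so both are illustrative.
#
#   config.instruct = "What is the emotion expressed in the utterance '{target}'?"
#   config.label_list = ["happy", "sad", "angry", "neutral"]
#   prompt = prompt_direct_inferring_emotion(config, "A: Hi! B: Great to see you!",
#                                            "Great to see you!")
#   # -> "Given the conversation: A: Hi! B: Great to see you!. What is the emotion
#   #    expressed in the utterance 'Great to see you!'? Choose from: happy, sad, angry, neutral."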


def set_seed(seed):
    """Seed every relevant RNG so runs are reproducible."""
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # covers all devices when using multi-GPU
    np.random.seed(seed)  # NumPy RNG
    random.seed(seed)  # Python's built-in RNG
    # torch.use_deterministic_algorithms(True)  # opt into fully deterministic ops if needed
    torch.backends.cudnn.enabled = False  # disable cuDNN to avoid non-deterministic kernels
    torch.backends.cudnn.benchmark = False
    os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'  # deterministic cuBLAS workspace
    os.environ['PYTHONHASHSEED'] = str(seed)
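
# Usage note (a sketch, not from this file): call set_seed() once at startup, before the
# model, optimizer, or any CUDA tensors are created, so the environment variables and
# cuDNN flags take effect before the first CUDA kernel launch.
#
#   set_seed(42)
#   model = build_model()  # hypothetical factory, for illustration only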


def load_params_LLM(config, model, fold_data):
    """Attach an AdamW optimizer, a linear-warmup scheduler, and a ScoreManager to config."""
    # Parameters whose names contain these substrings are excluded from weight decay.
    no_decay = ['bias', 'LayerNorm.weight']
    named = list(model.named_parameters())
    optimizer_grouped_parameters = [
        {'params': [p for n, p in named if not any(nd in n for nd in no_decay)],
         'lr': float(config.bert_lr),
         'weight_decay': float(config.weight_decay)},
        {'params': [p for n, p in named if any(nd in n for nd in no_decay)],
         'lr': float(config.bert_lr),
         'weight_decay': 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, eps=float(config.adam_epsilon))
    # One optimizer step per batch: total steps = epochs * batches per epoch.
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=config.warmup_steps,
                                                num_training_steps=config.epoch_size * len(fold_data))
    config.score_manager = ScoreManager()
    config.optimizer = optimizer
    config.scheduler = scheduler
    return config
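
# A minimal usage sketch with hypothetical config values; the field names below
# (bert_lr, weight_decay, adam_epsilon, warmup_steps, epoch_size) are the ones this
# function reads, but the values and the SimpleNamespace container are illustrative.
#
#   from types import SimpleNamespace
#   config = SimpleNamespace(bert_lr=2e-5, weight_decay=0.01, adam_epsilon=1e-8,
#                            warmup_steps=100, epoch_size=5)
#   config = load_params_LLM(config, model, train_loader)  # train_loader: a DataLoader
#   # config.optimizer and config.scheduler are then stepped once per training batch.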


class ScoreManager:
    """Track (score, result) pairs across evaluation rounds and return the best result."""

    def __init__(self) -> None:
        self.score = []
        self.line = []

    def add_instance(self, score, res):
        # Record one evaluation round: its scalar score and the associated result.
        self.score.append(score)
        self.line.append(res)

    def get_best(self):
        # Return the result recorded with the highest score.
        best_id = np.argmax(self.score)
        res = self.line[best_id]
        return res
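
# Usage sketch (illustrative values): record one validation score per epoch together
# with whatever artifact should be kept for it, then retrieve the best at the end.
# Note that get_best() assumes at least one instance has been added.
#
#   manager = ScoreManager()
#   manager.add_instance(0.71, "epoch-1 predictions")
#   manager.add_instance(0.78, "epoch-2 predictions")
#   manager.get_best()  # -> "epoch-2 predictions"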