-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathutils.py
36 lines (29 loc) · 1000 Bytes
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
import dgl
import random
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
from torch.autograd import Variable
from sklearn import metrics
def seed_everything(seed):
    """Seed every random-number source used in this project.

    Covers Python's ``random`` module, NumPy, PyTorch (CPU and all CUDA
    devices), and DGL, and forces cuDNN into deterministic mode so repeated
    runs with the same seed are reproducible.

    Parameters
    ----------
    seed : int
        Seed value applied to each RNG.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN kernels trade some speed for reproducibility.
    torch.backends.cudnn.deterministic = True
    dgl.random.seed(seed)
def evaluate(y_true, y_score):
    """Compute ROC-AUC, average precision, and binary F1 for scored labels.

    The F1 threshold is chosen so the predicted positive rate matches the
    true positive rate: scores at or above the p-th percentile are flagged
    as positive, where p is the percentage of negative (== 0) labels.

    Parameters
    ----------
    y_true : array-like
        Ground-truth labels; 0 marks the negative class.
    y_score : array-like
        Real-valued scores — assumes higher means more likely positive.

    Returns
    -------
    tuple of float
        ``(roc_auc, ap, f1)``.
    """
    labels = pd.Series(y_true)
    scores = pd.Series(y_score)

    # Threshold-free ranking metrics.
    roc_auc = metrics.roc_auc_score(labels, scores)
    ap = metrics.average_precision_score(labels, scores)

    # Percentage of negatives; cutting scores at this percentile predicts
    # exactly the observed positive fraction as positive.
    neg_pct = 100.0 * len(np.where(labels == 0)[0]) / len(labels)
    threshold = np.percentile(scores, neg_pct)
    predictions = (scores >= threshold).astype(int)

    _, _, f1, _ = metrics.precision_recall_fscore_support(
        labels.astype(int), predictions, average='binary')
    return roc_auc, ap, f1