util.py
import sys
import dill
import torch
import warnings
import numpy as np
from sklearn.metrics import roc_auc_score, f1_score, average_precision_score
# silence library warnings (e.g. sklearn undefined-metric warnings on edge cases)
warnings.filterwarnings('ignore')
def set_seed(seed):
    torch.manual_seed(seed)
    np.random.seed(seed)

def llprint(message):
    # write without a trailing newline and flush so progress messages appear immediately
    sys.stdout.write(message)
    sys.stdout.flush()
def multi_label_metric(y_gt, y_pred, y_prob):
    # y_gt, y_pred: binary arrays of shape (batch, n_labels);
    # y_prob: predicted probabilities of the same shape.

    def jaccard(y_gt, y_pred):
        score = []
        for b in range(y_gt.shape[0]):
            target = np.where(y_gt[b] == 1)[0]
            out_list = np.where(y_pred[b] == 1)[0]
            inter = set(out_list) & set(target)
            union = set(out_list) | set(target)
            # guard against an empty union (both target and prediction empty)
            jaccard_score = 0 if len(union) == 0 else len(inter) / len(union)
            score.append(jaccard_score)
        return np.mean(score)

    def average_prc(y_gt, y_pred):
        score = []
        for b in range(y_gt.shape[0]):
            target = np.where(y_gt[b] == 1)[0]
            out_list = np.where(y_pred[b] == 1)[0]
            inter = set(out_list) & set(target)
            prc_score = 0 if len(out_list) == 0 else len(inter) / len(out_list)
            score.append(prc_score)
        return score

    def average_recall(y_gt, y_pred):
        score = []
        for b in range(y_gt.shape[0]):
            target = np.where(y_gt[b] == 1)[0]
            out_list = np.where(y_pred[b] == 1)[0]
            inter = set(out_list) & set(target)
            recall_score = 0 if len(target) == 0 else len(inter) / len(target)
            score.append(recall_score)
        return score

    def average_f1(average_prc, average_recall):
        score = []
        for idx in range(len(average_prc)):
            if average_prc[idx] + average_recall[idx] == 0:
                score.append(0)
            else:
                score.append(2 * average_prc[idx] * average_recall[idx]
                             / (average_prc[idx] + average_recall[idx]))
        return score

    def f1(y_gt, y_pred):
        all_micro = []
        for b in range(y_gt.shape[0]):
            all_micro.append(f1_score(y_gt[b], y_pred[b], average='macro'))
        return np.mean(all_micro)

    def roc_auc(y_gt, y_prob):
        all_micro = []
        for b in range(len(y_gt)):
            all_micro.append(roc_auc_score(y_gt[b], y_prob[b], average='macro'))
        return np.mean(all_micro)

    def precision_auc(y_gt, y_prob):
        all_micro = []
        for b in range(len(y_gt)):
            all_micro.append(average_precision_score(y_gt[b], y_prob[b], average='macro'))
        return np.mean(all_micro)

    def precision_at_k(y_gt, y_prob, k=3):
        precision = 0
        # indices of the top-k highest-probability labels per sample
        sort_index = np.argsort(y_prob, axis=-1)[:, ::-1][:, :k]
        for i in range(len(y_gt)):
            TP = 0
            for j in range(len(sort_index[i])):
                if y_gt[i, sort_index[i, j]] == 1:
                    TP += 1
            precision += TP / len(sort_index[i])
        return precision / len(y_gt)

    # roc_auc (raises when a sample contains only one class; computed for
    # reference but not returned)
    try:
        auc = roc_auc(y_gt, y_prob)
    except Exception:
        auc = 0

    # precision@k
    # p_1 = precision_at_k(y_gt, y_prob, k=1)
    # p_3 = precision_at_k(y_gt, y_prob, k=3)
    # p_5 = precision_at_k(y_gt, y_prob, k=5)

    # macro f1 (also computed for reference but not returned)
    f1 = f1(y_gt, y_pred)

    # precision AUC
    prauc = precision_auc(y_gt, y_prob)

    # jaccard
    ja = jaccard(y_gt, y_pred)

    # precision, recall, f1
    avg_prc = average_prc(y_gt, y_pred)
    avg_recall = average_recall(y_gt, y_pred)
    avg_f1 = average_f1(avg_prc, avg_recall)

    return ja, prauc, np.mean(avg_prc), np.mean(avg_recall), np.mean(avg_f1)
def ddi_rate_score(record, path='data/output/ddi_A_final.pkl'):
    # DDI rate: fraction of prescribed medication pairs that appear in the
    # drug-drug-interaction adjacency matrix
    ddi_A = dill.load(open(path, 'rb'))
    all_cnt = 0
    dd_cnt = 0
    for patient in record:
        for adm in patient:
            med_code_set = adm
            # count each unordered pair of medications once
            for i, med_i in enumerate(med_code_set):
                for j, med_j in enumerate(med_code_set):
                    if j <= i:
                        continue
                    all_cnt += 1
                    if ddi_A[med_i, med_j] == 1 or ddi_A[med_j, med_i] == 1:
                        dd_cnt += 1
    if all_cnt == 0:
        return 0
    return dd_cnt / all_cnt
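
# ---------------------------------------------------------------------------
# Usage sketch (illustrative; not part of the original module). The toy
# arrays below are assumptions: 4 patients x 6 drug labels, with predictions
# obtained by thresholding probabilities at 0.5. The ddi_rate_score call is
# left commented out because it needs the pickled DDI adjacency matrix on
# disk at the given path.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    set_seed(0)
    y_gt = np.array([[1, 0, 1, 0, 0, 1],
                     [0, 1, 0, 0, 1, 0],
                     [1, 1, 0, 1, 0, 0],
                     [0, 0, 1, 0, 1, 1]])
    y_prob = np.random.rand(4, 6)
    y_pred = (y_prob >= 0.5).astype(int)
    ja, prauc, avg_p, avg_r, avg_f1 = multi_label_metric(y_gt, y_pred, y_prob)
    llprint('Jaccard %.4f | PRAUC %.4f | F1 %.4f\n' % (ja, prauc, avg_f1))
    # record format: patients -> admissions -> medication indices, e.g.
    # ddi_rate_score([[[0, 2, 5], [1, 3]]], path='data/output/ddi_A_final.pkl')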