forked from sfzhou5678/TextualAdversarialAttack-Tianchi
-
Notifications
You must be signed in to change notification settings - Fork 0
/
select_new_obscenities.py
30 lines (27 loc) · 904 Bytes
/
select_new_obscenities.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
def _load_stripped_lines(path):
    """Return the set of stripped lines read from *path* (UTF-8)."""
    with open(path, encoding='utf-8') as f:
        return {line.strip() for line in f}


def select_new_obscenities(scores_path='data/outputs/output.txt',
                           known_path='data/obscenities.txt',
                           white_list_path='data/obscenities_white_list.txt',
                           out_path='data/outputs/new_obscenities.txt',
                           thres=0.8):
    """Select newly-detected obscenities from a scored output file.

    Reads ``scores_path`` (tab-separated ``content<TAB>score`` lines), keeps
    each content whose score is >= *thres* and which appears in neither the
    known-obscenities file nor the white list, writes the unique survivors to
    ``out_path`` (one per line), and returns them as a list.

    Parameters
    ----------
    scores_path : str
        Tab-separated file of candidate contents and their float scores.
    known_path : str
        File of already-known obscenities, one per line.
    white_list_path : str
        File of whitelisted contents to always skip, one per line.
    out_path : str
        Destination file for the newly found obscenities.
    thres : float
        Minimum score (inclusive) for a content to be selected.

    Returns
    -------
    list[str]
        The newly selected contents, deduplicated, in first-seen order.
    """
    known_set = _load_stripped_lines(known_path)
    white_list_set = _load_stripped_lines(white_list_path)

    candidates = []
    with open(scores_path, encoding='utf-8') as f:
        for line in f:
            parts = line.rstrip('\n').split('\t')
            if len(parts) != 2:
                # Skip blank/malformed lines instead of crashing on unpack.
                continue
            content, score = parts
            if content in known_set or content in white_list_set:
                continue
            if float(score) >= thres:
                candidates.append(content)

    # dict.fromkeys dedupes while preserving first-seen order
    # (list(set(...)) would make the output order nondeterministic).
    obscenities = list(dict.fromkeys(candidates))

    with open(out_path, 'w', encoding='utf-8') as wf:
        wf.writelines(content + '\n' for content in obscenities)
    return obscenities


if __name__ == '__main__':
    result = select_new_obscenities()
    print(result)
    print(len(result))