ExperimentsQA.py
"""Training, evaluation, and plotting helpers for the QA attention-transparency experiments."""

from Transparency.common_code.common import *
from Transparency.Trainers.PlottingQA import generate_graphs
from Transparency.configurations import configurations_qa
from Transparency.Trainers.TrainerQA import Trainer, Evaluator


def train_dataset_and_get_atn_map(dataset, encoders, num_iters=15):
    # Train one model per encoder configuration and evaluate it on the test set.
    # Note: only the scores and attention maps of the last encoder in `encoders`
    # are returned, since the loop variables are overwritten on each iteration.
    for e in encoders:
        config = configurations_qa[e](dataset)
        trainer = Trainer(dataset, config=config, _type=dataset.trainer_type)
        trainer.train(dataset.train_data, dataset.dev_data, n_iters=num_iters,
                      save_on_metric=dataset.save_on_metric)
        # Get train losses as well?
        evaluator = Evaluator(dataset, trainer.model.dirname)
        _, attentions, scores = evaluator.evaluate(dataset.test_data, save_results=True)
    return scores, attentions
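

# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Assumes `dataset` is a prepared Transparency QA dataset object (train_data,
# dev_data, test_data, trainer_type, save_on_metric already set) and that
# "lstm" is a valid key in `configurations_qa`; both are assumptions made for
# illustration only.
def _example_collect_attention_maps(dataset):
    # Train a single encoder for a few iterations and keep the test-set
    # prediction scores together with the attention maps it produces.
    scores, attentions = train_dataset_and_get_atn_map(dataset, ["lstm"], num_iters=8)
    return scores, attentions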


def train_dataset(dataset, config):
    # Train a single model for the given configuration key, evaluate it on the
    # test set, and return the (trainer, evaluator) pair. Exceptions are caught
    # so that one failing configuration does not abort a batch of experiments.
    try:
        config = configurations_qa[config](dataset)
        n_iters = dataset.n_iters if hasattr(dataset, "n_iters") else 25
        trainer = Trainer(dataset, config=config, _type=dataset.trainer_type)
        trainer.train(dataset.train_data, dataset.dev_data, n_iters=n_iters,
                      save_on_metric=dataset.save_on_metric)
        evaluator = Evaluator(dataset, trainer.model.dirname)
        _ = evaluator.evaluate(dataset.test_data, save_results=True)
        return trainer, evaluator
    except Exception as e:
        print(e)
        return None


def run_evaluator_on_latest_model(dataset, config):
    # Load the most recently saved model for this configuration and re-run the
    # test-set evaluation without retraining.
    config = configurations_qa[config](dataset)
    latest_model = get_latest_model(os.path.join(config["training"]["basepath"],
                                                 config["training"]["exp_dirname"]))
    evaluator = Evaluator(dataset, latest_model)
    _ = evaluator.evaluate(dataset.test_data, save_results=True)
    return evaluator
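

# --- Hedged usage sketch (illustrative) ---
# Re-evaluates the most recently saved model for a configuration without
# retraining; the configuration key "lstm" is an assumption and must exist in
# `configurations_qa` for the given dataset.
def _example_reevaluate_latest(dataset):
    evaluator = run_evaluator_on_latest_model(dataset, "lstm")
    # Evaluation results are written alongside the model directory because
    # save_results=True is passed inside run_evaluator_on_latest_model.
    return evaluator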


def run_experiments_on_latest_model(dataset, config, force_run=True):
    # Run the attention-analysis experiments (gradients, attention permutation,
    # adversarial attention, remove-and-run) on the latest trained model.
    try:
        evaluator = run_evaluator_on_latest_model(dataset, config)
        test_data = dataset.test_data
        evaluator.gradient_experiment(test_data, force_run=force_run)
        evaluator.permutation_experiment(test_data, force_run=force_run)
        evaluator.adversarial_experiment(test_data, force_run=force_run)
        evaluator.remove_and_run_experiment(test_data, force_run=force_run)
    except Exception as e:
        print(e)
        return


def generate_graphs_on_latest_model(dataset, config):
    # Re-evaluate the latest saved model for this configuration and generate
    # its plots.
    config = configurations_qa[config](dataset)
    latest_model = get_latest_model(os.path.join(config["training"]["basepath"],
                                                 config["training"]["exp_dirname"]))
    if latest_model is not None:
        evaluator = Evaluator(dataset, latest_model)
        _ = evaluator.evaluate(dataset.test_data, save_results=True)
        generate_graphs(dataset, config["training"]["exp_dirname"], evaluator.model,
                        test_data=dataset.test_data)


def train_dataset_on_encoders(dataset, encoders):
    # Train and analyse one model per encoder configuration.
    for e in encoders:
        train_dataset(dataset, e)
        run_experiments_on_latest_model(dataset, e)


def generate_graphs_on_encoders(dataset, encoders):
    # Generate plots for the latest trained model of each encoder configuration.
    for e in encoders:
        generate_graphs_on_latest_model(dataset, e)
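

# --- Hedged end-to-end sketch (illustrative) ---
# A minimal driver, assuming a hypothetical loader `load_qa_dataset(name)` that
# returns a prepared Transparency QA dataset and a list of encoder keys that
# exist in `configurations_qa`; both names are placeholders, not part of the
# original code base.
def _example_full_pipeline(load_qa_dataset, dataset_name, encoders):
    dataset = load_qa_dataset(dataset_name)          # hypothetical loader
    train_dataset_on_encoders(dataset, encoders)     # training + attention experiments
    generate_graphs_on_encoders(dataset, encoders)   # plots for each encoder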


def get_results(path):
    # Load the saved evaluation metrics of the newest model directory under `path`.
    latest_model = get_latest_model(path)
    if latest_model is not None:
        with open(os.path.join(latest_model, "evaluate.json"), "r") as f:
            return json.load(f)
    else:
        raise LookupError("No Latest Model ... ")
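

# --- Hedged usage sketch (illustrative) ---
# Reads the metrics written by Evaluator.evaluate(..., save_results=True) from
# the newest experiment directory under `path`; the exact keys inside
# evaluate.json depend on the dataset's metrics and are not assumed here.
def _example_print_results(path):
    results = get_results(path)
    for metric, value in results.items():
        print(f"{metric}: {value}")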