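"""Generate request data for an lm-eval-harness task.

Runs the harness with a dry-run "model" that, instead of scoring anything, records
each prompt it is asked to evaluate as a "language-model-inference" request (one
JSON object per line) in the chosen JSONL output file.
"""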
import argparse
import json

from lm_eval import evaluator, tasks
from tasks import EvalHarnessAdaptor

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        prog='generate_task_data.py',
        description='Dump lm-eval-harness prompts for a task as language-model-inference requests.')
    parser.add_argument('--output-file', type=str, default='input.jsonl')
    parser.add_argument('--task-name', type=str, default='hellaswag')
    parser.add_argument('--num-fewshot', type=int, default=0)
    args = parser.parse_args()

    # Harness settings: sequence length, batch size, and positional-embedding type
    # (only used to decide whether the adaptor should shrink sequences below).
    seq = 1024
    total_batch = 1
    pe = 'fixed'

    # Truncate (or create) the output file so this run starts from an empty file.
    with open(args.output_file, 'w') as f:
        pass

    class DryRunner:
        """Stand-in runner: records each prompt as a request instead of scoring it."""

        def eval(self, batch):
            # Append one JSON request per prompt in the batch to the output file.
            with open(args.output_file, 'a') as f:
                for text in batch['text']:
                    item = {
                        "best_of": 1,
                        "echo": True,
                        "logprobs": 1,
                        "max_tokens": 0,
                        "model": "x",
                        "n": 1,
                        "prompt": text,
                        "request_type": "language-model-inference",
                        "stop": None,
                        "temperature": 0,
                        "top_p": 1,
                    }
                    f.write(json.dumps(item) + '\n')
            # Return placeholder results, one entry per prompt, so the harness keeps iterating.
            out = {
                'mask_loss': [1.0] * len(batch['text']),
                'each_correct': [True] * len(batch['text']),
            }
            return out

    t = DryRunner()
    adaptor = EvalHarnessAdaptor(t, seq, total_batch, shrink=pe != "fixed")

    # Run the harness with the dry runner: this walks the task and writes out the
    # requests, while the reported metrics come from the placeholder outputs above.
    results = evaluator.evaluate(adaptor, tasks.get_task_dict([
        args.task_name,
        # Other tasks that can be added to this list:
        # "lambada_openai",
        # "piqa",
        # "hellaswag",
        # "winogrande",
        # "mathqa",
        # "pubmedqa",
        # "boolq",
        # "cb",
        # "copa",
        # "multirc",
        # "record",
        # "wic",
        # "wsc",
    ]), False, args.num_fewshot, None)
    print('Finished')
    # dumped = json.dumps(results, indent=2)
    # print(dumped)
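
# Example invocation (a sketch; available task names depend on the installed lm-eval version):
#   python generate_task_data.py --output-file input.jsonl --task-name hellaswag --num-fewshot 0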