evaluation.py
import os
import json
import argparse


def clear_words(text):
    """Remove spaces, quotes, newlines, and colons from text."""
    return text.replace(' ', '').replace('"', '').replace("'", '').replace('\n', '').replace(':', '')
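

# Illustrative example (hypothetical input): clear_words(' "B":\n') -> 'B'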


def extract_answer(response):
    """
    Extract the answer letter from a model response.

    Supports multiple formats: boxed{}, JSON, 'answer is', 'Answer:', and single-letter answers.
    Returns 'O' if no answer is found.
    """
    # Handle missing responses before any string operations.
    if response is None:
        return 'O'
    response = response.replace('<answer>', '').replace('</answer>', '')
    if 'no answer' in response:
        return 'O'
    # LaTeX-style \boxed{X} answers.
    if 'boxed{' in response:
        split_text = response.split('boxed{')[1].split('}')[0]
        return clear_words(split_text)
    # JSON fields or natural-language markers such as "answer is".
    words = ['"answer":', 'answer is', 'answer:', '"Answer":', 'Answer is', 'Answer:']
    for word in words:
        if word in response:
            split_text = response.split(word)[-1]
            split_text = split_text.split(',')[0].split('.')[0]
            return clear_words(split_text)
    # Bare single-letter answers, e.g. "B." at the start of the response.
    if clear_words(response.split('.')[0]) in ['A', 'B', 'C', 'D', 'E', 'F']:
        return clear_words(response.split('.')[0])
    return 'O'
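

# Illustrative behaviour on hypothetical responses (not part of the original file):
#   extract_answer('The answer is B, because ...')      -> 'B'
#   extract_answer(r'\boxed{C}')                        -> 'C'
#   extract_answer('{"answer": "D", "reason": "..."}')  -> 'D'
#   extract_answer('I cannot tell.')                    -> 'O'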


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='MMSI-Video-Bench Evaluation')
    parser.add_argument('--eval_dir', type=str, default='./output/Uniform-50/Qwen3-VL-8B-Instruct')
    parser.add_argument('--bench',
                        choices=['main', 'robot_bench', 'ground_bench', 'indoor_perception_bench', 'easy2hard_bench'],
                        default='main')
    args = parser.parse_args()
    result_dir = args.eval_dir

    if args.bench != 'main':
        # Sub-benchmarks provide a {sub_type: [question ids]} mapping; invert it to id -> sub_type.
        with open(f'./meta_data/{args.bench}.json') as f:
            bench_type_id = json.load(f)
        bench_id_type = {}
        for sub_type in bench_type_id:
            for id_ in bench_type_id[sub_type]:
                bench_id_type[id_] = sub_type
        ORDER_LIST = list(bench_type_id.keys())
    else:
        # Category names for the main benchmark, kept verbatim so they match the
        # 'type' field in the result JSONs.
        ORDER_LIST = ['(Cross-Video) Memoery Update', '(Cross-Video) Multi-View Integration', 'Planning',
                      'Prediction', '(Motion Understanding) Camera Motion', '(Motion Understanding) Instance Motion',
                      '(Motion Understanding) Interactive Motion', '(Spatial Construction) Instance-Instance Spatial Relationship',
                      '(Spatial Construction) Instance-Scene Spatial Relationship', '(Spatial Construction) Scene-Scene Spatial Relationship',
                      '(Spatial Construction) Instance/Scene Attribute', '(Spatial Construction) Camera-Instance Spatial Relationship',
                      '(Spatial Construction) Camera-Scene Spatial Relationship']
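
    # Each result file in --eval_dir is expected to be a JSON object with at least
    # 'id', 'response', and 'ground_truth' fields (plus a 'type' field for the main
    # benchmark), matching what the scoring loop below reads.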
    score_dict = {'Overall': []}
    error_list = []
    skip_it = 0
    for json_file in os.listdir(result_dir):
        with open(os.path.join(result_dir, json_file)) as f:
            json_data = json.load(f)
        q_id = json_data['id']
        response = json_data['response']
        gt = json_data['ground_truth']
        if args.bench == 'main':
            question_type = json_data['type']
        else:
            if q_id not in bench_id_type:
                # Skip questions that do not belong to the selected sub-benchmark.
                continue
            question_type = bench_id_type[q_id]
        try:
            pred = extract_answer(response)
            assert pred in ['A', 'B', 'C', 'D', 'E', 'F']
            if question_type not in score_dict:
                score_dict[question_type] = []
            score_dict[question_type].append(float(pred == gt))
            score_dict['Overall'].append(float(pred == gt))
        except Exception:
            # Responses whose answer cannot be extracted are scored as incorrect.
            if question_type not in score_dict:
                score_dict[question_type] = []
            score_dict[question_type].append(0.0)
            score_dict['Overall'].append(0.0)
            skip_it += 1
            print('Failed to extract the answer from the response in', os.path.join(result_dir, json_file))
            error_list.append(os.path.join(result_dir, json_file))

    for key in ['Overall'] + ORDER_LIST:
        scores = score_dict.get(key, [])
        if scores:
            print(key, ': ', sum(scores) / len(scores), len(scores))
        else:
            print(key, ': no samples')
    print(f'failure count / total count: {len(error_list)} / {len(os.listdir(result_dir))}')
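
# Example invocation (the output path is illustrative and matches the --eval_dir default;
# it assumes one JSON result file per question in that directory):
#   python evaluation.py --eval_dir ./output/Uniform-50/Qwen3-VL-8B-Instruct --bench main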