Skip to content

Commit 9bed4fa

Browse files
committed
benchmark implemented
1 parent e337a41 commit 9bed4fa

File tree

2 files changed

+128
-0
lines changed

2 files changed

+128
-0
lines changed

superannotate/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,7 @@
6464
SANonExistingProjectNameException
6565
)
6666
from .consensus_benchmark.consensus import consensus
67+
from .consensus_benchmark.benchmark import benchmark
6768
from .input_converters.conversion import (
6869
convert_platform, convert_project_type, export_annotation_format,
6970
import_annotation_format
Lines changed: 127 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,127 @@
1+
"""
2+
Main module for benchmark computation
3+
"""
4+
import logging
5+
import tempfile
6+
import pandas as pd
7+
from pathlib import Path
8+
9+
from .helpers import image_consensus, consensus_plot
10+
from ..db.exports import prepare_export, download_export
11+
from ..analytics.common import aggregate_annotations_as_df
12+
13+
logger = logging.getLogger("superannotate-python-sdk")
14+
15+
16+
def benchmark(
    gt_project_name,
    project_names,
    export_root=None,
    image_list=None,
    annot_type='bbox',
    show_plots=False
):
    """Computes benchmark score for each instance of given images that are
    present in both the gt_project_name project and the projects in the
    project_names list.

    :param gt_project_name: project name that contains the ground truth annotations
    :type gt_project_name: str
    :param project_names: list of project names to aggregate through
    :type project_names: list of str
    :param export_root: root export path of the projects. If None, each
        project's export is downloaded into a temporary directory. Default: None
    :type export_root: Pathlike (str or Path)
    :param image_list: List of image names from the projects list that must be
        used. If None, then all images from the projects list will be used.
        Default: None
    :type image_list: list
    :param annot_type: Type of annotation instances to consider. Available
        candidates are: ["bbox", "polygon", "point"]
    :type annot_type: str
    :param show_plots: If True, show plots based on results of consensus
        computation. Default: False
    :type show_plots: bool

    :return: benchmark scores of all compared projects, concatenated
    :rtype: pandas.DataFrame

    :raises NotImplementedError: if annot_type is not a supported type
    """
    def aggregate_attributes(instance_df):
        """Collapse the per-attribute rows of one (image, instance, project)
        group into a single row with an "attributes" dict column mapping
        attribute group name -> list of attribute names (None if the group
        has no attributes)."""
        def attribute_to_list(attribute_df):
            attribute_names = list(attribute_df["attributeName"])
            attribute_df["attributeNames"] = len(attribute_df) * [
                attribute_names
            ]
            return attribute_df

        attributes = None
        if not instance_df["attributeGroupName"].isna().all():
            attrib_group_name = instance_df.groupby("attributeGroupName")[[
                "attributeGroupName", "attributeName"
            ]].apply(attribute_to_list)
            attributes = dict(
                zip(
                    attrib_group_name["attributeGroupName"],
                    attrib_group_name["attributeNames"]
                )
            )

        instance_df.drop(
            ["attributeGroupName", "attributeName"], axis=1, inplace=True
        )
        instance_df.drop_duplicates(
            subset=["imageName", "instanceId", "project"], inplace=True
        )
        instance_df["attributes"] = [attributes]
        return instance_df

    def _project_annotations_df(project_name):
        """Return the aggregated annotations DataFrame for one project,
        downloading a fresh export into a temporary directory when no
        export_root is given."""
        if export_root is None:
            with tempfile.TemporaryDirectory() as export_dir:
                export_meta = prepare_export(project_name)
                download_export(project_name, export_meta, export_dir)
                return aggregate_annotations_as_df(export_dir)
        return aggregate_annotations_as_df(Path(export_root) / project_name)

    supported_types = ['polygon', 'bbox', 'point']
    if annot_type not in supported_types:
        raise NotImplementedError(
            "annot_type should be one of " + str(supported_types)
        )

    gt_project_df = _project_annotations_df(gt_project_name)
    gt_project_df["project"] = gt_project_name

    benchmark_dfs = []
    for project_name in project_names:
        project_df = _project_annotations_df(project_name)
        project_df["project"] = project_name

        project_gt_df = pd.concat([project_df, gt_project_df])
        # Rows without an instance id carry no annotation instance to score.
        project_gt_df = project_gt_df[project_gt_df["instanceId"].notna()]

        if image_list is not None:
            project_gt_df = project_gt_df.loc[
                project_gt_df["imageName"].isin(image_list)]

        # Boolean mask instead of a string-concatenated .query(inplace=True):
        # equivalent filtering without interpolating annot_type into a query
        # string or mutating a potential slice in place.
        project_gt_df = project_gt_df[project_gt_df["type"] == annot_type]

        project_gt_df = project_gt_df.groupby(
            ["imageName", "instanceId", "project"]
        )
        project_gt_df = project_gt_df.apply(aggregate_attributes).reset_index(
            drop=True
        )
        unique_images = set(project_gt_df["imageName"])
        all_benchmark_data = []
        for image_name in unique_images:
            image_data = image_consensus(project_gt_df, image_name, annot_type)
            all_benchmark_data.append(pd.DataFrame(image_data))

        benchmark_project_df = pd.concat(all_benchmark_data, ignore_index=True)
        # image_consensus scores every project in the frame; keep only the
        # rows of the project currently compared against the ground truth.
        benchmark_project_df = benchmark_project_df[
            benchmark_project_df["projectName"] == project_name]

        benchmark_dfs.append(benchmark_project_df)

    benchmark_df = pd.concat(benchmark_dfs, ignore_index=True)

    if show_plots:
        consensus_plot(benchmark_df, project_names)

    return benchmark_df

0 commit comments

Comments
 (0)