
Commit e92d1f2

conforming to yapf style
1 parent 9c73348 commit e92d1f2

File tree

8 files changed: +113 −60 lines

superannotate/common.py
superannotate/db/images.py
superannotate/ml/ml_funcs.py
superannotate/ml/ml_models.py
superannotate/ml/utils.py
superannotate/parameter_decorators.py
tests/test_ml_funcs.py
tests/test_preannotation_upload.py
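All eight diffs below are mechanical whitespace and line-wrapping rewrites produced by yapf. For context, a minimal sketch of the same kind of reformat done through yapf's Python API; the style_config value is an assumption, since the repository's actual yapf configuration is not shown on this page:

# Hedged sketch, assuming yapf is installed (pip install yapf).
# The CLI equivalent would be: yapf --in-place --recursive superannotate/ tests/
from yapf.yapflib.yapf_api import FormatCode

messy = "response =_api.send_request(req_type='POST', path = '/images/getBulk')\n"
formatted, changed = FormatCode(messy, style_config='pep8')  # 'pep8' is an assumption
print(changed)    # True: spacing around '=' was normalized
print(formatted)  # response = _api.send_request(req_type='POST', path='/images/getBulk')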

superannotate/common.py

Lines changed: 4 additions & 0 deletions

@@ -33,12 +33,14 @@
     "FailedAfterEvaluationWithSavedModel": 6
 }
 
+
 class PredictionSegmentationStatuses(IntEnum):
     NotStarted = 1
     InProgress = 2
     Completed = 3
     Failed = 4
 
+
 def model_training_status_int_to_str(project_status):
     for item in _MODEL_TRAINING_STATUSES:
         if _MODEL_TRAINING_STATUSES[item] == project_status:
@@ -289,6 +291,8 @@ def process_api_response(data):
         return data
 
     return data['data']
+
+
 def tqdm_converter(
     total_num, images_converted, images_not_converted, finish_event
 ):
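The added blank lines here only give top-level definitions the two-blank-line separation PEP 8 expects. As a side note, the IntEnum being restyled is what the status checks elsewhere in this commit compare against; a small self-contained sketch, where the image_meta payload is hypothetical:

from enum import IntEnum

class PredictionSegmentationStatuses(IntEnum):  # verbatim from common.py above
    NotStarted = 1
    InProgress = 2
    Completed = 3
    Failed = 4

# IntEnum members compare equal to the plain ints an API response carries,
# which is why ml/utils.py can test x[task] == PredictionSegmentationStatuses.Completed.
image_meta = {"name": "example.jpg", "prediction_status": 3}  # hypothetical payload
assert image_meta["prediction_status"] == PredictionSegmentationStatuses.Completed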

superannotate/db/images.py

Lines changed: 7 additions & 6 deletions

@@ -132,6 +132,7 @@ def process_result(x):
     else:
         return result_list
 
+
 @project_metadata
 def get_image_metadata(project, image_names):
     """Returns image metadata
@@ -148,14 +149,14 @@ def get_image_metadata(project, image_names):
         image_names = [image_names]
 
     json_req = {
-        'project_id' : project['id'],
-        'team_id' : _api.team_id,
-        'names' : image_names
+        'project_id': project['id'],
+        'team_id': _api.team_id,
+        'names': image_names
     }
-    response =_api.send_request(
+    response = _api.send_request(
         req_type='POST',
-        path = '/images/getBulk',
-        json_req = json_req,
+        path='/images/getBulk',
+        json_req=json_req,
     )
 
     metadata = response.json()
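The substantive rule at work in this hunk is PEP 8's spacing for bindings: no spaces around '=' when it marks a keyword argument, and a space after ':' only inside dict literals. A minimal illustration with a hypothetical stand-in for _api.send_request:

def send_request(req_type, path, json_req=None):  # hypothetical stand-in
    return (req_type, path, json_req)

# yapf output: keyword arguments get no spaces around '=',
# dict keys get 'key': value with a space after the colon only
send_request(req_type='POST', path='/images/getBulk', json_req={'team_id': 1})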

superannotate/ml/ml_funcs.py

Lines changed: 15 additions & 8 deletions

@@ -52,7 +52,8 @@ def run_prediction(project, images_list, model):
     model = model.get(project['type'], None)
     if not model:
         raise SABaseException(
-            0, f"Specified project has type {project['type']}, and does not correspond to the type of provided model"
+            0,
+            f"Specified project has type {project['type']}, and does not correspond to the type of provided model"
         )
     project_id = project["id"]
 
@@ -126,9 +127,7 @@ def run_segmentation(project, images_list, model):
         raise SABaseException(0, "Model Does not exist")
 
     images_metadata = get_image_metadata(project, images_list)
-    images_metadata.sort(
-        key=lambda x: x["name"]
-    )
+    images_metadata.sort(key=lambda x: x["name"])
 
     if len(images_metadata) == 0:
         raise SABaseException(0, "No valid image names were provided")
@@ -223,7 +222,8 @@ def run_training(
             "The base model has to be of the same type (vector or pixel) as the projects"
         )
         raise SABaseException(
-            0, f"The type of provided projects is {project_type}, and does not correspond to the type of provided model"
+            0,
+            f"The type of provided projects is {project_type}, and does not correspond to the type of provided model"
         )
 
     for item in DEFAULT_HYPERPARAMETERS:
@@ -368,7 +368,11 @@ def get_plottable_cols(df):
         plottable_cols = []
         for sub_df in df:
            col_names = sub_df.columns.values.tolist()
-            plottable_cols += [col_name for col_name in col_names if col_name not in plottable_cols and col_name not in NON_PLOTABLE_KEYS]
+            plottable_cols += [
+                col_name
+                for col_name in col_names if col_name not in plottable_cols and
+                col_name not in NON_PLOTABLE_KEYS
+            ]
         return plottable_cols
 
     if not isinstance(metric_json_list, list):
@@ -399,11 +403,14 @@ def get_plottable_cols(df):
         specs=figure_specs,
         subplot_titles=plottable_cols,
     )
-    figure.update_layout(height=1000* num_rows)
+    figure.update_layout(height=1000 * num_rows)
     models = [os.path.basename(x).split('.')[0] for x in metric_json_list]
 
     plot_df(full_c_metrics, plottable_c_cols, figure)
-    plot_df(full_pe_metrics, plottable_pe_cols, figure, len(plottable_c_cols) + 1)
+    plot_df(
+        full_pe_metrics, plottable_pe_cols, figure,
+        len(plottable_c_cols) + 1
+    )
     figure.show()
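The plotting code touched above builds one plotly column of subplots and stretches the layout per row. A self-contained sketch of the same pattern, with placeholder traces standing in for the SDK's metric data:

import plotly.graph_objects as go
from plotly.subplots import make_subplots

num_rows = 2
# one secondary-y spec per row, mirroring make_plotly_specs in ml/utils.py
figure_specs = [[{"secondary_y": True}] for _ in range(num_rows)]
figure = make_subplots(
    rows=num_rows, cols=1, specs=figure_specs, subplot_titles=["loss", "mAP"]
)
figure.add_trace(go.Scatter(y=[0.9, 0.5, 0.3]), row=1, col=1)  # placeholder data
figure.add_trace(go.Scatter(y=[0.1, 0.4, 0.6]), row=2, col=1)
figure.update_layout(height=1000 * num_rows)  # same per-row height as the diff
figure.show()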

superannotate/ml/ml_models.py

Lines changed: 0 additions & 2 deletions

@@ -4,7 +4,6 @@
 from ..common import project_type_int_to_str
 import logging
 
-
 logger = logging.getLogger("superannotate-python-sdk")
 _api = API.get_instance()
 
@@ -38,4 +37,3 @@ def search_models(
     if not result['data']:
         raise SABaseException(0, "Model with such a name does not exist")
     return result['data']
-

superannotate/ml/utils.py

Lines changed: 31 additions & 13 deletions

@@ -6,11 +6,13 @@
 import time
 import os
 
+
 def metric_is_plottable(key):
-    if key in PLOTTABLE_METRICS or 'mIoU'in key or 'mAP' in key or key == 'iteration':
+    if key in PLOTTABLE_METRICS or 'mIoU' in key or 'mAP' in key or key == 'iteration':
         return True
     return False
 
+
 def reformat_metrics_json(data, name):
     continuous_metrics = []
     per_evaluation_metrics = []
@@ -26,18 +28,18 @@ def reformat_metrics_json(data, name):
             per_evaluation_metrics += [item]
     continuous_metrics_df = pd.DataFrame.from_dict(continuous_metrics)
     per_evaluation_metrics_df = pd.DataFrame.from_dict(per_evaluation_metrics)
-    continuous_metrics_df = drop_non_plotable_cols(
-        continuous_metrics_df
-    )
+    continuous_metrics_df = drop_non_plotable_cols(continuous_metrics_df)
     per_evaluation_metrics_df = drop_non_plotable_cols(
         per_evaluation_metrics_df
     )
     continuous_metrics_df['model'] = name
     per_evaluation_metrics_df['model'] = name
     if 'total_loss' in per_evaluation_metrics_df:
-        per_evaluation_metrics_df = per_evaluation_metrics_df.drop(columns='total_loss')
+        per_evaluation_metrics_df = per_evaluation_metrics_df.drop(
+            columns='total_loss'
+        )
 
-    per_evaluation_metrics_df = per_evaluation_metrics_df.dropna(axis = 'rows')
+    per_evaluation_metrics_df = per_evaluation_metrics_df.dropna(axis='rows')
     return continuous_metrics_df, per_evaluation_metrics_df
 
 
@@ -53,25 +55,41 @@ def make_plotly_specs(num_rows):
     specs = [[{"secondary_y": True}] for x in range(num_rows)]
     return specs
 
+
 def get_images_prediction_segmentation_status(project, image_names, task):
-    metadata = get_image_metadata(project,image_names)
+    metadata = get_image_metadata(project, image_names)
     if isinstance(metadata, dict):
         metadata = [metadata]
-    success_names = [x['name'] for x in metadata if x[task] == PredictionSegmentationStatuses.Completed]
-    failure_names = [x['name'] for x in metadata if x[task] == PredictionSegmentationStatuses.Failed]
+    success_names = [
+        x['name']
+        for x in metadata if x[task] == PredictionSegmentationStatuses.Completed
+    ]
+    failure_names = [
+        x['name']
+        for x in metadata if x[task] == PredictionSegmentationStatuses.Failed
+    ]
     return success_names, failure_names
 
-def log_process(project, image_name_set, total_image_count,status_key, task, logger):
+
+def log_process(
+    project, image_name_set, total_image_count, status_key, task, logger
+):
     num_complete = 0
     succeded_imgs = []
     failed_imgs = []
     while image_name_set:
-        succeded_imgs_batch, failed_imgs_batch = get_images_prediction_segmentation_status(project, list(image_name_set), status_key)
+        succeded_imgs_batch, failed_imgs_batch = get_images_prediction_segmentation_status(
+            project, list(image_name_set), status_key
+        )
        complete_images = succeded_imgs_batch + failed_imgs_batch
        succeded_imgs += succeded_imgs_batch
        failed_imgs += failed_imgs_batch
        num_complete += len(complete_images)
-        logger.info(f"{task} complete on {num_complete} / {total_image_count} images")
-        image_name_set = image_name_set.symmetric_difference(set(complete_images))
+        logger.info(
+            f"{task} complete on {num_complete} / {total_image_count} images"
+        )
+        image_name_set = image_name_set.symmetric_difference(
+            set(complete_images)
+        )
         time.sleep(5)
     return (succeded_imgs, failed_imgs)
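One detail worth noting in log_process: since every completed name necessarily came from the pending set, symmetric_difference here amounts to subtracting the finished images from the set still being polled. A short demonstration:

pending = {"a.jpg", "b.jpg", "c.jpg"}
complete_images = ["b.jpg"]
pending = pending.symmetric_difference(set(complete_images))
print(pending)  # {'a.jpg', 'c.jpg'} — finished names drop out, the loop polls the rest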

superannotate/parameter_decorators.py

Lines changed: 7 additions & 3 deletions

@@ -37,7 +37,8 @@ def inner(*args, **kwargs):
         if current_chosen_type is str:
             for idx, item in enumerate(new_kwargs['project']):
                 new_kwargs['project'][idx] = get_project_metadata_bare(
-                    item, include_complete_image_count=include_complete_image_count
+                    item,
+                    include_complete_image_count=include_complete_image_count
                 )
         elif current_chosen_type is not dict:
             raise SABaseException(
@@ -86,8 +87,11 @@ def inner(*args, **kwargs):
                 0,
                 f"The specifed model does not exist. Available models are {list(all_models_name_map.keys())}"
             )
-        elif (func.__name__ == 'run_prediction' or func.__name__ == 'run_training'):
-            new_model_arg = {x['type']:x for x in all_models}
+        elif (
+            func.__name__ == 'run_prediction' or
+            func.__name__ == 'run_training'
+        ):
+            new_model_arg = {x['type']: x for x in all_models}
         elif len(all_models) == 2:
             raise SABaseException(
                 0,
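The rewrapped elif builds a type-keyed lookup so run_prediction and run_training can pick the model matching each project's type (the model.get(project['type']) call in ml_funcs.py). A sketch with hypothetical model records; the real SDK may encode type values differently:

all_models = [  # hypothetical records
    {"name": "vector-model", "type": "Vector"},
    {"name": "pixel-model", "type": "Pixel"},
]
new_model_arg = {x["type"]: x for x in all_models}

project = {"type": "Pixel"}
model = new_model_arg.get(project["type"], None)  # same lookup run_prediction performs
assert model["name"] == "pixel-model"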

tests/test_ml_funcs.py

Lines changed: 49 additions & 27 deletions

@@ -4,7 +4,6 @@
 from .common import upload_project
 import pytest
 
-
 sa.init(Path.home() / ".superannotate" / "config.json")
 PROJECT_NAME_VECTOR = 'ML Functionality Test Vector'
 PROJECT_NAME_PIXEL = 'ML Functionality Test Pixel'
@@ -14,46 +13,63 @@
 
 MODEL_NAME = 'Instance segmentation (trained on COCO)'
 
+
 @pytest.mark.skipif(
     "SA_STRESS_TESTS" not in os.environ,
     reason="Requires env variable to be set"
 )
 def test_run_prediction():
 
     upload_project(
-        Path(PROJECT_PATH_VECTOR) ,PROJECT_NAME_VECTOR, "Test for ml functionality", "Vector"
+        Path(PROJECT_PATH_VECTOR), PROJECT_NAME_VECTOR,
+        "Test for ml functionality", "Vector"
     )
 
     upload_project(
-        Path(PROJECT_PATH_PIXEL),PROJECT_NAME_PIXEL, "Test for ml functionality", "Pixel"
+        Path(PROJECT_PATH_PIXEL), PROJECT_NAME_PIXEL,
+        "Test for ml functionality", "Pixel"
     )
 
     #Tests for the case when provided images do not exist in the project
     with pytest.raises(SABaseException) as e:
-        sa.run_prediction(PROJECT_NAME_VECTOR, ["NonExistantImage.jpg"], MODEL_NAME)
+        sa.run_prediction(
+            PROJECT_NAME_VECTOR, ["NonExistantImage.jpg"], MODEL_NAME
+        )
     assert str(e) == "No valid image names were provided"
 
     #Tests that the argument 'project' is valid
     with pytest.raises(SABaseException) as e:
-        sa.run_prediction([PROJECT_NAME_VECTOR, PROJECT_NAME_PIXEL], ["DoesntMatter.jpg"], MODEL_NAME)
-    assert str(e) == "smart prediction cannot be run on images from different projects simultaneously"
+        sa.run_prediction(
+            [PROJECT_NAME_VECTOR, PROJECT_NAME_PIXEL], ["DoesntMatter.jpg"],
+            MODEL_NAME
+        )
+    assert str(
+        e
+    ) == "smart prediction cannot be run on images from different projects simultaneously"
 
     #Tests if prediction on all available images gets run
     image_names_pixel = sa.search_images(PROJECT_NAME_PIXEL)
     image_names_vector = sa.search_images(PROJECT_NAME_VECTOR)
 
-    succeded_imgs, failed_imgs = sa.run_prediction(PROJECT_NAME_VECTOR, image_names_vector[:4], MODEL_NAME)
-    assert(len(succeded_imgs) + len(failed_imgs)) == 4
-
-    succeded_imgs, failed_imgs = sa.run_prediction(PROJECT_NAME_PIXEL,image_names_pixel[:4], MODEL_NAME)
-    assert(len(succeded_imgs) + len(failed_imgs)) == 4
+    succeded_imgs, failed_imgs = sa.run_prediction(
+        PROJECT_NAME_VECTOR, image_names_vector[:4], MODEL_NAME
+    )
+    assert (len(succeded_imgs) + len(failed_imgs)) == 4
 
-    succeded_imgs, failed_imgs = sa.run_prediction(PROJECT_NAME_PIXEL, image_names_pixel[:4] + ["NA.jpg"], MODEL_NAME)
-    assert(len(succeded_imgs) + len(failed_imgs)) == 4
+    succeded_imgs, failed_imgs = sa.run_prediction(
+        PROJECT_NAME_PIXEL, image_names_pixel[:4], MODEL_NAME
+    )
+    assert (len(succeded_imgs) + len(failed_imgs)) == 4
 
-    succeded_imgs, failed_imgs = sa.run_prediction(PROJECT_NAME_VECTOR, image_names_vector[:4] + ["NA.jpg"], MODEL_NAME)
-    assert(len(succeded_imgs) + len(failed_imgs)) == 4
+    succeded_imgs, failed_imgs = sa.run_prediction(
+        PROJECT_NAME_PIXEL, image_names_pixel[:4] + ["NA.jpg"], MODEL_NAME
+    )
+    assert (len(succeded_imgs) + len(failed_imgs)) == 4
 
+    succeded_imgs, failed_imgs = sa.run_prediction(
+        PROJECT_NAME_VECTOR, image_names_vector[:4] + ["NA.jpg"], MODEL_NAME
+    )
+    assert (len(succeded_imgs) + len(failed_imgs)) == 4
 
 
 @pytest.mark.skipif(
@@ -66,40 +82,46 @@ def test_run_segmentation():
     model_generic = 'generic'
 
     upload_project(
-        Path(PROJECT_PATH_PIXEL), PROJECT_NAME_PIXEL, "Test for ml functionality", "Pixel"
+        Path(PROJECT_PATH_PIXEL), PROJECT_NAME_PIXEL,
+        "Test for ml functionality", "Pixel"
     )
 
     image_names_pixel = sa.search_images(PROJECT_NAME_PIXEL)
     with pytest.raises(SABaseException) as e:
-        res = sa.run_segmentation(PROJECT_NAME_VECTOR, image_names_pixel, model_auto)
+        res = sa.run_segmentation(
+            PROJECT_NAME_VECTOR, image_names_pixel, model_auto
+        )
     assert str(e) == "Operation not supported for given project type"
     with pytest.raises(SABaseException) as e:
         sa.run_segmentation(
             PROJECT_NAME_PIXEL, image_names_pixel[:2], "NonExistantModel"
-
         )
     assert str(e) == "Model Does not exist"
 
     with pytest.raises(SABaseException) as e:
-        sa.run_segmentation(PROJECT_NAME_PIXEL, ["NonExistantImage.jpg"], MODEL_NAME)
+        sa.run_segmentation(
+            PROJECT_NAME_PIXEL, ["NonExistantImage.jpg"], MODEL_NAME
+        )
     assert str(e) == "No valid image names were provided"
 
-    succeded_imgs, failed_imgs = sa.run_segmentation(PROJECT_NAME_PIXEL, image_names_pixel[:4] + ["NA.jpg"], model_generic)
-    assert(len(succeded_imgs) + len(failed_imgs)) == 4
+    succeded_imgs, failed_imgs = sa.run_segmentation(
+        PROJECT_NAME_PIXEL, image_names_pixel[:4] + ["NA.jpg"], model_generic
+    )
+    assert (len(succeded_imgs) + len(failed_imgs)) == 4
 
-    succeded_imgs, failed_imgs = sa.run_segmentation(ROJECT_NAME_PIXEL, image_names_pixel[:4] + ["NA.jpg"], Pmodel_auto)
+    succeded_imgs, failed_imgs = sa.run_segmentation(
+        ROJECT_NAME_PIXEL, image_names_pixel[:4] + ["NA.jpg"], Pmodel_auto
+    )
+
+    assert (len(succeded_imgs) + len(failed_imgs)) == 4
 
-    assert(len(succeded_imgs) + len(failed_imgs)) == 4
 
 def test_download_model(tmpdir):
     print(tmpdir)
     export_dir = Path(tmpdir / 'export')
     export_dir.mkdir(parents=True, exist_ok=True)
 
-    ml_model = sa.search_models(include_global = True)[0]
+    ml_model = sa.search_models(include_global=True)[0]
     Path.rmdir(Path(tmpdir) / './export')
     assert (sa.download_model(ml_model, './export') == True)
     assert (sa.download_model(ml_model['name'], './export') == True)
-
-
-
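A caution on the assertion pattern these tests use: pytest.raises yields an ExceptionInfo object, and str(e) is not the bare exception message, so comparisons like the ones above only match when made against e.value. A minimal self-contained example:

import pytest

def fail():
    raise ValueError("No valid image names were provided")

def test_fail():
    with pytest.raises(ValueError) as e:
        fail()
    # e is an ExceptionInfo; compare against the exception object itself,
    # since str(e) does not reduce to just the message
    assert str(e.value) == "No valid image names were provided"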

tests/test_preannotation_upload.py

Lines changed: 0 additions & 1 deletion

@@ -7,7 +7,6 @@
 import superannotate as sa
 
 
-
 @pytest.mark.parametrize(
     "project_type,name,description,from_folder", [
         (
