Skip to content

Commit 2023061

Browse files
authored
Decoupling metrics from benchmark to allow different kinds of forward process (#14)

* create metrics for different types of behavior
* workable impl calling utils.METRICS in benchmark
1 parent 661ca25 commit 2023061

17 files changed

+220
-134
lines changed

benchmark/benchmark.py

Lines changed: 15 additions & 109 deletions
Original file line numberDiff line numberDiff line change
@@ -2,61 +2,30 @@
22
import argparse
33

44
import yaml
5-
import tqdm
65
import numpy as np
76
import cv2 as cv
87

8+
# from ..models import MODELS
99
from models import MODELS
10+
from utils import METRICS
1011

1112
parser = argparse.ArgumentParser("Benchmarks for OpenCV Zoo.")
1213
parser.add_argument('--cfg', '-c', type=str,
1314
help='Benchmarking on the given config.')
1415
args = parser.parse_args()
1516

16-
class Timer:
17-
def __init__(self, warmup=0, reduction='median'):
18-
self._warmup = warmup
19-
self._reduction = reduction
20-
self._tm = cv.TickMeter()
21-
self._time_record = []
22-
self._calls = 0
23-
24-
def start(self):
25-
self._tm.start()
26-
27-
def stop(self):
28-
self._tm.stop()
29-
self._calls += 1
30-
self._time_record.append(self._tm.getTimeMilli())
31-
self._tm.reset()
32-
33-
def reset(self):
34-
self._time_record = []
35-
self._calls = 0
36-
37-
def getResult(self):
38-
if self._reduction == 'median':
39-
return self._getMedian(self._time_record[self._warmup:])
40-
elif self._reduction == 'gmean':
41-
return self._getGMean(self._time_record[self._warmup:])
42-
else:
43-
raise NotImplementedError()
44-
45-
def _getMedian(self, records):
46-
''' Return median time
47-
'''
48-
l = len(records)
49-
mid = int(l / 2)
50-
if l % 2 == 0:
51-
return (records[mid] + records[mid - 1]) / 2
52-
else:
53-
return records[mid]
17+
def build_from_cfg(cfg, registery, key='name'):
18+
obj_name = cfg.pop(key)
19+
obj = registery.get(obj_name)
20+
return obj(**cfg)
5421

55-
def _getGMean(self, records, drop_largest=3):
56-
''' Return geometric mean of time
57-
'''
58-
time_record_sorted = sorted(records, reverse=True)
59-
return sum(records[drop_largest:]) / (self._calls - drop_largest)
22+
def prepend_pythonpath(cfg):
23+
for k, v in cfg.items():
24+
if isinstance(v, dict):
25+
prepend_pythonpath(v)
26+
else:
27+
if 'path' in k.lower():
28+
cfg[k] = os.path.join(os.environ['PYTHONPATH'], v)
6029

6130
class Data:
6231
def __init__(self, **kwargs):
@@ -105,64 +74,15 @@ def __getitem__(self, idx):
10574
else:
10675
return self._files[idx], image
10776

108-
class Metric:
109-
def __init__(self, **kwargs):
110-
self._sizes = kwargs.pop('sizes', None)
111-
self._warmup = kwargs.pop('warmup', 3)
112-
self._repeat = kwargs.pop('repeat', 10)
113-
assert self._warmup < self._repeat, 'The value of warmup must be smaller than the value of repeat.'
114-
self._batch_size = kwargs.pop('batchSize', 1)
115-
self._reduction = kwargs.pop('reduction', 'median')
116-
117-
self._timer = Timer(self._warmup, self._reduction)
118-
119-
def getReduction(self):
120-
return self._reduction
121-
122-
def forward(self, model, *args, **kwargs):
123-
img = args[0]
124-
h, w, _ = img.shape
125-
if not self._sizes:
126-
self._sizes = [[w, h]]
127-
128-
results = dict()
129-
self._timer.reset()
130-
if len(args) == 1:
131-
for size in self._sizes:
132-
img_r = cv.resize(img, size)
133-
try:
134-
model.setInputSize(size)
135-
except:
136-
pass
137-
# TODO: batched inference
138-
# input_data = [img] * self._batch_size
139-
input_data = img_r
140-
for _ in range(self._repeat+self._warmup):
141-
self._timer.start()
142-
model.infer(input_data)
143-
self._timer.stop()
144-
results[str(size)] = self._timer.getResult()
145-
else:
146-
# TODO: batched inference
147-
# input_data = [args] * self._batch_size
148-
bboxes = args[1]
149-
for idx, bbox in enumerate(bboxes):
150-
for _ in range(self._repeat+self._warmup):
151-
self._timer.start()
152-
model.infer(img, bbox)
153-
self._timer.stop()
154-
results['bbox{}'.format(idx)] = self._timer.getResult()
155-
156-
return results
157-
15877
class Benchmark:
15978
def __init__(self, **kwargs):
16079
self._data_dict = kwargs.pop('data', None)
16180
assert self._data_dict, 'Benchmark[\'data\'] cannot be empty and must have path and files.'
16281
self._data = Data(**self._data_dict)
16382

16483
self._metric_dict = kwargs.pop('metric', None)
165-
self._metric = Metric(**self._metric_dict)
84+
# self._metric = Metric(**self._metric_dict)
85+
self._metric = build_from_cfg(self._metric_dict, registery=METRICS, key='type')
16686

16787
backend_id = kwargs.pop('backend', 'default')
16888
available_backends = dict(
@@ -206,20 +126,6 @@ def printResults(self):
206126
total_latency += latency
207127
print(' {}, latency ({}): {:.4f} ms'.format(key, self._metric.getReduction(), latency))
208128

209-
210-
def build_from_cfg(cfg, registery):
211-
obj_name = cfg.pop('name')
212-
obj = registery.get(obj_name)
213-
return obj(**cfg)
214-
215-
def prepend_pythonpath(cfg):
216-
for k, v in cfg.items():
217-
if isinstance(v, dict):
218-
prepend_pythonpath(v)
219-
else:
220-
if 'path' in k.lower():
221-
cfg[k] = os.path.join(os.environ['PYTHONPATH'], v)
222-
223129
if __name__ == '__main__':
224130
assert args.cfg.endswith('yaml'), 'Currently support configs of yaml format only.'
225131
with open(args.cfg, 'r') as f:

benchmark/config/face_detection_yunet.yaml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,13 +4,13 @@ Benchmark:
44
path: "benchmark/data/face/detection"
55
files: ["group.jpg", "concerts.jpg", "dance.jpg"]
66
metric:
7+
type: "Detection"
78
sizes: # [[w1, h1], ...], Omit to run at original scale
89
- [160, 120]
910
- [640, 480]
10-
warmup: 3
11+
warmup: 30
1112
repeat: 10
12-
batchSize: 1
13-
reduction: 'median'
13+
reduction: "median"
1414
backend: "default"
1515
target: "cpu"
1616

benchmark/config/face_recognition_sface.yaml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,10 +5,10 @@ Benchmark:
55
files: ["Aaron_Tippin_0001.jpg", "Alvaro_Uribe_0028.jpg", "Alvaro_Uribe_0029.jpg", "Jose_Luis_Rodriguez_Zapatero_0001.jpg"]
66
useLabel: True
77
metric: # 'sizes' is omitted since this model requires input of fixed size
8-
warmup: 3
8+
type: "Recognition"
9+
warmup: 30
910
repeat: 10
10-
batchSize: 1
11-
reduction: 'median'
11+
reduction: "median"
1212
backend: "default"
1313
target: "cpu"
1414

benchmark/config/human_segmentation_pphumanseg.yaml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,10 +6,10 @@ Benchmark:
66
toRGB: True
77
resize: [192, 192]
88
metric:
9-
warmup: 3
9+
type: "Base"
10+
warmup: 30
1011
repeat: 10
11-
batchSize: 1
12-
reduction: 'median'
12+
reduction: "median"
1313
backend: "default"
1414
target: "cpu"
1515

benchmark/config/image_classification_ppresnet.yaml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,10 +7,10 @@ Benchmark:
77
resize: [256, 256]
88
centerCrop: 224
99
metric:
10-
warmup: 3
10+
type: "Base"
11+
warmup: 30
1112
repeat: 10
12-
batchSize: 1
13-
reduction: 'median'
13+
reduction: "median"
1414
backend: "default"
1515
target: "cpu"
1616

benchmark/config/qrcode_wechatqrcode.yaml

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4,18 +4,19 @@ Benchmark:
44
path: "benchmark/data/qrcode"
55
files: ["opencv.png", "opencv_zoo.png"]
66
metric:
7+
type: "Detection"
78
sizes:
89
- [100, 100]
910
- [300, 300]
10-
warmup: 3
11+
warmup: 30
1112
repeat: 10
1213
reduction: "median"
1314
backend: "default"
1415
target: "cpu"
1516

1617
Model:
1718
name: "WeChatQRCode"
18-
detect_prototxt_path: "models/qrcode_wechatqrcode/detect_2021sep.prototxt"
19-
detect_model_path: "models/qrcode_wechatqrcode/detect_2021sep.caffemodel"
20-
sr_prototxt_path: "models/qrcode_wechatqrcode/sr_2021sep.prototxt"
21-
sr_model_path: "models/qrcode_wechatqrcode/sr_2021sep.caffemodel"
19+
detect_prototxt_path: "models/qrcode_wechatqrcode/detect_2021nov.prototxt"
20+
detect_model_path: "models/qrcode_wechatqrcode/detect_2021nov.caffemodel"
21+
sr_prototxt_path: "models/qrcode_wechatqrcode/sr_2021nov.prototxt"
22+
sr_model_path: "models/qrcode_wechatqrcode/sr_2021nov.caffemodel"

benchmark/config/text_detection_db.yaml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,12 +4,12 @@ Benchmark:
44
path: "benchmark/data/text"
55
files: ["1.jpg", "2.jpg", "3.jpg"]
66
metric:
7+
type: "Detection"
78
sizes: # [[w1, h1], ...], Omit to run at original scale
89
- [640, 480]
9-
warmup: 3
10+
warmup: 30
1011
repeat: 10
11-
batchSize: 1
12-
reduction: 'median'
12+
reduction: "median"
1313
backend: "default"
1414
target: "cpu"
1515

benchmark/config/text_recognition_crnn.yaml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,10 +5,10 @@ Benchmark:
55
files: ["1.jpg", "2.jpg", "3.jpg"]
66
useLabel: True
77
metric: # 'sizes' is omitted since this model requires input of fixed size
8-
warmup: 3
8+
type: "Recognition"
9+
warmup: 30
910
repeat: 10
10-
batchSize: 1
11-
reduction: 'median'
11+
reduction: "median"
1212
backend: "default"
1313
target: "cpu"
1414

benchmark/requirements.txt

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
1-
numpy==1.21.2
1+
numpy
22
opencv-python==4.5.4.58
3-
tqdm
43
pyyaml
54
requests

benchmark/utils/__init__.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
from .factory import (METRICS, DATALOADERS)
2+
from .metrics import *
3+
4+
__all__ = ['METRICS', 'DATALOADERS']

0 commit comments

Comments (0)