|
2 | 2 | import argparse |
3 | 3 |
|
4 | 4 | import yaml |
5 | | -import tqdm |
6 | 5 | import numpy as np |
7 | 6 | import cv2 as cv |
8 | 7 |
|
| 8 | +# from ..models import MODELS |
9 | 9 | from models import MODELS |
| 10 | +from utils import METRICS |
10 | 11 |
|
11 | 12 | parser = argparse.ArgumentParser("Benchmarks for OpenCV Zoo.") |
12 | 13 | parser.add_argument('--cfg', '-c', type=str, |
13 | 14 | help='Benchmarking on the given config.') |
14 | 15 | args = parser.parse_args() |
15 | 16 |
|
16 | | -class Timer: |
17 | | - def __init__(self, warmup=0, reduction='median'): |
18 | | - self._warmup = warmup |
19 | | - self._reduction = reduction |
20 | | - self._tm = cv.TickMeter() |
21 | | - self._time_record = [] |
22 | | - self._calls = 0 |
23 | | - |
24 | | - def start(self): |
25 | | - self._tm.start() |
26 | | - |
27 | | - def stop(self): |
28 | | - self._tm.stop() |
29 | | - self._calls += 1 |
30 | | - self._time_record.append(self._tm.getTimeMilli()) |
31 | | - self._tm.reset() |
32 | | - |
33 | | - def reset(self): |
34 | | - self._time_record = [] |
35 | | - self._calls = 0 |
36 | | - |
37 | | - def getResult(self): |
38 | | - if self._reduction == 'median': |
39 | | - return self._getMedian(self._time_record[self._warmup:]) |
40 | | - elif self._reduction == 'gmean': |
41 | | - return self._getGMean(self._time_record[self._warmup:]) |
42 | | - else: |
43 | | - raise NotImplementedError() |
44 | | - |
45 | | - def _getMedian(self, records): |
46 | | - ''' Return median time |
47 | | - ''' |
48 | | - l = len(records) |
49 | | - mid = int(l / 2) |
50 | | - if l % 2 == 0: |
51 | | - return (records[mid] + records[mid - 1]) / 2 |
52 | | - else: |
53 | | - return records[mid] |
def build_from_cfg(cfg, registery, key='name'):
    """Instantiate an object described by a config dict.

    Pops ``cfg[key]`` (this mutates ``cfg``), looks the popped name up in
    ``registery`` and calls the returned class/factory with the remaining
    entries of ``cfg`` as keyword arguments.

    Args:
        cfg (dict): config containing ``key`` plus the constructor kwargs.
        registery: registry object exposing ``get(name)`` (e.g. MODELS,
            METRICS). NOTE(review): "registery" is a typo for "registry",
            but callers pass it by keyword (``registery=METRICS``), so the
            name is kept for interface compatibility.
        key (str): dict key holding the registered name. Defaults to 'name'.

    Returns:
        The constructed object.

    Raises:
        KeyError: if ``key`` is absent from ``cfg``.
        ValueError: if the name is not present in the registry.
    """
    obj_name = cfg.pop(key)
    obj = registery.get(obj_name)
    if obj is None:
        # Fail loudly here instead of the opaque
        # "'NoneType' object is not callable" at the call below.
        raise ValueError('{!r} is not registered'.format(obj_name))
    return obj(**cfg)
54 | 21 |
|
55 | | - def _getGMean(self, records, drop_largest=3): |
56 | | - ''' Return geometric mean of time |
57 | | - ''' |
58 | | - time_record_sorted = sorted(records, reverse=True) |
59 | | - return sum(records[drop_largest:]) / (self._calls - drop_largest) |
def prepend_pythonpath(cfg):
    """Rewrite every path-like entry of a nested config dict in place.

    Recurses into sub-dicts; any value whose key contains the substring
    'path' (case-insensitive) is replaced with
    ``os.path.join($PYTHONPATH, value)``.

    NOTE(review): assumes the PYTHONPATH environment variable is set —
    a missing variable raises KeyError; confirm callers guarantee this.
    """
    for key, value in cfg.items():
        if isinstance(value, dict):
            prepend_pythonpath(value)
        elif 'path' in key.lower():
            cfg[key] = os.path.join(os.environ['PYTHONPATH'], value)
60 | 29 |
|
61 | 30 | class Data: |
62 | 31 | def __init__(self, **kwargs): |
@@ -105,64 +74,15 @@ def __getitem__(self, idx): |
105 | 74 | else: |
106 | 75 | return self._files[idx], image |
107 | 76 |
|
108 | | -class Metric: |
109 | | - def __init__(self, **kwargs): |
110 | | - self._sizes = kwargs.pop('sizes', None) |
111 | | - self._warmup = kwargs.pop('warmup', 3) |
112 | | - self._repeat = kwargs.pop('repeat', 10) |
113 | | - assert self._warmup < self._repeat, 'The value of warmup must be smaller than the value of repeat.' |
114 | | - self._batch_size = kwargs.pop('batchSize', 1) |
115 | | - self._reduction = kwargs.pop('reduction', 'median') |
116 | | - |
117 | | - self._timer = Timer(self._warmup, self._reduction) |
118 | | - |
119 | | - def getReduction(self): |
120 | | - return self._reduction |
121 | | - |
122 | | - def forward(self, model, *args, **kwargs): |
123 | | - img = args[0] |
124 | | - h, w, _ = img.shape |
125 | | - if not self._sizes: |
126 | | - self._sizes = [[w, h]] |
127 | | - |
128 | | - results = dict() |
129 | | - self._timer.reset() |
130 | | - if len(args) == 1: |
131 | | - for size in self._sizes: |
132 | | - img_r = cv.resize(img, size) |
133 | | - try: |
134 | | - model.setInputSize(size) |
135 | | - except: |
136 | | - pass |
137 | | - # TODO: batched inference |
138 | | - # input_data = [img] * self._batch_size |
139 | | - input_data = img_r |
140 | | - for _ in range(self._repeat+self._warmup): |
141 | | - self._timer.start() |
142 | | - model.infer(input_data) |
143 | | - self._timer.stop() |
144 | | - results[str(size)] = self._timer.getResult() |
145 | | - else: |
146 | | - # TODO: batched inference |
147 | | - # input_data = [args] * self._batch_size |
148 | | - bboxes = args[1] |
149 | | - for idx, bbox in enumerate(bboxes): |
150 | | - for _ in range(self._repeat+self._warmup): |
151 | | - self._timer.start() |
152 | | - model.infer(img, bbox) |
153 | | - self._timer.stop() |
154 | | - results['bbox{}'.format(idx)] = self._timer.getResult() |
155 | | - |
156 | | - return results |
157 | | - |
158 | 77 | class Benchmark: |
159 | 78 | def __init__(self, **kwargs): |
160 | 79 | self._data_dict = kwargs.pop('data', None) |
161 | 80 | assert self._data_dict, 'Benchmark[\'data\'] cannot be empty and must have path and files.' |
162 | 81 | self._data = Data(**self._data_dict) |
163 | 82 |
|
164 | 83 | self._metric_dict = kwargs.pop('metric', None) |
165 | | - self._metric = Metric(**self._metric_dict) |
| 84 | + # self._metric = Metric(**self._metric_dict) |
| 85 | + self._metric = build_from_cfg(self._metric_dict, registery=METRICS, key='type') |
166 | 86 |
|
167 | 87 | backend_id = kwargs.pop('backend', 'default') |
168 | 88 | available_backends = dict( |
@@ -206,20 +126,6 @@ def printResults(self): |
206 | 126 | total_latency += latency |
207 | 127 | print(' {}, latency ({}): {:.4f} ms'.format(key, self._metric.getReduction(), latency)) |
208 | 128 |
|
209 | | - |
210 | | -def build_from_cfg(cfg, registery): |
211 | | - obj_name = cfg.pop('name') |
212 | | - obj = registery.get(obj_name) |
213 | | - return obj(**cfg) |
214 | | - |
215 | | -def prepend_pythonpath(cfg): |
216 | | - for k, v in cfg.items(): |
217 | | - if isinstance(v, dict): |
218 | | - prepend_pythonpath(v) |
219 | | - else: |
220 | | - if 'path' in k.lower(): |
221 | | - cfg[k] = os.path.join(os.environ['PYTHONPATH'], v) |
222 | | - |
223 | 129 | if __name__ == '__main__': |
224 | 130 | assert args.cfg.endswith('yaml'), 'Currently support configs of yaml format only.' |
225 | 131 | with open(args.cfg, 'r') as f: |
|
0 commit comments