 import cv2 as cv

 from models import MODELS
-from download import Downloader

 parser = argparse.ArgumentParser("Benchmarks for OpenCV Zoo.")
 parser.add_argument('--cfg', '-c', type=str,
                     help='Benchmarking on the given config.')
 args = parser.parse_args()

 class Timer:
-    def __init__(self):
+    def __init__(self, warmup=0, reduction='median'):
+        self._warmup = warmup
+        self._reduction = reduction
         self._tm = cv.TickMeter()
-
         self._time_record = []
-        self._average_time = 0
         self._calls = 0

     def start(self):
@@ -29,22 +28,121 @@ def stop(self):
         self._tm.stop()
         self._calls += 1
         self._time_record.append(self._tm.getTimeMilli())
-        self._average_time = sum(self._time_record) / self._calls
         self._tm.reset()

     def reset(self):
         self._time_record = []
-        self._average_time = 0
         self._calls = 0

-    def getAverageTime(self):
-        return self._average_time
+    def getResult(self):
+        if self._reduction == 'median':
+            return self._getMedian(self._time_record[self._warmup:])
+        elif self._reduction == 'gmean':
+            return self._getGMean(self._time_record[self._warmup:])
+        else:
+            raise NotImplementedError()
+
+    def _getMedian(self, records):
+        ''' Return the median time of records.
+        '''
+        records = sorted(records)  # samples arrive in call order; the median needs them sorted
+        l = len(records)
+        mid = int(l / 2)
+        if l % 2 == 0:
+            return (records[mid] + records[mid - 1]) / 2
+        else:
+            return records[mid]
+
+    def _getGMean(self, records, drop_largest=3):
+        ''' Return the mean time after dropping the largest samples
+            (a trimmed mean; despite the name, not a true geometric mean).
+        '''
+        time_record_sorted = sorted(records, reverse=True)
+        return sum(time_record_sorted[drop_largest:]) / (len(records) - drop_largest)
+
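Note: a minimal usage sketch of the new Timer; `do_work` is a hypothetical stand-in for a model's `infer` call, and the counts are illustrative:

    t = Timer(warmup=2, reduction='median')
    for _ in range(2 + 8):       # 2 warmup + 8 measured iterations
        t.start()
        do_work()                # hypothetical workload being timed
        t.stop()
    print(t.getResult())         # median of the 8 post-warmup samples, in ms
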
+class Data:
+    def __init__(self, **kwargs):
+        self._path = kwargs.pop('path', None)
+        assert self._path, 'Benchmark[\'data\'][\'path\'] cannot be empty.'
+
+        self._files = kwargs.pop('files', None)
+        if not self._files:
+            print('Benchmark[\'data\'][\'files\'] is empty, loading all images by default.')
+            self._files = list()
+            for filename in os.listdir(self._path):
+                if filename.endswith('jpg') or filename.endswith('png'):
+                    self._files.append(filename)
+
+        self._use_label = kwargs.pop('useLabel', False)
+        if self._use_label:
+            self._labels = self._load_label()
+
+    def _load_label(self):
+        labels = dict.fromkeys(self._files, None)
+        for filename in self._files:
+            labels[filename] = np.loadtxt(os.path.join(self._path, '{}.txt'.format(filename[:-4])))
+        return labels
+
+    def __getitem__(self, idx):
+        image = cv.imread(os.path.join(self._path, self._files[idx]))
+        if self._use_label:
+            return self._files[idx], image, self._labels[self._files[idx]]
+        else:
+            return self._files[idx], image
+
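Note: Data supports plain iteration via the old-style sequence protocol — `__getitem__` is called with 0, 1, 2, ... until `self._files[idx]` raises IndexError, which is why `Benchmark.run` below can loop over it directly. A minimal sketch with a hypothetical path and file:

    data = Data(path='benchmark/data', files=['lena.jpg'])
    for filename, image in data:
        print(filename, image.shape)
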
+class Metric:
+    def __init__(self, **kwargs):
+        self._sizes = kwargs.pop('sizes', None)
+        self._warmup = kwargs.pop('warmup', 3)
+        self._repeat = kwargs.pop('repeat', 10)
+        assert self._warmup < self._repeat, 'The value of warmup must be smaller than the value of repeat.'
+        self._batch_size = kwargs.pop('batchSize', 1)
+        self._reduction = kwargs.pop('reduction', 'median')
+
+        self._timer = Timer(self._warmup, self._reduction)

+    def getReduction(self):
+        return self._reduction
+
+    def forward(self, model, *args, **kwargs):
+        img = args[0]
+        h, w, _ = img.shape
+        if not self._sizes:
+            self._sizes = [[w, h]]
+
+        results = dict()
+        if len(args) == 1:
+            for size in self._sizes:
+                img_r = cv.resize(img, size)
+                model.setInputSize(size)
+                # TODO: batched inference
+                # input_data = [img] * self._batch_size
+                input_data = img_r
+                self._timer.reset()  # reset per size so earlier samples do not leak into this result
+                for _ in range(self._repeat + self._warmup):
+                    self._timer.start()
+                    model.infer(input_data)
+                    self._timer.stop()
+                results[str(size)] = self._timer.getResult()
+        else:
+            # TODO: batched inference
+            # input_data = [args] * self._batch_size
+            bboxes = args[1]
+            for idx, bbox in enumerate(bboxes):
+                self._timer.reset()  # reset per bbox for the same reason
+                for _ in range(self._repeat + self._warmup):
+                    self._timer.start()
+                    model.infer(img, bbox)
+                    self._timer.stop()
+                results['bbox{}'.format(idx)] = self._timer.getResult()
+
+        return results

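Note: for a single-input model benchmarked at two sizes, `forward` returns one latency per size keyed by `str(size)`; a sketch with illustrative values:

    results = metric.forward(model, img)
    # e.g. {'[160, 120]': 1.2, '[640, 480]': 9.8}  # ms, per the configured reduction
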
 class Benchmark:
     def __init__(self, **kwargs):
-        self._fileList = kwargs.pop('fileList', None)
-        assert self._fileList, 'fileList cannot be empty'
+        self._data_dict = kwargs.pop('data', None)
+        assert self._data_dict, 'Benchmark[\'data\'] cannot be empty and must have path and files.'
+        self._data = Data(**self._data_dict)
+
+        self._metric_dict = kwargs.pop('metric', None)
+        self._metric = Metric(**self._metric_dict)

         backend_id = kwargs.pop('backend', 'default')
         available_backends = dict(
@@ -71,76 +169,22 @@ def __init__(self, **kwargs):
         )
         self._target = available_targets[target_id]

-        self._sizes = kwargs.pop('sizes', None)
-        self._repeat = kwargs.pop('repeat', 100)
-        self._parentPath = kwargs.pop('parentPath', 'benchmark/data')
-        self._useGroundTruth = kwargs.pop('useDetectionLabel', False)  # If it is enable, 'sizes' will not work
-        assert (self._sizes and not self._useGroundTruth) or (not self._sizes and self._useGroundTruth), 'If \'useDetectionLabel\' is True, \'sizes\' should not exist.'
-
-        self._timer = Timer()
-        self._benchmark_results = dict.fromkeys(self._fileList, dict())
-
-        if self._useGroundTruth:
-            self.loadLabel()
-
-    def loadLabel(self):
-        self._labels = dict.fromkeys(self._fileList, None)
-        for imgName in self._fileList:
-            self._labels[imgName] = np.loadtxt(os.path.join(self._parentPath, '{}.txt'.format(imgName[:-4])))
+        self._benchmark_results = dict()

     def run(self, model):
         model.setBackend(self._backend)
         model.setTarget(self._target)

-        for imgName in self._fileList:
-            img = cv.imread(os.path.join(self._parentPath, imgName))
-            if self._useGroundTruth:
-                for idx, gt in enumerate(self._labels[imgName]):
-                    self._benchmark_results[imgName]['gt{}'.format(idx)] = self._run(
-                        model,
-                        img,
-                        gt,
-                        pbar_msg='    {}, gt{}'.format(imgName, idx)
-                    )
-            else:
-                if self._sizes is None:
-                    h, w, _ = img.shape
-                    model.setInputSize([w, h])
-                    self._benchmark_results[imgName][str([w, h])] = self._run(
-                        model,
-                        img,
-                        pbar_msg='    {}, original size {}'.format(imgName, str([w, h]))
-                    )
-                else:
-                    for size in self._sizes:
-                        imgResized = cv.resize(img, size)
-                        model.setInputSize(size)
-                        self._benchmark_results[imgName][str(size)] = self._run(
-                            model,
-                            imgResized,
-                            pbar_msg='    {}, size {}'.format(imgName, str(size))
-                        )
+        for data in self._data:
+            self._benchmark_results[data[0]] = self._metric.forward(model, *data[1:])

     def printResults(self):
-        print('  Results:')
         for imgName, results in self._benchmark_results.items():
-            print('    image: {}'.format(imgName))
+            print('  image: {}'.format(imgName))
             total_latency = 0
             for key, latency in results.items():
                 total_latency += latency
-                print('        {}, latency: {:.4f} ms'.format(key, latency))
-            print('        Average latency: {:.4f} ms'.format(total_latency / len(results)))
-
-    def _run(self, model, *args, **kwargs):
-        self._timer.reset()
-        pbar = tqdm.tqdm(range(self._repeat))
-        for _ in pbar:
-            pbar.set_description(kwargs.get('pbar_msg', None))
-
-            self._timer.start()
-            results = model.infer(*args)
-            self._timer.stop()
-        return self._timer.getAverageTime()
+            print('    {}, latency ({}): {:.4f} ms'.format(key, self._metric.getReduction(), latency))


 def build_from_cfg(cfg, registery):
@@ -160,16 +204,9 @@ def prepend_pythonpath(cfg, key1, key2):
     cfg = yaml.safe_load(f)

     # prepend PYTHONPATH to each path
-    prepend_pythonpath(cfg, key1='Data', key2='parentPath')
-    prepend_pythonpath(cfg, key1='Benchmark', key2='parentPath')
+    prepend_pythonpath(cfg['Benchmark'], key1='data', key2='path')
     prepend_pythonpath(cfg, key1='Model', key2='modelPath')

-
-    # Download data if not exist
-    print('Loading data:')
-    downloader = Downloader(**cfg['Data'])
-    downloader.get()
-
     # Instantiate benchmarking
     benchmark = Benchmark(**cfg['Benchmark'])

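Note: with this patch the YAML config consumed above takes the nested data/metric shape; a hypothetical cfg, written as the dict `yaml.safe_load` would return — every path and value below is invented for illustration:

    cfg = {
        'Benchmark': {
            'data':   {'path': 'benchmark/data/face_detection', 'files': ['lena.jpg'], 'useLabel': False},
            'metric': {'sizes': [[160, 120], [640, 480]], 'warmup': 3, 'repeat': 10, 'reduction': 'median'},
            'backend': 'default',   # resolved through available_backends
            'target': 'cpu',        # assumed key; resolved through available_targets
        },
        'Model': {'modelPath': 'models/my_model/my_model.onnx'},  # other Model keys omitted
    }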