Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
124 changes: 15 additions & 109 deletions benchmark/benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,61 +2,30 @@
import argparse

import yaml
import tqdm
import numpy as np
import cv2 as cv

# from ..models import MODELS
from models import MODELS
from utils import METRICS

parser = argparse.ArgumentParser("Benchmarks for OpenCV Zoo.")
parser.add_argument('--cfg', '-c', type=str,
help='Benchmarking on the given config.')
args = parser.parse_args()

class Timer:
    ''' Wall-clock timer around cv.TickMeter.

    Each start()/stop() pair appends one duration in milliseconds to the
    record list. getResult() reduces the records (after discarding the first
    `warmup` entries) with the configured reduction ('median' or 'gmean').
    '''
    def __init__(self, warmup=0, reduction='median'):
        self._warmup = warmup          # leading records to discard in getResult
        self._reduction = reduction    # 'median' or 'gmean'
        self._tm = cv.TickMeter()
        self._time_record = []         # per-call durations, milliseconds
        self._calls = 0                # start/stop pairs since last reset

    def start(self):
        self._tm.start()

    def stop(self):
        self._tm.stop()
        self._calls += 1
        self._time_record.append(self._tm.getTimeMilli())
        self._tm.reset()

    def reset(self):
        self._time_record = []
        self._calls = 0

    def getResult(self):
        ''' Return the reduced timing; raises NotImplementedError for an
        unknown reduction. '''
        if self._reduction == 'median':
            return self._getMedian(self._time_record[self._warmup:])
        elif self._reduction == 'gmean':
            return self._getGMean(self._time_record[self._warmup:])
        else:
            raise NotImplementedError()

    def _getMedian(self, records):
        ''' Return the median time.

        BUGFIX: sort before taking the middle element -- records arrive in
        chronological order, and the middle of an unsorted list is not the
        median.
        '''
        ordered = sorted(records)
        l = len(ordered)
        mid = l // 2
        if l % 2 == 0:
            return (ordered[mid] + ordered[mid - 1]) / 2
        else:
            return ordered[mid]
def build_from_cfg(cfg, registery, key='name'):
    '''Instantiate the object whose registry name is stored at cfg[key].

    Note: cfg is modified in place -- `key` is popped, and the remaining
    entries are forwarded to the constructor as keyword arguments.
    '''
    target_name = cfg.pop(key)
    target_cls = registery.get(target_name)
    return target_cls(**cfg)

def _getGMean(self, records, drop_largest=3):
''' Return geometric mean of time
'''
time_record_sorted = sorted(records, reverse=True)
return sum(records[drop_largest:]) / (self._calls - drop_largest)
def prepend_pythonpath(cfg):
    '''Recursively prefix PYTHONPATH onto every value whose key contains
    "path" (case-insensitive), modifying cfg in place.'''
    for key, value in cfg.items():
        if isinstance(value, dict):
            prepend_pythonpath(value)
        elif 'path' in key.lower():
            cfg[key] = os.path.join(os.environ['PYTHONPATH'], value)

class Data:
def __init__(self, **kwargs):
Expand Down Expand Up @@ -105,64 +74,15 @@ def __getitem__(self, idx):
else:
return self._files[idx], image

class Metric:
    ''' Measures per-call latency of model.infer, either across configured
    input sizes (single-image models) or per bounding box (two-arg models).
    '''
    def __init__(self, **kwargs):
        # [[w, h], ...]; None/empty means "run at the image's original scale"
        self._sizes = kwargs.pop('sizes', None)
        self._warmup = kwargs.pop('warmup', 3)
        self._repeat = kwargs.pop('repeat', 10)
        assert self._warmup < self._repeat, 'The value of warmup must be smaller than the value of repeat.'
        self._batch_size = kwargs.pop('batchSize', 1)
        self._reduction = kwargs.pop('reduction', 'median')

        self._timer = Timer(self._warmup, self._reduction)

    def getReduction(self):
        ''' Return the reduction name ('median' or 'gmean') used for timings. '''
        return self._reduction

    def forward(self, model, *args, **kwargs):
        ''' Run the benchmark.

        args[0] is the image; an optional args[1] is an iterable of bounding
        boxes, in which case model.infer(img, bbox) is timed per box.
        Returns a dict mapping a size/bbox label to the reduced latency (ms).
        '''
        img = args[0]
        h, w, _ = img.shape
        if not self._sizes:
            # Default to the image's own size; note this persists across calls.
            self._sizes = [[w, h]]

        results = dict()
        self._timer.reset()
        if len(args) == 1:
            for size in self._sizes:
                img_r = cv.resize(img, size)
                try:
                    model.setInputSize(size)
                except Exception:
                    # BUGFIX: narrowed from a bare `except:` so that
                    # KeyboardInterrupt/SystemExit are not swallowed; some
                    # models simply do not expose setInputSize.
                    pass
                # TODO: batched inference
                # input_data = [img] * self._batch_size
                input_data = img_r
                for _ in range(self._repeat+self._warmup):
                    self._timer.start()
                    model.infer(input_data)
                    self._timer.stop()
                results[str(size)] = self._timer.getResult()
        else:
            # TODO: batched inference
            # input_data = [args] * self._batch_size
            bboxes = args[1]
            for idx, bbox in enumerate(bboxes):
                for _ in range(self._repeat+self._warmup):
                    self._timer.start()
                    model.infer(img, bbox)
                    self._timer.stop()
                results['bbox{}'.format(idx)] = self._timer.getResult()

        return results

class Benchmark:
def __init__(self, **kwargs):
self._data_dict = kwargs.pop('data', None)
assert self._data_dict, 'Benchmark[\'data\'] cannot be empty and must have path and files.'
self._data = Data(**self._data_dict)

self._metric_dict = kwargs.pop('metric', None)
self._metric = Metric(**self._metric_dict)
# self._metric = Metric(**self._metric_dict)
self._metric = build_from_cfg(self._metric_dict, registery=METRICS, key='type')

backend_id = kwargs.pop('backend', 'default')
available_backends = dict(
Expand Down Expand Up @@ -206,20 +126,6 @@ def printResults(self):
total_latency += latency
print(' {}, latency ({}): {:.4f} ms'.format(key, self._metric.getReduction(), latency))


def build_from_cfg(cfg, registery):
    '''Construct the object registered under cfg['name'], passing the rest
    of cfg as keyword arguments (cfg loses its 'name' entry in the process).'''
    constructor = registery.get(cfg.pop('name'))
    return constructor(**cfg)

def prepend_pythonpath(cfg):
    '''Walk cfg (in place), descending into nested dicts, and join the
    PYTHONPATH environment variable onto every value whose key mentions
    "path" (case-insensitive).'''
    for cfg_key, cfg_val in cfg.items():
        if isinstance(cfg_val, dict):
            prepend_pythonpath(cfg_val)
            continue
        if 'path' in cfg_key.lower():
            cfg[cfg_key] = os.path.join(os.environ['PYTHONPATH'], cfg_val)

if __name__ == '__main__':
assert args.cfg.endswith('yaml'), 'Currently support configs of yaml format only.'
with open(args.cfg, 'r') as f:
Expand Down
6 changes: 3 additions & 3 deletions benchmark/config/face_detection_yunet.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,13 @@ Benchmark:
path: "benchmark/data/face/detection"
files: ["group.jpg", "concerts.jpg", "dance.jpg"]
metric:
type: "Detection"
sizes: # [[w1, h1], ...], Omit to run at original scale
- [160, 120]
- [640, 480]
warmup: 3
warmup: 30
repeat: 10
batchSize: 1
reduction: 'median'
reduction: "median"
backend: "default"
target: "cpu"

Expand Down
6 changes: 3 additions & 3 deletions benchmark/config/face_recognition_sface.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,10 @@ Benchmark:
files: ["Aaron_Tippin_0001.jpg", "Alvaro_Uribe_0028.jpg", "Alvaro_Uribe_0029.jpg", "Jose_Luis_Rodriguez_Zapatero_0001.jpg"]
useLabel: True
metric: # 'sizes' is omitted since this model requires input of fixed size
warmup: 3
type: "Recognition"
warmup: 30
repeat: 10
batchSize: 1
reduction: 'median'
reduction: "median"
backend: "default"
target: "cpu"

Expand Down
6 changes: 3 additions & 3 deletions benchmark/config/human_segmentation_pphumanseg.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,10 @@ Benchmark:
toRGB: True
resize: [192, 192]
metric:
warmup: 3
type: "Base"
warmup: 30
repeat: 10
batchSize: 1
reduction: 'median'
reduction: "median"
backend: "default"
target: "cpu"

Expand Down
6 changes: 3 additions & 3 deletions benchmark/config/image_classification_ppresnet.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,10 @@ Benchmark:
resize: [256, 256]
centerCrop: 224
metric:
warmup: 3
type: "Base"
warmup: 30
repeat: 10
batchSize: 1
reduction: 'median'
reduction: "median"
backend: "default"
target: "cpu"

Expand Down
11 changes: 6 additions & 5 deletions benchmark/config/qrcode_wechatqrcode.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,18 +4,19 @@ Benchmark:
path: "benchmark/data/qrcode"
files: ["opencv.png", "opencv_zoo.png"]
metric:
type: "Detection"
sizes:
- [100, 100]
- [300, 300]
warmup: 3
warmup: 30
repeat: 10
reduction: "median"
backend: "default"
target: "cpu"

Model:
name: "WeChatQRCode"
detect_prototxt_path: "models/qrcode_wechatqrcode/detect_2021sep.prototxt"
detect_model_path: "models/qrcode_wechatqrcode/detect_2021sep.caffemodel"
sr_prototxt_path: "models/qrcode_wechatqrcode/sr_2021sep.prototxt"
sr_model_path: "models/qrcode_wechatqrcode/sr_2021sep.caffemodel"
detect_prototxt_path: "models/qrcode_wechatqrcode/detect_2021nov.prototxt"
detect_model_path: "models/qrcode_wechatqrcode/detect_2021nov.caffemodel"
sr_prototxt_path: "models/qrcode_wechatqrcode/sr_2021nov.prototxt"
sr_model_path: "models/qrcode_wechatqrcode/sr_2021nov.caffemodel"
Comment on lines +19 to +22
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@fengyuentau how do you define the suffix? Weren't the QRCode models trained quite some time ago?

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The suffix is the date when I uploaded the models to the zoo; the previous suffix was wrong.

6 changes: 3 additions & 3 deletions benchmark/config/text_detection_db.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,12 +4,12 @@ Benchmark:
path: "benchmark/data/text"
files: ["1.jpg", "2.jpg", "3.jpg"]
metric:
type: "Detection"
sizes: # [[w1, h1], ...], Omit to run at original scale
- [640, 480]
warmup: 3
warmup: 30
repeat: 10
batchSize: 1
reduction: 'median'
reduction: "median"
backend: "default"
target: "cpu"

Expand Down
6 changes: 3 additions & 3 deletions benchmark/config/text_recognition_crnn.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,10 @@ Benchmark:
files: ["1.jpg", "2.jpg", "3.jpg"]
useLabel: True
metric: # 'sizes' is omitted since this model requires input of fixed size
warmup: 3
type: "Recognition"
warmup: 30
repeat: 10
batchSize: 1
reduction: 'median'
reduction: "median"
backend: "default"
target: "cpu"

Expand Down
3 changes: 1 addition & 2 deletions benchmark/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
numpy==1.21.2
numpy
opencv-python==4.5.4.58
tqdm
pyyaml
requests
4 changes: 4 additions & 0 deletions benchmark/utils/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
from .factory import (METRICS, DATALOADERS)
from .metrics import *

__all__ = ['METRICS', 'DATALOADERS']
13 changes: 13 additions & 0 deletions benchmark/utils/factory.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
class Registery:
    '''A minimal name -> object registry ("Registery" spelling is kept for
    backward compatibility with existing imports).

    Items are registered under their __name__ via `register` (usable as a
    class/function decorator) and looked up by name via `get`.
    '''
    def __init__(self, name):
        self._name = name        # human-readable registry name, for errors
        self._dict = dict()

    def get(self, key):
        '''Return the registered item for `key`; raise a descriptive
        KeyError listing the known names if it is missing.'''
        try:
            return self._dict[key]
        except KeyError:
            raise KeyError('{!r} is not registered in {}; available: {}'.format(
                key, self._name, sorted(self._dict)))

    def register(self, item):
        '''Register `item` under item.__name__ and return it.

        BUGFIX: return the item so this works as a decorator -- previously
        `@REGISTRY.register` rebound the decorated class to None.
        '''
        self._dict[item.__name__] = item
        return item

METRICS = Registery('Metrics')
DATALOADERS = Registery('DataLoaders')
5 changes: 5 additions & 0 deletions benchmark/utils/metrics/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
from .base import Base
from .detection import Detection
from .recognition import Recognition

__all__ = ['Base', 'Detection', 'Recognition']
29 changes: 29 additions & 0 deletions benchmark/utils/metrics/base.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
import cv2 as cv

from .base_metric import BaseMetric
from ..factory import METRICS

@METRICS.register
class Base(BaseMetric):
    ''' Latency metric for models that take a single image input. '''
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def forward(self, model, *args, **kwargs):
        ''' Time model.infer on args[0] resized to each configured size.

        Warmup runs are untimed; returns {str(size): reduced latency in ms}.
        '''
        img = args[0]
        if not self._sizes:
            # Default to the image's own (w, h); note this persists on the
            # instance across calls.
            h, w, _ = img.shape
            self._sizes.append([w, h])

        results = dict()
        for size in self._sizes:
            # BUGFIX: reset per size (was reset once before the loop), so each
            # size's result is computed from that size's records only instead
            # of accumulating earlier sizes' timings.
            self._timer.reset()
            input_data = cv.resize(img, size)
            for _ in range(self._warmup):
                model.infer(input_data)
            for _ in range(self._repeat):
                self._timer.start()
                model.infer(input_data)
                self._timer.stop()
            results[str(size)] = self._getResult()

        return results
48 changes: 48 additions & 0 deletions benchmark/utils/metrics/base_metric.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
import cv2 as cv

from ..timer import Timer

class BaseMetric:
    ''' Shared benchmarking state and timing reductions for all metrics. '''
    def __init__(self, **kwargs):
        # [[w, h], ...]; an empty list means "use the input image's own size"
        self._sizes = kwargs.pop('sizes', None)
        if self._sizes is None:
            self._sizes = []
        self._warmup = kwargs.pop('warmup', 3)
        self._repeat = kwargs.pop('repeat', 10)
        self._reduction = kwargs.pop('reduction', 'median')

        self._timer = Timer()

    def _calcMedian(self, records):
        ''' Return the median of records.

        BUGFIX: sort first -- timing records arrive in chronological order,
        and the middle element of an unsorted list is not the median.
        '''
        ordered = sorted(records)
        l = len(ordered)
        mid = l // 2
        if l % 2 == 0:
            return (ordered[mid] + ordered[mid - 1]) / 2
        else:
            return ordered[mid]

    def _calcGMean(self, records, drop_largest=3):
        ''' Return the mean of records after dropping the `drop_largest`
        largest values.

        NOTE(review): despite the name this is an arithmetic mean of the
        remaining records, not a geometric mean -- kept for compatibility.
        '''
        l = len(records)
        if l <= drop_largest:
            print('len(records)({}) <= drop_largest({}), stop dropping.'.format(l, drop_largest))
            # BUGFIX: actually stop dropping; previously this warned but then
            # still sliced and divided by a non-positive (l - drop_largest).
            drop_largest = 0
        records_sorted = sorted(records, reverse=True)
        return sum(records_sorted[drop_largest:]) / (l - drop_largest)

    def _getResult(self):
        ''' Reduce the timer's records with the configured reduction. '''
        records = self._timer.getRecords()
        if self._reduction == 'median':
            return self._calcMedian(records)
        elif self._reduction == 'gmean':
            return self._calcGMean(records)
        else:
            raise NotImplementedError('Reduction {} is not supported'.format(self._reduction))

    def getReduction(self):
        ''' Return the reduction name ('median' or 'gmean'). '''
        return self._reduction

    def forward(self, model, *args, **kwargs):
        ''' Subclasses implement the actual benchmarking loop. '''
        raise NotImplementedError('Not implemented')
Loading