使用 Python API,对 OpenVINO 和 ONNX Runtime 的推理时间进行比较。有学习并借用一些其他 up 主的代码。
# --- OpenVINO inference timing ---
from openvino.inference_engine import IECore
import numpy as np
import cv2
import time

ie = IECore()
model = "shufflenet-v2-10.onnx"
# model = "shufflenet-v2-10/shufflenet-v2-10.xml"  # alternative: pre-converted IR
net = ie.read_network(model=model)
input_blob = next(iter(net.input_info))
out_blob = next(iter(net.outputs))
net.batch_size = 1  # batch size

# Expected input layout is NCHW.
n, c, h, w = net.input_info[input_blob].input_data.shape
print(n, c, h, w)

# Build an NCHW batch from the test image.
# NOTE(review): cv2.imread yields BGR HWC uint8 and this path does NOT divide
# by 255, while the ONNX Runtime path below feeds RGB scaled to [0, 1] —
# the two inputs are not identical; confirm this is intended for the timing
# comparison.
images = np.ndarray(shape=(n, c, h, w))
for i in range(n):
    image = cv2.imread("person_detection.png")
    if image.shape[:-1] != (h, w):
        # Resize only when the image does not already match the model's H x W.
        image = cv2.resize(image, (w, h))
    image = image.transpose((2, 0, 1))  # HWC -> CHW
    images[i] = image

exec_net = ie.load_network(network=net, device_name="CPU")

# Time a single inference call.
start = time.time()
res = exec_net.infer(inputs={input_blob: images})
end = time.time()
# print(res)
openvinoTime = end - start
# --- ONNX Runtime inference timing for the same model ---
import onnxruntime as nxrun
from skimage.transform import resize
from skimage import io

# Load the test image and convert it to NCHW float32 scaled to [0, 1].
raw = io.imread("./person_detection.png")
chw = np.rollaxis(raw, 2, 0)  # HWC -> CHW
img224 = resize(chw / 255, (3, 224, 224), anti_aliasing=True)
ximg = np.expand_dims(img224, axis=0).astype(np.float32)  # add batch dim
# ximg = np.random.rand(1, 3, 224, 224).astype(np.float32)

sess = nxrun.InferenceSession("./shufflenet-v2-10.onnx")
print("The model expects input shape: ", sess.get_inputs()[0].shape)
print("The shape of the Image is: ", ximg.shape)

input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name

# Time a single forward pass.
start = time.time()
result = sess.run(None, {input_name: ximg})
end = time.time()

prob = result[0]
print(prob.ravel()[:10])
onnxtime = end - start
大致过程:
初始化模型地址
初始化图片地址
准备输入输出
推理



