File: //home/ekspardev/onarbeni-master/tools/model_serve_sample.py
import base64
import glob
import json
import time

import numpy as np
import requests
from PIL import Image
'''
Start TF Serving (REST API on port 8501):

docker run -t --rm -p 8501:8501 \
    -v "$PWD/models/effi:/models/effi4" \
    -e MODEL_NAME=effi4 \
    -e OMP_NUM_THREADS=$num_physical_cores \
    -e TENSORFLOW_INTER_OP_PARALLELISM=2 \
    -e TENSORFLOW_INTRA_OP_PARALLELISM=$num_physical_cores \
    tensorflow/serving &

Or run the Intel-optimized build (gRPC on 8500, REST on 8501):

docker run -t --rm -d \
    --name=tf_serving \
    -p 8500:8500 \
    -p 8501:8501 \
    -v "$(pwd)/models/effi:/models/effi4" \
    -e MODEL_NAME=effi4 \
    -e OMP_NUM_THREADS=$num_physical_cores \
    -e TENSORFLOW_INTER_OP_PARALLELISM=2 \
    -e TENSORFLOW_INTRA_OP_PARALLELISM=$num_physical_cores \
    intel/intel-optimized-tensorflow-serving:latest
'''
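# Once a container is up, the REST endpoint can be smoke-tested from the
# shell (assumes MODEL_NAME=effi4 as in the commands above):
#   curl http://localhost:8501/v1/models/effi4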
project = 'possitive'
image_path = 'data/%s' % project
threshold = 0.5  # score threshold used when reporting the top class
# REST path must use the served model name (MODEL_NAME=effi4 above).
SERVER_URL = 'http://localhost:8501/v1/models/effi4:predict'
# Input size expected by the served model.
input_size = (320, 320)
files = glob.glob(image_path + '/*.*')
MODEL_ACCEPT_JPG = False
headers = {"content-type": "application/json"}
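# Optional readiness probe (a sketch, not part of the original script): TF
# Serving reports model status at GET /v1/models/<MODEL_NAME>, so we can
# poll it before benchmarking. The URL assumes MODEL_NAME=effi4 as above.
def wait_for_server(status_url='http://localhost:8501/v1/models/effi4',
                    retries=5, delay=1.0):
    for _ in range(retries):
        try:
            if requests.get(status_url).ok:
                return True
        except requests.ConnectionError:
            pass
        time.sleep(delay)
    return False
# Usage: call `wait_for_server()` before the benchmark loop below.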
for file in files[:2]:
    # Resize to the model's expected input size and force 3 channels.
    image = Image.open(file).convert('RGB').resize(input_size)
    image_np = np.array(image)
    if MODEL_ACCEPT_JPG:
        # Compose a JSON Predict request (send the JPEG file in base64).
        with open(file, 'rb') as f:
            jpeg_bytes = base64.b64encode(f.read()).decode('utf-8')
        predict_request = '{"instances" : [{"b64": "%s"}]}' % jpeg_bytes
    else:
        # Send the image as a normalized float tensor of shape (1, H, W, 3).
        batch = np.expand_dims(image_np / 255.0, 0).tolist()
        predict_request = json.dumps({'instances': batch})
    # Send a few requests to warm up the model.
    for _ in range(3):
        response = requests.post(SERVER_URL, data=predict_request, headers=headers)
        response.raise_for_status()

    # Send the actual requests and report average latency.
    total_time = 0.0
    num_requests = 10
    for _ in range(num_requests):
        response = requests.post(SERVER_URL, data=predict_request, headers=headers)
        response.raise_for_status()
        total_time += response.elapsed.total_seconds()
    prediction = response.json()['predictions'][0]
    top_class = int(np.argmax(prediction))
    top_score = float(np.max(prediction))
    flag = 'above' if top_score >= threshold else 'below'
    print('Prediction class: {} (score {:.3f}, {} threshold {}), avg latency: {:.1f} ms'.format(
        top_class, top_score, flag, threshold, (total_time * 1000) / num_requests))
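
# The Intel-optimized container above also exposes gRPC on port 8500. A
# minimal sketch of the same Predict call over gRPC; it assumes the
# `tensorflow-serving-api` and `grpcio` packages are installed and that the
# model's input tensor is named 'input_1' (verify with saved_model_cli).
def grpc_predict(image_batch, host='localhost:8500'):
    import grpc
    import tensorflow as tf
    from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc

    channel = grpc.insecure_channel(host)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'effi4'
    request.model_spec.signature_name = 'serving_default'
    request.inputs['input_1'].CopyFrom(
        tf.make_tensor_proto(np.asarray(image_batch, dtype=np.float32)))
    return stub.Predict(request, 10.0)  # 10 s timeout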