# File: /home/ekspardev/onarbeni-master/tools/mp_detection_with_saved_model.py
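"""Two-stage inspection pipeline: a SavedModel detector localizes the part in
each image under data/negative and data/possitive, a second SavedModel
classifies the crop as KIRIK (broken) or SAGLAM (intact), and the annotated
image is written under results/<project>/<label>."""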
import glob
import os
import re
import time
import warnings

import cv2
import numpy as np
import tensorflow as tf
from PIL import Image

warnings.filterwarnings('ignore')

from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils

base_path = os.getcwd()
today = time.strftime("%Y-%m-%d")

###
# Paths to the saved models (loaded with tf.saved_model.load below).
models = ['{}/models/detect/0'.format(base_path),    # detection model v0
          '{}/models/classif/1'.format(base_path)]   # classification model v1
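# each entry is a TF SavedModel export directory (saved_model.pb + variables/)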
### Previous constants ###
PATH_TO_SAVED_MODEL_DETECTION = models[0]
LABEL_PATH = '{}/models/detect/0/label_map.pbtxt'.format(base_path)
PATH_TO_SAVED_MODEL_CLASSIFICATION = models[1]
DETECTION_THRESHOLD = 0.80
ROI = 0.6  # currently unused
#########################################


# Load the saved models and build the detection and classification functions.
detect_fn = tf.saved_model.load(PATH_TO_SAVED_MODEL_DETECTION)
detect_cl = tf.saved_model.load(PATH_TO_SAVED_MODEL_CLASSIFICATION)
print('Done!')

category_index = label_map_util.create_category_index_from_labelmap(LABEL_PATH, use_display_name=True)
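# category_index maps class ids to display names from label_map.pbtxt,
# e.g. {1: {'id': 1, 'name': '...'}}; the actual names depend on the label map.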

# project names, doubling as the data/ and results/ sub-directory names
# (note the 'possitive' spelling, which the data/ directory layout must match)
projects = ['negative', 'possitive']

files = []
for project in projects:
    images_path = '%s/data/%s' % (base_path, project)
    files += glob.glob(images_path + '/*.*')

print('Total images: ', len(files))
for count, image in enumerate(files):
    start = time.monotonic()
    image_np = np.array(Image.open(image))
    # skip empty (all-zero) images
    if not image_np.any():
        continue

    hh, ww, _ = image_np.shape  # height, width, channels (assumes 3-channel input)
    
    # normalize the image name: spaces/brackets -> underscores, collapse
    # repeated underscores, and lower-case the .JPG extension
    image = re.sub(r'\(|\)|\s+', '_', image)
    image = re.sub(r'_{2,}', '_', image)
    image_path = re.sub(r'\.JPG$', '.jpg', image)
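    # only the path string is normalized; the source file on disk keeps its
    # original name, so save_path below uses the cleaned-up spelling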

    # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
    input_tensor = tf.convert_to_tensor(image_np)
    # The model expects a batch of images, so add an axis with `tf.newaxis`.
    input_tensor = input_tensor[tf.newaxis, ...]
    detections = detect_fn(input_tensor)
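    # detections is a dict of batched tensors (detection_boxes, detection_scores,
    # detection_classes, num_detections, ...), sorted by descending score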

    
    score = float(detections['detection_scores'][0][0])
    # proceed only when the top detection clears the confidence threshold
    if score >= DETECTION_THRESHOLD:
        print('Detection time: %.3f ms' % ((time.monotonic() - start) * 1000))
        # All outputs are batched tensors: convert to numpy arrays and take
        # index [0] to drop the batch dimension, keeping only the top 2 detections.
        num_detections = int(detections.pop('num_detections'))
        detections = {key: value[0, :2].numpy()
                      for key, value in detections.items()}
        
        # PIL loads images as RGB; make a BGR copy for OpenCV drawing/saving
        image_np_with_detections = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)

        # boxes are normalized [ymin, xmin, ymax, xmax]; scale to pixel coords
        ymin, xmin, ymax, xmax = detections['detection_boxes'][0]
        xmin, ymin, xmax, ymax = int(xmin * ww), int(ymin * hh), int(xmax * ww), int(ymax * hh)

        # crop the detected region from the RGB image for the classifier
        cropped_image = cv2.resize(image_np[ymin:ymax, xmin:xmax], (160, 160))
        input_cropped = tf.image.convert_image_dtype(cropped_image, tf.float32)
        ## Classify the crop
        # The input needs to be a tensor; convert it using `tf.convert_to_tensor`.
        input_tensor_cl = tf.convert_to_tensor(input_cropped)
        # The model expects a batch of images, so add an axis with `tf.newaxis`.
        input_tensor_cl = input_tensor_cl[tf.newaxis, ...]
        detections_cl = detect_cl(input_tensor_cl)
        
        # a positive classifier score is treated as KIRIK (broken, red box);
        # otherwise SAGLAM (intact, green box); colors are BGR
        color = [0, 0, 255] if detections_cl[0][0] > 0.0 else [0, 255, 0]
        text = 'KIRIK' if detections_cl[0][0] > 0.0 else 'SAGLAM'
        for project in projects:
            if project in image_path:
                save_path = image_path.replace(project, 'results/' + project + '/' + text)

        cv2.rectangle(image_np_with_detections, (xmin, ymin), (xmax, ymax), color, 3)
        cv2.putText(image_np_with_detections, text, (xmin + 20, ymin + 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.5, color, 2)
        # create the results/<project>/<label> directory before writing
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        cv2.imwrite(save_path, image_np_with_detections)
        cv2.imshow('object detection', cv2.resize(image_np_with_detections, (800, 600)))

        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break out of the loop
        if key == ord("q"):
            break
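
# close the preview window once the loop finishes or is interrupted
cv2.destroyAllWindows()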
        
        
# So far the raw outputs work fine without filtering; this helper is kept for
# reference in case only the top detections are needed later.
def filter_detections(detections, threshold):
    # keep only the first two detections in every output tensor
    filtered_detections = {}
    for key, value in detections.items():
        print(key, ' -> ', value.shape)
        if key == 'num_detections':
            filtered_detections[key] = [2]  # only two detections are kept
        elif key in ('detection_anchor_indices', 'detection_classes', 'detection_scores'):
            filtered_detections[key] = value[:, :2]  # shape [batch, 2]
        else:
            filtered_detections[key] = value[:, :2, :]  # shape [batch, 2, ...]
    return filtered_detections
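
# Hypothetical usage sketch (the helper is not called anywhere yet); it expects
# the raw batched outputs from detect_fn, before the [0, :2] slicing above:
#   raw = detect_fn(input_tensor)
#   top2 = filter_detections(raw, DETECTION_THRESHOLD)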