# File: /home/ekspardev/onarbeni-master/onar/views.py
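"""Django REST views for the onar app.

UploadFileView.post receives an uploaded photo, runs the detection SavedModel
to locate the phone, crops the detected region to 160x160, runs the
classification SavedModel to label it KIRIK (broken) or SAGLAM (intact),
draws the box and label on the image, saves it under static/, and returns a
JSON response with the saved path, public link and label.
"""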
from django.shortcuts import render, get_object_or_404
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.http import HttpResponse, JsonResponse

from rest_framework.decorators import api_view, renderer_classes
from rest_framework.renderers import JSONRenderer, TemplateHTMLRenderer, BaseRenderer
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.decorators import parser_classes
# image handling and ML model dependencies
import cv2
from PIL import Image
import io
import tensorflow as tf
import numpy as np
import time 
import glob
import os
import re
import warnings
from .forms import UploadFileForm
warnings.filterwarnings('ignore')
base_path = os.getcwd()
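# Caveat (comment only, no behavior change): os.getcwd() reflects the working directory of the
# process that imported this module, which under a WSGI/ASGI server is not necessarily the
# project root. All model and static paths below depend on it.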

from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils
import socket


# Paths to the exported detection and classification SavedModels.
models = ['{}/models/detect/0'.format(base_path),    # detection model v0
          '{}/models/classif/1'.format(base_path)]   # classification model v1

### Model constants ###
PATH_TO_SAVED_MODEL_DETECTION = models[0]
LABEL_PATH = '{}/models/detect/0/label_map.pbtxt'.format(base_path)
PATH_TO_SAVED_MODEL_CLASSIFICATION = models[1]
DETECTION_THRESHOLD = 0.80   # minimum score for a detection to be accepted
ROI = 0.6

# Load both SavedModels once at import time so every request reuses the warm models.
detect_fn = tf.saved_model.load(PATH_TO_SAVED_MODEL_DETECTION)
classy_fn = tf.saved_model.load(PATH_TO_SAVED_MODEL_CLASSIFICATION)
print('Done loading !')
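# Optional warm-up (a sketch, not part of the original flow): one dummy inference here would pay
# the tf.function tracing cost at startup instead of on the first request. The 320x320 size is an
# assumption; TF object-detection SavedModels accept variable-sized uint8 batches.
# _ = detect_fn(tf.zeros([1, 320, 320, 3], dtype=tf.uint8))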

   
@method_decorator(csrf_exempt, name='dispatch')
class UploadFileView(APIView):
    renderer_classes = (TemplateHTMLRenderer, JSONRenderer)

    def post(self, request, *args, **kwargs):
        print('Handling upload request...')
        domain = request.get_host()
        print('Current site domain:', domain, request.FILES)

        if 'file' not in request.FILES:
            return JsonResponse({'status': 'No file uploaded'})
        file = request.FILES['file']
        print('file:', file.name, file.size, file.content_type, file.charset,
              file.content_type_extra)
        img = file.read()

        # PIL decodes the upload as RGB; force 3 channels so both models get the expected input.
        img_np = np.array(Image.open(io.BytesIO(img)).convert('RGB'))
        if not img_np.any():
            print('empty image')
            return JsonResponse({'status': 'Empty image'})
        hh, ww, _ = img_np.shape  # numpy shape is (height, width, channels)
        print(img_np.shape)
        
        # The detector expects a batched uint8 tensor of shape (1, height, width, 3).
        input_tensor = tf.convert_to_tensor(img_np)
        input_tensor = input_tensor[tf.newaxis, ...]

        start = time.monotonic()
        detections = detect_fn(input_tensor)
        print('Detection time: %.3f ms' % ((time.monotonic() - start) * 1000))

        # Keep only the highest-scoring detection for each output and drop the batch dimension.
        num_detections = int(detections.pop('num_detections'))
        detections = {key: value[0, :1].numpy()
                      for key, value in detections.items()}

        # Accept the detection only if its score clears DETECTION_THRESHOLD.
        if num_detections > 0 and detections['detection_scores'][0] >= DETECTION_THRESHOLD:
            # detection_boxes are normalized [ymin, xmin, ymax, xmax]; scale to pixel coordinates.
            ymin, xmin, ymax, xmax = detections['detection_boxes'][0]
            xmin, ymin, xmax, ymax = int(xmin * ww), int(ymin * hh), int(xmax * ww), int(ymax * hh)
            print(xmin, ymin, xmax, ymax)
            # Crop the detected region from the original RGB image and resize to 160x160 for the classifier.
            cropted_image = cv2.resize(img_np[ymin:ymax, xmin:xmax], (160, 160))
            input_cropted = tf.image.convert_image_dtype(cropted_image, tf.float32)
            # The classifier also expects a batched tensor, so add a batch axis with tf.newaxis.
            input_tensor_cl = tf.convert_to_tensor(input_cropted)
            input_tensor_cl = input_tensor_cl[tf.newaxis, ...]
            detections_cl = classy_fn(input_tensor_cl)

            # Classifier output > 0 means KIRIK (broken): red box; otherwise SAGLAM (intact): green box.
            # Colors are BGR because the annotated copy below is drawn and saved with OpenCV.
            color = (0, 0, 255) if detections_cl[0][0] > 0.0 else (0, 255, 0)
            text = 'KIRIK' if detections_cl[0][0] > 0.0 else 'SAGLAM'

            # Per-request timestamp so consecutive uploads do not overwrite each other.
            timestamp = time.strftime("%Y%m%d%H%M%S")
            save_path = '{}/static/image/{}.jpg'.format(base_path, timestamp)
            save_path_cropted = '{}/static/cropted/{}.jpg'.format(base_path, timestamp)
            link = '{}/static/image/{}.jpg'.format(domain, timestamp)

            # Draw the box and label on a BGR copy (PIL decodes RGB, OpenCV writes BGR) and save both images.
            image_np_with_detections = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
            cv2.rectangle(image_np_with_detections, (xmin, ymin), (xmax, ymax), color, 3)
            cv2.putText(image_np_with_detections, text, (xmin + 20, ymin + 30), cv2.FONT_HERSHEY_SIMPLEX, 1.5, color, 2)
            cv2.imwrite(save_path, image_np_with_detections)
            cv2.imwrite(save_path_cropted, cv2.cvtColor(cropted_image, cv2.COLOR_RGB2BGR))
            # Return the result: server-side path, public link and classification label.
            result = {'status': 'ok', 'img': save_path, 'link': link, 'text': text}
            print(result)
            return JsonResponse(result)
        else:
            return JsonResponse({'status': 'No phone detected in the image'})
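    # Example client call (a sketch; the exact route is defined in urls.py, which is not shown here,
    # so the '/upload/' path below is an assumption):
    #
    #   curl -X POST -F "file=@phone.jpg" http://<host>/upload/
    #
    # On success the view returns JSON like:
    #   {"status": "ok", "img": "<server path>.jpg",
    #    "link": "<host>/static/image/<timestamp>.jpg", "text": "KIRIK" or "SAGLAM"}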
        
        
def index(request):
    context = {}
    template = 'onar/index.html'
    return render(request, template, context)
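

# URL wiring sketch (an assumption; the real patterns live in the project's urls.py, not shown here):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('upload/', views.UploadFileView.as_view(), name='upload'),
#   ]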