Source code for luna_handlers.app.handlers.detector_handler

""" Detector Handler

This module implements the detector handler.
"""
from typing import Awaitable, List, Union

from sanic.response import HTTPResponse

from app.api_sdk_adaptors.base import ISO_FACE_ESTIMATION_TARGETS, LoopEstimationsAlwaysOn
from app.api_sdk_adaptors.detector import APISDKDetectorAdaptor
from app.api_sdk_adaptors.orientation import handleImageOrientation
from app.handlers.base_handler import BaseHandlerWithMultipart
from app.handlers.custom_query_getters import detectorImageTypeGetter, int0180Getter, multifacePolicyGetter
from classes.image_meta import InputImageData
from classes.multipart_processing import DetectorMultipartProcessor
from classes.schemas.detector import Detector
from crutches_on_wheels.errors.errors import ErrorInfo
from crutches_on_wheels.errors.exception import VLException
from crutches_on_wheels.monitoring.points import monitorTime
from crutches_on_wheels.web.query_getters import boolFrom01Getter, int01Getter, uuidGetter
from sdk.sdk_loop.enums import LoopEstimations, MultifacePolicy
from sdk.sdk_loop.models.image import ImageType
from sdk.sdk_loop.task import HandlersTask
from sdk.sdk_loop.tasks.filters import FaceDetectionFilters, Filters
from sdk.sdk_loop.tasks.task import TaskParams


class DetectorHandler(BaseHandlerWithMultipart):
    """
    Handler for detecting faces on images.

    Resource: "/{api_version}/detector"
    """

    # Query parameter name -> estimation enabled when "<name>=1" is passed.
    # Order mirrors the public API documentation; each entry is optional.
    _OPTIONAL_TARGETS = (
        ("detect_landmarks68", LoopEstimations.faceLandmarks68),
        ("estimate_head_pose", LoopEstimations.headPose),
        ("estimate_gaze", LoopEstimations.gaze),
        ("estimate_eyes_attributes", LoopEstimations.eyes),
        ("estimate_mouth_attributes", LoopEstimations.mouthAttributes),
        ("estimate_emotions", LoopEstimations.emotions),
        ("estimate_mask", LoopEstimations.mask),
        ("estimate_quality", LoopEstimations.faceWarpQuality),
        ("extract_exif", LoopEstimations.exif),
    )

    async def getDataFromMultipart(self, imageType: ImageType = ImageType.IMAGE) -> List[InputImageData]:
        """
        Get input image data from a multipart request body.

        Description see :func:`~BaseHandlerWithMultipart.getDataFromMultipart`.

        Args:
            imageType: image type to assign to the extracted images

        Returns:
            list of input image data extracted from the multipart request
        """
        dataFromRequest = await DetectorMultipartProcessor().getData(self.request)
        return self._getDataFromMultipart(dataFromRequest.images, imageType)

    def _getImagesFromSamples(
        self, inputJson: dict, imageType: Union[ImageType, None], defaultDetectTime: str
    ) -> Awaitable[List[InputImageData]]:
        """
        Stub unknown image type for face samples.

        If the caller did not specify an image type, samples are assumed to be face warps.
        """
        return super()._getImagesFromSamples(
            inputJson=inputJson,
            imageType=ImageType.FACE_WARP if imageType is None else imageType,
            defaultDetectTime=defaultDetectTime,
        )

    def _getImagesFromUrls(
        self, inputJson: dict, imageType: Union[ImageType, None], defaultDetectTime: str
    ) -> Awaitable[List[InputImageData]]:
        """
        Stub unknown image type for urls.

        If the caller did not specify an image type, urls are assumed to point at raw images.
        """
        return super()._getImagesFromUrls(
            inputJson=inputJson,
            imageType=ImageType.IMAGE if imageType is None else imageType,
            defaultDetectTime=defaultDetectTime,
        )

    def _collectSdkTargets(self) -> set:
        """
        Collect the SDK estimation targets requested via query parameters.

        Face detection and 5-point landmarks are always on; the rest are enabled
        by their corresponding "<name>=1" query parameters.

        Returns:
            set of requested estimation targets
        """
        sdkTargets = {LoopEstimations.faceDetection, LoopEstimationsAlwaysOn.faceLandmarks5}
        for paramName, estimation in self._OPTIONAL_TARGETS:
            if self.getQueryParam(paramName, int01Getter, default=0):
                sdkTargets.add(estimation)
        return sdkTargets

    def _buildFaceFilters(self) -> FaceDetectionFilters:
        """
        Build face detection filters from the head pose threshold query parameters.

        Returns:
            face detection filters (thresholds are None when not provided)
        """
        return FaceDetectionFilters(
            rollThreshold=self.getQueryParam("roll_threshold", int0180Getter),
            yawThreshold=self.getQueryParam("yaw_threshold", int0180Getter),
            pitchThreshold=self.getQueryParam("pitch_threshold", int0180Getter),
        )

    async def post(self) -> HTTPResponse:
        """
        Detect faces on images.

        See `spec_detector`_.

        .. _spec_detector:
            _static/api.html#operation/detectFaces

        Returns:
            response with succeeded processed images and failed processed images

        Raises:
            VLException(status 400, non-critical): if the sdk task finished with an error
        """
        self.accountId = self.getQueryParam("account_id", uuidGetter, require=True)

        sdkTargets = self._collectSdkTargets()

        areFaceQualityChecksEnabled = bool(self.getQueryParam("estimate_face_quality", int01Getter, default=0))
        if areFaceQualityChecksEnabled:
            # ISO face quality checks are a licensed feature.
            self.assertLicence(iso=True)

        faceFilters = self._buildFaceFilters()
        if faceFilters.rollFilter or faceFilters.yawFilter or faceFilters.pitchFilter:
            # Head pose estimation is required to apply any of the pose filters.
            sdkTargets.add(LoopEstimations.headPose)

        multifacePolicy = self.getQueryParam(
            "multiface_policy", multifacePolicyGetter, default=MultifacePolicy.allowed
        )
        params = TaskParams(
            targets=(sdkTargets | ISO_FACE_ESTIMATION_TARGETS) if areFaceQualityChecksEnabled else sdkTargets,
            filters=Filters(faceDetection=faceFilters),
            multifacePolicy=multifacePolicy,
            useExifInfo=self.getQueryParam("use_exif_info", boolFrom01Getter, default=True),
            autoRotation=self.config.useAutoRotation,
        )

        imageType = self.getQueryParam("warped_image", detectorImageTypeGetter)
        with monitorTime(self.request.dataForMonitoring, "download_images_time"):
            inputData = await self.getInputEstimationData(self.request, imageType=imageType, validationModel=Detector)

        task = HandlersTask(data=[metaImage.image for metaImage in inputData], params=params)
        await task.execute()
        if task.result.error:
            raise VLException(ErrorInfo.fromDict(task.result.error.asDict()), 400, isCriticalError=False)
        if self.config.useAutoRotation:
            handleImageOrientation(task.result.images)

        detectorAdaptor = APISDKDetectorAdaptor(
            estimationTargets=sdkTargets,
            accountId=self.accountId,
            sampleStore=self.luna3Client.lunaFaceSamplesStore,
            sampleBucket=self.config.faceSamplesStorage.bucket,
            areFaceQualityChecksEnabled=areFaceQualityChecksEnabled,
        )
        result, monitoringData = await detectorAdaptor.buildResult(
            task.result, meta=[metaImage.meta for metaImage in inputData], sdkTargets=sdkTargets
        )
        self.handleMonitoringData(monitoringData)
        return self.success(201, outputJson=result)