""" SDK estimator handler"""
from typing import Union, List
import msgpack
from lunavl.sdk.estimators.face_estimators.livenessv1 import LivenessPrediction
from lunavl.sdk.estimators.face_estimators.mask import MaskState
from sanic.response import HTTPResponse
from werkzeug.http import parse_accept_header
from app.api_sdk_adaptors.sdk_adaptor import APISDKAdaptor
from app.global_vars.enums import ImageType
from app.handlers.base_handler import BaseHandlerWithMultipart
from app.handlers.custom_query_getters import (
    multifacePolicyGetter,
    int0180Getter,
    maskStatesValidator,
    livenessStatesValidator,
)
from classes.multipart_processing import SDKMultipartProcessor
from classes.schemas.sdk import SDKInputEstimationsModel
from configs.config import LIVENESS_V2_QUALITY_THRESHOLD
from crutches_on_wheels.enums.attributes import Liveness
from crutches_on_wheels.monitoring.points import monitorTime
from crutches_on_wheels.web.query_getters import int01Getter, float01Getter, boolFrom01Getter
from sdk.sdk_loop.enums import MultifacePolicy
from sdk.sdk_loop.estimation_targets import (
    SDKFaceEstimationTargets,
    SDKEstimationTargets,
    SDKHumanEstimationTargets,
    SDKLivenessEstimationPolicy,
)
from sdk.sdk_loop.sdk_task import SDKTask, SDKTaskFilters, SDKDetectableImage, FaceWarp


class SDKHandler(BaseHandlerWithMultipart):
    """
    SDK estimator handler.

    Resource: "/{api_version}/sdk"
    """

    async def getDataFromMultipart(
        self, imageType: ImageType = ImageType.rawImage
    ) -> Union[List[SDKDetectableImage], List[FaceWarp]]:
        """See :func:`~BaseHandlerWithMultipart.getDataFromMultipart` for the description."""
        dataFromRequest = await SDKMultipartProcessor().getData(self.request)
        estimationDataFromMultiPart = self._getDataFromMultipart(dataFromRequest.images, imageType)
        return estimationDataFromMultiPart

    async def post(self) -> HTTPResponse:
        """
        SDK estimations handler. See `spec sdk`_.

        .. _`spec sdk`:
            _static/api.html#operation/sdk

        Returns:
            Response with estimations.
        """
        detectFace = self.getQueryParam("detect_face", int01Getter, default=0)
        detectBody = self.getQueryParam("detect_body", int01Getter, default=0)
        estimateHeadPose = self.getQueryParam("estimate_head_pose", int01Getter, default=0)
        estimateLandmarks68 = self.getQueryParam("estimate_landmarks68", int01Getter, default=0)
        estimateLandmarks5 = self.getQueryParam("estimate_landmarks5", int01Getter, default=0)
        estimateQuality = self.getQueryParam("estimate_quality", int01Getter, default=0)
        estimateGaze = self.getQueryParam("estimate_gaze", int01Getter, default=0)
        estimateEyesAttributes = self.getQueryParam("estimate_eyes_attributes", int01Getter, default=0)
        estimateMouthAttributes = self.getQueryParam("estimate_mouth_attributes", int01Getter, default=0)
        estimateEmotions = self.getQueryParam("estimate_emotions", int01Getter, default=0)
        estimateMask = self.getQueryParam("estimate_mask", int01Getter, default=0)
        estimateGlasses = self.getQueryParam("estimate_glasses", int01Getter, default=0)
        estimateLiveness = self.getQueryParam("estimate_liveness", int01Getter, default=0)
        multifacePolicy = self.getQueryParam("multiface_policy", multifacePolicyGetter, default=MultifacePolicy.allowed)
        extractFaceDescriptor = self.getQueryParam("estimate_face_descriptor", int01Getter, default=0)
        extractBasicAttributes = self.getQueryParam("estimate_basic_attributes", int01Getter, default=0)
        extractHumanDescriptor = self.getQueryParam("estimate_body_descriptor", int01Getter, default=0)
        aggregate = self.getQueryParam("aggregate_attributes", int01Getter, default=0)
        imageType = self.getQueryParam("image_type", lambda x: ImageType(int(x)), default=ImageType.rawImage)
        estimateHumanWarp = self.getQueryParam("estimate_body_warp", int01Getter, default=0)
        estimateFaceWarp = self.getQueryParam("estimate_face_warp", int01Getter, default=0)
        pitchThreshold = self.getQueryParam("pitch_threshold", int0180Getter, default=None)
        rollThreshold = self.getQueryParam("roll_threshold", int0180Getter, default=None)
        yawThreshold = self.getQueryParam("yaw_threshold", int0180Getter, default=None)
        scoreThreshold = self.getQueryParam("score_threshold", float01Getter, default=None)
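        # Optional state filters are passed as lists of integer codes and mapped to SDK enum values.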
        maskStates = self.getQueryParam("mask_states", maskStatesValidator, default=None)
        if maskStates:
            maskStates = [MaskState(int(x)) for x in maskStates]
        livenessStates = self.getQueryParam("liveness_states", livenessStatesValidator, default=None)
        if livenessStates:
            livenessStates = [LivenessPrediction(Liveness(int(x)).name) for x in livenessStates]
        autoOrient = self.getQueryParam("use_exif_info", boolFrom01Getter, default=True)
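        # Collect filters applied to detections: head pose thresholds, garbage score, mask and liveness states.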
        filters = SDKTaskFilters(
            yawThreshold=yawThreshold,
            pitchThreshold=pitchThreshold,
            rollThreshold=rollThreshold,
            garbageScoreThreshold=scoreThreshold,
            maskStates=maskStates,
            livenessStates=livenessStates,
        )
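        # Load and validate input images from the request, timing the step for monitoring.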
        with monitorTime(self.request.dataForMonitoring, "load_images_for_processing_time"):
            inputData = await self.getInputEstimationData(
                self.request, imageType=imageType, validationModel=SDKInputEstimationsModel, autoOrient=autoOrient
            )
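        # Face estimation targets; AGS is never requested here, and liveness is configured with a quality threshold.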
        faceTargets = SDKFaceEstimationTargets(
            estimateQuality=estimateQuality,
            estimateMouthAttributes=estimateMouthAttributes,
            estimateAGS=0,
            estimateGaze=estimateGaze,
            estimateEyesAttributes=estimateEyesAttributes,
            estimateEmotions=estimateEmotions,
            estimateHeadPose=estimateHeadPose,
            estimateFaceDescriptor=extractFaceDescriptor,
            estimateBasicAttributes=extractBasicAttributes,
            estimateMask=estimateMask,
            estimateGlasses=estimateGlasses,
            estimateLiveness=SDKLivenessEstimationPolicy(
                estimate=estimateLiveness, qualityThreshold=LIVENESS_V2_QUALITY_THRESHOLD
            ),
        )
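        # Liveness estimation is subject to licensing; verify it before running the task.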
        self.checkLivenessEstimationLicensing(faceTargets.estimateLiveness.estimate)
        humanTargets = SDKHumanEstimationTargets(estimateHumanDescriptor=extractHumanDescriptor)
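        # Face processing is required if any face detection, warp, landmark or face attribute target is requested.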
        estimateFace = int(
            any((estimateLandmarks68, estimateLandmarks5, detectFace, estimateFaceWarp, not faceTargets.isEmpty()))
        )
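        # Filtering by garbage score requires the face descriptor; filtering by pose angles requires head pose.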
        if scoreThreshold is not None:
            faceTargets.estimateFaceDescriptor = 1
        if filters.needFilterByAngles():
            faceTargets.estimateHeadPose = 1
        estimateHuman = int(detectBody or estimateHumanWarp or not humanTargets.isEmpty())
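        # Bundle face and body targets and build the processing task with its filters and multiface policy.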
        toEstimate = SDKEstimationTargets(
            estimateHuman=estimateHuman,
            faceEstimationTargets=faceTargets,
            estimateFace=estimateFace,
            humanEstimationTargets=humanTargets,
        )
        task = SDKTask(
            toEstimate, data=inputData, filters=filters, multifacePolicy=multifacePolicy, aggregateAttributes=aggregate
        )
        detector = APISDKAdaptor(accountId="", logger=self.logger, sdkLoop=self.sdkLoop)
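        # Negotiate the response content type: msgpack carries raw bytes, while JSON needs base64-encoded binaries.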
        acceptHeader = self.request.headers.get("Accept", "application/json")
        responseContentType = parse_accept_header(acceptHeader).best_match(
            ("application/msgpack", "application/json"), "application/json"
        )
        encodeToBase64 = responseContentType != "application/msgpack"
        resTask, monitoringData = await detector.estimate(
            task,
            estimateLandmarks68,
            estimateHumanWarp=estimateHumanWarp,
            estimateFaceWarp=estimateFaceWarp,
            estimateLandmarks5=estimateLandmarks5,
            estimateHeadPose=estimateHeadPose,
            estimateFaceDescriptor=extractFaceDescriptor,
            encodeBytesToBase64=encodeToBase64,
        )
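        # Every successfully processed image counts as one performed liveness estimation.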
        if faceTargets.estimateLiveness.estimate:
            self.countLivenessEstimationsPerformed(sum(1 for image in task.images if not image.error))
        self.handleMonitoringData(monitoringData)
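        # Serialize the result according to the negotiated content type.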
        if responseContentType == "application/msgpack":
            body = msgpack.packb(resTask, use_bin_type=True)
            return self.success(200, body=body, contentType="application/msgpack")
        return self.success(200, outputJson=resTask)