# Source code for luna_handlers.app.handlers.sdk_handler
""" SDK estimator handler"""
from typing import List
import msgpack
from sanic.response import HTTPResponse
from app.api_sdk_adaptors.orientation import handleImageOrientation
from app.api_sdk_adaptors.sdk_adaptor import APISDKAdaptor
from app.handlers.base_handler import BaseHandlerWithMultipart
from classes.image_meta import InputImageData
from classes.multipart_processing import SDKMultipartProcessor
from classes.query_schemas.sdk import SDKQueries
from classes.schemas.sdk import SDKInputEstimationsModel
from crutches_on_wheels.cow.errors.errors import ErrorInfo
from crutches_on_wheels.cow.errors.exception import VLException
from crutches_on_wheels.cow.monitoring.points import monitorTime
from sdk.sdk_loop.enums import LoopEstimations
from sdk.sdk_loop.models.image import ImageType
from sdk.sdk_loop.task import HandlersTask
from sdk.sdk_loop.tasks.filters import FaceDetectionFilters, Filters
from sdk.sdk_loop.tasks.task import LivenessV1Params, TaskEstimationParams, TaskParams
class SDKHandler(BaseHandlerWithMultipart):
    """
    SDK estimator handler
    Resource: "/{api_version}/sdk"
    """

    async def getDataFromMultipart(self, imageType: ImageType = ImageType.IMAGE) -> List[InputImageData]:
        """Description see :func:`~BaseHandlerWithMultipart.getDataFromMultipart`."""
        multipartData = await SDKMultipartProcessor().getData(self.request)
        return self._getDataFromMultipart(multipartData.images, imageType)

    async def post(self) -> HTTPResponse:
        """
        SDK estimations handler. See `spec sdk`_.

        .. _`spec sdk`:
            _static/api.html#operation/sdk

        Returns:
            Response with estimations
        """
        queries: SDKQueries = self.loadDataFromQuery(SDKQueries)

        # Face-detection filters built from the request's query thresholds and states.
        detectionFilters = FaceDetectionFilters(
            yawThreshold=queries.yawThreshold,
            pitchThreshold=queries.pitchThreshold,
            rollThreshold=queries.rollThreshold,
            gcThreshold=queries.scoreThreshold or None,  # ignore 0.0
            maskStates=queries.maskStates,
            livenessStates=queries.livenessStates,
        )
        # Estimator configuration comes from service config, not from the request.
        estimatorParams = TaskEstimationParams(
            livenessv1=LivenessV1Params(
                qualityThreshold=self.config.livenessSettings.qualityThreshold,
                scoreThreshold=self.config.livenessSettings.realThreshold,
            ),
            faceDescriptorVersion=self.config.defaultFaceDescriptorVersion,
            bodyDescriptorVersion=self.config.defaultHumanDescriptorVersion,
        )
        taskParams = TaskParams(
            targets=queries.targets,
            filters=Filters(faceDetection=detectionFilters),
            estimatorsParams=estimatorParams,
            multifacePolicy=queries.multifacePolicy,
            useExifInfo=queries.useExifInfo,
            autoRotation=self.config.useAutoRotation,
            aggregate=bool(queries.aggregateAttributes),
        )

        with monitorTime(self.request.dataForMonitoring, "load_images_for_processing_time"):
            estimationData = await self.getInputEstimationData(
                self.request, imageType=queries.imageType, validationModel=SDKInputEstimationsModel
            )

        # Liveness license is needed either for the explicit target or for a liveness-based filter.
        livenessRequested = LoopEstimations.livenessV1 in queries.targets or detectionFilters.livenessFilter
        self.assertLicence(
            liveness=livenessRequested,
            bodyAttributes=LoopEstimations.bodyAttributes in queries.targets,
        )

        task = HandlersTask(data=[item.image for item in estimationData], params=taskParams)
        await task.execute()
        if task.result.error:
            raise VLException(ErrorInfo.fromDict(task.result.error.asDict()), 400, isCriticalError=False)
        if self.config.useAutoRotation:
            handleImageOrientation(task.result.images)

        responseContentType = self.getResponseContentType()
        isMsgpackResponse = responseContentType == "application/msgpack"
        sdkAdaptor = APISDKAdaptor(
            responseEstimationsTargets=queries.targets,
            aggregationEnabled=taskParams.aggregate,
            # msgpack carries raw bytes natively, so base64 encoding is only for JSON.
            encodeBytesToBase64=not isMsgpackResponse,
        )
        result, monitoringData = await sdkAdaptor.buildResult(
            task.result, meta=[item.meta for item in estimationData]
        )
        self.countLivenessEstimationsPerformed(monitoringData.sdkUsages.livenessEstimator)
        self.handleMonitoringData(monitoringData)

        if isMsgpackResponse:
            body = msgpack.packb(result, use_bin_type=True)
            return self.success(200, body=body, contentType="application/msgpack")
        return self.success(200, outputJson=result)