add text detection to ML debug

Abhinav 2022-04-11 16:07:03 +05:30
parent 60e81e9e84
commit f4c9ec12d3

@@ -15,6 +15,7 @@ import { ibExtractFaceImageFromCrop } from 'utils/machineLearning/faceCrop';
 import { FaceCropsRow, FaceImagesRow, ImageBitmapView } from './ImageViews';
 import ssdMobileNetV2Service from 'services/machineLearning/ssdMobileNetV2Service';
 import { DEFAULT_ML_SYNC_CONFIG } from 'constants/machineLearning/config';
+import tesseractService from 'services/machineLearning/tesseractService';
 
 interface MLFileDebugViewProps {
     file: File;
@@ -100,6 +101,12 @@ export default function MLFileDebugView(props: MLFileDebugViewProps) {
         );
         console.log('detectedObjects: ', objectDetections);
 
+        const textDetections = await tesseractService.detectText(
+            imageBitmap,
+            DEFAULT_ML_SYNC_CONFIG.textDetection.minAccuracy
+        );
+        console.log('detectedTexts: ', textDetections);
+
         const mlSyncConfig = await getMLSyncConfig();
         const faceCropPromises = faceDetections.map(async (faceDetection) =>
             arcfaceCropService.getFaceCrop(
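
For reference, below is a minimal sketch of what a tesseract.js-backed detectText helper might look like, assuming tesseract.js v2 (current around the time of this commit). The canvas conversion and the word-level confidence filter are assumptions inferred from the call site above, not the actual tesseractService implementation in this repository.

// Hypothetical sketch, not the actual tesseractService — names and config are assumptions.
import { createWorker } from 'tesseract.js';

async function detectText(imageBitmap: ImageBitmap, minAccuracy: number) {
    // tesseract.js does not accept an ImageBitmap directly, so draw it onto a canvas first.
    const canvas = document.createElement('canvas');
    canvas.width = imageBitmap.width;
    canvas.height = imageBitmap.height;
    canvas.getContext('2d')?.drawImage(imageBitmap, 0, 0);

    // tesseract.js v2 worker lifecycle: load the core, the language data, then initialize.
    const worker = createWorker();
    await worker.load();
    await worker.loadLanguage('eng');
    await worker.initialize('eng');
    try {
        const { data } = await worker.recognize(canvas);
        // Keep only words whose recognition confidence (0-100) meets the configured minimum.
        return data.words.filter((word) => word.confidence >= minAccuracy);
    } finally {
        await worker.terminate();
    }
}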