Inline
This commit is contained in:
parent
5eb21fafbe
commit
eaadc54184
|
@ -1,88 +0,0 @@
|
|||
import { Matrix } from "ml-matrix";
|
||||
import { Point } from "services/face/geom";
|
||||
import { FaceAlignment, FaceDetection } from "services/face/types";
|
||||
import { getSimilarityTransformation } from "similarity-transformation";
|
||||
|
||||
// Target landmark template, in pixel coordinates of a square aligned face of
// side ARCFACE_LANDMARKS_FACE_SIZE. 4 points.
// NOTE(review): these values match the widely used ArcFace 112×112 alignment
// template (eyes / nose / mouth) — confirm against the reference
// implementation before relying on the point semantics.
const ARCFACE_LANDMARKS = [
    [38.2946, 51.6963],
    [73.5318, 51.5014],
    [56.0252, 71.7366],
    [56.1396, 92.2848],
] as Array<[number, number]>;

// Side length (pixels) of the square face that the template coordinates
// above are defined on.
const ARCFACE_LANDMARKS_FACE_SIZE = 112;

// 5-point variant of the landmark template, used when the detector emits 5
// landmarks (see faceAlignment). Presumably eyes, nose tip, and both mouth
// corners — verify against the detector's landmark ordering.
const ARC_FACE_5_LANDMARKS = [
    [38.2946, 51.6963],
    [73.5318, 51.5014],
    [56.0252, 71.7366],
    [41.5493, 92.3655],
    [70.7299, 92.2041],
] as Array<[number, number]>;
|
||||
|
||||
/**
|
||||
* Compute and return an {@link FaceAlignment} for the given face detection.
|
||||
*
|
||||
* @param faceDetection A geometry indicating a face detected in an image.
|
||||
*/
|
||||
export const faceAlignment = (faceDetection: FaceDetection): FaceAlignment => {
|
||||
const landmarkCount = faceDetection.landmarks.length;
|
||||
return getFaceAlignmentUsingSimilarityTransform(
|
||||
faceDetection,
|
||||
normalizeLandmarks(
|
||||
landmarkCount === 5 ? ARC_FACE_5_LANDMARKS : ARCFACE_LANDMARKS,
|
||||
ARCFACE_LANDMARKS_FACE_SIZE,
|
||||
),
|
||||
);
|
||||
};
|
||||
|
||||
/**
 * Derive a {@link FaceAlignment} by computing the similarity transform
 * (rotation + uniform scale + translation) that maps the detected landmarks
 * onto the given target landmark positions.
 *
 * @param faceDetection The detection whose landmarks drive the alignment.
 * @param alignedLandmarks Target landmark positions — assumed normalized to
 * the unit square by the caller (see the `.sub(0.5)` below); TODO confirm.
 */
function getFaceAlignmentUsingSimilarityTransform(
    faceDetection: FaceDetection,
    alignedLandmarks: Array<[number, number]>,
): FaceAlignment {
    // Use only as many detected landmarks as the template has points. Both
    // matrices are transposed to 2×N (row 0 = x, row 1 = y), the layout the
    // similarity solver expects.
    const landmarksMat = new Matrix(
        faceDetection.landmarks
            .map((p) => [p.x, p.y])
            .slice(0, alignedLandmarks.length),
    ).transpose();
    const alignedLandmarksMat = new Matrix(alignedLandmarks).transpose();

    const simTransform = getSimilarityTransformation(
        landmarksMat,
        alignedLandmarksMat,
    );

    // Fold rotation and scale into a single 2×2 linear part (RS), then append
    // the translation column (TR) to form a 3×3 affine matrix in homogeneous
    // coordinates.
    const RS = Matrix.mul(simTransform.rotation, simTransform.scale);
    const TR = simTransform.translation;

    const affineMatrix = [
        [RS.get(0, 0), RS.get(0, 1), TR.get(0, 0)],
        [RS.get(1, 0), RS.get(1, 1), TR.get(1, 0)],
        [0, 0, 1],
    ];

    // The transform maps image space into the normalized template space, so
    // the face's side length in image space is the inverse of the scale.
    const size = 1 / simTransform.scale;
    // Recover the face center in image coordinates: offset of the template
    // center (0.5, 0.5) from the target-space mean, scaled back to image
    // space and applied to the source-space mean.
    const meanTranslation = simTransform.toMean.sub(0.5).mul(size);
    const centerMat = simTransform.fromMean.sub(meanTranslation);
    const center = new Point(centerMat.get(0, 0), centerMat.get(1, 0));
    // In-plane rotation angle (radians) extracted from the rotation matrix.
    const rotation = -Math.atan2(
        simTransform.rotation.get(0, 1),
        simTransform.rotation.get(0, 0),
    );

    return {
        affineMatrix,
        center,
        size,
        rotation,
    };
}
|
||||
|
||||
function normalizeLandmarks(
|
||||
landmarks: Array<[number, number]>,
|
||||
faceSize: number,
|
||||
): Array<[number, number]> {
|
||||
return landmarks.map((landmark) =>
|
||||
landmark.map((p) => p / faceSize),
|
||||
) as Array<[number, number]>;
|
||||
}
|
|
@ -1,32 +0,0 @@
|
|||
import { Box, enlargeBox } from "services/face/geom";
|
||||
import { FaceCrop, FaceDetection } from "services/face/types";
|
||||
import { cropWithRotation } from "utils/image";
|
||||
import { faceAlignment } from "./align";
|
||||
|
||||
export const getFaceCrop = (
|
||||
imageBitmap: ImageBitmap,
|
||||
faceDetection: FaceDetection,
|
||||
): FaceCrop => {
|
||||
const alignment = faceAlignment(faceDetection);
|
||||
|
||||
const padding = 0.25;
|
||||
const maxSize = 256;
|
||||
|
||||
const alignmentBox = new Box({
|
||||
x: alignment.center.x - alignment.size / 2,
|
||||
y: alignment.center.y - alignment.size / 2,
|
||||
width: alignment.size,
|
||||
height: alignment.size,
|
||||
}).round();
|
||||
const scaleForPadding = 1 + padding * 2;
|
||||
const paddedBox = enlargeBox(alignmentBox, scaleForPadding).round();
|
||||
const faceImageBitmap = cropWithRotation(imageBitmap, paddedBox, 0, {
|
||||
width: maxSize,
|
||||
height: maxSize,
|
||||
});
|
||||
|
||||
return {
|
||||
image: faceImageBitmap,
|
||||
imageBox: paddedBox,
|
||||
};
|
||||
};
|
|
@ -4,18 +4,24 @@ import { faceAlignment } from "services/face/align";
|
|||
import mlIDbStorage from "services/face/db";
|
||||
import { detectFaces, getRelativeDetection } from "services/face/detect";
|
||||
import { faceEmbeddings, mobileFaceNetFaceSize } from "services/face/embed";
|
||||
import { Box, enlargeBox } from "services/face/geom";
|
||||
import {
|
||||
DetectedFace,
|
||||
Face,
|
||||
FaceCrop,
|
||||
FaceDetection,
|
||||
MLSyncFileContext,
|
||||
type FaceAlignment,
|
||||
type MlFileData,
|
||||
} from "services/face/types";
|
||||
import { defaultMLVersion } from "services/machineLearning/machineLearningService";
|
||||
import type { EnteFile } from "types/file";
|
||||
import { imageBitmapToBlob, warpAffineFloat32List } from "utils/image";
|
||||
import {
|
||||
cropWithRotation,
|
||||
imageBitmapToBlob,
|
||||
warpAffineFloat32List,
|
||||
} from "utils/image";
|
||||
import { detectBlur } from "./blur";
|
||||
import { getFaceCrop } from "./crop";
|
||||
import {
|
||||
fetchImageBitmap,
|
||||
fetchImageBitmapForContext,
|
||||
|
@ -185,6 +191,34 @@ export const saveFaceCrop = async (imageBitmap: ImageBitmap, face: Face) => {
|
|||
return blob;
|
||||
};
|
||||
|
||||
/**
 * Extract a padded, square crop of the face from the given image.
 *
 * @param imageBitmap The full image containing the face.
 * @param faceDetection The detection locating the face within the image.
 * @returns The cropped face bitmap together with the padded region of the
 * original image it was taken from.
 */
export const getFaceCrop = (
    imageBitmap: ImageBitmap,
    faceDetection: FaceDetection,
): FaceCrop => {
    const alignment = faceAlignment(faceDetection);

    // Fraction of the face size added on each side, and the output
    // dimensions of the crop.
    const padding = 0.25;
    const maxSize = 256;

    // Square box centered on the aligned face.
    const alignmentBox = new Box({
        x: alignment.center.x - alignment.size / 2,
        y: alignment.center.y - alignment.size / 2,
        width: alignment.size,
        height: alignment.size,
    }).round();
    // `padding` on both sides → scale by 1 + 2 * padding.
    const scaleForPadding = 1 + padding * 2;
    const paddedBox = enlargeBox(alignmentBox, scaleForPadding).round();
    // Crop without rotation (angle 0), resampled to maxSize × maxSize.
    const faceImageBitmap = cropWithRotation(imageBitmap, paddedBox, 0, {
        width: maxSize,
        height: maxSize,
    });

    return {
        image: faceImageBitmap,
        imageBox: paddedBox,
    };
};
|
||||
|
||||
export const regenerateFaceCrop = async (faceID: string) => {
|
||||
const fileID = Number(faceID.split("-")[0]);
|
||||
const personFace = await mlIDbStorage.getFace(fileID, faceID);
|
||||
|
|
|
@ -19,7 +19,7 @@ import {
|
|||
import { getLocalFiles } from "services/fileService";
|
||||
import { EnteFile } from "types/file";
|
||||
import { isInternalUserForML } from "utils/user";
|
||||
import { regenerateFaceCrop } from "../face/f-index";
|
||||
import { indexFaces, regenerateFaceCrop } from "../face/f-index";
|
||||
|
||||
/**
|
||||
* TODO-ML(MR): What and why.
|
||||
|
|
Loading…
Reference in a new issue