Remove unused face-api files

Shailesh Pandit 2022-01-24 09:08:41 +05:30
parent bce97ce579
commit 868d19b3d0
111 changed files with 5 additions and 3372 deletions

3 file diffs suppressed because one or more lines are too long

@@ -1 +0,0 @@
[{"weights":[{"name":"dense0/conv0/filters","shape":[3,3,3,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.008194216092427571,"min":-0.9423348506291708}},{"name":"dense0/conv0/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006839508168837603,"min":-0.8412595047670252}},{"name":"dense0/conv1/depthwise_filter","shape":[3,3,32,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.009194007106855804,"min":-1.2779669878529567}},{"name":"dense0/conv1/pointwise_filter","shape":[1,1,32,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0036026100317637128,"min":-0.3170296827952067}},{"name":"dense0/conv1/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.000740380117706224,"min":-0.06367269012273527}},{"name":"dense0/conv2/depthwise_filter","shape":[3,3,32,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":1,"min":0}},{"name":"dense0/conv2/pointwise_filter","shape":[1,1,32,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":1,"min":0}},{"name":"dense0/conv2/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0037702228508743585,"min":-0.6220867703942692}},{"name":"dense1/conv0/depthwise_filter","shape":[3,3,32,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0033707996209462483,"min":-0.421349952618281}},{"name":"dense1/conv0/pointwise_filter","shape":[1,1,32,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.014611541991140328,"min":-1.8556658328748217}},{"name":"dense1/conv0/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.002832523046755323,"min":-0.30307996600281956}},{"name":"dense1/conv1/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006593170586754294,"min":-0.6329443763284123}},{"name":"dense1/conv1/pointwise_filter","shape":[1,1,64,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.012215249211180444,"min":-1.6001976466646382}},{"name":"dense1/conv1/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.002384825547536214,"min":-0.3028728445370992}},{"name":"dense1/conv2/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005859645441466687,"min":-0.7617539073906693}},{"name":"dense1/conv2/pointwise_filter","shape":[1,1,64,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.013121426806730382,"min":-1.7845140457153321}},{"name":"dense1/conv2/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0032247188044529336,"min":-0.46435950784122243}},{"name":"dense2/conv0/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.002659512618008782,"min":-0.32977956463308894}},{"name":"dense2/conv0/pointwise_filter","shape":[1,1,64,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.015499923743453681,"min":-1.9839902391620712}},{"name":"dense2/conv0/bias","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0032450980999890497,"min":-0.522460794098237}},{"name":"dense2/conv1/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005911862382701799,"min":-0.792189559282041}},{"name":"dense2/conv1/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.021025861478319356,"min":-2.2077154552235325}},{"name":"dense2/conv1/bias","shape":[128],"dtype":"float32","quantization":{"dtyp
e":"uint8","scale":0.00349616945958605,"min":-0.46149436866535865}},{"name":"dense2/conv2/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.008104994250278847,"min":-1.013124281284856}},{"name":"dense2/conv2/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.029337059282789044,"min":-3.5791212325002633}},{"name":"dense2/conv2/bias","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0038808938334969913,"min":-0.4230174278511721}},{"name":"fc/weights","shape":[128,136],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.014016061670639936,"min":-1.8921683255363912}},{"name":"fc/bias","shape":[136],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0029505149698724935,"min":0.088760145008564}}],"paths":["face_landmark_68_tiny_model-shard1"]}]

2 file diffs suppressed because one or more lines are too long

@@ -1 +0,0 @@
[{"paths":["mtcnn_model-shard1"],"weights":[{"dtype":"float32","name":"pnet/conv1/weights","shape":[3,3,3,10]},{"dtype":"float32","name":"pnet/conv1/bias","shape":[10]},{"dtype":"float32","name":"pnet/prelu1_alpha","shape":[10]},{"dtype":"float32","name":"pnet/conv2/weights","shape":[3,3,10,16]},{"dtype":"float32","name":"pnet/conv2/bias","shape":[16]},{"dtype":"float32","name":"pnet/prelu2_alpha","shape":[16]},{"dtype":"float32","name":"pnet/conv3/weights","shape":[3,3,16,32]},{"dtype":"float32","name":"pnet/conv3/bias","shape":[32]},{"dtype":"float32","name":"pnet/prelu3_alpha","shape":[32]},{"dtype":"float32","name":"pnet/conv4_1/weights","shape":[1,1,32,2]},{"dtype":"float32","name":"pnet/conv4_1/bias","shape":[2]},{"dtype":"float32","name":"pnet/conv4_2/weights","shape":[1,1,32,4]},{"dtype":"float32","name":"pnet/conv4_2/bias","shape":[4]},{"dtype":"float32","name":"rnet/conv1/weights","shape":[3,3,3,28]},{"dtype":"float32","name":"rnet/conv1/bias","shape":[28]},{"dtype":"float32","name":"rnet/prelu1_alpha","shape":[28]},{"dtype":"float32","name":"rnet/conv2/weights","shape":[3,3,28,48]},{"dtype":"float32","name":"rnet/conv2/bias","shape":[48]},{"dtype":"float32","name":"rnet/prelu2_alpha","shape":[48]},{"dtype":"float32","name":"rnet/conv3/weights","shape":[2,2,48,64]},{"dtype":"float32","name":"rnet/conv3/bias","shape":[64]},{"dtype":"float32","name":"rnet/prelu3_alpha","shape":[64]},{"dtype":"float32","name":"rnet/fc1/weights","shape":[576,128]},{"dtype":"float32","name":"rnet/fc1/bias","shape":[128]},{"dtype":"float32","name":"rnet/prelu4_alpha","shape":[128]},{"dtype":"float32","name":"rnet/fc2_1/weights","shape":[128,2]},{"dtype":"float32","name":"rnet/fc2_1/bias","shape":[2]},{"dtype":"float32","name":"rnet/fc2_2/weights","shape":[128,4]},{"dtype":"float32","name":"rnet/fc2_2/bias","shape":[4]},{"dtype":"float32","name":"onet/conv1/weights","shape":[3,3,3,32]},{"dtype":"float32","name":"onet/conv1/bias","shape":[32]},{"dtype":"float32","name":"onet/prelu1_alpha","shape":[32]},{"dtype":"float32","name":"onet/conv2/weights","shape":[3,3,32,64]},{"dtype":"float32","name":"onet/conv2/bias","shape":[64]},{"dtype":"float32","name":"onet/prelu2_alpha","shape":[64]},{"dtype":"float32","name":"onet/conv3/weights","shape":[3,3,64,64]},{"dtype":"float32","name":"onet/conv3/bias","shape":[64]},{"dtype":"float32","name":"onet/prelu3_alpha","shape":[64]},{"dtype":"float32","name":"onet/conv4/weights","shape":[2,2,64,128]},{"dtype":"float32","name":"onet/conv4/bias","shape":[128]},{"dtype":"float32","name":"onet/prelu4_alpha","shape":[128]},{"dtype":"float32","name":"onet/fc1/weights","shape":[1152,256]},{"dtype":"float32","name":"onet/fc1/bias","shape":[256]},{"dtype":"float32","name":"onet/prelu5_alpha","shape":[256]},{"dtype":"float32","name":"onet/fc2_1/weights","shape":[256,2]},{"dtype":"float32","name":"onet/fc2_1/bias","shape":[2]},{"dtype":"float32","name":"onet/fc2_2/weights","shape":[256,4]},{"dtype":"float32","name":"onet/fc2_2/bias","shape":[4]},{"dtype":"float32","name":"onet/fc2_3/weights","shape":[256,10]},{"dtype":"float32","name":"onet/fc2_3/bias","shape":[10]}]}]

2 file diffs suppressed because one or more lines are too long

@@ -1 +0,0 @@
[{"weights":[{"name":"conv0/filters","shape":[3,3,3,16],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.009007044399485869,"min":-1.2069439495311063}},{"name":"conv0/bias","shape":[16],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005263455241334205,"min":-0.9211046672334858}},{"name":"conv1/depthwise_filter","shape":[3,3,16,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.004001977630690033,"min":-0.5042491814669441}},{"name":"conv1/pointwise_filter","shape":[1,1,16,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.013836609615999109,"min":-1.411334180831909}},{"name":"conv1/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0015159862590771096,"min":-0.30926119685173037}},{"name":"conv2/depthwise_filter","shape":[3,3,32,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.002666276225856706,"min":-0.317286870876948}},{"name":"conv2/pointwise_filter","shape":[1,1,32,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.015265831292844286,"min":-1.6792414422128714}},{"name":"conv2/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0020280554598453,"min":-0.37113414915168985}},{"name":"conv3/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006100742489683862,"min":-0.8907084034938438}},{"name":"conv3/pointwise_filter","shape":[1,1,64,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.016276211832083907,"min":-2.0508026908425725}},{"name":"conv3/bias","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.003394414279975143,"min":-0.7637432129944072}},{"name":"conv4/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006716050119961009,"min":-0.8059260143953211}},{"name":"conv4/pointwise_filter","shape":[1,1,128,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.021875603993733724,"min":-2.8875797271728514}},{"name":"conv4/bias","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0041141652009066415,"min":-0.8187188749804216}},{"name":"conv5/depthwise_filter","shape":[3,3,256,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.008423839597141042,"min":-0.9013508368940915}},{"name":"conv5/pointwise_filter","shape":[1,1,256,512],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.030007277283014035,"min":-3.8709387695088107}},{"name":"conv5/bias","shape":[512],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.008402082966823203,"min":-1.4871686851277068}},{"name":"conv8/filters","shape":[1,1,512,25],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.028336129469030042,"min":-4.675461362389957}},{"name":"conv8/bias","shape":[25],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.002268134028303857,"min":-0.41053225912299807}}],"paths":["tiny_face_detector_model-shard1"]}]

@@ -1,101 +0,0 @@
/* eslint no-prototype-builtins: "off", no-case-declarations: "off", no-undef: "off", no-constant-condition: "off", eqeqeq: "off" */
// From: https://github.com/justadudewhohacks/face-api.js/issues/47
// This is needed because face-api.js does not support working in a WebWorker natively
// Updated Dec 1 2020 to work on latest Chrome (tested in WebWorkers on Chrome Mobile on Android / Google Pixel 3 as well)
self.Canvas = self.HTMLCanvasElement = OffscreenCanvas;
// self.HTMLCanvasElement.name = 'HTMLCanvasElement';
// self.Canvas.name = 'Canvas';
self.CanvasRenderingContext2D = OffscreenCanvasRenderingContext2D;
function HTMLImageElement() {}
function HTMLVideoElement() {}
self.Image = HTMLImageElement;
self.Video = HTMLVideoElement;
function Storage() {
let _data = {};
this.clear = function () {
return (_data = {});
};
this.getItem = function (id) {
return _data.hasOwnProperty(id) ? _data[id] : undefined;
};
this.removeItem = function (id) {
return delete _data[id];
};
this.setItem = function (id, val) {
return (_data[id] = String(val));
};
}
class Document extends EventTarget {}
self.document = new Document();
self.window = self.Window = self;
self.localStorage = new Storage();
function createElement(element) {
switch (element) {
case 'canvas':
const canvas = new Canvas(1, 1);
canvas.localName = 'canvas';
canvas.nodeName = 'CANVAS';
canvas.tagName = 'CANVAS';
canvas.nodeType = 1;
canvas.innerHTML = '';
canvas.remove = () => {
console.log('nope');
};
return canvas;
default:
console.log('arg', element);
break;
}
}
document.createElement = createElement;
document.location = self.location;
// These are the same checks face-api.js/isBrowser does
if (typeof window !== 'object') {
console.warn('Check failed: window');
}
if (typeof document === 'undefined') {
console.warn('Check failed: document');
}
if (typeof HTMLImageElement === 'undefined') {
console.warn('Check failed: HTMLImageElement');
}
if (typeof HTMLCanvasElement === 'undefined') {
console.warn('Check failed: HTMLCanvasElement');
}
if (typeof HTMLVideoElement === 'undefined') {
console.warn('Check failed: HTMLVideoElement');
}
if (typeof ImageData === 'undefined') {
console.warn('Check failed: ImageData');
}
if (typeof CanvasRenderingContext2D === 'undefined') {
console.warn('Check failed: CanvasRenderingContext2D');
}
self.window = window;
self.document = document;
self.HTMLImageElement = HTMLImageElement;
self.HTMLVideoElement = HTMLVideoElement;
// These are the same checks face-api.js/isBrowser does
const isBrowserCheck =
typeof window === 'object' &&
typeof document !== 'undefined' &&
typeof HTMLImageElement !== 'undefined' &&
typeof HTMLCanvasElement !== 'undefined' &&
typeof HTMLVideoElement !== 'undefined' &&
typeof ImageData !== 'undefined' &&
typeof CanvasRenderingContext2D !== 'undefined';
if (!isBrowserCheck) {
throw new Error('Failed to monkey patch for face-api, face-api will fail');
}

@@ -1,70 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { gather } from '@tensorflow/tfjs';
import { extractFaces } from 'utils/machineLearning';
import { AlignedFace, FaceEmbedding } from 'types/machineLearning';
import { FaceRecognitionNet } from '../../../thirdparty/face-api/faceRecognitionNet';
class FAPIFaceEmbeddingService {
private faceRecognitionNet: FaceRecognitionNet;
private faceSize: number;
public constructor(faceSize: number = 112) {
this.faceRecognitionNet = new FaceRecognitionNet();
this.faceSize = faceSize;
}
public async init() {
await this.faceRecognitionNet.loadFromUri('/models/face-api/');
console.log(
'loaded faceRecognitionNet: ',
this.faceRecognitionNet,
await tf.getBackend()
);
}
public async getEmbeddingsBatch(faceImagesTensor) {
const embeddings = [];
for (let i = 0; i < faceImagesTensor.shape[0]; i++) {
const face = tf.tidy(() =>
gather(faceImagesTensor, i).expandDims()
);
const embedding =
await this.faceRecognitionNet.computeFaceDescriptor(face);
tf.dispose(face);
embeddings[i] = embedding;
}
return embeddings;
}
public async getEmbeddings(image: tf.Tensor3D, faces: AlignedFace[]) {
if (!faces || faces.length < 1) {
return {
embeddings: [],
faceImages: [],
};
}
const boxes = faces.map((f) => f.detection.box);
const faceImagesTensor = extractFaces(image, boxes, this.faceSize);
// const embeddingsTensor = await this.mobileFaceNetModel.predict(faceImagesTensor);
const f32embeddings = await this.getEmbeddingsBatch(faceImagesTensor);
const embeddings = f32embeddings;
// const embeddings = await embeddingsTensor.array();
// const faceImages = await faceImagesTensor.array();
tf.dispose(faceImagesTensor);
// tf.dispose(embeddingsTensor);
// console.log('embeddings: ', embeddings[0]);
return {
embeddings: embeddings as FaceEmbedding[],
// faceImages: faceImages as FaceImage[],
};
}
public async dispose() {
return this.faceRecognitionNet.dispose();
}
}
export default FAPIFaceEmbeddingService;
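A hedged sketch of how this deleted service was driven; imageTensor and alignedFaces are assumed to come from the decode and alignment stages:
// Hypothetical call site, not part of the commit.
const embeddingService = new FAPIFaceEmbeddingService(112);
await embeddingService.init(); // loads weights from /models/face-api/
const { embeddings } = await embeddingService.getEmbeddings(imageTensor, alignedFaces);
await embeddingService.dispose();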

@@ -1,99 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { gather } from '@tensorflow/tfjs';
import { extractFaces } from 'utils/machineLearning';
import { DetectedFace, FaceDetection } from 'types/machineLearning';
import { FaceLandmarks68 } from '../../../thirdparty/face-api/classes';
import { FaceLandmark68Net } from '../../../thirdparty/face-api/faceLandmarkNet';
import { getRotatedFaceImage } from 'utils/machineLearning/faceAlign';
class FAPIFaceLandmarksService {
private faceLandmarkNet: FaceLandmark68Net;
private faceSize;
public constructor(faceSize: number = 112) {
this.faceLandmarkNet = new FaceLandmark68Net();
this.faceSize = faceSize;
}
public async init() {
await this.faceLandmarkNet.loadFromUri('/models/face-api/');
console.log(
'loaded faceLandmarkNet: ',
this.faceLandmarkNet,
await tf.getBackend()
);
}
private async getLandmarksBatch(faceImagesTensor) {
const landmarks = [];
for (let i = 0; i < faceImagesTensor.shape[0]; i++) {
const face = tf.tidy(() =>
gather(faceImagesTensor, i).expandDims()
);
const landmark = await this.faceLandmarkNet.detectLandmarks(face);
tf.dispose(face);
landmarks[i] = landmark;
}
return landmarks;
}
public async getAlignedFaces(
image: tf.Tensor3D,
faceDetections: Array<FaceDetection>
): Promise<tf.Tensor4D> {
if (!faceDetections || faceDetections.length < 1) {
return null as tf.Tensor4D;
}
const alignedFaceImages = new Array<tf.Tensor3D>(faceDetections.length);
for (let i = 0; i < faceDetections.length; i++) {
const rotFaceImageTensor = getRotatedFaceImage(
image,
faceDetections[i]
);
const landmarks = await this.faceLandmarkNet.detectLandmarks(
rotFaceImageTensor
);
Array.isArray(landmarks) &&
console.log('multiple landmarks for single face');
const landmark = Array.isArray(landmarks)
? landmarks[0]
: landmarks;
const alignedBox = landmark.align();
const face = extractFaces(
rotFaceImageTensor,
[alignedBox],
this.faceSize
);
alignedFaceImages[i] = tf.tidy(() => tf.squeeze(face, [0]));
tf.dispose(rotFaceImageTensor);
}
return tf.stack(alignedFaceImages) as tf.Tensor4D;
}
public async detectLandmarks(image: tf.Tensor3D, faces: DetectedFace[]) {
if (!faces || faces.length < 1) {
return [] as Array<FaceLandmarks68>;
}
const boxes = faces.map((f) => f.detection.box);
const faceImagesTensor = extractFaces(image, boxes, this.faceSize);
const landmarks = await this.getLandmarksBatch(faceImagesTensor);
tf.dispose(faceImagesTensor);
return landmarks as Array<FaceLandmarks68>;
}
public async dispose() {
return this.faceLandmarkNet.dispose();
}
}
export default FAPIFaceLandmarksService;

@@ -1,7 +1,6 @@
-import { TreeNode } from 'hdbscan';
+import { euclidean, TreeNode } from 'hdbscan';
import { RawNodeDatum } from 'react-d3-tree/lib/types/common';
import { f32Average, getAllFacesFromMap } from '.';
-import { euclideanDistance } from '../../../thirdparty/face-api/euclideanDistance';
import {
FacesCluster,
Cluster,
@@ -50,7 +49,10 @@ export function getNearestCluster(
let nearest: FacesCluster = null;
let nearestDist = 100000;
syncContext.mlLibraryData.faceClustersWithNoise.clusters.forEach((c) => {
-const dist = euclideanDistance(noise.embedding, c.summary);
+const dist = euclidean(
+    Array.from(noise.embedding),
+    Array.from(c.summary)
+);
if (dist < nearestDist) {
nearestDist = dist;
nearest = c;
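hdbscan's euclidean helper expects plain number arrays, hence the Array.from calls on the Float32Array embeddings; for reference, a sketch of the L2 metric both the removed and the replacement helpers compute:
// Reference implementation of the shared metric (not the library code itself).
function euclideanRef(a: number[], b: number[]): number {
    if (a.length !== b.length) throw new Error('mismatched dimensions');
    let sum = 0;
    for (let i = 0; i < a.length; i++) {
        const d = a[i] - b[i];
        sum += d * d;
    }
    return Math.sqrt(sum);
}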

@@ -1,159 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { ParamMapping } from './common';
import { getModelUris } from './common/getModelUris';
import { loadWeightMap } from './dom';
import { env } from './env';
export abstract class NeuralNetwork<TNetParams> {
protected _params: TNetParams | undefined = undefined
protected _paramMappings: ParamMapping[] = []
constructor(protected _name: string) {}
public get params(): TNetParams | undefined { return this._params }
public get paramMappings(): ParamMapping[] { return this._paramMappings }
public get isLoaded(): boolean { return !!this.params }
public getParamFromPath(paramPath: string): tf.Tensor {
const { obj, objProp } = this.traversePropertyPath(paramPath)
return obj[objProp]
}
public reassignParamFromPath(paramPath: string, tensor: tf.Tensor) {
const { obj, objProp } = this.traversePropertyPath(paramPath)
obj[objProp].dispose()
obj[objProp] = tensor
}
public getParamList() {
return this._paramMappings.map(({ paramPath }) => ({
path: paramPath,
tensor: this.getParamFromPath(paramPath)
}))
}
public getTrainableParams() {
return this.getParamList().filter(param => param.tensor instanceof tf.Variable)
}
public getFrozenParams() {
return this.getParamList().filter(param => !(param.tensor instanceof tf.Variable))
}
public variable() {
this.getFrozenParams().forEach(({ path, tensor }) => {
this.reassignParamFromPath(path, tensor.variable())
})
}
public freeze() {
this.getTrainableParams().forEach(({ path, tensor: variable }) => {
const tensor = tf.tensor(variable.dataSync())
variable.dispose()
this.reassignParamFromPath(path, tensor)
})
}
public dispose(throwOnRedispose: boolean = true) {
this.getParamList().forEach(param => {
if (throwOnRedispose && param.tensor.isDisposed) {
throw new Error(`param tensor has already been disposed for path ${param.path}`)
}
param.tensor.dispose()
})
this._params = undefined
}
public serializeParams(): Float32Array {
return new Float32Array(
this.getParamList()
.map(({ tensor }) => Array.from(tensor.dataSync()) as number[])
.reduce((flat, arr) => flat.concat(arr))
)
}
public async load(weightsOrUrl: Float32Array | string | undefined): Promise<void> {
if (weightsOrUrl instanceof Float32Array) {
this.extractWeights(weightsOrUrl)
return
}
await this.loadFromUri(weightsOrUrl)
}
public async loadFromUri(uri: string | undefined) {
if (uri && typeof uri !== 'string') {
throw new Error(`${this._name}.loadFromUri - expected model uri`)
}
const weightMap = await loadWeightMap(uri, this.getDefaultModelName())
this.loadFromWeightMap(weightMap)
}
public async loadFromDisk(filePath: string | undefined) {
if (filePath && typeof filePath !== 'string') {
throw new Error(`${this._name}.loadFromDisk - expected model file path`)
}
const { readFile } = env.getEnv()
const { manifestUri, modelBaseUri } = getModelUris(filePath, this.getDefaultModelName())
const fetchWeightsFromDisk = (filePaths: string[]) => Promise.all(
filePaths.map(filePath => readFile(filePath).then(buf => buf.buffer))
)
const loadWeights = tf.io.weightsLoaderFactory(fetchWeightsFromDisk)
const manifest = JSON.parse((await readFile(manifestUri)).toString())
const weightMap = await loadWeights(manifest, modelBaseUri)
this.loadFromWeightMap(weightMap)
}
public loadFromWeightMap(weightMap: tf.NamedTensorMap) {
const {
paramMappings,
params
} = this.extractParamsFromWeigthMap(weightMap)
this._paramMappings = paramMappings
this._params = params
}
public extractWeights(weights: Float32Array) {
const {
paramMappings,
params
} = this.extractParams(weights)
this._paramMappings = paramMappings
this._params = params
}
private traversePropertyPath(paramPath: string) {
if (!this.params) {
throw new Error(`traversePropertyPath - model has no loaded params`)
}
const result = paramPath.split('/').reduce((res: { nextObj: any, obj?: any, objProp?: string }, objProp) => {
if (!res.nextObj.hasOwnProperty(objProp)) {
throw new Error(`traversePropertyPath - object does not have property ${objProp}, for path ${paramPath}`)
}
return { obj: res.nextObj, objProp, nextObj: res.nextObj[objProp] }
}, { nextObj: this.params })
const { obj, objProp } = result
if (!obj || !objProp || !(obj[objProp] instanceof tf.Tensor)) {
throw new Error(`traversePropertyPath - parameter is not a tensor, for path ${paramPath}`)
}
return { obj, objProp }
}
protected abstract getDefaultModelName(): string
protected abstract extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): { params: TNetParams, paramMappings: ParamMapping[] }
protected abstract extractParams(weights: Float32Array): { params: TNetParams, paramMappings: ParamMapping[] }
}
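A small sketch of the paramPath convention consumed by getParamFromPath; the net instance and the path are illustrative, assuming an FC layer mapped under the prefix 'fc':
// Hypothetical usage on a loaded NeuralNetwork subclass.
await net.loadFromUri('/models/face-api/');
const bias = net.getParamFromPath('fc/bias'); // walks params.fc.bias and asserts it is a tf.Tensor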

@@ -1,24 +0,0 @@
import { Box } from './Box';
import { IDimensions } from './Dimensions';
import { ObjectDetection } from './ObjectDetection';
import { Rect } from './Rect';
export interface IFaceDetecion {
score: number
box: Box
}
export class FaceDetection extends ObjectDetection implements IFaceDetecion {
constructor(
score: number,
relativeBox: Rect,
imageDims: IDimensions
) {
super(score, score, '', relativeBox, imageDims)
}
public forSize(width: number, height: number): FaceDetection {
const { score, relativeBox, imageDims } = super.forSize(width, height)
return new FaceDetection(score, relativeBox, imageDims)
}
}

@@ -1,125 +0,0 @@
import { minBbox } from '../ops';
import { getCenterPoint } from '../utils';
import { IBoundingBox } from './BoundingBox';
import { Box } from './Box';
import { Dimensions, IDimensions } from './Dimensions';
import { FaceDetection } from './FaceDetection';
import { Point } from './Point';
import { IRect, Rect } from './Rect';
// face alignment constants
const relX = 0.5
const relY = 0.43
const relScale = 0.45
export interface IFaceLandmarks {
positions: Point[]
shift: Point
}
export class FaceLandmarks implements IFaceLandmarks {
protected _shift: Point
protected _positions: Point[]
protected _imgDims: Dimensions
constructor(
relativeFaceLandmarkPositions: Point[],
imgDims: IDimensions,
shift: Point = new Point(0, 0)
) {
const { width, height } = imgDims
this._imgDims = new Dimensions(width, height)
this._shift = shift
this._positions = relativeFaceLandmarkPositions.map(
pt => pt.mul(new Point(width, height)).add(shift)
)
}
public get shift(): Point { return new Point(this._shift.x, this._shift.y) }
public get imageWidth(): number { return this._imgDims.width }
public get imageHeight(): number { return this._imgDims.height }
public get positions(): Point[] { return this._positions }
public get relativePositions(): Point[] {
return this._positions.map(
pt => pt.sub(this._shift).div(new Point(this.imageWidth, this.imageHeight))
)
}
public forSize<T extends FaceLandmarks>(width: number, height: number): T {
return new (this.constructor as any)(
this.relativePositions,
{ width, height }
)
}
public shiftBy<T extends FaceLandmarks>(x: number, y: number): T {
return new (this.constructor as any)(
this.relativePositions,
this._imgDims,
new Point(x, y)
)
}
public shiftByPoint<T extends FaceLandmarks>(pt: Point): T {
return this.shiftBy(pt.x, pt.y)
}
/**
* Aligns the face landmarks after face detection from the relative positions of the face's
* bounding box, or its current shift. This function should be used to align the face images
* after face detection has been performed, before they are passed to the face recognition net.
* This will make the computed face descriptor more accurate.
*
* @param detection (optional) The bounding box of the face or the face detection result. If
* no argument is passed, the positions of the face landmarks are assumed to be relative to
* its current shift.
* @returns The bounding box of the aligned face.
*/
public align(
detection?: FaceDetection | IRect | IBoundingBox | null,
options: { useDlibAlignment?: boolean, minBoxPadding?: number } = { }
): Box {
if (detection) {
const box = detection instanceof FaceDetection
? detection.box.floor()
: new Box(detection)
return this.shiftBy(box.x, box.y).align(null, options)
}
const { useDlibAlignment, minBoxPadding } = Object.assign({}, { useDlibAlignment: false, minBoxPadding: 0.2 }, options)
if (useDlibAlignment) {
return this.alignDlib()
}
return this.alignMinBbox(minBoxPadding)
}
private alignDlib(): Box {
const centers = this.getRefPointsForAlignment()
const [leftEyeCenter, rightEyeCenter, mouthCenter] = centers
const distToMouth = (pt: Point) => mouthCenter.sub(pt).magnitude()
const eyeToMouthDist = (distToMouth(leftEyeCenter) + distToMouth(rightEyeCenter)) / 2
const size = Math.floor(eyeToMouthDist / relScale)
const refPoint = getCenterPoint(centers)
// TODO: pad in case rectangle is out of image bounds
const x = Math.floor(Math.max(0, refPoint.x - (relX * size)))
const y = Math.floor(Math.max(0, refPoint.y - (relY * size)))
return new Rect(x, y, Math.min(size, this.imageWidth + x), Math.min(size, this.imageHeight + y))
}
private alignMinBbox(padding: number): Box {
const box = minBbox(this.positions)
return box.pad(box.width * padding, box.height * padding)
}
protected getRefPointsForAlignment(): Point[] {
throw new Error('getRefPointsForAlignment not implemented by base class')
}
}
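A short sketch of the two alignment paths described in the align() doc comment; landmarks is an assumed FaceLandmarks68 instance from a prior detection:
// Hypothetical usage.
const minBboxBox = landmarks.align(); // default: minBbox with 20% padding
const dlibBox = landmarks.align(null, { useDlibAlignment: true }); // dlib-style alignment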

@@ -1,16 +0,0 @@
import { getCenterPoint } from '../utils';
import { FaceLandmarks } from './FaceLandmarks';
import { Point } from './Point';
export class FaceLandmarks5 extends FaceLandmarks {
protected getRefPointsForAlignment(): Point[] {
const pts = this.positions
return [
pts[0],
pts[1],
getCenterPoint([pts[3], pts[4]])
]
}
}

@@ -1,41 +0,0 @@
import { getCenterPoint } from '../utils';
import { FaceLandmarks } from './FaceLandmarks';
import { Point } from './Point';
export class FaceLandmarks68 extends FaceLandmarks {
public getJawOutline(): Point[] {
return this.positions.slice(0, 17)
}
public getLeftEyeBrow(): Point[] {
return this.positions.slice(17, 22)
}
public getRightEyeBrow(): Point[] {
return this.positions.slice(22, 27)
}
public getNose(): Point[] {
return this.positions.slice(27, 36)
}
public getLeftEye(): Point[] {
return this.positions.slice(36, 42)
}
public getRightEye(): Point[] {
return this.positions.slice(42, 48)
}
public getMouth(): Point[] {
return this.positions.slice(48, 68)
}
protected getRefPointsForAlignment(): Point[] {
return [
this.getLeftEye(),
this.getRightEye(),
this.getMouth()
].map(getCenterPoint)
}
}

@@ -1,23 +0,0 @@
import { round } from '../utils';
export interface IFaceMatch {
label: string
distance: number
}
export class FaceMatch implements IFaceMatch {
private _label: string
private _distance: number
constructor(label: string, distance: number) {
this._label = label
this._distance = distance
}
public get label(): string { return this._label }
public get distance(): number { return this._distance }
public toString(withDistance: boolean = true): string {
return `${this.label}${withDistance ? ` (${round(this.distance)})` : ''}`
}
}

@@ -1,25 +0,0 @@
import { isValidNumber } from '../utils';
import { IBoundingBox } from './BoundingBox';
import { Box } from './Box';
import { IRect } from './Rect';
export class LabeledBox extends Box<LabeledBox> {
public static assertIsValidLabeledBox(box: any, callee: string) {
Box.assertIsValidBox(box, callee)
if (!isValidNumber(box.label)) {
throw new Error(`${callee} - expected property label (${box.label}) to be a number`)
}
}
private _label: number
constructor(box: IBoundingBox | IRect | any, label: number) {
super(box)
this._label = label
}
public get label(): number { return this._label }
}

@@ -1,35 +0,0 @@
export class LabeledFaceDescriptors {
private _label: string
private _descriptors: Float32Array[]
constructor(label: string, descriptors: Float32Array[]) {
if (!(typeof label === 'string')) {
throw new Error('LabeledFaceDescriptors - constructor expected label to be a string')
}
if (!Array.isArray(descriptors) || descriptors.some(desc => !(desc instanceof Float32Array))) {
throw new Error('LabeledFaceDescriptors - constructor expected descriptors to be an array of Float32Array')
}
this._label = label
this._descriptors = descriptors
}
public get label(): string { return this._label }
public get descriptors(): Float32Array[] { return this._descriptors }
public toJSON(): any {
return {
label: this.label,
descriptors: this.descriptors.map((d) => Array.from(d))
};
}
public static fromJSON(json: any): LabeledFaceDescriptors {
const descriptors = json.descriptors.map((d: any) => {
return new Float32Array(d);
});
return new LabeledFaceDescriptors(json.label, descriptors);
}
}

@@ -1,44 +0,0 @@
import { Box } from './Box';
import { Dimensions, IDimensions } from './Dimensions';
import { IRect, Rect } from './Rect';
export class ObjectDetection {
private _score: number
private _classScore: number
private _className: string
private _box: Rect
private _imageDims: Dimensions
constructor(
score: number,
classScore: number,
className: string,
relativeBox: IRect,
imageDims: IDimensions
) {
this._imageDims = new Dimensions(imageDims.width, imageDims.height)
this._score = score
this._classScore = classScore
this._className = className
this._box = new Box(relativeBox).rescale(this._imageDims)
}
public get score(): number { return this._score }
public get classScore(): number { return this._classScore }
public get className(): string { return this._className }
public get box(): Box { return this._box }
public get imageDims(): Dimensions { return this._imageDims }
public get imageWidth(): number { return this.imageDims.width }
public get imageHeight(): number { return this.imageDims.height }
public get relativeBox(): Box { return new Box(this._box).rescale(this.imageDims.reverse()) }
public forSize(width: number, height: number): ObjectDetection {
return new ObjectDetection(
this.score,
this.classScore,
this.className,
this.relativeBox,
{ width, height }
)
}
}

@@ -1,31 +0,0 @@
import { isValidProbablitiy } from '../utils';
import { IBoundingBox } from './BoundingBox';
import { LabeledBox } from './LabeledBox';
import { IRect } from './Rect';
export class PredictedBox extends LabeledBox {
public static assertIsValidPredictedBox(box: any, callee: string) {
LabeledBox.assertIsValidLabeledBox(box, callee)
if (
!isValidProbablitiy(box.score)
|| !isValidProbablitiy(box.classScore)
) {
throw new Error(`${callee} - expected properties score (${box.score}) and (${box.classScore}) to be a number between [0, 1]`)
}
}
private _score: number
private _classScore: number
constructor(box: IBoundingBox | IRect | any, label: number, score: number, classScore: number) {
super(box, label)
this._score = score
this._classScore = classScore
}
public get score(): number { return this._score }
public get classScore(): number { return this._classScore }
}

@@ -1,14 +1,5 @@
export * from './BoundingBox'
export * from './Box'
export * from './Dimensions'
-export * from './FaceDetection';
-export * from './FaceLandmarks';
-export * from './FaceLandmarks5';
-export * from './FaceLandmarks68';
-export * from './FaceMatch';
-export * from './LabeledBox'
-export * from './LabeledFaceDescriptors';
-export * from './ObjectDetection'
export * from './Point'
-export * from './PredictedBox'
export * from './Rect'

@@ -1,19 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { ConvParams } from './types';
export function convLayer(
x: tf.Tensor4D,
params: ConvParams,
padding: 'valid' | 'same' = 'same',
withRelu: boolean = false
): tf.Tensor4D {
return tf.tidy(() => {
const out = tf.add(
tf.conv2d(x, params.filters, [1, 1], padding),
params.bias
) as tf.Tensor4D
return withRelu ? tf.relu(out) : out
})
}

@@ -1,15 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { SeparableConvParams } from './types';
export function depthwiseSeparableConv(
x: tf.Tensor4D,
params: SeparableConvParams,
stride: [number, number]
): tf.Tensor4D {
return tf.tidy(() => {
let out = tf.separableConv2d(x, params.depthwise_filter, params.pointwise_filter, stride, 'same')
out = tf.add(out, params.bias)
return out
})
}

@@ -1,9 +0,0 @@
import { ParamMapping } from './types';
export function disposeUnusedWeightTensors(weightMap: any, paramMappings: ParamMapping[]) {
Object.keys(weightMap).forEach(path => {
if (!paramMappings.some(pm => pm.originalPath === path)) {
weightMap[path].dispose()
}
})
}

@@ -1,31 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { ConvParams, ExtractWeightsFunction, ParamMapping } from './types';
export function extractConvParamsFactory(
extractWeights: ExtractWeightsFunction,
paramMappings: ParamMapping[]
) {
return function(
channelsIn: number,
channelsOut: number,
filterSize: number,
mappedPrefix: string
): ConvParams {
const filters = tf.tensor4d(
extractWeights(channelsIn * channelsOut * filterSize * filterSize),
[filterSize, filterSize, channelsIn, channelsOut]
)
const bias = tf.tensor1d(extractWeights(channelsOut))
paramMappings.push(
{ paramPath: `${mappedPrefix}/filters` },
{ paramPath: `${mappedPrefix}/bias` }
)
return { filters, bias }
}
}

@@ -1,31 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { ExtractWeightsFunction, FCParams, ParamMapping } from './types';
export function extractFCParamsFactory(
extractWeights: ExtractWeightsFunction,
paramMappings: ParamMapping[]
) {
return function(
channelsIn: number,
channelsOut: number,
mappedPrefix: string
): FCParams {
const fc_weights = tf.tensor2d(extractWeights(channelsIn * channelsOut), [channelsIn, channelsOut])
const fc_bias = tf.tensor1d(extractWeights(channelsOut))
paramMappings.push(
{ paramPath: `${mappedPrefix}/weights` },
{ paramPath: `${mappedPrefix}/bias` }
)
return {
weights: fc_weights,
bias: fc_bias
}
}
}

@@ -1,46 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { ExtractWeightsFunction, ParamMapping, SeparableConvParams } from './types';
export function extractSeparableConvParamsFactory(
extractWeights: ExtractWeightsFunction,
paramMappings: ParamMapping[]
) {
return function(channelsIn: number, channelsOut: number, mappedPrefix: string): SeparableConvParams {
const depthwise_filter = tf.tensor4d(extractWeights(3 * 3 * channelsIn), [3, 3, channelsIn, 1])
const pointwise_filter = tf.tensor4d(extractWeights(channelsIn * channelsOut), [1, 1, channelsIn, channelsOut])
const bias = tf.tensor1d(extractWeights(channelsOut))
paramMappings.push(
{ paramPath: `${mappedPrefix}/depthwise_filter` },
{ paramPath: `${mappedPrefix}/pointwise_filter` },
{ paramPath: `${mappedPrefix}/bias` }
)
return new SeparableConvParams(
depthwise_filter,
pointwise_filter,
bias
)
}
}
export function loadSeparableConvParamsFactory(
extractWeightEntry: <T>(originalPath: string, paramRank: number) => T
) {
return function (prefix: string): SeparableConvParams {
const depthwise_filter = extractWeightEntry<tf.Tensor4D>(`${prefix}/depthwise_filter`, 4)
const pointwise_filter = extractWeightEntry<tf.Tensor4D>(`${prefix}/pointwise_filter`, 4)
const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1)
return new SeparableConvParams(
depthwise_filter,
pointwise_filter,
bias
)
}
}

@@ -1,20 +0,0 @@
import { isTensor } from '../utils';
import { ParamMapping } from './types';
export function extractWeightEntryFactory(weightMap: any, paramMappings: ParamMapping[]) {
return function<T> (originalPath: string, paramRank: number, mappedPath?: string): T {
const tensor = weightMap[originalPath]
if (!isTensor(tensor, paramRank)) {
throw new Error(`expected weightMap[${originalPath}] to be a Tensor${paramRank}D, instead have ${tensor}`)
}
paramMappings.push(
{ originalPath, paramPath: mappedPath || originalPath }
)
return tensor
}
}

@@ -1,18 +0,0 @@
export function extractWeightsFactory(weights: Float32Array) {
let remainingWeights = weights
function extractWeights(numWeights: number): Float32Array {
const ret = remainingWeights.slice(0, numWeights)
remainingWeights = remainingWeights.slice(numWeights)
return ret
}
function getRemainingWeights(): Float32Array {
return remainingWeights
}
return {
extractWeights,
getRemainingWeights
}
}
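A small sketch of how the factory consumes a flat weight buffer sequentially (values are illustrative):
const { extractWeights, getRemainingWeights } = extractWeightsFactory(
    new Float32Array([1, 2, 3, 4, 5, 6])
);
const first = extractWeights(4);    // Float32Array [1, 2, 3, 4]
const rest = getRemainingWeights(); // Float32Array [5, 6]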

@@ -1,15 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { FCParams } from './types';
export function fullyConnectedLayer(
x: tf.Tensor2D,
params: FCParams
): tf.Tensor2D {
return tf.tidy(() =>
tf.add(
tf.matMul(x, params.weights),
params.bias
)
)
}

@@ -1,33 +0,0 @@
export function getModelUris(uri: string | undefined, defaultModelName: string) {
const defaultManifestFilename = `${defaultModelName}-weights_manifest.json`
if (!uri) {
return {
modelBaseUri: '',
manifestUri: defaultManifestFilename
}
}
if (uri === '/') {
return {
modelBaseUri: '/',
manifestUri: `/${defaultManifestFilename}`
}
}
const protocol = uri.startsWith('http://') ? 'http://' : uri.startsWith('https://') ? 'https://' : '';
uri = uri.replace(protocol, '');
const parts = uri.split('/').filter(s => s)
const manifestFile = uri.endsWith('.json')
? parts[parts.length - 1]
: defaultManifestFilename
let modelBaseUri = protocol + (uri.endsWith('.json') ? parts.slice(0, parts.length - 1) : parts).join('/')
modelBaseUri = uri.startsWith('/') ? `/${modelBaseUri}` : modelBaseUri
return {
modelBaseUri,
manifestUri: modelBaseUri === '/' ? `/${manifestFile}` : `${modelBaseUri}/${manifestFile}`
}
}
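Tracing the resolution rules above by hand (the model name is illustrative):
getModelUris(undefined, 'my_model');
// => { modelBaseUri: '', manifestUri: 'my_model-weights_manifest.json' }
getModelUris('/models/face-api/', 'my_model');
// => { modelBaseUri: '/models/face-api',
//      manifestUri: '/models/face-api/my_model-weights_manifest.json' }
getModelUris('https://example.com/models/manifest.json', 'my_model');
// => { modelBaseUri: 'https://example.com/models',
//      manifestUri: 'https://example.com/models/manifest.json' }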

@@ -1,10 +0,0 @@
export * from './convLayer'
export * from './depthwiseSeparableConv'
export * from './disposeUnusedWeightTensors'
export * from './extractConvParamsFactory'
export * from './extractFCParamsFactory'
export * from './extractSeparableConvParamsFactory'
export * from './extractWeightEntryFactory'
export * from './extractWeightsFactory'
export * from './getModelUris'
export * from './types'

@@ -1,12 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { ConvParams } from './types';
export function loadConvParamsFactory(extractWeightEntry: <T>(originalPath: string, paramRank: number) => T) {
return function(prefix: string): ConvParams {
const filters = extractWeightEntry<tf.Tensor4D>(`${prefix}/filters`, 4)
const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1)
return { filters, bias }
}
}

@@ -1,26 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
export type ExtractWeightsFunction = (numWeights: number) => Float32Array
export type ParamMapping = {
originalPath?: string
paramPath: string
}
export type ConvParams = {
filters: tf.Tensor4D
bias: tf.Tensor1D
}
export type FCParams = {
weights: tf.Tensor2D
bias: tf.Tensor1D
}
export class SeparableConvParams {
constructor(
public depthwise_filter: tf.Tensor4D,
public pointwise_filter: tf.Tensor4D,
public bias: tf.Tensor1D
) {}
}

@@ -1,153 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { Dimensions } from '../classes/Dimensions';
import { env } from '../env';
import { padToSquare } from '../ops/padToSquare';
import { computeReshapedDimensions, isTensor3D, isTensor4D, range } from '../utils';
import { createCanvasFromMedia } from './createCanvas';
import { imageToSquare } from './imageToSquare';
import { TResolvedNetInput } from './types';
export class NetInput {
private _imageTensors: Array<tf.Tensor3D | tf.Tensor4D> = []
private _canvases: HTMLCanvasElement[] = []
private _batchSize: number
private _treatAsBatchInput: boolean = false
private _inputDimensions: number[][] = []
private _inputSize: number
constructor(
inputs: Array<TResolvedNetInput>,
treatAsBatchInput: boolean = false
) {
if (!Array.isArray(inputs)) {
throw new Error(`NetInput.constructor - expected inputs to be an Array of TResolvedNetInput or to be instanceof tf.Tensor4D, instead have ${inputs}`)
}
this._treatAsBatchInput = treatAsBatchInput
this._batchSize = inputs.length
inputs.forEach((input, idx) => {
if (isTensor3D(input)) {
this._imageTensors[idx] = input
this._inputDimensions[idx] = input.shape
return
}
if (isTensor4D(input)) {
const batchSize = input.shape[0]
if (batchSize !== 1) {
throw new Error(`NetInput - tf.Tensor4D with batchSize ${batchSize} passed, but not supported in input array`)
}
this._imageTensors[idx] = input
this._inputDimensions[idx] = input.shape.slice(1)
return
}
const canvas = input instanceof env.getEnv().Canvas ? input : createCanvasFromMedia(input)
this._canvases[idx] = canvas
this._inputDimensions[idx] = [canvas.height, canvas.width, 3]
})
}
public get imageTensors(): Array<tf.Tensor3D | tf.Tensor4D> {
return this._imageTensors
}
public get canvases(): HTMLCanvasElement[] {
return this._canvases
}
public get isBatchInput(): boolean {
return this.batchSize > 1 || this._treatAsBatchInput
}
public get batchSize(): number {
return this._batchSize
}
public get inputDimensions(): number[][] {
return this._inputDimensions
}
public get inputSize(): number | undefined {
return this._inputSize
}
public get reshapedInputDimensions(): Dimensions[] {
return range(this.batchSize, 0, 1).map(
(_, batchIdx) => this.getReshapedInputDimensions(batchIdx)
)
}
public getInput(batchIdx: number): tf.Tensor3D | tf.Tensor4D | HTMLCanvasElement {
return this.canvases[batchIdx] || this.imageTensors[batchIdx]
}
public getInputDimensions(batchIdx: number): number[] {
return this._inputDimensions[batchIdx]
}
public getInputHeight(batchIdx: number): number {
return this._inputDimensions[batchIdx][0]
}
public getInputWidth(batchIdx: number): number {
return this._inputDimensions[batchIdx][1]
}
public getReshapedInputDimensions(batchIdx: number): Dimensions {
if (typeof this.inputSize !== 'number') {
throw new Error('getReshapedInputDimensions - inputSize not set, toBatchTensor has not been called yet')
}
const width = this.getInputWidth(batchIdx)
const height = this.getInputHeight(batchIdx)
return computeReshapedDimensions({ width, height }, this.inputSize)
}
/**
* Create a batch tensor from all input canvases and tensors
* with size [batchSize, inputSize, inputSize, 3].
*
* @param inputSize Height and width of the tensor.
* @param isCenterInputs (optional, default: true) If true, add an equal amount of padding on
* both sides of the minor dimension of the image.
* @returns The batch tensor.
*/
public toBatchTensor(inputSize: number, isCenterInputs: boolean = true): tf.Tensor4D {
this._inputSize = inputSize
return tf.tidy(() => {
const inputTensors = range(this.batchSize, 0, 1).map(batchIdx => {
const input = this.getInput(batchIdx)
if (input instanceof tf.Tensor) {
let imgTensor = isTensor4D(input) ? input : input.expandDims<tf.Tensor4D>()
imgTensor = padToSquare(imgTensor, isCenterInputs)
if (imgTensor.shape[1] !== inputSize || imgTensor.shape[2] !== inputSize) {
imgTensor = tf.image.resizeBilinear(imgTensor, [inputSize, inputSize])
}
return imgTensor.as3D(inputSize, inputSize, 3)
}
if (input instanceof env.getEnv().Canvas) {
return tf.browser.fromPixels(imageToSquare(input, inputSize, isCenterInputs))
}
throw new Error(`toBatchTensor - at batchIdx ${batchIdx}, expected input to be instanceof tf.Tensor or instanceof HTMLCanvasElement, instead have ${input}`)
})
const batchTensor = tf.stack(inputTensors.map(t => t.toFloat())).as4D(this.batchSize, inputSize, inputSize, 3)
return batchTensor
})
}
}
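A usage sketch for toBatchTensor, assuming two already-loaded image elements (imgA and imgB are illustrative):
// Hypothetical usage; imgA and imgB are assumed loaded HTMLImageElements.
const netInput = await toNetInput([imgA, imgB]);
const batch = netInput.toBatchTensor(112); // tf.Tensor4D of shape [2, 112, 112, 3]
batch.dispose();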

@@ -1,28 +0,0 @@
import { env } from '../env';
import { isMediaLoaded } from './isMediaLoaded';
export function awaitMediaLoaded(media: HTMLImageElement | HTMLVideoElement | HTMLCanvasElement) {
return new Promise((resolve, reject) => {
if (media instanceof env.getEnv().Canvas || isMediaLoaded(media)) {
return resolve(null)
}
function onLoad(e: Event) {
if (!e.currentTarget) return
e.currentTarget.removeEventListener('load', onLoad)
e.currentTarget.removeEventListener('error', onError)
resolve(e)
}
function onError(e: Event) {
if (!e.currentTarget) return
e.currentTarget.removeEventListener('load', onLoad)
e.currentTarget.removeEventListener('error', onError)
reject(e)
}
media.addEventListener('load', onLoad)
media.addEventListener('error', onError)
})
}

@@ -1,23 +0,0 @@
import { env } from '../env';
export function bufferToImage(buf: Blob): Promise<HTMLImageElement> {
return new Promise((resolve, reject) => {
if (!(buf instanceof Blob)) {
return reject('bufferToImage - expected buf to be of type: Blob')
}
const reader = new FileReader()
reader.onload = () => {
if (typeof reader.result !== 'string') {
return reject('bufferToImage - expected reader.result to be a string, in onload')
}
const img = env.getEnv().createImageElement()
img.onload = () => resolve(img)
img.onerror = reject
img.src = reader.result
}
reader.onerror = reject
reader.readAsDataURL(buf)
})
}

@@ -1,33 +0,0 @@
import { IDimensions } from '../classes/Dimensions';
import { env } from '../env';
import { getContext2dOrThrow } from './getContext2dOrThrow';
import { getMediaDimensions } from './getMediaDimensions';
import { isMediaLoaded } from './isMediaLoaded';
export function createCanvas({ width, height }: IDimensions): HTMLCanvasElement {
const { createCanvasElement } = env.getEnv()
const canvas = createCanvasElement()
canvas.width = width
canvas.height = height
return canvas
}
export function createCanvasFromMedia(media: HTMLImageElement | HTMLVideoElement | ImageData, dims?: IDimensions): HTMLCanvasElement {
const { ImageData } = env.getEnv()
if (!(media instanceof ImageData) && !isMediaLoaded(media)) {
throw new Error('createCanvasFromMedia - media has not finished loading yet')
}
const { width, height } = dims || getMediaDimensions(media)
const canvas = createCanvas({ width, height })
if (media instanceof ImageData) {
getContext2dOrThrow(canvas).putImageData(media, 0, 0)
} else {
getContext2dOrThrow(canvas).drawImage(media, 0, 0, width, height)
}
return canvas
}

@@ -1,46 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { Rect } from '../classes';
import { FaceDetection } from '../classes/FaceDetection';
import { isTensor3D, isTensor4D } from '../utils';
/**
* Extracts the tensors of the image regions containing the detected faces.
* Useful if you want to compute the face descriptors for the face images.
* Using this method is faster than extracting a canvas for each face and
* converting them to tensors individually.
*
* @param imageTensor The image tensor that face detection has been performed on.
* @param detections The face detection results or face bounding boxes for that image.
* @returns Tensors of the corresponding image region for each detected face.
*/
export async function extractFaceTensors(
imageTensor: tf.Tensor3D | tf.Tensor4D,
detections: Array<FaceDetection | Rect>
): Promise<tf.Tensor3D[]> {
if (!isTensor3D(imageTensor) && !isTensor4D(imageTensor)) {
throw new Error('extractFaceTensors - expected image tensor to be 3D or 4D')
}
if (isTensor4D(imageTensor) && imageTensor.shape[0] > 1) {
throw new Error('extractFaceTensors - batchSize > 1 not supported')
}
return tf.tidy(() => {
const [imgHeight, imgWidth, numChannels] = imageTensor.shape.slice(isTensor4D(imageTensor) ? 1 : 0)
const boxes = detections.map(
det => det instanceof FaceDetection
? det.forSize(imgWidth, imgHeight).box
: det
)
.map(box => box.clipAtImageBorders(imgWidth, imgHeight))
const faceTensors = boxes.map(({ x, y, width, height }) =>
tf.slice3d(imageTensor.as3D(imgHeight, imgWidth, numChannels), [y, x, 0], [height, width, numChannels])
)
return faceTensors
})
}
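A hedged usage sketch; the caller owns the returned tensors and must dispose them (imageTensor and detections are assumed inputs from a prior detection pass):
const faceTensors = await extractFaceTensors(imageTensor, detections);
// ... feed each face tensor to the recognition net ...
faceTensors.forEach((t) => t.dispose());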

@@ -1,53 +0,0 @@
import { FaceDetection } from '../classes/FaceDetection';
import { Rect } from '../classes/Rect';
import { env } from '../env';
import { createCanvas } from './createCanvas';
import { getContext2dOrThrow } from './getContext2dOrThrow';
import { imageTensorToCanvas } from './imageTensorToCanvas';
import { toNetInput } from './toNetInput';
import { TNetInput } from './types';
/**
* Extracts the image regions containing the detected faces.
*
* @param input The image that face detection has been performed on.
* @param detections The face detection results or face bounding boxes for that image.
* @returns The Canvases of the corresponding image region for each detected face.
*/
export async function extractFaces(
input: TNetInput,
detections: Array<FaceDetection | Rect>
): Promise<HTMLCanvasElement[]> {
const { Canvas } = env.getEnv()
let canvas = input as HTMLCanvasElement
if (!(input instanceof Canvas)) {
const netInput = await toNetInput(input)
if (netInput.batchSize > 1) {
throw new Error('extractFaces - batchSize > 1 not supported')
}
const tensorOrCanvas = netInput.getInput(0)
canvas = tensorOrCanvas instanceof Canvas
? tensorOrCanvas
: await imageTensorToCanvas(tensorOrCanvas)
}
const ctx = getContext2dOrThrow(canvas)
const boxes = detections.map(
det => det instanceof FaceDetection
? det.forSize(canvas.width, canvas.height).box.floor()
: det
)
.map(box => box.clipAtImageBorders(canvas.width, canvas.height))
return boxes.map(({ x, y, width, height }) => {
const faceImg = createCanvas({ width, height })
getContext2dOrThrow(faceImg)
.putImageData(ctx.getImageData(x, y, width, height), 0, 0)
return faceImg
})
}
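And the canvas-based counterpart, sketched under the same assumptions (inputImage and detections are illustrative):
const faceCanvases = await extractFaces(inputImage, detections);
faceCanvases.forEach((canvas, i) => {
    console.log(`face ${i}: ${canvas.width}x${canvas.height}`);
});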

@@ -1,12 +0,0 @@
import { bufferToImage } from './bufferToImage';
import { fetchOrThrow } from './fetchOrThrow';
export async function fetchImage(uri: string): Promise<HTMLImageElement> {
const res = await fetchOrThrow(uri)
const blob = await (res).blob()
if (!blob.type.startsWith('image/')) {
throw new Error(`fetchImage - expected blob type to be of type image/*, instead have: ${blob.type}, for url: ${res.url}`)
}
return bufferToImage(blob)
}

@@ -1,5 +0,0 @@
import { fetchOrThrow } from './fetchOrThrow';
export async function fetchJson<T>(uri: string): Promise<T> {
return (await fetchOrThrow(uri)).json()
}

@@ -1,5 +0,0 @@
import { fetchOrThrow } from './fetchOrThrow';
export async function fetchNetWeights(uri: string): Promise<Float32Array> {
return new Float32Array(await (await fetchOrThrow(uri)).arrayBuffer())
}

@@ -1,14 +0,0 @@
import { env } from '../env';
export async function fetchOrThrow(
url: string,
init?: RequestInit
): Promise<Response> {
const fetch = env.getEnv().fetch
const res = await fetch(url, init)
if (!(res.status < 400)) {
throw new Error(`failed to fetch: (${res.status}) ${res.statusText}, from url: ${res.url}`)
}
return res
}

@@ -1,24 +0,0 @@
import { env } from '../env';
import { resolveInput } from './resolveInput';
export function getContext2dOrThrow(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): CanvasRenderingContext2D {
const { Canvas, CanvasRenderingContext2D } = env.getEnv()
if (canvasArg instanceof CanvasRenderingContext2D) {
return canvasArg
}
const canvas = resolveInput(canvasArg)
if (!(canvas instanceof Canvas)) {
throw new Error('resolveContext2d - expected canvas to be of instance of Canvas')
}
const ctx = canvas.getContext('2d')
if (!ctx) {
throw new Error('resolveContext2d - canvas 2d context is null')
}
return ctx
}

@@ -1,15 +0,0 @@
import { Dimensions, IDimensions } from '../classes/Dimensions';
import { env } from '../env';
export function getMediaDimensions(input: HTMLImageElement | HTMLCanvasElement | HTMLVideoElement | IDimensions): Dimensions {
const { Image, Video } = env.getEnv()
if (input instanceof Image) {
return new Dimensions(input.naturalWidth, input.naturalHeight)
}
if (input instanceof Video) {
return new Dimensions(input.videoWidth, input.videoHeight)
}
return new Dimensions(input.width, input.height)
}

@@ -1,20 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { env } from '../env';
import { isTensor4D } from '../utils';
export async function imageTensorToCanvas(
imgTensor: tf.Tensor,
canvas?: HTMLCanvasElement
): Promise<HTMLCanvasElement> {
const targetCanvas = canvas || env.getEnv().createCanvasElement()
const [height, width, numChannels] = imgTensor.shape.slice(isTensor4D(imgTensor) ? 1 : 0)
const imgTensor3D = tf.tidy(() => imgTensor.as3D(height, width, numChannels).toInt())
await tf.browser.toPixels(imgTensor3D, targetCanvas)
imgTensor3D.dispose()
return targetCanvas
}

@@ -1,28 +0,0 @@
import { env } from '../env';
import { createCanvas, createCanvasFromMedia } from './createCanvas';
import { getContext2dOrThrow } from './getContext2dOrThrow';
import { getMediaDimensions } from './getMediaDimensions';
export function imageToSquare(input: HTMLImageElement | HTMLCanvasElement, inputSize: number, centerImage: boolean = false) {
const { Image, Canvas } = env.getEnv()
if (!(input instanceof Image || input instanceof Canvas)) {
throw new Error('imageToSquare - expected arg0 to be HTMLImageElement | HTMLCanvasElement')
}
const dims = getMediaDimensions(input)
const scale = inputSize / Math.max(dims.height, dims.width)
const width = scale * dims.width
const height = scale * dims.height
const targetCanvas = createCanvas({ width: inputSize, height: inputSize })
const inputCanvas = input instanceof Canvas ? input : createCanvasFromMedia(input)
const offset = Math.abs(width - height) / 2
const dx = centerImage && width < height ? offset : 0
const dy = centerImage && height < width ? offset : 0
getContext2dOrThrow(targetCanvas).drawImage(inputCanvas, dx, dy, width, height)
return targetCanvas
}

@@ -1,21 +0,0 @@
export * from './awaitMediaLoaded'
export * from './bufferToImage'
export * from './createCanvas'
export * from './extractFaces'
export * from './extractFaceTensors'
export * from './fetchImage'
export * from './fetchJson'
export * from './fetchNetWeights'
export * from './fetchOrThrow'
export * from './getContext2dOrThrow'
export * from './getMediaDimensions'
export * from './imageTensorToCanvas'
export * from './imageToSquare'
export * from './isMediaElement'
export * from './isMediaLoaded'
export * from './loadWeightMap'
export * from './matchDimensions'
export * from './NetInput'
export * from './resolveInput'
export * from './toNetInput'
export * from './types'

@@ -1,10 +0,0 @@
import { env } from '../env';
export function isMediaElement(input: any) {
const { Image, Canvas, Video } = env.getEnv()
return input instanceof Image
|| input instanceof Canvas
|| input instanceof Video
}

@@ -1,9 +0,0 @@
import { env } from '../env';
export function isMediaLoaded(media: HTMLImageElement | HTMLVideoElement) : boolean {
const { Image, Video } = env.getEnv()
return (media instanceof Image && media.complete)
|| (media instanceof Video && media.readyState >= 3)
}

@@ -1,15 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { getModelUris } from '../common/getModelUris';
import { fetchJson } from './fetchJson';
export async function loadWeightMap(
uri: string | undefined,
defaultModelName: string,
): Promise<tf.NamedTensorMap> {
const { manifestUri, modelBaseUri } = getModelUris(uri, defaultModelName)
const manifest = await fetchJson<tf.io.WeightsManifestConfig>(manifestUri)
return tf.io.loadWeights(manifest, modelBaseUri)
}
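
A usage sketch, assuming the weight manifest and shards are served under a hypothetical /models path:

const weightMap = await loadWeightMap('/models', 'face_recognition_model')
// weightMap is a tf.NamedTensorMap keyed by the tensor names in the manifest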

@@ -1,11 +0,0 @@
import { IDimensions } from '../classes';
import { getMediaDimensions } from './getMediaDimensions';
export function matchDimensions(input: IDimensions, reference: IDimensions, useMediaDimensions: boolean = false) {
const { width, height } = useMediaDimensions
? getMediaDimensions(reference)
: reference
input.width = width
input.height = height
return { width, height }
}
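
A typical use is sizing an overlay canvas to a video before drawing detections onto it; a sketch with hypothetical elements:

const input = document.getElementById('inputVideo') as HTMLVideoElement
const overlay = document.getElementById('overlay') as HTMLCanvasElement
// use videoWidth/videoHeight rather than the layout size of the element
matchDimensions(overlay, input, true)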

@@ -1,8 +0,0 @@
import { env } from '../env';
export function resolveInput(arg: string | any) {
if (!env.isNodejs() && typeof arg === 'string') {
return document.getElementById(arg)
}
return arg
}

@@ -1,57 +0,0 @@
import { isTensor3D, isTensor4D } from '../utils';
import { awaitMediaLoaded } from './awaitMediaLoaded';
import { isMediaElement } from './isMediaElement';
import { NetInput } from './NetInput';
import { resolveInput } from './resolveInput';
import { TNetInput } from './types';
/**
* Validates the input to make sure it is a valid net input and waits for all media
* elements to finish loading.
*
* @param input The input, which can be a media element or an array of different media elements.
* @returns A NetInput instance, which can be passed into one of the neural networks.
*/
export async function toNetInput(inputs: TNetInput): Promise<NetInput> {
if (inputs instanceof NetInput) {
return inputs
}
let inputArgArray = Array.isArray(inputs)
? inputs
: [inputs]
if (!inputArgArray.length) {
throw new Error('toNetInput - empty array passed as input')
}
const getIdxHint = (idx: number) => Array.isArray(inputs) ? ` at input index ${idx}:` : ''
const inputArray = inputArgArray.map(resolveInput)
inputArray.forEach((input, i) => {
if (!isMediaElement(input) && !isTensor3D(input) && !isTensor4D(input)) {
if (typeof inputArgArray[i] === 'string') {
throw new Error(`toNetInput -${getIdxHint(i)} string passed, but could not resolve HTMLElement for element id ${inputArgArray[i]}`)
}
throw new Error(`toNetInput -${getIdxHint(i)} expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | tf.Tensor3D, or to be an element id`)
}
if (isTensor4D(input)) {
// if tf.Tensor4D is passed in the input array, the batch size has to be 1
const batchSize = input.shape[0]
if (batchSize !== 1) {
throw new Error(`toNetInput -${getIdxHint(i)} tf.Tensor4D with batchSize ${batchSize} passed, but not supported in input array`)
}
}
})
// wait for all media elements to finish loading
await Promise.all(
inputArray.map(input => isMediaElement(input) && awaitMediaLoaded(input))
)
return new NetInput(inputArray, Array.isArray(inputs))
}
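
A usage sketch of the accepted input shapes (the element id, elements and tensor are hypothetical):

const single = await toNetInput('inputImage')   // resolves the element id
const batch = await toNetInput([img0, img1])    // batch of media elements
const fromTensor = await toNetInput(tensor4dWithBatchSize1)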

@@ -1,11 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { NetInput } from './NetInput';
export type TMediaElement = HTMLImageElement | HTMLVideoElement | HTMLCanvasElement
export type TResolvedNetInput = TMediaElement | tf.Tensor3D | tf.Tensor4D
export type TNetInputArg = string | TResolvedNetInput
export type TNetInput = TNetInputArg | Array<TNetInputArg> | NetInput | tf.Tensor4D

@@ -1,24 +0,0 @@
import { Environment } from './types';
export function createBrowserEnv(): Environment {
const fetch = window['fetch'] || function() {
throw new Error('fetch - missing fetch implementation for browser environment')
}
const readFile = function() {
throw new Error('readFile - filesystem not available for browser environment')
}
return {
Canvas: HTMLCanvasElement,
CanvasRenderingContext2D: CanvasRenderingContext2D,
Image: HTMLImageElement,
ImageData: ImageData,
Video: HTMLVideoElement,
createCanvasElement: () => document.createElement('canvas'),
createImageElement: () => document.createElement('img'),
fetch,
readFile
}
}

@@ -1,30 +0,0 @@
import { FileSystem } from './types';
export function createFileSystem(fs?: any): FileSystem {
let requireFsError = ''
if (!fs) {
try {
fs = require('fs')
} catch (err) {
requireFsError = err.toString()
}
}
const readFile = fs
? function(filePath: string) {
return new Promise<Buffer>((res, rej) => {
fs.readFile(filePath, function(err: any, buffer: Buffer) {
return err ? rej(err) : res(buffer)
})
})
}
: function() {
throw new Error(`readFile - failed to require fs in nodejs environment with error: ${requireFsError}`)
}
return {
readFile
}
}

@@ -1,41 +0,0 @@
import { fetch as tfFetch } from '@tensorflow/tfjs-core/dist/util';
import { Environment } from './types';
export function createWorkerEnv(): Environment {
const fetch = tfFetch || function() {
throw new Error('fetch - missing fetch implementation for worker environment')
}
const readFile = function() {
throw new Error('readFile - filesystem not available for worker environment')
}
const createCanvasElement = function() {
const canvas = new OffscreenCanvas(1, 1) as any;
canvas.localName = 'canvas';
canvas.nodeName = 'CANVAS';
canvas.tagName = 'CANVAS';
canvas.nodeType = 1;
canvas.innerHTML = '';
canvas.remove = () => { /* no-op: an OffscreenCanvas is not attached to the DOM */ };
return canvas;
}
const HTMLImageElement = function() {}
const HTMLVideoElement = function() {}
return {
Canvas: OffscreenCanvas as any,
CanvasRenderingContext2D: OffscreenCanvasRenderingContext2D as any,
Image: HTMLImageElement as any,
ImageData: ImageData,
Video: HTMLVideoElement as any,
createCanvasElement: createCanvasElement,
// document is unavailable in a worker, so fail explicitly rather than with a ReferenceError
createImageElement: () => { throw new Error('createImageElement - not implemented for worker environment') },
fetch,
readFile
}
}

@@ -1,69 +0,0 @@
import { createBrowserEnv } from './createBrowserEnv';
import { createFileSystem } from './createFileSystem';
import { createWorkerEnv } from './createWorkerEnv';
import { isBrowser } from './isBrowser';
import { isNodejs } from './isNodejs';
import { Environment } from './types';
let environment: Environment | null
function getEnv(): Environment {
if (!environment) {
throw new Error('getEnv - environment is not defined, check isNodejs() and isBrowser()')
}
return environment
}
function setEnv(env: Environment) {
environment = env
}
function isWorker() {
return typeof importScripts === 'function'
}
function initialize() {
// check isBrowser() first to prevent the electron renderer process from being
// initialized with the wrong environment, since isNodejs() returns true there
if (isWorker()) {
setEnv(createWorkerEnv())
} else if (isBrowser()) {
setEnv(createBrowserEnv())
}
}
function monkeyPatch(env: Partial<Environment>) {
if (!environment) {
initialize()
}
if (!environment) {
throw new Error('monkeyPatch - environment is not defined, check isNodejs() and isBrowser()')
}
const { Canvas = environment.Canvas, Image = environment.Image } = env
environment.Canvas = Canvas
environment.Image = Image
environment.createCanvasElement = env.createCanvasElement || (() => new Canvas())
environment.createImageElement = env.createImageElement || (() => new Image())
environment.ImageData = env.ImageData || environment.ImageData
environment.Video = env.Video || environment.Video
environment.fetch = env.fetch || environment.fetch
environment.readFile = env.readFile || environment.readFile
}
export const env = {
getEnv,
setEnv,
initialize,
createBrowserEnv,
createFileSystem,
monkeyPatch,
isBrowser,
isNodejs
}
initialize()
export * from './types'
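
monkeyPatch is how this environment was typically completed under nodejs; a minimal sketch, assuming the third-party 'canvas' npm package (not part of this codebase):

import * as canvas from 'canvas';
const { Canvas, Image, ImageData } = canvas
env.monkeyPatch({ Canvas: Canvas as any, Image: Image as any, ImageData: ImageData as any })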

@@ -1,9 +0,0 @@
export function isBrowser(): boolean {
return typeof window === 'object'
&& typeof document !== 'undefined'
&& typeof HTMLImageElement !== 'undefined'
&& typeof HTMLCanvasElement !== 'undefined'
&& typeof HTMLVideoElement !== 'undefined'
&& typeof ImageData !== 'undefined'
&& typeof CanvasRenderingContext2D !== 'undefined'
}

@@ -1,8 +0,0 @@
export function isNodejs(): boolean {
return typeof global === 'object'
&& typeof require === 'function'
&& typeof module !== 'undefined'
// issues with gatsby.js: module.exports is undefined
// && !!module.exports
&& typeof process !== 'undefined' && !!process.version
}

@@ -1,14 +0,0 @@
export type FileSystem = {
readFile: (filePath: string) => Promise<Buffer>
}
export type Environment = FileSystem & {
Canvas: typeof HTMLCanvasElement
CanvasRenderingContext2D: typeof CanvasRenderingContext2D
Image: typeof HTMLImageElement
ImageData: typeof ImageData
Video: typeof HTMLVideoElement
createCanvasElement: () => HTMLCanvasElement
createImageElement: () => HTMLImageElement
fetch: (url: string, init?: RequestInit) => Promise<Response>
}

@@ -1,13 +0,0 @@
export function euclideanDistance(arr1: number[] | Float32Array, arr2: number[] | Float32Array) {
if (arr1.length !== arr2.length)
throw new Error('euclideanDistance: arr1.length !== arr2.length')
const desc1 = Array.from(arr1)
const desc2 = Array.from(arr2)
return Math.sqrt(
desc1
.map((val, i) => val - desc2[i])
.reduce((res, diff) => res + Math.pow(diff, 2), 0)
)
}
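
A usage sketch comparing two 128-dimensional face descriptors; the 0.6 cutoff is the commonly used default for this model, not something defined in this file:

const distance = euclideanDistance(descriptor1, descriptor2) // hypothetical Float32Arrays
const isSameIdentity = distance < 0.6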

@@ -1,55 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, TNetInput, toNetInput } from '../dom';
import { NeuralNetwork } from '../NeuralNetwork';
import { normalize } from '../ops';
import { denseBlock4 } from './denseBlock';
import { extractParams } from './extractParams';
import { extractParamsFromWeigthMap } from './extractParamsFromWeigthMap';
import { FaceFeatureExtractorParams, IFaceFeatureExtractor } from './types';
export class FaceFeatureExtractor extends NeuralNetwork<FaceFeatureExtractorParams> implements IFaceFeatureExtractor<FaceFeatureExtractorParams> {
constructor() {
super('FaceFeatureExtractor')
}
public forwardInput(input: NetInput): tf.Tensor4D {
const { params } = this
if (!params) {
throw new Error('FaceFeatureExtractor - load model before inference')
}
return tf.tidy(() => {
const batchTensor = input.toBatchTensor(112, true)
const meanRgb = [122.782, 117.001, 104.298]
const normalized = normalize(batchTensor, meanRgb).div(tf.scalar(255)) as tf.Tensor4D
let out = denseBlock4(normalized, params.dense0, true)
out = denseBlock4(out, params.dense1)
out = denseBlock4(out, params.dense2)
out = denseBlock4(out, params.dense3)
out = tf.avgPool(out, [7, 7], [2, 2], 'valid')
return out
})
}
public async forward(input: TNetInput): Promise<tf.Tensor4D> {
return this.forwardInput(await toNetInput(input))
}
protected getDefaultModelName(): string {
return 'face_feature_extractor_model'
}
protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap) {
return extractParamsFromWeigthMap(weightMap)
}
protected extractParams(weights: Float32Array) {
return extractParams(weights)
}
}

@@ -1,54 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, TNetInput, toNetInput } from '../dom';
import { NeuralNetwork } from '../NeuralNetwork';
import { normalize } from '../ops';
import { denseBlock3 } from './denseBlock';
import { extractParamsFromWeigthMapTiny } from './extractParamsFromWeigthMapTiny';
import { extractParamsTiny } from './extractParamsTiny';
import { IFaceFeatureExtractor, TinyFaceFeatureExtractorParams } from './types';
export class TinyFaceFeatureExtractor extends NeuralNetwork<TinyFaceFeatureExtractorParams> implements IFaceFeatureExtractor<TinyFaceFeatureExtractorParams> {
constructor() {
super('TinyFaceFeatureExtractor')
}
public forwardInput(input: NetInput): tf.Tensor4D {
const { params } = this
if (!params) {
throw new Error('TinyFaceFeatureExtractor - load model before inference')
}
return tf.tidy(() => {
const batchTensor = input.toBatchTensor(112, true)
const meanRgb = [122.782, 117.001, 104.298]
const normalized = normalize(batchTensor, meanRgb).div(tf.scalar(255)) as tf.Tensor4D
let out = denseBlock3(normalized, params.dense0, true)
out = denseBlock3(out, params.dense1)
out = denseBlock3(out, params.dense2)
out = tf.avgPool(out, [14, 14], [2, 2], 'valid')
return out
})
}
public async forward(input: TNetInput): Promise<tf.Tensor4D> {
return this.forwardInput(await toNetInput(input))
}
protected getDefaultModelName(): string {
return 'face_feature_extractor_tiny_model'
}
protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap) {
return extractParamsFromWeigthMapTiny(weightMap)
}
protected extractParams(weights: Float32Array) {
return extractParamsTiny(weights)
}
}

@@ -1,55 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { ConvParams, SeparableConvParams } from '../common';
import { depthwiseSeparableConv } from '../common/depthwiseSeparableConv';
import { DenseBlock3Params, DenseBlock4Params } from './types';
export function denseBlock3(
x: tf.Tensor4D,
denseBlockParams: DenseBlock3Params,
isFirstLayer: boolean = false
): tf.Tensor4D {
return tf.tidy(() => {
const out1 = tf.relu(
isFirstLayer
? tf.add(
tf.conv2d(x, (denseBlockParams.conv0 as ConvParams).filters, [2, 2], 'same'),
denseBlockParams.conv0.bias
)
: depthwiseSeparableConv(x, denseBlockParams.conv0 as SeparableConvParams, [2, 2])
) as tf.Tensor4D
const out2 = depthwiseSeparableConv(out1, denseBlockParams.conv1, [1, 1])
const in3 = tf.relu(tf.add(out1, out2)) as tf.Tensor4D
const out3 = depthwiseSeparableConv(in3, denseBlockParams.conv2, [1, 1])
return tf.relu(tf.add(out1, tf.add(out2, out3))) as tf.Tensor4D
})
}
export function denseBlock4(
x: tf.Tensor4D,
denseBlockParams: DenseBlock4Params,
isFirstLayer: boolean = false,
isScaleDown: boolean = true
): tf.Tensor4D {
return tf.tidy(() => {
const out1 = tf.relu(
isFirstLayer
? tf.add(
tf.conv2d(x, (denseBlockParams.conv0 as ConvParams).filters, isScaleDown ? [2, 2] : [1, 1], 'same'),
denseBlockParams.conv0.bias
)
: depthwiseSeparableConv(x, denseBlockParams.conv0 as SeparableConvParams, isScaleDown ? [2, 2] : [1, 1])
) as tf.Tensor4D
const out2 = depthwiseSeparableConv(out1, denseBlockParams.conv1, [1, 1])
const in3 = tf.relu(tf.add(out1, out2)) as tf.Tensor4D
const out3 = depthwiseSeparableConv(in3, denseBlockParams.conv2, [1, 1])
const in4 = tf.relu(tf.add(out1, tf.add(out2, out3))) as tf.Tensor4D
const out4 = depthwiseSeparableConv(in4, denseBlockParams.conv3, [1, 1])
return tf.relu(tf.add(out1, tf.add(out2, tf.add(out3, out4)))) as tf.Tensor4D
})
}

@@ -1,32 +0,0 @@
import { extractWeightsFactory, ParamMapping } from '../common';
import { extractorsFactory } from './extractorsFactory';
import { FaceFeatureExtractorParams } from './types';
export function extractParams(weights: Float32Array): { params: FaceFeatureExtractorParams, paramMappings: ParamMapping[] } {
const paramMappings: ParamMapping[] = []
const {
extractWeights,
getRemainingWeights
} = extractWeightsFactory(weights)
const {
extractDenseBlock4Params
} = extractorsFactory(extractWeights, paramMappings)
const dense0 = extractDenseBlock4Params(3, 32, 'dense0', true)
const dense1 = extractDenseBlock4Params(32, 64, 'dense1')
const dense2 = extractDenseBlock4Params(64, 128, 'dense2')
const dense3 = extractDenseBlock4Params(128, 256, 'dense3')
if (getRemainingWeights().length !== 0) {
throw new Error(`weights remaining after extract: ${getRemainingWeights().length}`)
}
return {
paramMappings,
params: { dense0, dense1, dense2, dense3 }
}
}

@@ -1,27 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { disposeUnusedWeightTensors, ParamMapping } from '../common';
import { loadParamsFactory } from './loadParamsFactory';
import { FaceFeatureExtractorParams } from './types';
export function extractParamsFromWeigthMap(
weightMap: tf.NamedTensorMap
): { params: FaceFeatureExtractorParams, paramMappings: ParamMapping[] } {
const paramMappings: ParamMapping[] = []
const {
extractDenseBlock4Params
} = loadParamsFactory(weightMap, paramMappings)
const params = {
dense0: extractDenseBlock4Params('dense0', true),
dense1: extractDenseBlock4Params('dense1'),
dense2: extractDenseBlock4Params('dense2'),
dense3: extractDenseBlock4Params('dense3')
}
disposeUnusedWeightTensors(weightMap, paramMappings)
return { params, paramMappings }
}

@@ -1,26 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { disposeUnusedWeightTensors, ParamMapping } from '../common';
import { loadParamsFactory } from './loadParamsFactory';
import { TinyFaceFeatureExtractorParams } from './types';
export function extractParamsFromWeigthMapTiny(
weightMap: tf.NamedTensorMap
): { params: TinyFaceFeatureExtractorParams, paramMappings: ParamMapping[] } {
const paramMappings: ParamMapping[] = []
const {
extractDenseBlock3Params
} = loadParamsFactory(weightMap, paramMappings)
const params = {
dense0: extractDenseBlock3Params('dense0', true),
dense1: extractDenseBlock3Params('dense1'),
dense2: extractDenseBlock3Params('dense2')
}
disposeUnusedWeightTensors(weightMap, paramMappings)
return { params, paramMappings }
}

@@ -1,32 +0,0 @@
import { extractWeightsFactory, ParamMapping } from '../common';
import { extractorsFactory } from './extractorsFactory';
import { TinyFaceFeatureExtractorParams } from './types';
export function extractParamsTiny(weights: Float32Array): { params: TinyFaceFeatureExtractorParams, paramMappings: ParamMapping[] } {
const paramMappings: ParamMapping[] = []
const {
extractWeights,
getRemainingWeights
} = extractWeightsFactory(weights)
const {
extractDenseBlock3Params
} = extractorsFactory(extractWeights, paramMappings)
const dense0 = extractDenseBlock3Params(3, 32, 'dense0', true)
const dense1 = extractDenseBlock3Params(32, 64, 'dense1')
const dense2 = extractDenseBlock3Params(64, 128, 'dense2')
if (getRemainingWeights().length !== 0) {
throw new Error(`weights remaining after extract: ${getRemainingWeights().length}`)
}
return {
paramMappings,
params: { dense0, dense1, dense2 }
}
}

@@ -1,38 +0,0 @@
import {
extractConvParamsFactory,
extractSeparableConvParamsFactory,
ExtractWeightsFunction,
ParamMapping,
} from '../common';
import { DenseBlock3Params, DenseBlock4Params } from './types';
export function extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {
const extractConvParams = extractConvParamsFactory(extractWeights, paramMappings)
const extractSeparableConvParams = extractSeparableConvParamsFactory(extractWeights, paramMappings)
function extractDenseBlock3Params(channelsIn: number, channelsOut: number, mappedPrefix: string, isFirstLayer: boolean = false): DenseBlock3Params {
const conv0 = isFirstLayer
? extractConvParams(channelsIn, channelsOut, 3, `${mappedPrefix}/conv0`)
: extractSeparableConvParams(channelsIn, channelsOut, `${mappedPrefix}/conv0`)
const conv1 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv1`)
const conv2 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv2`)
return { conv0, conv1, conv2 }
}
function extractDenseBlock4Params(channelsIn: number, channelsOut: number, mappedPrefix: string, isFirstLayer: boolean = false): DenseBlock4Params {
const { conv0, conv1, conv2 } = extractDenseBlock3Params(channelsIn, channelsOut, mappedPrefix, isFirstLayer)
const conv3 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv3`)
return { conv0, conv1, conv2, conv3 }
}
return {
extractDenseBlock3Params,
extractDenseBlock4Params
}
}

@@ -1,2 +0,0 @@
export * from './FaceFeatureExtractor';
export * from './TinyFaceFeatureExtractor';

@@ -1,37 +0,0 @@
import { extractWeightEntryFactory, loadSeparableConvParamsFactory, ParamMapping } from '../common';
import { loadConvParamsFactory } from '../common/loadConvParamsFactory';
import { DenseBlock3Params, DenseBlock4Params } from './types';
export function loadParamsFactory(weightMap: any, paramMappings: ParamMapping[]) {
const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings)
const extractConvParams = loadConvParamsFactory(extractWeightEntry)
const extractSeparableConvParams = loadSeparableConvParamsFactory(extractWeightEntry)
function extractDenseBlock3Params(prefix: string, isFirstLayer: boolean = false): DenseBlock3Params {
const conv0 = isFirstLayer
? extractConvParams(`${prefix}/conv0`)
: extractSeparableConvParams(`${prefix}/conv0`)
const conv1 = extractSeparableConvParams(`${prefix}/conv1`)
const conv2 = extractSeparableConvParams(`${prefix}/conv2`)
return { conv0, conv1, conv2 }
}
function extractDenseBlock4Params(prefix: string, isFirstLayer: boolean = false): DenseBlock4Params {
const conv0 = isFirstLayer
? extractConvParams(`${prefix}/conv0`)
: extractSeparableConvParams(`${prefix}/conv0`)
const conv1 = extractSeparableConvParams(`${prefix}/conv1`)
const conv2 = extractSeparableConvParams(`${prefix}/conv2`)
const conv3 = extractSeparableConvParams(`${prefix}/conv3`)
return { conv0, conv1, conv2, conv3 }
}
return {
extractDenseBlock3Params,
extractDenseBlock4Params
}
}

@@ -1,49 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, TNetInput } from '../dom';
import { ConvParams, SeparableConvParams } from '../common';
import { NeuralNetwork } from '../NeuralNetwork';
export type ConvWithBatchNormParams = BatchNormParams & {
filter: tf.Tensor4D
}
export type BatchNormParams = {
mean: tf.Tensor1D
variance: tf.Tensor1D
scale: tf.Tensor1D
offset: tf.Tensor1D
}
export type SeparableConvWithBatchNormParams = {
depthwise: ConvWithBatchNormParams
pointwise: ConvWithBatchNormParams
}
export type DenseBlock3Params = {
conv0: SeparableConvParams | ConvParams
conv1: SeparableConvParams
conv2: SeparableConvParams
}
export type DenseBlock4Params = DenseBlock3Params & {
conv3: SeparableConvParams
}
export type TinyFaceFeatureExtractorParams = {
dense0: DenseBlock3Params
dense1: DenseBlock3Params
dense2: DenseBlock3Params
}
export type FaceFeatureExtractorParams = {
dense0: DenseBlock4Params
dense1: DenseBlock4Params
dense2: DenseBlock4Params
dense3: DenseBlock4Params
}
export interface IFaceFeatureExtractor<TNetParams extends TinyFaceFeatureExtractorParams | FaceFeatureExtractorParams> extends NeuralNetwork<TNetParams> {
forwardInput(input: NetInput): tf.Tensor4D
forward(input: TNetInput): Promise<tf.Tensor4D>
}

@@ -1,18 +0,0 @@
import { FaceFeatureExtractor } from '../faceFeatureExtractor/FaceFeatureExtractor';
import { FaceFeatureExtractorParams } from '../faceFeatureExtractor/types';
import { FaceLandmark68NetBase } from './FaceLandmark68NetBase';
export class FaceLandmark68Net extends FaceLandmark68NetBase<FaceFeatureExtractorParams> {
constructor(faceFeatureExtractor: FaceFeatureExtractor = new FaceFeatureExtractor()) {
super('FaceLandmark68Net', faceFeatureExtractor)
}
protected getDefaultModelName(): string {
return 'face_landmark_68_model'
}
protected getClassifierChannelsIn(): number {
return 256
}
}

@@ -1,107 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { IDimensions, Point } from '../classes';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { NetInput, TNetInput, toNetInput } from '../dom';
import { FaceFeatureExtractorParams, TinyFaceFeatureExtractorParams } from '../faceFeatureExtractor/types';
import { FaceProcessor } from '../faceProcessor/FaceProcessor';
import { isEven } from '../utils';
export abstract class FaceLandmark68NetBase<
TExtractorParams extends FaceFeatureExtractorParams | TinyFaceFeatureExtractorParams
>
extends FaceProcessor<TExtractorParams> {
public postProcess(output: tf.Tensor2D, inputSize: number, originalDimensions: IDimensions[]): tf.Tensor2D {
const inputDimensions = originalDimensions.map(({ width, height }) => {
const scale = inputSize / Math.max(height, width)
return {
width: width * scale,
height: height * scale
}
})
const batchSize = inputDimensions.length
return tf.tidy(() => {
const createInterleavedTensor = (fillX: number, fillY: number) =>
tf.stack([
tf.fill([68], fillX),
tf.fill([68], fillY)
], 1).as2D(1, 136).as1D()
const getPadding = (batchIdx: number, cond: (w: number, h: number) => boolean): number => {
const { width, height } = inputDimensions[batchIdx]
return cond(width, height) ? Math.abs(width - height) / 2 : 0
}
const getPaddingX = (batchIdx: number) => getPadding(batchIdx, (w, h) => w < h)
const getPaddingY = (batchIdx: number) => getPadding(batchIdx, (w, h) => h < w)
const landmarkTensors = output
.mul(tf.fill([batchSize, 136], inputSize))
.sub(tf.stack(Array.from(Array(batchSize), (_, batchIdx) =>
createInterleavedTensor(
getPaddingX(batchIdx),
getPaddingY(batchIdx)
)
)))
.div(tf.stack(Array.from(Array(batchSize), (_, batchIdx) =>
createInterleavedTensor(
inputDimensions[batchIdx].width,
inputDimensions[batchIdx].height
)
)))
return landmarkTensors as tf.Tensor2D
})
}
public forwardInput(input: NetInput): tf.Tensor2D {
return tf.tidy(() => {
const out = this.runNet(input)
return this.postProcess(
out,
input.inputSize as number,
input.inputDimensions.map(([height, width]) => ({ height, width }))
)
})
}
public async forward(input: TNetInput): Promise<tf.Tensor2D> {
return this.forwardInput(await toNetInput(input))
}
public async detectLandmarks(input: TNetInput): Promise<FaceLandmarks68 | FaceLandmarks68[]> {
const netInput = await toNetInput(input)
const landmarkTensors = tf.tidy(
() => tf.unstack(this.forwardInput(netInput))
)
const landmarksForBatch = await Promise.all(landmarkTensors.map(
async (landmarkTensor, batchIdx) => {
const landmarksArray = Array.from(await landmarkTensor.data())
const xCoords = landmarksArray.filter((_, i) => isEven(i))
const yCoords = landmarksArray.filter((_, i) => !isEven(i))
return new FaceLandmarks68(
Array(68).fill(0).map((_, i) => new Point(xCoords[i], yCoords[i])),
{
height: netInput.getInputHeight(batchIdx),
width : netInput.getInputWidth(batchIdx),
}
)
}
))
landmarkTensors.forEach(t => t.dispose())
return netInput.isBatchInput
? landmarksForBatch
: landmarksForBatch[0]
}
protected getClassifierChannelsOut(): number {
return 136
}
}
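
A usage sketch, assuming a loaded landmark net and a hypothetical face image element:

const landmarks = await landmarkNet.detectLandmarks(faceImage) as FaceLandmarks68
// 68 Points in the coordinate space of the input image
const points = landmarks.positions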

@@ -1,5 +0,0 @@
import { FaceLandmark68Net } from './FaceLandmark68Net';
export * from './FaceLandmark68Net';
export class FaceLandmarkNet extends FaceLandmark68Net {}

@@ -1,88 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { fullyConnectedLayer } from '../common/fullyConnectedLayer';
import { NetInput } from '../dom';
import {
FaceFeatureExtractorParams,
IFaceFeatureExtractor,
TinyFaceFeatureExtractorParams,
} from '../faceFeatureExtractor/types';
import { NeuralNetwork } from '../NeuralNetwork';
import { extractParams } from './extractParams';
import { extractParamsFromWeigthMap } from './extractParamsFromWeigthMap';
import { NetParams } from './types';
import { seperateWeightMaps } from './util';
export abstract class FaceProcessor<
TExtractorParams extends FaceFeatureExtractorParams | TinyFaceFeatureExtractorParams
>
extends NeuralNetwork<NetParams> {
protected _faceFeatureExtractor: IFaceFeatureExtractor<TExtractorParams>
constructor(_name: string, faceFeatureExtractor: IFaceFeatureExtractor<TExtractorParams>) {
super(_name)
this._faceFeatureExtractor = faceFeatureExtractor
}
public get faceFeatureExtractor(): IFaceFeatureExtractor<TExtractorParams> {
return this._faceFeatureExtractor
}
protected abstract getDefaultModelName(): string
protected abstract getClassifierChannelsIn(): number
protected abstract getClassifierChannelsOut(): number
public runNet(input: NetInput | tf.Tensor4D): tf.Tensor2D {
const { params } = this
if (!params) {
throw new Error(`${this._name} - load model before inference`)
}
return tf.tidy(() => {
const bottleneckFeatures = input instanceof NetInput
? this.faceFeatureExtractor.forwardInput(input)
: input
return fullyConnectedLayer(bottleneckFeatures.as2D(bottleneckFeatures.shape[0], -1), params.fc)
})
}
public dispose(throwOnRedispose: boolean = true) {
this.faceFeatureExtractor.dispose(throwOnRedispose)
super.dispose(throwOnRedispose)
}
public loadClassifierParams(weights: Float32Array) {
const { params, paramMappings } = this.extractClassifierParams(weights)
this._params = params
this._paramMappings = paramMappings
}
public extractClassifierParams(weights: Float32Array) {
return extractParams(weights, this.getClassifierChannelsIn(), this.getClassifierChannelsOut())
}
protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap) {
const { featureExtractorMap, classifierMap } = seperateWeightMaps(weightMap)
this.faceFeatureExtractor.loadFromWeightMap(featureExtractorMap)
return extractParamsFromWeigthMap(classifierMap)
}
protected extractParams(weights: Float32Array) {
const cIn = this.getClassifierChannelsIn()
const cOut = this.getClassifierChannelsOut()
const classifierWeightSize = (cOut * cIn) + cOut
const featureExtractorWeights = weights.slice(0, weights.length - classifierWeightSize)
const classifierWeights = weights.slice(weights.length - classifierWeightSize)
this.faceFeatureExtractor.extractWeights(featureExtractorWeights)
return this.extractClassifierParams(classifierWeights)
}
}

@@ -1,25 +0,0 @@
import { extractFCParamsFactory, extractWeightsFactory, ParamMapping } from '../common';
import { NetParams } from './types';
export function extractParams(weights: Float32Array, channelsIn: number, channelsOut: number): { params: NetParams, paramMappings: ParamMapping[] } {
const paramMappings: ParamMapping[] = []
const {
extractWeights,
getRemainingWeights
} = extractWeightsFactory(weights)
const extractFCParams = extractFCParamsFactory(extractWeights, paramMappings)
const fc = extractFCParams(channelsIn, channelsOut, 'fc')
if (getRemainingWeights().length !== 0) {
throw new Error(`weights remaining after extract: ${getRemainingWeights().length}`)
}
return {
paramMappings,
params: { fc }
}
}

@@ -1,27 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { disposeUnusedWeightTensors, extractWeightEntryFactory, FCParams, ParamMapping } from '../common';
import { NetParams } from './types';
export function extractParamsFromWeigthMap(
weightMap: tf.NamedTensorMap
): { params: NetParams, paramMappings: ParamMapping[] } {
const paramMappings: ParamMapping[] = []
const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings)
function extractFcParams(prefix: string): FCParams {
const weights = extractWeightEntry<tf.Tensor2D>(`${prefix}/weights`, 2)
const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1)
return { weights, bias }
}
const params = {
fc: extractFcParams('fc')
}
disposeUnusedWeightTensors(weightMap, paramMappings)
return { params, paramMappings }
}

@@ -1 +0,0 @@
export * from './FaceProcessor';

@@ -1,6 +0,0 @@
import { FCParams } from '../common';
export type NetParams = {
fc: FCParams
}

@@ -1,15 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
export function seperateWeightMaps(weightMap: tf.NamedTensorMap) {
const featureExtractorMap: tf.NamedTensorMap = {}
const classifierMap: tf.NamedTensorMap = {}
Object.keys(weightMap).forEach(key => {
const map = key.startsWith('fc') ? classifierMap : featureExtractorMap
map[key] = weightMap[key]
})
return { featureExtractorMap, classifierMap }
}

@@ -1,94 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, TNetInput, toNetInput } from '../dom';
import { NeuralNetwork } from '../NeuralNetwork';
import { normalize } from '../ops';
import { convDown } from './convLayer';
import { extractParams } from './extractParams';
import { extractParamsFromWeigthMap } from './extractParamsFromWeigthMap';
import { residual, residualDown } from './residualLayer';
import { NetParams } from './types';
export class FaceRecognitionNet extends NeuralNetwork<NetParams> {
constructor() {
super('FaceRecognitionNet')
}
public forwardInput(input: NetInput): tf.Tensor2D {
const { params } = this
if (!params) {
throw new Error('FaceRecognitionNet - load model before inference')
}
return tf.tidy(() => {
const batchTensor = input.toBatchTensor(150, true).toFloat()
const meanRgb = [122.782, 117.001, 104.298]
const normalized = normalize(batchTensor, meanRgb).div(tf.scalar(256)) as tf.Tensor4D
let out = convDown(normalized, params.conv32_down)
out = tf.maxPool(out, 3, 2, 'valid')
out = residual(out, params.conv32_1)
out = residual(out, params.conv32_2)
out = residual(out, params.conv32_3)
out = residualDown(out, params.conv64_down)
out = residual(out, params.conv64_1)
out = residual(out, params.conv64_2)
out = residual(out, params.conv64_3)
out = residualDown(out, params.conv128_down)
out = residual(out, params.conv128_1)
out = residual(out, params.conv128_2)
out = residualDown(out, params.conv256_down)
out = residual(out, params.conv256_1)
out = residual(out, params.conv256_2)
out = residualDown(out, params.conv256_down_out)
const globalAvg = out.mean([1, 2]) as tf.Tensor2D
const fullyConnected = tf.matMul(globalAvg, params.fc)
return fullyConnected as tf.Tensor2D
})
}
public async forward(input: TNetInput): Promise<tf.Tensor2D> {
return this.forwardInput(await toNetInput(input))
}
public async computeFaceDescriptor(input: TNetInput): Promise<Float32Array|Float32Array[]> {
const netInput = await toNetInput(input)
const faceDescriptorTensors = tf.tidy(
() => tf.unstack(this.forwardInput(netInput))
)
const faceDescriptorsForBatch = await Promise.all(faceDescriptorTensors.map(
t => t.data()
)) as Float32Array[]
faceDescriptorTensors.forEach(t => t.dispose())
return netInput.isBatchInput
? faceDescriptorsForBatch
: faceDescriptorsForBatch[0]
}
protected getDefaultModelName(): string {
return 'face_recognition_model'
}
protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap) {
return extractParamsFromWeigthMap(weightMap)
}
protected extractParams(weights: Float32Array) {
return extractParams(weights)
}
}
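
A usage sketch combining computeFaceDescriptor with the removed euclideanDistance helper (the inputs are hypothetical, aligned face images):

const desc1 = await recognitionNet.computeFaceDescriptor(face1) as Float32Array
const desc2 = await recognitionNet.computeFaceDescriptor(face2) as Float32Array
const dist = euclideanDistance(desc1, desc2) // lower distance means more similar faces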

@@ -1,32 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { scale } from './scaleLayer';
import { ConvLayerParams } from './types';
function convLayer(
x: tf.Tensor4D,
params: ConvLayerParams,
strides: [number, number],
withRelu: boolean,
padding: 'valid' | 'same' = 'same'
): tf.Tensor4D {
const { filters, bias } = params.conv
let out = tf.conv2d(x, filters, strides, padding)
out = tf.add(out, bias)
out = scale(out, params.scale)
return withRelu ? tf.relu(out) : out
}
export function conv(x: tf.Tensor4D, params: ConvLayerParams) {
return convLayer(x, params, [1, 1], true)
}
export function convNoRelu(x: tf.Tensor4D, params: ConvLayerParams) {
return convLayer(x, params, [1, 1], false)
}
export function convDown(x: tf.Tensor4D, params: ConvLayerParams) {
return convLayer(x, params, [2, 2], true, 'valid')
}

@@ -1,155 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { ConvParams, extractWeightsFactory, ExtractWeightsFunction, ParamMapping } from '../common';
import { isFloat } from '../utils';
import { ConvLayerParams, NetParams, ResidualLayerParams, ScaleLayerParams } from './types';
function extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {
function extractFilterValues(numFilterValues: number, numFilters: number, filterSize: number): tf.Tensor4D {
const weights = extractWeights(numFilterValues)
const depth = weights.length / (numFilters * filterSize * filterSize)
if (isFloat(depth)) {
throw new Error(`depth has to be an integer: ${depth}, weights.length: ${weights.length}, numFilters: ${numFilters}, filterSize: ${filterSize}`)
}
return tf.tidy(
() => tf.transpose(
tf.tensor4d(weights, [numFilters, depth, filterSize, filterSize]),
[2, 3, 1, 0]
)
)
}
function extractConvParams(
numFilterValues: number,
numFilters: number,
filterSize: number,
mappedPrefix: string
): ConvParams {
const filters = extractFilterValues(numFilterValues, numFilters, filterSize)
const bias = tf.tensor1d(extractWeights(numFilters))
paramMappings.push(
{ paramPath: `${mappedPrefix}/filters` },
{ paramPath: `${mappedPrefix}/bias` }
)
return { filters, bias }
}
function extractScaleLayerParams(numWeights: number, mappedPrefix: string): ScaleLayerParams {
const weights = tf.tensor1d(extractWeights(numWeights))
const biases = tf.tensor1d(extractWeights(numWeights))
paramMappings.push(
{ paramPath: `${mappedPrefix}/weights` },
{ paramPath: `${mappedPrefix}/biases` }
)
return {
weights,
biases
}
}
function extractConvLayerParams(
numFilterValues: number,
numFilters: number,
filterSize: number,
mappedPrefix: string
): ConvLayerParams {
const conv = extractConvParams(numFilterValues, numFilters, filterSize, `${mappedPrefix}/conv`)
const scale = extractScaleLayerParams(numFilters, `${mappedPrefix}/scale`)
return { conv, scale }
}
function extractResidualLayerParams(
numFilterValues: number,
numFilters: number,
filterSize: number,
mappedPrefix: string,
isDown: boolean = false
): ResidualLayerParams {
const conv1 = extractConvLayerParams((isDown ? 0.5 : 1) * numFilterValues, numFilters, filterSize, `${mappedPrefix}/conv1`)
const conv2 = extractConvLayerParams(numFilterValues, numFilters, filterSize, `${mappedPrefix}/conv2`)
return { conv1, conv2 }
}
return {
extractConvLayerParams,
extractResidualLayerParams
}
}
export function extractParams(weights: Float32Array): { params: NetParams, paramMappings: ParamMapping[] } {
const {
extractWeights,
getRemainingWeights
} = extractWeightsFactory(weights)
const paramMappings: ParamMapping[] = []
const {
extractConvLayerParams,
extractResidualLayerParams
} = extractorsFactory(extractWeights, paramMappings)
const conv32_down = extractConvLayerParams(4704, 32, 7, 'conv32_down')
const conv32_1 = extractResidualLayerParams(9216, 32, 3, 'conv32_1')
const conv32_2 = extractResidualLayerParams(9216, 32, 3, 'conv32_2')
const conv32_3 = extractResidualLayerParams(9216, 32, 3, 'conv32_3')
const conv64_down = extractResidualLayerParams(36864, 64, 3, 'conv64_down', true)
const conv64_1 = extractResidualLayerParams(36864, 64, 3, 'conv64_1')
const conv64_2 = extractResidualLayerParams(36864, 64, 3, 'conv64_2')
const conv64_3 = extractResidualLayerParams(36864, 64, 3, 'conv64_3')
const conv128_down = extractResidualLayerParams(147456, 128, 3, 'conv128_down', true)
const conv128_1 = extractResidualLayerParams(147456, 128, 3, 'conv128_1')
const conv128_2 = extractResidualLayerParams(147456, 128, 3, 'conv128_2')
const conv256_down = extractResidualLayerParams(589824, 256, 3, 'conv256_down', true)
const conv256_1 = extractResidualLayerParams(589824, 256, 3, 'conv256_1')
const conv256_2 = extractResidualLayerParams(589824, 256, 3, 'conv256_2')
const conv256_down_out = extractResidualLayerParams(589824, 256, 3, 'conv256_down_out')
const fc = tf.tidy(
() => tf.transpose(tf.tensor2d(extractWeights(256 * 128), [128, 256]), [1, 0])
)
paramMappings.push({ paramPath: `fc` })
if (getRemainingWeights().length !== 0) {
throw new Error(`weights remaining after extract: ${getRemainingWeights().length}`)
}
const params = {
conv32_down,
conv32_1,
conv32_2,
conv32_3,
conv64_down,
conv64_1,
conv64_2,
conv64_3,
conv128_down,
conv128_1,
conv128_2,
conv256_down,
conv256_1,
conv256_2,
conv256_down_out,
fc
}
return { params, paramMappings }
}
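
The filter-value counts passed above follow numFilters * filterSize * filterSize * inputDepth, matching the depth computation in extractFilterValues:

// conv32_down: 32 filters, 7×7, over 3 RGB channels -> 32 * 7 * 7 * 3 = 4704
// conv32_x:    32 filters, 3×3, over 32 channels    -> 32 * 3 * 3 * 32 = 9216
// conv64_down: conv1 halves the count (isDown=true) -> 0.5 * 36864 = 18432 = 64 * 3 * 3 * 32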

@@ -1,101 +0,0 @@
import * as tf from '@tensorflow/tfjs-core';
import { disposeUnusedWeightTensors, extractWeightEntryFactory, ParamMapping } from '../common';
import { isTensor2D } from '../utils';
import { ConvLayerParams, NetParams, ResidualLayerParams, ScaleLayerParams } from './types';
function extractorsFactory(weightMap: any, paramMappings: ParamMapping[]) {
const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings)
function extractScaleLayerParams(prefix: string): ScaleLayerParams {
const weights = extractWeightEntry<tf.Tensor1D>(`${prefix}/scale/weights`, 1)
const biases = extractWeightEntry<tf.Tensor1D>(`${prefix}/scale/biases`, 1)
return { weights, biases }
}
function extractConvLayerParams(prefix: string): ConvLayerParams {
const filters = extractWeightEntry<tf.Tensor4D>(`${prefix}/conv/filters`, 4)
const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/conv/bias`, 1)
const scale = extractScaleLayerParams(prefix)
return { conv: { filters, bias }, scale }
}
function extractResidualLayerParams(prefix: string): ResidualLayerParams {
return {
conv1: extractConvLayerParams(`${prefix}/conv1`),
conv2: extractConvLayerParams(`${prefix}/conv2`)
}
}
return {
extractConvLayerParams,
extractResidualLayerParams
}
}
export function extractParamsFromWeigthMap(
weightMap: tf.NamedTensorMap
): { params: NetParams, paramMappings: ParamMapping[] } {
const paramMappings: ParamMapping[] = []
const {
extractConvLayerParams,
extractResidualLayerParams
} = extractorsFactory(weightMap, paramMappings)
const conv32_down = extractConvLayerParams('conv32_down')
const conv32_1 = extractResidualLayerParams('conv32_1')
const conv32_2 = extractResidualLayerParams('conv32_2')
const conv32_3 = extractResidualLayerParams('conv32_3')
const conv64_down = extractResidualLayerParams('conv64_down')
const conv64_1 = extractResidualLayerParams('conv64_1')
const conv64_2 = extractResidualLayerParams('conv64_2')
const conv64_3 = extractResidualLayerParams('conv64_3')
const conv128_down = extractResidualLayerParams('conv128_down')
const conv128_1 = extractResidualLayerParams('conv128_1')
const conv128_2 = extractResidualLayerParams('conv128_2')
const conv256_down = extractResidualLayerParams('conv256_down')
const conv256_1 = extractResidualLayerParams('conv256_1')
const conv256_2 = extractResidualLayerParams('conv256_2')
const conv256_down_out = extractResidualLayerParams('conv256_down_out')
const fc = weightMap['fc']
paramMappings.push({ originalPath: 'fc', paramPath: 'fc' })
if (!isTensor2D(fc)) {
throw new Error(`expected weightMap[fc] to be a Tensor2D, instead got ${fc}`)
}
const params = {
conv32_down,
conv32_1,
conv32_2,
conv32_3,
conv64_down,
conv64_1,
conv64_2,
conv64_3,
conv128_down,
conv128_1,
conv128_2,
conv256_down,
conv256_1,
conv256_2,
conv256_down_out,
fc
}
disposeUnusedWeightTensors(weightMap, paramMappings)
return { params, paramMappings }
}

Some files were not shown because too many files have changed in this diff.