// nicecode-v2/packages/func/src/math/index.ts
import { cloneDeep, get, isNull, isNumber, isString } from 'lodash-es';
import { dataURLToBlob, generateImg, urlToImg } from '../file';
import { IOdRectOrigin, Rect } from '@zhst/types';
// Minimal local stand-in for the protobuf-generated enum namespace: maps
// algorithm-version identifiers to their Chinese display labels
// (形体 = body/Re-ID, 人脸 = face, 非机动车 = non-motor vehicle).
const proto = {
  Common: {
    AlgorithmVersion: {
      VERSION_REID_HEAD_ATTR: '形体',
      VERSION_FACE: '人脸',
      VERSION_NON_MOTOR_VEHICLE: '非机动车',
    },
  },
};
/**
 * Maps algorithm-version codes to their Chinese display names
 * (7 = body/Re-ID, 4 = face, 6 = non-motor vehicle).
 */
export const ALGORITHM_VERSION = {
  // Plain string keys — the original computed keys (['7']:) were unnecessary.
  '7': '形体',
  '4': '人脸',
  '6': '非机动车',
};
/** All known algorithm-version codes (Object.keys already returns a fresh array). */
export const algorithmVersions = Object.keys(ALGORITHM_VERSION);
/**
 * Extend a non-motor-vehicle (bike) detection rect: grow upward by one
 * rect-height and widen by 15% on each side, clamped to the image bounds.
 *
 * @param rect source rect in pixel coordinates
 * @param maxW image width in pixels (right-edge clamp)
 * @returns the extended rect in pixel coordinates
 */
export const getBikeExtendRect = (rect: Rect, maxW: number) => {
  const newRect = { ...rect };
  // Extend upward by one rect-height, clamped at the top edge.
  // (rect.y is a primitive number — the original cloneDeep was a no-op.)
  const oldY = rect.y;
  newRect.y = newRect.y - newRect.h < 0 ? 0 : newRect.y - newRect.h;
  newRect.h += oldY - newRect.y;
  // Widen by 15% of the width on each side, clamped to [0, maxW].
  let newX = Math.round(newRect.x - newRect.w * 0.15);
  if (newX < 0) {
    newX = 0;
  }
  let newW = newRect.x - newX + newRect.w + Math.round(newRect.w * 0.15);
  if (newX + newW > maxW) {
    newW = maxW - newX;
  }
  newRect.x = newX;
  newRect.w = newW;
  return newRect;
};
/**
 * Extend a detection rect outward by 25% on each side (left/right/up/down),
 * clamped to the image bounds. For body (Re-ID) and face results the rect is
 * then padded toward a fixed w:h = 3:4 aspect ratio.
 *
 * @param srcRect source rect in pixel coordinates
 * @param maxW image width in pixels (clamp limit)
 * @param maxH image height in pixels (clamp limit)
 * @param type algorithm version label (see proto.Common.AlgorithmVersion)
 * @returns the extended rect in pixel coordinates
 */
export const getOtherExtendRect = (
  srcRect: { x: number; y: number; w: number; h: number },
  maxW: number,
  maxH: number,
  type: string
) => {
  const wExtendRadio = 0.25;
  const upExtendRadio = 0.25;
  const downExtendRadio = 0.25;
  const fixPersonExtend = true; // always apply the aspect-ratio fix for person/face

  // Horizontal extension, clamped to [0, maxW].
  let nx = srcRect.x - Math.round(srcRect.w * wExtendRadio);
  if (nx < 0) {
    nx = 0;
  }
  let nw = srcRect.x - nx + srcRect.w + Math.round(srcRect.w * wExtendRadio);
  if (nx + nw > maxW) {
    nw = maxW - nx;
  }

  // Vertical extension, clamped to [0, maxH].
  let ny = srcRect.y - Math.round(upExtendRadio * srcRect.h);
  if (ny < 0) {
    ny = 0;
  }
  let nh = srcRect.y - ny + srcRect.h + Math.round(srcRect.h * downExtendRadio);
  if (ny + nh > maxH) {
    nh = maxH - ny;
  }

  let newRect = {
    x: nx,
    y: ny,
    w: nw,
    h: nh,
  };

  if (
    (type === proto.Common.AlgorithmVersion.VERSION_REID_HEAD_ATTR ||
      type === proto.Common.AlgorithmVersion.VERSION_FACE) &&
    fixPersonExtend
  ) {
    // Target width for a 3:4 aspect ratio at the current height.
    const fixW = Math.round(nh * 0.75);
    if (nw < fixW) {
      // Too narrow: widen around the horizontal center, clamped to the image.
      let newX = nx + Math.round(nw / 2.0 - 0.5 * fixW);
      if (newX < 0) {
        newX = 0;
      }
      let newW = fixW;
      if (newW + newX > maxW) {
        newW = maxW - newX;
      }
      newRect = {
        x: newX,
        y: ny,
        w: newW,
        h: nh,
      };
    } else if (nw > fixW) {
      // Too wide: grow the height around the vertical center (1.333 ≈ 4/3).
      const fixH = Math.round(nw * 1.333);
      let newY = ny + Math.round(nh / 2.0 - 0.5 * fixH);
      if (newY < 0) {
        newY = 0;
      }
      let newH = fixH;
      if (newY + newH > maxH) {
        newH = maxH - newY;
      }
      newRect = {
        x: nx,
        y: newY,
        w: nw,
        h: newH,
      };
    }
  }
  return newRect;
};
/**
 * Normalize a pixel-space rect to [0,1] coordinates relative to the given
 * image dimensions, clamping width/height so the box never exceeds the
 * right/bottom edge. Extra properties on srcRect are carried through.
 *
 * @param srcRect rect in pixel coordinates
 * @param maxW image width in pixels
 * @param maxH image height in pixels
 */
export const getNormalization = (srcRect: Rect, maxW: number, maxH: number) => {
  const normX = srcRect.x / maxW;
  const normY = srcRect.y / maxH;
  // Clamp so x + w <= 1 and y + h <= 1.
  return {
    ...srcRect,
    x: normX,
    y: normY,
    w: Math.min(srcRect.w / maxW, 1 - normX),
    h: Math.min(srcRect.h / maxH, 1 - normY),
  };
};
/**
 * Take a normalized OD (object detection) rect and return its extended rect,
 * also normalized. Bikes use the bike-specific extension; everything else
 * uses the generic person/face extension.
 *
 * @param normalizationRect rect with x/y/w/h in [0,1]
 * @param imgW image width in pixels
 * @param imgH image height in pixels
 * @param type algorithm version label (see proto.Common.AlgorithmVersion)
 */
export const getExtendRect = (normalizationRect: Rect, imgW: number, imgH: number, type: string) => {
  // Denormalize to pixel coordinates first.
  const pixelRect = {
    x: normalizationRect.x * imgW,
    y: normalizationRect.y * imgH,
    w: normalizationRect.w * imgW,
    h: normalizationRect.h * imgH,
  };
  const extended =
    type === proto.Common.AlgorithmVersion.VERSION_NON_MOTOR_VEHICLE
      ? getBikeExtendRect(pixelRect, imgW)
      : getOtherExtendRect(pixelRect, imgW, imgH, type);
  // Hand back a normalized rect.
  return getNormalization(extended, imgW, imgH);
};
/**
 * Map a rect drawn in canvas (viewport) coordinates back onto the image and
 * return it normalized to [0,1].
 *
 * Steps: undo pan/zoom → shift into image coordinates (accounting for the
 * offset a 90°/270° rotation introduces) → clamp to the image → normalize.
 *
 * @param image element exposing pre-rotation pixel width/height
 * @param transform current view transform (rotate presumably in degrees — confirm with caller)
 * @param rect selection rect in canvas coordinates
 * @returns normalized rect relative to the (possibly rotated) image
 */
export const getTransformRect = (image: { height: number; width: number; }, transform: { translateX: any; translateY: any; scale: any; rotate: any; }, rect: Rect) => {
  // Work with corner coordinates (x,y)-(x2,y2) instead of width/height.
  const canvasRect = {
    x: rect.x,
    y: rect.y,
    x2: rect.x + rect.w,
    y2: rect.h + rect.y,
  };
  // 1. Undo translation and scale to get pre-transform coordinates.
  const { translateX, translateY, scale, rotate } = transform;
  const originAxisRect = {
    x: (canvasRect.x - translateX) / scale,
    y: (canvasRect.y - translateY) / scale,
    x2: (canvasRect.x2 - translateX) / scale,
    y2: (canvasRect.y2 - translateY) / scale,
  };
  // 2. Convert to image coordinates.
  // Without rotation the image origin coincides with the coordinate origin.
  let imgAxisRect = originAxisRect;
  // After a quarter turn the image no longer sits at the origin — remove that diff.
  if (rotate % 180 !== 0) {
    // Offset introduced by the 90°/270° rotation.
    const offsetX = -(image.height - image.width) / 2;
    const offsetY = -(image.width - image.height) / 2;
    imgAxisRect = {
      x: originAxisRect.x - offsetX,
      y: originAxisRect.y - offsetY,
      x2: originAxisRect.x2 - offsetX,
      y2: originAxisRect.y2 - offsetY,
    };
  }
  // 3. Clamp the rect so it never leaves the image.
  let imgW = image.width;
  let imgH = image.height;
  if (rotate % 180 !== 0) {
    // Quarter turns swap the effective width/height.
    [imgW, imgH] = [imgH, imgW];
  }
  imgAxisRect.x = Math.min(imgW, Math.max(imgAxisRect.x, 0));
  imgAxisRect.y = Math.min(imgH, Math.max(imgAxisRect.y, 0));
  imgAxisRect.x2 = Math.min(imgW, Math.max(imgAxisRect.x2, 0));
  imgAxisRect.y2 = Math.min(imgH, Math.max(imgAxisRect.y2, 0));
  // Back to x/y/w/h (the corners may have crossed, so take min/abs),
  // then return normalized coordinates.
  const endRect = {
    x: imgAxisRect.x2 > imgAxisRect.x ? imgAxisRect.x : imgAxisRect.x2,
    y: imgAxisRect.y2 > imgAxisRect.y ? imgAxisRect.y : imgAxisRect.y2,
    w: Math.abs(imgAxisRect.x2 - imgAxisRect.x),
    h: Math.abs(imgAxisRect.y2 - imgAxisRect.y),
  };
  return getNormalization(endRect, imgW, imgH);
};
/**
 * Draw an image rotated by `rotate` degrees onto an off-screen canvas and
 * return the result as a JPEG File.
 *
 * @param image source image element
 * @param rotate rotation in degrees; quarter turns (90/270) swap the output
 *               canvas width/height
 * @returns File named with the current epoch timestamp (no extension), type image/jpeg
 */
export const getRotateImg = (image: HTMLImageElement, rotate: number) => {
  let imgW = image.width;
  let imgH = image.height;
  if (rotate % 180 !== 0) {
    // Quarter turns swap the output dimensions.
    [imgW, imgH] = [imgH, imgW];
  }
  const commonCanvas = document.createElement('canvas');
  commonCanvas.width = imgW;
  commonCanvas.height = imgH;
  commonCanvas.style.display = 'none';
  // Attached to the DOM only for the duration of the draw; removed below.
  document.body.appendChild(commonCanvas);
  const commonCtx = commonCanvas.getContext('2d');
  // Rotate around the image center.
  // NOTE(review): if getContext returns null every ?. call silently no-ops
  // and a blank JPEG is produced — presumably acceptable here.
  commonCtx?.save();
  if (rotate % 180 !== 0) {
    // Re-center to compensate for the width/height swap of a quarter turn.
    commonCtx?.translate((image.height - image.width) / 2, (image.width - image.height) / 2);
  }
  commonCtx?.translate(image.width / 2, image.height / 2);
  commonCtx?.rotate((rotate / 180) * Math.PI);
  commonCtx?.translate(-image.width / 2, -image.height / 2);
  commonCtx?.drawImage(image, 0, 0);
  commonCtx?.restore();
  const dataUrl = commonCanvas.toDataURL('image/jpeg');
  const blobData = dataURLToBlob(dataUrl);
  const file = new window.File([blobData], `${new Date().getTime()}`, {
    type: 'image/jpeg',
  });
  commonCanvas.parentNode?.removeChild(commonCanvas);
  return file;
};
/**
 * Normalize a raw OD (object detection) payload into a flat list of rect
 * descriptors with their feature/meta fields.
 *
 * @param originData raw detection payload
 * @returns rect descriptors; entries whose objectId === '0' (feature
 *          extraction failed) are filtered out
 * @throws Error('empty') when the payload contains no objects with a bbox
 */
export const getOdRect = (originData: IOdRectOrigin) => {
  const data = get(originData, 'objects', [])
    .filter((v: any) => !isNull(get(v, 'infoOnSource.bboxInFrame.bboxRatio')))
    .map((v: any, index: any) => {
      const rect = get(v, 'infoOnSource.bboxInFrame.bboxRatio');
      const extendBox = get(v, 'infoOnSource.bboxInFrame.extendBoxRatio');
      const frameTimestamp = get(v, 'timestamp'); // needed later when creating an archive
      const qualityScore = get(v, 'qualityScore');
      // Faces map to the face algorithm; everything else (pedestrians
      // included) uses body Re-ID. The original nested ternary had identical
      // pedestrian and fallback branches, so it collapses to this.
      const algorithmVersion =
        get(v, 'objectType') === 'OBJECT_TYPE_FACE'
          ? 'VERSION_FACE'
          : 'VERSION_REID_HEAD_ATTR';
      const featureData = get(v, 'feature', []).filter(
        (f: any) => f.type === 'FEATURE_TYPE_BYTE'
      );
      const objectRectIndex = algorithmVersion === 'VERSION_FACE' ? 0 : 1;
      return {
        x: rect.x,
        y: rect.y,
        w: rect.w,
        h: rect.h,
        id: index,
        qualityScore,
        algorithmVersion,
        featureData: get(featureData, '0.featureByte'),
        objectRectIndex,
        objectType: get(v, 'objectType'),
        objectId: get(v, 'objectIndex.objectId'),
        frameTimestamp,
        sourceObjectId: get(v, 'sourceObjectId'),
        extendBox,
      };
    });
  if (data.length === 0) {
    throw new Error('empty');
  }
  // objectId === '0' means no feature was extracted — drop those entries.
  return data.filter((v: { objectId: string; }) => v.objectId !== '0');
};
/**
 * Flatten a v2 (archive) OD response into a single rect list. Top-level
 * objects and their nested `subObjects` (e.g. a face detected inside a body
 * rect) are merged, then every entry is renumbered with a sequential id.
 *
 * @param originData response wrapper; only odv2Result[0] is consumed
 * @throws Error('empty') when no rects are produced
 */
export const getOdRectV2 = (originData: { odv2Result: any[]; }) => {
  const resp = originData.odv2Result[0];
  // Nested detections (face-inside-body) collected while mapping the parents.
  const subObjects: { x: any; y: any; w: any; h: any; id: any; qualityScore: any; algorithmVersion: any; featrueData: any; objectRectIndex: number; objectType: any; objectId: any; }[] = [];
  const data = get(resp, 'objects', [])
    // NOTE(review): this checks subObjects[0]'s bbox but the map below reads
    // the top-level bbox — confirm upstream guarantees both are present.
    .filter((v) => !isNull(get(v, 'subObjects[0].infoOnSource.bboxInFrame.bboxRatio')))
    .map((v, index) => {
      const rect = get(v, 'infoOnSource.bboxInFrame.bboxRatio');
      const qualityScore = get(v, 'qualityScore');
      const algorithmVersion = get(v, 'objectType');
      const featrueData = get(v, 'feature', []).filter(
        (f: { name: string; }) => f.name === 'feature-body' || f.name === 'feature-face'
      );
      const objectRectIndex = algorithmVersion === 'OBJECT_TYPE_FACE' ? 0 : 1;
      const objectType = get(v, 'objectType');
      const objectId = get(v, 'objectIndex.objectId');
      // A non-empty subObjects array means the body carries a face whose OD
      // rect must also be shown — collect each one.
      if (get(v, 'subObjects', []).length) {
        get(v, 'subObjects', []).forEach((e) => {
          const subRect = get(e, 'infoOnSource.bboxInFrame.bboxRatio');
          const subQuality = get(e, 'qualityScore');
          const subVersion = get(e, 'objectType');
          const subFeature = get(e, 'feature', []).filter(
            (f: { name: string; }) => f.name === 'feature-body' || f.name === 'feature-face'
          );
          subObjects.push({
            x: subRect.x,
            y: subRect.y,
            w: subRect.w,
            h: subRect.h,
            id: index,
            qualityScore: subQuality,
            algorithmVersion: subVersion,
            featrueData: subFeature.length ? subFeature[0].featureByte : '',
            objectRectIndex: subVersion === 'OBJECT_TYPE_FACE' ? 0 : 1,
            objectType: get(e, 'objectType'),
            objectId: get(e, 'objectIndex.objectId'),
          });
        });
      }
      return {
        x: rect.x,
        y: rect.y,
        w: rect.w,
        h: rect.h,
        id: index,
        qualityScore,
        algorithmVersion,
        // Guarded like the subObjects path above — the original indexed
        // featrueData[0] unconditionally and crashed on feature-less objects.
        featrueData: featrueData.length ? featrueData[0].featureByte : '',
        objectRectIndex,
        objectType,
        objectId,
      };
    });
  // Merge top-level and nested rects and renumber ids sequentially.
  // (String(id) is truthy for any numeric id, so this always renumbers.)
  const brr = data.concat(subObjects).map((item: { id: any; }, i: any) => {
    if (String(item.id)) {
      item.id = i;
    }
    return item;
  });
  if (brr.length === 0) {
    throw new Error('empty');
  }
  return brr;
};
/**
 * Crop the region described by a normalized rect out of an image and return
 * it as a JPEG File.
 *
 * @param img an image element, or a key/URL string resolvable via generateImg
 * @param odRect normalized rect (x/y/w/h in [0,1]) to crop
 * @returns File named with the current epoch timestamp, type image/jpeg
 */
export const getFileByRect = async (img: any, odRect: Rect) => {
  // Resolve a string key/URL into an actual image element first.
  let image;
  if (isString(img)) {
    const url = generateImg(img);
    image = await urlToImg(url);
  } else {
    image = img;
  }
  const commonCanvas = document.createElement('canvas');
  // Canvas sized to the crop region in pixels.
  commonCanvas.width = odRect.w * image.width;
  commonCanvas.height = odRect.h * image.height;
  commonCanvas.style.display = 'none';
  document.body.appendChild(commonCanvas);
  const commonCtx = commonCanvas.getContext('2d');
  // Shift the origin so the crop region lands at (0,0), then draw.
  commonCtx?.translate(-odRect.x * image.width, -odRect.y * image.height);
  commonCtx?.drawImage(image, 0, 0);
  const base64 = commonCanvas.toDataURL('image/jpeg');
  const blobData = dataURLToBlob(base64);
  commonCanvas?.parentNode?.removeChild(commonCanvas);
  const file = new window.File([blobData], `${new Date().getTime()}`, {
    type: 'image/jpeg',
  });
  return file;
};
/**
 * Build a CSS transform string from the given components, returning it with
 * vendor-prefixed copies for older browsers.
 *
 * Identity values (0 for translate/rotate, 1 for scale) and non-number values
 * are omitted; with nothing to apply the transform is 'none'.
 *
 * @returns style object with `transform`, `WebkitTransform`, `msTransform`
 */
export function getTransforms({
  rotate,
  scaleX,
  scaleY,
  translateX,
  translateY,
}: {
  rotate?: number;
  scaleX?: number;
  scaleY?: number;
  translateX?: number;
  translateY?: number;
}) {
  const values: string[] = [];
  // typeof checks replace lodash isNumber — the params are typed as plain
  // number, so the primitive check is sufficient and dependency-free.
  if (typeof translateX === 'number' && translateX !== 0) {
    values.push(`translateX(${translateX}px)`);
  }
  if (typeof translateY === 'number' && translateY !== 0) {
    values.push(`translateY(${translateY}px)`);
  }
  // Rotate should come first before scale to match orientation transform
  if (typeof rotate === 'number' && rotate !== 0) {
    values.push(`rotate(${rotate}deg)`);
  }
  if (typeof scaleX === 'number' && scaleX !== 1) {
    values.push(`scaleX(${scaleX})`);
  }
  if (typeof scaleY === 'number' && scaleY !== 1) {
    values.push(`scaleY(${scaleY})`);
  }
  const transform = values.length ? values.join(' ') : 'none';
  return {
    WebkitTransform: transform,
    msTransform: transform,
    transform,
  };
}