I tried everything, someone knows how to make canvas works in safari iphone, or an alternative approach that works similar? I have this component that tracks faces from react, but it's not working only on iphone, i modified it inoumerous times, but nothing works, someone can help me?
FaceDetector.js
import _regeneratorRuntime from "@babel/runtime/regenerator";
import _asyncToGenerator from "@babel/runtime/helpers/esm/asyncToGenerator";
import _slicedToArray from "@babel/runtime/helpers/esm/slicedToArray";
import _classCallCheck from "@babel/runtime/helpers/esm/classCallCheck";
import _createClass from "@babel/runtime/helpers/esm/createClass";
import _possibleConstructorReturn from "@babel/runtime/helpers/esm/possibleConstructorReturn";
import _getPrototypeOf from "@babel/runtime/helpers/esm/getPrototypeOf";
import _inherits from "@babel/runtime/helpers/esm/inherits";
import React, { Component } from 'react';
import * as pico from './pico';
var FaceDetector =
/*#__PURE__*/
function (_Component) {
_inherits(FaceDetector, _Component);
function FaceDetector(props) {
var _this;
_classCallCheck(this, FaceDetector);
_this = _possibleConstructorReturn(this, _getPrototypeOf(FaceDetector).call(this, props));
_this.performPartialWork = function () {
if (!_this.workQueue.length || !_this.mounted) return;
var firstTask = _this.workQueue.shift();
var taskStartTime = performance.now();
_this.carryOverData = firstTask.action(_this.carryOverData);
_this.taskTimes[firstTask.tag] = performance.now() - taskStartTime;
if (!_this.workQueue.length) return;
requestIdleCallback(function (deadline) {
if (!_this.taskTimes[_this.workQueue[0].tag]) _this.taskTimes[_this.workQueue[0].tag] = 1;
if (_this.taskTimes[_this.workQueue[0].tag] < deadline.timeRemaining() * 0.9) {
_this.performPartialWork();
}
});
};
_this.detectionLoop = function () {
_this.performPartialWork();
requestAnimationFrame(_this.detectionLoop);
};
_this.relativeFaceLocation = function (faceData) {
var widthIndex = _this.props.width / 100;
var heightIndex = _this.props.height / 100;
if (faceData && faceData.x) {
var x = faceData.x,
y = faceData.y,
size = faceData.size,
strength = faceData.strength;
size = Math.round(size / widthIndex);
y = Math.round(y / heightIndex);
x = 100 - Math.round(x / widthIndex);
x = Math.min(Math.max(x, 0), 100);
y = Math.min(Math.max(y, 0), 100);
strength = Math.round(strength);
return {
x: x,
y: y,
size: size,
strength: strength
};
}
};
_this.calculateFaceSizeScale = function (detectionStrength) {
var s = detectionStrength;
if (s > 1000) {
return 1.2;
} else if (s < 1000 && s > 900) {
return 1.1;
} else if (s < 900 && s > 800) {
return 1.075;
} else if (s < 800 && s > 700) {
return 1.05;
} else if (s < 700 && s > 600) {
return 1.03;
} else if (s < 600 && s > 500) {
return 1.01;
} else if (s < 500 && s > 400) {
return 1.005;
} else if (s < 400 && s > 300) {
return 0.995;
} else if (s < 300 && s > 200) {
return 0.99;
} else if (s < 200 && s > 100) {
return 0.95;
} else if (s < 100 && s > 50) {
return 0.9;
} else {
return 0.8;
}
};
_this.updatePerformanceQueue = function (detectionStart, detectionEnd, queue) {
queue.push(detectionEnd - detectionStart);
if (queue.length > 60) queue.shift();
return queue;
};
_this.updateCanvas = function () {
if(!_this.mounted){
return;
}
if(_this.props.fixPosition == 'true'){
var width = _this.props.width;
var height = _this.props.height;
_this.canvas.width = width;
_this.canvas.height = height;
}else{
var width = _this.state.currentCanvasSizeIndex * 4;
var height = _this.state.currentCanvasSizeIndex * 3;
_this.canvas.width = Math.floor(width);
_this.canvas.height = Math.floor(height);
}
_this.video.onplay = function() {
_this.ctx.drawImage(_this.video, 0, 0, width, height);
if (_this.state.facesData[0]) {
_this.state.facesData.map(function (face) {
_this.ctx.beginPath();
_this.ctx.arc(face.x, face.y, face.size / 2, 0, 2 * Math.PI, false);
_this.ctx.lineWidth = 3;
_this.ctx.strokeStyle = face.strength < 100 ? 'red' : '#3D8980';
_this.ctx.stroke();
});
}
}
};
_this.detect = function () {
var imageData = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : 1;
var detectedFacesData = pico.processfn(imageData, _this.baseFaceSize * _this.state.faceScale, _this.state.currentCanvasSizeIndex * 3, _this.state.currentCanvasSizeIndex * 4).filter(function (face) {
return face[3] > 20;
});
var newFacesData = [];
var bestDetectionData = [0, 0];
if (detectedFacesData.length) {
detectedFacesData.map(function (detectedFaceData, index) {
var newFaceData = {};
var _detectedFaceData = _slicedToArray(detectedFaceData, 4),
y = _detectedFaceData[0],
x = _detectedFaceData[1],
size = _detectedFaceData[2],
strength = _detectedFaceData[3];
newFaceData.y = y;
newFaceData.x = x;
newFaceData.size = size;
newFaceData.strength = strength;
if (bestDetectionData[0] < strength) {
bestDetectionData = [strength, index];
}
newFacesData.push(newFaceData);
});
}
var _this$state = _this.state,
faceScale = _this$state.faceScale,
currentCanvasSizeIndex = _this$state.currentCanvasSizeIndex,
noFaceFrames = _this$state.noFaceFrames,
highFaceFrames = _this$state.highFaceFrames;
var newCanvasSizeIndex = currentCanvasSizeIndex;
var newNoFaceFrames = noFaceFrames;
var newHighFaceFrames = highFaceFrames;
var _bestDetectionData = bestDetectionData,
_bestDetectionData2 = _slicedToArray(_bestDetectionData, 1),
bestDetection = _bestDetectionData2[0];
var newFaceScale = Math.max(_this.calculateFaceSizeScale(bestDetection), 0.01) || faceScale;
if (bestDetection > 250) {
if (newHighFaceFrames < 1) {
newHighFaceFrames = newHighFaceFrames + 1;
} else {
newCanvasSizeIndex = newCanvasSizeIndex - 2;
newHighFaceFrames = 0;
}
} else {
newHighFaceFrames = 0;
}
if (!newFacesData.length) {
if (newNoFaceFrames < 1) {
newNoFaceFrames = newNoFaceFrames + 1;
} else {
newCanvasSizeIndex = Math.min(newCanvasSizeIndex + 2, 200);
newNoFaceFrames = 0;
}
} else {
newNoFaceFrames = 0;
}
return {
newFacesData: newFacesData,
newFaceScale: newFaceScale,
newCanvasSizeIndex: newCanvasSizeIndex,
newNoFaceFrames: newNoFaceFrames,
newHighFaceFrames: newHighFaceFrames
};
};
_this.ctx = null;
_this.mounted = true;
_this.imageData = null;
_this.video = document.createElement("video");
_this.baseFaceSize = 100;
_this.workQueue = [];
_this.taskTimes = {};
_this.carryOverData = null;
_this.tracks = null;
_this.state = {
currentCanvasSizeIndex: 100,
facesData: {},
faceScale: 1,
first: null,
height: _this.maxHeight,
noFaceFrames: 0,
highFaceFrames: 0,
framesSinceUpdate: 0
};
return _this;
}
_createClass(FaceDetector, [{
key: "componentDidMount",
value: function () {
var _componentDidMount = _asyncToGenerator(
/*#__PURE__*/
_regeneratorRuntime.mark(function _callee() {
var stream;
return _regeneratorRuntime.wrap(function _callee$(_context) {
while (1) {
switch (_context.prev = _context.next) {
case 0:
_context.next = 2;
this.video.controls = false;
this.video.setAttribute('autoplay', '');
this.video.setAttribute('muted', '');
this.video.setAttribute('playsinline', '');
navigator.mediaDevices.getUserMedia({
video: {facingMode: 'user'},
audio: false
}).then(success => {
alert('got media device')
this.video.srcObject = success;
this.tracks = success.getTracks();
return success;
}).catch(err => {
alert('Error media device')
return null;
});
case 2:
stream = _context.sent;
//this.tracks = stream.getTracks()
this.video.play();
/*
if(navigator.platform == 'iPhone'){
this.video.setAttribute('autoplay','1')
this.video.setAttribute('playsinline','1')
this.video.srcObject = stream;
}else{
this.video.srcObject = stream;
this.video.play();
}*/
this.ctx = this.canvas.getContext('2d', {
alpha: false
});
pico.picoInit();
if (this.props.active) {
this.newWorkQueue();
this.detectionLoop();
}
case 8:
case "end":
return _context.stop();
}
}
}, _callee, this);
}));
function componentDidMount() {
return _componentDidMount.apply(this, arguments);
}
return componentDidMount;
}()
}, {
key: "componentDidUpdate",
value: function componentDidUpdate() {
if (this.mounted && this.props.active && !this.workQueue.length) {
this.newWorkQueue();
}
}
}, {
key: "componentWillUnmount" ,
value: function componentWillUnmount() {
this.mounted = false;
this.tracks.forEach( (track) => {
track.stop();
});
this.video.srcObject = null;
}
}, {
key: "render",
value: function render() {
if(!this.mounted){
return;
}
var _this2 = this;
var facesData = this.state.facesData;
var relativeFacesData = facesData.length ? facesData.map(function (face) {
return _this2.relativeFaceLocation(face);
}) : [{
x: null,
y: null,
size: null,
strength: null
}];
return React.createElement(React.Fragment, null, React.createElement("canvas", {
ref: function ref(_ref) {
return _this2.canvas = _ref;
},
style: {
display: this.props.showCanvas ? 'inline' : 'none'
}
}), this.props.children && this.props.children(relativeFacesData));
}
}, {
key: "newWorkQueue",
value: function newWorkQueue() {
var _this3 = this;
this.workQueue = [{
action: this.updateCanvas,
tag: 'updateCanvas'
}, {
action: function action() {
_this3.imageData = _this3.ctx.getImageData(0, 0, _this3.state.currentCanvasSizeIndex * 4, _this3.state.currentCanvasSizeIndex * 3).data;
},
tag: 'getContextData'
}, {
tag: 'detectAndSetState',
action: function action() {
var _this3$detect = _this3.detect(_this3.imageData),
newFacesData = _this3$detect.newFacesData,
newFaceScale = _this3$detect.newFaceScale,
newCanvasSizeIndex = _this3$detect.newCanvasSizeIndex,
newNoFaceFrames = _this3$detect.newNoFaceFrames,
newHighFaceFrames = _this3$detect.newHighFaceFrames;
_this3.setState(function () {
return {
facesData: newFacesData[0] ? newFacesData : _this3.state.facesData,
faceScale: newFaceScale,
currentCanvasSizeIndex: newCanvasSizeIndex,
noFaceFrames: newNoFaceFrames,
highFaceFrames: newHighFaceFrames,
framesSinceUpdate: 0
};
});
}
}];
}
}]);
return FaceDetector;
}(Component);
export { FaceDetector as default };
FaceDetector.defaultProps = {
active: true,
showCanvas: false
};
pico.js
/* This library is released under the MIT license, see https://github.com/tehnokv/picojs */
var unpack_cascade = function unpack_cascade(bytes) {
var dview = new DataView(new ArrayBuffer(4)); // we skip the first 8 bytes of the cascade file
// (cascade version number and some data used during the learning process)
var p = 8; // read the depth (size) of each tree first: a 32-bit signed integer
dview.setUint8(0, bytes[p + 0]);
dview.setUint8(1, bytes[p + 1]);
dview.setUint8(2, bytes[p + 2]);
dview.setUint8(3, bytes[p + 3]);
var tdepth = dview.getInt32(0, true);
p += 4; // next, read the number of trees in the cascade: another 32-bit signed integer
dview.setUint8(0, bytes[p + 0]);
dview.setUint8(1, bytes[p + 1]);
dview.setUint8(2, bytes[p + 2]);
dview.setUint8(3, bytes[p + 3]);
var ntrees = dview.getInt32(0, true);
p += 4; // read the actual trees and cascade thresholds
var tcodes = [];
var tpreds = [];
var thresh = [];
for (var t = 0; t < ntrees; ++t) {
var i = void 0; // read the binary tests placed in internal tree nodes
Array.prototype.push.apply(tcodes, [0, 0, 0, 0]);
Array.prototype.push.apply(tcodes, bytes.slice(p, p + 4 * Math.pow(2, tdepth) - 4));
p = p + 4 * Math.pow(2, tdepth) - 4; // read the prediction in the leaf nodes of the tree
for (i = 0; i < Math.pow(2, tdepth); ++i) {
dview.setUint8(0, bytes[p + 0]);
dview.setUint8(1, bytes[p + 1]);
dview.setUint8(2, bytes[p + 2]);
dview.setUint8(3, bytes[p + 3]);
tpreds.push(dview.getFloat32(0, true));
p = p + 4;
} // read the threshold
dview.setUint8(0, bytes[p + 0]);
dview.setUint8(1, bytes[p + 1]);
dview.setUint8(2, bytes[p + 2]);
dview.setUint8(3, bytes[p + 3]);
thresh.push(dview.getFloat32(0, true));
p = p + 4;
}
tcodes = new Int8Array(tcodes);
tpreds = new Float32Array(tpreds);
thresh = new Float32Array(thresh); // construct the classification function from the read data
function classify_region(r, c, s, pixels, ldim) {
r = 256 * r;
c = 256 * c;
var root = 0;
var o = 0.0;
var pow2tdepth = Math.pow(2, tdepth) >> 0; // '>>0' transforms this number to int
for (var i = 0; i < ntrees; ++i) {
var idx = 1;
for (var j = 0; j < tdepth; ++j) {
// we use '>> 8' here to perform an integer division: this seems important for performance
idx = 2 * idx + (pixels[(r + tcodes[root + 4 * idx + 0] * s >> 8) * ldim + (c + tcodes[root + 4 * idx + 1] * s >> 8)] <= pixels[(r + tcodes[root + 4 * idx + 2] * s >> 8) * ldim + (c + tcodes[root + 4 * idx + 3] * s >> 8)]);
}
o = o + tpreds[pow2tdepth * i + idx - pow2tdepth];
if (o <= thresh[i]) return -1;
root += 4 * pow2tdepth;
}
return o - thresh[ntrees - 1];
}
return classify_region;
};
var facefinder_classify_region = function facefinder_classify_region(r, c, s, pixels, ldim) {
return -1.0;
};
var update_memory = instantiate_detection_memory(5);
export var picoInit = function picoInit() {
var cascadeurl = 'https://raw.githubusercontent.com/nenadmarkus/pico/c2e81f9d23cc11d1a612fd21e4f9de0921a5d0d9/rnt/cascades/facefinder';
fetch(cascadeurl).then(function (response) {
response.arrayBuffer().then(function (buffer) {
var bytes = new Int8Array(buffer);
facefinder_classify_region = unpack_cascade(bytes);
console.log('* cascade loaded');
});
});
};
var run_cascade = function run_cascade(image, classify_region, params) {
var pixels = image.pixels;
var nrows = image.nrows;
var ncols = image.ncols;
var ldim = image.ldim;
var shiftfactor = params.shiftfactor;
var minsize = params.minsize;
var maxsize = params.maxsize;
var scalefactor = params.scalefactor;
var scale = minsize;
var detections = [];
while (scale <= maxsize) {
var step = Math.max(shiftfactor * scale, 1) >> 0; // '>>0' transforms this number to int
var offset = scale / 2 + 1 >> 0;
for (var r = offset; r <= nrows - offset; r += step) {
for (var c = offset; c <= ncols - offset; c += step) {
var q = classify_region(r, c, scale, pixels, ldim);
if (q > 0.0) detections.push([r, c, scale, q]);
}
}
scale = scale * scalefactor;
}
return detections;
};
var cluster_detections = function cluster_detections(dets, iouthreshold) {
// sort detections by their score
dets = dets.sort(function (a, b) {
return b[3] - a[3];
}); // this helper function calculates the intersection over union for two detections
function calculate_iou(det1, det2) {
// unpack the position and size of each detection
var r1 = det1[0],
c1 = det1[1],
s1 = det1[2];
var r2 = det2[0],
c2 = det2[1],
s2 = det2[2]; // calculate detection overlap in each dimension
var overr = Math.max(0, Math.min(r1 + s1 / 2, r2 + s2 / 2) - Math.max(r1 - s1 / 2, r2 - s2 / 2));
var overc = Math.max(0, Math.min(c1 + s1 / 2, c2 + s2 / 2) - Math.max(c1 - s1 / 2, c2 - s2 / 2)); // calculate and return IoU
return overr * overc / (s1 * s1 + s2 * s2 - overr * overc);
} // do clustering through non-maximum suppression
var assignments = new Array(dets.length).fill(0);
var clusters = [];
for (var i = 0; i < dets.length; ++i) {
// is this detection assigned to a cluster?
if (assignments[i] == 0) {
// it is not:
// now we make a cluster out of it and see whether some other detections belong to it
var r = 0.0,
c = 0.0,
s = 0.0,
q = 0.0,
n = 0;
for (var j = i; j < dets.length; ++j) {
if (calculate_iou(dets[i], dets[j]) > iouthreshold) {
assignments[j] = 1;
r = r + dets[j][0];
c = c + dets[j][1];
s = s + dets[j][2];
q = q + dets[j][3];
n = n + 1;
}
} // make a cluster representative
clusters.push([r / n, c / n, s / n, q]);
}
}
return clusters;
};
function instantiate_detection_memory(size) {
// initialize a circular buffer of `size` elements
var n = 0,
memory = [];
for (var i = 0; i < size; ++i) {
memory.push([]);
} // build a function that:
// (1) inserts the current frame's detections into the buffer;
// (2) merges all detections from the last `size` frames and returns them
function update_memory(dets) {
memory[n] = dets;
n = (n + 1) % memory.length;
dets = [];
for (i = 0; i < memory.length; ++i) {
dets = dets.concat(memory[i]);
} //
return dets;
}
return update_memory;
} // (2) define a function to transform an RGBA image to grayscale
var rgba_to_grayscale = function rgba_to_grayscale(rgba, nrows, ncols) {
var gray = new Uint8Array(nrows * ncols);
for (var r = 0; r < nrows; ++r) {
for (var c = 0; c < ncols; ++c) {
// gray = 0.2*red + 0.7*green + 0.1*blue
gray[r * ncols + c] = (2 * rgba[r * 4 * ncols + 4 * c + 0] + 7 * rgba[r * 4 * ncols + 4 * c + 1] + 1 * rgba[r * 4 * ncols + 4 * c + 2]) / 10;
}
}
return gray;
};
export var processfn = function processfn(imageData, minFaceSize, height, width) {
if (!imageData) return null;
var dets; // prepare input to `run_cascade`
var image = {
"pixels": rgba_to_grayscale(imageData, height, width),
"nrows": height,
"ncols": width,
"ldim": width
};
var params = {
"shiftfactor": 0.1,
// move the detection window by 10% of its size
"minsize": minFaceSize,
// minimum size of a face
"maxsize": 1000,
// maximum size of a face
"scalefactor": 1.1 // for multiscale processing: resize the detection window by 10% when moving to the higher scale
// run the cascade over the frame and cluster the obtained detections
// dets is an array that contains (r, c, s, q) quadruplets
// (representing row, column, scale and detection score)
};
dets = run_cascade(image, facefinder_classify_region, params);
dets = update_memory(dets);
dets = cluster_detections(dets, 0.2); // set IoU threshold to 0.2
return dets;
};
The specific part that is not working in iphone:
_this.updateCanvas = function () {
if(!_this.mounted){
return;
}
if(_this.props.fixPosition == 'true'){
var width = _this.props.width;
var height = _this.props.height;
_this.canvas.width = width;
_this.canvas.height = height;
}else{
var width = _this.state.currentCanvasSizeIndex * 4;
var height = _this.state.currentCanvasSizeIndex * 3;
_this.canvas.width = Math.floor(width);
_this.canvas.height = Math.floor(height);
}
_this.video.onplay = function() {
_this.ctx.drawImage(_this.video, 0, 0, width, height);
if (_this.state.facesData[0]) {
_this.state.facesData.map(function (face) {
_this.ctx.beginPath();
_this.ctx.arc(face.x, face.y, face.size / 2, 0, 2 * Math.PI, false);
_this.ctx.lineWidth = 3;
_this.ctx.strokeStyle = face.strength < 100 ? 'red' : '#3D8980';
_this.ctx.stroke();
});
}
}
};
Using component in reactjs:
import React from 'react'
import FaceDetector from './FaceDetector.js'
export default class foo extends React.Component{
...
render(){
return(
<FaceDetector className='canvas' active='false' showCanvas='false' fixPosition='true' width={400} height={400}>
{position => {console.log(position)}}
</FaceDetector>
);
}
}
if someone could help me, i will really appreciate it. Thank you guys very much
Via Active questions tagged javascript - Stack Overflow https://ift.tt/2FdjaAW
Comments
Post a Comment