feat: switch the backend to PaddleOCR-NCNN and the project to CMake

1. Migrate the entire project backend to the PaddleOCR-NCNN algorithm; it has passed basic compatibility tests.
2. Reorganize the project with CMake; to better accommodate third-party libraries, a QMake project will no longer be provided.
3. Reorganize the rights/license declaration files and the code tree to minimize the risk of infringement.

Log: switch the backend to PaddleOCR-NCNN and the project to CMake
Change-Id: I4d5d2c5d37505a4a24b389b1a4c5d12f17bfa38c
wangzhengyang
2022-05-10 09:54:44 +08:00
parent ecdd171c6f
commit 718c41634f
10018 changed files with 3593797 additions and 186748 deletions

@@ -0,0 +1,22 @@
{
"extends": "google",
"parserOptions": {
"ecmaVersion": 6
},
"rules": {
"max-len": ["error", 100, {"ignoreUrls": true}],
"quotes": ["error", "single"],
"indent": ["error", 4, {"ArrayExpression": "first",
"ObjectExpression": "first",
"CallExpression": {"arguments": "first"},
"SwitchCase": 1}],
"require-jsdoc": "off",
"new-cap": "off"
},
"plugins": ["html"],
"settings": {
"html/javascript-mime-types": ["text/javascript", "text/code-snippet"],
"html/indent": "0",
"html/report-bad-indent": "error"
}
}

Binary file not shown (40 KiB).

Binary file not shown (16 KiB).

Binary file not shown.

Binary file not shown (29 KiB).

Binary file not shown (28 KiB).

@@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Padding Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Padding Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// You can try more different parameters
let s = new cv.Scalar(255, 0, 0, 255);
cv.copyMakeBorder(src, dst, 10, 10, 10, 10, cv.BORDER_CONSTANT, s);
cv.imshow('canvasOutput', dst);
src.delete();
dst.delete();
</script>
<script>
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

@@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image ROI Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image ROI Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// You can try more different parameters
let rect = new cv.Rect(100, 100, 200, 200);
dst = src.roi(rect);
cv.imshow('canvasOutput', dst);
src.delete();
dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

@@ -0,0 +1,126 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Background Subtraction Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Background Subtraction Example</h2>
<p>
Click <b>Start/Stop</b> button to start or stop the camera capture.<br>
The <b>videoInput</b> is a &lt;video&gt; element used as input.
The <b>canvasOutput</b> is a &lt;canvas&gt; element used as output.<br>
The code in the &lt;textarea&gt; will be executed when the video starts.
You can modify the code to investigate more.
</p>
<div>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="80" id="codeEditor" spellcheck="false">
</textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="320" height="240" muted loop></video>
</td>
<td>
<canvas id="canvasOutput" width="320" height="240"></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let cap = new cv.VideoCapture(video);
let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let fgmask = new cv.Mat(video.height, video.width, cv.CV_8UC1);
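// MOG2 parameters: history = 500 frames, varThreshold = 16, detectShadows = true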
let fgbg = new cv.BackgroundSubtractorMOG2(500, 16, true);
const FPS = 30;
function processVideo() {
try {
if (!streaming) {
// clean and stop.
frame.delete(); fgmask.delete(); fgbg.delete();
return;
}
let begin = Date.now();
// start processing.
cap.read(frame);
fgbg.apply(frame, fgmask);
cv.imshow('canvasOutput', fgmask);
// schedule the next one.
let delay = 1000/FPS - (Date.now() - begin);
setTimeout(processVideo, delay);
} catch (err) {
utils.printError(err);
}
};
// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
let canvasOutput = document.getElementById('canvasOutput');
let canvasContext = canvasOutput.getContext('2d');
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
videoInput.play().then(() => {
onVideoStarted();
});
} else {
videoInput.pause();
videoInput.currentTime = 0;
onVideoStopped();
}
});
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
utils.executeCode('codeEditor');
}
function onVideoStopped() {
streaming = false;
canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
startAndStop.innerText = 'Start';
}
utils.loadOpenCv(() => {
videoInput.addEventListener('canplay', () => {
startAndStop.removeAttribute('disabled');
});
videoInput.src = 'box.mp4';
});
</script>
</body>
</html>

@@ -0,0 +1,172 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>CamShift Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>CamShift Example</h2>
<p>
Click <b>Start/Stop</b> button to start or stop the video.<br>
The <b>videoInput</b> is a &lt;video&gt; element used as CamShift input.
The <b>canvasOutput</b> is a &lt;canvas&gt; element used as CamShift output.<br>
The code in the &lt;textarea&gt; will be executed when the video starts.
You can modify the code to investigate more.
</p>
<div>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false">
</textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="320" height="240" muted loop></video>
</td>
<td>
<canvas id="canvasOutput" width="320" height="240"></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let cap = new cv.VideoCapture(video);
// take first frame of the video
let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
cap.read(frame);
// hardcode the initial location of window
let trackWindow = new cv.Rect(150, 60, 63, 125);
// set up the ROI for tracking
let roi = frame.roi(trackWindow);
let hsvRoi = new cv.Mat();
cv.cvtColor(roi, hsvRoi, cv.COLOR_RGBA2RGB);
cv.cvtColor(hsvRoi, hsvRoi, cv.COLOR_RGB2HSV);
let mask = new cv.Mat();
let lowScalar = new cv.Scalar(30, 30, 0);
let highScalar = new cv.Scalar(180, 180, 180);
let low = new cv.Mat(hsvRoi.rows, hsvRoi.cols, hsvRoi.type(), lowScalar);
let high = new cv.Mat(hsvRoi.rows, hsvRoi.cols, hsvRoi.type(), highScalar);
cv.inRange(hsvRoi, low, high, mask);
let roiHist = new cv.Mat();
let hsvRoiVec = new cv.MatVector();
hsvRoiVec.push_back(hsvRoi);
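// 1-D hue histogram: channel [0], 180 bins over the range [0, 180)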
cv.calcHist(hsvRoiVec, [0], mask, roiHist, [180], [0, 180]);
cv.normalize(roiHist, roiHist, 0, 255, cv.NORM_MINMAX);
// delete useless mats.
roi.delete(); hsvRoi.delete(); mask.delete(); low.delete(); high.delete(); hsvRoiVec.delete();
// Set up the termination criteria: either 10 iterations or a shift of at least 1 pt
let termCrit = new cv.TermCriteria(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1);
let hsv = new cv.Mat(video.height, video.width, cv.CV_8UC3);
let hsvVec = new cv.MatVector();
hsvVec.push_back(hsv);
let dst = new cv.Mat();
let trackBox = null;
const FPS = 30;
function processVideo() {
try {
if (!streaming) {
// clean and stop.
frame.delete(); dst.delete(); hsvVec.delete(); roiHist.delete(); hsv.delete();
return;
}
let begin = Date.now();
// start processing.
cap.read(frame);
cv.cvtColor(frame, hsv, cv.COLOR_RGBA2RGB);
cv.cvtColor(hsv, hsv, cv.COLOR_RGB2HSV);
cv.calcBackProject(hsvVec, [0], roiHist, dst, [0, 180], 1);
// apply camshift to get the new location
[trackBox, trackWindow] = cv.CamShift(dst, trackWindow, termCrit);
// Draw it on image
let pts = cv.rotatedRectPoints(trackBox);
cv.line(frame, pts[0], pts[1], [255, 0, 0, 255], 3);
cv.line(frame, pts[1], pts[2], [255, 0, 0, 255], 3);
cv.line(frame, pts[2], pts[3], [255, 0, 0, 255], 3);
cv.line(frame, pts[3], pts[0], [255, 0, 0, 255], 3);
cv.imshow('canvasOutput', frame);
// schedule the next one.
let delay = 1000/FPS - (Date.now() - begin);
setTimeout(processVideo, delay);
} catch (err) {
utils.printError(err);
}
};
// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
let canvasOutput = document.getElementById('canvasOutput');
let canvasContext = canvasOutput.getContext('2d');
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
videoInput.play().then(() => {
onVideoStarted();
});
} else {
videoInput.pause();
videoInput.currentTime = 0;
onVideoStopped();
}
});
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
utils.executeCode('codeEditor');
}
function onVideoStopped() {
streaming = false;
canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
startAndStop.innerText = 'Start';
}
utils.loadOpenCv(() => {
videoInput.addEventListener('canplay', () => {
startAndStop.removeAttribute('disabled');
});
videoInput.src = 'cup.mp4';
});
</script>
</body>
</html>

@@ -0,0 +1,68 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Canny Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Canny Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
cv.cvtColor(src, src, cv.COLOR_RGB2GRAY, 0);
// You can try more different parameters
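// thresholds: low = 50, high = 100; Sobel aperture = 3; L2gradient = false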
cv.Canny(src, dst, 50, 100, 3, false);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

@@ -0,0 +1,67 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Convert Color Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Convert Color Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// You can try more different parameters
cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY, 0);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

@@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image InRange Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image InRange Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let low = new cv.Mat(src.rows, src.cols, src.type(), [0, 0, 0, 0]);
let high = new cv.Mat(src.rows, src.cols, src.type(), [150, 150, 150, 255]);
// You can try more different parameters
cv.inRange(src, low, high, dst);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); low.delete(); high.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

@@ -0,0 +1,86 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image ApproxPolyDP Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image ApproxPolyDP Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 100, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
let poly = new cv.MatVector();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
// approximate each contour to a polygon
for (let i = 0; i < contours.size(); ++i) {
let tmp = new cv.Mat();
let cnt = contours.get(i);
// You can try more different parameters
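// epsilon = 3 px (max distance between the contour and its approximation), closed = true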
cv.approxPolyDP(cnt, tmp, 3, true);
poly.push_back(tmp);
cnt.delete(); tmp.delete();
}
// draw contours with random Scalar
for (let i = 0; i < contours.size(); ++i) {
let color = new cv.Scalar(Math.round(Math.random() * 255), Math.round(Math.random() * 255),
Math.round(Math.random() * 255));
cv.drawContours(dst, poly, i, color, 1, 8, hierarchy, 0);
}
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); hierarchy.delete(); contours.delete(); poly.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

@@ -0,0 +1,63 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Area Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Area Example</h2>
<p>
A &lt;canvas&gt; element named <b>canvasInput</b> has been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<div>
<canvas id="canvasInput"></canvas>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</div>
<div>
<p><strong>The area is: </strong><span id="areaOutput"></span></p>
</div>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let cnt = contours.get(20);
// You can try more different parameters
let area = cv.contourArea(cnt, false);
areaOutput.innerHTML = area;
src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

@@ -0,0 +1,79 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Bounding Rect Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Bounding Rect Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let cnt = contours.get(0);
// You can try more different parameters
let rect = cv.boundingRect(cnt);
let contoursColor = new cv.Scalar(255, 255, 255);
let rectangleColor = new cv.Scalar(255, 0, 0);
cv.drawContours(dst, contours, 0, contoursColor, 1, 8, hierarchy, 100);
let point1 = new cv.Point(rect.x, rect.y);
let point2 = new cv.Point(rect.x + rect.width, rect.y + rect.height);
cv.rectangle(dst, point1, point2, rectangleColor, 2, cv.LINE_AA, 0);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

@@ -0,0 +1,86 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Convex Hull Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Convex Hull Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 100, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
let hull = new cv.MatVector();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
// compute the convex hull of each contour
for (let i = 0; i < contours.size(); ++i) {
let tmp = new cv.Mat();
let cnt = contours.get(i);
// You can try more different parameters
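// clockwise = false, returnPoints = true (return hull vertices rather than indices)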
cv.convexHull(cnt, tmp, false, true);
hull.push_back(tmp);
cnt.delete(); tmp.delete();
}
// draw contours with random Scalar
for (let i = 0; i < contours.size(); ++i) {
let colorHull = new cv.Scalar(Math.round(Math.random() * 255), Math.round(Math.random() * 255),
Math.round(Math.random() * 255));
cv.drawContours(dst, hull, i, colorHull, 1, 8, hierarchy, 0);
}
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); hierarchy.delete(); contours.delete(); hull.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

@@ -0,0 +1,77 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Fit Ellipse Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Fit Ellipse Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let cnt = contours.get(0);
// You can try more different parameters
let rotatedRect = cv.fitEllipse(cnt);
let contoursColor = new cv.Scalar(255, 255, 255);
let ellipseColor = new cv.Scalar(255, 0, 0);
cv.drawContours(dst, contours, 0, contoursColor, 1, 8, hierarchy, 100);
cv.ellipse1(dst, rotatedRect, ellipseColor, 1, cv.LINE_8);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

@@ -0,0 +1,86 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Fit Line Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Fit Line Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
let line = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let cnt = contours.get(0);
// You can try more different parameters
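// fitLine(points, line, distType, param, reps, aeps); the result packs a unit direction (vx, vy) and a point (x0, y0)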
cv.fitLine(cnt, line, cv.DIST_L2, 0, 0.01, 0.01);
let contoursColor = new cv.Scalar(255, 255, 255);
let lineColor = new cv.Scalar(255, 0, 0);
cv.drawContours(dst, contours, 0, contoursColor, 1, 8, hierarchy, 100);
let vx = line.data32F[0];
let vy = line.data32F[1];
let x = line.data32F[2];
let y = line.data32F[3];
let lefty = Math.round((-x * vy / vx) + y);
let righty = Math.round(((src.cols - x) * vy / vx) + y);
let point1 = new cv.Point(src.cols - 1, righty);
let point2 = new cv.Point(0, lefty);
cv.line(dst, point1, point2, lineColor, 2, cv.LINE_AA, 0);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); line.delete(); cnt.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

@@ -0,0 +1,81 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Min Area Rect Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Min Area Rect Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let cnt = contours.get(0);
// You can try more different parameters
let rotatedRect = cv.minAreaRect(cnt);
let vertices = cv.RotatedRect.points(rotatedRect);
let contoursColor = new cv.Scalar(255, 255, 255);
let rectangleColor = new cv.Scalar(255, 0, 0);
cv.drawContours(dst, contours, 0, contoursColor, 1, 8, hierarchy, 100);
// draw rotatedRect
for (let i = 0; i < 4; i++) {
cv.line(dst, vertices[i], vertices[(i + 1) % 4], rectangleColor, 2, cv.LINE_AA, 0);
}
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

@@ -0,0 +1,77 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Min Enclosing Circle Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Min Enclosing Circle Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let cnt = contours.get(0);
// You can try more different parameters
let circle = cv.minEnclosingCircle(cnt);
let contoursColor = new cv.Scalar(255, 255, 255);
let circleColor = new cv.Scalar(255, 0, 0);
cv.drawContours(dst, contours, 0, contoursColor, 1, 8, hierarchy, 100);
cv.circle(dst, circle.center, circle.radius, circleColor);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

@@ -0,0 +1,62 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Moments Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Moments Example</h2>
<p>
A &lt;canvas&gt; element named <b>canvasInput</b> has been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<div>
<canvas id="canvasInput"></canvas>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</div>
<div>
<p><strong>The m00 is: </strong><span id="momentsOutput"></span></p>
</div>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let cnt = contours.get(0);
// You can try more different parameters
let Moments = cv.moments(cnt, false);
momentsOutput.innerHTML = Moments.m00;
src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

@@ -0,0 +1,62 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Perimeter Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Perimeter Example</h2>
<p>
A &lt;canvas&gt; element named <b>canvasInput</b> has been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<div>
<canvas id="canvasInput"></canvas>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</div>
<div>
<p><strong>The perimeter is: </strong><span id="perimeterOutput"></span></p>
</div>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let cnt = contours.get(20);
// You can try more different parameters
let perimeter = cv.arcLength(cnt, true);
perimeterOutput.innerHTML = perimeter;
src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

@@ -0,0 +1,68 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Transpose Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Transpose Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 120, 200, cv.THRESH_BINARY);
cv.transpose(src, dst);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

@@ -0,0 +1,77 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Contours Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Contours Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 120, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
// You can try more different parameters
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
// draw contours with random Scalar
for (let i = 0; i < contours.size(); ++i) {
let color = new cv.Scalar(Math.round(Math.random() * 255), Math.round(Math.random() * 255),
Math.round(Math.random() * 255));
cv.drawContours(dst, contours, i, color, 1, cv.LINE_8, hierarchy, 100);
}
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); contours.delete(); hierarchy.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

@@ -0,0 +1,87 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Convexity Defects Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Convexity Defects Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 100, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let hull = new cv.Mat();
let defect = new cv.Mat();
let cnt = contours.get(0);
let lineColor = new cv.Scalar(255, 0, 0);
let circleColor = new cv.Scalar(255, 255, 255);
cv.convexHull(cnt, hull, false, false);
cv.convexityDefects(cnt, hull, defect);
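// each defect row packs four 32-bit ints: [start_index, end_index, farthest_pt_index, fixpt_depth]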
for (let i = 0; i < defect.rows; ++i) {
let start = new cv.Point(cnt.data32S[defect.data32S[i * 4] * 2],
cnt.data32S[defect.data32S[i * 4] * 2 + 1]);
let end = new cv.Point(cnt.data32S[defect.data32S[i * 4 + 1] * 2],
cnt.data32S[defect.data32S[i * 4 + 1] * 2 + 1]);
let far = new cv.Point(cnt.data32S[defect.data32S[i * 4 + 2] * 2],
cnt.data32S[defect.data32S[i * 4 + 2] * 2 + 1]);
cv.line(dst, start, end, lineColor, 2, cv.LINE_AA, 0);
cv.circle(dst, far, 3, circleColor, -1);
}
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); hierarchy.delete(); contours.delete(); hull.delete(); defect.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

@@ -0,0 +1,82 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Match Shape Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Match Shape Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<div>
<p><strong>The result is: </strong><span id="matchShapesOutput"></span></p>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let contourID0 = 10;
let contourID1 = 5;
let color0 = new cv.Scalar(255, 0, 0);
let color1 = new cv.Scalar(0, 0, 255);
// You can try more different parameters
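// method 1 corresponds to CONTOURS_MATCH_I1 (Hu-moment based); smaller results mean more similar shapes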
let result = cv.matchShapes(contours.get(contourID0), contours.get(contourID1), 1, 0);
matchShapesOutput.innerHTML = result;
cv.drawContours(dst, contours, contourID0, color0, 1, cv.LINE_8, hierarchy, 100);
cv.drawContours(dst, contours, contourID1, color1, 1, cv.LINE_8, hierarchy, 100);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); contours.delete(); hierarchy.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('coins.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

@@ -0,0 +1,119 @@
getBlobFromImage = function(inputSize, mean, std, swapRB, image) {
let mat;
if (typeof(image) === 'string') {
mat = cv.imread(image);
} else {
mat = image;
}
let matC3 = new cv.Mat(mat.matSize[0], mat.matSize[1], cv.CV_8UC3);
cv.cvtColor(mat, matC3, cv.COLOR_RGBA2BGR);
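// blobFromImage(image, scalefactor, size, mean, swapRB): 'std' is used as the scale factor applied after mean subtraction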
let input = cv.blobFromImage(matC3, std, new cv.Size(inputSize[0], inputSize[1]),
new cv.Scalar(mean[0], mean[1], mean[2]), swapRB);
matC3.delete();
return input;
}
loadLables = async function(labelsUrl) {
let response = await fetch(labelsUrl);
let label = await response.text();
label = label.split('\n');
return label;
}
loadModel = async function(e) {
return new Promise((resolve) => {
let file = e.target.files[0];
let path = file.name;
let reader = new FileReader();
reader.readAsArrayBuffer(file);
reader.onload = function(ev) {
if (reader.readyState === 2) {
let buffer = reader.result;
let data = new Uint8Array(buffer);
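// store the model bytes in Emscripten's in-memory filesystem so the dnn module can later load the model by this path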
cv.FS_createDataFile('/', path, data, true, false, false);
resolve(path);
}
}
});
}
getTopClasses = function(probs, labels, topK = 3) {
probs = Array.from(probs);
let indexes = probs.map((prob, index) => [prob, index]);
let sorted = indexes.sort((a, b) => {
if (a[0] === b[0]) {return 0;}
return a[0] < b[0] ? -1 : 1;
});
sorted.reverse();
let classes = [];
for (let i = 0; i < topK; ++i) {
let prob = sorted[i][0];
let index = sorted[i][1];
let c = {
label: labels[index],
prob: (prob * 100).toFixed(2)
}
classes.push(c);
}
return classes;
}
loadImageToCanvas = function(e, canvasId) {
let files = e.target.files;
let imgUrl = URL.createObjectURL(files[0]);
let canvas = document.getElementById(canvasId);
let ctx = canvas.getContext('2d');
let img = new Image();
img.crossOrigin = 'anonymous';
img.src = imgUrl;
img.onload = function() {
ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
};
}
drawInfoTable = async function(jsonUrl, divId) {
let response = await fetch(jsonUrl);
let json = await response.json();
let appendix = document.getElementById(divId);
for (let key of Object.keys(json)) {
let h3 = document.createElement('h3');
h3.textContent = key + " model";
appendix.appendChild(h3);
let table = document.createElement('table');
let head_tr = document.createElement('tr');
for (let head of Object.keys(json[key][0])) {
let th = document.createElement('th');
th.textContent = head;
th.style.border = "1px solid black";
head_tr.appendChild(th);
}
table.appendChild(head_tr);
for (let model of json[key]) {
let tr = document.createElement('tr');
for (let params of Object.keys(model)) {
let td = document.createElement('td');
td.style.border = "1px solid black";
if (params !== "modelUrl" && params !== "configUrl" && params !== "labelsUrl") {
td.textContent = model[params];
tr.appendChild(td);
} else {
let a = document.createElement('a');
let link = document.createTextNode('link');
a.append(link);
a.href = model[params];
td.appendChild(a);
tr.appendChild(td);
}
}
table.appendChild(tr);
}
table.style.width = "800px";
table.style.borderCollapse = "collapse";
appendix.appendChild(table);
}
}
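// Hypothetical usage sketch -- the canvas id, labels URL, blob size, mean and std below are
// illustrative assumptions, not values defined by this helper file:
//   let labels = await loadLables('labels.txt');
//   let blob = getBlobFromImage([224, 224], [104, 117, 123], 1, true, 'canvasInput');
//   // ...run the blob through a cv.dnn network loaded elsewhere, then rank its outputs...
//   let top3 = getTopClasses(outputProbabilities, labels);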

@@ -0,0 +1,70 @@
body, div, p {
font: 400 14px/22px Roboto,sans-serif;
}
canvas, img, video {
border: 1px solid black;
}
td {
padding: 10px 0px 0px 10px;
text-align: center;
}
button {
display: inline-block;
color: #fff;
background-color: #337ab7;
border-color: #2e6da4;
padding: 6px 12px;
margin-bottom: 0;
font-size: 14px;
font-weight: bold;
text-align: center;
white-space: nowrap;
vertical-align: middle;
-ms-touch-action: manipulation;
touch-action: manipulation;
cursor: pointer;
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
background-image: none;
border: 1px solid transparent;
border-radius: 4px;
}
button[disabled] {
cursor: not-allowed;
filter: alpha(opacity=65);
-webkit-box-shadow: none;
box-shadow: none;
opacity: .65;
}
.control {
margin-bottom: 3px;
}
.err {
color: red;
font-weight: bold;
}
.caption {
margin: 0;
font-weight: bold;
}
.code {
padding: 4px 6px;
margin: 4px 8px 4px 2px;
background-color: #FBFCFD;
border: 1px solid #C4CFE5;
font-family: monospace, fixed;
font-size: 13px;
min-height: 13px;
line-height: 1.0;
text-wrap: unrestricted;
padding-bottom: 0px;
margin: 0px;
}
.hidden {
display: none;
}
.small {
max-width: 300px;
}

@@ -0,0 +1,100 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Face Detection Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Face Detection Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let gray = new cv.Mat();
cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
let faces = new cv.RectVector();
let eyes = new cv.RectVector();
let faceCascade = new cv.CascadeClassifier();
let eyeCascade = new cv.CascadeClassifier();
// load pre-trained classifiers
faceCascade.load('haarcascade_frontalface_default.xml');
eyeCascade.load('haarcascade_eye.xml');
// detect faces
let msize = new cv.Size(0, 0);
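// detectMultiScale: scaleFactor = 1.1, minNeighbors = 3, flags = 0,
// minSize = maxSize = msize (0x0 means no size constraint)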
faceCascade.detectMultiScale(gray, faces, 1.1, 3, 0, msize, msize);
for (let i = 0; i < faces.size(); ++i) {
let roiGray = gray.roi(faces.get(i));
let roiSrc = src.roi(faces.get(i));
let point1 = new cv.Point(faces.get(i).x, faces.get(i).y);
let point2 = new cv.Point(faces.get(i).x + faces.get(i).width,
faces.get(i).y + faces.get(i).height);
cv.rectangle(src, point1, point2, [255, 0, 0, 255]);
// detect eyes in face ROI
eyeCascade.detectMultiScale(roiGray, eyes);
for (let j = 0; j < eyes.size(); ++j) {
let point1 = new cv.Point(eyes.get(j).x, eyes.get(j).y);
let point2 = new cv.Point(eyes.get(j).x + eyes.get(j).width,
eyes.get(j).y + eyes.get(j).height);
cv.rectangle(roiSrc, point1, point2, [0, 0, 255, 255]);
}
roiGray.delete(); roiSrc.delete();
}
cv.imshow('canvasOutput', src);
src.delete(); gray.delete(); faceCascade.delete();
eyeCascade.delete(); faces.delete(); eyes.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
let eyeCascadeFile = 'haarcascade_eye.xml';
utils.createFileFromUrl(eyeCascadeFile, eyeCascadeFile, () => {
let faceCascadeFile = 'haarcascade_frontalface_default.xml';
utils.createFileFromUrl(faceCascadeFile, faceCascadeFile, () => {
tryIt.removeAttribute('disabled');
});
});
});
</script>
</body>
</html>

View File

@ -0,0 +1,142 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Face Detection Camera Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Face Detection Camera Example</h2>
<p>
Click <b>Start/Stop</b> button to start or stop the camera capture.<br>
The <b>videoInput</b> is a &lt;video&gt; element used as face detector input.
The <b>canvasOutput</b> is a &lt;canvas&gt; element used as face detector output.<br>
    The code in the &lt;textarea&gt; will be executed when the video is started.
You can modify the code to investigate more.
</p>
<div>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="80" id="codeEditor" spellcheck="false">
</textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width=320 height=240></video>
</td>
<td>
<canvas id="canvasOutput" width=320 height=240></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let src = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let dst = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let gray = new cv.Mat();
let cap = new cv.VideoCapture(video);
let faces = new cv.RectVector();
let classifier = new cv.CascadeClassifier();
// load pre-trained classifiers
classifier.load('haarcascade_frontalface_default.xml');
const FPS = 30;
function processVideo() {
try {
if (!streaming) {
// clean and stop.
src.delete();
dst.delete();
gray.delete();
faces.delete();
classifier.delete();
return;
}
let begin = Date.now();
// start processing.
cap.read(src);
src.copyTo(dst);
cv.cvtColor(dst, gray, cv.COLOR_RGBA2GRAY, 0);
// detect faces.
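        // scaleFactor = 1.1, minNeighbors = 3, flags = 0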
classifier.detectMultiScale(gray, faces, 1.1, 3, 0);
// draw faces.
for (let i = 0; i < faces.size(); ++i) {
let face = faces.get(i);
let point1 = new cv.Point(face.x, face.y);
let point2 = new cv.Point(face.x + face.width, face.y + face.height);
cv.rectangle(dst, point1, point2, [255, 0, 0, 255]);
}
cv.imshow('canvasOutput', dst);
// schedule the next one.
let delay = 1000/FPS - (Date.now() - begin);
setTimeout(processVideo, delay);
} catch (err) {
utils.printError(err);
}
};
// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
let canvasOutput = document.getElementById('canvasOutput');
let canvasContext = canvasOutput.getContext('2d');
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
utils.startCamera('qvga', onVideoStarted, 'videoInput');
} else {
utils.stopCamera();
onVideoStopped();
}
});
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
videoInput.width = videoInput.videoWidth;
videoInput.height = videoInput.videoHeight;
utils.executeCode('codeEditor');
}
function onVideoStopped() {
streaming = false;
canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
startAndStop.innerText = 'Start';
}
utils.loadOpenCv(() => {
let faceCascadeFile = 'haarcascade_frontalface_default.xml';
utils.createFileFromUrl(faceCascadeFile, faceCascadeFile, () => {
startAndStop.removeAttribute('disabled');
});
});
</script>
</body>
</html>

View File

@ -0,0 +1,68 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Gaussian Blur Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Gaussian Blur Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let ksize = new cv.Size(3, 3);
// You can try more different parameters
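// ksize = 3x3; sigmaX = sigmaY = 0, so both sigmas are computed from the kernel size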
cv.GaussianBlur(src, dst, ksize, 0, 0, cv.BORDER_DEFAULT);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,68 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Bilateral Filter Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Bilateral Filter Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
cv.cvtColor(src, src, cv.COLOR_RGBA2RGB, 0);
// You can try more different parameters
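// d = 9 (pixel neighborhood diameter), sigmaColor = 75, sigmaSpace = 75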
cv.bilateralFilter(src, dst, 9, 75, 75, cv.BORDER_DEFAULT);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,70 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Blur Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Blur Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let ksize = new cv.Size(3, 3);
let anchor = new cv.Point(-1, -1);
// You can try more different parameters
cv.blur(src, dst, ksize, anchor, cv.BORDER_DEFAULT);
// cv.boxFilter(src, dst, -1, ksize, anchor, true, cv.BORDER_DEFAULT)
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Filter Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Filter Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let M = cv.Mat.eye(3, 3, cv.CV_32FC1);
let anchor = new cv.Point(-1, -1);
// You can try more different parameters
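// ddepth = cv.CV_8U, delta = 0; M is a 3x3 kernel with ones on its diagonal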
cv.filter2D(src, dst, cv.CV_8U, M, anchor, 0, cv.BORDER_DEFAULT);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,67 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Median Blur Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Median Blur Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// You can try more different parameters
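// ksize = 5 (aperture size; must be odd and greater than 1)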
cv.medianBlur(src, dst, 5);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,128 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image DFT Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image DFT Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
// get optimal size of DFT
let optimalRows = cv.getOptimalDFTSize(src.rows);
let optimalCols = cv.getOptimalDFTSize(src.cols);
let s0 = cv.Scalar.all(0);
let padded = new cv.Mat();
cv.copyMakeBorder(src, padded, 0, optimalRows - src.rows, 0,
optimalCols - src.cols, cv.BORDER_CONSTANT, s0);
// use cv.MatVector to distribute space for real part and imaginary part
let plane0 = new cv.Mat();
padded.convertTo(plane0, cv.CV_32F);
let planes = new cv.MatVector();
let complexI = new cv.Mat();
let plane1 = new cv.Mat.zeros(padded.rows, padded.cols, cv.CV_32F);
planes.push_back(plane0);
planes.push_back(plane1);
cv.merge(planes, complexI);
// in-place dft transform
cv.dft(complexI, complexI);
// compute log(1 + sqrt(Re(DFT(img))**2 + Im(DFT(img))**2))
cv.split(complexI, planes);
cv.magnitude(planes.get(0), planes.get(1), planes.get(0));
let mag = planes.get(0);
let m1 = new cv.Mat.ones(mag.rows, mag.cols, mag.type());
cv.add(mag, m1, mag);
cv.log(mag, mag);
// crop the spectrum, if it has an odd number of rows or columns
let rect = new cv.Rect(0, 0, mag.cols & -2, mag.rows & -2);
mag = mag.roi(rect);
// rearrange the quadrants of Fourier image
// so that the origin is at the image center
let cx = mag.cols / 2;
let cy = mag.rows / 2;
let tmp = new cv.Mat();
let rect0 = new cv.Rect(0, 0, cx, cy);
let rect1 = new cv.Rect(cx, 0, cx, cy);
let rect2 = new cv.Rect(0, cy, cx, cy);
let rect3 = new cv.Rect(cx, cy, cx, cy);
let q0 = mag.roi(rect0);
let q1 = mag.roi(rect1);
let q2 = mag.roi(rect2);
let q3 = mag.roi(rect3);
// exchange 1 and 4 quadrants
q0.copyTo(tmp);
q3.copyTo(q0);
tmp.copyTo(q3);
// exchange 2 and 3 quadrants
q1.copyTo(tmp);
q2.copyTo(q1);
tmp.copyTo(q2);
// The magnitude is floating point (cv.CV_32F); normalize it to the [0, 1] range for display.
cv.normalize(mag, mag, 0, 1, cv.NORM_MINMAX);
cv.imshow('canvasOutput', mag);
src.delete(); padded.delete(); planes.delete(); complexI.delete(); m1.delete(); tmp.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,74 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Get Affine Transform Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Get Affine Transform Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// (data32F[0], data32F[1]) is the first point
// (data32F[2], data32F[3]) is the second point
// (data32F[4], data32F[5]) is the third point
let srcTri = cv.matFromArray(3, 1, cv.CV_32FC2, [0, 0, 0, 1, 1, 0]);
let dstTri = cv.matFromArray(3, 1, cv.CV_32FC2, [0.6, 0.2, 0.1, 1.3, 1.5, 0.3]);
let dsize = new cv.Size(src.cols, src.rows);
let M = cv.getAffineTransform(srcTri, dstTri);
// You can try more different parameters
cv.warpAffine(src, dst, M, dsize, cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete(); srcTri.delete(); dstTri.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,68 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Resize Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Resize Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let dsize = new cv.Size(300, 300);
// You can try more different parameters
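// dsize = 300x300; fx = fy = 0, so the scale factors are computed from dsize;
// cv.INTER_AREA is recommended for shrinking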
cv.resize(src, dst, dsize, 0, 0, cv.INTER_AREA);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,70 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Rotate Transform Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Rotate Transform Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let dsize = new cv.Size(src.cols, src.rows);
let center = new cv.Point(src.cols / 2, src.rows / 2);
// You can try more different parameters
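// rotate 45 degrees counter-clockwise about the image center, scale = 1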
let M = cv.getRotationMatrix2D(center, 45, 1);
cv.warpAffine(src, dst, M, dsize, cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Affine Transform Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Affine Transform Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
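// M = [1, 0, 50; 0, 1, 100] translates the image by 50 pixels in x and 100 pixels in y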
let M = cv.matFromArray(2, 3, cv.CV_64FC1, [1, 0, 50, 0, 1, 100]);
let dsize = new cv.Size(src.cols, src.rows);
// You can try more different parameters
cv.warpAffine(src, dst, M, dsize, cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,75 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Perspective Transform Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Perspective Transform Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let dsize = new cv.Size(src.cols, src.rows);
// (data32F[0], data32F[1]) is the first point
// (data32F[2], data32F[3]) is the second point
// (data32F[4], data32F[5]) is the third point
// (data32F[6], data32F[7]) is the fourth point
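// srcTri holds the four corners of a quadrilateral in the source image;
// dstTri maps them onto a 300x300 square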
let srcTri = cv.matFromArray(4, 1, cv.CV_32FC2, [56, 65, 368, 52, 28, 387, 389, 390]);
let dstTri = cv.matFromArray(4, 1, cv.CV_32FC2, [0, 0, 300, 0, 0, 300, 300, 300]);
let M = cv.getPerspectiveTransform(srcTri, dstTri);
// You can try more different parameters
cv.warpPerspective(src, dst, M, dsize, cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete(); srcTri.delete(); dstTri.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,85 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image GrabCut Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image GrabCut Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
cv.cvtColor(src, src, cv.COLOR_RGBA2RGB, 0);
let mask = new cv.Mat();
let bgdModel = new cv.Mat();
let fgdModel = new cv.Mat();
let rect = new cv.Rect(50, 50, 260, 280);
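// iterCount = 1, initialized from the rectangle (cv.GC_INIT_WITH_RECT)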
cv.grabCut(src, mask, rect, bgdModel, fgdModel, 1, cv.GC_INIT_WITH_RECT);
// draw foreground
for (let i = 0; i < src.rows; i++) {
for (let j = 0; j < src.cols; j++) {
if (mask.ucharPtr(i, j)[0] == 0 || mask.ucharPtr(i, j)[0] == 2) {
src.ucharPtr(i, j)[0] = 0;
src.ucharPtr(i, j)[1] = 0;
src.ucharPtr(i, j)[2] = 0;
}
}
}
// draw grab rect
let color = new cv.Scalar(0, 0, 255);
let point1 = new cv.Point(rect.x, rect.y);
let point2 = new cv.Point(rect.x + rect.width, rect.y + rect.height);
cv.rectangle(src, point1, point2, color);
cv.imshow('canvasOutput', src);
src.delete(); mask.delete(); bgdModel.delete(); fgdModel.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,68 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Laplacian Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Laplacian Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
cv.cvtColor(src, src, cv.COLOR_RGB2GRAY, 0);
// You can try more different parameters
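// ddepth = cv.CV_8U, ksize = 1, scale = 1, delta = 0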
cv.Laplacian(src, dst, cv.CV_8U, 1, 1, 0, cv.BORDER_DEFAULT);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,79 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Sobel Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Sobel Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b>, <b>canvasOutputx</b> and <b>canvasOutputy</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput" class="small"></canvas>
</td>
<td>
<canvas id="canvasOutputx" class="small"></canvas>
</td>
<td>
<canvas id="canvasOutputy" class="small"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutputx</div>
</td>
<td>
<div class="caption">canvasOutputy</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dstx = new cv.Mat();
let dsty = new cv.Mat();
cv.cvtColor(src, src, cv.COLOR_RGB2GRAY, 0);
// You can try more different parameters
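// ddepth = cv.CV_8U; (dx, dy) selects the gradient direction; ksize = 3, scale = 1, delta = 0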
cv.Sobel(src, dstx, cv.CV_8U, 1, 0, 3, 1, 0, cv.BORDER_DEFAULT);
cv.Sobel(src, dsty, cv.CV_8U, 0, 1, 3, 1, 0, cv.BORDER_DEFAULT);
// cv.Scharr(src, dstx, cv.CV_8U, 1, 0, 1, 0, cv.BORDER_DEFAULT);
// cv.Scharr(src, dsty, cv.CV_8U, 0, 1, 1, 0, cv.BORDER_DEFAULT);
cv.imshow('canvasOutputx', dstx);
cv.imshow('canvasOutputy', dsty);
src.delete(); dstx.delete(); dsty.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,78 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image AbsSobel Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image AbsSobel Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b>, <b>canvasOutput8U</b> and <b>canvasOutput64F</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput" class="small"></canvas>
</td>
<td>
<canvas id="canvasOutput8U" class="small"></canvas>
</td>
<td>
<canvas id="canvasOutput64F" class="small"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput8U</div>
</td>
<td>
<div class="caption">canvasOutput64F</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dstx = new cv.Mat();
let absDstx = new cv.Mat();
cv.cvtColor(src, src, cv.COLOR_RGB2GRAY, 0);
// You can try more different parameters
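// The CV_8U result saturates negative gradients to 0; the CV_64F result keeps them,
// and convertScaleAbs (alpha = 1, beta = 0) takes their absolute value for display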
cv.Sobel(src, dstx, cv.CV_8U, 1, 0, 3, 1, 0, cv.BORDER_DEFAULT);
cv.Sobel(src, absDstx, cv.CV_64F, 1, 0, 3, 1, 0, cv.BORDER_DEFAULT);
cv.convertScaleAbs(absDstx, absDstx, 1, 0);
cv.imshow('canvasOutput8U', dstx);
cv.imshow('canvasOutput64F', absDstx);
src.delete(); dstx.delete(); absDstx.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,90 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Back Project Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Back Project Example</h2>
<p>
&lt;canvas&gt; elements named <b>srcCanvasInput</b>, <b>dstCanvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="srcCanvasInput" class="small"></canvas>
</td>
<td>
<canvas id="dstCanvasInput" class="small"></canvas>
</td>
<td>
<canvas id="canvasOutput" class="small"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">srcCanvasInput <input type="file" id="srcFileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">dstCanvasInput <input type="file" id="dstFileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('srcCanvasInput');
let dst = cv.imread('dstCanvasInput');
cv.cvtColor(src, src, cv.COLOR_RGB2HSV, 0);
cv.cvtColor(dst, dst, cv.COLOR_RGB2HSV, 0);
let srcVec = new cv.MatVector();
let dstVec = new cv.MatVector();
srcVec.push_back(src); dstVec.push_back(dst);
let backproj = new cv.Mat();
let none = new cv.Mat();
let mask = new cv.Mat();
let hist = new cv.Mat();
let channels = [0];
let histSize = [50];
let ranges = [0, 180];
let accumulate = false;
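// Build a hue histogram (channel 0 after RGB2HSV, range [0, 180]) from the source image,
// normalize it to [0, 255], then back-project it onto the target image (scale = 1)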
cv.calcHist(srcVec, channels, mask, hist, histSize, ranges, accumulate);
cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX, -1, none);
cv.calcBackProject(dstVec, channels, hist, backproj, ranges, 1);
cv.imshow('canvasOutput', backproj);
src.delete(); dst.delete(); srcVec.delete(); dstVec.delete();
backproj.delete(); mask.delete(); hist.delete(); none.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('handSrc.jpg', 'srcCanvasInput');
utils.loadImageToCanvas('handDst.jpg', 'dstCanvasInput');
utils.addFileInputHandler('srcFileInput', 'srcCanvasInput');
utils.addFileInputHandler('dstFileInput', 'dstCanvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,88 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Histogram Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Histogram Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
let srcVec = new cv.MatVector();
srcVec.push_back(src);
let accumulate = false;
let channels = [0];
let histSize = [256];
let ranges = [0, 255];
let hist = new cv.Mat();
let mask = new cv.Mat();
let color = new cv.Scalar(255, 255, 255);
let scale = 2;
// You can try more different parameters
cv.calcHist(srcVec, channels, mask, hist, histSize, ranges, accumulate);
let result = cv.minMaxLoc(hist, mask);
let max = result.maxVal;
let dst = new cv.Mat.zeros(src.rows, histSize[0] * scale,
cv.CV_8UC3);
// draw histogram
for (let i = 0; i < histSize[0]; i++) {
let binVal = hist.data32F[i] * src.rows / max;
let point1 = new cv.Point(i * scale, src.rows - 1);
let point2 = new cv.Point((i + 1) * scale - 1, src.rows - binVal);
cv.rectangle(dst, point1, point2, color, cv.FILLED);
}
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); srcVec.delete(); mask.delete(); hist.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,73 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image CLAHE Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image CLAHE Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let equalDst = new cv.Mat();
let claheDst = new cv.Mat();
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.equalizeHist(src, equalDst);
let tileGridSize = new cv.Size(8, 8);
// You can try more different parameters
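// clipLimit = 40, tileGridSize = 8x8; note that the second imshow below overwrites
// the equalizeHist result with the CLAHE result on the same canvas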
let clahe = new cv.CLAHE(40, tileGridSize);
clahe.apply(src, claheDst);
cv.imshow('canvasOutput', equalDst);
cv.imshow('canvasOutput', claheDst);
src.delete(); equalDst.delete(); claheDst.delete(); clahe.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,68 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Equalize Histogram Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Equalize Histogram Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.equalizeHist(src, dst);
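// both calls draw to the same canvas, so the equalized result replaces the grayscale source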
cv.imshow('canvasOutput', src);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,79 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Hough Circles Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Hough Circles Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8U);
let circles = new cv.Mat();
let color = new cv.Scalar(255, 0, 0);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
// You can try more different parameters
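// dp = 1, minDist = 45, param1 = 75 (Canny high threshold),
// param2 = 40 (accumulator threshold), minRadius = maxRadius = 0 (no explicit radius limits)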
cv.HoughCircles(src, circles, cv.HOUGH_GRADIENT,
1, 45, 75, 40, 0, 0);
// draw circles
for (let i = 0; i < circles.cols; ++i) {
let x = circles.data32F[i * 3];
let y = circles.data32F[i * 3 + 1];
let radius = circles.data32F[i * 3 + 2];
let center = new cv.Point(x, y);
cv.circle(dst, center, radius, color);
}
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); circles.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('coins.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,83 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Hough Lines Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Hough Lines Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
let lines = new cv.Mat();
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.Canny(src, src, 50, 200, 3);
// You can try more different parameters
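// rho = 1 pixel, theta = 1 degree, threshold = 30 votes, srn = stn = 0, theta range [0, PI]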
cv.HoughLines(src, lines, 1, Math.PI / 180,
30, 0, 0, 0, Math.PI);
// draw lines
for (let i = 0; i < lines.rows; ++i) {
let rho = lines.data32F[i * 2];
let theta = lines.data32F[i * 2 + 1];
let a = Math.cos(theta);
let b = Math.sin(theta);
let x0 = a * rho;
let y0 = b * rho;
let startPoint = {x: x0 - 1000 * b, y: y0 + 1000 * a};
let endPoint = {x: x0 + 1000 * b, y: y0 - 1000 * a};
cv.line(dst, startPoint, endPoint, [255, 0, 0, 255]);
}
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); lines.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,77 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image HoughLinesP Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image HoughLinesP Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
let lines = new cv.Mat();
let color = new cv.Scalar(255, 0, 0);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.Canny(src, src, 50, 200, 3);
// You can try more different parameters
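// rho = 1 pixel, theta = 1 degree, threshold = 2 votes, minLineLength = 0, maxLineGap = 0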
cv.HoughLinesP(src, lines, 1, Math.PI / 180, 2, 0, 0);
// draw lines
for (let i = 0; i < lines.rows; ++i) {
let startPoint = new cv.Point(lines.data32S[i * 4], lines.data32S[i * 4 + 1]);
let endPoint = new cv.Point(lines.data32S[i * 4 + 2], lines.data32S[i * 4 + 3]);
cv.line(dst, startPoint, endPoint, color);
}
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); lines.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,114 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Bitwise Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Bitwise Example</h2>
<p>
&lt;canvas&gt; elements named <b>imageCanvasInput</b>, <b>logoCanvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="imageCanvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">imageCanvasInput <input type="file" id="imageFileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
<tr>
<td>
<canvas id="logoCanvasInput"></canvas>
</td>
<td>
</td>
</tr>
<tr>
<td>
<div class="caption">logoCanvasInput <input type="file" id="logoFileInput" name="file" accept="image/*" /></div>
</td>
<td>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('imageCanvasInput');
let logo = cv.imread('logoCanvasInput');
let dst = new cv.Mat();
let roi = new cv.Mat();
let mask = new cv.Mat();
let maskInv = new cv.Mat();
let imgBg = new cv.Mat();
let imgFg = new cv.Mat();
let sum = new cv.Mat();
let rect = new cv.Rect(0, 0, logo.cols, logo.rows);
// I want to put the logo in the top-left corner, so I create a ROI
roi = src.roi(rect);
// Create a mask of logo and create its inverse mask also
cv.cvtColor(logo, mask, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(mask, mask, 100, 255, cv.THRESH_BINARY);
cv.bitwise_not(mask, maskInv);
// Black-out the area of logo in ROI
cv.bitwise_and(roi, roi, imgBg, maskInv);
// Take only region of logo from logo image
cv.bitwise_and(logo, logo, imgFg, mask);
// Put logo in ROI and modify the main image
cv.add(imgBg, imgFg, sum);
dst = src.clone();
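// copy the first channel of the combined ROI back into the top-left corner of the output image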
for (let i = 0; i < logo.rows; i++) {
for (let j = 0; j < logo.cols; j++) {
dst.ucharPtr(i, j)[0] = sum.ucharPtr(i, j)[0];
}
}
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); logo.delete(); roi.delete(); mask.delete();
maskInv.delete(); imgBg.delete(); imgFg.delete(); sum.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'imageCanvasInput');
utils.loadImageToCanvas('lenaFace.png', 'logoCanvasInput');
utils.addFileInputHandler('imageFileInput', 'imageCanvasInput');
utils.addFileInputHandler('logoFileInput', 'logoCanvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,263 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Classification Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Classification Example</h2>
<p>
This tutorial shows you how to write an image classification example with OpenCV.js.<br>
To try the example, click the <b>modelFile</b> button (and the <b>configFile</b> button if needed) to upload the inference model.
You can find the model URLs and parameters in the <a href="#appendix">model info</a> section.
Then change the parameters in the first code snippet according to the uploaded model.
Finally, click the <b>Try it</b> button to see the result. You can choose any other image.<br>
</p>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput" width="400" height="400"></canvas>
</td>
<td>
<table style="visibility: hidden;" id="result">
<thead>
<tr>
<th scope="col">#</th>
<th scope="col" width=300>Label</th>
<th scope="col">Probability</th>
</tr>
</thead>
<tbody>
<tr>
<th scope="row">1</th>
<td id="label0" align="center"></td>
<td id="prob0" align="center"></td>
</tr>
<tr>
<th scope="row">2</th>
<td id="label1" align="center"></td>
<td id="prob1" align="center"></td>
</tr>
<tr>
<th scope="row">3</th>
<td id="label2" align="center"></td>
<td id="prob2" align="center"></td>
</tr>
</tbody>
</table>
<p id='status' align="left"></p>
</td>
</tr>
<tr>
<td>
<div class="caption">
canvasInput <input type="file" id="fileInput" name="file" accept="image/*">
</div>
</td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">
modelFile <input type="file" id="modelFile">
</div>
</td>
</tr>
<tr>
<td>
<div class="caption">
configFile <input type="file" id="configFile">
</div>
</td>
</tr>
</table>
</div>
<div>
<p class="err" id="errorMessage"></p>
</div>
<div>
<h3>Help function</h3>
<p>1. The parameters for model inference, which you can modify to investigate more models.</p>
<textarea class="code" rows="13" cols="100" id="codeEditor" spellcheck="false"></textarea>
<p>2. The main loop, which reads the image from the canvas and runs inference once.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor1" spellcheck="false"></textarea>
<p>3. Load labels from a txt file and process them into an array.</p>
<textarea class="code" rows="7" cols="100" id="codeEditor2" spellcheck="false"></textarea>
<p>4. Get a blob from the image as input for the net, and standardize it with <b>mean</b> and <b>std</b>.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor3" spellcheck="false"></textarea>
<p>5. Fetch the model file and save it to the Emscripten file system when the input button is clicked.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor4" spellcheck="false"></textarea>
<p>6. The post-processing: apply softmax if needed and get the top classes from the output vector.</p>
<textarea class="code" rows="35" cols="100" id="codeEditor5" spellcheck="false"></textarea>
</div>
<div id="appendix">
<h2>Model Info:</h2>
</div>
<script src="utils.js" type="text/javascript"></script>
<script src="js_dnn_example_helper.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
inputSize = [224,224];
mean = [104, 117, 123];
std = 1;
swapRB = false;
// record whether the softmax function is needed for post-processing
needSoftmax = false;
// URL for the label file; it can be local or on the Internet
labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt";
</script>
<script id="codeSnippet1" type="text/code-snippet">
main = async function() {
const labels = await loadLables(labelsUrl);
const input = getBlobFromImage(inputSize, mean, std, swapRB, 'canvasInput');
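    // the blob is resized to inputSize and standardized with the mean and std defined in the first snippet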
let net = cv.readNet(configPath, modelPath);
net.setInput(input);
const start = performance.now();
const result = net.forward();
const time = performance.now()-start;
const probs = softmax(result);
const classes = getTopClasses(probs, labels);
updateResult(classes, time);
input.delete();
net.delete();
result.delete();
}
</script>
<script id="codeSnippet5" type="text/code-snippet">
softmax = function(result) {
let arr = result.data32F;
if (needSoftmax) {
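        // subtract the maximum before exponentiating to avoid overflow (numerically stable softmax)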
const maxNum = Math.max(...arr);
const expSum = arr.map((num) => Math.exp(num - maxNum)).reduce((a, b) => a + b);
return arr.map((value, index) => {
return Math.exp(value - maxNum) / expSum;
});
} else {
return arr;
}
}
</script>
<script type="text/javascript">
let jsonUrl = "js_image_classification_model_info.json";
drawInfoTable(jsonUrl, 'appendix');
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadCode('codeSnippet1', 'codeEditor1');
let loadLablesCode = 'loadLables = ' + loadLables.toString();
document.getElementById('codeEditor2').value = loadLablesCode;
let getBlobFromImageCode = 'getBlobFromImage = ' + getBlobFromImage.toString();
document.getElementById('codeEditor3').value = getBlobFromImageCode;
let loadModelCode = 'loadModel = ' + loadModel.toString();
document.getElementById('codeEditor4').value = loadModelCode;
utils.loadCode('codeSnippet5', 'codeEditor5');
let getTopClassesCode = 'getTopClasses = ' + getTopClasses.toString();
document.getElementById('codeEditor5').value += '\n' + '\n' + getTopClassesCode;
let canvas = document.getElementById('canvasInput');
let ctx = canvas.getContext('2d');
let img = new Image();
img.crossOrigin = 'anonymous';
img.src = 'space_shuttle.jpg';
img.onload = function() {
ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
};
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
initStatus();
document.getElementById('status').innerHTML = 'Running function main()...';
utils.executeCode('codeEditor');
utils.executeCode('codeEditor1');
if (modelPath === "") {
document.getElementById('status').innerHTML = 'Running failed.';
utils.printError('Please upload model file by clicking the button first.');
} else {
setTimeout(main, 1);
}
});
let fileInput = document.getElementById('fileInput');
fileInput.addEventListener('change', (e) => {
initStatus();
loadImageToCanvas(e, 'canvasInput');
});
let configPath = "";
let configFile = document.getElementById('configFile');
configFile.addEventListener('change', async (e) => {
initStatus();
configPath = await loadModel(e);
document.getElementById('status').innerHTML = `The config file '${configPath}' is created successfully.`;
});
let modelPath = "";
let modelFile = document.getElementById('modelFile');
modelFile.addEventListener('change', async (e) => {
initStatus();
modelPath = await loadModel(e);
document.getElementById('status').innerHTML = `The model file '${modelPath}' is created successfully.`;
configPath = "";
configFile.value = "";
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
var main = async function() {};
var softmax = function(result){};
var getTopClasses = function(mat, labels, topK = 3){};
utils.executeCode('codeEditor1');
utils.executeCode('codeEditor2');
utils.executeCode('codeEditor3');
utils.executeCode('codeEditor4');
utils.executeCode('codeEditor5');
function updateResult(classes, time) {
try{
classes.forEach((c,i) => {
let labelElement = document.getElementById('label'+i);
let probElement = document.getElementById('prob'+i);
labelElement.innerHTML = c.label;
probElement.innerHTML = c.prob + '%';
});
let result = document.getElementById('result');
result.style.visibility = 'visible';
document.getElementById('status').innerHTML = `<b>Model:</b> ${modelPath}<br>
<b>Inference time:</b> ${time.toFixed(2)} ms`;
} catch(e) {
console.log(e);
}
}
function initStatus() {
document.getElementById('status').innerHTML = '';
document.getElementById('result').style.visibility = 'hidden';
utils.clearError();
}
</script>
</body>
</html>

View File

@ -0,0 +1,65 @@
{
"caffe": [
{
"model": "alexnet",
"mean": "104, 117, 123",
"std": "1",
"swapRB": "false",
"needSoftmax": "false",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"modelUrl": "http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel",
"configUrl": "https://raw.githubusercontent.com/BVLC/caffe/master/models/bvlc_alexnet/deploy.prototxt"
},
{
"model": "densenet",
"mean": "127.5, 127.5, 127.5",
"std": "0.007843",
"swapRB": "false",
"needSoftmax": "true",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"modelUrl": "https://drive.google.com/open?id=0B7ubpZO7HnlCcHlfNmJkU2VPelE",
"configUrl": "https://raw.githubusercontent.com/shicai/DenseNet-Caffe/master/DenseNet_121.prototxt"
},
{
"model": "googlenet",
"mean": "104, 117, 123",
"std": "1",
"swapRB": "false",
"needSoftmax": "false",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"modelUrl": "http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel",
"configUrl": "https://raw.githubusercontent.com/BVLC/caffe/master/models/bvlc_googlenet/deploy.prototxt"
},
{
"model": "squeezenet",
"mean": "104, 117, 123",
"std": "1",
"swapRB": "false",
"needSoftmax": "false",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"modelUrl": "https://raw.githubusercontent.com/forresti/SqueezeNet/master/SqueezeNet_v1.0/squeezenet_v1.0.caffemodel",
"configUrl": "https://raw.githubusercontent.com/forresti/SqueezeNet/master/SqueezeNet_v1.0/deploy.prototxt"
},
{
"model": "VGG",
"mean": "104, 117, 123",
"std": "1",
"swapRB": "false",
"needSoftmax": "false",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"modelUrl": "http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_19_layers.caffemodel",
"configUrl": "https://gist.githubusercontent.com/ksimonyan/3785162f95cd2d5fee77/raw/f02f8769e64494bcd3d7e97d5d747ac275825721/VGG_ILSVRC_19_layers_deploy.prototxt"
}
],
"tensorflow": [
{
"model": "inception",
"mean": "123, 117, 104",
"std": "1",
"swapRB": "true",
"needSoftmax": "false",
"labelsUrl": "https://raw.githubusercontent.com/petewarden/tf_ios_makefile_example/master/data/imagenet_comp_graph_label_strings.txt",
"modelUrl": "https://raw.githubusercontent.com/petewarden/tf_ios_makefile_example/master/data/tensorflow_inception_graph.pb"
}
]
}

View File

@ -0,0 +1,281 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Classification Example with Camera</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Classification Example with Camera</h2>
<p>
This tutorial shows you how to write an image classification example with a camera.<br>
To try the example, click the <b>modelFile</b> button (and the <b>configFile</b> button if needed) to upload the inference model.
You can find the model URLs and parameters in the <a href="#appendix">model info</a> section.
Then change the parameters in the first code snippet according to the uploaded model.
Finally, click the <b>Start/Stop</b> button to start or stop the camera capture.<br>
</p>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="400" height="400"></video>
</td>
<td>
<table style="visibility: hidden;" id="result">
<thead>
<tr>
<th scope="col">#</th>
<th scope="col" width=300>Label</th>
<th scope="col">Probability</th>
</tr>
</thead>
<tbody>
<tr>
<th scope="row">1</th>
<td id="label0" align="center"></td>
<td id="prob0" align="center"></td>
</tr>
<tr>
<th scope="row">2</th>
<td id="label1" align="center"></td>
<td id="prob1" align="center"></td>
</tr>
<tr>
<th scope="row">3</th>
<td id="label2" align="center"></td>
<td id="prob2" align="center"></td>
</tr>
</tbody>
</table>
<p id='status' align="left"></p>
</td>
</tr>
<tr>
<td>
<div class="caption">
videoInput
</div>
</td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">
modelFile <input type="file" id="modelFile">
</div>
</td>
</tr>
<tr>
<td>
<div class="caption">
configFile <input type="file" id="configFile">
</div>
</td>
</tr>
</table>
</div>
<div>
<p class="err" id="errorMessage"></p>
</div>
<div>
<h3>Help function</h3>
<p>1. The parameters for model inference, which you can modify to investigate more models.</p>
<textarea class="code" rows="13" cols="100" id="codeEditor" spellcheck="false"></textarea>
<p>2. The function that captures video from the camera, and the main loop which runs inference once per frame.</p>
<textarea class="code" rows="35" cols="100" id="codeEditor1" spellcheck="false"></textarea>
<p>3. Load labels from a txt file and process them into an array.</p>
<textarea class="code" rows="7" cols="100" id="codeEditor2" spellcheck="false"></textarea>
<p>4. Get a blob from the image as input for the net, and standardize it with <b>mean</b> and <b>std</b>.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor3" spellcheck="false"></textarea>
<p>5. Fetch the model file and save it to the Emscripten file system when the input button is clicked.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor4" spellcheck="false"></textarea>
<p>6. The post-processing: apply softmax if needed and get the top classes from the output vector.</p>
<textarea class="code" rows="35" cols="100" id="codeEditor5" spellcheck="false"></textarea>
</div>
<div id="appendix">
<h2>Model Info:</h2>
</div>
<script src="utils.js" type="text/javascript"></script>
<script src="js_dnn_example_helper.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
inputSize = [224,224];
mean = [104, 117, 123];
std = 1;
swapRB = false;
// record whether the softmax function is needed for post-processing
needSoftmax = false;
// URL for the label file; it can be local or on the Internet
labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt";
</script>
<script id="codeSnippet1" type="text/code-snippet">
let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let cap = new cv.VideoCapture(video);
main = async function(frame) {
const labels = await loadLables(labelsUrl);
const input = getBlobFromImage(inputSize, mean, std, swapRB, frame);
let net = cv.readNet(configPath, modelPath);
net.setInput(input);
const start = performance.now();
const result = net.forward();
const time = performance.now()-start;
const probs = softmax(result);
const classes = getTopClasses(probs, labels);
updateResult(classes, time);
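    // schedule the next frame only after this inference has finished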
setTimeout(processVideo, 0);
input.delete();
net.delete();
result.delete();
}
function processVideo() {
try {
if (!streaming) {
return;
}
cap.read(frame);
main(frame);
} catch (err) {
utils.printError(err);
}
}
setTimeout(processVideo, 0);
</script>
<script id="codeSnippet5" type="text/code-snippet">
softmax = function(result) {
let arr = result.data32F;
if (needSoftmax) {
const maxNum = Math.max(...arr);
const expSum = arr.map((num) => Math.exp(num - maxNum)).reduce((a, b) => a + b);
return arr.map((value, index) => {
return Math.exp(value - maxNum) / expSum;
});
} else {
return arr;
}
}
</script>
<script type="text/javascript">
let jsonUrl = "js_image_classification_model_info.json";
drawInfoTable(jsonUrl, 'appendix');
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadCode('codeSnippet1', 'codeEditor1');
let loadLablesCode = 'loadLables = ' + loadLables.toString();
document.getElementById('codeEditor2').value = loadLablesCode;
let getBlobFromImageCode = 'getBlobFromImage = ' + getBlobFromImage.toString();
document.getElementById('codeEditor3').value = getBlobFromImageCode;
let loadModelCode = 'loadModel = ' + loadModel.toString();
document.getElementById('codeEditor4').value = loadModelCode;
utils.loadCode('codeSnippet5', 'codeEditor5');
let getTopClassesCode = 'getTopClasses = ' + getTopClasses.toString();
document.getElementById('codeEditor5').value += '\n' + '\n' + getTopClassesCode;
let video = document.getElementById('videoInput');
let streaming = false;
let startAndStop = document.getElementById('startAndStop');
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
utils.startCamera('qvga', onVideoStarted, 'videoInput');
} else {
utils.stopCamera();
onVideoStopped();
}
});
let configPath = "";
let configFile = document.getElementById('configFile');
configFile.addEventListener('change', async (e) => {
initStatus();
configPath = await loadModel(e);
document.getElementById('status').innerHTML = `The config file '${configPath}' is created successfully.`;
});
let modelPath = "";
let modelFile = document.getElementById('modelFile');
modelFile.addEventListener('change', async (e) => {
initStatus();
modelPath = await loadModel(e);
document.getElementById('status').innerHTML = `The model file '${modelPath}' is created successfully.`;
configPath = "";
configFile.value = "";
});
utils.loadOpenCv(() => {
startAndStop.removeAttribute('disabled');
});
var main = async function(frame) {};
var softmax = function(result){};
var getTopClasses = function(mat, labels, topK = 3){};
utils.executeCode('codeEditor1');
utils.executeCode('codeEditor2');
utils.executeCode('codeEditor3');
utils.executeCode('codeEditor4');
utils.executeCode('codeEditor5');
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
videoInput.width = videoInput.videoWidth;
videoInput.height = videoInput.videoHeight;
utils.executeCode('codeEditor');
utils.executeCode('codeEditor1');
}
function onVideoStopped() {
streaming = false;
startAndStop.innerText = 'Start';
initStatus();
}
function updateResult(classes, time) {
try{
classes.forEach((c,i) => {
let labelElement = document.getElementById('label'+i);
let probElement = document.getElementById('prob'+i);
labelElement.innerHTML = c.label;
probElement.innerHTML = c.prob + '%';
});
let result = document.getElementById('result');
result.style.visibility = 'visible';
document.getElementById('status').innerHTML = `<b>Model:</b> ${modelPath}<br>
<b>Inference time:</b> ${time.toFixed(2)} ms`;
} catch(e) {
console.log(e);
}
}
function initStatus() {
document.getElementById('status').innerHTML = '';
document.getElementById('result').style.visibility = 'hidden';
utils.clearError();
}
</script>
</body>
</html>

View File

@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Read and Show Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Read and Show Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="8" cols="80" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// To distinguish the input and output, we convert the image to grayscale.
// You can try different conversions.
cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY);
cv.imshow('canvasOutput', dst);
src.delete();
dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,700 @@
<!DOCTYPE html>
<html >
<head>
<meta charset="utf-8">
<title>Image Processing Video Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
<style type="text/css">
.dg {
text-align: left;
}
.dg .property-name {
font: 11px Lucida Grande,sans-serif;
line-height: 27px;
}
.dg.main .close-button {
font: 11px Lucida Grande,sans-serif;
line-height: 27px;
}
.cell-top {
vertical-align: top;
}
</style>
</head>
<body>
<h2>Image Processing Video Example</h2>
<p>
Open the controls and try different image processing filters.
</p>
<p class="err" id="errorMessage"></p>
<div id="container">
<table>
<tr>
<td></td>
<td>
<div>
<span>Current Filter: </span><span id="filterName">Pass Through</span>
</div>
</td>
<td>
<div>Select Filter:</div>
</td>
<td></td>
</tr>
<tr>
<td></td>
<td class="cell-top">
<canvas id="canvasOutput" width="640" height="480"></canvas>
</td>
<td class="cell-top">
<div id="guiContainer"></div>
</td>
<td></td>
</tr>
</table>
<div>
<video id="videoInput" class="hidden">Your browser does not support the video tag.</video>
</div>
</div>
<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/stats.js/r16/Stats.min.js" type="text/javascript"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/dat-gui/0.6.4/dat.gui.min.js" type="text/javascript"></script>
<script src="utils.js" type="text/javascript"></script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
let width = 0;
let height = 0;
let resolution = window.innerWidth < 960 ? 'qvga' : 'vga';
// whether streaming video from the camera.
let streaming = false;
let video = document.getElementById('videoInput');
let vc = null;
let container = document.getElementById('container');
let lastFilter = '';
let src = null;
let dstC1 = null;
let dstC3 = null;
let dstC4 = null;
function startVideoProcessing() {
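  // allocate the working Mats once; they are reused for every processed frame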
src = new cv.Mat(height, width, cv.CV_8UC4);
dstC1 = new cv.Mat(height, width, cv.CV_8UC1);
dstC3 = new cv.Mat(height, width, cv.CV_8UC3);
dstC4 = new cv.Mat(height, width, cv.CV_8UC4);
requestAnimationFrame(processVideo);
}
function passThrough(src) {
return src;
}
function gray(src) {
cv.cvtColor(src, dstC1, cv.COLOR_RGBA2GRAY);
return dstC1;
}
function hsv(src) {
cv.cvtColor(src, dstC3, cv.COLOR_RGBA2RGB);
cv.cvtColor(dstC3, dstC3, cv.COLOR_RGB2HSV);
return dstC3;
}
function canny(src) {
cv.cvtColor(src, dstC1, cv.COLOR_RGBA2GRAY);
cv.Canny(dstC1, dstC1, controls.cannyThreshold1, controls.cannyThreshold2,
controls.cannyApertureSize, controls.cannyL2Gradient);
return dstC1;
}
function inRange(src) {
let lowValue = controls.inRangeLow;
let lowScalar = new cv.Scalar(lowValue, lowValue, lowValue, 255);
let highValue = controls.inRangeHigh;
let highScalar = new cv.Scalar(highValue, highValue, highValue, 255);
let low = new cv.Mat(height, width, src.type(), lowScalar);
let high = new cv.Mat(height, width, src.type(), highScalar);
cv.inRange(src, low, high, dstC1);
low.delete(); high.delete();
return dstC1;
}
function threshold(src) {
cv.threshold(src, dstC4, controls.thresholdValue, 200, cv.THRESH_BINARY);
return dstC4;
}
function adaptiveThreshold(src) {
let mat = new cv.Mat(height, width, cv.CV_8U);
cv.cvtColor(src, mat, cv.COLOR_RGBA2GRAY);
cv.adaptiveThreshold(mat, dstC1, 200, cv.ADAPTIVE_THRESH_GAUSSIAN_C,
cv.THRESH_BINARY, Number(controls.adaptiveBlockSize), 2);
mat.delete();
return dstC1;
}
function gaussianBlur(src) {
cv.GaussianBlur(src, dstC4,
{width: controls.gaussianBlurSize, height: controls.gaussianBlurSize},
0, 0, cv.BORDER_DEFAULT);
return dstC4;
}
function bilateralFilter(src) {
let mat = new cv.Mat(height, width, cv.CV_8UC3);
cv.cvtColor(src, mat, cv.COLOR_RGBA2RGB);
cv.bilateralFilter(mat, dstC3, controls.bilateralFilterDiameter, controls.bilateralFilterSigma,
controls.bilateralFilterSigma, cv.BORDER_DEFAULT);
mat.delete();
return dstC3;
}
function medianBlur(src) {
cv.medianBlur(src, dstC4, controls.medianBlurSize);
return dstC4;
}
function sobel(src) {
let mat = new cv.Mat(height, width, cv.CV_8UC1);
cv.cvtColor(src, mat, cv.COLOR_RGB2GRAY, 0);
cv.Sobel(mat, dstC1, cv.CV_8U, 1, 0, controls.sobelSize, 1, 0, cv.BORDER_DEFAULT);
mat.delete();
return dstC1;
}
function scharr(src) {
let mat = new cv.Mat(height, width, cv.CV_8UC1);
cv.cvtColor(src, mat, cv.COLOR_RGB2GRAY, 0);
cv.Scharr(mat, dstC1, cv.CV_8U, 1, 0, 1, 0, cv.BORDER_DEFAULT);
mat.delete();
return dstC1;
}
function laplacian(src) {
let mat = new cv.Mat(height, width, cv.CV_8UC1);
cv.cvtColor(src, mat, cv.COLOR_RGB2GRAY);
cv.Laplacian(mat, dstC1, cv.CV_8U, controls.laplacianSize, 1, 0, cv.BORDER_DEFAULT);
mat.delete();
return dstC1;
}
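// pre-generate a random color for every possible contour index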
let contoursColor = [];
for (let i = 0; i < 10000; i++) {
contoursColor.push([Math.round(Math.random() * 255),
Math.round(Math.random() * 255),
Math.round(Math.random() * 255), 0]);
}
function contours(src) {
cv.cvtColor(src, dstC1, cv.COLOR_RGBA2GRAY);
cv.threshold(dstC1, dstC4, 120, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(dstC4, contours, hierarchy,
Number(controls.contoursMode),
Number(controls.contoursMethod), {x: 0, y: 0});
dstC3.delete();
dstC3 = cv.Mat.ones(height, width, cv.CV_8UC3);
for (let i = 0; i<contours.size(); ++i) {
let color = contoursColor[i];
cv.drawContours(dstC3, contours, i, color, 1, cv.LINE_8, hierarchy);
}
contours.delete(); hierarchy.delete();
return dstC3;
}
function calcHist(src) {
cv.cvtColor(src, dstC1, cv.COLOR_RGBA2GRAY);
let srcVec = new cv.MatVector();
srcVec.push_back(dstC1);
let scale = 2;
let channels = [0];
let histSize = [src.cols/scale];
const ranges = [0, 255];
let hist = new cv.Mat();
let mask = new cv.Mat();
let color = new cv.Scalar(0xfb, 0xca, 0x04, 0xff);
cv.calcHist(srcVec, channels, mask, hist, histSize, ranges);
let result = cv.minMaxLoc(hist, mask);
let max = result.maxVal;
cv.cvtColor(dstC1, dstC4, cv.COLOR_GRAY2RGBA);
// draw histogram on src
for (let i = 0; i < histSize[0]; i++) {
let binVal = hist.data32F[i] * src.rows / max;
cv.rectangle(dstC4, {x: i * scale, y: src.rows - 1},
{x: (i + 1) * scale - 1, y: src.rows - binVal/3}, color, cv.FILLED);
}
srcVec.delete();
mask.delete();
hist.delete();
return dstC4;
}
function equalizeHist(src) {
cv.cvtColor(src, dstC1, cv.COLOR_RGBA2GRAY, 0);
cv.equalizeHist(dstC1, dstC1);
return dstC1;
}
let base;
function backprojection(src) {
if (lastFilter !== 'backprojection') {
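    // capture the current frame as the histogram base the first time this filter is selected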
if (base instanceof cv.Mat) {
base.delete();
}
base = src.clone();
cv.cvtColor(base, base, cv.COLOR_RGB2HSV, 0);
}
cv.cvtColor(src, dstC3, cv.COLOR_RGB2HSV, 0);
let baseVec = new cv.MatVector();
let targetVec = new cv.MatVector();
baseVec.push_back(base); targetVec.push_back(dstC3);
let mask = new cv.Mat();
let hist = new cv.Mat();
let channels = [0];
let histSize = [50];
let ranges;
if (controls.backprojectionRangeLow < controls.backprojectionRangeHigh) {
ranges = [controls.backprojectionRangeLow, controls.backprojectionRangeHigh];
} else {
return src;
}
cv.calcHist(baseVec, channels, mask, hist, histSize, ranges);
cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX);
cv.calcBackProject(targetVec, channels, hist, dstC1, ranges, 1);
baseVec.delete();
targetVec.delete();
mask.delete();
hist.delete();
return dstC1;
}
function erosion(src) {
let kernelSize = controls.erosionSize;
let kernel = cv.Mat.ones(kernelSize, kernelSize, cv.CV_8U);
let color = new cv.Scalar();
cv.erode(src, dstC4, kernel, {x: -1, y: -1}, 1, Number(controls.erosionBorderType), color);
kernel.delete();
return dstC4;
}
function dilation(src) {
let kernelSize = controls.dilationSize;
let kernel = cv.Mat.ones(kernelSize, kernelSize, cv.CV_8U);
let color = new cv.Scalar();
cv.dilate(src, dstC4, kernel, {x: -1, y: -1}, 1, Number(controls.dilationBorderType), color);
kernel.delete();
return dstC4;
}
function morphology(src) {
let kernelSize = controls.morphologySize;
let kernel = cv.getStructuringElement(Number(controls.morphologyShape),
{width: kernelSize, height: kernelSize});
let color = new cv.Scalar();
let op = Number(controls.morphologyOp);
let image = src;
if (op === cv.MORPH_GRADIENT || op === cv.MORPH_TOPHAT || op === cv.MORPH_BLACKHAT) {
cv.cvtColor(src, dstC3, cv.COLOR_RGBA2RGB);
image = dstC3;
}
cv.morphologyEx(image, dstC4, op, kernel, {x: -1, y: -1}, 1,
Number(controls.morphologyBorderType), color);
kernel.delete();
return dstC4;
}
function processVideo() {
if (!streaming) return;
stats.begin();
vc.read(src);
let result;
switch (controls.filter) {
case 'passThrough': result = passThrough(src); break;
case 'gray': result = gray(src); break;
case 'hsv': result = hsv(src); break;
case 'canny': result = canny(src); break;
case 'inRange': result = inRange(src); break;
case 'threshold': result = threshold(src); break;
case 'adaptiveThreshold': result = adaptiveThreshold(src); break;
case 'gaussianBlur': result = gaussianBlur(src); break;
case 'bilateralFilter': result = bilateralFilter(src); break;
case 'medianBlur': result = medianBlur(src); break;
case 'sobel': result = sobel(src); break;
case 'scharr': result = scharr(src); break;
case 'laplacian': result = laplacian(src); break;
case 'contours': result = contours(src); break;
case 'calcHist': result = calcHist(src); break;
case 'equalizeHist': result = equalizeHist(src); break;
case 'backprojection': result = backprojection(src); break;
case 'erosion': result = erosion(src); break;
case 'dilation': result = dilation(src); break;
case 'morphology': result = morphology(src); break;
default: result = passThrough(src);
}
cv.imshow('canvasOutput', result);
stats.end();
lastFilter = controls.filter;
requestAnimationFrame(processVideo);
}
let stats = null;
let filters = {
'passThrough': 'Pass Through',
'gray': 'Gray',
'hsv': 'HSV',
'canny': 'Canny Edge Detection',
'inRange': 'In Range',
'threshold': 'Threshold',
'adaptiveThreshold': 'Adaptive Threshold',
'gaussianBlur': 'Gaussian Blurring',
'medianBlur': 'Median Blurring',
'bilateralFilter': 'Bilateral Filtering',
'sobel': 'Sobel Derivatives',
'scharr': 'Scharr Derivatives',
'laplacian': 'Laplacian Derivatives',
'contours': 'Contours',
'calcHist': 'Calculation',
'equalizeHist': 'Equalization',
'backprojection': 'Backprojection',
'erosion': 'Erosion',
'dilation': 'Dilation',
'morphology': 'Morphology',
};
let filterName = document.getElementById('filterName');
let controls;
function initUI() {
stats = new Stats();
stats.showPanel(0);
container.appendChild(stats.domElement);
stats.domElement.style.position = 'absolute';
stats.domElement.style.right = '0px';
stats.domElement.style.top = '0px';
controls = {
filter: 'passThrough',
setFilter: function(filter) {
this.filter = filter;
filterName.innerHTML = filters[filter];
},
passThrough: function() {
this.setFilter('passThrough');
},
gray: function() {
this.setFilter('gray');
},
hsv: function() {
this.setFilter('hsv');
},
inRange: function() {
this.setFilter('inRange');
},
inRangeLow: 75,
inRangeHigh: 150,
threshold: function() {
this.setFilter('threshold');
},
thresholdValue: 100,
adaptiveThreshold: function() {
this.setFilter('adaptiveThreshold');
},
adaptiveBlockSize: 3,
gaussianBlur: function() {
this.setFilter('gaussianBlur');
},
gaussianBlurSize: 7,
medianBlur: function() {
this.setFilter('medianBlur');
},
medianBlurSize: 5,
bilateralFilter: function() {
this.setFilter('bilateralFilter');
},
bilateralFilterDiameter: 5,
bilateralFilterSigma: 75,
sobel: function() {
this.setFilter('sobel');
},
sobelSize: 3,
scharr: function() {
this.setFilter('scharr');
},
laplacian: function() {
this.setFilter('laplacian');
},
laplacianSize: 3,
canny: function() {
this.setFilter('canny');
},
cannyThreshold1: 150,
cannyThreshold2: 300,
cannyApertureSize: 3,
cannyL2Gradient: false,
contours: function() {
this.setFilter('contours');
},
contoursMode: cv.RETR_CCOMP,
contoursMethod: cv.CHAIN_APPROX_SIMPLE,
calcHist: function() {
this.setFilter('calcHist');
},
equalizeHist: function() {
this.setFilter('equalizeHist');
},
backprojection: function() {
this.setFilter('backprojection');
},
backprojectionRangeLow: 0,
backprojectionRangeHigh: 150,
morphology: function() {
this.setFilter('morphology');
},
morphologyShape: cv.MORPH_RECT,
morphologyOp: cv.MORPH_ERODE,
morphologySize: 5,
morphologyBorderType: cv.BORDER_CONSTANT,
};
let gui = new dat.GUI({autoPlace: false});
let guiContainer = document.getElementById('guiContainer');
guiContainer.appendChild(gui.domElement);
let lastFolder = null;
function closeLastFolder(folder) {
if (lastFolder != null && lastFolder != folder) {
lastFolder.close();
}
lastFolder = folder;
}
gui.add(controls, 'passThrough').name(filters['passThrough']).onChange(function() {
closeLastFolder(null);
});
let colorConversion = gui.addFolder('Color Conversion');
colorConversion.add(controls, 'gray').name(filters['gray']).onChange(function() {
closeLastFolder(null);
});
colorConversion.add(controls, 'hsv').name(filters['hsv']).onChange(function() {
closeLastFolder(null);
});
let inRange = colorConversion.addFolder(filters['inRange']);
inRange.domElement.onclick = function() {
closeLastFolder(inRange);
controls.inRange();
};
inRange.add(controls, 'inRangeLow', 0, 255, 1).name('lower boundary');
inRange.add(controls, 'inRangeHigh', 0, 255, 1).name('higher boundary');
// let geometricTransformations = gui.addFolder('Geometric Transformations');
// TODO
let thresholding = gui.addFolder('Thresholding');
let threshold = thresholding.addFolder(filters['threshold']);
threshold.domElement.onclick = function() {
closeLastFolder(threshold);
controls.threshold();
};
threshold.add(controls, 'thresholdValue', 0, 200, 1).name('threshold value');
let adaptiveThreshold = thresholding.addFolder(filters['adaptiveThreshold']);
adaptiveThreshold.domElement.onclick = function() {
closeLastFolder(adaptiveThreshold);
controls.adaptiveThreshold();
};
adaptiveThreshold.add(
controls, 'adaptiveBlockSize', 3, 99, 1).name('block size').onChange(
function(value) {
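      // adaptiveThreshold requires an odd block size, so round even values up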
if (value % 2 === 0) controls.adaptiveBlockSize = value + 1;
});
let smoothing = gui.addFolder('Smoothing');
let gaussianBlur = smoothing.addFolder(filters['gaussianBlur']);
gaussianBlur.domElement.onclick = function() {
closeLastFolder(gaussianBlur);
controls.gaussianBlur();
};
gaussianBlur.add(
controls, 'gaussianBlurSize', 7, 99, 1).name('kernel size').onChange(
function(value) {
if (value % 2 === 0) controls.gaussianBlurSize = value + 1;
});
let medianBlur = smoothing.addFolder(filters['medianBlur']);
medianBlur.domElement.onclick = function() {
closeLastFolder(medianBlur);
controls.medianBlur();
};
medianBlur.add(
controls, 'medianBlurSize', 3, 99, 1).name('kernel size').onChange(
function(value) {
if (value % 2 === 0) controls.medianBlurSize = value + 1;
});
let bilateralFilter = smoothing.addFolder(filters['bilateralFilter']);
bilateralFilter.domElement.onclick = function() {
closeLastFolder(bilateralFilter);
controls.bilateralFilter();
};
bilateralFilter.add(controls, 'bilateralFilterDiameter', 1, 15, 1).name('diameter');
bilateralFilter.add(controls, 'bilateralFilterSigma', 1, 255, 1).name('sigma');
let morphology = gui.addFolder('Morphology');
morphology.domElement.onclick = function() {
closeLastFolder(morphology);
controls.morphology();
};
morphology.add(
controls, 'morphologyOp',
{'MORPH_ERODE': cv.MORPH_ERODE,
'MORPH_DILATE': cv.MORPH_DILATE,
'MORPH_OPEN ': cv.MORPH_OPEN,
'MORPH_CLOSE': cv.MORPH_CLOSE,
'MORPH_GRADIENT': cv.MORPH_GRADIENT,
'MORPH_TOPHAT': cv.MORPH_TOPHAT,
'MORPH_BLACKHAT': cv.MORPH_BLACKHAT}).name('operation');
morphology.add(
controls, 'morphologyShape',
{'MORPH_RECT': cv.MORPH_RECT,
'MORPH_CROSS': cv.MORPH_CROSS,
'MORPH_ELLIPSE': cv.MORPH_ELLIPSE}).name('shape');
morphology.add(
controls, 'morphologySize', 1, 15, 1).name('kernel size').onChange(
function(value) {
if (value % 2 === 0) controls.morphologySize = value + 1;
});
morphology.add(
controls, 'morphologyBorderType',
{'BORDER_CONSTANT': cv.BORDER_CONSTANT,
'BORDER_REPLICATE': cv.BORDER_REPLICATE,
'BORDER_REFLECT': cv.BORDER_REFLECT,
'BORDER_REFLECT_101': cv.BORDER_REFLECT_101}).name('border type');
let gradients = gui.addFolder('Gradients');
let sobel = gradients.addFolder(filters['sobel']);
sobel.domElement.onclick = function() {
closeLastFolder(sobel);
controls.sobel();
};
sobel.add(controls, 'sobelSize', 3, 19, 1).name('kernel size').onChange(function(value) {
if (value % 2 === 0) controls.sobelSize = value + 1;
});
gradients.add(controls, 'scharr').name(filters['scharr']).onChange(function() {
closeLastFolder(null);
});
let laplacian = gradients.addFolder(filters['laplacian']);
laplacian.domElement.onclick = function() {
closeLastFolder(laplacian);
controls.laplacian();
};
laplacian.add(
controls, 'laplacianSize', 1, 19, 1).name('kernel size').onChange(
function(value) {
if (value % 2 === 0) controls.laplacianSize = value + 1;
});
let canny = gui.addFolder(filters['canny']);
canny.domElement.onclick = function() {
closeLastFolder(canny);
controls.canny();
};
canny.add(controls, 'cannyThreshold1', 1, 500, 1).name('threshold1');
canny.add(controls, 'cannyThreshold2', 1, 500, 1).name('threshold2');
canny.add(controls, 'cannyApertureSize', 3, 7, 1).name('aperture size').onChange(
function(value) {
if (value % 2 === 0) controls.cannyApertureSize = value + 1;
});
canny.add(controls, 'cannyL2Gradient').name('l2 gradient');
let contours = gui.addFolder(filters['contours']);
contours.domElement.onclick = function() {
closeLastFolder(contours);
controls.contours();
};
contours.add(
controls, 'contoursMode',
{'RETR_EXTERNAL': cv.RETR_EXTERNAL,
'RETR_LIST': cv.RETR_LIST,
'RETR_CCOMP': cv.RETR_CCOMP,
'RETR_TREE': cv.RETR_TREE}).name('mode');
contours.add(
controls, 'contoursMethod',
{'CHAIN_APPROX_NONE': cv.CHAIN_APPROX_NONE,
'CHAIN_APPROX_SIMPLE': cv.CHAIN_APPROX_SIMPLE,
'CHAIN_APPROX_TC89_L1': cv.CHAIN_APPROX_TC89_L1,
'CHAIN_APPROX_TC89_KCOS': cv.CHAIN_APPROX_TC89_KCOS}).name('method');
let histograms = gui.addFolder('Histograms');
histograms.add(controls, 'calcHist').name(filters['calcHist']).onChange(function() {
closeLastFolder(null);
});
histograms.add(controls, 'equalizeHist').name(filters['equalizeHist']).onChange(function() {
closeLastFolder(null);
});
let backprojection = histograms.addFolder(filters['backprojection']);
backprojection.domElement.onclick = function() {
closeLastFolder(backprojection);
controls.backprojection();
};
backprojection.add(controls, 'backprojectionRangeLow', 0, 255, 1).name('range low');
backprojection.add(controls, 'backprojectionRangeHigh', 0, 255, 1).name('range high');
}
function startCamera() {
if (!streaming) {
utils.clearError();
utils.startCamera(resolution, onVideoStarted, 'videoInput');
} else {
utils.stopCamera();
onVideoStopped();
}
}
function onVideoStarted() {
height = video.videoHeight;
width = video.videoWidth;
video.setAttribute('width', width);
video.setAttribute('height', height);
streaming = true;
vc = new cv.VideoCapture(video);
startVideoProcessing();
}
function stopVideoProcessing() {
if (src != null && !src.isDeleted()) src.delete();
if (dstC1 != null && !dstC1.isDeleted()) dstC1.delete();
if (dstC3 != null && !dstC3.isDeleted()) dstC3.delete();
if (dstC4 != null && !dstC4.isDeleted()) dstC4.delete();
}
function onVideoStopped() {
if (!streaming) return;
stopVideoProcessing();
document.getElementById('canvasOutput').getContext('2d').clearRect(0, 0, width, height);
streaming = false;
}
utils.loadOpenCv(() => {
initUI();
startCamera();
});
</script>
</body>
</html>

View File

@ -0,0 +1,127 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Intelligent Scissors Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Intelligent Scissors Example</h2>
<p>
Click the <b>Start</b> button to launch the code below.<br>
Then click on the image to pick a source point. After that you can hover the mouse pointer over the canvas to specify a target point candidate.<br>
You can change the code in the &lt;textarea&gt; to investigate more. You can choose another image (you need to click "Stop" first).
</p>
<div>
<div class="control"><button id="tryIt" disabled>Start</button> <button id="stopIt" disabled>Stop</button></div>
<textarea class="code" rows="20" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div id="inputParams">
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
<canvas id="canvasInput"></canvas>
</div>
<div id="result" style="display:none">
<canvas id="canvasOutput"></canvas>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
//cv.resize(src, src, new cv.Size(1024, 1024));
cv.imshow('canvasOutput', src);
let tool = new cv.segmentation_IntelligentScissorsMB();
tool.setEdgeFeatureCannyParameters(32, 100);
tool.setGradientMagnitudeMaxLimit(200);
tool.applyImage(src);
let hasMap = false;
let canvas = document.getElementById('canvasOutput');
canvas.addEventListener('click', e => {
let startX = e.offsetX, startY = e.offsetY; console.log(startX, startY);
if (startX < src.cols && startY < src.rows)
{
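    // buildMap precomputes optimal paths from the source point to every pixel, so contours can be extracted instantly on mouse move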
console.time('buildMap');
tool.buildMap(new cv.Point(startX, startY));
console.timeEnd('buildMap');
hasMap = true;
}
});
canvas.addEventListener('mousemove', e => {
let x = e.offsetX, y = e.offsetY; //console.log(x, y);
let dst = src.clone();
if (hasMap && x >= 0 && x < src.cols && y >= 0 && y < src.rows)
{
let contour = new cv.Mat();
tool.getContour(new cv.Point(x, y), contour);
let contours = new cv.MatVector();
contours.push_back(contour);
let color = new cv.Scalar(0, 255, 0, 255); // RGBA
cv.polylines(dst, contours, false, color, 1, cv.LINE_8);
contours.delete(); contour.delete();
}
cv.imshow('canvasOutput', dst);
dst.delete();
});
canvas.addEventListener('dispose', e => {
src.delete();
tool.delete();
});
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let disposeEvent = new Event('dispose');
let tryIt = document.getElementById('tryIt');
let stopIt = document.getElementById('stopIt');
tryIt.addEventListener('click', () => {
let e_input = document.getElementById('inputParams');
e_input.style.display = 'none';
let e_result = document.getElementById("result")
e_result.style.display = '';
var e = document.getElementById("canvasOutput");
var e_new = e.cloneNode(true);
e.parentNode.replaceChild(e_new, e); // reset event handlers
stopIt.removeAttribute('disabled');
tryIt.setAttribute('disabled', '');
utils.executeCode('codeEditor');
});
stopIt.addEventListener('click', () => {
let e_input = document.getElementById('inputParams');
e_input.style.display = '';
let e_result = document.getElementById("result")
e_result.style.display = 'none';
var e = document.getElementById("canvasOutput");
e.dispatchEvent(disposeEvent);
var e_new = e.cloneNode(true);
e.parentNode.replaceChild(e_new, e); // reset event handlers
tryIt.removeAttribute('disabled');
stopIt.setAttribute('disabled', '');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,170 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>MeanShift Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>MeanShift Example</h2>
<p>
Click the <b>Start/Stop</b> button to start or stop the video.<br>
The <b>videoInput</b> is a &lt;video&gt; element used as the meanShift input.
The <b>canvasOutput</b> is a &lt;canvas&gt; element used as the meanShift output.<br>
The code in the &lt;textarea&gt; will be executed when the video is started.
You can modify the code to investigate more.
</p>
<div>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false">
</textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="320" height="240" muted loop></video>
</td>
<td>
<canvas id="canvasOutput" width="320" height="240" ></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let cap = new cv.VideoCapture(video);
// take first frame of the video
let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
cap.read(frame);
// hardcode the initial location of window
let trackWindow = new cv.Rect(150, 60, 63, 125);
// set up the ROI for tracking
let roi = frame.roi(trackWindow);
let hsvRoi = new cv.Mat();
cv.cvtColor(roi, hsvRoi, cv.COLOR_RGBA2RGB);
cv.cvtColor(hsvRoi, hsvRoi, cv.COLOR_RGB2HSV);
let mask = new cv.Mat();
let lowScalar = new cv.Scalar(30, 30, 0);
let highScalar = new cv.Scalar(180, 180, 180);
let low = new cv.Mat(hsvRoi.rows, hsvRoi.cols, hsvRoi.type(), lowScalar);
let high = new cv.Mat(hsvRoi.rows, hsvRoi.cols, hsvRoi.type(), highScalar);
cv.inRange(hsvRoi, low, high, mask);
let roiHist = new cv.Mat();
let hsvRoiVec = new cv.MatVector();
hsvRoiVec.push_back(hsvRoi);
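// compute a histogram of the hue channel (channel 0) with 180 bins over the range [0, 180)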
cv.calcHist(hsvRoiVec, [0], mask, roiHist, [180], [0, 180]);
cv.normalize(roiHist, roiHist, 0, 255, cv.NORM_MINMAX);
// delete useless mats.
roi.delete(); hsvRoi.delete(); mask.delete(); low.delete(); high.delete(); hsvRoiVec.delete();
// Set up the termination criteria: either 10 iterations or a move by at least 1 pt
let termCrit = new cv.TermCriteria(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1);
let hsv = new cv.Mat(video.height, video.width, cv.CV_8UC3);
let dst = new cv.Mat();
let hsvVec = new cv.MatVector();
hsvVec.push_back(hsv);
const FPS = 30;
function processVideo() {
try {
if (!streaming) {
// clean and stop.
frame.delete(); dst.delete(); hsvVec.delete(); roiHist.delete(); hsv.delete();
return;
}
let begin = Date.now();
// start processing.
cap.read(frame);
cv.cvtColor(frame, hsv, cv.COLOR_RGBA2RGB);
cv.cvtColor(hsv, hsv, cv.COLOR_RGB2HSV);
cv.calcBackProject(hsvVec, [0], roiHist, dst, [0, 180], 1);
// Apply meanshift to get the new location
// and it also returns number of iterations meanShift took to converge,
// which is useless in this demo.
[, trackWindow] = cv.meanShift(dst, trackWindow, termCrit);
// Draw it on image
let [x, y, w, h] = [trackWindow.x, trackWindow.y, trackWindow.width, trackWindow.height];
cv.rectangle(frame, new cv.Point(x, y), new cv.Point(x+w, y+h), [255, 0, 0, 255], 2);
cv.imshow('canvasOutput', frame);
// schedule the next one.
let delay = 1000/FPS - (Date.now() - begin);
setTimeout(processVideo, delay);
} catch (err) {
utils.printError(err);
}
};
// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
let canvasOutput = document.getElementById('canvasOutput');
let canvasContext = canvasOutput.getContext('2d');
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
videoInput.play().then(() => {
onVideoStarted();
});
} else {
videoInput.pause();
videoInput.currentTime = 0;
onVideoStopped();
}
});
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
utils.executeCode('codeEditor');
}
function onVideoStopped() {
streaming = false;
canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
startAndStop.innerText = 'Start';
}
utils.loadOpenCv(() => {
videoInput.addEventListener('canplay', () => {
startAndStop.removeAttribute('disabled');
});
videoInput.src = 'cup.mp4';
});
</script>
</body>
</html>

View File

@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Black Hat Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Black Hat Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
cv.cvtColor(src, src, cv.COLOR_RGBA2RGB);
let dst = new cv.Mat();
let M = cv.Mat.ones(53, 53, cv.CV_8U);
// You can try more different parameters
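// MORPH_BLACKHAT is the closing of the image minus the image itself; it highlights dark details smaller than the 53x53 kernel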
cv.morphologyEx(src, dst, cv.MORPH_BLACKHAT, M);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,68 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Closing Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Closing Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let M = cv.Mat.ones(5, 5, cv.CV_8U);
// You can try more different parameters
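// closing is a dilation followed by an erosion; it fills small holes inside foreground objects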
cv.morphologyEx(src, dst, cv.MORPH_CLOSE, M);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Dilate Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Dilate Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let M = cv.Mat.ones(5, 5, cv.CV_8U);
let anchor = new cv.Point(-1, -1);
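// an anchor of (-1, -1) places the anchor at the center of the kernel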
// You can try more different parameters
cv.dilate(src, dst, M, anchor, 1, cv.BORDER_CONSTANT, cv.morphologyDefaultBorderValue());
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Erode Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Erode Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let M = cv.Mat.ones(5, 5, cv.CV_8U);
let anchor = new cv.Point(-1, -1);
// You can try more different parameters
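// erosion shrinks the bright (foreground) regions, removing small white noise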
cv.erode(src, dst, M, anchor, 1, cv.BORDER_CONSTANT, cv.morphologyDefaultBorderValue());
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,71 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Get Structuring Element Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Get Structuring Element Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
cv.cvtColor(src, src, cv.COLOR_RGBA2RGB);
let dst = new cv.Mat();
let M = new cv.Mat();
let ksize = new cv.Size(5, 5);
// You can try more different parameters
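// getStructuringElement builds a cross-shaped 5x5 kernel here; MORPH_RECT and MORPH_ELLIPSE are other options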
M = cv.getStructuringElement(cv.MORPH_CROSS, ksize);
cv.morphologyEx(src, dst, cv.MORPH_GRADIENT, M);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Gradient Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Gradient Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
cv.cvtColor(src, src, cv.COLOR_RGBA2RGB);
let dst = new cv.Mat();
let M = cv.Mat.ones(5, 5, cv.CV_8U);
// You can try more different parameters
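// the morphological gradient is the difference between dilation and erosion; it highlights object outlines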
cv.morphologyEx(src, dst, cv.MORPH_GRADIENT, M);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,70 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Opening Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Opening Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let M = cv.Mat.ones(5, 5, cv.CV_8U);
let anchor = new cv.Point(-1, -1);
// You can try more different parameters
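// opening is an erosion followed by a dilation; it is useful for removing small noise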
cv.morphologyEx(src, dst, cv.MORPH_OPEN, M, anchor, 1,
cv.BORDER_CONSTANT, cv.morphologyDefaultBorderValue());
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Top Hat Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Top Hat Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
cv.cvtColor(src, src, cv.COLOR_RGBA2RGB);
let dst = new cv.Mat();
let M = cv.Mat.ones(9, 9, cv.CV_8U);
// You can try more different parameters
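// the top hat is the difference between the input image and its opening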
cv.morphologyEx(src, dst, cv.MORPH_TOPHAT, M);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,387 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Object Detection Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Object Detection Example</h2>
<p>
This tutorial shows you how to write an object detection example with OpenCV.js.<br>
To try the example, click the <b>modelFile</b> button (and the <b>configFile</b> button if needed) to upload the inference model.
You can find the model URLs and parameters in the <a href="#appendix">model info</a> section.
Then change the parameters in the first code snippet according to the uploaded model.
Finally, click the <b>Try it</b> button to see the result. You can choose any other image.<br>
</p>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput" width="400" height="400"></canvas>
</td>
<td>
<canvas id="canvasOutput" style="visibility: hidden;" width="400" height="400"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">
canvasInput <input type="file" id="fileInput" name="file" accept="image/*">
</div>
</td>
<td>
<p id='status' align="left"></p>
</td>
</tr>
<tr>
<td>
<div class="caption">
modelFile <input type="file" id="modelFile" name="file">
</div>
</td>
</tr>
<tr>
<td>
<div class="caption">
configFile <input type="file" id="configFile">
</div>
</td>
</tr>
</table>
</div>
<div>
<p class="err" id="errorMessage"></p>
</div>
<div>
<h3>Help function</h3>
<p>1. The parameters for model inference, which you can modify to investigate more models.</p>
<textarea class="code" rows="15" cols="100" id="codeEditor" spellcheck="false"></textarea>
<p>2. The main loop, which reads the image from the canvas and runs inference once.</p>
<textarea class="code" rows="16" cols="100" id="codeEditor1" spellcheck="false"></textarea>
<p>3. Load the labels from a txt file and process them into an array.</p>
<textarea class="code" rows="7" cols="100" id="codeEditor2" spellcheck="false"></textarea>
<p>4. Get a blob from the image as input for the net, and standardize it with <b>mean</b> and <b>std</b>.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor3" spellcheck="false"></textarea>
<p>5. Fetch the model file and save it to the emscripten file system when the input button is clicked.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor4" spellcheck="false"></textarea>
<p>6. The post-processing, which extracts boxes from the output and draws them on the image.</p>
<textarea class="code" rows="35" cols="100" id="codeEditor5" spellcheck="false"></textarea>
</div>
<div id="appendix">
<h2>Model Info:</h2>
</div>
<script src="utils.js" type="text/javascript"></script>
<script src="js_dnn_example_helper.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
inputSize = [300, 300];
mean = [127.5, 127.5, 127.5];
std = 0.007843;
swapRB = false;
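// detections with confidence below confThreshold are dropped; boxes overlapping
// more than nmsThreshold (IoU) are suppressed during non-maximum suppression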
confThreshold = 0.5;
nmsThreshold = 0.4;
// The type of output, can be YOLO or SSD
outType = "SSD";
// url for label file, can from local or Internet
labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_pascal_voc.txt";
</script>
<script id="codeSnippet1" type="text/code-snippet">
main = async function() {
const labels = await loadLables(labelsUrl);
const input = getBlobFromImage(inputSize, mean, std, swapRB, 'canvasInput');
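// cv.readNet picks the right importer (Caffe, Darknet, TensorFlow, ONNX, ...) from the extensions of the uploaded files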
let net = cv.readNet(configPath, modelPath);
net.setInput(input);
const start = performance.now();
const result = net.forward();
const time = performance.now()-start;
const output = postProcess(result, labels);
updateResult(output, time);
input.delete();
net.delete();
result.delete();
}
</script>
<script id="codeSnippet5" type="text/code-snippet">
postProcess = function(result, labels) {
let canvasOutput = document.getElementById('canvasOutput');
const outputWidth = canvasOutput.width;
const outputHeight = canvasOutput.height;
const resultData = result.data32F;
// Get the boxes(with class and confidence) from the output
let boxes = [];
switch(outType) {
case "YOLO": {
const vecNum = result.matSize[0];
const vecLength = result.matSize[1];
const classNum = vecLength - 5;
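// each YOLO output row is [center_x, center_y, width, height, objectness, class scores...]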
for (let i = 0; i < vecNum; ++i) {
let vector = resultData.slice(i*vecLength, (i+1)*vecLength);
let scores = vector.slice(5, vecLength);
let classId = scores.indexOf(Math.max(...scores));
let confidence = scores[classId];
if (confidence > confThreshold) {
let center_x = Math.round(vector[0] * outputWidth);
let center_y = Math.round(vector[1] * outputHeight);
let width = Math.round(vector[2] * outputWidth);
let height = Math.round(vector[3] * outputHeight);
let left = Math.round(center_x - width / 2);
let top = Math.round(center_y - height / 2);
let box = {
scores: scores,
classId: classId,
confidence: confidence,
bounding: [left, top, width, height],
toDraw: true
}
boxes.push(box);
}
}
// NMS(Non Maximum Suppression) algorithm
let boxNum = boxes.length;
let tmp_boxes = [];
let sorted_boxes = [];
for (let c = 0; c < classNum; ++c) {
for (let i = 0; i < boxes.length; ++i) {
tmp_boxes[i] = [boxes[i], i];
}
sorted_boxes = tmp_boxes.sort((a, b) => { return (b[0].scores[c] - a[0].scores[c]); });
for (let i = 0; i < boxNum; ++i) {
if (sorted_boxes[i][0].scores[c] === 0) continue;
else {
for (let j = i + 1; j < boxNum; ++j) {
if (IOU(sorted_boxes[i][0], sorted_boxes[j][0]) >= nmsThreshold) {
boxes[sorted_boxes[j][1]].toDraw = false;
}
}
}
}
}
} break;
case "SSD": {
const vecNum = result.matSize[2];
const vecLength = 7;
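// each SSD detection is [batchId, classId, confidence, left, top, right, bottom]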
for (let i = 0; i < vecNum; ++i) {
let vector = resultData.slice(i*vecLength, (i+1)*vecLength);
let confidence = vector[2];
if (confidence > confThreshold) {
let left, top, right, bottom, width, height;
left = Math.round(vector[3]);
top = Math.round(vector[4]);
right = Math.round(vector[5]);
bottom = Math.round(vector[6]);
width = right - left + 1;
height = bottom - top + 1;
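// some SSD models output normalized [0, 1] coordinates; if the box collapses, rescale to the canvas size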
if (width <= 2 || height <= 2) {
left = Math.round(vector[3] * outputWidth);
top = Math.round(vector[4] * outputHeight);
right = Math.round(vector[5] * outputWidth);
bottom = Math.round(vector[6] * outputHeight);
width = right - left + 1;
height = bottom - top + 1;
}
let box = {
classId: vector[1] - 1,
confidence: confidence,
bounding: [left, top, width, height],
toDraw: true
}
boxes.push(box);
}
}
} break;
default:
console.error(`Unsupported output type ${outType}`)
}
// Draw the saved box into the image
let image = cv.imread("canvasInput");
let output = new cv.Mat(outputWidth, outputHeight, cv.CV_8UC3);
cv.cvtColor(image, output, cv.COLOR_RGBA2RGB);
let boxNum = boxes.length;
for (let i = 0; i < boxNum; ++i) {
if (boxes[i].toDraw) {
drawBox(boxes[i]);
}
}
return output;
// Calculate the IOU(Intersection over Union) of two boxes
function IOU(box1, box2) {
let bounding1 = box1.bounding;
let bounding2 = box2.bounding;
let s1 = bounding1[2] * bounding1[3];
let s2 = bounding2[2] * bounding2[3];
let left1 = bounding1[0];
let right1 = left1 + bounding1[2];
let left2 = bounding2[0];
let right2 = left2 + bounding2[2];
let overlapW = calOverlap([left1, right1], [left2, right2]);
let top1 = bounding1[1];
let bottom1 = top1 + bounding1[3];
let top2 = bounding2[1];
let bottom2 = top2 + bounding2[3];
let overlapH = calOverlap([top1, bottom1], [top2, bottom2]);
let overlapS = overlapW * overlapH;
return overlapS / (s1 + s2 - overlapS);
}
// Calculate the overlap range of two vector
function calOverlap(range1, range2) {
let min1 = range1[0];
let max1 = range1[1];
let min2 = range2[0];
let max2 = range2[1];
if (min2 > min1 && min2 < max1) {
return max1 - min2;
} else if (max2 > min1 && max2 < max1) {
return max2 - min1;
} else {
return 0;
}
}
// Draw one predict box into the origin image
function drawBox(box) {
let bounding = box.bounding;
let left = bounding[0];
let top = bounding[1];
let width = bounding[2];
let height = bounding[3];
cv.rectangle(output, new cv.Point(left, top), new cv.Point(left + width, top + height),
new cv.Scalar(0, 255, 0));
cv.rectangle(output, new cv.Point(left, top), new cv.Point(left + width, top + 15),
new cv.Scalar(255, 255, 255), cv.FILLED);
let text = `${labels[box.classId]}: ${box.confidence.toFixed(4)}`;
cv.putText(output, text, new cv.Point(left, top + 10), cv.FONT_HERSHEY_SIMPLEX, 0.3,
new cv.Scalar(0, 0, 0));
}
}
</script>
<script type="text/javascript">
let jsonUrl = "js_object_detection_model_info.json";
drawInfoTable(jsonUrl, 'appendix');
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadCode('codeSnippet1', 'codeEditor1');
let loadLablesCode = 'loadLables = ' + loadLables.toString();
document.getElementById('codeEditor2').value = loadLablesCode;
let getBlobFromImageCode = 'getBlobFromImage = ' + getBlobFromImage.toString();
document.getElementById('codeEditor3').value = getBlobFromImageCode;
let loadModelCode = 'loadModel = ' + loadModel.toString();
document.getElementById('codeEditor4').value = loadModelCode;
utils.loadCode('codeSnippet5', 'codeEditor5');
let canvas = document.getElementById('canvasInput');
let ctx = canvas.getContext('2d');
let img = new Image();
img.crossOrigin = 'anonymous';
img.src = 'lena.png';
img.onload = function() {
ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
};
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
initStatus();
document.getElementById('status').innerHTML = 'Running function main()...';
utils.executeCode('codeEditor');
utils.executeCode('codeEditor1');
if (modelPath === "") {
document.getElementById('status').innerHTML = 'Running failed.';
utils.printError('Please upload model file by clicking the button first.');
} else {
setTimeout(main, 1);
}
});
let fileInput = document.getElementById('fileInput');
fileInput.addEventListener('change', (e) => {
initStatus();
loadImageToCanvas(e, 'canvasInput');
});
let configPath = "";
let configFile = document.getElementById('configFile');
configFile.addEventListener('change', async (e) => {
initStatus();
configPath = await loadModel(e);
document.getElementById('status').innerHTML = `The config file '${configPath}' is created successfully.`;
});
let modelPath = "";
let modelFile = document.getElementById('modelFile');
modelFile.addEventListener('change', async (e) => {
initStatus();
modelPath = await loadModel(e);
document.getElementById('status').innerHTML = `The model file '${modelPath}' is created successfully.`;
configPath = "";
configFile.value = "";
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
var main = async function() {};
var postProcess = function(result, labels) {};
utils.executeCode('codeEditor1');
utils.executeCode('codeEditor2');
utils.executeCode('codeEditor3');
utils.executeCode('codeEditor4');
utils.executeCode('codeEditor5');
function updateResult(output, time) {
try{
let canvasOutput = document.getElementById('canvasOutput');
canvasOutput.style.visibility = "visible";
cv.imshow('canvasOutput', output);
document.getElementById('status').innerHTML = `<b>Model:</b> ${modelPath}<br>
<b>Inference time:</b> ${time.toFixed(2)} ms`;
} catch(e) {
console.log(e);
}
}
function initStatus() {
document.getElementById('status').innerHTML = '';
document.getElementById('canvasOutput').style.visibility = "hidden";
utils.clearError();
}
</script>
</body>
</html>

View File

@ -0,0 +1,39 @@
{
"caffe": [
{
"model": "mobilenet_SSD",
"inputSize": "300, 300",
"mean": "127.5, 127.5, 127.5",
"std": "0.007843",
"swapRB": "false",
"outType": "SSD",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_pascal_voc.txt",
"modelUrl": "https://raw.githubusercontent.com/chuanqi305/MobileNet-SSD/master/mobilenet_iter_73000.caffemodel",
"configUrl": "https://raw.githubusercontent.com/chuanqi305/MobileNet-SSD/master/deploy.prototxt"
},
{
"model": "VGG_SSD",
"inputSize": "300, 300",
"mean": "104, 117, 123",
"std": "1",
"swapRB": "false",
"outType": "SSD",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_pascal_voc.txt",
"modelUrl": "https://drive.google.com/uc?id=0BzKzrI_SkD1_WVVTSmQxU0dVRzA&export=download",
"configUrl": "https://drive.google.com/uc?id=0BzKzrI_SkD1_WVVTSmQxU0dVRzA&export=download"
}
],
"darknet": [
{
"model": "yolov2_tiny",
"inputSize": "416, 416",
"mean": "0, 0, 0",
"std": "0.00392",
"swapRB": "false",
"outType": "YOLO",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_yolov3.txt",
"modelUrl": "https://pjreddie.com/media/files/yolov2-tiny.weights",
"configUrl": "https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov2-tiny.cfg"
}
]
}

View File

@ -0,0 +1,402 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Object Detection Example with Camera</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Object Detection Example with Camera </h2>
<p>
This tutorial shows you how to write an object detection example with a camera.<br>
To try the example, click the <b>modelFile</b> button (and the <b>configFile</b> button if needed) to upload the inference model.
You can find the model URLs and parameters in the <a href="#appendix">model info</a> section.
Then change the parameters in the first code snippet according to the uploaded model.
Finally, click the <b>Start/Stop</b> button to start or stop the camera capture.<br>
</p>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="400" height="400"></video>
</td>
<td>
<canvas id="canvasOutput" style="visibility: hidden;" width="400" height="400"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">
videoInput
</div>
</td>
<td>
<p id='status' align="left"></p>
</td>
</tr>
<tr>
<td>
<div class="caption">
modelFile <input type="file" id="modelFile" name="file">
</div>
</td>
</tr>
<tr>
<td>
<div class="caption">
configFile <input type="file" id="configFile">
</div>
</td>
</tr>
</table>
</div>
<div>
<p class="err" id="errorMessage"></p>
</div>
<div>
<h3>Help function</h3>
<p>1. The parameters for model inference, which you can modify to investigate more models.</p>
<textarea class="code" rows="15" cols="100" id="codeEditor" spellcheck="false"></textarea>
<p>2. The function that captures video from the camera, and the main loop which runs inference once per frame.</p>
<textarea class="code" rows="34" cols="100" id="codeEditor1" spellcheck="false"></textarea>
<p>3. Load the labels from a txt file and process them into an array.</p>
<textarea class="code" rows="7" cols="100" id="codeEditor2" spellcheck="false"></textarea>
<p>4. Get a blob from the image as input for the net, and standardize it with <b>mean</b> and <b>std</b>.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor3" spellcheck="false"></textarea>
<p>5. Fetch the model file and save it to the emscripten file system when the input button is clicked.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor4" spellcheck="false"></textarea>
<p>6. The post-processing, which extracts boxes from the output and draws them on the image.</p>
<textarea class="code" rows="35" cols="100" id="codeEditor5" spellcheck="false"></textarea>
</div>
<div id="appendix">
<h2>Model Info:</h2>
</div>
<script src="utils.js" type="text/javascript"></script>
<script src="js_dnn_example_helper.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
inputSize = [300, 300];
mean = [127.5, 127.5, 127.5];
std = 0.007843;
swapRB = false;
confThreshold = 0.5;
nmsThreshold = 0.4;
// the type of output, can be YOLO or SSD
outType = "SSD";
// url for label file, can from local or Internet
labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_pascal_voc.txt";
</script>
<script id="codeSnippet1" type="text/code-snippet">
let frame = new cv.Mat(videoInput.height, videoInput.width, cv.CV_8UC4);
let cap = new cv.VideoCapture(videoInput);
main = async function(frame) {
const labels = await loadLables(labelsUrl);
const input = getBlobFromImage(inputSize, mean, std, swapRB, frame);
let net = cv.readNet(configPath, modelPath);
net.setInput(input);
const start = performance.now();
const result = net.forward();
const time = performance.now()-start;
const output = postProcess(result, labels, frame);
updateResult(output, time);
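// schedule reading of the next frame only after this inference has finished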
setTimeout(processVideo, 0);
input.delete();
net.delete();
result.delete();
}
function processVideo() {
try {
if (!streaming) {
return;
}
cap.read(frame);
main(frame);
} catch (err) {
utils.printError(err);
}
}
setTimeout(processVideo, 0);
</script>
<script id="codeSnippet5" type="text/code-snippet">
postProcess = function(result, labels, frame) {
let canvasOutput = document.getElementById('canvasOutput');
const outputWidth = canvasOutput.width;
const outputHeight = canvasOutput.height;
const resultData = result.data32F;
// Get the boxes(with class and confidence) from the output
let boxes = [];
switch(outType) {
case "YOLO": {
const vecNum = result.matSize[0];
const vecLength = result.matSize[1];
const classNum = vecLength - 5;
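// each YOLO output row is [center_x, center_y, width, height, objectness, class scores...]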
for (let i = 0; i < vecNum; ++i) {
let vector = resultData.slice(i*vecLength, (i+1)*vecLength);
let scores = vector.slice(5, vecLength);
let classId = scores.indexOf(Math.max(...scores));
let confidence = scores[classId];
if (confidence > confThreshold) {
let center_x = Math.round(vector[0] * outputWidth);
let center_y = Math.round(vector[1] * outputHeight);
let width = Math.round(vector[2] * outputWidth);
let height = Math.round(vector[3] * outputHeight);
let left = Math.round(center_x - width / 2);
let top = Math.round(center_y - height / 2);
let box = {
scores: scores,
classId: classId,
confidence: confidence,
bounding: [left, top, width, height],
toDraw: true
}
boxes.push(box);
}
}
// NMS(Non Maximum Suppression) algorithm
let boxNum = boxes.length;
let tmp_boxes = [];
let sorted_boxes = [];
for (let c = 0; c < classNum; ++c) {
for (let i = 0; i < boxes.length; ++i) {
tmp_boxes[i] = [boxes[i], i];
}
sorted_boxes = tmp_boxes.sort((a, b) => { return (b[0].scores[c] - a[0].scores[c]); });
for (let i = 0; i < boxNum; ++i) {
if (sorted_boxes[i][0].scores[c] === 0) continue;
else {
for (let j = i + 1; j < boxNum; ++j) {
if (IOU(sorted_boxes[i][0], sorted_boxes[j][0]) >= nmsThreshold) {
boxes[sorted_boxes[j][1]].toDraw = false;
}
}
}
}
}
} break;
case "SSD": {
const vecNum = result.matSize[2];
const vecLength = 7;
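// each SSD detection is [batchId, classId, confidence, left, top, right, bottom]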
for (let i = 0; i < vecNum; ++i) {
let vector = resultData.slice(i*vecLength, (i+1)*vecLength);
let confidence = vector[2];
if (confidence > confThreshold) {
let left, top, right, bottom, width, height;
left = Math.round(vector[3]);
top = Math.round(vector[4]);
right = Math.round(vector[5]);
bottom = Math.round(vector[6]);
width = right - left + 1;
height = bottom - top + 1;
if (width <= 2 || height <= 2) {
left = Math.round(vector[3] * outputWidth);
top = Math.round(vector[4] * outputHeight);
right = Math.round(vector[5] * outputWidth);
bottom = Math.round(vector[6] * outputHeight);
width = right - left + 1;
height = bottom - top + 1;
}
let box = {
classId: vector[1] - 1,
confidence: confidence,
bounding: [left, top, width, height],
toDraw: true
}
boxes.push(box);
}
}
} break;
default:
console.error(`Unsupported output type ${outType}`)
}
// Draw the saved box into the image
let output = new cv.Mat(outputWidth, outputHeight, cv.CV_8UC3);
cv.cvtColor(frame, output, cv.COLOR_RGBA2RGB);
let boxNum = boxes.length;
for (let i = 0; i < boxNum; ++i) {
if (boxes[i].toDraw) {
drawBox(boxes[i]);
}
}
return output;
// Calculate the IOU(Intersection over Union) of two boxes
function IOU(box1, box2) {
let bounding1 = box1.bounding;
let bounding2 = box2.bounding;
let s1 = bounding1[2] * bounding1[3];
let s2 = bounding2[2] * bounding2[3];
let left1 = bounding1[0];
let right1 = left1 + bounding1[2];
let left2 = bounding2[0];
let right2 = left2 + bounding2[2];
let overlapW = calOverlap([left1, right1], [left2, right2]);
let top1 = bounding1[1];
let bottom1 = top1 + bounding1[3];
let top2 = bounding2[1];
let bottom2 = top2 + bounding2[3];
let overlapH = calOverlap([top1, bottom1], [top2, bottom2]);
let overlapS = overlapW * overlapH;
return overlapS / (s1 + s2 - overlapS);
}
// Calculate the overlap range of two vector
function calOverlap(range1, range2) {
let min1 = range1[0];
let max1 = range1[1];
let min2 = range2[0];
let max2 = range2[1];
if (min2 > min1 && min2 < max1) {
return max1 - min2;
} else if (max2 > min1 && max2 < max1) {
return max2 - min1;
} else {
return 0;
}
}
// Draw one predict box into the origin image
function drawBox(box) {
let bounding = box.bounding;
let left = bounding[0];
let top = bounding[1];
let width = bounding[2];
let height = bounding[3];
cv.rectangle(output, new cv.Point(left, top), new cv.Point(left + width, top + height),
new cv.Scalar(0, 255, 0));
cv.rectangle(output, new cv.Point(left, top), new cv.Point(left + width, top + 15),
new cv.Scalar(255, 255, 255), cv.FILLED);
let text = `${labels[box.classId]}: ${box.confidence.toFixed(4)}`;
cv.putText(output, text, new cv.Point(left, top + 10), cv.FONT_HERSHEY_SIMPLEX, 0.3,
new cv.Scalar(0, 0, 0));
}
}
</script>
<script type="text/javascript">
let jsonUrl = "js_object_detection_model_info.json";
drawInfoTable(jsonUrl, 'appendix');
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadCode('codeSnippet1', 'codeEditor1');
let loadLablesCode = 'loadLables = ' + loadLables.toString();
document.getElementById('codeEditor2').value = loadLablesCode;
let getBlobFromImageCode = 'getBlobFromImage = ' + getBlobFromImage.toString();
document.getElementById('codeEditor3').value = getBlobFromImageCode;
let loadModelCode = 'loadModel = ' + loadModel.toString();
document.getElementById('codeEditor4').value = loadModelCode;
utils.loadCode('codeSnippet5', 'codeEditor5');
let videoInput = document.getElementById('videoInput');
let streaming = false;
let startAndStop = document.getElementById('startAndStop');
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
utils.startCamera('qvga', onVideoStarted, 'videoInput');
} else {
utils.stopCamera();
onVideoStopped();
}
});
let configPath = "";
let configFile = document.getElementById('configFile');
configFile.addEventListener('change', async (e) => {
initStatus();
configPath = await loadModel(e);
document.getElementById('status').innerHTML = `The config file '${configPath}' is created successfully.`;
});
let modelPath = "";
let modelFile = document.getElementById('modelFile');
modelFile.addEventListener('change', async (e) => {
initStatus();
modelPath = await loadModel(e);
document.getElementById('status').innerHTML = `The model file '${modelPath}' is created successfully.`;
configPath = "";
configFile.value = "";
});
utils.loadOpenCv(() => {
startAndStop.removeAttribute('disabled');
});
var main = async function(frame) {};
var postProcess = function(result, labels, frame) {};
utils.executeCode('codeEditor1');
utils.executeCode('codeEditor2');
utils.executeCode('codeEditor3');
utils.executeCode('codeEditor4');
utils.executeCode('codeEditor5');
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
videoInput.width = videoInput.videoWidth;
videoInput.height = videoInput.videoHeight;
utils.executeCode('codeEditor');
utils.executeCode('codeEditor1');
}
function onVideoStopped() {
streaming = false;
startAndStop.innerText = 'Start';
initStatus();
}
function updateResult(output, time) {
try{
let canvasOutput = document.getElementById('canvasOutput');
canvasOutput.style.visibility = "visible";
cv.imshow('canvasOutput', output);
document.getElementById('status').innerHTML = `<b>Model:</b> ${modelPath}<br>
<b>Inference time:</b> ${time.toFixed(2)} ms`;
} catch(e) {
console.log(e);
}
}
function initStatus() {
document.getElementById('status').innerHTML = '';
document.getElementById('canvasOutput').style.visibility = "hidden";
utils.clearError();
}
</script>
</body>
</html>

View File

@ -0,0 +1,163 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Dense Optical Flow Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Dense Optical Flow Example</h2>
<p>
Click <b>Start/Stop</b> button to start or stop the video.<br>
The <b>videoInput</b> is a &lt;video&gt; element used as input.
The <b>canvasOutput</b> is a &lt;canvas&gt; element used as output.<br>
We get a 2-channel array with optical flow vectors, (u, v), and compute their magnitude and direction.
We color-code the result for better visualization: direction corresponds to the Hue channel of the image,
and magnitude to the Value channel.<br>
The code of &lt;textarea&gt; will be executed when video is started.<br>
You can modify the code to investigate more.
</p>
<div>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false">
</textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="320" height="240" muted></video>
</td>
<td>
<canvas id="canvasOutput" width="320" height="240" ></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let cap = new cv.VideoCapture(video);
// take first frame of the video
let frame1 = new cv.Mat(video.height, video.width, cv.CV_8UC4);
cap.read(frame1);
let prvs = new cv.Mat();
cv.cvtColor(frame1, prvs, cv.COLOR_RGBA2GRAY);
frame1.delete();
let hsv = new cv.Mat();
let hsv0 = new cv.Mat(video.height, video.width, cv.CV_8UC1);
let hsv1 = new cv.Mat(video.height, video.width, cv.CV_8UC1, new cv.Scalar(255));
let hsv2 = new cv.Mat(video.height, video.width, cv.CV_8UC1);
let hsvVec = new cv.MatVector();
hsvVec.push_back(hsv0); hsvVec.push_back(hsv1); hsvVec.push_back(hsv2);
let frame2 = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let next = new cv.Mat(video.height, video.width, cv.CV_8UC1);
let flow = new cv.Mat(video.height, video.width, cv.CV_32FC2);
let flowVec = new cv.MatVector();
let mag = new cv.Mat(video.height, video.width, cv.CV_32FC1);
let ang = new cv.Mat(video.height, video.width, cv.CV_32FC1);
let rgb = new cv.Mat(video.height, video.width, cv.CV_8UC3);
const FPS = 30;
function processVideo() {
try {
if (!streaming) {
// clean and stop.
prvs.delete(); hsv.delete(); hsv0.delete(); hsv1.delete(); hsv2.delete();
hsvVec.delete(); frame2.delete(); flow.delete(); flowVec.delete(); next.delete();
mag.delete(); ang.delete(); rgb.delete();
return;
}
let begin = Date.now();
// start processing.
cap.read(frame2);
cv.cvtColor(frame2, next, cv.COLOR_RGBA2GRAY);
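// Farneback parameters: pyr_scale=0.5, levels=3, winsize=15, iterations=3, poly_n=5, poly_sigma=1.2, flags=0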
cv.calcOpticalFlowFarneback(prvs, next, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
cv.split(flow, flowVec);
let u = flowVec.get(0);
let v = flowVec.get(1);
cv.cartToPolar(u, v, mag, ang);
u.delete(); v.delete();
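// angle is in radians [0, 2*pi); scaling by 180/(2*pi) maps it onto the 8-bit hue range [0, 180)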
ang.convertTo(hsv0, cv.CV_8UC1, 180/Math.PI/2);
cv.normalize(mag, hsv2, 0, 255, cv.NORM_MINMAX, cv.CV_8UC1);
cv.merge(hsvVec, hsv);
cv.cvtColor(hsv, rgb, cv.COLOR_HSV2RGB);
cv.imshow('canvasOutput', rgb);
next.copyTo(prvs);
// schedule the next one.
let delay = 1000/FPS - (Date.now() - begin);
setTimeout(processVideo, delay);
} catch (err) {
utils.printError(err);
}
};
// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
videoInput.play().then(() => {
onVideoStarted();
});
} else {
videoInput.pause();
videoInput.currentTime = 0;
onVideoStopped();
}
});
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
utils.executeCode('codeEditor');
}
function onVideoStopped() {
streaming = false;
startAndStop.innerText = 'Start';
}
videoInput.addEventListener('ended', () => {
onVideoStopped();
});
utils.loadOpenCv(() => {
videoInput.addEventListener('canplay', () => {
startAndStop.removeAttribute('disabled');
});
videoInput.src = 'box.mp4';
});
</script>
</body>
</html>

View File

@ -0,0 +1,190 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Lucas-Kanade Optical Flow Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Lucas-Kanade Optical Flow Example</h2>
<p>
Click <b>Start/Stop</b> button to start or stop the video.<br>
The <b>videoInput</b> is a &lt;video&gt; element used as input.
The <b>canvasOutput</b> is a &lt;canvas&gt; element used as output.<br>
To decide which points to track, we use <b>cv.goodFeaturesToTrack()</b>: we take the first frame, detect some Shi-Tomasi corner points in it, and then iteratively track those points using <b>cv.calcOpticalFlowPyrLK</b>.<br>
The code of &lt;textarea&gt; will be executed when video is started.<br>
You can modify the code to investigate more.
</p>
<div>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false">
</textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="320" height="240" muted></video>
</td>
<td>
<canvas id="canvasOutput" width="320" height="240" ></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let cap = new cv.VideoCapture(video);
// parameters for ShiTomasi corner detection
let [maxCorners, qualityLevel, minDistance, blockSize] = [30, 0.3, 7, 7];
// parameters for lucas kanade optical flow
let winSize = new cv.Size(15, 15);
let maxLevel = 2;
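// stop the iterative search after 10 iterations or when the window moves by less than 0.03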
let criteria = new cv.TermCriteria(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03);
// create some random colors
let color = [];
for (let i = 0; i < maxCorners; i++) {
color.push(new cv.Scalar(parseInt(Math.random()*255), parseInt(Math.random()*255),
parseInt(Math.random()*255), 255));
}
// take first frame and find corners in it
let oldFrame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
cap.read(oldFrame);
let oldGray = new cv.Mat();
cv.cvtColor(oldFrame, oldGray, cv.COLOR_RGB2GRAY);
let p0 = new cv.Mat();
let none = new cv.Mat();
cv.goodFeaturesToTrack(oldGray, p0, maxCorners, qualityLevel, minDistance, none, blockSize);
// Create a mask image for drawing purposes
let zeroEle = new cv.Scalar(0, 0, 0, 255);
let mask = new cv.Mat(oldFrame.rows, oldFrame.cols, oldFrame.type(), zeroEle);
let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let frameGray = new cv.Mat();
let p1 = new cv.Mat();
let st = new cv.Mat();
let err = new cv.Mat();
const FPS = 30;
function processVideo() {
try {
if (!streaming) {
// clean and stop.
frame.delete(); oldGray.delete(); p0.delete(); p1.delete(); err.delete(); mask.delete();
return;
}
let begin = Date.now();
// start processing.
cap.read(frame);
cv.cvtColor(frame, frameGray, cv.COLOR_RGBA2GRAY);
// calculate optical flow
cv.calcOpticalFlowPyrLK(oldGray, frameGray, p0, p1, st, err, winSize, maxLevel, criteria);
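// st is 1 for points that were tracked successfully; err holds the tracking error for each point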
// select good points
let goodNew = [];
let goodOld = [];
for (let i = 0; i < st.rows; i++) {
if (st.data[i] === 1) {
goodNew.push(new cv.Point(p1.data32F[i*2], p1.data32F[i*2+1]));
goodOld.push(new cv.Point(p0.data32F[i*2], p0.data32F[i*2+1]));
}
}
// draw the tracks
for (let i = 0; i < goodNew.length; i++) {
cv.line(mask, goodNew[i], goodOld[i], color[i], 2);
cv.circle(frame, goodNew[i], 5, color[i], -1);
}
cv.add(frame, mask, frame);
cv.imshow('canvasOutput', frame);
// now update the previous frame and previous points
frameGray.copyTo(oldGray);
p0.delete(); p0 = null;
p0 = new cv.Mat(goodNew.length, 1, cv.CV_32FC2);
for (let i = 0; i < goodNew.length; i++) {
p0.data32F[i*2] = goodNew[i].x;
p0.data32F[i*2+1] = goodNew[i].y;
}
// schedule the next one.
let delay = 1000/FPS - (Date.now() - begin);
setTimeout(processVideo, delay);
} catch (err) {
utils.printError(err);
}
};
// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
videoInput.play().then(() => {
onVideoStarted();
});
} else {
videoInput.pause();
videoInput.currentTime = 0;
onVideoStopped();
}
});
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
utils.executeCode('codeEditor');
}
function onVideoStopped() {
streaming = false;
startAndStop.innerText = 'Start';
}
videoInput.addEventListener('ended', () => {
onVideoStopped();
});
utils.loadOpenCv(() => {
videoInput.addEventListener('canplay', () => {
startAndStop.removeAttribute('disabled');
});
videoInput.src = 'box.mp4';
});
</script>
</body>
</html>

View File

@ -0,0 +1,327 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Pose Estimation Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Pose Estimation Example</h2>
<p>
This tutorial shows you how to write a pose estimation example with OpenCV.js.<br>
To try the example, click the <b>modelFile</b> button (and the <b>configFile</b> button if needed) to upload the inference model.
You can find the model URLs and parameters in the <a href="#appendix">model info</a> section.
Then change the parameters in the first code snippet according to the uploaded model.
Finally, click the <b>Try it</b> button to see the result. You can choose any other image.<br>
</p>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput" width="400" height="250"></canvas>
</td>
<td>
<canvas id="canvasOutput" style="visibility: hidden;" width="400" height="250"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">
canvasInput <input type="file" id="fileInput" name="file" accept="image/*">
</div>
</td>
<td>
<p id='status' align="left"></p>
</td>
</tr>
<tr>
<td>
<div class="caption">
modelFile <input type="file" id="modelFile" name="file">
</div>
</td>
</tr>
<tr>
<td>
<div class="caption">
configFile <input type="file" id="configFile">
</div>
</td>
</tr>
</table>
</div>
<div>
<p class="err" id="errorMessage"></p>
</div>
<div>
<h3>Help function</h3>
<p>1. The parameters for model inference, which you can modify to investigate more models.</p>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false"></textarea>
<p>2. The main loop, which reads the image from the canvas and runs inference once.</p>
<textarea class="code" rows="15" cols="100" id="codeEditor1" spellcheck="false"></textarea>
<p>3. Get a blob from the image as input for the net, and standardize it with <b>mean</b> and <b>std</b>.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor2" spellcheck="false"></textarea>
<p>4. Fetch the model file and save it to the emscripten file system when the input button is clicked.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor3" spellcheck="false"></textarea>
<p>5. The pairs of keypoints for the different datasets.</p>
<textarea class="code" rows="30" cols="100" id="codeEditor4" spellcheck="false"></textarea>
<p>6. The post-processing, which extracts the predicted points and draws lines on the image.</p>
<textarea class="code" rows="30" cols="100" id="codeEditor5" spellcheck="false"></textarea>
</div>
<div id="appendix">
<h2>Model Info:</h2>
</div>
<script src="utils.js" type="text/javascript"></script>
<script src="js_dnn_example_helper.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
inputSize = [368, 368];
mean = [0, 0, 0];
std = 0.00392;
swapRB = false;
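// keypoints whose heat-map confidence is below threshold are ignored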
threshold = 0.1;
// the pairs of keypoint, can be "COCO", "MPI" and "BODY_25"
dataset = "COCO";
</script>
<script id="codeSnippet1" type="text/code-snippet">
main = async function() {
const input = getBlobFromImage(inputSize, mean, std, swapRB, 'canvasInput');
let net = cv.readNet(configPath, modelPath);
net.setInput(input);
const start = performance.now();
const result = net.forward();
const time = performance.now()-start;
const output = postProcess(result);
updateResult(output, time);
input.delete();
net.delete();
result.delete();
}
</script>
<script id="codeSnippet4" type="text/code-snippet">
BODY_PARTS = {};
POSE_PAIRS = [];
if (dataset === 'COCO') {
BODY_PARTS = { "Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
"LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
"RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "REye": 14,
"LEye": 15, "REar": 16, "LEar": 17, "Background": 18 };
POSE_PAIRS = [ ["Neck", "RShoulder"], ["Neck", "LShoulder"], ["RShoulder", "RElbow"],
["RElbow", "RWrist"], ["LShoulder", "LElbow"], ["LElbow", "LWrist"],
["Neck", "RHip"], ["RHip", "RKnee"], ["RKnee", "RAnkle"], ["Neck", "LHip"],
["LHip", "LKnee"], ["LKnee", "LAnkle"], ["Neck", "Nose"], ["Nose", "REye"],
["REye", "REar"], ["Nose", "LEye"], ["LEye", "LEar"] ]
} else if (dataset === 'MPI') {
BODY_PARTS = { "Head": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
"LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
"RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "Chest": 14,
"Background": 15 }
POSE_PAIRS = [ ["Head", "Neck"], ["Neck", "RShoulder"], ["RShoulder", "RElbow"],
["RElbow", "RWrist"], ["Neck", "LShoulder"], ["LShoulder", "LElbow"],
["LElbow", "LWrist"], ["Neck", "Chest"], ["Chest", "RHip"], ["RHip", "RKnee"],
["RKnee", "RAnkle"], ["Chest", "LHip"], ["LHip", "LKnee"], ["LKnee", "LAnkle"] ]
} else if (dataset === 'BODY_25') {
BODY_PARTS = { "Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
"LShoulder": 5, "LElbow": 6, "LWrist": 7, "MidHip": 8, "RHip": 9,
"RKnee": 10, "RAnkle": 11, "LHip": 12, "LKnee": 13, "LAnkle": 14,
"REye": 15, "LEye": 16, "REar": 17, "LEar": 18, "LBigToe": 19,
"LSmallToe": 20, "LHeel": 21, "RBigToe": 22, "RSmallToe": 23,
"RHeel": 24, "Background": 25 }
POSE_PAIRS = [ ["Neck", "Nose"], ["Neck", "RShoulder"],
["Neck", "LShoulder"], ["RShoulder", "RElbow"],
["RElbow", "RWrist"], ["LShoulder", "LElbow"],
["LElbow", "LWrist"], ["Nose", "REye"],
["REye", "REar"], ["Neck", "LEye"],
["LEye", "LEar"], ["Neck", "MidHip"],
["MidHip", "RHip"], ["RHip", "RKnee"],
["RKnee", "RAnkle"], ["RAnkle", "RBigToe"],
["RBigToe", "RSmallToe"], ["RAnkle", "RHeel"],
["MidHip", "LHip"], ["LHip", "LKnee"],
["LKnee", "LAnkle"], ["LAnkle", "LBigToe"],
["LBigToe", "LSmallToe"], ["LAnkle", "LHeel"] ]
}
</script>
<script id="codeSnippet5" type="text/code-snippet">
postProcess = function(result) {
const resultData = result.data32F;
const matSize = result.matSize;
const size1 = matSize[1];
const size2 = matSize[2];
const size3 = matSize[3];
const mapSize = size2 * size3;
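// the net output has shape [1, number of keypoints, H, W]: one confidence heat map per body part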
let canvasOutput = document.getElementById('canvasOutput');
const outputWidth = canvasOutput.width;
const outputHeight = canvasOutput.height;
let image = cv.imread("canvasInput");
let output = new cv.Mat(outputWidth, outputHeight, cv.CV_8UC3);
cv.cvtColor(image, output, cv.COLOR_RGBA2RGB);
// get position of keypoints from output
let points = [];
for (let i = 0; i < Object.keys(BODY_PARTS).length; ++i) {
let heatMap = resultData.slice(i*mapSize, (i+1)*mapSize);
let maxIndex = 0;
let maxConf = heatMap[0];
for (let index in heatMap) {
if (heatMap[index] > heatMap[maxIndex]) {
maxIndex = index;
maxConf = heatMap[index];
}
}
if (maxConf > threshold) {
let indexX = maxIndex % size3;
let indexY = maxIndex / size3;
let x = outputWidth * indexX / size3;
let y = outputHeight * indexY / size2;
points[i] = [Math.round(x), Math.round(y)];
}
}
// draw the points and lines into the image
for (let pair of POSE_PAIRS) {
let partFrom = pair[0];
let partTo = pair[1];
let idFrom = BODY_PARTS[partFrom];
let idTo = BODY_PARTS[partTo];
let pointFrom = points[idFrom];
let pointTo = points[idTo];
if (points[idFrom] && points[idTo]) {
cv.line(output, new cv.Point(pointFrom[0], pointFrom[1]),
new cv.Point(pointTo[0], pointTo[1]), new cv.Scalar(0, 255, 0), 3);
cv.ellipse(output, new cv.Point(pointFrom[0], pointFrom[1]), new cv.Size(3, 3), 0, 0, 360,
new cv.Scalar(0, 0, 255), cv.FILLED);
cv.ellipse(output, new cv.Point(pointTo[0], pointTo[1]), new cv.Size(3, 3), 0, 0, 360,
new cv.Scalar(0, 0, 255), cv.FILLED);
}
}
return output;
}
</script>
<script type="text/javascript">
let jsonUrl = "js_pose_estimation_model_info.json";
drawInfoTable(jsonUrl, 'appendix');
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadCode('codeSnippet1', 'codeEditor1');
let getBlobFromImageCode = 'getBlobFromImage = ' + getBlobFromImage.toString();
document.getElementById('codeEditor2').value = getBlobFromImageCode;
let loadModelCode = 'loadModel = ' + loadModel.toString();
document.getElementById('codeEditor3').value = loadModelCode;
utils.loadCode('codeSnippet4', 'codeEditor4');
utils.loadCode('codeSnippet5', 'codeEditor5');
let canvas = document.getElementById('canvasInput');
let ctx = canvas.getContext('2d');
let img = new Image();
img.crossOrigin = 'anonymous';
img.src = 'roi.jpg';
img.onload = function() {
ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
};
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
initStatus();
document.getElementById('status').innerHTML = 'Running function main()...';
utils.executeCode('codeEditor');
utils.executeCode('codeEditor1');
if (modelPath === "") {
document.getElementById('status').innerHTML = 'Running failed.';
utils.printError('Please upload model file by clicking the button first.');
} else {
setTimeout(main, 1);
}
});
let fileInput = document.getElementById('fileInput');
fileInput.addEventListener('change', (e) => {
initStatus();
loadImageToCanvas(e, 'canvasInput');
});
let configPath = "";
let configFile = document.getElementById('configFile');
configFile.addEventListener('change', async (e) => {
initStatus();
configPath = await loadModel(e);
document.getElementById('status').innerHTML = `The config file '${configPath}' is created successfully.`;
});
let modelPath = "";
let modelFile = document.getElementById('modelFile');
modelFile.addEventListener('change', async (e) => {
initStatus();
modelPath = await loadModel(e);
document.getElementById('status').innerHTML = `The model file '${modelPath}' is created successfully.`;
configPath = "";
configFile.value = "";
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
var main = async function() {};
var postProcess = function(result) {};
utils.executeCode('codeEditor');
utils.executeCode('codeEditor1');
utils.executeCode('codeEditor2');
utils.executeCode('codeEditor3');
utils.executeCode('codeEditor4');
utils.executeCode('codeEditor5');
function updateResult(output, time) {
try{
let canvasOutput = document.getElementById('canvasOutput');
canvasOutput.style.visibility = "visible";
let resized = new cv.Mat(canvasOutput.width, canvasOutput.height, cv.CV_8UC4);
cv.resize(output, resized, new cv.Size(canvasOutput.width, canvasOutput.height));
cv.imshow('canvasOutput', resized);
document.getElementById('status').innerHTML = `<b>Model:</b> ${modelPath}<br>
<b>Inference time:</b> ${time.toFixed(2)} ms`;
} catch(e) {
console.log(e);
}
}
function initStatus() {
document.getElementById('status').innerHTML = '';
document.getElementById('canvasOutput').style.visibility = "hidden";
utils.clearError();
}
</script>
</body>
</html>

View File

@ -0,0 +1,34 @@
{
"caffe": [
{
"model": "body_25",
"inputSize": "368, 368",
"mean": "0, 0, 0",
"std": "0.00392",
"swapRB": "false",
"dataset": "BODY_25",
"modelUrl": "http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/body_25/pose_iter_584000.caffemodel",
"configUrl": "https://raw.githubusercontent.com/CMU-Perceptual-Computing-Lab/openpose/master/models/pose/body_25/pose_deploy.prototxt"
},
{
"model": "coco",
"inputSize": "368, 368",
"mean": "0, 0, 0",
"std": "0.00392",
"swapRB": "false",
"dataset": "COCO",
"modelUrl": "http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/coco/pose_iter_440000.caffemodel",
"configUrl": "https://raw.githubusercontent.com/CMU-Perceptual-Computing-Lab/openpose/master/models/pose/coco/pose_deploy_linevec.prototxt"
},
{
"model": "mpi",
"inputSize": "368, 368",
"mean": "0, 0, 0",
"std": "0.00392",
"swapRB": "false",
"dataset": "MPI",
"modelUrl": "http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/mpi/pose_iter_160000.caffemodel",
"configUrl": "https://raw.githubusercontent.com/CMU-Perceptual-Computing-Lab/openpose/master/models/pose/mpi/pose_deploy_linevec.prototxt"
}
]
}

View File

@ -0,0 +1,67 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image PyrDown Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image PyrDown Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// You can try more different parameters
cv.pyrDown(src, dst, new cv.Size(0, 0), cv.BORDER_DEFAULT);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,67 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image PyrUp Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image PyrUp Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// You can try more different parameters
cv.pyrUp(src, dst, new cv.Size(0, 0), cv.BORDER_DEFAULT);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,243 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Semantic Segmentation Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Semantic Segmentation Example</h2>
<p>
This tutorial shows you how to write a semantic segmentation example with OpenCV.js.<br>
To try the example you should click the <b>modelFile</b> button (and the <b>configFile</b> button if needed) to upload the inference model.
You can find the model URLs and parameters in the <a href="#appendix">model info</a> section.
Then you should change the parameters in the first code snippet according to the uploaded model.
Finally click the <b>Try it</b> button to see the result. You can choose any other image.<br>
</p>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput" width="400" height="400"></canvas>
</td>
<td>
<canvas id="canvasOutput" style="visibility: hidden;" width="400" height="400"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">
canvasInput <input type="file" id="fileInput" name="file" accept="image/*">
</div>
</td>
<td>
<p id='status' align="left"></p>
</td>
</tr>
<tr>
<td>
<div class="caption">
modelFile <input type="file" id="modelFile" name="file">
</div>
</td>
</tr>
<tr>
<td>
<div class="caption">
configFile <input type="file" id="configFile">
</div>
</td>
</tr>
</table>
</div>
<div>
<p class="err" id="errorMessage"></p>
</div>
<div>
<h3>Help function</h3>
<p>1. The parameters for model inference, which you can modify to investigate more models.</p>
<textarea class="code" rows="5" cols="100" id="codeEditor" spellcheck="false"></textarea>
<p>2. The main loop, which reads the image from the canvas and runs inference once.</p>
<textarea class="code" rows="16" cols="100" id="codeEditor1" spellcheck="false"></textarea>
<p>3. Get a blob from the image as input for the net, and standardize it with <b>mean</b> and <b>std</b>.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor2" spellcheck="false"></textarea>
<p>4. Fetch the model file and save it to the Emscripten file system once the input button is clicked.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor3" spellcheck="false"></textarea>
<p>5. The post-processing, including generating colors for the different classes and an argmax to get the class of each pixel.</p>
<textarea class="code" rows="34" cols="100" id="codeEditor4" spellcheck="false"></textarea>
</div>
<div id="appendix">
<h2>Model Info:</h2>
</div>
<script src="utils.js" type="text/javascript"></script>
<script src="js_dnn_example_helper.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
inputSize = [513, 513];
mean = [127.5, 127.5, 127.5];
std = 0.007843;
swapRB = false;
</script>
<script id="codeSnippet1" type="text/code-snippet">
main = async function() {
const input = getBlobFromImage(inputSize, mean, std, swapRB, 'canvasInput');
let net = cv.readNet(configPath, modelPath);
net.setInput(input);
const start = performance.now();
const result = net.forward();
const time = performance.now()-start;
const colors = generateColors(result);
const output = argmax(result, colors);
updateResult(output, time);
input.delete();
net.delete();
result.delete();
}
</script>
<script id="codeSnippet4" type="text/code-snippet">
generateColors = function(result) {
const numClasses = result.matSize[1];
let colors = [0,0,0];
while(colors.length < numClasses*3){
colors.push(Math.round((Math.random()*255 + colors[colors.length-3]) / 2));
}
return colors;
}
argmax = function(result, colors) {
const C = result.matSize[1];
const H = result.matSize[2];
const W = result.matSize[3];
const resultData = result.data32F;
const imgSize = H*W;
let classId = [];
for (let i = 0; i < imgSize; ++i) {
let id = 0;
for (let j = 0; j < C; ++j) {
if (resultData[j*imgSize+i] > resultData[id*imgSize+i]) {
id = j;
}
}
classId.push(colors[id*3]);
classId.push(colors[id*3+1]);
classId.push(colors[id*3+2]);
classId.push(255);
}
let output = cv.matFromArray(H, W, cv.CV_8UC4, classId);
return output;
}
</script>
<script type="text/javascript">
let jsonUrl = "js_semantic_segmentation_model_info.json";
drawInfoTable(jsonUrl, 'appendix');
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadCode('codeSnippet1', 'codeEditor1');
let getBlobFromImageCode = 'getBlobFromImage = ' + getBlobFromImage.toString();
document.getElementById('codeEditor2').value = getBlobFromImageCode;
let loadModelCode = 'loadModel = ' + loadModel.toString();
document.getElementById('codeEditor3').value = loadModelCode;
utils.loadCode('codeSnippet4', 'codeEditor4');
let canvas = document.getElementById('canvasInput');
let ctx = canvas.getContext('2d');
let img = new Image();
img.crossOrigin = 'anonymous';
img.src = 'roi.jpg';
img.onload = function() {
ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
};
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
initStatus();
document.getElementById('status').innerHTML = 'Running function main()...';
utils.executeCode('codeEditor');
utils.executeCode('codeEditor1');
if (modelPath === "") {
document.getElementById('status').innerHTML = 'Running failed.';
utils.printError('Please upload model file by clicking the button first.');
} else {
setTimeout(main, 1);
}
});
let fileInput = document.getElementById('fileInput');
fileInput.addEventListener('change', (e) => {
initStatus();
loadImageToCanvas(e, 'canvasInput');
});
let configPath = "";
let configFile = document.getElementById('configFile');
configFile.addEventListener('change', async (e) => {
initStatus();
configPath = await loadModel(e);
document.getElementById('status').innerHTML = `The config file '${configPath}' is created successfully.`;
});
let modelPath = "";
let modelFile = document.getElementById('modelFile');
modelFile.addEventListener('change', async (e) => {
initStatus();
modelPath = await loadModel(e);
document.getElementById('status').innerHTML = `The model file '${modelPath}' is created successfully.`;
configPath = "";
configFile.value = "";
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
var main = async function() {};
var generateColors = function(result) {};
var argmax = function(result, colors) {};
utils.executeCode('codeEditor1');
utils.executeCode('codeEditor2');
utils.executeCode('codeEditor3');
utils.executeCode('codeEditor4');
function updateResult(output, time) {
try{
let canvasOutput = document.getElementById('canvasOutput');
canvasOutput.style.visibility = "visible";
let resized = new cv.Mat(canvasOutput.width, canvasOutput.height, cv.CV_8UC4);
cv.resize(output, resized, new cv.Size(canvasOutput.width, canvasOutput.height));
cv.imshow('canvasOutput', resized);
document.getElementById('status').innerHTML = `<b>Model:</b> ${modelPath}<br>
<b>Inference time:</b> ${time.toFixed(2)} ms`;
} catch(e) {
console.log(e);
}
}
function initStatus() {
document.getElementById('status').innerHTML = '';
document.getElementById('canvasOutput').style.visibility = "hidden";
utils.clearError();
}
</script>
</body>
</html>
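
One detail of the argmax helper above that is easy to miss: the network output arrives as a flat Float32Array laid out as 1×C×H×W, so the score of class c at pixel (y, x) lives at index c*H*W + y*W + x. A tiny sketch of that indexing (the sizes are hypothetical, purely for illustration):

const C = 21, H = 513, W = 513;       // hypothetical class count and output size
const imgSize = H * W;
// Score of class c at pixel (y, x) in a flat 1xCxHxW buffer:
function scoreAt(resultData, c, y, x) {
    return resultData[c * imgSize + y * W + x];
}
// argmax() walks i = y*W + x over all pixels and keeps the c that maximizes scoreAt().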

View File

@ -0,0 +1,12 @@
{
"tensorflow": [
{
"model": "deeplabv3",
"inputSize": "513, 513",
"mean": "127.5, 127.5, 127.5",
"std": "0.007843",
"swapRB": "false",
"modelUrl": "https://drive.google.com/uc?id=1v-hfGenaE9tiGOzo5qdgMNG_gqQ5-Xn4&export=download"
}
]
}

View File

@ -0,0 +1,60 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Hello OpenCV.js</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Hello OpenCV.js</h2>
<p id="status">OpenCV.js is loading...</p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<img id="imageSrc" alt="No Image" class="small" />
</td>
<td>
<canvas id="canvasOutput" class="small" height="300px"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">imageSrc <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script type="text/javascript">
let imgElement = document.getElementById('imageSrc');
let inputElement = document.getElementById('fileInput');
inputElement.addEventListener('change', (e) => {
imgElement.src = URL.createObjectURL(e.target.files[0]);
}, false);
imgElement.onload = function() {
let mat = cv.imread(imgElement);
cv.imshow('canvasOutput', mat);
mat.delete();
};
function onOpenCvReady() { // eslint-disable-line no-unused-vars
document.getElementById('status').innerHTML = '<b>OpenCV.js is ready</b>. ' +
'You can upload an image.<br>' +
'The <b>imageSrc</b> is a &lt;img&gt; element used as cv.Mat input. ' +
'The <b>canvasOutput</b> is a &lt;canvas&gt; element used as cv.Mat output.';
}
function onOpenCvError() { // eslint-disable-line no-unused-vars
let element = document.getElementById('status');
element.setAttribute('class', 'err');
element.innerHTML = 'Failed to load opencv.js';
}
</script>
<script async src="opencv.js" type="text/javascript" onload="onOpenCvReady();" onerror="onOpenCvError();"></script>
</body>
</html>

View File

@ -0,0 +1,228 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Style Transfer Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Style Transfer Example</h2>
<p>
This tutorial shows you how to write a style transfer example with OpenCV.js.<br>
To try the example you should click the <b>modelFile</b> button (and the <b>configFile</b> button if needed) to upload the inference model.
You can find the model URLs and parameters in the <a href="#appendix">model info</a> section.
Then you should change the parameters in the first code snippet according to the uploaded model.
Finally click the <b>Try it</b> button to see the result. You can choose any other image.<br>
</p>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput" width="400" height="400"></canvas>
</td>
<td>
<canvas id="canvasOutput" style="visibility: hidden;" width="400" height="400"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">
canvasInput <input type="file" id="fileInput" name="file" accept="image/*">
</div>
</td>
<td>
<p id='status' align="left"></p>
</td>
</tr>
<tr>
<td>
<div class="caption">
modelFile <input type="file" id="modelFile" name="file">
</div>
</td>
</tr>
<tr>
<td>
<div class="caption">
configFile <input type="file" id="configFile">
</div>
</td>
</tr>
</table>
</div>
<div>
<p class="err" id="errorMessage"></p>
</div>
<div>
<h3>Help function</h3>
<p>1. The parameters for model inference, which you can modify to investigate more models.</p>
<textarea class="code" rows="5" cols="100" id="codeEditor" spellcheck="false"></textarea>
<p>2. The main loop, which reads the image from the canvas and runs inference once.</p>
<textarea class="code" rows="15" cols="100" id="codeEditor1" spellcheck="false"></textarea>
<p>3. Get a blob from the image as input for the net, and standardize it with <b>mean</b> and <b>std</b>.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor2" spellcheck="false"></textarea>
<p>4. Fetch the model file and save it to the Emscripten file system once the input button is clicked.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor3" spellcheck="false"></textarea>
<p>5. The post-processing, including scaling and reordering of the output.</p>
<textarea class="code" rows="21" cols="100" id="codeEditor4" spellcheck="false"></textarea>
</div>
<div id="appendix">
<h2>Model Info:</h2>
</div>
<script src="utils.js" type="text/javascript"></script>
<script src="js_dnn_example_helper.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
inputSize = [224, 224];
mean = [104, 117, 123];
std = 1;
swapRB = false;
</script>
<script id="codeSnippet1" type="text/code-snippet">
main = async function() {
const input = getBlobFromImage(inputSize, mean, std, swapRB, 'canvasInput');
let net = cv.readNet(configPath, modelPath);
net.setInput(input);
const start = performance.now();
const result = net.forward();
const time = performance.now()-start;
const output = postProcess(result);
updateResult(output, time);
input.delete();
net.delete();
result.delete();
}
</script>
<script id="codeSnippet4" type="text/code-snippet">
postProcess = function(result) {
const resultData = result.data32F;
const C = result.matSize[1];
const H = result.matSize[2];
const W = result.matSize[3];
const mean = [104, 117, 123];
let normData = [];
for (let h = 0; h < H; ++h) {
for (let w = 0; w < W; ++w) {
for (let c = 0; c < C; ++c) {
normData.push(resultData[c*H*W + h*W + w] + mean[c]);
}
normData.push(255);
}
}
let output = cv.matFromArray(H, W, cv.CV_8UC4, normData);
return output;
}
</script>
<script type="text/javascript">
let jsonUrl = "js_style_transfer_model_info.json";
drawInfoTable(jsonUrl, 'appendix');
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadCode('codeSnippet1', 'codeEditor1');
let getBlobFromImageCode = 'getBlobFromImage = ' + getBlobFromImage.toString();
document.getElementById('codeEditor2').value = getBlobFromImageCode;
let loadModelCode = 'loadModel = ' + loadModel.toString();
document.getElementById('codeEditor3').value = loadModelCode;
utils.loadCode('codeSnippet4', 'codeEditor4');
let canvas = document.getElementById('canvasInput');
let ctx = canvas.getContext('2d');
let img = new Image();
img.crossOrigin = 'anonymous';
img.src = 'lena.png';
img.onload = function() {
ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
};
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
initStatus();
document.getElementById('status').innerHTML = 'Running function main()...';
utils.executeCode('codeEditor');
utils.executeCode('codeEditor1');
if (modelPath === "") {
document.getElementById('status').innerHTML = 'Running failed.';
utils.printError('Please upload model file by clicking the button first.');
} else {
setTimeout(main, 1);
}
});
let fileInput = document.getElementById('fileInput');
fileInput.addEventListener('change', (e) => {
initStatus();
loadImageToCanvas(e, 'canvasInput');
});
let configPath = "";
let configFile = document.getElementById('configFile');
configFile.addEventListener('change', async (e) => {
initStatus();
configPath = await loadModel(e);
document.getElementById('status').innerHTML = `The config file '${configPath}' is created successfully.`;
});
let modelPath = "";
let modelFile = document.getElementById('modelFile');
modelFile.addEventListener('change', async (e) => {
initStatus();
modelPath = await loadModel(e);
document.getElementById('status').innerHTML = `The model file '${modelPath}' is created successfully.`;
configPath = "";
configFile.value = "";
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
var main = async function() {};
var postProcess = function(result) {};
utils.executeCode('codeEditor1');
utils.executeCode('codeEditor2');
utils.executeCode('codeEditor3');
utils.executeCode('codeEditor4');
function updateResult(output, time) {
try{
let canvasOutput = document.getElementById('canvasOutput');
canvasOutput.style.visibility = "visible";
let resized = new cv.Mat(canvasOutput.width, canvasOutput.height, cv.CV_8UC4);
cv.resize(output, resized, new cv.Size(canvasOutput.width, canvasOutput.height));
cv.imshow('canvasOutput', resized);
document.getElementById('status').innerHTML = `<b>Model:</b> ${modelPath}<br>
<b>Inference time:</b> ${time.toFixed(2)} ms`;
} catch(e) {
console.log(e);
}
}
function initStatus() {
document.getElementById('status').innerHTML = '';
document.getElementById('canvasOutput').style.visibility = "hidden";
utils.clearError();
}
</script>
</body>
</html>

View File

@ -0,0 +1,76 @@
{
"torch": [
{
"model": "candy.t7",
"inputSize": "224, 224",
"mean": "104, 117, 123",
"std": "1",
"swapRB": "false",
"modelUrl": "https://cs.stanford.edu/people/jcjohns/fast-neural-style/models//instance_norm/candy.t7"
},
{
"model": "composition_vii.t7",
"inputSize": "224, 224",
"mean": "104, 117, 123",
"std": "1",
"swapRB": "false",
"modelUrl": "https://cs.stanford.edu/people/jcjohns/fast-neural-style/models//eccv16/composition_vii.t7"
},
{
"model": "feathers.t7",
"inputSize": "224, 224",
"mean": "104, 117, 123",
"std": "1",
"swapRB": "false",
"modelUrl": "https://cs.stanford.edu/people/jcjohns/fast-neural-style/models//instance_norm/feathers.t7"
},
{
"model": "la_muse.t7",
"inputSize": "224, 224",
"mean": "104, 117, 123",
"std": "1",
"swapRB": "false",
"modelUrl": "https://cs.stanford.edu/people/jcjohns/fast-neural-style/models//instance_norm/la_muse.t7"
},
{
"model": "mosaic.t7",
"inputSize": "224, 224",
"mean": "104, 117, 123",
"std": "1",
"swapRB": "false",
"modelUrl": "https://cs.stanford.edu/people/jcjohns/fast-neural-style/models//instance_norm/mosaic.t7"
},
{
"model": "starry_night.t7",
"inputSize": "224, 224",
"mean": "104, 117, 123",
"std": "1",
"swapRB": "false",
"modelUrl": "https://cs.stanford.edu/people/jcjohns/fast-neural-style/models//eccv16/starry_night.t7"
},
{
"model": "the_scream.t7",
"inputSize": "224, 224",
"mean": "104, 117, 123",
"std": "1",
"swapRB": "false",
"modelUrl": "https://cs.stanford.edu/people/jcjohns/fast-neural-style/models//instance_norm/the_scream.t7"
},
{
"model": "the_wave.t7",
"inputSize": "224, 224",
"mean": "104, 117, 123",
"std": "1",
"swapRB": "false",
"modelUrl": "https://cs.stanford.edu/people/jcjohns/fast-neural-style/models//eccv16/the_wave.t7"
},
{
"model": "udnie.t7",
"inputSize": "224, 224",
"mean": "104, 117, 123",
"std": "1",
"swapRB": "false",
"modelUrl": "https://cs.stanford.edu/people/jcjohns/fast-neural-style/models//instance_norm/udnie.t7"
}
]
}

View File

@ -0,0 +1,90 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Template Match Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Template Match Example</h2>
<p>
&lt;canvas&gt; elements named <b>imageCanvasInput</b>, <b>templateCanvasInput</b>
and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="imageCanvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">imageCanvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
<tr>
<td>
<canvas id="templateCanvasInput"></canvas>
</td>
<td>
</td>
</tr>
<tr>
<td>
<div class="caption">templateCanvasInput <input type="file" id="templateFileInput" name="file" accept="image/*" /></div>
</td>
<td>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('imageCanvasInput');
let templ = cv.imread('templateCanvasInput');
let dst = new cv.Mat();
let mask = new cv.Mat();
cv.matchTemplate(src, templ, dst, cv.TM_CCOEFF, mask);
let result = cv.minMaxLoc(dst, mask);
let maxPoint = result.maxLoc;
let color = new cv.Scalar(255, 0, 0, 255);
let point = new cv.Point(maxPoint.x + templ.cols, maxPoint.y + templ.rows);
cv.rectangle(src, maxPoint, point, color, 2, cv.LINE_8, 0);
cv.imshow('canvasOutput', src);
src.delete(); dst.delete(); mask.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'imageCanvasInput');
utils.loadImageToCanvas('lenaFace.png', 'templateCanvasInput');
utils.addFileInputHandler('fileInput', 'imageCanvasInput');
utils.addFileInputHandler('templateFileInput', 'templateCanvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>
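
A note on the snippet above: with cv.TM_CCOEFF the best match is where the response is largest, so maxLoc is used. With the squared-difference methods the best match is the smallest response instead. A hedged sketch that handles both cases on the same canvases:

let src = cv.imread('imageCanvasInput');
let templ = cv.imread('templateCanvasInput');
let dst = new cv.Mat();
let mask = new cv.Mat();
let method = cv.TM_SQDIFF_NORMED;   // try cv.TM_CCOEFF_NORMED as well
cv.matchTemplate(src, templ, dst, method, mask);
let result = cv.minMaxLoc(dst, mask);
// For SQDIFF methods the smallest value marks the match; for the others the largest does.
let matchPoint = (method === cv.TM_SQDIFF || method === cv.TM_SQDIFF_NORMED) ?
    result.minLoc : result.maxLoc;
let bottomRight = new cv.Point(matchPoint.x + templ.cols, matchPoint.y + templ.rows);
cv.rectangle(src, matchPoint, bottomRight, new cv.Scalar(255, 0, 0, 255), 2, cv.LINE_8, 0);
cv.imshow('canvasOutput', src);
src.delete(); templ.delete(); dst.delete(); mask.delete();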

View File

@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Adaptive Threshold Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Adaptive Threshold Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
// You can try more different parameters
cv.adaptiveThreshold(src, dst, 200, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 3, 2);
cv.imshow('canvasOutput', dst);
src.delete();
dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,68 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Threshold Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Threshold Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// You can try more different parameters
cv.threshold(src, dst, 177, 200, cv.THRESH_BINARY);
cv.imshow('canvasOutput', dst);
src.delete();
dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>
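
If hand-picking the threshold value (177 above) is inconvenient, Otsu's method can derive it from the image histogram, as the watershed-related examples further down already do. A minimal sketch on the same canvases (note that Otsu needs a single-channel image):

let src = cv.imread('canvasInput');
let gray = new cv.Mat();
let dst = new cv.Mat();
cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
// The 0 passed as thresh is ignored; THRESH_OTSU picks the value from the histogram.
cv.threshold(gray, dst, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU);
cv.imshow('canvasOutput', dst);
src.delete(); gray.delete(); dst.delete();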

View File

@ -0,0 +1,89 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Trackbar Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Trackbar Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput1</b>, <b>canvasInput2</b> and <b>canvasOutput</b> have been prepared.<br>
The code of &lt;textarea&gt; will be executed when the value of the &lt;input&gt; element named <b>trackbar</b> changes.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<textarea class="code" rows="12" cols="80" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<b>trackbar</b>
<input type="range" id="trackbar" disabled value="50" min="0" max="100" step="1">
<label id="weightValue" ></label>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput1" class="small"></canvas>
</td>
<td>
<canvas id="canvasInput2" class="small"></canvas>
</td>
<td>
<canvas id="canvasOutput" class="small"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput1</div>
</td>
<td>
<div class="caption">canvasInput2</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let trackbar = document.getElementById('trackbar');
let alpha = trackbar.value/trackbar.max;
let beta = ( 1.0 - alpha );
let src1 = cv.imread('canvasInput1');
let src2 = cv.imread('canvasInput2');
let dst = new cv.Mat();
cv.addWeighted( src1, alpha, src2, beta, 0.0, dst, -1);
cv.imshow('canvasOutput', dst);
dst.delete();
src1.delete();
src2.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('apple.jpg', 'canvasInput1');
utils.loadImageToCanvas('orange.jpg', 'canvasInput2');
let trackbar = document.getElementById('trackbar');
trackbar.addEventListener('input', () => {
utils.executeCode('codeEditor');
});
let weightValue = document.getElementById('weightValue');
weightValue.innerText = trackbar.value;
trackbar.addEventListener('input', () => {
weightValue.innerText = trackbar.value;
});
utils.loadOpenCv(() => {
trackbar.removeAttribute('disabled');
utils.executeCode('codeEditor');
});
</script>
</body>
</html>
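
The blend in the snippet is dst = alpha*src1 + beta*src2 + gamma with beta = 1 - alpha, so the slider sweeps a straight cross-fade between the two inputs. A small sketch of the same call with the string-to-number coercion made explicit (trackbar.value and trackbar.max are strings; the plain division above relies on implicit coercion):

let trackbar = document.getElementById('trackbar');
let alpha = Number(trackbar.value) / Number(trackbar.max);
let beta = 1.0 - alpha;
let src1 = cv.imread('canvasInput1');
let src2 = cv.imread('canvasInput2');
let dst = new cv.Mat();
// dst = alpha*src1 + beta*src2 + 0.0, keeping the source depth (-1).
cv.addWeighted(src1, alpha, src2, beta, 0.0, dst, -1);
cv.imshow('canvasOutput', dst);
src1.delete(); src2.delete(); dst.delete();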

View File

@ -0,0 +1,120 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Video Capture Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Video Capture Example</h2>
<p>
Click <b>Start/Stop</b> button to start or stop the camera capture.<br>
The <b>videoInput</b> is a &lt;video&gt; element used as OpenCV.js input.
The <b>canvasOutput</b> is a &lt;canvas&gt; element used as OpenCV.js output.<br>
The code of &lt;textarea&gt; will be executed when the video is started.
You can modify the code to investigate more.
</p>
<div>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false">
</textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width=320 height=240></video>
</td>
<td>
<canvas id="canvasOutput" width=320 height=240></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let src = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let dst = new cv.Mat(video.height, video.width, cv.CV_8UC1);
let cap = new cv.VideoCapture(video);
const FPS = 30;
function processVideo() {
try {
if (!streaming) {
// clean and stop.
src.delete();
dst.delete();
return;
}
let begin = Date.now();
// start processing.
cap.read(src);
cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY);
cv.imshow('canvasOutput', dst);
// schedule the next one.
let delay = 1000/FPS - (Date.now() - begin);
setTimeout(processVideo, delay);
} catch (err) {
utils.printError(err);
}
};
// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
let canvasOutput = document.getElementById('canvasOutput');
let canvasContext = canvasOutput.getContext('2d');
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
utils.startCamera('qvga', onVideoStarted, 'videoInput');
} else {
utils.stopCamera();
onVideoStopped();
}
});
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
videoInput.width = videoInput.videoWidth;
videoInput.height = videoInput.videoHeight;
utils.executeCode('codeEditor');
}
function onVideoStopped() {
streaming = false;
canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
startAndStop.innerText = 'Start';
}
utils.loadOpenCv(() => {
startAndStop.removeAttribute('disabled');
});
</script>
</body>
</html>
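
The loop above budgets 1000/FPS ms per frame with setTimeout. As a design alternative, requestAnimationFrame lets the browser pick the frame timing; a sketch under the same assumptions (src, dst, cap and the streaming flag are the variables already defined by the page):

function processVideoRaf() {
    if (!streaming) {
        src.delete();
        dst.delete();
        return;
    }
    cap.read(src);
    cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY);
    cv.imshow('canvasOutput', dst);
    // Let the browser schedule the next frame instead of computing a delay by hand.
    requestAnimationFrame(processVideoRaf);
}
requestAnimationFrame(processVideoRaf);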

View File

@ -0,0 +1,77 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Background Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Background Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let gray = new cv.Mat();
let opening = new cv.Mat();
let coinsBg = new cv.Mat();
cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(gray, gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU);
// get background
let M = cv.Mat.ones(3, 3, cv.CV_8U);
cv.erode(gray, gray, M);
cv.dilate(gray, opening, M);
cv.dilate(opening, coinsBg, M, new cv.Point(-1, -1), 3);
cv.imshow('canvasOutput', coinsBg);
src.delete(); dst.delete(); gray.delete(); opening.delete(); coinsBg.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('coins.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>
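
The erode-then-dilate pair above is a morphological opening, which cv.morphologyEx can express in a single call. A sketch of the same background extraction under that assumption:

let src = cv.imread('canvasInput');
let gray = new cv.Mat();
let opening = new cv.Mat();
let coinsBg = new cv.Mat();
cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(gray, gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU);
let M = cv.Mat.ones(3, 3, cv.CV_8U);
// One opening call instead of the separate erode + dilate above.
cv.morphologyEx(gray, opening, cv.MORPH_OPEN, M);
// "Sure background" is the opening dilated a few extra times, as before.
cv.dilate(opening, coinsBg, M, new cv.Point(-1, -1), 3);
cv.imshow('canvasOutput', coinsBg);
src.delete(); gray.delete(); opening.delete(); coinsBg.delete(); M.delete();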

View File

@ -0,0 +1,82 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Distance Transform Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Distance Transform Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let gray = new cv.Mat();
let opening = new cv.Mat();
let coinsBg = new cv.Mat();
let coinsFg = new cv.Mat();
let distTrans = new cv.Mat();
cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(gray, gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU);
let M = cv.Mat.ones(3, 3, cv.CV_8U);
cv.erode(gray, gray, M);
cv.dilate(gray, opening, M);
cv.dilate(opening, coinsBg, M, new cv.Point(-1, -1), 3);
// distance transform
cv.distanceTransform(opening, distTrans, cv.DIST_L2, 5);
cv.normalize(distTrans, distTrans, 1, 0, cv.NORM_INF);
cv.imshow('canvasOutput', distTrans);
src.delete(); dst.delete(); gray.delete(); opening.delete();
coinsBg.delete(); coinsFg.delete(); distTrans.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('coins.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,83 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Foreground Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Foreground Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let gray = new cv.Mat();
let opening = new cv.Mat();
let coinsBg = new cv.Mat();
let coinsFg = new cv.Mat();
let distTrans = new cv.Mat();
cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(gray, gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU);
let M = cv.Mat.ones(3, 3, cv.CV_8U);
cv.erode(gray, gray, M);
cv.dilate(gray, opening, M);
cv.dilate(opening, coinsBg, M, new cv.Point(-1, -1), 3);
cv.distanceTransform(opening, distTrans, cv.DIST_L2, 5);
cv.normalize(distTrans, distTrans, 1, 0, cv.NORM_INF);
// get foreground
cv.threshold(distTrans, coinsFg, 0.7 * 1, 255, cv.THRESH_BINARY);
cv.imshow('canvasOutput', coinsFg);
src.delete(); dst.delete(); gray.delete(); opening.delete();
coinsBg.delete(); coinsFg.delete(); distTrans.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('coins.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>
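
The 0.7 * 1 above only works because distTrans was normalized so that its maximum is 1. A sketch that skips the normalization and thresholds at 70% of the actual peak distance instead, using cv.minMaxLoc the same way the template-match example does:

let src = cv.imread('canvasInput');
let gray = new cv.Mat();
let opening = new cv.Mat();
let coinsFg = new cv.Mat();
let distTrans = new cv.Mat();
cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(gray, gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU);
let M = cv.Mat.ones(3, 3, cv.CV_8U);
cv.erode(gray, gray, M);
cv.dilate(gray, opening, M);
cv.distanceTransform(opening, distTrans, cv.DIST_L2, 5);
// Threshold at 70% of the true peak distance instead of normalizing to [0, 1] first.
let mask = new cv.Mat();
let minMax = cv.minMaxLoc(distTrans, mask);
cv.threshold(distTrans, coinsFg, 0.7 * minMax.maxVal, 255, cv.THRESH_BINARY);
cv.imshow('canvasOutput', coinsFg);
src.delete(); gray.delete(); opening.delete(); coinsFg.delete();
distTrans.delete(); M.delete(); mask.delete();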

View File

@ -0,0 +1,71 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Threshold Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Threshold Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let gray = new cv.Mat();
// gray and threshold image
cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(gray, gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU);
cv.imshow('canvasOutput', gray);
src.delete(); dst.delete(); gray.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('coins.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@ -0,0 +1,110 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Watershed Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Watershed Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let gray = new cv.Mat();
let opening = new cv.Mat();
let coinsBg = new cv.Mat();
let coinsFg = new cv.Mat();
let distTrans = new cv.Mat();
let unknown = new cv.Mat();
let markers = new cv.Mat();
// gray and threshold image
cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(gray, gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU);
// get background
let M = cv.Mat.ones(3, 3, cv.CV_8U);
cv.erode(gray, gray, M);
cv.dilate(gray, opening, M);
cv.dilate(opening, coinsBg, M, new cv.Point(-1, -1), 3);
// distance transform
cv.distanceTransform(opening, distTrans, cv.DIST_L2, 5);
cv.normalize(distTrans, distTrans, 1, 0, cv.NORM_INF);
// get foreground
cv.threshold(distTrans, coinsFg, 0.7 * 1, 255, cv.THRESH_BINARY);
coinsFg.convertTo(coinsFg, cv.CV_8U, 1, 0);
cv.subtract(coinsBg, coinsFg, unknown);
// get connected components markers
cv.connectedComponents(coinsFg, markers);
for (let i = 0; i < markers.rows; i++) {
for (let j = 0; j < markers.cols; j++) {
markers.intPtr(i, j)[0] = markers.ucharPtr(i, j)[0] + 1;
if (unknown.ucharPtr(i, j)[0] == 255) {
markers.intPtr(i, j)[0] = 0;
}
}
}
cv.cvtColor(src, src, cv.COLOR_RGBA2RGB, 0);
cv.watershed(src, markers);
// draw barriers
for (let i = 0; i < markers.rows; i++) {
for (let j = 0; j < markers.cols; j++) {
if (markers.intPtr(i, j)[0] == -1) {
src.ucharPtr(i, j)[0] = 255; // R
src.ucharPtr(i, j)[1] = 0; // G
src.ucharPtr(i, j)[2] = 0; // B
}
}
}
cv.imshow('canvasOutput', src);
src.delete(); dst.delete(); gray.delete(); opening.delete(); coinsBg.delete();
coinsFg.delete(); distTrans.delete(); unknown.delete(); markers.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('coins.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.2 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 39 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.7 KiB

View File

@ -0,0 +1,170 @@
function Utils(errorOutputId) { // eslint-disable-line no-unused-vars
let self = this;
this.errorOutput = document.getElementById(errorOutputId);
const OPENCV_URL = 'opencv.js';
this.loadOpenCv = function(onloadCallback) {
let script = document.createElement('script');
script.setAttribute('async', '');
script.setAttribute('type', 'text/javascript');
script.addEventListener('load', async () => {
if (cv.getBuildInformation)
{
console.log(cv.getBuildInformation());
onloadCallback();
}
else
{
// WASM
if (cv instanceof Promise) {
cv = await cv;
console.log(cv.getBuildInformation());
onloadCallback();
} else {
cv['onRuntimeInitialized']=()=>{
console.log(cv.getBuildInformation());
onloadCallback();
}
}
}
});
script.addEventListener('error', () => {
self.printError('Failed to load ' + OPENCV_URL);
});
script.src = OPENCV_URL;
let node = document.getElementsByTagName('script')[0];
node.parentNode.insertBefore(script, node);
};
this.createFileFromUrl = function(path, url, callback) {
let request = new XMLHttpRequest();
request.open('GET', url, true);
request.responseType = 'arraybuffer';
request.onload = function(ev) {
if (request.readyState === 4) {
if (request.status === 200) {
let data = new Uint8Array(request.response);
cv.FS_createDataFile('/', path, data, true, false, false);
callback();
} else {
self.printError('Failed to load ' + url + ' status: ' + request.status);
}
}
};
request.send();
};
this.loadImageToCanvas = function(url, canvasId) {
let canvas = document.getElementById(canvasId);
let ctx = canvas.getContext('2d');
let img = new Image();
img.crossOrigin = 'anonymous';
img.onload = function() {
canvas.width = img.width;
canvas.height = img.height;
ctx.drawImage(img, 0, 0, img.width, img.height);
};
img.src = url;
};
this.executeCode = function(textAreaId) {
try {
this.clearError();
let code = document.getElementById(textAreaId).value;
eval(code);
} catch (err) {
this.printError(err);
}
};
this.clearError = function() {
this.errorOutput.innerHTML = '';
};
this.printError = function(err) {
if (typeof err === 'undefined') {
err = '';
} else if (typeof err === 'number') {
if (!isNaN(err)) {
if (typeof cv !== 'undefined') {
err = 'Exception: ' + cv.exceptionFromPtr(err).msg;
}
}
} else if (typeof err === 'string') {
let ptr = Number(err.split(' ')[0]);
if (!isNaN(ptr)) {
if (typeof cv !== 'undefined') {
err = 'Exception: ' + cv.exceptionFromPtr(ptr).msg;
}
}
} else if (err instanceof Error) {
err = err.stack.replace(/\n/g, '<br>');
}
this.errorOutput.innerHTML = err;
};
this.loadCode = function(scriptId, textAreaId) {
let scriptNode = document.getElementById(scriptId);
let textArea = document.getElementById(textAreaId);
if (scriptNode.type !== 'text/code-snippet') {
throw Error('Unknown code snippet type');
}
textArea.value = scriptNode.text.replace(/^\n/, '');
};
this.addFileInputHandler = function(fileInputId, canvasId) {
let inputElement = document.getElementById(fileInputId);
inputElement.addEventListener('change', (e) => {
let files = e.target.files;
if (files.length > 0) {
let imgUrl = URL.createObjectURL(files[0]);
self.loadImageToCanvas(imgUrl, canvasId);
}
}, false);
};
function onVideoCanPlay() {
if (self.onCameraStartedCallback) {
self.onCameraStartedCallback(self.stream, self.video);
}
};
this.startCamera = function(resolution, callback, videoId) {
const constraints = {
'qvga': {width: {exact: 320}, height: {exact: 240}},
'vga': {width: {exact: 640}, height: {exact: 480}}};
let video = document.getElementById(videoId);
if (!video) {
video = document.createElement('video');
}
let videoConstraint = constraints[resolution];
if (!videoConstraint) {
videoConstraint = true;
}
navigator.mediaDevices.getUserMedia({video: videoConstraint, audio: false})
.then(function(stream) {
video.srcObject = stream;
video.play();
self.video = video;
self.stream = stream;
self.onCameraStartedCallback = callback;
video.addEventListener('canplay', onVideoCanPlay, false);
})
.catch(function(err) {
self.printError('Camera Error: ' + err.name + ' ' + err.message);
});
};
this.stopCamera = function() {
if (this.video) {
this.video.pause();
this.video.srcObject = null;
this.video.removeEventListener('canplay', onVideoCanPlay);
}
if (this.stream) {
this.stream.getVideoTracks()[0].stop();
}
};
};
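
For completeness, this is roughly how the example pages above consume Utils; a minimal page-script sketch with element ids matching those examples (no new API, just the calls already shown in the HTML files):

let utils = new Utils('errorMessage');                    // <p id="errorMessage"> for errors
utils.loadCode('codeSnippet', 'codeEditor');              // copy the snippet into the textarea
utils.loadImageToCanvas('lena.jpg', 'canvasInput');       // seed the input canvas
utils.addFileInputHandler('fileInput', 'canvasInput');    // let the user pick another image
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
    utils.executeCode('codeEditor');                      // eval the textarea contents
});
utils.loadOpenCv(() => {
    tryIt.removeAttribute('disabled');                    // enable the button once cv is ready
});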