Android Eye Detection and Tracking With OpenCV
Android Eye Detection and Tracking With OpenCV
ANDROID / OPENCV
28 January, 2013
140
This tutorial is for older samples, if you are starting with the new ones (2.4.6) please
look at this updated tutorial. Method and principles of matching are the same for both.
Finally I found some time to write the promised tutorial on eye detection and template
matching on Android. Since OpenCV for Android is getting better and better, some code
snippets may be old and not the best possible way to solve the problem. If you find
some improvements, please comment or contact me; I will edit this post and share it
with others.
We take the standard OpenCV example for face detection and extend it a little.
Android OpenCV SDK can be found here
If you aren't familiar with Eclipse and OpenCV yet, please read the basic setup of
opencv4android first.
Import the Face detection sample into Eclipse and clean/build the project to be sure it
is correctly imported and working.
As you can see in the video, there are some differences in the GUI compared to the pure sample.
There is a slider to easily change the matching method and button to recreate the eye
template.
So at first we add those elements to the GUI.
1.
mView.setDetectorType(mDetectorType);
mView.setMinFaceSize(0.2f);
getApplicationContext());
VerticalseekBar.setMax(5);
RelativeLayout.LayoutParams.WRAP_CONTENT, 400);
vsek.addRule(RelativeLayout.ALIGN_PARENT_RIGHT);
VerticalseekBar.setId(1);
VerticalseekBar
.setOnSeekBarChangeListener(new
OnSeekBarChangeListener() {
int progress,
boolean fromUser) {
method = progress;
switch (method) {
case 0:
matching_method.setText("TM_SQDIFF");
break;
case 1:
matching_method.setText("TM_SQDIFF_NORMED");
break;
case 2:
matching_method.setText("TM_CCOEFF");
break;
case 3:
matching_method.setText("TM_CCOEFF_NORMED");
break;
case 4:
matching_method.setText("TM_CCORR");
break;
case 5:
matching_method.setText("TM_CCORR_NORMED");
break;
public void
onStartTrackingTouch(SeekBar seekBar) {
public void
onStopTrackingTouch(SeekBar seekBar) {
});
matching_method.setText("TM_SQDIFF");
matching_method.setTextColor(Color.YELLOW);
RelativeLayout.LayoutParams.WRAP_CONTENT,
RelativeLayout.LayoutParams.WRAP_CONTENT);
matching_method_param
.addRule(RelativeLayout.ALIGN_PARENT_RIGHT);
matching_method_param.addRule(RelativeLayout.BELOW,
VerticalseekBar.getId());
btn.setText("Create template");
RelativeLayout.LayoutParams.WRAP_CONTENT,
RelativeLayout.LayoutParams.WRAP_CONTENT);
btnp.addRule(RelativeLayout.ALIGN_PARENT_LEFT);
btn.setId(2);
btn.setOnClickListener(new OnClickListener() {
mView.resetLearFramesCount();
});
getApplicationContext());
frameLayout.addView(mView, 0);
frameLayout.addView(btn, btnp);
frameLayout.addView(VerticalseekBar, vsek);
frameLayout.addView(matching_method, matching_method_param);
setContentView(frameLayout);
2.
3.
TAG = "Sample::FdView";
private Mat
mRgba;
private Mat
mGray;
private Mat
mZoomCorner;
private Mat
mZoomWindow;
private Mat
mZoomWindow2;
// Helper Mat
private Mat
mResult;
private Mat
teplateR;
private Mat
teplateL;
private File
mCascadeFile;
private CascadeClassifier
mJavaDetector;
private CascadeClassifier
mCascadeER;
private CascadeClassifier
mCascadeEL;
JAVA_DETECTOR
= 0;
NATIVE_DETECTOR
= 1;
// Matching methods
= 0;
= 1;
= 2;
= 3;
= 4;
= 5;
private int
mDetectorType
= JAVA_DETECTOR;
private float
mRelativeFaceSize = 0;
private int
mAbsoluteFaceSize = 0;
private int
learn_frames = 0;
// match value
private double
match_value;
private Rect
Now we need to load the cascade classifier files for the left and right eye —
haarcascade_lefteye_2splits.xml, distributed with the OpenCV package (data folder). I
used the same classifier for both eyes, because for the right eye,
haarcascade_lefteye_2splits.xml gives me better results than
haarcascade_righteye_2splits.xml. But you can try it with both — simply rewrite the
filename.
super(context);
try {
InputStream is = context.getResources().openRawResource(R.raw.lbpcascade_frontalface);
int bytesRead;
os.write(buffer, 0, bytesRead);
is.close();
os.close();
int bytesReadER;
oser.write(bufferER, 0, bytesReadER);
iser.close();
oser.close();
//----------------------------------------------------------------------------------------------------
int bytesReadEL;
osel.write(bufferEL, 0, bytesReadEL);
isel.close();
osel.close();
// ------------------------------------------------------------------------------------------------------
mJavaDetector = null;
mCascadeER=null;
mCascadeEL=null;
} else
cascadeDir.delete();
cascadeFileER.delete();
cascadeDirER.delete();
cascadeFileEL.delete();
cascadeDirEL.delete();
} catch (IOException e) {
e.printStackTrace();
@Override
...
if (mDetectorType == JAVA_DETECTOR)
if (mJavaDetector != null)
CreateAuxiliaryMats();
for (int i = 0; i < facesArray.length; i++){ Rect r = facesArray[i]; Core.rectangle(mGray, r.tl(), r.br(), new
Scalar(0, 255, 0, 255), 3); // Draw rectangle around the face Core.rectangle(mRgba, r.tl(), r.br(), new Scalar(0,
255, 0, 255), 3);
Now we detect the face — right, nothing new, it's the face detection sample :) What about
eyes? Once a face is found, it reduces our ROI (region of interest, where we will be finding
eyes) to the face rectangle only. From face anatomy we can exclude the bottom part of the
face with the mouth and some top part with the forehead and hair. It can be computed
relative to the face size. See the picture below: original image -> detected face -> eye
area -> area split into right and left eye areas. It saves computing power.
// split it
Rect eyearea_left = new Rect(r.x + r.width/16 + (r.width - 2*r.width/16)/2, (int)(r.y + (r.height/4.5)), (r.width - 2*r.width/16)/2, (int)(r.height/3.0));
// draw the area - mGray is working grayscale mat, if you want to see area in rgb preview, change mGray to mRgba
We count the first 5 frames for learning. The get_template function takes a classifier,
an area to detect in, and the desired size of the new template.
if(learn_frames<5){
teplateR = get_template(mCascadeER,eyearea_right,24);
teplateL = get_template(mCascadeEL,eyearea_left,24);
learn_frames++;
}else{
...
Zoom method:
if (mGray.empty())
return;
if (mZoomWindow == null){
private double
Point matchLoc;
int result_cols =
mROI.cols() - mTemplate.cols() + 1;
if(mTemplate.cols()==0 ||mTemplate.rows()==0){
return 0.0;
switch (type){
case TM_SQDIFF:
break;
case TM_SQDIFF_NORMED:
break;
case TM_CCOEFF:
break;
case TM_CCOEFF_NORMED:
break;
case TM_CCORR:
break;
case TM_CCORR_NORMED:
break;
Core.MinMaxLocResult mmres =
Core.minMaxLoc(mResult);
{ matchLoc = mmres.minLoc; }
else
{ matchLoc = mmres.maxLoc; }
Point
Point
{ return mmres.maxVal; }
else
{ return mmres.minVal; }
In the following picture you can see all ROI areas and matching in progress — the yellow
rectangles.
get_template finds the eye in the desired ROI using the Haar classifier; if an eye is found,
it reduces the ROI to the eye only and searches for the darkest point — the pupil. It then
creates a rectangle of the desired size, centered on the pupil — our new eye template.
private Mat
Rect e = eyesArray[i];
// reduce ROI
mROI = mGray.submat(eye_only_rectangle);
template = (mGray.submat(eye_template)).clone();
return template;
return template;