migrated code to public repository

This commit is contained in:
mohsen-mansouryar 2016-03-09 19:52:35 +01:00
parent a7df82d7a4
commit f34dc653e5
233 changed files with 16279 additions and 186 deletions


BIN code/recording/aruco_test (new binary file)
Binary file not shown.


@@ -0,0 +1,242 @@
/*****************************************************************************************
Copyright 2011 Rafael Muñoz Salinas. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
THIS SOFTWARE IS PROVIDED BY Rafael Muñoz Salinas ''AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Rafael Muñoz Salinas OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the
authors and should not be interpreted as representing official policies, either expressed
or implied, of Rafael Muñoz Salinas.
********************************************************************************************/
#include <iostream>
#include <fstream>
#include <sstream>
#include "aruco.h"
#include "cvdrawingutils.h"
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
using namespace aruco;
string TheInputVideo;
string TheIntrinsicFile;
float TheMarkerSize = -1;
int ThePyrDownLevel;
MarkerDetector MDetector;
VideoCapture TheVideoCapturer;
vector< Marker > TheMarkers;
Mat TheInputImage, TheInputImageCopy;
CameraParameters TheCameraParameters;
void cvTackBarEvents(int pos, void *);
bool readCameraParameters(string TheIntrinsicFile, CameraParameters &CP, Size size);
pair< double, double > AvrgTime(0, 0); // determines the average time required for detection
double ThresParam1, ThresParam2;
int iThresParam1, iThresParam2;
int waitTime = 0;
/************************************
*
*
*
*
************************************/
bool readArguments(int argc, char **argv) {
if (argc < 2) {
cerr << "Invalid number of arguments" << endl;
cerr << "Usage: (in.avi|live[:idx_cam=0]) [intrinsics.yml] [size]" << endl;
return false;
}
TheInputVideo = argv[1];
if (argc >= 3)
TheIntrinsicFile = argv[2];
if (argc >= 4)
TheMarkerSize = atof(argv[3]);
if (argc == 3)
        cerr << "NOTE: You need the marker size to see 3D info!!!!" << endl;
return true;
}
int findParam(std::string param, int argc, char *argv[]) {
for (int i = 0; i < argc; i++)
if (string(argv[i]) == param)
return i;
return -1;
}
/************************************
*
*
*
*
************************************/
int main(int argc, char **argv) {
try {
if (readArguments(argc, argv) == false) {
return 0;
}
// parse arguments
// read from camera or from file
if (TheInputVideo.find("live") != string::npos) {
int vIdx = 0;
// check if the :idx is here
char cad[100];
if (TheInputVideo.find(":") != string::npos) {
std::replace(TheInputVideo.begin(), TheInputVideo.end(), ':', ' ');
sscanf(TheInputVideo.c_str(), "%s %d", cad, &vIdx);
}
cout << "Opening camera index " << vIdx << endl;
TheVideoCapturer.open(vIdx);
waitTime = 10;
} else
TheVideoCapturer.open(TheInputVideo);
// check video is open
if (!TheVideoCapturer.isOpened()) {
cerr << "Could not open video" << endl;
return -1;
}
bool isVideoFile = false;
if (TheInputVideo.find(".avi") != std::string::npos || TheInputVideo.find("live") != string::npos)
isVideoFile = true;
// read first image to get the dimensions
TheVideoCapturer >> TheInputImage;
// read camera parameters if passed
if (TheIntrinsicFile != "") {
TheCameraParameters.readFromXMLFile(TheIntrinsicFile);
TheCameraParameters.resize(TheInputImage.size());
}
// Configure other parameters
if (ThePyrDownLevel > 0)
MDetector.pyrDown(ThePyrDownLevel);
// Create gui
// cv::namedWindow("thres", 1);
// cv::namedWindow("in", 1);
MDetector.setThresholdParams(7, 7);
MDetector.setThresholdParamRange(2, 0);
// MDetector.enableLockedCornersMethod(true);
// MDetector.setCornerRefinementMethod ( MarkerDetector::SUBPIX );
MDetector.getThresholdParams(ThresParam1, ThresParam2);
iThresParam1 = ThresParam1;
iThresParam2 = ThresParam2;
// cv::createTrackbar("ThresParam1", "in", &iThresParam1, 25, cvTackBarEvents);
// cv::createTrackbar("ThresParam2", "in", &iThresParam2, 13, cvTackBarEvents);
char key = 0;
int index = 0;
// capture until press ESC or until the end of the video
TheVideoCapturer.retrieve(TheInputImage);
do {
// copy image
index++; // number of images captured
double tick = (double)getTickCount(); // for checking the speed
// Detection of markers in the image passed
MDetector.detect(TheInputImage, TheMarkers, TheCameraParameters, TheMarkerSize);
            // check the speed by calculating the mean speed of all iterations
AvrgTime.first += ((double)getTickCount() - tick) / getTickFrequency();
AvrgTime.second++;
cout << "\rTime detection=" << 1000 * AvrgTime.first / AvrgTime.second << " milliseconds nmarkers=" << TheMarkers.size() << std::flush;
// print marker info and draw the markers in image
TheInputImage.copyTo(TheInputImageCopy);
for (unsigned int i = 0; i < TheMarkers.size(); i++) {
cout << endl << TheMarkers[i];
TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255), 1);
}
if (TheMarkers.size() != 0)
cout << endl;
            // print other rectangles that contain no valid markers
/** for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
aruco::Marker m( MDetector.getCandidates()[i],999);
m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
}*/
// draw a 3d cube in each marker if there is 3d info
if (TheCameraParameters.isValid())
for (unsigned int i = 0; i < TheMarkers.size(); i++) {
CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
}
// DONE! Easy, right?
// show input with augmented information and the thresholded image
// cv::imshow("in", TheInputImageCopy);
// cv::imshow("thres", MDetector.getThresholdedImage());
// key = cv::waitKey(waitTime); // wait for key to be pressed
// key = cv::waitKey(10); // wait for key to be pressed
key = 10;
if (isVideoFile)
TheVideoCapturer.retrieve(TheInputImage);
} while (key != 27 && (TheVideoCapturer.grab() || !isVideoFile));
} catch (std::exception &ex)
{
cout << "Exception :" << ex.what() << endl;
}
}
/************************************
*
*
*
*
************************************/
void cvTackBarEvents(int pos, void *) {
if (iThresParam1 < 3)
iThresParam1 = 3;
if (iThresParam1 % 2 != 1)
iThresParam1++;
    if (iThresParam2 < 1)
        iThresParam2 = 1;
ThresParam1 = iThresParam1;
ThresParam2 = iThresParam2;
MDetector.setThresholdParams(ThresParam1, ThresParam2);
// recompute
MDetector.detect(TheInputImage, TheMarkers, TheCameraParameters);
TheInputImage.copyTo(TheInputImageCopy);
for (unsigned int i = 0; i < TheMarkers.size(); i++)
TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255), 1);
    // print other rectangles that contain no valid markers
/*for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
aruco::Marker m( MDetector.getCandidates()[i],999);
m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
}*/
// draw a 3d cube in each marker if there is 3d info
if (TheCameraParameters.isValid())
for (unsigned int i = 0; i < TheMarkers.size(); i++)
CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
// cv::imshow("in", TheInputImageCopy);
// cv::imshow("thres", MDetector.getThresholdedImage());
}
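Usage note: per readArguments above, the binary takes (in.avi|live[:idx_cam=0]) [intrinsics.yml] [size]. A hypothetical invocation with the 0.16 m marker used in this project:

    ./aruco_test live:0 camera.yml 0.16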


@@ -0,0 +1,89 @@
from __future__ import division
import os, sys, cv2
import numpy as np
import shutil
# PARTICIPANTS = ['p10', 'p16', 'p13', 'p24', 'p5', 'p14', 'p26', 'p12', 'p20', 'p7', 'p15', 'p11', 'p21', 'p25']
# PARTICIPANTS = ['p21']
# ROOT = '/BS/3D_Gaze_Tracking/archive00/participants/'
# DEST = '/BS/3D_Gaze_Tracking/work/Eye_Images/'
DEST = '/home/mmbrian/3D_Gaze_Tracking/work/Marker_Eye_Images_7/'
ROOT = '/home/mmbrian/HiWi/etra2016_mohsen/code/recording/data/participants'
def main():
# distCoeffs = np.load("/BS/3D_Gaze_Tracking/work/dist.npy")
# cameraMatrix = np.load("/BS/3D_Gaze_Tracking/work/cameraMatrix.npy")
distCoeffs = np.load("dist.npy")
cameraMatrix = np.load("cameraMatrix.npy")
PARTICIPANTS = [sys.argv[1]]
processed = 0
for p in os.listdir(ROOT):
if p in PARTICIPANTS:
print '> Collecting images for participant', p
d1 = os.path.join(ROOT, p)
d1 = os.path.join(d1, os.listdir(d1)[0]) # ../p_i/../
for d2 in os.listdir(d1):
path = os.path.join(d1, d2)
print '> Processing', path
processPath(path, p, d2, cameraMatrix, distCoeffs)
processed+=1
print '> Processed %s participants.' % processed
def processPath(path = None, participant = None, experiment = None, cameraMatrix = None, distCoeffs = None):
if not path:
path = sys.argv[1]
raw_images_dir = os.path.join(DEST, 'ImagesRaw')
raw_images_dir = os.path.join(raw_images_dir, participant)
raw_images_dir = os.path.join(raw_images_dir, experiment)
undist_images_dir = os.path.join(DEST, 'ImagesUndist')
undist_images_dir = os.path.join(undist_images_dir, participant)
undist_images_dir = os.path.join(undist_images_dir, experiment)
if not os.path.exists(raw_images_dir):
os.makedirs(raw_images_dir)
else:
print '> Already processed.'
return
if not os.path.exists(undist_images_dir):
os.makedirs(undist_images_dir)
else:
print '> Already processed.'
return
# else:
# print '> Removing old images...'
# shutil.rmtree(raw_images_dir)
# return
# os.makedirs(raw_images_dir)
p_frames = np.load(os.path.join(path, 'p_frames.npy'))
frames = sorted(reduce(lambda l1,l2: list(l1)+list(l2), p_frames))
cap = cv2.VideoCapture(os.path.join(path, 'eye0.mp4'))
fc_eye = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
fps_eye = int(cap.get(cv2.cv.CV_CAP_PROP_FPS))
eye_frame = 0
world_frame = 0
status, img = cap.read() # extract the first frame
while status:
try:
if eye_frame == frames[world_frame]:
save_dir = os.path.join(raw_images_dir, 'img_%s.png' %(frames[world_frame]))
cv2.imwrite(save_dir, img)
save_dir = os.path.join(undist_images_dir, 'img_%s.png' %(frames[world_frame]))
undistImg = cv2.undistort(img, cameraMatrix, distCoeffs)
cv2.imwrite(save_dir, undistImg)
world_frame+=1
        except IndexError: # ran past the last marker frame
            break
eye_frame+=1
status, img = cap.read()
cap.release()
# print '> Removing world video...'
# os.remove(world_video)
print "> Processed %d frames." % (world_frame)
if __name__ == '__main__':
main()
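The script reads the participant id from the command line (PARTICIPANTS = [sys.argv[1]] above). A hypothetical invocation, assuming the file is saved as extract_eye_images.py:

    python extract_eye_images.py p10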

code/recording/data/.gitignore (vendored, new file, +3)

@@ -0,0 +1,3 @@
*.jpg
*.JPG
*.mkv


@@ -0,0 +1,238 @@
name of display: :0
version number: 11.0
vendor string: The X.Org Foundation
vendor release number: 11600000
X.Org version: 1.16.0
maximum request size: 16777212 bytes
motion buffer size: 256
bitmap unit, bit order, padding: 32, LSBFirst, 32
image byte order: LSBFirst
number of supported pixmap formats: 7
supported pixmap formats:
depth 1, bits_per_pixel 1, scanline_pad 32
depth 4, bits_per_pixel 8, scanline_pad 32
depth 8, bits_per_pixel 8, scanline_pad 32
depth 15, bits_per_pixel 16, scanline_pad 32
depth 16, bits_per_pixel 16, scanline_pad 32
depth 24, bits_per_pixel 32, scanline_pad 32
depth 32, bits_per_pixel 32, scanline_pad 32
keycode range: minimum 8, maximum 255
focus: window 0x240000b, revert to Parent
number of extensions: 29
BIG-REQUESTS
Composite
DAMAGE
DOUBLE-BUFFER
DPMS
DRI2
DRI3
GLX
Generic Event Extension
MIT-SCREEN-SAVER
MIT-SHM
Present
RANDR
RECORD
RENDER
SECURITY
SGI-GLX
SHAPE
SYNC
X-Resource
XC-MISC
XFIXES
XFree86-DGA
XFree86-VidModeExtension
XINERAMA
XInputExtension
XKEYBOARD
XTEST
XVideo
default screen number: 0
number of screens: 1
screen #0:
dimensions: 3286x1080 pixels (869x286 millimeters)
resolution: 96x96 dots per inch
depths (7): 24, 1, 4, 8, 15, 16, 32
root window id: 0xbd
depth of root window: 24 planes
number of colormaps: minimum 1, maximum 1
default colormap: 0x42
default number of colormap cells: 256
preallocated pixels: black 0, white 16777215
options: backing-store WHEN MAPPED, save-unders NO
largest cursor: 256x256
current input event mask: 0xda4033
KeyPressMask KeyReleaseMask EnterWindowMask
LeaveWindowMask KeymapStateMask StructureNotifyMask
SubstructureNotifyMask SubstructureRedirectMask PropertyChangeMask
ColormapChangeMask
number of visuals: 20
default visual id: 0x40
visual:
visual id: 0x40
class: TrueColor
depth: 24 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
visual:
visual id: 0x41
class: DirectColor
depth: 24 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
visual:
visual id: 0xab
class: TrueColor
depth: 24 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
visual:
visual id: 0xac
class: TrueColor
depth: 24 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
visual:
visual id: 0xad
class: TrueColor
depth: 24 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
visual:
visual id: 0xae
class: TrueColor
depth: 24 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
visual:
visual id: 0xaf
class: TrueColor
depth: 24 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
visual:
visual id: 0xb0
class: TrueColor
depth: 24 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
visual:
visual id: 0xb1
class: TrueColor
depth: 24 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
visual:
visual id: 0xb2
class: TrueColor
depth: 24 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
visual:
visual id: 0xb3
class: DirectColor
depth: 24 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
visual:
visual id: 0xb4
class: DirectColor
depth: 24 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
visual:
visual id: 0xb5
class: DirectColor
depth: 24 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
visual:
visual id: 0xb6
class: DirectColor
depth: 24 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
visual:
visual id: 0xb7
class: DirectColor
depth: 24 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
visual:
visual id: 0xb8
class: DirectColor
depth: 24 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
visual:
visual id: 0xb9
class: DirectColor
depth: 24 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
visual:
visual id: 0xba
class: DirectColor
depth: 24 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
visual:
visual id: 0xbb
class: DirectColor
depth: 24 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
visual:
visual id: 0x7e
class: TrueColor
depth: 32 planes
available colormap entries: 256 per subfield
red, green, blue masks: 0xff0000, 0xff00, 0xff
significant bits in color specification: 8 bits
Screen 0: minimum 8 x 8, current 3286 x 1080, maximum 32767 x 32767
LVDS1 connected primary 1366x768+0+0 (normal left inverted right x axis y axis) 344mm x 194mm
1366x768 60.0*+
1360x768 59.8 60.0
1024x768 60.0
800x600 60.3 56.2
640x480 59.9
VGA1 connected 1920x1080+1366+0 (normal left inverted right x axis y axis) 533mm x 300mm
1920x1080 60.0*+
1600x1200 60.0
1280x1024 60.0
1152x864 75.0
1024x768 60.0
800x600 60.3
640x480 60.0 59.9
HDMI1 disconnected (normal left inverted right x axis y axis)
DP1 disconnected (normal left inverted right x axis y axis)
VIRTUAL1 disconnected (normal left inverted right x axis y axis)
256 pixels for the marker, 1920px is the width of the screen,
and as I measured it the screen is 121cm wide
==> 256./1920 * 121 ~ 16cm is the width of our marker (also measured it, seems legit)
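The same arithmetic as a quick Python check (values as measured above; the ~0.16 m result matches the SQUARE_SIZE used by the processing scripts below):

    marker_px = 256.   # marker width on screen, in pixels
    screen_px = 1920.  # screen width in pixels (VGA1 above)
    screen_cm = 121.   # measured physical screen width in cm
    print(marker_px / screen_px * screen_cm)  # ~16.13 cm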


@@ -0,0 +1,14 @@
Recording Name 2015_10_03
Start Date 03.10.2015
Start Time 17:48:58
Duration Time 00:03:00
Eye Mode monocular
Duration Time 00:03:00
World Camera Frames 4309
World Camera Resolution 720x1280
Capture Software Version 0.5.7
User mmbrian
Platform Linux
Machine Brian
Release 3.16.0-49-generic
Version #65~14.04.1-Ubuntu SMP Wed Sep 9 10:03:23 UTC 2015


@@ -0,0 +1,2 @@
name
additional_field change_me


@@ -0,0 +1,14 @@
Recording Name 2015_10_03
Start Date 03.10.2015
Start Time 18:06:58
Duration Time 00:03:37
Eye Mode monocular
Duration Time 00:03:37
World Camera Frames 5215
World Camera Resolution 720x1280
Capture Software Version 0.5.7
User mmbrian
Platform Linux
Machine Brian
Release 3.16.0-49-generic
Version #65~14.04.1-Ubuntu SMP Wed Sep 9 10:03:23 UTC 2015


@@ -0,0 +1,2 @@
name
additional_field change_me


@@ -0,0 +1,20 @@
%YAML:1.0
calibration_time: "Sat 03 Oct 2015 18:42:34 CEST"
nframes: 30
image_width: 640
image_height: 360
board_width: 8
board_height: 6
square_size: 0.00122
flags: 0
camera_matrix: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 678.75284504, 0. , 301.29715044, 0. ,
667.94939515, 209.10259404, 0. , 0. , 1. ]
distortion_coefficients: !!opencv-matrix
rows: 5
cols: 1
dt: d
data: [ 0.12812696, -0.13076179, 0.00631552, -0.01349366, -2.10210424]
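A sketch of reading this file from Python, assuming OpenCV 3+ (where FileStorage is exposed to Python); the scripts in this repo target OpenCV 2 and instead string-parse the file in readCameraParams (see calibrate_and_save.py below):

    import cv2
    fs = cv2.FileStorage('camera.yml', cv2.FILE_STORAGE_READ)
    camera_matrix = fs.getNode('camera_matrix').mat()           # 3x3 intrinsics
    dist_coeffs = fs.getNode('distortion_coefficients').mat()   # 5x1 distortion
    fs.release()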


@@ -0,0 +1,9 @@
18=(376.883,493.993) (367.418,432.916) (429.433,424.199) (438.674,484.834) Txyz=-0.138299 0.0442204 0.497911 Rxyz=-3.10292 0.221729 -0.0411571
130=(557.896,467.496) (549.296,407.464) (609.848,398.895) (617.985,458.804) Txyz=-0.0245979 0.0282039 0.504007 Rxyz=-3.08427 0.211876 -0.115737
301=(748.172,549.641) (740.68,491.47) (799.57,482.841) (806.625,541.024) Txyz=0.0988437 0.0832783 0.516703 Rxyz=-3.05697 0.208073 -0.108889
351=(573.305,576.144) (564.956,517.414) (624.868,508.643) (632.845,566.939) Txyz=-0.0150596 0.0992645 0.509626 Rxyz=-3.04795 0.206556 -0.108844
399=(720.388,332.456) (712.982,271.43) (773.556,263.268) (780.206,324.203) Txyz=0.0793988 -0.0583554 0.504764 Rxyz=-3.05028 0.207139 -0.10664
456=(542.036,356.791) (533.264,295.4) (594.619,287.303) (602.898,348.62) Txyz=-0.0341846 -0.0425535 0.498523 Rxyz=-3.05347 0.204274 -0.0319069
608=(358.957,381.543) (348.602,319.251) (411.724,311.228) (421.632,373.302) Txyz=-0.147483 -0.0265521 0.491548 Rxyz=-3.05284 0.204884 -0.0436
659=(734.247,442.123) (726.628,382.526) (786.131,374.246) (793.406,433.659) Txyz=0.0889698 0.0122942 0.510193 Rxyz=-3.07904 0.209142 -0.116808
707=(394.177,604.361) (384.954,544.505) (446.419,535.311) (455.177,594.624) Txyz=-0.129612 0.116083 0.506384 Rxyz=-3.09125 0.216361 -0.0913303
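Each line is <marker id>=(four corner points in image coordinates), followed by the marker's translation (Txyz, in the units of the marker size given to ArUco, metres here) and its Rodrigues rotation vector (Rxyz) w.r.t. the scene camera. A minimal parser sketch for this log format (function name hypothetical):

    import re
    import numpy as np

    def parse_marker_line(line):
        marker_id, rest = line.split('=', 1)
        corners = np.array(re.findall(r'\(([-\d.]+),([-\d.]+)\)', rest), dtype=float)  # 4x2
        txyz = np.array(rest.split('Txyz=')[1].split('Rxyz=')[0].split(), dtype=float)
        rxyz = np.array(rest.split('Rxyz=')[1].split(), dtype=float)
        return int(marker_id), corners, txyz, rxyz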


@@ -0,0 +1,10 @@
18=(337.292,468.882) (324.79,404.901) (390.651,394.692) (402.499,457.982) Txyz=-0.154857 0.0256425 0.473357 Rxyz=-3.04908 0.244502 -0.076421
130=(527.352,437.35) (516.93,374.763) (580.082,364.783) (589.941,426.958) Txyz=-0.0420971 0.00724924 0.483541 Rxyz=-3.06316 0.24193 -0.165829
301=(725.485,517.338) (716.988,457.961) (777.014,447.674) (785.252,507.095) Txyz=0.0819062 0.0595391 0.502175 Rxyz=-3.02787 0.238118 -0.13582
351=(546.056,549.115) (536.021,488.78) (598.013,478.114) (607.443,538.074) Txyz=-0.0314374 0.0779249 0.492233 Rxyz=-3.02323 0.23531 -0.158536
399=(693.675,293.595) (685.114,229.493) (747.716,220.171) (755.538,283.927) Txyz=0.0601648 -0.081201 0.484901 Rxyz=-3.01722 0.233661 -0.14594
456=(507.989,321.903) (497.092,257.244) (561.524,247.809) (571.784,312.326) Txyz=-0.052872 -0.0630324 0.476038 Rxyz=-3.02846 0.233055 -0.0858767
608=(313.964,351.087) (300.582,284.895) (368.329,275.447) (380.611,341.093) Txyz=-0.165616 -0.0445972 0.465881 Rxyz=-3.02714 0.234292 -0.108089
659=(709.625,407.24) (701.037,345.51) (762.366,335.935) (770.285,397.398) Txyz=0.0707768 -0.0109919 0.492042 Rxyz=-3.03724 0.23253 -0.158296
707=(359.279,582.974) (347.403,521.27) (412.05,510.011) (423.197,571.361) Txyz=-0.145168 0.0967286 0.484355 Rxyz=-3.05352 0.247565 -0.117771


@@ -0,0 +1,24 @@
173=(699.455,499.98) (691.307,438.567) (752.606,429.954) (760.682,490.932) Txyz=0.0649598 0.047518 0.494322 Rxyz=-3.09118 0.21103 -0.110259
243=(558.056,453.388) (549.028,391.358) (611.729,382.452) (620.144,444.44) Txyz=-0.0231608 0.0178109 0.487122 Rxyz=-3.06591 0.212071 -0.107365
288=(509.858,595.993) (501.039,534.333) (563.524,524.831) (572.148,586.371) Txyz=-0.0533126 0.106812 0.490712 Rxyz=-3.10034 0.213607 -0.101361
325=(690.555,434.231) (682.66,372.562) (744.469,363.995) (751.952,425.615) Txyz=0.0593093 0.0062092 0.491782 Rxyz=-3.06472 0.206759 -0.107539
347=(363.361,550.855) (353.573,487.91) (418.325,478.379) (427.426,540.869) Txyz=-0.141701 0.0769299 0.483147 Rxyz=-3.10662 0.220845 -0.0927639
356=(480.526,396.484) (470.666,333.017) (534.73,324.316) (543.978,387.486) Txyz=-0.0699126 -0.0175944 0.480141 Rxyz=-3.06477 0.215313 -0.0848652
358=(432.44,540.204) (423.087,477.693) (486.636,468.411) (495.612,530.387) Txyz=-0.10047 0.0711499 0.486519 Rxyz=-3.1106 0.220745 -0.0922127
392=(442.289,606.858) (433.197,544.676) (496.376,535.089) (505.121,596.737) Txyz=-0.0950006 0.113034 0.489782 Rxyz=-3.11116 0.21787 -0.102892
399=(708.4,565.16) (700.069,504.329) (761.353,495.298) (769.126,555.978) Txyz=0.0705462 0.0888017 0.496123 Rxyz=-3.07587 0.210512 -0.114342
449=(548.411,386.917) (539.147,323.647) (602.773,315.279) (611.129,378.093) Txyz=-0.0286112 -0.0233864 0.481884 Rxyz=-3.01791 0.206092 -0.143664
528=(624.563,443.811) (616.147,381.957) (678.347,373.238) (686.266,434.894) Txyz=0.0180922 0.0120028 0.488909 Rxyz=-3.05213 0.209138 -0.118358
554=(490.591,463.222) (481.316,400.801) (544.603,391.939) (553.41,454.083) Txyz=-0.0645677 0.0236915 0.485569 Rxyz=-3.08837 0.214389 -0.108485
559=(373.466,617.896) (364.062,555.286) (428.305,545.501) (437.448,607.679) Txyz=-0.136017 0.118491 0.485214 Rxyz=-3.10462 0.217903 -0.0966003
571=(615.48,377.577) (607.189,314.515) (670.369,305.988) (677.674,368.791) Txyz=0.0125728 -0.0290879 0.482861 Rxyz=-2.98808 0.202386 -0.146367
655=(342.345,415.599) (330.378,351.444) (396.27,342.763) (407.057,406.633) Txyz=-0.151935 -0.00585876 0.474021 Rxyz=-3.0196 0.207689 -0.0499907
660=(500.258,529.687) (491.297,467.655) (554.109,458.558) (562.902,520.323) Txyz=-0.0588932 0.0651044 0.487798 Rxyz=-3.09854 0.214559 -0.0948563
664=(576.63,585.616) (568.056,524.065) (629.927,514.768) (638.399,575.89) Txyz=-0.0118773 0.100667 0.49203 Rxyz=-3.0937 0.211299 -0.126647
735=(352.832,483.454) (343.004,420.029) (407.747,411.093) (417.501,473.927) Txyz=-0.147205 0.0354299 0.480331 Rxyz=3.16704 -0.223919 0.076249
737=(633.714,509.651) (625.26,448.175) (686.896,439.242) (695.163,500.561) Txyz=0.0237445 0.0533222 0.49199 Rxyz=-3.09203 0.212088 -0.117316
782=(642.802,575.179) (634.357,514.125) (695.692,504.957) (704.056,565.739) Txyz=0.0294044 0.0947937 0.494568 Rxyz=-3.08374 0.21306 -0.111104
786=(412.026,405.993) (401.221,342.19) (465.902,333.555) (475.958,397.018) Txyz=-0.11102 -0.011715 0.477259 Rxyz=-3.03908 0.211348 -0.0405065
787=(567.301,519.555) (558.638,457.877) (620.81,448.837) (629.291,510.389) Txyz=-0.0175444 0.0592119 0.489873 Rxyz=-3.09406 0.212163 -0.109216
842=(422.538,473.23) (412.683,410.393) (476.666,401.431) (486.064,463.924) Txyz=-0.10576 0.0295403 0.482561 Rxyz=-3.08552 0.217952 -0.0668197
914=(682.094,368.247) (674.609,305.389) (737.422,296.877) (743.864,359.666) Txyz=0.0536882 -0.0348577 0.485769 Rxyz=-2.99698 0.201008 -0.128513


@@ -0,0 +1,24 @@
173=(723.927,447.07) (715.643,384.723) (778.576,375.426) (786.258,437.702) Txyz=0.0790166 0.0136108 0.484619 Rxyz=-3.04246 0.219174 -0.091972
243=(579.827,400.658) (570.222,337.215) (634.345,327.765) (643.039,391.221) Txyz=-0.00925742 -0.0150964 0.476483 Rxyz=-3.00322 0.218584 -0.10575
288=(532.679,544.107) (522.959,482.482) (585.978,472.648) (595.254,534.363) Txyz=-0.0388098 0.0736001 0.484942 Rxyz=-3.05784 0.224629 -0.0772962
325=(715.033,380.428) (706.95,317.043) (770.634,307.64) (777.861,370.973) Txyz=0.0731347 -0.0274384 0.480342 Rxyz=-3.00927 0.218851 -0.0908707
347=(384.833,499.337) (373.829,436.216) (438.669,426.422) (449.139,489.416) Txyz=-0.127388 0.0447468 0.477125 Rxyz=-3.06431 0.229277 -0.0535486
356=(500.555,342.84) (489.461,277.411) (554.996,268.146) (565.07,333.476) Txyz=-0.0562687 -0.0497284 0.468424 Rxyz=-2.98365 0.21573 -0.033857
358=(454.078,488.67) (443.545,425.754) (507.685,416.119) (517.61,478.697) Txyz=-0.0861319 0.0385554 0.479532 Rxyz=-3.06777 0.229777 -0.0678913
392=(465.08,554.994) (454.885,493.098) (518.413,483.204) (527.941,544.889) Txyz=-0.0804108 0.0801372 0.484963 Rxyz=-3.0663 0.226716 -0.0831945
399=(732.737,512.68) (724.51,451.377) (786.84,442.078) (794.647,503.35) Txyz=0.0850416 0.0548219 0.489455 Rxyz=-3.04315 0.219029 -0.0818131
449=(569.515,332.769) (559.638,267.491) (624.982,258.069) (633.689,323.424) Txyz=-0.0150215 -0.0558136 0.469471 Rxyz=-2.95079 0.213833 -0.102914
528=(647.503,390.54) (638.787,327.07) (702.535,317.713) (710.537,381.069) Txyz=0.0319074 -0.0212714 0.47804 Rxyz=-3.00741 0.218382 -0.102638
554=(511.635,410.921) (501.203,347.341) (565.709,337.896) (575.31,401.427) Txyz=-0.0505453 -0.00890131 0.475441 Rxyz=-3.02306 0.221724 -0.0744128
559=(396.416,566.132) (385.832,503.765) (449.873,493.88) (460.227,555.802) Txyz=-0.121709 0.0862522 0.482184 Rxyz=-3.06672 0.23014 -0.062468
571=(638.16,322.645) (629.505,257.342) (694.553,247.905) (701.927,313.41) Txyz=0.0261123 -0.0619344 0.470728 Rxyz=-2.95023 0.212263 -0.114807
655=(360.505,363.079) (347.015,297.287) (413.74,288.078) (426.117,353.694) Txyz=-0.138416 -0.0372608 0.46466 Rxyz=-2.97141 0.2152 -0.031309
660=(522.304,477.958) (512.311,415.374) (575.931,405.843) (585.329,468.295) Txyz=-0.0447246 0.0322643 0.480759 Rxyz=-3.0578 0.225199 -0.0738372
664=(599.766,533.608) (590.643,472.009) (653.283,462.292) (661.946,523.804) Txyz=0.00257891 0.0672197 0.485662 Rxyz=-3.04987 0.219942 -0.114197
735=(373.089,431.793) (361.365,367.453) (426.782,358.194) (437.982,422.065) Txyz=-0.132966 0.00355214 0.471777 Rxyz=-3.03753 0.22429 -0.0346352
737=(657.115,457.277) (648.23,394.901) (711.199,385.518) (719.5,447.765) Txyz=0.0378741 0.0197911 0.483178 Rxyz=-3.04552 0.22143 -0.0968667
782=(666.4,523.065) (657.721,461.573) (720.035,452.19) (728.332,513.475) Txyz=0.0438073 0.0610251 0.487713 Rxyz=-3.04853 0.219505 -0.0967276
786=(430.999,352.987) (418.778,287.389) (484.649,278.121) (495.913,343.557) Txyz=-0.0973854 -0.0435011 0.466652 Rxyz=-2.98462 0.217406 -0.0168059
787=(589.799,467.54) (580.54,405.102) (643.78,395.603) (652.536,457.897) Txyz=-0.00336534 0.0259621 0.481289 Rxyz=-3.03955 0.220231 -0.11357
842=(442.786,421.342) (431.874,357.423) (496.653,347.995) (507.008,411.669) Txyz=-0.0918841 -0.0026965 0.474376 Rxyz=-3.05417 0.227679 -0.0500495
914=(706.368,312.641) (699.076,247.258) (764.04,237.726) (770.078,303.24) Txyz=0.0671587 -0.0679941 0.472159 Rxyz=-2.93732 0.21098 -0.108571


@@ -0,0 +1,2 @@
/p*
/Hosna


@@ -0,0 +1,15 @@
- for participants P1 to P11, the eyetracker had to be readjusted during the experiment so that the pupil could still be tracked with high confidence. The eyetracker was rarely readjusted between two recordings at the same depth; mostly it was in between two depths. As a consequence, using a different depth's calibration for a test recording adds another source of error caused by slightly repositioned cameras.
- p12 to p26 > no readjusting the tracker
- p23 > a minor adjustment for better pupil tracking confidence from 2nd depth
- p25 is myself. My head was tilted slightly down or up in between recordings, so even for a fixed depth the data can have large errors (not so useful). It's quite hard to do better all by myself.
- for p6, 001 is split into 001 and 002, but the movement from test point 15 to 16 is lost; so from 001 only extract data for the first 15 points, and extract point 16 from the whole of 002
- for p9, depth 0, calibration video: the screen is partly outside the scene camera's view on the left, therefore at least two marker points are lost (they cannot be tracked by ArUco). It might be better not to use the data from this depth, or at least not the left edge of the grid.
- p1 to p11 + p23 had minor adjustments to the eye camera (and in turn the scene camera) in between recordings
- p12 to p22 + p24 and p26 didn't have any camera adjustments in between recordings
- p25 is data recorded from myself
- p6 and p9 are special cases to be handled separately
- data for Hosna is not useful as the eye camera wasn't robustly tracking the eye (fewer than 20% of frames with nonzero confidence)
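A hedged sketch of how these notes could be encoded when selecting recordings for analysis (the sets and the policy are illustrative, not part of the pipeline):

    # special cases from the notes above
    SPECIAL = {'p6', 'p9'}         # split recording / markers lost at depth 0; handle separately
    UNRELIABLE = {'p25', 'Hosna'}  # self-recorded / pupil rarely tracked
    def usable(pid):
        return pid.startswith('p') and pid not in SPECIAL | UNRELIABLE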


@@ -0,0 +1,14 @@
Recording Name 2015_10_03
Start Date 03.10.2015
Start Time 20:15:10
Duration Time 00:00:52
Eye Mode monocular
Duration Time 00:00:52
World Camera Frames 1265
World Camera Resolution 720x1280
Capture Software Version 0.5.7
User mmbrian
Platform Linux
Machine Brian
Release 3.16.0-49-generic
Version #65~14.04.1-Ubuntu SMP Wed Sep 9 10:03:23 UTC 2015


@@ -0,0 +1,2 @@
name
additional_field change_me


@@ -0,0 +1,61 @@
import os
from tracker import performMarkerTracking
ROOT_DATA_DIR = '/home/mmbrian/HiWi/etra2016_mohsen/code/recording/data/participants'
SQUARE_SIZE = '0.16'
def main(force = False):
'''
    Processes all participants' recordings, performs marker tracking on each video, and stores the marker data in an npy file next to the video
'''
c = 0
for d1 in os.listdir(ROOT_DATA_DIR):
if d1.startswith('p'): # every participant
d2 = os.path.join(ROOT_DATA_DIR, d1) # .../pi/
d2 = os.path.join(d2, os.listdir(d2)[0]) # .../pi/../
for d3 in os.listdir(d2): # every recording
d4 = os.path.join(d2, d3) # .../pi/../00X/
print '> Processing', d4
frames_dir = os.path.join(d4, '_aruco_frames.npy')
if os.path.isfile(frames_dir):
if not force: # already processed this recording
print '> Recording already processed...'
continue
else: # video processed, but forced to process again
# remove old file
print '> Reprocessing file:', frames_dir
print '> Removing old marker file...'
os.remove(frames_dir)
world_path = os.path.join(d4, 'world.avi')
log_dir = os.path.join(d4, '_ArUco.log')
read_log = False
if os.path.isfile(log_dir):
read_log = True
else: # no log available > process video
                    if not os.path.isfile(world_path): # .avi file does not exist yet
                        # Creating avi
                        print '> AVI video does not exist, generating it from mp4 source file...'
os.popen('avconv -i ' + os.path.join(d4, 'world.mp4') + ' -c:a copy -c:v copy ' + world_path + ' -v quiet')
# Perform tracking...
performMarkerTracking(read_from_log = read_log, log = True, log_dir = log_dir,
recording_video = world_path + ' ', frames_dir = frames_dir,
                    square_size = SQUARE_SIZE) # in case of relocating camera.yml, pass the new path as cam_mat
if not read_log:
# Removing avi file
print '> Removing avi file:', world_path
os.remove(world_path)
c += 1
print '> Processed %s recordings so far...' % c
# print '> Halting...'
# return
if __name__ == '__main__':
main(force=False)
print 'Finished.'


@@ -0,0 +1,85 @@
'''
(*)~----------------------------------------------------------------------------------
author:
Julian Steil
Master Thesis (2014):
Discovery of eye movement patterns in long-term human visual behaviour using topic models
----------------------------------------------------------------------------------~(*)
'''
import sys,os
import numpy as np
PARTICIPANTS = ['p10', 'p16', 'p13', 'p24', 'p5', 'p14', 'p26', 'p12', 'p20', 'p7', 'p15', 'p11', 'p21', 'p25']
ROOT = '/home/mmbrian/HiWi/etra2016_mohsen/code/recording/data/participants'
timestamps_world_path = 'world_timestamps.npy'
timestamps_eye_path = 'eye0_timestamps.npy'
def main():
for p in os.listdir(ROOT):
if p in PARTICIPANTS:
print '> Correlating eye-world images for', p
d1 = os.path.join(ROOT, p)
d1 = os.path.join(d1, os.listdir(d1)[0]) # ../p_i/../
for d2 in os.listdir(d1):
path = os.path.join(d1, d2)
print '> Processing', path
process(path)
print 'Done.'
def process(root):
timestamps_world = list(np.load(os.path.join(root, timestamps_world_path)))
timestamps_eye = list(np.load(os.path.join(root, timestamps_eye_path)))
no_frames_eye = len(timestamps_eye)
no_frames_world = len(timestamps_world)
# Detection of Synchronization-Matchings to initialize the correlation-matrix
frame_idx_world = 0
frame_idx_eye = 0
while (frame_idx_world < no_frames_world):
        # if the current world frame timestamp is before the mean of the current and next eye frame timestamps
if timestamps_world[frame_idx_world] <= (timestamps_eye[frame_idx_eye]+timestamps_eye[frame_idx_eye+1])/2.:
frame_idx_world+=1
else:
if frame_idx_eye >= no_frames_eye-2:
break
frame_idx_eye+=1
no_of_matched_frames = frame_idx_eye
print "no_of_matched_frames: ", no_of_matched_frames
    # Synchronizing eye and world cam
print no_frames_eye, no_frames_world
correlation = []
for i in xrange(no_frames_world):
correlation.append([])
for j in xrange(1):
correlation[i].append(float(0))
frame_idx_world = 0
frame_idx_eye = 0
while (frame_idx_world < no_frames_world):
# print frame_idx_world,frame_idx_eye
        # if the current world frame timestamp is before the mean of the current and next eye frame timestamps
if timestamps_world[frame_idx_world] <= (timestamps_eye[frame_idx_eye]+timestamps_eye[frame_idx_eye+1])/2.:
correlation[frame_idx_world][0] = frame_idx_eye
frame_idx_world+=1
else:
if frame_idx_eye >= no_frames_eye-2:
frame_idx_eye += 1
while (frame_idx_world < no_frames_world):
                    correlation[frame_idx_world][0] = frame_idx_eye # rows have a single column; index [1] was out of range
frame_idx_world+=1
break
frame_idx_eye+=1
correlation_list_path = "eye_world_correlation.npy"
correlation_list_csv_path = "eye_world_correlation.csv"
np.save(os.path.join(root, correlation_list_path),np.asarray(correlation))
np.savetxt(os.path.join(root, correlation_list_csv_path),np.asarray(correlation), delimiter=",", fmt="%f")
if __name__ == '__main__':
main()
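A small usage sketch for the stored mapping (shape (no_frames_world, 1) as built above): row i holds the index of the eye frame matched to world frame i.

    import numpy as np
    corr = np.load('eye_world_correlation.npy')
    eye_frame = int(corr[42][0])  # eye frame matched to world frame 42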


@@ -0,0 +1,193 @@
#!/usr/bin/env python
'''
This is a modified version of opencv's calibrate.py that stores the camera
parameters in the YAML format required by ArUco. An example of this format
is given by:
%YAML:1.0
calibration_time: "Sa 08 Aug 2015 16:32:35 CEST"
nr_of_frames: 30
image_width: 752
image_height: 480
board_width: 8
board_height: 6
square_size: 24.
fix_aspect_ratio: 1.
# flags: +fix_aspect_ratio +fix_principal_point +zero_tangent_dist
flags: 14
camera_matrix: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 418.164617459, 0., 372.480325679, 0., 417.850564673, 229.985538918, 0., 0., 1.]
distortion_coefficients: !!opencv-matrix
rows: 5
cols: 1
dt: d
data: [ -0.3107371474, 0.1187673445, -0.0002552599, -0.0001158436, -0.0233324616]
(Reference: https://wiki.mpi-inf.mpg.de/d2/ArUco)
How to use:
- Copy and paste this in /<OPENCV_SOURCE_DIR>/samples/python2
- Run it like
python calibrate_and_save.py --save "/<PATH_TO_SAVE_CAMERA_MATRIX>/camera.yml" "/<PATH_TO_IMAGE_DIR>/*.png"
- Then you can use ArUco like
./<PATH_TO_ARUCO_SRC>/build/utils/aruco_test_gl live /<PATH_TO_SAVE_CAMERA_MATRIX>/camera.yml 0.039
More on calibration: http://www.janeriksolem.net/2014/05/how-to-calibrate-camera-with-opencv-and.html
UPDATE:
Alternatively, you can follow the steps below
- After disabling integrated webcam:
echo "0" > /sys/bus/usb/devices/2-1.6/bConfigurationValue
(need to first find which usb device corresponds to your webcam)
- Plug in pupil tracker and use the official precompiled cpp script like:
/<PATH_TO_OPENCV>/build/bin/cpp-example-calibration -w 8 -h 6 -s 0.039 -o camera.yml -op
'''
import time
import numpy as np
import cv2, cv
import os
from common import splitfn
def saveCameraParams(save_dir, nframes, w, h, bw, bh, square_size,
camera_matrix, distortion_coefficients, fix_aspect_ratio = None, flags = 0):
time_str = time.strftime('%a %d %b %Y %H:%M:%S %Z')
lines = []
lines.append('%YAML:1.0')
lines.append('calibration_time: "%s"' %time_str)
lines.append('nframes: %s' %nframes)
lines.append('image_width: %s' %w)
lines.append('image_height: %s' %h)
lines.append('board_width: %s' %bw)
lines.append('board_height: %s' %bh)
lines.append('square_size: %s' %square_size)
if fix_aspect_ratio:
lines.append('fix_aspect_ratio: %s' %fix_aspect_ratio)
lines.append('flags: %s' %flags)
lines.append('camera_matrix: !!opencv-matrix')
lines.append(' rows: 3')
lines.append(' cols: 3')
lines.append(' dt: d')
lines.append(' data: %s' %repr(camera_matrix.reshape(1,9)[0])[6:-1]) # [6:-1] removes "array(" and ")"
lines.append('distortion_coefficients: !!opencv-matrix')
lines.append(' rows: 5')
lines.append(' cols: 1')
lines.append(' dt: d')
lines.append(' data: %s' %repr(distortion_coefficients)[6:-1])
with open(save_dir, 'w') as f:
f.writelines(map(lambda l: l+'\n', lines))
def readCameraParams(cam_mat = None):
'''
Reads an openCV camera.yml file and returns camera_matrix and distortion_coefficients
'''
if not cam_mat:
cam_mat = CAMERA_MATRIX
data = ''.join(open(cam_mat.strip(), 'r').readlines()).replace('\n', '').lower()
try:
ind1 = data.index('[', data.index('camera_matrix'))
ind2 = data.index(']', ind1)
camera_matrix = eval(data[ind1:ind2+1])
camera_matrix = np.array([camera_matrix[:3],
camera_matrix[3:6],
camera_matrix[6:]])
ind1 = data.index('[', data.index('distortion_coefficients'))
ind2 = data.index(']', ind1)
dist_coeffs = np.array(eval(data[ind1:ind2+1]))
return camera_matrix, dist_coeffs
except Exception:
print 'Could not load camera parameters'
print 'Invalid camera.yml file.'
if __name__ == '__main__':
import sys, getopt
from glob import glob
args, img_mask = getopt.getopt(sys.argv[1:], '', ['save=', 'debug=', 'square_size='])
args = dict(args)
try: img_mask = img_mask[0]
    except IndexError: img_mask = '../cpp/left*.jpg'
# print 'mask is', img_mask
# img_mask = img_mask.replace('10.png', '*.png')
img_names = glob(img_mask)
debug_dir = args.get('--debug')
square_size = float(args.get('--square_size', 1.0))
    square_size = 0.00122 # hardcoded override of the CLI value (matches square_size in camera.yml above)
save_dir = args.get('--save')
pattern_size = (8, 6)
pattern_points = np.zeros( (np.prod(pattern_size), 3), np.float32 )
pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
pattern_points *= square_size
obj_points = []
img_points = []
h, w = 0, 0
for fn in img_names:
print 'processing %s...' % fn,
img = cv2.imread(fn, 0)
h, w = img.shape[:2]
found, corners = cv2.findChessboardCorners(img, pattern_size)
if found:
term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 )
cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
if debug_dir:
vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.drawChessboardCorners(vis, pattern_size, corners, found)
path, name, ext = splitfn(fn)
cv2.imwrite('%s/%s_chess.bmp' % (debug_dir, name), vis)
if not found:
print 'chessboard not found'
continue
img_points.append(corners.reshape(-1, 2))
obj_points.append(pattern_points)
print 'ok'
# rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h))
root_dir = '/home/mmbrian/Pictures/eye_camera_images/'
cameraMatrixguess, distCoeffsguess = readCameraParams(os.path.join(root_dir,'_camera.yml'))
print "cameraM: ", cameraMatrixguess
print "dist: ", distCoeffsguess
    cameraMatrixguess[1][1] = cameraMatrixguess[0][0] # enforce equal focal lengths (square pixels)
    cameraMatrixguess[0][2] = 320 # fix the principal point at the center of the 640x360 eye images
    cameraMatrixguess[1][2] = 180
# Calibrate camera intrinsics
rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w,h),
cameraMatrixguess, distCoeffsguess, None, None, flags = \
cv.CV_CALIB_USE_INTRINSIC_GUESS + cv.CV_CALIB_FIX_PRINCIPAL_POINT + cv.CV_CALIB_FIX_ASPECT_RATIO)
np.save(os.path.join(root_dir,'dist.npy'), dist_coefs)
np.save(os.path.join(root_dir,'cameraMatrix.npy'), camera_matrix)
np.savetxt(os.path.join(root_dir,'dist.csv'),np.asarray(dist_coefs), delimiter=";", fmt="%s")
np.savetxt(os.path.join(root_dir,"cameraMatrix.csv"),np.asarray(camera_matrix), delimiter=";", fmt="%s")
print "RMS:", rms
print "camera matrix:\n", camera_matrix
print "distortion coefficients: ", dist_coefs.ravel()
print 'Width:', w, 'Height:', h
print 'nframes:', len(img_names)
print 'square_size:', square_size
print 'board_width:', pattern_size[0]
print 'board_height:', pattern_size[1]
saveCameraParams(save_dir, len(img_names), w, h, pattern_size[0], pattern_size[1], square_size,
camera_matrix, dist_coefs.ravel())
print "Saved camera matrix to", save_dir
cv2.destroyAllWindows()


@@ -0,0 +1,454 @@
from __future__ import division
'''
For each experiment, this script tracks the movement of the marker in the video using the information in aruco_frames.npy.
It then correlates this information with gaze data from pupil_positions.npy.
Finally, for every target in the video (25 targets in calibration, 16 in test), it maps the 3D marker position (mean position over the duration of the pause)
to the gaze position (mean position over the same pause) and stores this info, together with the projected 2D marker position, in a separate npy file.
The resulting file contains the ground truth data for this experiment.
'''
import os, sys
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
from scipy.ndimage.filters import gaussian_filter1d as g1d
from scipy import signal
from sklearn.neighbors import NearestNeighbors as knn
# from sklearn import svm
from sklearn.cluster import AgglomerativeClustering
from tracker import readCameraParams, Marker
from util.tools import is_outlier, moving_average
sys.path.append('..') # so we can import from pupil
from pupil import player_methods
from vector import Vector as v
import pdb
ROOT_DATA_DIR = '/home/mmbrian/HiWi/etra2016_mohsen/code/recording/data/participants'
def unifiy_markers_per_frame(marker_data):
'''
Since ArUco sometimes detects a marker twice in a frame, we need to either ignore one or somehow compute their mean.
Also this method maps each final marker to its center's 3D and 2D position wrt scene camera
'''
    camera_matrix, dist_coeffs = readCameraParams() # in case of relocating camera.yml, pass the new path as cam_mat
mdata, mrdata = [], []
for fn in xrange(len(marker_data)):
if len(marker_data[fn]) > 0:
markers = map(lambda m: Marker.fromList(m), marker_data[fn])
markers = map(lambda m: np.array([np.array(m.getCenter()),
np.array(m.getCenterInImage(camera_matrix, dist_coeffs))]), markers)
marker = sum(markers)/len(markers)
marker = [marker[0][0], marker[0][1], marker[0][2], marker[1][0], marker[1][1]]
# marker_data[fn] = marker
mdata.append(marker)
mrdata.append(marker)
else: # if marker is not detected, assign last detected position to this frame
# marker_data[fn] = marker_data[fn-1]
mdata.append(mdata[fn-1])
mrdata.append([]) # this contains real marker information (all tracked positions)
# return marker_data
return np.array(mdata), mrdata
def fix_labels(labels, window = 2, elements = [0, 1], outliers = []):
labels = list(labels)
for i in xrange(window, len(labels)-window):
neighborhood = labels[i-window:i+window+1]
if outliers[i]: # removing this label from decision making
neighborhood = neighborhood[:i] + neighborhood[i+1:]
element_counts = [list(neighborhood).count(e) for e in elements]
dominant_element = elements[element_counts.index(max(element_counts))]
labels[i] = dominant_element
return labels
def find_intervals(labels, mean, marker_speed):
'''
Given the label information of frame to frame motion speed, this method returns the frame
intervals for which the marker is either "moving" or "not moving"
Notice that len(labels) equals the number of frames minus one
'''
nm_label = labels[0]
intervals = []
curr_label, start, end = -1, -1, -1
not_moving = 0
for i in xrange(len(labels)):
if curr_label < 0: # first label
curr_label = labels[i]
start = i
else:
if labels[i] != curr_label: # label changed
end = i
intervals.append([start, end, curr_label])
if curr_label == nm_label: not_moving+=1
curr_label = labels[i]
start = i+1
end = len(labels)
intervals.append([start, end, curr_label])
if curr_label == nm_label: not_moving+=1
    # Now we do a post check: if two non-moving intervals are very close to each other,
    # the interval between them is most likely a misclassification
    # computing average interval length for moving intervals
if (len(intervals) > 49 and not_moving > 25) or (len(intervals)>31 and not_moving>16):
ret = merge_intervals(intervals, nm_label, mean, marker_speed, remove_outliers=True)
return ret, sum(1 for e in ret if e[2] == nm_label)
else:
return intervals, not_moving
def merge_intervals(intervals, nm_label, mean, marker_speed, remove_outliers=True):
mlength = np.array([seg[1] - seg[0] for seg in intervals if seg[2] != nm_label])
nmlength = np.array([seg[1] - seg[0] for seg in intervals if seg[2] == nm_label])
if remove_outliers:
mlength_outliers = mlength[is_outlier(mlength, thresh=3.5)]
avg_m_length = (sum(mlength)-sum(mlength_outliers))/(mlength.size - mlength_outliers.size)
nmlength_outliers = nmlength[is_outlier(nmlength, thresh=3.5)]
avg_nm_length = (sum(nmlength)-sum(nmlength_outliers))/(nmlength.size - nmlength_outliers.size)
else:
avg_m_length = sum(mlength)/mlength.size
avg_nm_length = sum(nmlength)/nmlength.size
    thresh = 3.5 # removes a moving interval if the average length is at least this many times larger than its length
i = 1
ret = []
ret.append(intervals[0])
while i < len(intervals):
length = intervals[i][1] - intervals[i][0]
ratio, label = 1, intervals[i][2]
if label == nm_label:
ratio = avg_nm_length/length
else:
ratio = avg_m_length/length
        if ratio>=thresh: # average length is at least thresh times the length of this interval
            # replace this interval by merging the two non-moving intervals around it
            # check if the average of elements in this interval is greater than mean
if np.mean(marker_speed[intervals[i][0]:intervals[i][1]]) < mean:
last_intv = ret.pop()
ret.append([last_intv[0], intervals[i+1][1], 1-label])
print 'Merged two intervals'
i+=2
continue
else:
pass
ret.append(intervals[i])
i+=1
return ret
# def main(force=False):
# rcParams['figure.figsize'] = 15, 7
# recordings_processed = 0
# recordings_successful = 0
# for d1 in os.listdir(ROOT_DATA_DIR):
# if d1.startswith('p'): # every participant
# d2 = os.path.join(ROOT_DATA_DIR, d1) # .../pi/
# d2 = os.path.join(d2, os.listdir(d2)[0]) # .../pi/../
# for d3 in os.listdir(d2): # every recording
# d4 = os.path.join(d2, d3) # .../pi/../00X/
# print '> Processing', d4
# frames_dir = os.path.join(d4, '_aruco_frames.npy')
# if not os.path.isfile(frames_dir): # the recording is not yet processed for marker tracking
# print '> Recording does not contain marker data...'
# continue
# intervals_dir = os.path.join(d4, 'gaze_intervals.npy')
# if os.path.isfile(intervals_dir):
# print '> Recording already processed...'
# if force:
# print '> Processing again...'
# else:
# continue
# marker_data = np.load(frames_dir)
# # marker_data includes data on tracked markers per frame
# # it's a list with as many entries as the number of video frames, each entry
# # has a list of tracked markers, each marker item has marker id, marker corners, Rvec, Tvec
# wt = np.load(os.path.join(d4, 'world_timestamps.npy'))
# # Processing pupil positions
# pp = np.load(os.path.join(d4, 'pupil_positions.npy')) # timestamp confidence id pos_x pos_y diameter
# # pos_x and pos_y are normalized (Origin 0,0 at the bottom left and 1,1 at the top right)
# # converting each element to dictionary for correlation
# pp = map(lambda e: dict(zip(['timestamp', 'conf', 'id', 'x', 'y', 'diam'], e)), pp)
# pp_by_frame = player_methods.correlate_data(pp, wt)
# # Keeping only pupil positions with nonzero confidence
# pp_by_frame = map(lambda l: filter(lambda p: p['conf']>0, l), pp_by_frame)
# # Computing a single pupil position for the frame by taking mean of all detected pupil positions
# pp_by_frame = map(lambda data:
# sum(np.array([pp['x'], pp['y']]) for pp in data)/len(data) if data else np.array([-1, -1]), pp_by_frame)
# # Now each nonempty value of pp_by_frame is a tuple of (x, y) for pupil position in that frame
# # Checking if timestamps, markers per frame and pupil positions per frame are correlated
# assert len(marker_data) == len(wt) == len(pp_by_frame)
# # Good, now we need to find the frame ranges in which marker is not moving, for that we need the marker_data
# # and using the position info per frame, we can compute movement speed and detect when it is almost zero
# marker_data, mrdata = unifiy_markers_per_frame(marker_data)
# # Smoothing x and y coords
# marker_data[:, 3] = g1d(marker_data[:, 3], sigma=2)
# marker_data[:, 4] = g1d(marker_data[:, 4], sigma=2)
# marker_speed = []
# for fn, fnp1 in ((f, f+1) for f in xrange(len(marker_data)-1)):
# if marker_data[fnp1] != [] and marker_data[fn] != []:
# # dx = marker_data[fnp1][0] - marker_data[fn][0]
# # dy = marker_data[fnp1][1] - marker_data[fn][1]
# # dz = marker_data[fnp1][2] - marker_data[fn][2]
# # speed = np.sqrt(dx**2 + dy**2 + dz**2) * 100
# # print fn, fnp1, len(marker_data), marker_data[fnp1], marker_data[fn]
# dx = marker_data[fnp1][3] - marker_data[fn][3]
# dy = marker_data[fnp1][4] - marker_data[fn][4]
# speed = np.sqrt(dx**2 + dy**2)
# # print 'marker speed:', speed
# marker_speed.append(speed)
# else:
# marker_speed.append(marker_speed[-1]) # set speed to last speed if marker could not be detected
# # Performing binary clustering on marker speed
# model = AgglomerativeClustering(n_clusters=2, linkage="ward", affinity="euclidean")
# marker_speed = np.array(marker_speed)
# # Checking for outliers based on "median absolute deviation"
# outliers = is_outlier(marker_speed, thresh=3.5)
# print sum(outliers == True), 'outliers detected'
# # removing outliers
# outlier_inds = [i for i in xrange(outliers.size) if outliers[i]]
# marker_speed = list(np.delete(marker_speed, outlier_inds))
# # replacing removed outliers by average of their neighbours
# outliers_inds = sorted(outlier_inds)
# window = 1
# for ind in outlier_inds:
# start = max(ind-window, 0)
# neighbours = marker_speed[start:ind+window]
# new_val = sum(neighbours)/len(neighbours)
# marker_speed.insert(ind, new_val)
# marker_speed = np.array(marker_speed)
# # smoothed_signal = marker_speed[:]
# smoothed_signal = signal.medfilt(marker_speed, 13)
# # smoothed_signal = g1d(marker_speed, sigma=2)
# # smoothed_signal = moving_average(smoothed_signal, 7)
# model.fit(map(lambda e: [e], smoothed_signal))
# labels = fix_labels(model.labels_, window=1, outliers = outliers)
# outliers = map(lambda e: 10 if e else 5, outliers)
# mean = np.mean(smoothed_signal)
# intervals, nm = find_intervals(labels, mean, smoothed_signal)
# print '>', len(intervals), 'Intervals found in total.', nm, 'gaze intervals.'
# interval_display = []
# for dur in intervals:
# interval_display.extend([dur[2]]*(dur[1]-dur[0]+1))
# interval_display = interval_display[:-1]
# print len(interval_display), len(marker_data)-1, intervals[-1][1]-intervals[0][0]
# # print intervals
# # print labels
# # return
# # print len(marker_data), len(marker_speed)
# plt.plot(range(len(marker_data)-1), marker_speed, 'b',
# # range(len(marker_data)-1), labels, 'r',
# range(len(marker_data)-1), smoothed_signal, 'g',
# range(len(marker_data)-1), interval_display, 'r')
# # plt.show()
# # plt.clf()
# # return
# # plt.clf()
# recordings_processed += 1
# intervals_okay = True
# if not nm in [16, 25]:
# intervals_okay = False
# pdb.set_trace()
# print '> Storing odd figure...'
# plt.savefig('./temp/%s-%s__%snm.png' % (d1, d3, str(nm)))
# # print '> Entering manual override mode...'
# # print '> Enter halt to quit.'
# # # set manual_bypass to True in case you wanna discard changes in override mode
# # cmd = raw_input(':')
# # while cmd != 'halt' and cmd != 'pass':
# # exec cmd in globals(), locals()
# # cmd = raw_input(':')
# if intervals_okay:
# print '> Intervals seem okay.'
# plt.savefig(os.path.join(d4, 'marker_motion.png'))
# recordings_successful += 1
# # Store interval information
# # Use pp_by_frame and marker_data to compute gaze and target points corresponding to this interval
# gaze_intervals = intervals[::2] # starting from the first interval, gaze, moving, gaze, moving, gaze, ...
# t2d, t3d, p = [], [], []
# for intv in gaze_intervals:
# s, e = intv[0], intv[1]
# null_gaze, null_marker = 0, 0
# gaze_point = np.array([0, 0])
# marker_3d_position = np.array([0, 0, 0])
# marker_2d_position = np.array([0, 0])
# for fn in xrange(s, e+1):
# if all(pp_by_frame[fn]==np.array([-1, -1])):
# null_gaze += 1
# else:
# gaze_point = gaze_point + pp_by_frame[fn]
# if mrdata[fn] == []:
# null_marker += 1
# else:
# marker_3d_position = marker_3d_position + np.array(mrdata[fn][:3])
# marker_2d_position = marker_2d_position + np.array(mrdata[fn][3:])
# gaze_point = gaze_point/(e-s+1-null_gaze)
# marker_3d_position = marker_3d_position/(e-s+1-null_marker)
# marker_2d_position = marker_2d_position/(e-s+1-null_marker)
# t2d.append(marker_2d_position)
# t3d.append(marker_3d_position)
# p.append(gaze_point)
# print '> Storing intervals, gaze data, and marker data...'
# np.save(intervals_dir, np.array(gaze_intervals))
# np.save(os.path.join(d4, 'p.npy'), np.array(p))
# np.save(os.path.join(d4, 't2d.npy'), np.array(t2d))
# np.save(os.path.join(d4, 't3d.npy'), np.array(t3d))
# print '>', recordings_processed, 'recordings processed.', recordings_successful, 'successful.'
# plt.clf()
PARTICIPANTS = ['p10', 'p16', 'p13', 'p24', 'p5', 'p14', 'p26', 'p12', 'p20', 'p7', 'p15', 'p11', 'p21', 'p25']
def main(force=False):
recordings_processed = 0
recordings_successful = 0
for d1 in os.listdir(ROOT_DATA_DIR):
if d1.startswith('p'): # every participant
if not d1 in PARTICIPANTS:
continue
d2 = os.path.join(ROOT_DATA_DIR, d1) # .../pi/
d2 = os.path.join(d2, os.listdir(d2)[0]) # .../pi/../
for d3 in os.listdir(d2): # every recording
d4 = os.path.join(d2, d3) # .../pi/../00X/
print '> Processing', d4
frames_dir = os.path.join(d4, '_aruco_frames.npy')
if not os.path.isfile(frames_dir): # the recording is not yet processed for marker tracking
print '> Recording does not contain marker data...'
continue
intervals_dir = os.path.join(d4, 'gaze_intervals.npy')
if os.path.isfile(intervals_dir):
print '> Recording already processed...'
if force:
print '> Processing again...'
else:
continue
marker_data = np.load(frames_dir)
# marker_data includes data on tracked markers per frame
# it's a list with as many entries as the number of video frames, each entry
# has a list of tracked markers, each marker item has marker id, marker corners, Rvec, Tvec
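# e.g. one marker entry inside a frame's list has the shape
# [np.array([marker_id]), [four 2D corner arrays], Rvec, Tvec] (cf. Marker.fromList in tracker.py)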
wt = np.load(os.path.join(d4, 'world_timestamps.npy'))
# Processing pupil positions
pp = np.load(os.path.join(d4, 'pupil_positions.npy')) # timestamp confidence id pos_x pos_y diameter
# pos_x and pos_y are normalized (Origin 0,0 at the bottom left and 1,1 at the top right)
# converting each element to dictionary for correlation
pp = map(lambda e: dict(zip(['timestamp', 'conf', 'id', 'x', 'y', 'diam'], e)), pp)
pp_by_frame = player_methods.correlate_data(pp, wt)
# Keeping only pupil positions with nonzero confidence
pp_by_frame = map(lambda l: filter(lambda p: p['conf']>0, l), pp_by_frame)
# Computing a single pupil position for the frame by taking mean of all detected pupil positions
pp_by_frame = map(lambda data:
sum(np.array([pp['x'], pp['y']]) for pp in data)/len(data) if data else np.array([-1, -1]), pp_by_frame)
# Now each nonempty value of pp_by_frame is a tuple of (x, y) for pupil position in that frame
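# e.g. a frame with confident detections at (0.40, 0.61) and (0.42, 0.59) reduces to
# np.array([0.41, 0.60]), while a frame with no confident detection maps to np.array([-1, -1])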
# Checking if timestamps, markers per frame and pupil positions per frame are correlated
assert len(marker_data) == len(wt) == len(pp_by_frame)
# Good. Now we need to find the frame ranges in which the marker is not moving. For that we need marker_data;
# using the position info per frame, we can compute movement speed and detect when it is almost zero
marker_data, mrdata = unifiy_markers_per_frame(marker_data)
gaze_intervals = np.load(intervals_dir)
recordings_processed += 1
intervals_okay = True
if intervals_okay:
print '> Intervals seem okay.'
recordings_successful += 1
t2d, t3d, p = [], [], []
t2d_med, t3d_med, p_med, p_frames = [], [], [], []
for intv in gaze_intervals:
s, e = intv[0], intv[1]
null_gaze, null_marker = 0, 0
gaze_point = np.array([0, 0])
marker_3d_position = np.array([0, 0, 0])
marker_2d_position = np.array([0, 0])
gpts, m3ds, m2ds = [], [], []
valid_frames = []
for fn in xrange(s, e+1):
if all(pp_by_frame[fn]==np.array([-1, -1])) or mrdata[fn] == []:
# either pupil detection or marker detection failed for this frame,
# so the whole pupil-marker correspondence is invalid; ignore this frame
pass
else:
gpts.append(pp_by_frame[fn])
marker_3d_position = marker_3d_position + np.array(mrdata[fn][:3])
marker_2d_position = marker_2d_position + np.array(mrdata[fn][3:])
m3ds.append(np.array(mrdata[fn][:3]))
m2ds.append(np.array(mrdata[fn][3:]))
valid_frames.append(fn)
if not len(valid_frames):
# this marker-pupil correspondence failed
print '> Failed to find a reliable correspondence for a marker position...'
# In this case the participant's data should be completely ignored
# return
# Computing the median pupil position
final_p = np.median(gpts, axis=0)
p_med.append(final_p)
# Finding the closest pupil position to this median in the valid frames
dists = map(lambda pupil_position: (v(pupil_position)-v(final_p)).mag, gpts)
dists = zip(range(len(gpts)), dists)
closest = min(dists, key=lambda pair:pair[1])
# Getting the index for this position
ind = closest[0]
# Finding the k nearest pupil position to this one
k = 3
while True:
try:
nbrs = knn(n_neighbors=k, algorithm='ball_tree').fit(gpts)
dists, indices = nbrs.kneighbors(gpts)
break
except ValueError, err:
k-=1
nearest_ind = indices[ind]
frames_numbers = map(lambda i: valid_frames[i], nearest_ind)
p_frames.append(frames_numbers)
# Now we take eye images from these frames
# Also the pupil-marker correspondece is now final_p and m2ds[ind] m3d[ind]
t2d_med.append(m2ds[ind])
t3d_med.append(m3ds[ind])
# t2d_med.append(np.median(m2ds, axis=0))
# t3d_med.append(np.median(m3ds, axis=0))
print '> Storing gaze and marker data...'
# np.save(intervals_dir, np.array(gaze_intervals))
np.save(os.path.join(d4, 'p_frames.npy'), np.array(p_frames))
# np.save(os.path.join(d4, 'p.npy'), np.array(p_med))
# np.save(os.path.join(d4, 't2d.npy'), np.array(t2d_med))
# np.save(os.path.join(d4, 't3d.npy'), np.array(t3d_med))
print '>', recordings_processed, 'recordings processed.', recordings_successful, 'successful.'
if __name__ == '__main__':
main(force=True)
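The median-plus-kNN selection inside the interval loop is the core of this script, so here is a minimal standalone sketch of the same idea on toy data. It assumes knn is scikit-learn's NearestNeighbors (imported earlier in the file, not shown in this diff), and np.linalg.norm stands in for the v(...).mag vector helper:

import numpy as np
from sklearn.neighbors import NearestNeighbors

gpts = np.array([[0.40, 0.61], [0.42, 0.59], [0.41, 0.60], [0.90, 0.10]])  # one outlier
final_p = np.median(gpts, axis=0)                        # robust central pupil position
ind = np.argmin(np.linalg.norm(gpts - final_p, axis=1))  # closest actual sample
k = 3
while k > 0:
    try:
        nbrs = NearestNeighbors(n_neighbors=k, algorithm='ball_tree').fit(gpts)
        dists, indices = nbrs.kneighbors(gpts)
        break
    except ValueError:
        k -= 1  # fall back when fewer than k samples are available
print final_p, gpts[ind], indices[ind]  # median, its nearest sample, that sample's neighbours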

code/recording/retrieve.py Normal file
@@ -0,0 +1,150 @@
import sys
import numpy as np
import cv2
import matplotlib.pyplot as plt
sys.path.append('..') # so we can import from pupil
from pupil import player_methods
# from tracker import processFrame
DATA_DIR = '/home/mmbrian/HiWi/pupil_clone/pupil/recordings/2015_09_10/007/'
OUTPUT_DIR = DATA_DIR + 'pp.npy'
def capture(frame_number):
cap = cv2.VideoCapture(DATA_DIR + "world.mp4")
fc = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
assert frame_number<fc
frame = 1
status, img = cap.read() # extract the first frame
while status:
if frame == frame_number:
save_dir = DATA_DIR + "frames/frame_%d.jpg" % frame_number
cv2.imwrite(save_dir, img)
break
frame+=1
status, img = cap.read()
def main():
p2d, t3d = [], [] # 2D-3D pupil position to target position correspondences
cap = cv2.VideoCapture(DATA_DIR + "world.mp4")
fc = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
wt = np.load(DATA_DIR + "world_timestamps.npy")
# time is measured in seconds (floating point) since the epoch.
# world timestamps correspond to each frame; we have to correlate timestamp information from
# pupil positions and world timestamps to find a 1-to-1 mapping between pupil positions and
# marker positions in the video
print fc, len(wt)
assert fc == len(wt)
########################################################################################################
# Processing markers
# print 'Processing markers...'
## Processing frame by frame does not work, as the ArUco process opens a Gtk window which cannot be
## terminated automatically; it is better to process all frames beforehand and work with the data here
# frame = 1
# status, img = cap.read() # extract the first frame
# while status:
# # cv2.imwrite(DATA_DIR + "frames/frame-%d.jpg" % frame, img)
# save_dir = DATA_DIR + "frames/current_frame.jpg"
# cv2.imwrite(save_dir, img)
# processFrame(save_dir)
# frame+=1
# status, img = cap.read()
# print "Processed %d frames." % (frame-1)
########################################################################################################
# Processing pupil positions
print 'Processing pupil positions...'
pp = np.load(DATA_DIR + "pupil_positions.npy") # timestamp confidence id pos_x pos_y diameter
# pos_x and pos_y are normalized (Origin 0,0 at the bottom left and 1,1 at the top right)
# converting each element to dictionary for correlation
pp = map(lambda e: dict(zip(['timestamp', 'conf', 'id', 'x', 'y', 'diam'], e)), pp)
pp_by_frame = player_methods.correlate_data(pp, wt)
# Keeping only pupil positions with nonzero confidence
pp_by_frame = map(lambda l: filter(lambda p: p['conf']>0, l), pp_by_frame)
# Computing a single pupil position for the frame by taking mean of all detected pupil positions
pp_by_frame = map(lambda data:
sum(np.array([pp['x'], pp['y']]) for pp in data)/len(data) if data else np.array([-1, -1]), pp_by_frame)
# Now each nonempty value of pp_by_frame is a tuple of (x, y) for pupil position in that frame
# Next we need to associate each frame to a detected marker and by taking mean pupil point and
# mean 3D marker position over a series of frames corresponding to that marker find a 2D-3D
# mapping for calibration/test
tdiff = map(lambda e: e-wt[0], wt)
# The timing correspondence for each marker was coordinated using the GazeHelper Android application
# for 005 > starting from 00:56, 3 seconds gaze, 1 second for saccade
# These parameters are specific to the experiment
# 005 > 56, 3, 1
# 006 > 3, 3, 1
# 007 > 7, 3, 1 (or 8)
# 010 > 3, 3, 1
starting_point, gaze_duration, saccade_duration = 56, 3, 1 # these are specific to the experiment
# finding the starting frame
ind = 0
while tdiff[ind] < starting_point:
ind+=1
print ind
data = []
tstart = wt[ind]
for i in xrange(9):
print i
while ind<len(wt) and wt[ind] - tstart < saccade_duration:
ind+=1
if ind<len(wt):
tstart = wt[ind]
starting_ind = ind
while ind<len(wt) and wt[ind] - tstart < gaze_duration:
ind+=1
# all frames from starting_ind to ind-1 correspond to currently gazed marker
c = 0
cp = np.array([0, 0])
all_corresponding_points = []
for j in xrange(starting_ind, ind):
if pp_by_frame[j][0] >= 0:
c+=1
cp = cp + pp_by_frame[j]
all_corresponding_points.append(pp_by_frame[j])
# print c
if c>0:
ret = cp/c
else:
ret = np.array([-1, -1]) # no detected pupil for this marker
p2d.append(ret)
data.append([
np.array([starting_ind, ind-1]), # frame range
ret, # mean pupil position
all_corresponding_points]) # all pupil positions in range
if ind<len(wt):
tstart = wt[ind]
# p2d is now the list of detected pupil positions (not always 9 points)
print 'Saving data...'
np.save(OUTPUT_DIR, data)
# plt.plot([x[0] if x!=None else 0 for x in pp_by_frame], [y[1] if y!=None else 0 for y in pp_by_frame], 'ro')
# plt.show()
########################################################################################################
print len(p2d), 'gaze points'
print p2d
print len(wt), 'frames...'
if __name__ == '__main__':
main()
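The saccade/gaze windowing above is easier to verify on synthetic timestamps. A small sketch of the same slicing, assuming an idealized constant 30 fps stream (real recordings have jittery timestamps, which is why main() walks the wt array instead):

import numpy as np

fps = 30.
wt = np.arange(0, 60, 1/fps)  # synthetic world timestamps in seconds
starting_point, gaze_duration, saccade_duration = 7, 3, 1
ind = int(np.searchsorted(wt, wt[0] + starting_point))  # first frame of the sequence
ranges = []
for i in xrange(9):  # per marker: skip a saccade window, then collect a gaze window
    ind += int(saccade_duration * fps)
    start = ind
    ind += int(gaze_duration * fps)
    ranges.append((start, ind - 1))
print ranges  # frame ranges corresponding to each gazed marker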

@@ -0,0 +1,27 @@
25 markers detected
[-10.2193 -4.3434 31.3079] [ 364.02288846 279.34265322]
[ 4.45719 -0.890039 30.1925 ] [ 738.90737229 366.01616286]
[ -0.241048 -0.781436 29.4198 ] [ 615.79477403 368.42821006]
[ 9.71565 2.76641 28.2099 ] [ 896.58718475 466.75202885]
[ -9.72456 -0.578541 28.3965 ] [ 351.64670069 372.91016582]
[ 3.95976 -4.62243 33.3605 ] [ 716.20539106 279.93901788]
[ 8.71796 -4.73768 34.2061 ] [ 824.90658662 279.37948968]
[-15.4225 -7.95261 33.4987 ] [ 257.11533368 200.38492012]
[-10.759 -8.10442 34.3725 ] [ 374.21786998 202.19909734]
[ -9.16547 3.18029 25.2879 ] [ 335.82956957 488.18787966]
[ 8.2166 -8.50777 37.2677 ] [ 797.87302812 207.99485459]
[ 0.251393 2.96439 26.5568 ] [ 629.74093457 476.94503839]
[ 4.95556 2.84818 27.3074 ] [ 765.81823876 471.39805837]
[ 9.2531 -0.996382 31.2532 ] [ 857.61665601 363.81620258]
[ -0.751971 -4.49805 32.4702 ] [ 604.03285282 280.22127036]
[ -4.4535 3.07115 26.0102 ] [ 487.44771208 481.98387392]
[ -1.26526 -8.25379 35.5274 ] [ 594.23607203 205.79036006]
[-14.1847 -0.48957 27.1355 ] [ 208.39215781 374.30642197]
[ -4.96301 -0.668829 28.9992 ] [ 487.46065075 371.08870379]
[-14.8024 -4.22344 30.2936 ] [ 234.96053223 278.18514623]
[-15.4051 -4.40168 31.5249 ] [ 234.93398339 278.01955418]
[ -5.99238 -8.19846 34.9627 ] [ 486.77529469 203.86450319]
[ -5.48118 -4.44441 32.0198 ] [ 487.21960875 279.85135589]
[-13.2512 3.09461 23.3448 ] [ 174.4254351 492.96000757]
[ 3.45682 -8.37202 36.4481 ] [ 697.60290572 207.58466561]

code/recording/tracker.py Normal file
@@ -0,0 +1,221 @@
import os, signal, subprocess
import cv2
import numpy as np
import json
LOG_DIR = "/home/mmbrian/HiWi/etra2016_mohsen/code/recording/data/log.log"
FRAMES_DIR = "/home/mmbrian/HiWi/etra2016_mohsen/code/recording/data/frames_012.npy"
ARUCO_EXECUTABLE = "/home/mmbrian/temp/aruco-1.3.0/build/utils/aruco_test "
ARUCO_SIMPLE_EXECUTABLE = "/home/mmbrian/temp/aruco-1.3.0/build/utils/aruco_simple "
RECORDING_VIDEO = "/home/mmbrian/HiWi/pupil_clone/pupil/recordings/2015_09_10/012/world.avi "
## convert videos to avi using:
## avconv -i world.mp4 -c:a copy -c:v copy world.avi
CAMERA_MATRIX = "/home/mmbrian/Pictures/chessboard_shots_new/camera.yml "
SQUARE_SIZE = "0.029"
class MyEncoder(json.JSONEncoder):
def default(self, o):
if '__dict__' in dir(o):
return o.__dict__
else:
return str(o)
class Marker(object):
def __init__(self, _id, p, Rvec, Tvec):
'''
p is the array of marker corners in 2D detected in target image
'''
self.id = _id
self.p = p
self.Rvec = Rvec
self.Tvec = Tvec
def getCenter(self):
'''
Returns 3D position of the marker's center in camera coordinate system
'''
ret = []
# Constructing the 4x4 extrinsic (marker-to-camera) transformation matrix
R = cv2.Rodrigues(self.Rvec)[0]
for i, row in enumerate(R):
ret.append(np.concatenate((row, [self.Tvec[i]])))
ret.append([0, 0, 0, 1])
mat = np.array(ret)
# Applying the transformation to the marker center (origin of the marker coordinate system)
return mat.dot(np.array([0, 0, 0, 1]))[:-1] # removing the 4th homogeneous coordinate; for the origin this is just Tvec
def getCenterInImage(self, camera_matrix, dist_coeffs):
ret = cv2.projectPoints(np.array([(0.0, 0.0, 0.0)]), self.Rvec, self.Tvec, camera_matrix, dist_coeffs)
return ret[0][0][0]
@classmethod
def fromList(cls, data):
# np.array([np.array([curr_marker.id]), curr_marker.p, curr_marker.Rvec, curr_marker.Tvec])
return Marker(data[0][0], data[1], data[2], data[3])
# def processFrame(frame_path):
# cmd = ARUCO_SIMPLE_EXECUTABLE + frame_path + ' ' + CAMERA_MATRIX + SQUARE_SIZE
# p = subprocess.Popen('exec ' + cmd, shell=True, stdout=subprocess.PIPE)
# print '########################################################'
# print 'Begin Output'
# while True:
# line = p.stdout.readline()
# if line != '':
# #the real code does filtering here
# print line.rstrip()
# else:
# break
# print 'End Output'
# print '########################################################'
# # p.terminate()
# p.kill()
# # os.killpg(p.pid, signal.SIGTERM)
def readCameraParams(cam_mat = None):
'''
Reads an openCV camera.yml file and returns camera_matrix and distortion_coefficients
'''
if not cam_mat:
cam_mat = CAMERA_MATRIX
data = ''.join(open(cam_mat.strip(), 'r').readlines()).replace('\n', '').lower()
try:
ind1 = data.index('[', data.index('camera_matrix'))
ind2 = data.index(']', ind1)
camera_matrix = eval(data[ind1:ind2+1])
camera_matrix = np.array([camera_matrix[:3],
camera_matrix[3:6],
camera_matrix[6:]])
ind1 = data.index('[', data.index('distortion_coefficients'))
ind2 = data.index(']', ind1)
dist_coeffs = np.array(eval(data[ind1:ind2+1]))
return camera_matrix, dist_coeffs
except Exception:
print 'Could not load camera parameters'
print 'Invalid camera.yml file.'
def performMarkerTracking(read_from_log = False, log = False, ret = False,
log_dir = None, recording_video = None, frames_dir = None,
square_size = None, cam_mat = None):
if not log_dir:
log_dir = LOG_DIR
if not frames_dir:
frames_dir = FRAMES_DIR
if not recording_video:
recording_video = RECORDING_VIDEO
if not cam_mat:
cam_mat = CAMERA_MATRIX
if not square_size:
square_size = SQUARE_SIZE
square_size = str(square_size)
data = []
if read_from_log:
if log:
print '> Reading data from log file:', log_dir
with open(log_dir, 'r') as f:
for l in f:
if l.strip():
data.append(l)
else:
if log:
print '> Performing marker tracking on file:', recording_video
print '> Writing data to log file:', log_dir
with open(log_dir, 'w') as f:
for l in os.popen(ARUCO_EXECUTABLE + recording_video + cam_mat + square_size):
l = l.strip()
if '\r' in l:
for line in l.split('\r'):
if line:
f.write(line + "\n") # logging to file
data.append(line)
else:
if l:
f.write(l + "\n") # logging to file
data.append(l)
if log:
print '> Parsing marker data...'
frame_count = 0
curr_frame = 0
markers = {}
frames, frame = [], []
visited_first_frame = False
for line in data:
line = line.strip().lower()
# print
# print repr(line)
# print
if line.startswith('time'):
if visited_first_frame:
frames.append(frame)
visited_first_frame = True
frame = []
# if frame:
# frames.append(frame)
# frame = []
curr_frame=curr_frame+1
nmarkers = int(line[line.index('nmarkers')+9:])
if 'txyz' in line: # This line holds information of a detected marker
ind = line.index('=')
_id = int(line[:ind])
p = []
for i in xrange(4):
pind = ind
ind = line.index(' ', pind+1)
p.append(line[pind+1:ind])
pind = ind
ind = line.index('rxyz', pind)
T = line[pind+1+5:ind]
R = line[ind+5:]
if not _id in markers:
markers[_id] = []
curr_marker = Marker(_id,
map(lambda pstr: np.array(eval(pstr)), p),
np.array(eval('(' + R.strip().replace(' ', ',') + ')')),
np.array(eval('(' + T.strip().replace(' ', ',') + ')')))
markers[_id].append(curr_marker)
frame.append(np.array([np.array([curr_marker.id]), curr_marker.p, curr_marker.Rvec, curr_marker.Tvec]))
# Last frame data
frames.append(frame)
frames = np.array(frames)
if log:
print '> Saving marker data for all frames in:', frames_dir
np.save(frames_dir, frames)
if log:
print '> Successfully processed %s frames.' % curr_frame
if ret:
return markers
if __name__ == '__main__':
camera_matrix, dist_coeffs = readCameraParams()
markers = performMarkerTracking()
print '################################################'
print len(markers), 'markers detected'
for m in markers:
# print '################################################'
# print markers[m][0].__dict__
# print '################################################'
print m, ':', len(markers[m]), 'instances'
c = markers[m][0].getCenter()
print c * 100, markers[m][0].getCenterInImage(camera_matrix, dist_coeffs)
# # TODO: investigate if this is how to get projections of unit vectors
# # originating from the camera onto the image plane
# # the trick is that we consider a marker with no R and zero T
# v = [(0.0, 0.0, 0.0), (1, 0, 0), (0, 1, 0), (0, 0, 1)]
# ret = cv2.projectPoints(np.array(v), np.eye(3), np.zeros(3), camera_matrix, dist_coeffs)
# p = ret[0]
# for i, t in enumerate(v):
# print t, 'P->', p[i][0]
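The parsing loop above hinges on the line format emitted by aruco_test. A toy trace of the same slicing on a made-up (but shape-correct) line:

import numpy as np

line = '123=(86,343) (153,343) (153,411) (86,411) txyz=-0.04 0.01 0.30 rxyz=2.10 0.05 -0.20'
ind = line.index('=')
_id = int(line[:ind])  # 123
corners = []
for i in xrange(4):  # four space-separated corner tuples follow the '='
    pind = ind
    ind = line.index(' ', pind + 1)
    corners.append(np.array(eval(line[pind + 1:ind])))
pind = ind
ind = line.index('rxyz', pind)
T = np.array(eval('(' + line[pind + 6:ind].strip().replace(' ', ',') + ')'))  # skip 'txyz='
R = np.array(eval('(' + line[ind + 5:].strip().replace(' ', ',') + ')'))      # skip 'rxyz='
print _id, corners, T, R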

@@ -0,0 +1,41 @@
.gradle
/local.properties
/.idea/workspace.xml
/.idea/libraries
.DS_Store
/build
/captures
### Android ###
# Built application files
*.apk
*.ap_
# Files for the Dalvik VM
*.dex
# Java class files
*.class
# Generated files
bin/
gen/
# Gradle files
.gradle/
build/
# Local configuration file (sdk path, etc)
local.properties
# Proguard folder generated by Eclipse
proguard/
# Log Files
*.log
# Android Studio Navigation editor temp files
.navigation/
### Android Patch ###
gen-external-apklibs

@@ -0,0 +1 @@
GazeHelper

@@ -0,0 +1,22 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="CompilerConfiguration">
<resourceExtensions />
<wildcardResourcePatterns>
<entry name="!?*.java" />
<entry name="!?*.form" />
<entry name="!?*.class" />
<entry name="!?*.groovy" />
<entry name="!?*.scala" />
<entry name="!?*.flex" />
<entry name="!?*.kt" />
<entry name="!?*.clj" />
<entry name="!?*.aj" />
</wildcardResourcePatterns>
<annotationProcessing>
<profile default="true" name="Default" enabled="false">
<processorPath useClasspath="true" />
</profile>
</annotationProcessing>
</component>
</project>

@@ -0,0 +1,3 @@
<component name="CopyrightManager">
<settings default="" />
</component>

@@ -0,0 +1,19 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="GradleSettings">
<option name="linkedExternalProjectsSettings">
<GradleProjectSettings>
<option name="distributionType" value="LOCAL" />
<option name="externalProjectPath" value="$PROJECT_DIR$" />
<option name="gradleHome" value="$APPLICATION_HOME_DIR$/gradle/gradle-2.2.1" />
<option name="gradleJvm" value="1.8" />
<option name="modules">
<set>
<option value="$PROJECT_DIR$" />
<option value="$PROJECT_DIR$/app" />
</set>
</option>
</GradleProjectSettings>
</option>
</component>
</project>

@@ -0,0 +1,22 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="EntryPointsManager">
<entry_points version="2.0" />
</component>
<component name="ProjectLevelVcsManager" settingsEditedManually="false">
<OptionsSetting value="true" id="Add" />
<OptionsSetting value="true" id="Remove" />
<OptionsSetting value="true" id="Checkout" />
<OptionsSetting value="true" id="Update" />
<OptionsSetting value="true" id="Status" />
<OptionsSetting value="true" id="Edit" />
<ConfirmationsSetting value="0" id="Add" />
<ConfirmationsSetting value="0" id="Remove" />
</component>
<component name="ProjectRootManager" version="2" languageLevel="JDK_1_7" default="true" assert-keyword="true" jdk-15="true" project-jdk-name="1.8" project-jdk-type="JavaSDK">
<output url="file://$PROJECT_DIR$/build/classes" />
</component>
<component name="ProjectType">
<option name="id" value="Android" />
</component>
</project>

@@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/GazeHelper.iml" filepath="$PROJECT_DIR$/GazeHelper.iml" />
<module fileurl="file://$PROJECT_DIR$/app/app.iml" filepath="$PROJECT_DIR$/app/app.iml" />
</modules>
</component>
</project>

@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="" vcs="" />
</component>
</project>

@@ -0,0 +1,19 @@
<?xml version="1.0" encoding="UTF-8"?>
<module external.linked.project.id="GazeHelper" external.linked.project.path="$MODULE_DIR$" external.root.project.path="$MODULE_DIR$" external.system.id="GRADLE" external.system.module.group="" external.system.module.version="unspecified" type="JAVA_MODULE" version="4">
<component name="FacetManager">
<facet type="java-gradle" name="Java-Gradle">
<configuration>
<option name="BUILD_FOLDER_PATH" value="$MODULE_DIR$/build" />
<option name="BUILDABLE" value="false" />
</configuration>
</facet>
</component>
<component name="NewModuleRootManager" inherit-compiler-output="true">
<exclude-output />
<content url="file://$MODULE_DIR$">
<excludeFolder url="file://$MODULE_DIR$/.gradle" />
</content>
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>

@@ -0,0 +1 @@
/build

@@ -0,0 +1,95 @@
<?xml version="1.0" encoding="UTF-8"?>
<module external.linked.project.id=":app" external.linked.project.path="$MODULE_DIR$" external.root.project.path="$MODULE_DIR$/.." external.system.id="GRADLE" external.system.module.group="GazeHelper" external.system.module.version="unspecified" type="JAVA_MODULE" version="4">
<component name="FacetManager">
<facet type="android-gradle" name="Android-Gradle">
<configuration>
<option name="GRADLE_PROJECT_PATH" value=":app" />
</configuration>
</facet>
<facet type="android" name="Android">
<configuration>
<option name="SELECTED_BUILD_VARIANT" value="debug" />
<option name="SELECTED_TEST_ARTIFACT" value="_android_test_" />
<option name="ASSEMBLE_TASK_NAME" value="assembleDebug" />
<option name="COMPILE_JAVA_TASK_NAME" value="compileDebugSources" />
<option name="SOURCE_GEN_TASK_NAME" value="generateDebugSources" />
<option name="ASSEMBLE_TEST_TASK_NAME" value="assembleDebugAndroidTest" />
<option name="COMPILE_JAVA_TEST_TASK_NAME" value="compileDebugAndroidTestSources" />
<option name="TEST_SOURCE_GEN_TASK_NAME" value="generateDebugAndroidTestSources" />
<option name="ALLOW_USER_CONFIGURATION" value="false" />
<option name="MANIFEST_FILE_RELATIVE_PATH" value="/src/main/AndroidManifest.xml" />
<option name="RES_FOLDER_RELATIVE_PATH" value="/src/main/res" />
<option name="RES_FOLDERS_RELATIVE_PATH" value="file://$MODULE_DIR$/src/main/res" />
<option name="ASSETS_FOLDER_RELATIVE_PATH" value="/src/main/assets" />
</configuration>
</facet>
</component>
<component name="NewModuleRootManager" inherit-compiler-output="false">
<output url="file://$MODULE_DIR$/build/intermediates/classes/debug" />
<output-test url="file://$MODULE_DIR$/build/intermediates/classes/androidTest/debug" />
<exclude-output />
<content url="file://$MODULE_DIR$">
<sourceFolder url="file://$MODULE_DIR$/build/generated/source/r/debug" isTestSource="false" generated="true" />
<sourceFolder url="file://$MODULE_DIR$/build/generated/source/aidl/debug" isTestSource="false" generated="true" />
<sourceFolder url="file://$MODULE_DIR$/build/generated/source/buildConfig/debug" isTestSource="false" generated="true" />
<sourceFolder url="file://$MODULE_DIR$/build/generated/source/rs/debug" isTestSource="false" generated="true" />
<sourceFolder url="file://$MODULE_DIR$/build/generated/res/rs/debug" type="java-resource" />
<sourceFolder url="file://$MODULE_DIR$/build/generated/res/generated/debug" type="java-resource" />
<sourceFolder url="file://$MODULE_DIR$/build/generated/source/r/androidTest/debug" isTestSource="true" generated="true" />
<sourceFolder url="file://$MODULE_DIR$/build/generated/source/aidl/androidTest/debug" isTestSource="true" generated="true" />
<sourceFolder url="file://$MODULE_DIR$/build/generated/source/buildConfig/androidTest/debug" isTestSource="true" generated="true" />
<sourceFolder url="file://$MODULE_DIR$/build/generated/source/rs/androidTest/debug" isTestSource="true" generated="true" />
<sourceFolder url="file://$MODULE_DIR$/build/generated/res/rs/androidTest/debug" type="java-test-resource" />
<sourceFolder url="file://$MODULE_DIR$/build/generated/res/generated/androidTest/debug" type="java-test-resource" />
<sourceFolder url="file://$MODULE_DIR$/src/debug/res" type="java-resource" />
<sourceFolder url="file://$MODULE_DIR$/src/debug/resources" type="java-resource" />
<sourceFolder url="file://$MODULE_DIR$/src/debug/assets" type="java-resource" />
<sourceFolder url="file://$MODULE_DIR$/src/debug/aidl" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/debug/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/debug/jni" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/debug/rs" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/main/res" type="java-resource" />
<sourceFolder url="file://$MODULE_DIR$/src/main/resources" type="java-resource" />
<sourceFolder url="file://$MODULE_DIR$/src/main/assets" type="java-resource" />
<sourceFolder url="file://$MODULE_DIR$/src/main/aidl" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/main/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/main/jni" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/main/rs" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/androidTest/res" type="java-test-resource" />
<sourceFolder url="file://$MODULE_DIR$/src/androidTest/resources" type="java-test-resource" />
<sourceFolder url="file://$MODULE_DIR$/src/androidTest/assets" type="java-test-resource" />
<sourceFolder url="file://$MODULE_DIR$/src/androidTest/aidl" isTestSource="true" />
<sourceFolder url="file://$MODULE_DIR$/src/androidTest/java" isTestSource="true" />
<sourceFolder url="file://$MODULE_DIR$/src/androidTest/jni" isTestSource="true" />
<sourceFolder url="file://$MODULE_DIR$/src/androidTest/rs" isTestSource="true" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/assets" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/bundles" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/classes" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/coverage-instrumented-classes" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/dependency-cache" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/dex" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/dex-cache" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/exploded-aar/com.android.support/appcompat-v7/23.0.1/jars" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/exploded-aar/com.android.support/support-v4/23.0.1/jars" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/incremental" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/jacoco" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/javaResources" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/libs" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/lint" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/manifests" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/ndk" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/pre-dexed" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/proguard" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/res" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/rs" />
<excludeFolder url="file://$MODULE_DIR$/build/intermediates/symbols" />
<excludeFolder url="file://$MODULE_DIR$/build/outputs" />
<excludeFolder url="file://$MODULE_DIR$/build/tmp" />
</content>
<orderEntry type="jdk" jdkName="Android API 23 Platform" jdkType="Android SDK" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" exported="" name="support-v4-23.0.1" level="project" />
<orderEntry type="library" exported="" name="appcompat-v7-23.0.1" level="project" />
<orderEntry type="library" exported="" name="support-annotations-23.0.1" level="project" />
</component>
</module>

@@ -0,0 +1,25 @@
apply plugin: 'com.android.application'
android {
compileSdkVersion 23
buildToolsVersion "22.0.1"
defaultConfig {
applicationId "gazehelper.android.mmbrian.com.gazehelper"
minSdkVersion 14
targetSdkVersion 23
versionCode 1
versionName "1.0"
}
buildTypes {
release {
minifyEnabled false
proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
}
}
}
dependencies {
compile fileTree(dir: 'libs', include: ['*.jar'])
compile 'com.android.support:appcompat-v7:23.0.1'
}

@@ -0,0 +1,17 @@
# Add project specific ProGuard rules here.
# By default, the flags in this file are appended to flags specified
# in /home/mmbrian/Android/Sdk/tools/proguard/proguard-android.txt
# You can edit the include path and order by changing the proguardFiles
# directive in build.gradle.
#
# For more details, see
# http://developer.android.com/guide/developing/tools/proguard.html
# Add any project specific keep options here:
# If your project uses WebView with JS, uncomment the following
# and specify the fully qualified class name to the JavaScript interface
# class:
#-keepclassmembers class fqcn.of.javascript.interface.for.webview {
# public *;
#}

@@ -0,0 +1,21 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="gazehelper.android.mmbrian.com.gazehelper" >
<application
android:allowBackup="true"
android:icon="@mipmap/ic_launcher"
android:label="@string/app_name"
android:theme="@style/AppTheme" >
<activity
android:name=".MainActivity"
android:label="@string/app_name" >
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
</application>
</manifest>

@@ -0,0 +1,108 @@
package gazehelper.android.mmbrian.com.gazehelper;
import android.app.Activity;
import android.media.AudioManager;
import android.media.ToneGenerator;
import android.os.Bundle;
import android.os.Handler;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.EditText;
import android.widget.TextView;
import android.widget.ViewFlipper;
import java.util.Timer;
import java.util.TimerTask;
public class MainActivity extends Activity {
public static final String CLASS_TAG = "GazeHelper";
ViewFlipper flipper;
Button btn_start;
EditText txt_gaze_dur, txt_bgaze_dur, txt_ngaze;
TextView txt_stats;
int gaze_dur, bgaze_dur, ngaze, cgaze;
long total_dur, start_time;
static Timer timer;
ToneGenerator toneG;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
flipper = (ViewFlipper)findViewById(R.id.flipper);
txt_gaze_dur = (EditText)findViewById(R.id.txt_gaze_dur);
txt_bgaze_dur = (EditText)findViewById(R.id.txt_bgaze_dur);
txt_ngaze = (EditText)findViewById(R.id.txt_ngaze);
txt_stats = (TextView)findViewById(R.id.txt_stats);
timer = new Timer("Gaze Timer");
toneG = new ToneGenerator(AudioManager.STREAM_MUSIC, 100);
btn_start = (Button)findViewById(R.id.btn_start);
btn_start.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
txt_stats.setText("Starting...");
gaze_dur = Integer.valueOf(txt_gaze_dur.getText().toString());
bgaze_dur = Integer.valueOf(txt_bgaze_dur.getText().toString());
ngaze = Integer.valueOf(txt_ngaze.getText().toString());
total_dur = (ngaze * gaze_dur + (ngaze - 1) * bgaze_dur) * 1000; // in milliseconds
flipper.showNext();
cgaze = 0;
start_time = -1;
Log.d(CLASS_TAG, "Started...");
timer = new Timer("Gaze Timer");
timer.schedule(new TimerTask() {
@Override
public void run() {
runOnUiThread(new Runnable() {
@Override
public void run() {
updateUI();
}
});
}
}, bgaze_dur * 1000, (gaze_dur + bgaze_dur) * 1000); // initial delay, recall delay
// first gaze is after 2 bgaze durations, later gazes start after a bgaze
}
});
}
public void updateUI() {
Log.d(CLASS_TAG, cgaze + "/" + ngaze);
if (cgaze++ >= ngaze) {
txt_stats.setText("Finished :)");
new Handler().postDelayed(new Runnable() {
@Override
public void run() {
runOnUiThread(new Runnable() {
@Override
public void run() {
flipper.showPrevious();
}
});
}
}, gaze_dur * 1000);
timer.cancel();
return;
}
toneG.startTone(ToneGenerator.TONE_CDMA_ALERT_CALL_GUARD, 200);
// if (start_time < 0)
// start_time = System.currentTimeMillis();
// txt_stats.setText((System.currentTimeMillis()-start_time)/1000 + "");
txt_stats.setText("Gaze at Target #" + cgaze);
}
}

@@ -0,0 +1,59 @@
<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:tools="http://schemas.android.com/tools" android:layout_width="match_parent"
android:layout_height="match_parent" android:paddingLeft="@dimen/activity_horizontal_margin"
android:paddingRight="@dimen/activity_horizontal_margin"
android:paddingTop="@dimen/activity_vertical_margin"
android:paddingBottom="@dimen/activity_vertical_margin" tools:context=".MainActivity">
<ViewFlipper
android:id="@+id/flipper"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_centerInParent="true">
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="vertical"
android:layout_centerInParent="true">
<EditText
android:id="@+id/txt_gaze_dur"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:hint="Gaze Duration (Seconds)"
android:inputType="number"/>
<EditText
android:id="@+id/txt_bgaze_dur"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:hint="Between-Gaze Duration (Seconds)"
android:inputType="number"/>
<EditText
android:id="@+id/txt_ngaze"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:hint="Number of Targets"
android:inputType="number"/>
<Button
android:id="@+id/btn_start"
style="@android:style/TextAppearance.DeviceDefault.Medium"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Start"/>
</LinearLayout>
<TextView
android:id="@+id/txt_stats"
style="@android:style/TextAppearance.DeviceDefault.Large"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_gravity="center_vertical"
android:gravity="center"
android:text=""/>
</ViewFlipper>
</RelativeLayout>

@@ -0,0 +1,6 @@
<resources>
<!-- Example customization of dimensions originally defined in res/values/dimens.xml
(such as screen margins) for screens with more than 820dp of available width. This
would include 7" and 10" devices in landscape (~960dp and ~1280dp respectively). -->
<dimen name="activity_horizontal_margin">64dp</dimen>
</resources>

@@ -0,0 +1,5 @@
<resources>
<!-- Default screen margins, per the Android Design guidelines. -->
<dimen name="activity_horizontal_margin">16dp</dimen>
<dimen name="activity_vertical_margin">16dp</dimen>
</resources>

@@ -0,0 +1,6 @@
<resources>
<string name="app_name">GazeHelper</string>
<string name="hello_world">Hello world!</string>
<string name="action_settings">Settings</string>
</resources>

@@ -0,0 +1,8 @@
<resources>
<!-- Base application theme. -->
<style name="AppTheme" parent="Theme.AppCompat.Light.DarkActionBar">
<!-- Customize your theme here. -->
</style>
</resources>

@@ -0,0 +1,19 @@
// Top-level build file where you can add configuration options common to all sub-projects/modules.
buildscript {
repositories {
jcenter()
}
dependencies {
classpath 'com.android.tools.build:gradle:1.2.3'
// NOTE: Do not place your application dependencies here; they belong
// in the individual module build.gradle files
}
}
allprojects {
repositories {
jcenter()
}
}

@@ -0,0 +1,18 @@
# Project-wide Gradle settings.
# IDE (e.g. Android Studio) users:
# Gradle settings configured through the IDE *will override*
# any settings specified in this file.
# For more details on how to configure your build environment visit
# http://www.gradle.org/docs/current/userguide/build_environment.html
# Specifies the JVM arguments used for the daemon process.
# The setting is particularly useful for tweaking memory settings.
# Default value: -Xmx10248m -XX:MaxPermSize=256m
# org.gradle.jvmargs=-Xmx2048m -XX:MaxPermSize=512m -XX:+HeapDumpOnOutOfMemoryError -Dfile.encoding=UTF-8
# When configured, Gradle will run in incubating parallel mode.
# This option should only be used with decoupled projects. More details, visit
# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects
# org.gradle.parallel=true

@@ -0,0 +1,6 @@
#Wed Apr 10 15:27:10 PDT 2013
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-2.2.1-all.zip

code/recording/util/GazeHelper/gradlew vendored Normal file
@@ -0,0 +1,164 @@
#!/usr/bin/env bash
##############################################################################
##
## Gradle start up script for UN*X
##
##############################################################################
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS=""
APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"
warn ( ) {
echo "$*"
}
die ( ) {
echo
echo "$*"
echo
exit 1
}
# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
case "`uname`" in
CYGWIN* )
cygwin=true
;;
Darwin* )
darwin=true
;;
MINGW* )
msys=true
;;
esac
# For Cygwin, ensure paths are in UNIX format before anything is touched.
if $cygwin ; then
[ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
fi
# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`"/$link"
fi
done
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >&-
APP_HOME="`pwd -P`"
cd "$SAVED" >&-
CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
if [ ! -x "$JAVACMD" ] ; then
die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
else
JAVACMD="java"
which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
# Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then
MAX_FD_LIMIT=`ulimit -H -n`
if [ $? -eq 0 ] ; then
if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
MAX_FD="$MAX_FD_LIMIT"
fi
ulimit -n $MAX_FD
if [ $? -ne 0 ] ; then
warn "Could not set maximum file descriptor limit: $MAX_FD"
fi
else
warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
fi
fi
# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi
# For Cygwin, switch paths to Windows format before running java
if $cygwin ; then
APP_HOME=`cygpath --path --mixed "$APP_HOME"`
CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
# We build the pattern for arguments to be converted via cygpath
ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
SEP=""
for dir in $ROOTDIRSRAW ; do
ROOTDIRS="$ROOTDIRS$SEP$dir"
SEP="|"
done
OURCYGPATTERN="(^($ROOTDIRS))"
# Add a user-defined pattern to the cygpath arguments
if [ "$GRADLE_CYGPATTERN" != "" ] ; then
OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
fi
# Now convert the arguments - kludge to limit ourselves to /bin/sh
i=0
for arg in "$@" ; do
CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
else
eval `echo args$i`="\"$arg\""
fi
i=$((i+1))
done
case $i in
(0) set -- ;;
(1) set -- "$args0" ;;
(2) set -- "$args0" "$args1" ;;
(3) set -- "$args0" "$args1" "$args2" ;;
(4) set -- "$args0" "$args1" "$args2" "$args3" ;;
(5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
(6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
(7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
(8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
(9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
esac
fi
# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules
function splitJvmOpts() {
JVM_OPTS=("$@")
}
eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS
JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME"
exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@"

@@ -0,0 +1,90 @@
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS=
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto init
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:init
@rem Get command-line arguments, handling Windowz variants
if not "%OS%" == "Windows_NT" goto win9xME_args
if "%@eval[2+2]" == "4" goto 4NT_args
:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2
:win9xME_args_slurp
if "x%~1" == "x" goto execute
set CMD_LINE_ARGS=%*
goto execute
:4NT_args
@rem Get arguments from the 4NT Shell from JP Software
set CMD_LINE_ARGS=%$
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega

@@ -0,0 +1 @@
include ':app'

@@ -0,0 +1,102 @@
float marker_image_width;
float screen_border_width;
float mark_pause; // seconds
// Represents a 9-point grid calibration board
class CalibrationBoard {
PVector[] marks;
float xgap, ygap, xOffset, yOffset;
// standard order for 9point grid, starting from center and then clockwise
int[] inds = new int[] {1, 2, 3, 8, 0, 4, 7, 6, 5};
public void shuffleCalibrationOrder() { // Fisher-Yates shuffle
for (int i = inds.length - 1; i > 0; i--)
{
int index = int(random(0, i + 1));
int tmp = inds[index];
inds[index] = inds[i];
inds[i] = tmp;
}
}
public void generateInds(int grid_width) {
inds = new int[grid_width*grid_width];
for (int i=0; i<inds.length; i++) {
inds[i] = i;
}
}
public CalibrationBoard(int mode, int grid_width, float xOffset, float yOffset, float xgap, float ygap) {
if (mode == 1) {
this.xOffset = xOffset;
this.yOffset = yOffset;
generateInds(grid_width);
shuffleCalibrationOrder();
if (xgap == -1 || ygap == -1) {
computeGridMarks(grid_width, false);
} else {
this.xgap = xgap;
this.ygap = ygap;
computeGridMarks(grid_width, true);
}
} else {
int num = grid_width; // num of calibration points
inds = new int[num];
for (int i=0; i<inds.length; i++)
inds[i] = i;
generateRandomMarks();
}
}
public void generateRandomMarks() {
float w = width - 2*screen_border_width - marker_image_width;
float xOffset = screen_border_width + marker_image_width/2.;
float h = height - 2*screen_border_width - marker_image_width;
float yOffset = screen_border_width + marker_image_width/2.;
float x, y;
marks = new PVector[inds.length];
for (int i=0; i<inds.length; i++) {
x = random(0, w);
y = random(0, h);
marks[inds[i]] = new PVector(x + xOffset, y + yOffset);
}
}
public void computeGridMarks(int grid_width, boolean custom_gap_size) {
float w = width - 2*screen_border_width;
float h = height - 2*screen_border_width;
if (!custom_gap_size) {
xgap = (w - grid_width*marker_image_width)/float(grid_width-1);
ygap = (h - grid_width*marker_image_width)/float(grid_width-1);
}
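// e.g. five 64 px markers across a 1920 px wide screen with no border:
// xgap = (1920 - 5*64)/4 = 400 px between marker cells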
marks = new PVector[grid_width*grid_width];
float x, y;
int c = 0;
for (int i=0; i<grid_width; i++) {
y = i * (ygap + marker_image_width) + screen_border_width + yOffset;
y += marker_image_width/2.;
for (int j=0; j<grid_width; j++) {
x = j * (xgap + marker_image_width) + screen_border_width + xOffset;
x += marker_image_width/2.;
marks[inds[c++]] = new PVector(x, y);
}
}
}
public void drawMarks() {
for (int i=0; i<marks.length; i++) {
drawMark(marks[i]);
fill(0);
//text(i + "", marks[i].x + 3, marks[i].y - 5);
}
}
private void drawMark(PVector pos) {
int w = 13;
stroke(0);
line(pos.x - w, pos.y, pos.x + w, pos.y);
line(pos.x, pos.y - w, pos.x, pos.y + w);
}
}

@@ -0,0 +1,69 @@
int marker_stroke_weight;
int marker_stroke_width;
float marker_move_step;
class Marker {
PVector p, c;
float w, h;
PImage image;
PVector dest;
boolean onDest;
public Marker(PImage img) {
this.image = img;
this.w = img.width;
this.h = img.height;
this.c = new PVector(width/2., height/2.);
this.p = new PVector(c.x - w/2., c.y - h/2.);
this.dest = this.p;
this.onDest = true;
}
public void draw() {
image(image, p.x, p.y);
stroke(255, 0, 0);
strokeWeight(marker_stroke_weight);
line(c.x - marker_stroke_width, c.y, c.x + marker_stroke_width, c.y);
line(c.x, c.y - marker_stroke_width, c.x, c.y + marker_stroke_width);
}
public void update() {
if (p.x != dest.x || p.y != dest.y) {
onDest = false;
PVector diff = dest.get();
diff.sub(p);
if (diff.mag() > marker_move_step) {
diff.normalize();
moveX(diff.x * marker_move_step);
moveY(diff.y * marker_move_step);
} else {
setX(dest.x);
setY(dest.y);
}
} else {
onDest = true;
}
}
public void moveX(float step) {
this.p.x += step;
this.c.x += step;
}
public void moveY(float step) {
this.p.y += step;
this.c.y += step;
}
public void setX(float x) {
this.p.x = x;
this.c.x = x + w/2.;
}
public void setY(float y) {
this.p.y = y;
this.c.y = y + h/2.;
}
}

@@ -0,0 +1,177 @@
Marker marker;
CalibrationBoard gc, gc_test;
String status = "not started";
int mark_pause_start = -1;
int curr_mark = 0;
boolean pause = true;
boolean finished = false;
boolean display_marks = true;
boolean calibration_mode = true;
boolean display_stats = true;
float start_delay = 3;
int count_down_begin;
void setup() {
fullScreen();
//size(640, 480);
frameRate(60);
screen_border_width = 0;
mark_pause = 2.0; // in seconds
marker_stroke_weight = 4;
marker_stroke_width = 13;
marker_move_step = 7;
PImage img = loadImage("1023.png");
marker_image_width = img.width;
marker = new Marker(img);
gc = new CalibrationBoard(1, 5, 0, 0, -1, -1); // 5x5 grid, i.e. 25 calibration points
gc_test = new CalibrationBoard(1, 4, (gc.xgap + marker_image_width)/2., (gc.ygap + marker_image_width)/2., gc.xgap, gc.ygap);
resetMarker();
}
void reset() {
mark_pause_start = -1;
curr_mark = 0;
pause = true;
finished = false;
display_marks = true;
calibration_mode = true;
gc = new CalibrationBoard(1, 5, 0, 0, -1, -1); // 5x5 grid, i.e. 25 calibration points
gc_test = new CalibrationBoard(1, 4, (gc.xgap + marker_image_width)/2., (gc.ygap + marker_image_width)/2., gc.xgap, gc.ygap);
resetMarker();
}
void resetMarker() {
float x, y;
if (calibration_mode) {
x = gc.marks[curr_mark].x - marker_image_width/2.;
y = gc.marks[curr_mark].y - marker_image_width/2.;
} else {
x = gc_test.marks[curr_mark].x - marker_image_width/2.;
y = gc_test.marks[curr_mark].y - marker_image_width/2.;
}
marker.dest = new PVector(x, y);
marker.setX(x);
marker.setY(y);
}
void draw() {
background(255);
if (!finished) {
marker.draw();
marker.update();
}
if (!pause) {
if (frameCount - count_down_begin > start_delay * frameRate) {
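// all timing in this sketch is frame-based: t seconds corresponds to t * frameRate frames (60 fps here)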
status = "started";
if (marker.onDest) {
if (mark_pause_start < 0) {
mark_pause_start = frameCount;
} else {
status = ((frameCount - mark_pause_start) / frameRate) + "";
if (frameCount - mark_pause_start > mark_pause * frameRate) {
if ((calibration_mode && curr_mark < gc.inds.length-1) || (!calibration_mode && curr_mark < gc_test.inds.length-1)) {
PVector destC;
if (calibration_mode) {
destC = gc.marks[++curr_mark];
} else {
destC = gc_test.marks[++curr_mark];
}
marker.dest = new PVector(destC.x - marker_image_width/2., destC.y - marker_image_width/2.);
} else {
status = "finished";
finished = true;
}
}
}
} else {
status = "moving";
mark_pause_start = -1;
}
} else {
text(((frameCount - count_down_begin)/frameRate) + "/" + start_delay, width/2, height-7);
}
} else {
status = "paused";
}
if (display_marks) {
gc.drawMarks();
gc_test.drawMarks();
}
//fill(255, 0, 0);
//noStroke();
//ellipse(marker.dest.x, marker.dest.y, 5, 5);
//fill(0, 0, 255);
//for (PVector m: gc.marks) {
// ellipse(m.x - marker_image_width/2., m.y - marker_image_width/2., 5, 5);
//}
if (finished) {
fill(255, 0, 0);
} else if (pause) {
fill(0);
} else {
fill(0, 0, 255);
}
if (display_stats) {
text("status: " + status, 3, 13);
String progress;
if (calibration_mode) {
progress = (curr_mark+1) + "/" + gc.inds.length;
} else {
progress = (curr_mark+1) + "/" + gc_test.inds.length;
}
text(progress, 3, height-7);
}
}
void keyPressed() {
println(keyCode);
switch (keyCode) {
case 32: // Space Button
pause = !pause;
display_marks = false;
if (!pause)
count_down_begin = frameCount;
break;
case 77: // M key
display_marks = !display_marks;
break;
case 67: // C key
curr_mark = 0;
calibration_mode = true;
println("calibration mode");
resetMarker();
break;
case 84: // T key
curr_mark = 0;
calibration_mode = false;
println("test mode");
resetMarker();
break;
case 82: // R key
reset();
break;
case 83: // S key
saveFrame("screenshot_###.png");
break;
case 72: // H key
display_stats = !display_stats;
break;
default:
break;
}
}

@@ -0,0 +1,2 @@
mode.id=processing.mode.java.JavaMode
mode=Java

@@ -0,0 +1,18 @@
from __future__ import division
import numpy as np
import os, sys
if __name__ == '__main__':
try:
path = sys.argv[1]
time_stamps = np.load(os.path.join(path, 'world_timestamps.npy'))
pts = np.load(os.path.join(path, 'pupil_positions.npy'))
valid = filter(lambda e: e[1] > 0, pts) # keep only samples with nonzero confidence
csum = sum(e[1] for e in pts)
print 'Ratio: %s' % round(len(valid)*100/len(pts), 2) + '%'
if len(valid):
print 'Average Confidence:', csum/len(valid)
print 'Frames: %s' % len(time_stamps)
except Exception, err:
print 'Something went wrong.'
print err
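The script takes the recording directory as its single argument. Assuming it is saved as confidence.py (the filename is not shown in this diff), a run looks like:

python confidence.py /home/mmbrian/HiWi/pupil_clone/pupil/recordings/2015_09_10/007/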

@@ -0,0 +1,201 @@
import os, sys
import Image
from random import sample
'''
IMPORTANT:
- When printing images generated with this code you HAVE to ignore page margins! (use GIMP)
- Markers on a board must be separated by white borders or ArUco won't detect them
'''
ARUCO_MARKER_CREATOR = '/home/mmbrian/temp/aruco-1.3.0/build/utils/aruco_create_marker '
DESTINATION_DIR = '/home/mmbrian/HiWi/etra2016_mohsen/code/recording/util/markers/'
DEFAULT_MARKER_SIZE_IN_PIXELS = 64
DEFAULT_MARKER_BORDER_IN_PIXELS = 7 # only applies for random boards
DEFAULT_MARKER_SIZE_IN_MM = 40
A4_PADDING_IN_PIXELS = 50
def createMarker(marker_id, size_in_pixels, format = 'png'):
assert marker_id >= 0 and marker_id < 1024, 'Invalid Marker ID. Must be in Range 0-1023'
cmd = ARUCO_MARKER_CREATOR + '{0} ' + DESTINATION_DIR + '{0}.' + format + ' {1} 0'
os.popen(cmd.format(marker_id, size_in_pixels))
def handleMarkers():
'''
Usage:
marker.py num_of_markers size_in_pixels
marker.py -i marker_id size_in_pixels
'''
if len(sys.argv) > 3:
try:
createMarker(int(sys.argv[2]), int(sys.argv[3]))
except:
print 'Usage: marker.py -i marker_id size_in_pixels'
else:
try:
num = int(sys.argv[1])
assert num>0 and num<=1024
except:
print 'Invalid number of markers. please specify a number between 1 and 1024'
return
size = DEFAULT_MARKER_SIZE_IN_PIXELS
try:
size = int(sys.argv[2])
print 'Marker size set to', size, 'px'
except:
print 'Marker size set to default size', size, 'px'
print 'Creating', num, 'random markers...'
generateRandomMarkers(num, size)
def generateRandomMarkers(num, size):
# for i in range(num):
for i, _id in enumerate(sorted(sample(range(1024), num))):
createMarker(_id, size)
print i+1, '/', num
print 'Finished.'
def computeDimensions():
A4W, A4H = 210, 297 # A4 dimension in millimeters
MM2IN = 25.4 # millimeters per inch
dpi = -1 # Must be given (use xdpyinfo under linux)
marker_width = -1 # Arbitrary value which must be given by user (in millimeters)
try:
dpi = int(sys.argv[2])
assert dpi>0
except:
print sys.argv
print 'Invalid dpi.'
return
marker_width = DEFAULT_MARKER_SIZE_IN_MM
try:
marker_width = int(sys.argv[3])
print 'Marker size set to', marker_width, 'mm'
except:
print 'Marker size set to default size', marker_width, 'mm'
# Width and Height of A4 image in pixels
xWidth = A4W / MM2IN * dpi
yWidth = A4H / MM2IN * dpi
# Marker width in pixels
mWidth = marker_width / MM2IN * dpi
e = mWidth - int(mWidth) # subpixel error in marker width
eMM = e / dpi * MM2IN # subpixel error in millimeters (report to user)
print 'Marker size finally set to', marker_width - eMM, 'mm'
# Converting pixel values to real integers
xWidth, yWidth, mWidth = int(xWidth), int(yWidth), int(mWidth)
return xWidth, yWidth, mWidth
def removeMarkers():
print 'Removing old markers...'
os.popen('rm ' + DESTINATION_DIR + '*.png') # Removing previous markers
def generateGrid():
'''
Generates a picture of a 9-point calibration grid using random markers, the size of which is
calculated such that after printing the marker size is of a specified length in millimeters
Usage: marker.py dpi marker_size_in_mm
'''
xWidth, yWidth, mWidth = computeDimensions()
# Create markers with this pixel size
removeMarkers()
print 'Generating new markers...'
generateRandomMarkers(9, mWidth)
# Stitch markers and generate grid
print 'Creating grid...'
markers = []
for f in os.listdir(DESTINATION_DIR):
if f.endswith('.png'):
markers.append(Image.open(os.path.join(DESTINATION_DIR, f))) # open from the markers directory, not the cwd
grid = Image.new("RGB", (xWidth, yWidth), (255, 255, 255))
for row in range(3):
if row == 0:
y = A4_PADDING_IN_PIXELS
elif row == 1:
y = (yWidth - mWidth) / 2
else:
y = yWidth - A4_PADDING_IN_PIXELS - mWidth
for col in range(3):
if col == 0:
x = A4_PADDING_IN_PIXELS
elif col == 1:
x = (xWidth - mWidth) / 2
else:
x = xWidth - A4_PADDING_IN_PIXELS - mWidth
grid.paste(markers[row*3+col], (x, y))
grid.save(DESTINATION_DIR + 'grid_board/grid.jpg')
print 'Finished.'
def generateRandomBoard():
xWidth, yWidth, mWidth = computeDimensions()
# Get number of random markers
try:
n = int(sys.argv[4])
assert n>0 and n<150 # at most 149 markers allowed
except:
print 'Please choose between 1 and 149 markers'
return
# Create markers with this pixel size
removeMarkers()
print 'Generating new markers...'
generateRandomMarkers(n, mWidth)
# Stitch markers and generate board
print 'Creating board...'
markers = []
for f in os.listdir(DESTINATION_DIR):
if f.endswith('.png'):
markers.append(Image.open(os.path.join(DESTINATION_DIR, f))) # open from the markers directory, not the cwd
board = Image.new("RGB", (xWidth, yWidth), (255, 255, 255))
# Now we have to randomly append markers to board in a way that they do not collide
# To simplify this, we gridify the image based on marker size and choose a random sample
# of grids (not entirely random but in case markers are small enough it gives a good sample)
w = (xWidth - 2*A4_PADDING_IN_PIXELS) / mWidth
h = (yWidth - 2*A4_PADDING_IN_PIXELS) / mWidth
w, h = int(w), int(h) # w and h are maximum row and column size for the grid
# Now markers would take w*mWidth pixels in a row with no borders, applying border size:
w = ((xWidth - 2*A4_PADDING_IN_PIXELS) - (w-1)*DEFAULT_MARKER_BORDER_IN_PIXELS) / mWidth
h = ((yWidth - 2*A4_PADDING_IN_PIXELS) - (h-1)*DEFAULT_MARKER_BORDER_IN_PIXELS) / mWidth
w, h = int(w), int(h) # w and h now also count for marker borders
xOffset = A4_PADDING_IN_PIXELS + ((xWidth - 2*A4_PADDING_IN_PIXELS) - w*mWidth - (w-1)*DEFAULT_MARKER_BORDER_IN_PIXELS)/2
yOffset = A4_PADDING_IN_PIXELS + ((yWidth - 2*A4_PADDING_IN_PIXELS) - h*mWidth - (h-1)*DEFAULT_MARKER_BORDER_IN_PIXELS)/2
grids = [(xOffset + c*mWidth + c*DEFAULT_MARKER_BORDER_IN_PIXELS,
yOffset + r*mWidth + r*DEFAULT_MARKER_BORDER_IN_PIXELS) for r in xrange(h) for c in xrange(w)]
print len(grids), 'possible positions exist for markers...'
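# e.g. on A4 at 96 dpi with 40 mm (~151 px) markers and 50 px padding this
# gridification yields 4 columns x 6 rows = 24 candidate positions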
n = min(n, len(grids))
for i, pos in enumerate(sample(grids, n)):
board.paste(markers[i], pos)
board.save(DESTINATION_DIR + 'random_board/board.jpg')
print 'Finished.'
def main():
assert len(sys.argv) > 1, 'Invalid number of arguments.'
if sys.argv[1] == 'grid':
generateGrid()
elif sys.argv[1] == 'rand':
generateRandomBoard()
else:
handleMarkers()
if __name__ == '__main__':
main()
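The mm-to-pixel conversion in computeDimensions is worth checking by hand. A minimal sketch with assumed example values (96 dpi, 40 mm markers):

MM2IN = 25.4  # millimeters per inch
dpi, marker_width = 96, 40            # assumed example values
mWidth = marker_width / MM2IN * dpi   # 151.18... px for a 40 mm marker at 96 dpi
e = mWidth - int(mWidth)              # subpixel error in pixels
print int(mWidth), 'px, error', e / dpi * MM2IN, 'mm'  # 151 px, ~0.048 mm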

@@ -0,0 +1,44 @@
from __future__ import division
import numpy as np
'''
Reference: https://stackoverflow.com/questions/22354094/pythonic-way-of-detecting-outliers-in-one-dimensional-observation-data
'''
def is_outlier(points, thresh=3.5):
"""
Returns a boolean array with True if points are outliers and False
otherwise.
Parameters:
-----------
points : A numobservations-by-numdimensions array of observations
thresh : The modified z-score to use as a threshold. Observations with
a modified z-score (based on the median absolute deviation) greater
than this value will be classified as outliers.
Returns:
--------
mask : A numobservations-length boolean array.
References:
----------
Boris Iglewicz and David Hoaglin (1993), "Volume 16: How to Detect and
Handle Outliers", The ASQC Basic References in Quality Control:
Statistical Techniques, Edward F. Mykytka, Ph.D., Editor.
"""
if len(points.shape) == 1:
points = points[:,None]
median = np.median(points, axis=0)
diff = np.sum((points - median)**2, axis=-1)
diff = np.sqrt(diff)
med_abs_deviation = np.median(diff)
modified_z_score = 0.6745 * diff / med_abs_deviation
return modified_z_score > thresh
def moving_average(interval, window_size):
window = np.ones(int(window_size))/float(window_size)
return np.convolve(interval, window, 'same')
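A minimal usage sketch for the two helpers above (appended for illustration; the speed signal is synthetic):

import numpy as np

speed = np.array([0.10, 0.12, 0.11, 5.00, 0.10, 0.09, 0.13, 0.10])  # one spike
print is_outlier(speed)         # only the 5.00 sample is flagged as an outlier
print moving_average(speed, 3)  # smoothed copy of the same signal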