from __future__ import division

import numpy as np
from numpy import linalg as LA

from minimize import findInitialW, _q, g, minimizeEnergy
from time import time
from geom import getSphericalCoords, getAngularDiff
from recording.tracker import Marker

try:
    from visual import vector as v
except ImportError:
    from vector import Vector as v


DATA_DIR = './recording/data/'
# DATA_DIR = '.\\recording\\data\\'

# Update these accordingly for testing out your own data
EYE_CAMERA_IMAGE_WIDTH = 640
EYE_CAMERA_IMAGE_HEIGHT = 360

curr_calibration_experiment = '006'
curr_test_experiment = '010'
# markers in order of being targeted:
experiments = {'005': [130, 608, 456, 399, 659, 301, 351, 707, 18],
               '006': [130, 608, 456, 399, 659, 301, 351, 707, 18],
               '007': [449, 914, 735, 842, 347, 660, 392, 782],
               '010': [449, 914, 554, 243, 347, 173, 664, 399]}


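# Pupil positions arrive as normalized image coordinates (presumably in [0, 1] x [0, 1]);
# denormalize() maps them to pixel coordinates relative to the eye-image center.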
def denormalize(p):
    # return p * np.array([EYE_CAMERA_IMAGE_WIDTH, EYE_CAMERA_IMAGE_HEIGHT])
    return p * np.array([EYE_CAMERA_IMAGE_WIDTH, EYE_CAMERA_IMAGE_HEIGHT]) - \
        (np.array([EYE_CAMERA_IMAGE_WIDTH, EYE_CAMERA_IMAGE_HEIGHT]) / 2)


def main():
    single_point = False
    __p, _t = [], []
    p, t = [], []

    # Fetching marker position wrt camera t from calibration data
    marker_data = np.load(DATA_DIR + 'frames_%s.npy' % curr_calibration_experiment)
    # marker_data includes data on tracked markers per frame
    # it's a list with as many entries as the number of video frames, each entry
    # has a list of tracked markers, each marker item has marker id, marker corners, Rvec, Tvec
    # TODO (remember the unit)
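    # In the loops below, marker[0][0] is read as the marker id, and
    # Marker.fromList(marker).getCenter() gives the marker's 3D center,
    # presumably expressed in the scene-camera coordinate frame.
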
    # Fetching pupil positions p from calibration data
    pupil_data = np.load(DATA_DIR + 'pp_%s.npy' % curr_calibration_experiment)
    # pupil_data is a list of tracked pupil positions, each entry has 3 elements
    # array: frame range (start, end)
    # array: mean pupil position
    # list: all pupil positions in the range
    # TODO (also remember to denormalize)
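    # An entry therefore looks roughly like (illustrative layout, not from a real recording):
    #   [(start, end), mean_position, [pos_start, ..., pos_end]]
    # where each position is a normalized 2D pupil coordinate.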
    for i, pos in enumerate(pupil_data):
        corresponding_marker_id = experiments[curr_calibration_experiment][i]
        # print corresponding_marker_id

        start, end = pos[0]
        if len(pos[2]) == end-start+1:  # all samples for this point are reliable
            # add all corresponding pupil/3D-point pairs as mappings
            # print start, end, len(pos[2])
            for j, _p in enumerate(pos[2]):
                frame_number = start + j
                if frame_number >= len(marker_data): continue  # TODO: investigate
                for marker in marker_data[frame_number]:
                    if marker[0][0] == corresponding_marker_id:
                        if single_point:
                            __p.append(denormalize(_p))
                            _t.append(Marker.fromList(marker).getCenter())
                        else:
                            p.append(denormalize(_p))
                            t.append(Marker.fromList(marker).getCenter())
            if single_point and len(__p):
                p.append(sum(__p)/len(__p))
                t.append(sum(_t)/len(_t))
                __p, _t = [], []

        else:  # if pos[2] is nonempty, consider the mean
            if len(pos[2]):
                # TODO: here we can still map the corresponding pupil points to their
                # detected markers given we have the frame correspondence (investigate)
                # map pos[1] to corresponding markers
                for frame_number in xrange(start, end+1):
                    if frame_number >= len(marker_data): continue  # TODO: investigate
                    for marker in marker_data[frame_number]:
                        if marker[0][0] == corresponding_marker_id:
                            if single_point:
                                __p.append(denormalize(pos[1]))
                                _t.append(Marker.fromList(marker).getCenter())
                            else:
                                p.append(denormalize(pos[1]))
                                t.append(Marker.fromList(marker).getCenter())
                if single_point and len(__p):
                    p.append(sum(__p)/len(__p))
                    t.append(sum(_t)/len(_t))
                    __p, _t = [], []
            else:
                pass
                # No mapping here

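    # At this point p holds the (denormalized) 2D pupil positions and t the
    # corresponding 3D marker centers, one pair per usable calibration sample.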
    print len(p), len(t)
    # print p[0], t[0]

    # we have to denormalize pupil points and correlate the two data streams (frame correspondence)

    print 'Successfully loaded calibration data...'
    # return

    print 'Performing minimization...'
    ## Finding the optimal transformation matrix by minimizing the nonlinear energy
    # w0 is the initial w, obtained by solving the least squares with e=(0, 0, 0)
    # w is obtained by solving the least squares again, optimizing for both e and w
    start = time()
    w, e, w0 = minimizeEnergy(p, t)
    minimizationTime = time() - start
    print 'minimization time:', minimizationTime

    p, t = [], []
    marker_data = np.load(DATA_DIR + 'frames_%s.npy' % curr_test_experiment)
    pupil_data = np.load(DATA_DIR + 'pp_%s.npy' % curr_test_experiment)
    print len(pupil_data), len(experiments[curr_test_experiment])

    for i, pos in enumerate(pupil_data):
        corresponding_marker_id = experiments[curr_test_experiment][i]
        # print corresponding_marker_id

        start, end = pos[0]
        if len(pos[2]) == end-start+1:  # all samples for this point are reliable
            # add all corresponding pupil/3D-point pairs as mappings
            # print start, end, len(pos[2])
            for j, _p in enumerate(pos[2]):
                frame_number = start + j
                if frame_number >= len(marker_data): continue  # TODO: investigate
                for marker in marker_data[frame_number]:
                    if marker[0][0] == corresponding_marker_id:
                        if single_point:
                            __p.append(denormalize(_p))
                            _t.append(Marker.fromList(marker).getCenter())
                        else:
                            p.append(denormalize(_p))
                            t.append(Marker.fromList(marker).getCenter())
            if single_point and len(__p):
                p.append(sum(__p)/len(__p))
                t.append(sum(_t)/len(_t))
                __p, _t = [], []

        else:  # if pos[2] is nonempty, consider the mean
            if len(pos[2]):
                # TODO: here we can still map the corresponding pupil points to their
                # detected markers given we have the frame correspondence (investigate)
                # map pos[1] to corresponding markers
                for frame_number in xrange(start, end+1):
                    if frame_number >= len(marker_data): continue  # TODO: investigate
                    for marker in marker_data[frame_number]:
                        if marker[0][0] == corresponding_marker_id:
                            if single_point:
                                __p.append(denormalize(pos[1]))
                                _t.append(Marker.fromList(marker).getCenter())
                            else:
                                p.append(denormalize(pos[1]))
                                t.append(Marker.fromList(marker).getCenter())
                if single_point and len(__p):
                    p.append(sum(__p)/len(__p))
                    t.append(sum(_t)/len(_t))
                    __p, _t = [], []
            else:
                pass

    print 'Successfully loaded test data...'

    # closest point distance to scene camera
    cDist = min(v(pt).mag for pt in t)
    # farthest point distance to scene camera
    fDist = max(v(pt).mag for pt in t)
    # average point distance to scene camera
    avgDist = sum(v(pt).mag for pt in t)/len(t)
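    # These distances are in whatever unit the marker translation vectors were
    # recorded in (see the TODO above about remembering the unit).
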
    qi = map(_q, p)  # computing feature vectors from raw pupil coordinates in 2D
    # computing unit gaze vectors corresponding to pupil positions
    # here we use the computed mapping matrix w
    gis = map(lambda q: g(q, w), qi)
    gis0 = map(lambda q: g(q, w0), qi)

    # now we can compare unit gaze vectors with their corresponding gaze rays t
    # normalizing gaze rays first
    t = np.array(map(lambda vec: v(vec).norm(), t))
    # TODO: compare spherical coordinates instead

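    # For two vectors a and b, tan(angle) = |a x b| / (a . b), so the expression
    # below gives the angular error in degrees; using arctan (rather than arctan2)
    # implicitly assumes the angle between estimate and target stays below 90 degrees.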
    AE = list(np.degrees(np.arctan((v(pair[0]).cross(pair[1])/(v(pair[0]).dot(pair[1]))).mag)) for pair in zip(gis, t))
    N = len(t)
    AAE = sum(AE)/N
    VAR = sum((ae - AAE)**2 for ae in AE)/N
    print 'AAE:', AAE, '\nVariance:', VAR, 'STD:', np.sqrt(VAR), '\nMin:', min(AE), 'Max:', max(AE), '(N=' + str(N) + ')'
    print 'Target Distances: m=%s M=%s Avg=%s' % (cDist, fDist, avgDist)

    AE0 = list(np.degrees(np.arctan((v(pair[0]).cross(pair[1])/(v(pair[0]).dot(pair[1]))).mag)) for pair in zip(gis0, t))
    AAE0 = sum(AE0)/N
    print 'AAE (only optimizing W for e=(0,0,0)):', AAE0


if __name__ == '__main__':
    main()