updated code with more comments

parent 460a71d2b3
commit 67123bb970
11 changed files with 262 additions and 1116 deletions
@@ -60,17 +60,13 @@ class Parallax2Dto2DMapping(Experiment):
print 'scene_camera', sim.scene_camera.t
print 'calibration'
print len(sim.calibration_points)
# print sim.calibration_points
print min(np.array(sim.calibration_points)[:,0]) - sim.scene_camera.t[0], max(np.array(sim.calibration_points)[:,0]) - sim.scene_camera.t[0]
print min(np.array(sim.calibration_points)[:,1]) - sim.scene_camera.t[1], max(np.array(sim.calibration_points)[:,1]) - sim.scene_camera.t[1]
print 'depths', set(np.array(sim.calibration_points)[:,2])
print 'test'
print len(sim.test_points)
print min(np.array(sim.test_points)[:,0]) - sim.scene_camera.t[0], max(np.array(sim.test_points)[:,0]) - sim.scene_camera.t[0]
print min(np.array(sim.test_points)[:,1]) - sim.scene_camera.t[1], max(np.array(sim.test_points)[:,1]) - sim.scene_camera.t[1]
print 'depths', set(np.array(sim.test_points)[:,2])

print 'depths', set(np.array(sim.test_points)[:,2])

plt.ylabel('Y (mm)')
plt.xlabel('X (mm)')
@@ -84,18 +80,17 @@ class Parallax2Dto2DMapping(Experiment):
class Parallax2Dto3DMapping(Experiment):
'''
IMPORTANT!
In all experiments, the scene camera's rvec = (0, 0, 0), i.e. the corresponding rotation matrix is the identity matrix, therefore
I have not included the dot product with this rotation matrix to convert points in world coordinates
into scene camera coordinates. However, one should know that if the scene camera is rotated differently
this transformation is essential. I would add the corresponding computations later on.
In all experiments, the scene camera's rvec = (0, 0, 0), i.e. the corresponding rotation matrix is the identity
matrix, therefore I have not included the dot product with this rotation matrix to convert points in world
coordinates into scene camera coordinates. However, one should know that if the scene camera is rotated
differently this transformation is essential.
'''
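For reference, the world-to-scene-camera transform this docstring alludes to would look roughly like the sketch below. This is an illustrative sketch only, assuming an OpenCV-style rotation vector and that the camera's `t` holds its position in world coordinates (as the subtractions of `sim.scene_camera.t` in the first hunk suggest); `world_to_camera`, `rvec`, and `cam_pos` are hypothetical names, not identifiers from this repository.

import numpy as np
import cv2

def world_to_camera(p_world, rvec, cam_pos):
    # Rodrigues turns the rotation vector into a 3x3 rotation matrix;
    # rvec = (0, 0, 0) yields the identity matrix.
    R, _ = cv2.Rodrigues(np.asarray(rvec, dtype=float))
    # General case: translate into the camera frame, then rotate.
    return R.dot(np.asarray(p_world, dtype=float) - np.asarray(cam_pos, dtype=float))

# With rvec = (0, 0, 0) this reduces to p_world - cam_pos, which is why the
# experiments above omit the dot product with the rotation matrix.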

def __run__(self):
sim = GazeSimulation(log = False)

sim.place_eyeball_on_scene_camera = False
sim.setEyeRelativeToSceneCamera(v(-65, -33, -73))
# sim.setEyeRelativeToSceneCamera(v(-65, -33, 0)) # assuming eyeball and scene camera are coplanar i.e. e = (e.x, e.y, 0)
sim.setEyeRelativeToSceneCamera(v(-65, -33, -73)) # based on actual measurements in our study

sim.setCalibrationDepth(1 * 1000) # mm, wrt scene camera
sim.setTestDepth(1.5 * 1000)
@@ -213,49 +208,8 @@ class Parallax2Dto3DMapping(Experiment):
results.append([np.mean(np.array(aae_2ds_aae)[:,0]), np.mean(np.array(aae_3ds_aae)[:,0]), np.mean(np.array(aae_3D3Ds)[:,0])])
results_std.append([np.std(np.array(aae_2ds_aae)[:,0]), np.std(np.array(aae_3ds_aae)[:,0]), np.std(np.array(aae_3D3Ds)[:,0])])

# Old plot code
######################################################################################################
# plt.ylabel('Angular Error')
# plt.xlabel('Depth')

# fig = plt.figure(figsize=(14.0, 10.0))
# ax = fig.add_subplot(111)

# clrs = ['b', 'r', 'orange']

# _xrange = [0.5,1.5,2.5,3.5,4.5]
# ax.plot(_xrange, [res[0] for res in results], 'r', label='2D-to-2D', marker="o", linestyle='-',lw=3)
# ax.plot(_xrange, [res[1] for res in results], 'b', label='2D-to-3D', marker="o", linestyle='-',lw=3)
# ax.plot(_xrange, [res[2] for res in results], 'g', label='3D-to-3D', marker="o", linestyle='-',lw=3)

# ax.set_ylabel(r'Angular Error',fontsize=22, fontweight='bold')
# ax.set_xlabel(r'Number of Calibration Depths',fontsize=22, fontweight='bold')

# plt.legend(fontsize=20)
# # plt.legend(loc="upper left", ncol=3, title=r"$d_c$")
# # plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
# # ncol=5, mode="expand", borderaxespad=0., fontsize=20)
# # plt.xticks(fontsize='18')
# plt.yticks(fontsize='18')

# TOPICs = [0.0,0.5,1.5,2.5,3.5,4.5,5.0]
# LABELs = ['', '1', '2', '3', '4', '5', '']
# # ax.set_xticklabels(LABELs,fontsize=18)
# plt.xticks(TOPICs, LABELs,fontsize=18)

# left = 0.1 # the left side of the subplots of the figure
# right = 0.975 # the right side of the subplots of the figure
# bottom = 0.075 # the bottom of the subplots of the figure
# top = 0.925 # the top of the subplots of the figure
# wspace = 0.2 # the amount of width reserved for blank space between subplots
# hspace = 0.4 # the amount of height reserved for white space between subplots

# plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace)
# plt.show()
######################################################################################################
# New plot code based on EffectNumberofClusters.py
# plot code based on EffectNumberofClusters.py
mean2D2D = [res[0] for res in results]
mean2D3D = [res[1] for res in results]
mean3D3D = [res[2] for res in results]
@@ -263,36 +217,17 @@ class Parallax2Dto3DMapping(Experiment):
std2D3D = [res[1] for res in results_std]
std3D3D = [res[2] for res in results_std]

N = 5
ind = np.asarray([0.25,1.25,2.25,3.25,4.25])

width = 0.5 # the width of the bars

# x1 = [0.4,1.4,2.4,3.4,4.4]
width = 0.5 # the width of the bars
x2 = [0.45,1.45,2.45,3.45,4.45]
# x3 = [0.5,1.5,2.5,3.5,4.5]
x4 = [0.55,1.55,2.55,3.55,4.55]
# x5 = [0.6,1.6,2.6,3.6,4.6]
x6 = [0.50,1.50,2.50,3.50,4.50]

fig = plt.figure(figsize=(14.0, 10.0))

ax = fig.add_subplot(111)

# print mean2D2D
# print mean2D3D

# ax.axhline(linewidth=2, y = np.mean(mean2D2D),color='r')
# ax.axhline(linewidth=2, y = np.mean(mean2D3D),color='blue')

# ax.axhline(linewidth=2, y = minvaluevalue,color='black')

# ax.text(0.98, Participantmeanvalue+0.5, "Mean %.2f" % Participantmeanvalue,fontsize=12, fontweight='bold',color='r')
# ax.text(0.98, maxvaluevalue+0.5, "Maximum %.2f" % maxvaluevalue,fontsize=12, fontweight='bold',color='black')
# ax.text(0.98, minvaluevalue+0.5, "Minimum %.2f" % minvaluevalue,fontsize=12, fontweight='bold', color='black')

# rects1 = ax.bar(ind, Participantmean,width, color='r',edgecolor='black',)#, hatch='//')
rects1 = ax.errorbar(x2, mean2D2D,yerr=[std2D2D,std2D2D],fmt='o',color='red',ecolor='red',lw=3, capsize=5, capthick=2)
plt.plot(x2, mean2D2D, marker="o", linestyle='-',lw=3,color='red',label = r'2D-to-2D')
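The per-depth plotting pattern used in this and the following hunk, distilled into a minimal standalone sketch with synthetic data (variable names and values are illustrative only, not taken from this commit):

import numpy as np
import matplotlib.pyplot as plt

x = [0.45, 1.45, 2.45, 3.45, 4.45]    # one x position per calibration-depth condition
means = [1.2, 1.0, 0.9, 0.85, 0.8]    # synthetic mean angular errors
stds = [0.3, 0.25, 0.2, 0.2, 0.15]    # synthetic standard deviations

fig = plt.figure(figsize=(14.0, 10.0))
ax = fig.add_subplot(111)
# Error bars for mean +/- std, plus a connecting line with markers.
ax.errorbar(x, means, yerr=[stds, stds], fmt='o', color='red', ecolor='red', lw=3, capsize=5, capthick=2)
ax.plot(x, means, marker="o", linestyle='-', lw=3, color='red', label='2D-to-2D')
ax.set_xticks(np.asarray([0.25, 1.25, 2.25, 3.25, 4.25]) + 0.25)
ax.set_xticklabels(('D1', 'D2', 'D3', 'D4', 'D5'))
ax.legend()
plt.show()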
@@ -304,35 +239,20 @@ class Parallax2Dto3DMapping(Experiment):

legend(fontsize=20,loc='upper right')

# rects3 = ax.errorbar(x3, meanC3,yerr=[stdC3,stdC3],fmt='o',color='black',ecolor='black',lw=3, capsize=5, capthick=2)
# plt.plot(x3, meanC3, marker="o", linestyle='-',lw=3,color='black')
#
# rects4 =ax.errorbar(x4, meanC4,yerr=[stdC4,stdC4],fmt='o',color='green',ecolor='green',lw=3, capsize=5, capthick=2)
# plt.plot(x4, meanC4, marker="o", linestyle='-',lw=3,color='green')
#
# rects5 =ax.errorbar(x5, meanC5,yerr=[stdC5,stdC5],fmt='o',color='orange',ecolor='orange',lw=3, capsize=5, capthick=2)
# plt.plot(x5, meanC5, marker="o", linestyle='-',lw=3,color='orange')

ax.set_ylabel(r'Angular Error',fontsize=22)
ax.set_xlabel(r'Number of Calibration Depths',fontsize=22)
ax.set_xticks(ind+0.25)
ax.set_xticklabels( ('D1', 'D2', 'D3','D4', 'D5') ,fontsize=18)

TOPICs = [0.0,0.5,1.5,2.5,3.5,4.5,5.0]#,110]#,120]
TOPICs = [0.0,0.5,1.5,2.5,3.5,4.5,5.0]
print TOPICs
LABELs = ["",r'1',r'2', r'3', r'4', r'5', ""]#, ""]#, ""]

# fig.canvas.set_window_title('Distance Error Correlation')
LABELs = ["",r'1',r'2', r'3', r'4', r'5', ""]
plt.xticks(TOPICs, LABELs,fontsize=18)

# legend([rects1,rects2], [r'\LARGE\textbf{2D2D}', r'\LARGE\textbf{2D3D}'], loc='lower right')

TOPICS = [0.5,1,1.5,2,2.5,3,3.5,4,4.5,5]#,110]#,120]
TOPICS = [0.5,1,1.5,2,2.5,3,3.5,4,4.5,5]
print TOPICS
LABELS = [r'0.5', r'1',r'1.5', r'2',r'2.5', r'3',r'3.5', r'4',r'4.5',r'5']#, ""]#, ""]
LABELS = [r'0.5', r'1',r'1.5', r'2',r'2.5', r'3',r'3.5', r'4',r'4.5',r'5']

# fig.canvas.set_window_title('Accuracy - Activity Statistics')
plt.yticks(TOPICS, LABELS,fontsize=18)

def autolabel(rects):
@@ -342,8 +262,6 @@ class Parallax2Dto3DMapping(Experiment):
ax.text(0.26+rect.get_x()+rect.get_width()/2., height +0.35, "%.2f"%float(height),
ha='center', va='bottom',fontweight='bold',fontsize=13.5)

# autolabel(rects1)

left = 0.1 # the left side of the subplots of the figure
right = 0.975 # the right side of the subplots of the figure
@@ -351,19 +269,15 @@ class Parallax2Dto3DMapping(Experiment):
top = 0.925 # the top of the subplots of the figure
wspace = 0.2 # the amount of width reserved for blank space between subplots
hspace = 0.4 # the amount of height reserved for white space between subplots

plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace)
plt.show()
######################################################################################################

class Parallax3Dto3DMapping(Experiment): # GT pupil pose instead of estimating the pose
'''
'''
def __run__(self):
sim = GazeSimulation(log = False)
sim.place_eyeball_on_scene_camera = False
sim.setEyeRelativeToSceneCamera(v(-65, -33, -73))
# sim.setEyeRelativeToSceneCamera(v(-65, -33, 0)) # assuming eyeball and scene camera are coplanar i.e. e = (e.x, e.y, 0)
sim.setCalibrationDepth(1 * 1000) # mm, wrt scene camera
sim.setTestDepth(1.5 * 1000)
@@ -465,8 +379,7 @@ class Parallax3Dto3DMapping(Experiment): # GT pupil pose instead of estimating t
gprimes = map(lambda tg: v(((tg[0].z - e2d3d.z)/tg[1].z)*tg[1] + e2d3d), zip(ti, gis))

AE = list(np.degrees(np.arctan((v(p[0]).cross(p[1])/(v(p[0]).dot(p[1]))).mag)) for p in zip(gprimes, ti))
# AE = list(np.degrees(np.arccos(v(p[0]).dot(p[1])/v(p[0]).mag/v(p[1]).mag)) for p in zip(gprimes, ti))

N = len(t)
AAE = np.mean(AE)
STD_2D3D = np.std(AE)
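The two steps in this hunk, intersecting each gaze ray with the target's depth plane and measuring the angle between the reprojected point and the true target, can be summarised in the following standalone sketch. It uses plain NumPy instead of the repository's `v` vector class, and all names and numbers are illustrative only.

import numpy as np

def project_gaze_to_depth(e, g, target_z):
    # Intersect the gaze ray p(s) = e + s*g (eye position e, direction g)
    # with the plane z = target_z and return the resulting 3D point g'.
    s = (target_z - e[2]) / g[2]
    return e + s * g

def angular_error_deg(a, b):
    # Angle between vectors a and b; atan2(|a x b|, a.b) is equivalent to
    # arccos(a.b / (|a||b|)) but better conditioned for very small angles.
    return np.degrees(np.arctan2(np.linalg.norm(np.cross(a, b)), np.dot(a, b)))

e = np.array([-65., -33., -73.])   # synthetic eye position (mm, scene camera frame)
g = np.array([0.1, 0.05, 1.0])     # synthetic gaze direction
t = np.array([120., 80., 1500.])   # synthetic 3D target
g_prime = project_gaze_to_depth(e, g, t[2])
print(angular_error_deg(g_prime, t))            # angular error in degrees
print(np.linalg.norm(t - g_prime))              # physical distance error, as in the next hunk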
@@ -478,15 +391,14 @@ class Parallax3Dto3DMapping(Experiment): # GT pupil pose instead of estimating t
PHE_STD = np.std(PHE)
PHE_m, PHE_M = min(PHE), max(PHE)

# aae_3Ds.append((AAE, STD, PHE, PHE_STD))
aae_3Ds.append(AAE)
std_3Ds.append(STD_2D3D)
# break

print 'depth', cdepth, 'finished.'
results.append([aae_2Ds, aae_3Ds, aae_3D3Ds, std_2Ds, std_3Ds, std_3D3Ds])

######################################################################################################
## Plotting part
clrs = ['r', 'g', 'b', 'k', 'o']
colors = ['blue', 'orange', 'red', 'black', 'orange']
patches = []
@@ -532,7 +444,6 @@ class Parallax3Dto3DMapping(Experiment): # GT pupil pose instead of estimating t

ax.set_ylabel(r'\textbf{Angular Error}',fontsize=22)
ax.set_xlabel(r'\textbf{Depth}',fontsize=22)
# ax.set_ylim((0, 2.4))

TOPICS = [-0.2, 0, 0.2, 0.4,0.6,0.8,1.0,1.2,1.4,1.6,1.8,2.0,2.2,2.4]#,110]#,120]
LABELS = [r'', r'0', r'0.2',r'0.4',r'0.6', r'0.8', r'1.0', r'1.2', r'1.4', r'1.6', r'1.8', r'2.0', r'2.2', r'2.4']#, ""]#, ""]
@@ -558,9 +469,10 @@ class Parallax3Dto3DMapping(Experiment): # GT pupil pose instead of estimating t

plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace)
plt.show()

######################################################################################################
# self.sim = sim

# This should be root path to the participants folder (including subfolders for each participant)
ROOT_DATA_DIR = '/home/mmbrian/HiWi/etra2016_mohsen/code/recording/data/participants'
class Parallax2Dto2DRealData(Experiment):
'''
@@ -662,8 +574,6 @@ class Parallax2Dto2DRealData(Experiment):

print 'done.'
# plt.plot(tdrange, aaes)
# plt.show()

def getDepth(depth_experiments):
_map = {'000': 1, '002': 1.25, '004': 1.5, '006': 1.75, '008': 2.0}
@@ -673,6 +583,7 @@ class Parallax2Dto3DRealData(Experiment):
def __run__(self):
sim = GazeSimulation(log = False)
aae_3ds = []
# Path to whatever directory you want the results to be stored in
root_result_path = '/home/mmbrian/3D_Gaze_Tracking/work/results/2D3D/'
if not os.path.exists(root_result_path):
os.makedirs(root_result_path)
@@ -823,136 +734,136 @@ class Parallax2Dto3DRealData(Experiment):


class Parallax3Dto3DRealData(Experiment):
def __run__(self):
sim = GazeSimulation(log = False)
aae_3ds = []
# class Parallax3Dto3DRealData(Experiment):
# def __run__(self):
# sim = GazeSimulation(log = False)
# aae_3ds = []

root_result_path = '/home/mmbrian/3D_Gaze_Tracking/work/results/3D3D/'
root_pose_path = '/home/mmbrian/3D_Gaze_Tracking/work/Marker_Eye_Images/ImagesUndist/'
root_data_path = '/home/mmbrian/HiWi/etra2016_mohsen/code/recording/data/participants/'
take_only_nearest_neighbor_for_calibration = True
# root_result_path = '/home/mmbrian/3D_Gaze_Tracking/work/results/3D3D/'
# root_pose_path = '/home/mmbrian/3D_Gaze_Tracking/work/Marker_Eye_Images/ImagesUndist/'
# root_data_path = '/home/mmbrian/HiWi/etra2016_mohsen/code/recording/data/participants/'
# take_only_nearest_neighbor_for_calibration = True

participants = ['p14']
# participants = ['p14']

if not os.path.exists(root_result_path):
os.makedirs(root_result_path)
# if not os.path.exists(root_result_path):
# os.makedirs(root_result_path)

for d1 in os.listdir(root_data_path):
if d1.startswith('p'): # every participant
if not d1 in participants:
# if not d1 in PARTICIPANTS:
continue
participant_label = d1
participant_results = []
d2 = os.path.join(root_data_path, d1) # .../pi/
d2 = os.path.join(d2, os.listdir(d2)[0]) # .../pi/../
print '> Processing participant', d1
participant_experiment = {}
for d3 in os.listdir(d2): # every recording
d4 = os.path.join(d2, d3) # .../pi/../00X/
# for d1 in os.listdir(root_data_path):
# if d1.startswith('p'): # every participant
# if not d1 in participants:
# # if not d1 in PARTICIPANTS:
# continue
# participant_label = d1
# participant_results = []
# d2 = os.path.join(root_data_path, d1) # .../pi/
# d2 = os.path.join(d2, os.listdir(d2)[0]) # .../pi/../
# print '> Processing participant', d1
# participant_experiment = {}
# for d3 in os.listdir(d2): # every recording
# d4 = os.path.join(d2, d3) # .../pi/../00X/

# pose_info = np.loadtxt(open(os.path.join(root_pose_path+d1+'/'+d3, "null_pupils.csv"),"rb"),delimiter=";")
pose_info = np.loadtxt(open(os.path.join(root_pose_path+d1+'/'+d3, "simple_pupils.csv"),"rb"),delimiter=";")
frames_numbers = pose_info[:, 0]
pose_estimates = pose_info[:,4:7]
pose_info = dict(zip(frames_numbers, pose_estimates))
p_frames = np.load(os.path.join(d4, 'p_frames.npy'))
# print d4
# Fetching pose information for every target
poses = []
for target in p_frames:
pose = []
for fn in target: # all frames corresponding to this pupil
# first fn corresponds to the nearest neighbor
# for test use all correspondents from these 3 or 2 estimates
# i.e. each pose-marker creates a correspondence so 3*16=48 correspondents for test
# for calibration compare two cases, one similar to above take all the 75 correspondents
# and the other taking only the pose corresponding to nearest neighbor which results in
# the same number of correspondents as target markers
try:
pose.append(pose_info[fn])
except KeyError, err:
print err
poses.append(pose)
# # pose_info = np.loadtxt(open(os.path.join(root_pose_path+d1+'/'+d3, "null_pupils.csv"),"rb"),delimiter=";")
# pose_info = np.loadtxt(open(os.path.join(root_pose_path+d1+'/'+d3, "simple_pupils.csv"),"rb"),delimiter=";")
# frames_numbers = pose_info[:, 0]
# pose_estimates = pose_info[:,4:7]
# pose_info = dict(zip(frames_numbers, pose_estimates))
# p_frames = np.load(os.path.join(d4, 'p_frames.npy'))
# # print d4
# # Fetching pose information for every target
# poses = []
# for target in p_frames:
# pose = []
# for fn in target: # all frames corresponding to this pupil
# # first fn corresponds to the nearest neighbor
# # for test use all correspondents from these 3 or 2 estimates
# # i.e. each pose-marker creates a correspondence so 3*16=48 correspondents for test
# # for calibration compare two cases, one similar to above take all the 75 correspondents
# # and the other taking only the pose corresponding to nearest neighbor which results in
# # the same number of correspondents as target markers
# try:
# pose.append(pose_info[fn])
# except KeyError, err:
# print err
# poses.append(pose)

t2d = np.load(os.path.join(d4, 't2d.npy'))
t3d = np.load(os.path.join(d4, 't3d.npy'))
participant_experiment[d3] = [poses, t2d, t3d]
# t2d = np.load(os.path.join(d4, 't2d.npy'))
# t3d = np.load(os.path.join(d4, 't3d.npy'))
# participant_experiment[d3] = [poses, t2d, t3d]

keys = sorted(participant_experiment.keys())
depths = zip(keys[::2], keys[1::2])
# keys = sorted(participant_experiment.keys())
# depths = zip(keys[::2], keys[1::2])

for calib_depth in depths:
pose_data, ct3d = participant_experiment[calib_depth[0]][0], participant_experiment[calib_depth[0]][2]
cdepth_value = getDepth(calib_depth)
if take_only_nearest_neighbor_for_calibration:
pose = np.array(list(p[0] for p in pose_data))
calib_3ds = ct3d[:]
else:
calib_3ds = []
pose = []
for i, p3d in enumerate(ct3d):
for p in pose_data[i]:
pose.append(p)
calib_3ds.append(p3d)
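The two calibration-correspondence schemes described in the comments above can be illustrated with a small standalone sketch. The toy data and names below are illustrative only; they assume, as the code does, that each entry of `pose_data` holds the pose estimates for one target, with the nearest-neighbour frame first.

# Toy data: 3 targets, each with one or more pose estimates (first = nearest neighbour).
pose_data = [[(0.1, 0.0, 1.0), (0.11, 0.01, 1.0)],
             [(0.2, 0.0, 1.0), (0.21, 0.02, 1.0), (0.19, 0.0, 1.0)],
             [(0.3, 0.1, 1.0)]]
ct3d = [(100, 0, 1000), (200, 0, 1000), (300, 100, 1000)]

# Scheme 1: one correspondence per target, using only the nearest-neighbour pose.
pose_nn = [p[0] for p in pose_data]
calib_nn = ct3d[:]

# Scheme 2: one correspondence per pose estimate, repeating the 3D target.
pose_all, calib_all = [], []
for p3d, estimates in zip(ct3d, pose_data):
    for p in estimates:
        pose_all.append(p)
        calib_all.append(p3d)

assert len(pose_nn) == len(calib_nn) == 3    # as many correspondences as target markers
assert len(pose_all) == len(calib_all) == 6  # one correspondence per pose estimate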

# Performing calibration
# First we convert gaze rays to actual pupil pose in our right hand coordinate system
# _pose = [(np.arctan(g.x/g.z), np.arctan(g.y/g.z)) for g in map(v, pose)]
_pose = map(v, pose)
print '> Running tests for calibration depth', cdepth_value
if any(g.z == 0 for g in _pose):
print 'Calibration is flawed'
# print pose
else:
print 'Calibration data is okay'
# print [g.mag for g in map(v, pose)]
# w, e, w0 = minimizeEnergy(_pose, calib_3ds, pose_given=True)
R, e = minimizeEnergy(pose, calib_3ds, pose_given=True)
# R = LA.inv(R)
print 'R', R
print 'e', e
# for calib_depth in depths:
# pose_data, ct3d = participant_experiment[calib_depth[0]][0], participant_experiment[calib_depth[0]][2]
# cdepth_value = getDepth(calib_depth)
# if take_only_nearest_neighbor_for_calibration:
# pose = np.array(list(p[0] for p in pose_data))
# calib_3ds = ct3d[:]
# else:
# calib_3ds = []
# pose = []
# for i, p3d in enumerate(ct3d):
# for p in pose_data[i]:
# pose.append(p)
# calib_3ds.append(p3d)
# # Performing calibration
# # First we convert gaze rays to actual pupil pose in our right hand coordinate system
# # _pose = [(np.arctan(g.x/g.z), np.arctan(g.y/g.z)) for g in map(v, pose)]
# _pose = map(v, pose)
# print '> Running tests for calibration depth', cdepth_value
# if any(g.z == 0 for g in _pose):
# print 'Calibration is flawed'
# # print pose
# else:
# print 'Calibration data is okay'
# # print [g.mag for g in map(v, pose)]
# # w, e, w0 = minimizeEnergy(_pose, calib_3ds, pose_given=True)
# R, e = minimizeEnergy(pose, calib_3ds, pose_given=True)
# # R = LA.inv(R)
# print 'R', R
# print 'e', e

e = v(e)
for test_depth in depths:
tdepth_value = getDepth(test_depth)
tpose_data, tt3d = participant_experiment[test_depth[1]][0], participant_experiment[test_depth[1]][2]
# e = v(e)
# for test_depth in depths:
# tdepth_value = getDepth(test_depth)
# tpose_data, tt3d = participant_experiment[test_depth[1]][0], participant_experiment[test_depth[1]][2]

test_3ds = []
tpose = []
for i, p3d in enumerate(tt3d):
for p in tpose_data[i]:
tpose.append(p)
test_3ds.append(p3d)
# test_3ds = []
# tpose = []
# for i, p3d in enumerate(tt3d):
# for p in tpose_data[i]:
# tpose.append(p)
# test_3ds.append(p3d)

# applying estimated rotation to bring pose vectors to scene camera coordinates
tpose = map(lambda p: v(R.dot(np.array(p))), tpose)
# # applying estimated rotation to bring pose vectors to scene camera coordinates
# tpose = map(lambda p: v(R.dot(np.array(p))), tpose)

if any(g.z == 0 for g in map(v, tpose)):
print 'Test depth', tdepth_value, 'is flawed'
# if any(g.z == 0 for g in map(v, tpose)):
# print 'Test depth', tdepth_value, 'is flawed'

gis = map(lambda vec: v(vec), tpose)
t = map(lambda vec: v(vec), test_3ds)
gprimes = map(lambda tg: v(((tg[0].z - e.z)/tg[1].z)*tg[1] + e), zip(t, gis))
# AE = list(np.degrees(np.arctan((v(p[0]).cross(p[1])/(v(p[0]).dot(p[1]))).mag)) for p in zip(gprimes, t))
AE = list(np.degrees(np.arccos(v(p[0]).dot(p[1])/v(p[0]).mag/v(p[1]).mag)) for p in zip(gprimes, t))
# gis = map(lambda vec: v(vec), tpose)
# t = map(lambda vec: v(vec), test_3ds)
# gprimes = map(lambda tg: v(((tg[0].z - e.z)/tg[1].z)*tg[1] + e), zip(t, gis))
# # AE = list(np.degrees(np.arctan((v(p[0]).cross(p[1])/(v(p[0]).dot(p[1]))).mag)) for p in zip(gprimes, t))
# AE = list(np.degrees(np.arccos(v(p[0]).dot(p[1])/v(p[0]).mag/v(p[1]).mag)) for p in zip(gprimes, t))

AAE = np.mean(AE)
STD = np.std(AE)
m, M = min(AE), max(AE)
# AAE = np.mean(AE)
# STD = np.std(AE)
# m, M = min(AE), max(AE)

# Computing physical distance error (in meters)
PHE = list((u-v).mag for u,v in zip(t, gprimes))
APHE = np.mean(PHE)
PHE_STD = np.std(PHE)
PHE_m, PHE_M = min(PHE), max(PHE)
# # Computing physical distance error (in meters)
# PHE = list((u-v).mag for u,v in zip(t, gprimes))
# APHE = np.mean(PHE)
# PHE_STD = np.std(PHE)
# PHE_m, PHE_M = min(PHE), max(PHE)

print 'Calibration', cdepth_value, 'Test', tdepth_value, AAE, 'degrees', APHE, 'meters'
participant_results.append([cdepth_value, tdepth_value] + [AAE, STD, m, M, APHE, PHE_STD, PHE_m, PHE_M])
# print 'Calibration', cdepth_value, 'Test', tdepth_value, AAE, 'degrees', APHE, 'meters'
# participant_results.append([cdepth_value, tdepth_value] + [AAE, STD, m, M, APHE, PHE_STD, PHE_m, PHE_M])

print len(participant_results), 'combinations processed...'
np.save(os.path.join(root_result_path, '%s_3d3d_all.npy' % participant_label), np.array(participant_results))
np.savetxt(os.path.join(root_result_path, '%s_3d3d_all.csv' % participant_label), np.array(participant_results), delimiter=",")
# print len(participant_results), 'combinations processed...'
# np.save(os.path.join(root_result_path, '%s_3d3d_all.npy' % participant_label), np.array(participant_results))
# np.savetxt(os.path.join(root_result_path, '%s_3d3d_all.csv' % participant_label), np.array(participant_results), delimiter=",")


def main():
@@ -971,8 +882,6 @@ def main():
ex = Parallax2Dto3DMapping()
ex.performExperiment()

# ex = Parallax2Dto3DMappingEye()
# ex.performExperiment()
if mode == '2d2d_2d3d':
ex = Parallax3Dto3DMapping()
ex.performExperiment()