# NOTE(review): This Python 2 experiment script (evidence: `print` statements,
# `xrange`, `cv2.cv.CV_CAP_PROP_*` constants, list-returning map/filter) has
# been collapsed onto a single physical line AND corrupted: every span between
# a '<' and the next '>' appears to have been stripped (an HTML-tag-stripping
# artifact). E.g. "assert frame_number0, l), pp_by_frame)" was presumably
# "assert frame_number<fc ... filter(lambda l: len(l)>0, pp_by_frame)",
# "while ind=" was "while ind<...", and the file is truncated mid-statement
# at the trailing "if ind". An unknown amount of code is missing; do NOT run
# or refactor this copy — recover the original from version control first.
#
# What the surviving fragments establish:
#  - capture(frame_number): opens DATA_DIR + "world.mp4" with cv2.VideoCapture
#    and reads the frame count; presumably seeks/returns that frame — TODO
#    confirm against the recovered original.
#  - pp_by_frame: pupil detections grouped per world-video frame, filtered to
#    frames with detections, then averaged into one np.array([x, y]) per frame
#    (frames with no detection become np.array([-1, -1])).
#  - tdiff: world timestamps `wt` rebased to wt[0]; the loop then finds the
#    first frame index at/after `starting_point` seconds.
#  - The xrange(9) loop bins frames into 9 marker fixation windows (per the
#    GazeHelper timing comments: starting_point/gaze_duration/saccade_duration
#    in seconds, values per recording session listed inline), averaging pupil
#    positions per window into `p2d` and appending [frame range, mean pupil
#    position, all pupil positions in range] to `data` — for a 2D-to-3D
#    calibration/test mapping, per the inline comments.
#  - DATA_DIR/OUTPUT_DIR are hard-coded absolute paths to a specific
#    2015_09_10/007 recording; OUTPUT_DIR is a .npy file path.
import sys import numpy as np import cv2 import matplotlib.pyplot as plt sys.path.append('..') # so we can import from pupil from pupil import player_methods # from tracker import processFrame DATA_DIR = '/home/mmbrian/HiWi/pupil_clone/pupil/recordings/2015_09_10/007/' OUTPUT_DIR = DATA_DIR + 'pp.npy' def capture(frame_number): cap = cv2.VideoCapture(DATA_DIR + "world.mp4") fc = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)) assert frame_number0, l), pp_by_frame) # Computing a single pupil position for the frame by taking mean of all detected pupil positions pp_by_frame = map(lambda data: sum(np.array([pp['x'], pp['y']]) for pp in data)/len(data) if data else np.array([-1, -1]), pp_by_frame) # Now each nonempty value of pp_by_frame is a tuple of (x, y) for pupil position in that frame # Next we need to associate each frame to a detected marker and by taking mean pupil point and # mean 3D marker position over a series of frames corresponding to that marker find a 2D-3D # mapping for calibration/test tdiff = map(lambda e: e-wt[0], wt) # This time correspondence to each marker was coordinated using the GazeHelper android application # for 005 > starting from 00:56, 3 seconds gaze, 1 second for saccade # These parameters are specific to the experiment # 005 > 56, 3, 1 # 006 > 3, 3, 1 # 007 > 7, 3, 1 (or 8) # 010 > 3, 3, 1 starting_point, gaze_duration, saccade_duration = 56, 3, 1 # these are specific to the experiment # finding the starting frame ind = 0 while tdiff[ind] < starting_point: ind+=1 print ind data = [] tstart = wt[ind] for i in xrange(9): print i while ind= 0: c+=1 cp = cp + pp_by_frame[j] all_corresponding_points.append(pp_by_frame[j]) # print c if c>0: ret = cp/c else: ret = np.array([-1, -1]) # no detected pupil for this marker p2d.append(ret) data.append([ np.array([starting_ind, ind-1]), # frame range ret, # mean pupil position all_corresponding_points]) # all pupil positions in range if ind