gazesim/code/recording/tracker.py

import os, signal, subprocess
import cv2
import numpy as np
import json
LOG_DIR = "/home/mmbrian/HiWi/etra2016_mohsen/code/recording/data/log.log"
FRAMES_DIR = "/home/mmbrian/HiWi/etra2016_mohsen/code/recording/data/frames_012.npy"
ARUCO_EXECUTABLE = "/home/mmbrian/temp/aruco-1.3.0/build/utils/aruco_test "
ARUCO_SIMPLE_EXECUTABLE = "/home/mmbrian/temp/aruco-1.3.0/build/utils/aruco_simple "
RECORDING_VIDEO = "/home/mmbrian/HiWi/pupil_clone/pupil/recordings/2015_09_10/012/world.avi "
## convert videos to avi using:
## avconv -i world.mp4 -c:a copy -c:v copy world.avi
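## A Python equivalent of the conversion above (a sketch, not used by this module;
## assumes avconv is on PATH):
##   subprocess.call(['avconv', '-i', 'world.mp4', '-c:a', 'copy', '-c:v', 'copy', 'world.avi'])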
CAMERA_MATRIX = "/home/mmbrian/Pictures/chessboard_shots_new/camera.yml "
SQUARE_SIZE = "0.029"

class MyEncoder(json.JSONEncoder):
    def default(self, o):
        if '__dict__' in dir(o):
            return o.__dict__
        else:
            return str(o)

class Marker(object):
    def __init__(self, _id, p, Rvec, Tvec):
        '''
        p is the array of marker corners in 2D detected in the target image
        '''
        self.id = _id
        self.p = p
        self.Rvec = Rvec
        self.Tvec = Tvec

    def getCenter(self):
        '''
        Returns the 3D position of the marker's center in the camera coordinate system
        '''
        ret = []
        # Constructing the 4x4 extrinsic (marker-to-camera) transformation matrix from Rvec and Tvec
        R = cv2.Rodrigues(self.Rvec)[0]
        for i, row in enumerate(R):
            ret.append(np.concatenate((row, [self.Tvec[i]])))
        ret.append([0, 0, 0, 1])
        mat = np.array(ret)
        # Applying the extrinsic matrix to the marker center (origin of the marker coordinate system)
        return mat.dot(np.array([0, 0, 0, 1]))[:-1]  # removing the 4th (homogeneous) coordinate
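
    # Note (not part of the original code): the same 4x4 matrix maps any point expressed in
    # the marker's coordinate system into camera coordinates, e.g. a hypothetical offset
    # point (x, y, z) via mat.dot(np.array([x, y, z, 1]))[:-1].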

    def getCenterInImage(self, camera_matrix, dist_coeffs):
        '''
        Projects the marker's center onto the image plane and returns its 2D pixel coordinates
        '''
        ret = cv2.projectPoints(np.array([(0.0, 0.0, 0.0)]), self.Rvec, self.Tvec, camera_matrix, dist_coeffs)
        return ret[0][0][0]

    @classmethod
    def fromList(cls, data):
        # data is laid out as np.array([np.array([marker.id]), marker.p, marker.Rvec, marker.Tvec])
        return Marker(data[0][0], data[1], data[2], data[3])
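
# A minimal usage sketch (not in the original code; assumes a frames_*.npy file produced by
# performMarkerTracking below already exists at FRAMES_DIR):
#   frames = np.load(FRAMES_DIR)
#   first_frame_markers = [Marker.fromList(d) for d in frames[0]]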

# def processFrame(frame_path):
#     cmd = ARUCO_SIMPLE_EXECUTABLE + frame_path + ' ' + CAMERA_MATRIX + SQUARE_SIZE
#     p = subprocess.Popen('exec ' + cmd, shell=True, stdout=subprocess.PIPE)
#     print '########################################################'
#     print 'Begin Output'
#     while True:
#         line = p.stdout.readline()
#         if line != '':
#             # the real code does filtering here
#             print line.rstrip()
#         else:
#             break
#     print 'End Output'
#     print '########################################################'
#     # p.terminate()
#     p.kill()
#     # os.killpg(p.pid, signal.SIGTERM)

def readCameraParams(cam_mat = None):
    '''
    Reads an OpenCV camera.yml file and returns camera_matrix and distortion_coefficients
    '''
    if not cam_mat:
        cam_mat = CAMERA_MATRIX
    data = ''.join(open(cam_mat.strip(), 'r').readlines()).replace('\n', '').lower()
    try:
        ind1 = data.index('[', data.index('camera_matrix'))
        ind2 = data.index(']', ind1)
        camera_matrix = eval(data[ind1:ind2+1])
        camera_matrix = np.array([camera_matrix[:3],
                                  camera_matrix[3:6],
                                  camera_matrix[6:]])
        ind1 = data.index('[', data.index('distortion_coefficients'))
        ind2 = data.index(']', ind1)
        dist_coeffs = np.array(eval(data[ind1:ind2+1]))
        return camera_matrix, dist_coeffs
    except Exception:
        print 'Could not load camera parameters'
        print 'Invalid camera.yml file.'
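
# For reference, the parser above assumes the standard OpenCV calibration YAML layout,
# roughly as below (a sketch; field values are illustrative):
#   camera_matrix: !!opencv-matrix
#     rows: 3
#     cols: 3
#     dt: d
#     data: [ fx, 0., cx, 0., fy, cy, 0., 0., 1. ]
#   distortion_coefficients: !!opencv-matrix
#     rows: 5
#     cols: 1
#     dt: d
#     data: [ k1, k2, p1, p2, k3 ]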

def performMarkerTracking(read_from_log = False, log = False, ret = False,
                          log_dir = None, recording_video = None, frames_dir = None,
                          square_size = None, cam_mat = None):
    if not log_dir:
        log_dir = LOG_DIR
    if not frames_dir:
        frames_dir = FRAMES_DIR
    if not recording_video:
        recording_video = RECORDING_VIDEO
    if not cam_mat:
        cam_mat = CAMERA_MATRIX
    if not square_size:
        square_size = SQUARE_SIZE
    square_size = str(square_size)

    data = []
    if read_from_log:
        if log:
            print '> Reading data from log file:', log_dir
        with open(log_dir, 'r') as f:
            for l in f:
                if l.strip():
                    data.append(l)
    else:
        if log:
            print '> Performing marker tracking on file:', recording_video
            print '> Writing data to log file:', log_dir
        with open(log_dir, 'w') as f:
            # ARUCO_EXECUTABLE, recording_video and cam_mat end with a trailing space,
            # so plain concatenation yields a valid command line
            for l in os.popen(ARUCO_EXECUTABLE + recording_video + cam_mat + square_size):
                l = l.strip()
                if '\r' in l:
                    for line in l.split('\r'):
                        if line:
                            f.write(line + "\n")  # logging to file
                            data.append(line)
                else:
                    if l:
                        f.write(l + "\n")  # logging to file
                        data.append(l)

    if log:
        print '> Parsing marker data...'
    curr_frame = 0
    markers = {}
    frames, frame = [], []
    visited_first_frame = False
    for line in data:
        line = line.strip().lower()
        # print
        # print repr(line)
        # print
        if line.startswith('time'):  # this line marks the beginning of a new frame
            if visited_first_frame:
                frames.append(frame)
            visited_first_frame = True
            frame = []
            # if frame:
            #     frames.append(frame)
            #     frame = []
            curr_frame = curr_frame + 1
            nmarkers = int(line[line.index('nmarkers')+9:])
        if 'txyz' in line:  # this line holds information of a detected marker
            ind = line.index('=')
            _id = int(line[:ind])
            # The four marker corners in image coordinates
            p = []
            for i in xrange(4):
                pind = ind
                ind = line.index(' ', pind+1)
                p.append(line[pind+1:ind])
            pind = ind
            ind = line.index('rxyz', pind)
            # Marker translation (Txyz) and rotation (Rxyz) wrt the camera
            T = line[pind+1+5:ind]
            R = line[ind+5:]
            if _id not in markers:
                markers[_id] = []
            curr_marker = Marker(_id,
                                 map(lambda pstr: np.array(eval(pstr)), p),
                                 np.array(eval('(' + R.strip().replace(' ', ',') + ')')),
                                 np.array(eval('(' + T.strip().replace(' ', ',') + ')')))
            markers[_id].append(curr_marker)
            frame.append(np.array([np.array([curr_marker.id]), curr_marker.p, curr_marker.Rvec, curr_marker.Tvec]))
    # Last frame data
    frames.append(frame)
    frames = np.array(frames)
    if log:
        print '> Saving marker data for all frames in:', frames_dir
    np.save(frames_dir, frames)
    if log:
        print '> Successfully processed %s frames.' % curr_frame
    if ret:
        return markers
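
# For reference, the parser in performMarkerTracking expects aruco_test output lines roughly of
# the form below (a sketch inferred from the parsing logic, not a verified spec):
#   Time=... nmarkers=2
#   204=(123.5,456.0) (130.1,450.2) (135.0,460.7) (128.4,466.3) Txyz=0.01 0.02 0.35 Rxyz=2.9 0.1 -0.2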

if __name__ == '__main__':
    camera_matrix, dist_coeffs = readCameraParams()
    # ret=True so that the detected markers are actually returned
    markers = performMarkerTracking(ret = True)
    print '################################################'
    print len(markers), 'markers detected'
    for m in markers:
        # print '################################################'
        # print markers[m][0].__dict__
        # print '################################################'
        print m, ':', len(markers[m]), 'instances'
        c = markers[m][0].getCenter()
        print c * 100, markers[m][0].getCenterInImage(camera_matrix, dist_coeffs)

    # # TODO: investigate if this is how to get projections of unit vectors
    # # originating from the camera onto the image plane
    # # the trick is that we consider a marker with no R and zero T
    # v = [(0.0, 0.0, 0.0), (1, 0, 0), (0, 1, 0), (0, 0, 1)]
    # ret = cv2.projectPoints(np.array(v), np.eye(3), np.zeros(3), camera_matrix, dist_coeffs)
    # p = ret[0]
    # for i, t in enumerate(v):
    #     print t, 'P->', p[i][0]