conan/container.py

import numpy as np
import pandas as pd
import pickle as pkl
import os.path
import cv2
from PyQt5 import QtCore, QtGui


class DataContainer():
    """Holds one recording: the video file plus the pre-computed analysis data."""

    def __init__(self, movie_fileName, data_fileName):
        self.movie_fileName = movie_fileName
        self.data_fileName = data_fileName
        self.frameCount = 0
        self.fps = 0
        self.frameSize = [0, 0]
        self.dataGaze = None
        self.dataMovement = None
        self.number_ids = None
        self.dataRTGene = None
        self.image = None
    def readData(self, verbose=False, rtgene=False):
        if verbose:
            print("## Start Reading Data")

        # Read video data: grab the first frame and the basic video properties
        f = self.movie_fileName
        print(f)
        if os.path.isfile(f):
            cap = cv2.VideoCapture(f)
            ret, frame = cap.read()
            self.image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            h, w, ch = self.image.shape
            bytesPerLine = ch * w
            convertToQtFormat = QtGui.QImage(self.image.data, w, h, bytesPerLine, QtGui.QImage.Format_RGB888)
            self.fps = cap.get(cv2.CAP_PROP_FPS)
            self.frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)    # float
            height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float
            self.frameSize = [width, height]
            print(self.fps, self.frameCount)
            if verbose:
                print("Video frameCount %i" % self.frameCount)
                duration = self.frameCount / self.fps
                minutes = int(duration / 60)
                seconds = duration % 60
                print('Video duration (M:S) = ' + str(minutes) + ':' + str(seconds))
        else:
            print("WARNING: no video available.")

        # Read the pickled analysis data file
        with open(self.data_fileName, 'rb') as f:
            data = pkl.load(f)

        # Read RT-Gene gaze estimates (optional)
        if rtgene:
            # Assumption: the RT-Gene gaze pickle sits next to the data file as <base>_Gaze.pkl
            f = "%s_Gaze.pkl" % os.path.splitext(self.data_fileName)[0]
            # Ground-truth gaze targets (hard-coded example file)
            df_gt = pd.read_pickle('exampledata/G2_VID1_GroundTruth.pkl')
            df_gt = df_gt[['Frame', 'ID0_target_spher', 'ID1_target_spher']]
            if os.path.isfile(f):
                df = pd.read_pickle(f)
                self.number_ids = len(df.PId.unique())
                # One column per (measure, person) pair, e.g. ID0_GazeTheta, ID1_GazeTheta, ...
                self.dataRTGene = df.pivot(index='Frame', columns="PId",
                                           values=["GazeTheta", "GazePhi", "HeadCenter",
                                                   "HeadPoseYaw", "HeadPoseTheta",
                                                   "Phi", "Theta"])
                lst = []
                for label in ["GazeTheta", "GazePhi", "Head", "HeadPoseYaw", "HeadPoseTheta", "Phi", "Theta"]:
                    for head_id in range(self.number_ids):
                        lst.append("ID%i_%s" % (head_id, label))
                self.dataRTGene.columns = lst
                self.dataRTGene = self.dataRTGene.reset_index()
                # Merge in the ground-truth targets; the target columns are swapped between IDs
                self.dataRTGene = pd.merge(self.dataRTGene, df_gt, on=['Frame'])
                self.dataRTGene = self.dataRTGene.rename(
                    columns={'ID0_target_spher': 'ID1_target', 'ID1_target_spher': 'ID0_target'},
                    errors='raise')
                print('Detected %i IDs in video' % self.number_ids)
                if verbose:
                    print("Gaze sample count %i" % len(self.dataRTGene))
            else:
                print("WARNING: no RT-Gene data available.")

        # Read head-pose based gaze data
        if "HeadPose" in data:
            self.dataGaze = data["HeadPose"]
            self.number_ids = len([col for col in self.dataGaze.columns if 'head' in col])
            print('Detected %i IDs in video' % self.number_ids)
            if verbose:
                print("Gaze sample count %i" % len(self.dataGaze))
        else:
            print("WARNING: no gaze data available.")
"""
# Read OpenPose Data
f = self.__get_filename_with_substring("OpenPose")
if os.path.isfile(f):
self.dataPose = pd.read_pickle(f)
if (verbose):
print("Pose sample count %i" % len(self.dataPose))
else:
print("WARNING: no pose data avaibale.")
"""

        # Read body movement data
        if "BodyMovement" in data:
            self.dataMovement = data["BodyMovement"]
            if verbose:
                print('Body movement sample count %i' % len(self.dataMovement))
        else:
            print('WARNING: no body movement data available.')

    def getFrameCount(self):
        return self.frameCount

    def getFrameSize(self):
        return self.frameSize

    def getFPS(self):
        return self.fps

    def getVideo(self):
        return self.movie_fileName

    def getGazeData(self):
        return self.dataGaze

    def getFrame(self, frameIdx):
        # Note: returns the requested index unchanged
        return frameIdx

    def getFrameCurrent(self):
        # Note: always reports frame 1
        return 1

    def getNumberIDs(self):
        return self.number_ids

    def getMovementData(self):
        return self.dataMovement

    def getRTGeneData(self):
        return self.dataRTGene

    def getImage(self):
        return self.image
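

# Minimal usage sketch: the file names below are hypothetical examples; readData()
# expects a video and the pickled analysis data produced for that recording.
if __name__ == "__main__":
    container = DataContainer("exampledata/G2_VID1.mp4", "exampledata/G2_VID1.pkl")
    container.readData(verbose=True)
    print("Frames:", container.getFrameCount())
    print("FPS:", container.getFPS())
    print("Frame size:", container.getFrameSize())
    print("Detected IDs:", container.getNumberIDs())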