From 74df5cb3f0fa630bd48c396be3838787686dfbd5 Mon Sep 17 00:00:00 2001
From: apenzko <38656523+apenzko@users.noreply.github.com>
Date: Sun, 17 Oct 2021 12:49:49 +0200
Subject: [PATCH] Added GUI
---
README.md | 32 +-
container.py | 151 +++++
gui.ui | 183 +++++
icons/logo.png | Bin 0 -> 4352 bytes
icons/pause.png | Bin 0 -> 2605 bytes
icons/play.png | Bin 0 -> 2907 bytes
icons/stop.png | Bin 0 -> 1614 bytes
main.py | 287 ++++++++
processing.py | 1015 ++++++++++++++++++++++++++++
requirements.txt | 5 +
uiwidget/__init__.py | 0
uiwidget/widgetfacialexpression.py | 89 +++
uiwidget/widgetgaze.py | 147 ++++
uiwidget/widgetobject.py | 81 +++
uiwidget/widgetplayer.py | 309 +++++++++
uiwidget/widgetpose.py | 147 ++++
uiwidget/widgetspeaking.py | 157 +++++
uiwidget/widgetstatic.py | 15 +
uiwidget/widgettimeline.py | 451 ++++++++++++
uiwidget/widgetvideosettings.py | 63 ++
utils/__init__.py | 0
utils/colors.py | 13 +
utils/util.py | 31 +
23 files changed, 3174 insertions(+), 2 deletions(-)
create mode 100644 container.py
create mode 100644 gui.ui
create mode 100644 icons/logo.png
create mode 100644 icons/pause.png
create mode 100644 icons/play.png
create mode 100644 icons/stop.png
create mode 100644 main.py
create mode 100644 processing.py
create mode 100644 requirements.txt
create mode 100644 uiwidget/__init__.py
create mode 100644 uiwidget/widgetfacialexpression.py
create mode 100644 uiwidget/widgetgaze.py
create mode 100644 uiwidget/widgetobject.py
create mode 100644 uiwidget/widgetplayer.py
create mode 100644 uiwidget/widgetpose.py
create mode 100644 uiwidget/widgetspeaking.py
create mode 100644 uiwidget/widgetstatic.py
create mode 100644 uiwidget/widgettimeline.py
create mode 100644 uiwidget/widgetvideosettings.py
create mode 100644 utils/__init__.py
create mode 100644 utils/colors.py
create mode 100644 utils/util.py
diff --git a/README.md b/README.md
index 6a3a960..1e0a058 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,31 @@
-# conan
+# ConAn
+This is the official repository for [ConAn: A Usable Tool for Multimodal Conversation Analysis](https://www.perceptualui.org/publications/penzkofer21_icmi.pdf).
+ConAn, our graphical tool for multimodal conversation analysis, takes 360-degree videos recorded during multi-person group interactions as input. ConAn integrates state-of-the-art models for gaze estimation, active speaker detection,
+facial action unit detection, and body movement detection, and can output quantitative reports at both the individual and the group
+level, as well as different visualizations that provide qualitative insights into group interaction.
-ConAn: A Usable Tool for Multimodal Conversation Analysis
\ No newline at end of file
+## Installation
+For the graphical user interface (GUI) you need Python > 3.6; install the [requirements](requirements.txt) via pip:
+```
+pip install -r requirements.txt
+```
+## Get Started
+To test the GUI, you can download our example use case videos from Google Drive,
+as well as the corresponding processed ``.dat`` files, which include all the analyses.
+Run [main.py](main.py) and import the video file you would like to analyze.
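+Optionally, start the GUI with the ``--verbose`` flag for additional console output:
+```
+python main.py --verbose
+```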
+## Processing
+
+
+
+## Citation
+Please cite this paper if you use ConAn or parts of this publication in your research:
+```
+@inproceedings{penzkofer21_icmi,
+ author = {Penzkofer, Anna and Müller, Philipp and Bühler, Felix and Mayer, Sven and Bulling, Andreas},
+ title = {ConAn: A Usable Tool for Multimodal Conversation Analysis},
+ booktitle = {Proc. ACM International Conference on Multimodal Interaction (ICMI)},
+ year = {2021},
+ doi = {10.1145/3462244.3479886},
+ video = {https://www.youtube.com/watch?v=H2KfZNgx6CQ}
+}
+```
\ No newline at end of file
diff --git a/container.py b/container.py
new file mode 100644
index 0000000..774e94f
--- /dev/null
+++ b/container.py
@@ -0,0 +1,151 @@
+import numpy as np
+import pandas as pd
+import pickle as pkl
+import os.path
+import cv2
+from PyQt5 import QtCore, QtGui
+
+
+class DataContainer():
+
+ def __init__(self, movie_fileName, data_fileName):
+ self.movie_fileName = movie_fileName
+ self.data_fileName = data_fileName
+
+ self.frameCount = 0
+ self.fps = 0
+ self.frameSize = [0, 0]
+
+ self.dataGaze = None
+ self.dataMovement = None
+ self.number_ids = None
+ self.dataRTGene = None
+ self.image = None
+
+ def readData(self, verbose=False, rtgene=False):
+ if (verbose):
+ print("## Start Reading Data")
+
+ # Read Video Data
+ f = self.movie_fileName
+ print(f)
+ if os.path.isfile(f):
+ cap = cv2.VideoCapture(f)
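+            # Grab the first frame for preview; OpenCV returns BGR, so it is
+            # converted to RGB before being handed to Qt below.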
+ ret, frame = cap.read()
+ self.image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+
+ h, w, ch = self.image.shape
+ bytesPerLine = ch * w
+ convertToQtFormat = QtGui.QImage(self.image.data, w, h, bytesPerLine, QtGui.QImage.Format_RGB888)
+
+ self.fps = cap.get(cv2.CAP_PROP_FPS)
+ self.frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+ width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # float
+ height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float
+ self.frameSize = [width, height]
+ print(self.fps, self.frameCount)
+
+ if (verbose):
+ print("Video frameCount %i" % self.frameCount)
+ duration = self.frameCount / self.fps
+ minutes = int(duration / 60)
+ seconds = duration % 60
+ print('Video duration (M:S) = ' + str(minutes) + ':' + str(seconds))
+ else:
+            print("WARNING: no video available.")
+
+ # read data file
+ with open(self.data_fileName, 'rb') as f:
+ data = pkl.load(f)
+
+ # Read RT-Gene Gaze
+ if rtgene:
+            # gaze estimates are expected next to the data file as "<name>_Gaze.pkl"
+            f = "%s_Gaze.pkl" % os.path.splitext(self.data_fileName)[0]
+ df_gt = pd.read_pickle('exampledata/G2_VID1_GroundTruth.pkl')
+ df_gt = df_gt[['Frame', 'ID0_target_spher', 'ID1_target_spher']]
+ if os.path.isfile(f):
+ df = pd.read_pickle(f)
+ self.number_ids = len(df.PId.unique())
+
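+                # Pivot from long format (one row per frame and person) to a
+                # wide frame with one "ID<i>_<measure>" column per person.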
+ self.dataRTGene = df.pivot(index='Frame', columns="PId", values=["GazeTheta", "GazePhi", "HeadCenter",
+ "HeadPoseYaw", "HeadPoseTheta",
+ "Phi", "Theta"])
+ lst = []
+ for label in ["GazeTheta", "GazePhi", "Head", "HeadPoseYaw", "HeadPoseTheta", "Phi", "Theta"]:
+ for head_id in range(self.number_ids):
+ lst.append("ID%i_%s" % (head_id, label))
+ self.dataRTGene.columns = lst
+ self.dataRTGene = self.dataRTGene.reset_index()
+ self.dataRTGene = pd.merge(self.dataRTGene, df_gt, on=['Frame'])
+ self.dataRTGene = self.dataRTGene.rename(
+ columns={'ID0_target_spher': 'ID1_target', 'ID1_target_spher': 'ID0_target'},
+ errors='raise')
+
+ print('Detected %i IDs in video' % self.number_ids)
+ if (verbose):
+ print("Gaze sample count %i" % len(self.dataRTGene))
+ else:
+                print("WARNING: no RT-Gene data available.")
+
+ # Read Gaze Data
+ if "HeadPose" in data:
+ self.dataGaze = data["HeadPose"]
+ self.number_ids = len([col for col in self.dataGaze.columns if 'head' in col])
+
+ print('Detected %i IDs in video' % self.number_ids)
+ if (verbose):
+ print("Gaze sample count %i" % len(self.dataGaze))
+ else:
+            print("WARNING: no gaze data available.")
+
+ """
+ # Read OpenPose Data
+ f = self.__get_filename_with_substring("OpenPose")
+ if os.path.isfile(f):
+ self.dataPose = pd.read_pickle(f)
+ if (verbose):
+ print("Pose sample count %i" % len(self.dataPose))
+ else:
+            print("WARNING: no pose data available.")
+ """
+
+ # Read Movement Data
+ if "BodyMovement" in data:
+ self.dataMovement = data["BodyMovement"]
+ if verbose:
+ print('Body movement sample count %i' % len(self.dataMovement))
+ else:
+ print('WARNING: no body movement data available.')
+
+ def getFrameCount(self):
+ return self.frameCount
+
+ def getFrameSize(self):
+ return self.frameSize
+
+ def getFPS(self):
+ return self.fps
+
+ def getVideo(self):
+ return self.movie_fileName
+
+ def getGazeData(self):
+ return self.dataGaze
+
+ def getFrame(self, frameIdx):
+ return frameIdx
+
+ def getFrameCurrent(self):
+ return 1
+
+ def getNumberIDs(self):
+ return self.number_ids
+
+ def getMovementData(self):
+ return self.dataMovement
+
+ def getRTGeneData(self):
+ return self.dataRTGene
+
+ def getImage(self):
+ return self.image
diff --git a/gui.ui b/gui.ui
new file mode 100644
index 0000000..d80a386
--- /dev/null
+++ b/gui.ui
@@ -0,0 +1,183 @@
+
+
+ ConAn
+
+
+
+ 0
+ 0
+ 800
+ 450
+
+
+
+ MainWindow
+
+
+
+ -
+
+
+
+ 4
+ 1
+
+
+
+
+ 0
+ 300
+
+
+
+ 4
+
+
+
+ Eye Gaze
+
+
+
+
+ Speaking Activity
+
+
+
+
+
+ 0
+ 0
+
+
+
+ Body Pose
+
+
+
+
+ Facial Expression
+
+
+
+
+ Object Detection
+
+
+
+
+ -
+
+
+
+ 4
+ 1
+
+
+
+
+ 0
+ 60
+
+
+
+
+ -
+
+
+
+ 3
+ 1
+
+
+
+
+ -
+
+
+
+ 1
+ 1
+
+
+
+
+
+
+
+
+
+
+ Open
+
+
+
+
+ Exit
+
+
+
+
+
+ <customwidgets>
+  <customwidget>
+   <class>WidgetTimeLine</class>
+   <extends>QWidget</extends>
+   <header>uiwidget/widgettimeline</header>
+   <container>1</container>
+  </customwidget>
+  <customwidget>
+   <class>WidgetPlayer</class>
+   <extends>QWidget</extends>
+   <header>uiwidget/widgetplayer</header>
+   <container>1</container>
+  </customwidget>
+  <customwidget>
+   <class>WidgetPose</class>
+   <extends>QWidget</extends>
+   <header>uiwidget/widgetpose</header>
+   <container>1</container>
+  </customwidget>
+  <customwidget>
+   <class>WidgetSpeaking</class>
+   <extends>QWidget</extends>
+   <header>uiwidget/widgetspeaking</header>
+   <container>1</container>
+  </customwidget>
+  <customwidget>
+   <class>WidgetGaze</class>
+   <extends>QWidget</extends>
+   <header>uiwidget/widgetgaze</header>
+   <container>1</container>
+  </customwidget>
+  <customwidget>
+   <class>WidgetFacialExpression</class>
+   <extends>QWidget</extends>
+   <header>uiwidget/widgetfacialexpression</header>
+   <container>1</container>
+  </customwidget>
+  <customwidget>
+   <class>WidgetVideoSettings</class>
+   <extends>QWidget</extends>
+   <header>uiwidget/widgetvideosettings</header>
+   <container>1</container>
+  </customwidget>
+  <customwidget>
+   <class>WidgetObject</class>
+   <extends>QWidget</extends>
+   <header>uiwidget/widgetobject</header>
+   <container>1</container>
+  </customwidget>
+ </customwidgets>
+ <tabstops>
+  <tabstop>tab</tabstop>
+ </tabstops>
+ <resources/>
+ <connections/>
+</ui>
diff --git a/icons/logo.png b/icons/logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..554137acec15dd28ae9fc75cc40b374e73484e7c
GIT binary patch
literal 4352
[base85-encoded PNG data omitted]
literal 0
HcmV?d00001
diff --git a/icons/pause.png b/icons/pause.png
new file mode 100644
index 0000000000000000000000000000000000000000..391b48a874eed4391061d5255cb576aa260d9ee1
GIT binary patch
literal 2605
[base85-encoded PNG data omitted]
literal 0
HcmV?d00001
diff --git a/icons/play.png b/icons/play.png
new file mode 100644
index 0000000000000000000000000000000000000000..eab4f9949d6db2766e0823f32d553c9d9145b3e9
GIT binary patch
literal 2907
[base85-encoded PNG data omitted]
literal 0
HcmV?d00001
diff --git a/icons/stop.png b/icons/stop.png
new file mode 100644
GIT binary patch
literal 1614
[base85-encoded PNG data omitted]
literal 0
HcmV?d00001
diff --git a/main.py b/main.py
new file mode 100644
--- /dev/null
+++ b/main.py
@@ -0,0 +1,287 @@
+        # bool array for all frames
+ self.frame = 0
+ self.frameCount = self.processor.getFrameCount()
+ self.frames_bitmask = np.ones(self.frameCount)
+ self.segments = [(0, self.frameCount)]
+
+ self.wTimeLine.setInit(self.processor.getFrameCount(), self.processor.getNumberIDs(), self.processor.getFPS())
+ self.wPlayer.setInit(self.processor.getVideo(), self.processor.getFPS(),
+ self.processor.getOriginalVideoResolution(), self.processor.getNumberIDs(),
+ self.processor.getColors(), self.processor.getTags(), self.processor.getTagColors())
+
+ self.wTimeLine.rangeslider.segmentsChanged.connect(self.segmentsChanged)
+ self.sendSegments.connect(self.processor._updateSegments)
+ self.processor.setReady(True)
+ self.status.showMessage('Ready')
+
+ def segmentsChanged(self, segments, last_segment_changed):
+ self.segments = segments
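+        # Rebuild the per-frame bitmask: a frame is set to 1 iff it lies
+        # inside one of the currently selected timeline segments.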
+ self.frames_bitmask = np.zeros(self.frameCount)
+ for segment in segments:
+ start, end = segment
+ self.frames_bitmask[start:end] = np.ones((end - start))
+ self.sendSegments.emit(self.frames_bitmask)
+
+ start, end = segments[last_segment_changed]
+
+ if start > self.frame:
+ self.wPlayer.setFrame(start)
+
+ if end < self.frame:
+ self.wPlayer.pause()
+ self.wPlayer.setFrame(end)
+
+ """
+ def closeEvent(self, event):
+ self.wPlayer.stop()
+
+ #quit_msg = "Are you sure you want to exit the program?"
+ # reply = QtGui.QMessageBox.question(self, 'Message',
+ # quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
+ #
+ # if reply == QtGui.QMessageBox.Yes:
+ # event.accept()
+ # else:
+ # event.ignore()
+ """
+
+ def open(self):
+        movie_fileName, _ = QFileDialog.getOpenFileName(self, "Open Movie File", self.movieDir, "Files (*.mp4)")
+        data_fileName, _ = QFileDialog.getOpenFileName(
+            self, "Open Preprocessed File", self.movieDir, "Files (*.dat)")
+
+ if movie_fileName and data_fileName and os.path.isfile(movie_fileName) and os.path.isfile(data_fileName):
+ self.status.showMessage('Loading data... ')
+ self.openProject(movie_fileName, data_fileName)
+ else:
+            print("ERROR: both a movie file (*.mp4) and a preprocessed data file (*.dat) must be selected.")
+
+ def export(self):
+ self.status.showMessage('Exporting data... This might take some time.')
+ # has to be defined in the main window, and then call the processing function
+ self.processor.export()
+ self.status.showMessage('Exported data successfully!')
+
+ def _setupMenuBar(self):
+ self.mainMenu = self.menuBar()
+
+ fileMenu = self.mainMenu.addMenu('&File')
+
+ openAct = QAction('&Open', self)
+ openAct.setStatusTip('Open files')
+ openAct.setShortcut('Ctrl+O')
+ openAct.triggered.connect(self.open)
+ fileMenu.addAction(openAct)
+
+ exportAct = QAction('&Export', self)
+ exportAct.setStatusTip('Export calculations')
+ exportAct.setShortcut('Ctrl+E')
+ exportAct.triggered.connect(self.export)
+ fileMenu.addAction(exportAct)
+
+ exitAct = QAction('&Exit', self)
+ exitAct.setStatusTip('Exit application')
+ exitAct.setShortcut('Ctrl+Q')
+ exitAct.triggered.connect(self.close)
+ fileMenu.addAction(exitAct)
+
+ #@QtCore.pyqtSlot(str)
+ # def statusUpdate(self, message):
+ # self.status.showMessage(message)
+
+ def updateFrame(self, position):
+ self.frame = int((position / 1000.0) * self.fps)
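+        # If playback entered a deselected region, jump ahead to the start of
+        # the next selected segment.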
+ if self.frames_bitmask[self.frame] == 0:
+ for segment in self.segments:
+ if segment[0] > self.frame:
+ self.wPlayer.setFrame(segment[0] + 1)
+ break
+
+ @QtCore.pyqtSlot(bool)
+ def togglePoseTab(self, deactivate):
+ self.wTab.setTabEnabled(2, not deactivate)
+
+ @QtCore.pyqtSlot(bool)
+ def toggleGazeTab(self, deactivate):
+ self.wTab.setTabEnabled(0, not deactivate)
+
+ @QtCore.pyqtSlot(bool)
+ def toggleFaceTab(self, deactivate):
+ self.wTab.setTabEnabled(3, not deactivate)
+
+ @QtCore.pyqtSlot(bool)
+ def toggleSpeakingTab(self, deactivate):
+ self.wTab.setTabEnabled(1, not deactivate)
+
+ @QtCore.pyqtSlot(bool)
+ def toggleObjectTab(self, deactivate):
+ self.wTab.setTabEnabled(4, not deactivate)
+
+
+if __name__ == "__main__":
+
+ app = QtWidgets.QApplication(sys.argv)
+ # print(sys.argv)
+ app.setWindowIcon(QtGui.QIcon('icons/logo.png'))
+
+ app.setStyle("Fusion")
+
+ # Now use a palette to switch to dark colors:
+ palette = QPalette()
+ palette.setColor(QPalette.Window, QColor(53, 53, 53))
+ palette.setColor(QPalette.WindowText, QtCore.Qt.white)
+ palette.setColor(QPalette.Base, QColor(25, 25, 25))
+ palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))
+ palette.setColor(QPalette.ToolTipBase, QtCore.Qt.black)
+ palette.setColor(QPalette.ToolTipText, QtCore.Qt.white)
+ palette.setColor(QPalette.Text, QtCore.Qt.white)
+ palette.setColor(QPalette.Button, QColor(53, 53, 53))
+ palette.setColor(QPalette.ButtonText, QtCore.Qt.white)
+ palette.setColor(QPalette.BrightText, QtCore.Qt.red)
+ palette.setColor(QPalette.Link, QColor(42, 130, 218))
+ palette.setColor(QPalette.Highlight, QColor(42, 130, 218))
+ palette.setColor(QPalette.HighlightedText, QtCore.Qt.black)
+
+ app.setPalette(palette)
+
+    verbose = "--verbose" in sys.argv
+    if verbose:
+        print("### Verbose ENABLED")
+
+ main = MainWindow(verbose=verbose)
+ main.setWindowTitle('ConAn: A Usable Tool for Multimodal Conversation Analysis')
+ main.show()
+
+ sys.exit(app.exec_())
diff --git a/processing.py b/processing.py
new file mode 100644
index 0000000..9755eb3
--- /dev/null
+++ b/processing.py
@@ -0,0 +1,1015 @@
+import numpy as np
+import pandas as pd
+import json
+import colorsys
+import pickle as pkl
+import os
+import cv2
+import math
+from PyQt5 import QtCore, QtGui, QtWidgets
+
+from threading import Lock, Thread
+from utils.util import sperical2equirec
+
+POSE_PAIRS = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [
+ 1, 11], [11, 12], [12, 13], [1, 0], [0, 14], [14, 16], [0, 15], [15, 17]]
+
+POSE_PAIRS_NEW = [[10, 9], [9, 8], [8, 1], [1, 11], [11, 12], [12, 13], [13, 12], [12, 11], [11, 1], [1, 2], [2, 3],
+ [3, 4], [4, 3], [3, 2], [2, 1], [1, 5], [5, 6], [6, 7], [7, 6], [6, 5], [5, 1], [1, 0], [0, 15],
+ [15, 17], [17, 15], [15, 0], [0, 14], [14, 16]]
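+# OpenPose limb connections. POSE_PAIRS_NEW repeats each pair in forward and
+# reverse order so that the skeleton can be traced as a single connected
+# polyline when plotting.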
+
+
+def getColors(N, bright=True):
+ """
+ To get visually distinct colors, generate them in HSV space then
+ convert to RGB.
+ """
+ brightness = 1.0 if bright else 0.7
+ hsv = [(i / N, 1, brightness) for i in range(N)]
+ colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
+ return colors
+
+
+class Processor(QtWidgets.QWidget):
+ frame: int = None
+ frameData: int = None
+ fps: int = None
+ frameCount: int = None
+ frameSize: [float, float] = None
+ movieFileName: str = None
+ originalVideoResolution: (int, int) = None
+ scaledVideoResolution: (int, int) = None
+ dataFileName: str = None
+ numberIDs: int = None
+ visualize: list = None
+ segments: list = None
+ tags: list = None
+
+ signalPoseSetInit = QtCore.pyqtSignal(dict, dict, list, int)
+ signalSpeakerSetInit = QtCore.pyqtSignal(dict, list, int)
+ signalGazeSetInit = QtCore.pyqtSignal(dict, list, int)
+ signalInit = QtCore.pyqtSignal(list, int)
+ signalInitTags = QtCore.pyqtSignal(list, tuple, dict, list)
+ signalUpdateMovementGraph = QtCore.pyqtSignal(dict, list, int)
+ signalUpdateSpeakGraph = QtCore.pyqtSignal(dict, int, int)
+ signalUpdateHandVelocity = QtCore.pyqtSignal(dict, int)
+ signalUpdateFaceAus = QtCore.pyqtSignal(dict)
+ signalUpdateFaceImgs = QtCore.pyqtSignal(dict, int)
+ signalVideoLabel = QtCore.pyqtSignal(int, int, int)
+ signalPosePoints = QtCore.pyqtSignal(int, list, list)
+ signalPoseChangedLabels = QtCore.pyqtSignal(dict, dict, int)
+ signalSpeakChangedLabels = QtCore.pyqtSignal(dict, int)
+ signalUpdateGazeGraph = QtCore.pyqtSignal(dict, int)
+ signalUpdateGazeMap = QtCore.pyqtSignal(int, list, list)
+ signalUpdateTagGraph = QtCore.pyqtSignal(dict)
+ signalUpdateTags = QtCore.pyqtSignal(int, list, list)
+ signalClearLabels = QtCore.pyqtSignal()
+ signalClearPose = QtCore.pyqtSignal()
+ signalClearGaze = QtCore.pyqtSignal()
+ signalClearTags = QtCore.pyqtSignal()
+ signalDeactivatePoseTab = QtCore.pyqtSignal(bool)
+ signalDeactivateGazeTab = QtCore.pyqtSignal(bool)
+ signalDeactivateFaceTab = QtCore.pyqtSignal(bool)
+ signalDeactivateSpeakingTab = QtCore.pyqtSignal(bool)
+ signalDeactivateObjectTab = QtCore.pyqtSignal(bool)
+
+ def __init__(self, parent=None):
+ super(Processor, self).__init__(parent)
+
+ self.cap = None
+ self.dataGaze = None
+ self.dataGazeMeasures = None
+ self.dataMovement = None
+ self.dataFace = None
+ self.dataRTGene = None
+ self.dataSpeaker = None
+ self.dataObjects = None
+ self.colors = None
+ self.tagColors = None
+ self.videoScale = 1
+
+ self.updateAUs = dict()
+ self.movementActivity = dict()
+ self.tagMovement = dict()
+ self.handActivity = dict()
+ self.selectedIDs = None
+ self._ready = False
+ self.activeTab = 0
+
+ @QtCore.pyqtSlot(QtGui.QImage)
+ def saveCurrentFrameData(self, newFrameData):
+ if newFrameData is None:
+ return
+
+ newFrameData = newFrameData.convertToFormat(4)
+
+ width = newFrameData.width()
+ height = newFrameData.height()
+
+ ptr = newFrameData.bits()
+ ptr.setsize(newFrameData.byteCount())
+ self.frameData = np.array(ptr).reshape(height, width, 4)
+
+ def updateFrame(self, position):
+ threshold = 100
+ self.position = position
+ if self._ready:
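+            # The media player reports its position in milliseconds; convert
+            # it to a frame index using the video frame rate.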
+ frame = int((position / 1000.0) * self.fps)
+ self.frame = frame
+
+ movement = {}
+ velocity = {}
+ gaze = {}
+ face_aus = {}
+ speaking = {}
+ tagData = {}
+ # neck_points = list()
+            f = self.dataRTGene.loc[self.dataRTGene['Frame'] == self.frame] if self.dataRTGene is not None else None
+ for id_no in range(self.numberIDs):
+
+ ### Facial Activity Data ###
+ if self.dataFace is not None and self.activeTab == 3:
+
+ face_aus[id_no] = self.dataFace.loc[
+ self.dataFace['Frame'] == self.frame, ['ID%i_AUs' % id_no]].values
+
+ if len(face_aus[id_no]) > 0 \
+ and np.sum(np.logical_xor(self.updateAUs[id_no], [face_aus[id_no].flatten()[0] > 0.5])) > 0 \
+ and np.sum([face_aus[id_no].flatten()[0] > 0.5]) > 0 \
+ and np.sum([face_aus[id_no].flatten()[0] > 0.5]) > np.sum(self.updateAUs[id_no]):
+ self.updateAUs[id_no] = [face_aus[id_no].flatten()[0] > 0.5]
+ # print('Update AU Image: ', frame)
+ self.get_current_frame(self.frame, id_no)
+ elif len(face_aus[id_no]) > 0:
+ self.updateAUs[id_no] = [face_aus[id_no].flatten()[0] > 0.5]
+
+ ### Body Movement Data ###
+ if self.dataMovement is not None:
+ if self.visualize and self.visualize['Pose'].isChecked():
+ if self.selectedIDs[id_no]:
+ keypoints = self.dataMovement['ID%i_Keypoints' % id_no].iloc[frame]
+ lstX = []
+ lstY = []
+ # Plot Skeleton --> connections via pose pairs
+ for i in range(len(POSE_PAIRS_NEW)):
+ index = POSE_PAIRS_NEW[i]
+ if keypoints is None:
+ continue
+ A, B = keypoints[index]
+
+ if A is None or B is None:
+ continue
+
+ lstX.append(A[0])
+ lstX.append(B[0])
+ lstY.append(A[1])
+ lstY.append(B[1])
+
+ if len(lstX) > 0 and len(lstY) > 0:
+ self.signalPosePoints.emit(id_no, lstX, lstY)
+ else:
+ self.signalPosePoints.emit(id_no, [], [])
+ else:
+ self.signalClearPose.emit()
+
+                    movement[id_no] = (self.movementActivity[id_no][frame: frame + 200],
+                                       np.arange(frame - 199, frame + 1))
+ velocity[id_no] = self.dataMovement['ID%i_Velocity' % id_no].iloc[frame]
+
+ ### Gaze RTGene Data ###
+ if self.dataRTGene is not None:
+ # Update Labels
+ head = self.dataRTGene['ID%i_Head' % id_no].iloc[frame]
+ if head is not None:
+ if self.visualize and self.visualize['Label'].isChecked():
+ self.signalVideoLabel.emit(id_no, head[0], head[1])
+ else:
+ self.signalClearLabels.emit()
+
+ # Build heatmap
+ if self.visualize and self.visualize['Gaze'].isChecked():
+ if self.selectedIDs[id_no]:
+ if frame <= threshold:
+ target_x = self.dataRTGene['ID%i_target_x' % id_no].iloc[: frame + 1].values.tolist()
+ target_y = self.dataRTGene['ID%i_target_y' % id_no].iloc[: frame + 1].values.tolist()
+ else:
+ target_x = self.dataRTGene['ID%i_target_x' % id_no].iloc[
+ frame - threshold: frame + 1].values.tolist()
+ target_y = self.dataRTGene['ID%i_target_y' % id_no].iloc[
+ frame - threshold: frame + 1].values.tolist()
+ self.signalUpdateGazeMap.emit(id_no, target_x, target_y)
+ else:
+ self.signalClearGaze.emit()
+
+ if not f.empty and self.activeTab == 0:
+ position = f['ID%i_Head' % id_no].values.flatten()[0]
+ gaze_phi = f['ID%i_Phi' % id_no].values.flatten()[0]
+ if not np.any(pd.isnull(position)) and not np.any(pd.isnull(gaze_phi)):
+ gaze[id_no] = self.calculateGazeData(position, gaze_phi)
+
+ elif self.dataMovement is not None:
+ neck = self.dataMovement['ID%s_Keypoints' % id_no].map(
+ lambda x: x[1] if x is not None else None).map(
+ lambda x: x[:2] if x is not None else None)
+ # neck_points.append(neck.iloc[frame])
+ if self.visualize and self.visualize['Label'].isChecked():
+ if neck.iloc[frame] is not None:
+ self.signalVideoLabel.emit(id_no, neck.iloc[frame][0], neck.iloc[frame][1])
+ else:
+ self.signalClearLabels.emit()
+
+ ### Speaking Data ###
+ if self.dataSpeaker is not None and self.activeTab == 1:
+ e = self.dataSpeaker.loc[self.dataSpeaker.Frame < frame]
+ rst = e['ID%i_is_speaker' % id_no].sum() / (len(e) + 1)
+ speaking[id_no] = rst
+
+
+ ### Object Data ###
+ if self.dataObjects is not None:
+ for tag in self.tags:
+ tagData[tag] = self.tagMovement[tag][frame: frame + 200], np.arange(frame - 199, frame + 1)
+ if self.visualize and self.visualize['Tags'].isChecked():
+ if frame <= 30:
+ position = self.dataObjects[tag].iloc[: frame + 1].values.tolist()
+ else:
+ position = self.dataObjects[tag].iloc[frame - 30: frame + 1].values.tolist()
+
+
+ x_values = [x[0] for x in position if x is not None]
+ y_values = [x[1] for x in position if x is not None]
+ self.signalUpdateTags.emit(tag, x_values, y_values)
+ else:
+ self.signalClearTags.emit()
+
+ ### Send collected data to respective Tabs ###
+ if self.dataFace is not None and self.activeTab == 3:
+ self.signalUpdateFaceAus.emit(face_aus)
+
+ if self.dataMovement is not None and self.activeTab == 2:
+ self.signalUpdateMovementGraph.emit(movement, self.colors, self.numberIDs)
+ self.signalUpdateHandVelocity.emit(velocity, self.numberIDs)
+
+ if self.dataRTGene is not None and self.activeTab == 0:
+ self.signalUpdateGazeGraph.emit(gaze, self.numberIDs)
+
+ if self.dataSpeaker is not None and self.activeTab == 1:
+ active = self.dataSpeaker.loc[self.dataSpeaker.Frame == frame, sorted(
+ [col for col in self.dataSpeaker.columns if 'speak_score' in col])].values.flatten()
+ active = active[~pd.isnull(active)]
+ if active.size > 0:
+ active_speaker = np.argmax(active)
+ else:
+ active_speaker = None
+ self.signalUpdateSpeakGraph.emit(speaking, active_speaker, self.numberIDs)
+
+ if self.dataObjects is not None and self.activeTab == 4:
+ self.signalUpdateTagGraph.emit(tagData)
+
+ @QtCore.pyqtSlot(int)
+ def tabChanged(self, current):
+ self.activeTab = current
+
+ @QtCore.pyqtSlot(list)
+ def onSelectedID(self, lst):
+ for i, button in enumerate(lst):
+ if button.isChecked():
+ self.selectedIDs[i] = True
+ else:
+ self.selectedIDs[i] = False
+
+ @QtCore.pyqtSlot(int)
+ def get_current_frame(self, frame, id_no):
+ face_imgs = {}
+
+ if os.name == 'nt':
+ # if on windows we have to read the image
+            self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame)
+ ret, image = self.cap.read()
+ if ret:
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+ else:
+ return
+ else:
+ # we can use the image from QT-decoding
+ image = cv2.cvtColor(self.frameData, cv2.COLOR_BGR2RGB)
+ # Get 66 landmarks from RT Gene
+ img_land = self.dataRTGene.loc[self.dataRTGene.Frame == frame, ['ID%i_Landmarks' % id_no]].values[0]
+ if len(img_land) > 0:
+ img_land = img_land[0] * self.videoScale
+
+ # Convert 68 landmarks to 49
+ img_land = np.delete(img_land, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 62, 66], axis=0).flatten()
+ face_crop, _ = self.crop_face(image, img_land)
+ face_imgs[id_no] = face_crop
+ else:
+ face_imgs[id_no] = None
+
+ self.signalUpdateFaceImgs.emit(face_imgs, id_no)
+
+ def crop_face(self, img, img_land, box_enlarge=4, img_size=200):
+
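+        # Face alignment: estimate the eye centers from the landmarks, rotate
+        # so that the eye line is horizontal, then scale and translate so the
+        # face fills an img_size x img_size crop (the warpAffine call below).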
+ leftEye0 = (img_land[2 * 19] + img_land[2 * 20] + img_land[2 * 21] + img_land[2 * 22] + img_land[2 * 23] +
+ img_land[2 * 24]) / 6.0
+ leftEye1 = (img_land[2 * 19 + 1] + img_land[2 * 20 + 1] + img_land[2 * 21 + 1] + img_land[2 * 22 + 1] +
+ img_land[2 * 23 + 1] + img_land[2 * 24 + 1]) / 6.0
+ rightEye0 = (img_land[2 * 25] + img_land[2 * 26] + img_land[2 * 27] + img_land[2 * 28] + img_land[2 * 29] +
+ img_land[2 * 30]) / 6.0
+ rightEye1 = (img_land[2 * 25 + 1] + img_land[2 * 26 + 1] + img_land[2 * 27 + 1] + img_land[2 * 28 + 1] +
+ img_land[2 * 29 + 1] + img_land[2 * 30 + 1]) / 6.0
+ deltaX = (rightEye0 - leftEye0)
+ deltaY = (rightEye1 - leftEye1)
+ l = math.sqrt(deltaX * deltaX + deltaY * deltaY)
+ sinVal = deltaY / l
+ cosVal = deltaX / l
+ mat1 = np.mat([[cosVal, sinVal, 0], [-sinVal, cosVal, 0], [0, 0, 1]])
+
+ mat2 = np.mat([[leftEye0, leftEye1, 1], [rightEye0, rightEye1, 1], [img_land[2 * 13], img_land[2 * 13 + 1], 1],
+ [img_land[2 * 31], img_land[2 * 31 + 1], 1], [img_land[2 * 37], img_land[2 * 37 + 1], 1]])
+
+ mat2 = (mat1 * mat2.T).T
+
+ cx = float((max(mat2[:, 0]) + min(mat2[:, 0]))) * 0.5
+ cy = float((max(mat2[:, 1]) + min(mat2[:, 1]))) * 0.5
+
+ if (float(max(mat2[:, 0]) - min(mat2[:, 0])) > float(max(mat2[:, 1]) - min(mat2[:, 1]))):
+ halfSize = 0.5 * box_enlarge * float((max(mat2[:, 0]) - min(mat2[:, 0])))
+ else:
+ halfSize = 0.5 * box_enlarge * float((max(mat2[:, 1]) - min(mat2[:, 1])))
+
+ scale = (img_size - 1) / 2.0 / halfSize
+ mat3 = np.mat([[scale, 0, scale * (halfSize - cx)], [0, scale, scale * (halfSize - cy)], [0, 0, 1]])
+ mat = mat3 * mat1
+
+ aligned_img = cv2.warpAffine(img, mat[0:2, :], (img_size, img_size), cv2.INTER_LINEAR,
+ borderValue=(128, 128, 128))
+
+ land_3d = np.ones((int(len(img_land) / 2), 3))
+ land_3d[:, 0:2] = np.reshape(np.array(img_land), (int(len(img_land) / 2), 2))
+ mat_land_3d = np.mat(land_3d)
+ new_land = np.array((mat * mat_land_3d.T).T)
+ new_land = np.reshape(new_land[:, 0:2], len(img_land))
+
+ return aligned_img, new_land
+
+ def calculateAllMeasures(self):
+ """Recalculate all measures for selected segments for export"""
+ movement = dict()
+ gaze = dict()
+ speaking_dict = dict()
+ face = dict()
+
+ if self.segments is None:
+ segments = np.ones(len(self.dataRTGene))
+ else:
+ segments = self.segments
+
+ if self.dataMovement is not None:
+ dataMov = self.dataMovement.loc[segments == 1]
+ total = len(dataMov)
+
+ for id_no in range(self.numberIDs):
+ x_mov = [np.linalg.norm(x) if x is not None else np.nan for x in dataMov['ID%i_Movement' % id_no]]
+ # Add frames until start of segment to frame number
+ mostActivity = np.argmax(np.array(x_mov)) + np.argmax(segments)
+
+ # Frames with both hands tracked
+ tracked = dataMov.loc[dataMov['ID%s_HandsTracked' % id_no] == 2, ['ID%s_HandsTracked' % id_no]].count()
+ high_vel = dataMov.loc[dataMov['ID%i_Velocity' % id_no] > 1]['ID%i_Velocity' % id_no].count()
+
+ movement[id_no] = {'Most activity': int(mostActivity),
+ 'Hands above table (relative)': float(tracked[0] / total),
+ 'Gestures (relative)': float(high_vel / total)}
+
+ if self.dataSpeaker is not None:
+ dataSpeak = self.dataSpeaker.loc[segments == 1]
+
+ for id_no in range(self.numberIDs):
+ tracked_frames = dataSpeak[dataSpeak.notnull()].count()['ID%i_is_speaker' % id_no]
+ rst = dataSpeak['ID%i_is_speaker' % id_no].sum() / len(dataSpeak)
+
+ turns = []
+ counters = []
+ counter = 0
+ turn = 0
+ lastFrame = 0
+ switch = False
+ for frame in sorted(dataSpeak.Frame):
+ if dataSpeak.loc[dataSpeak.Frame == frame, ['ID%i_is_speaker' % id_no]].values and frame == (
+ lastFrame + 1):
+ switch = True
+ turn = turn + 1
+ elif switch:
+ if turn >= 30:
+ turns.append(turn)
+ counter = counter + 1
+ turn = 0
+ switch = False
+ if frame % int(self.fps * 60) == 0:
+ counters.append(counter)
+ counter = 0
+ lastFrame = frame
+
+ avg_turn = np.mean(np.array(turns)) / self.fps
+ avg_count = np.mean(np.array(counters))
+ num_turns = len(turns)
+
+                speaking_dict[id_no] = {'Tracked frames': int(tracked_frames), 'Speaking time (relative)': float(rst),
+                                        'Number of speaking turns': int(num_turns),
+                                        'Average length of speaking turn (seconds)': float(avg_turn),
+                                        'Average number of speaking turns per minute': float(avg_count)}
+
+ if self.dataGazeMeasures is not None:
+ dataGaze = self.dataGazeMeasures.loc[segments == 1]
+
+ for id_no in range(self.numberIDs):
+ # ID looked at other people for frames
+ look = dataGaze['ID%i_looks_at' % id_no].dropna().count()
+ # ID was watched by other people for frames
+ watched = dataGaze['ID%i_watched_by' % id_no].map(
+ lambda x: 1 if not np.any(pd.isna(x)) and len(x) > 0 else 0).sum()
+ tracked = dataGaze['ID%i_tracked' % id_no].sum()
+
+ gaze[id_no] = {'Tracked frames': int(tracked),
+ 'lookSomeone': float(look / tracked),
+ 'totalNoLook': float((tracked - look) / tracked),
+ 'totalWatched': float(watched / tracked),
+ 'ratioWatcherLookSOne': float(watched / look)}
+
+ if self.dataFace is not None:
+ dataFaceAUs = self.dataFace.loc[segments == 1]
+ dict_aus = np.array(['AU1: Inner Brow Raiser', 'AU2: Outer Brow Raiser', 'AU4: Brow Lowerer', 'AU5: Upper Lid Raiser',
+ 'AU6: Cheek Raiser', 'AU9: Nose Wrinkler', 'AU12: Lip Corner Puller', 'AU15: Lip Corner Depressor',
+ 'AU17: Chin Raiser', 'AU20: Lip Stretcher', 'AU25: Lips Part', 'AU26: Jaw Drop'])
+ for id_no in range(self.numberIDs):
+ face[id_no] = []
+ for i, au in enumerate(dict_aus):
+ au_data = [a[i] for a in dataFaceAUs['ID%i_AUs' % id_no] if not np.all(pd.isna(a))]
+ au_data = np.array(au_data) > 0.5
+ face[id_no].append(au + ' : ' + str(au_data.sum()))
+
+ return gaze, speaking_dict, movement, face
+
+ def calculateGazeData(self, position, yaw):
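+        # Top-down view of the group: the head's horizontal pixel position in
+        # the equirectangular frame maps linearly to an angle on a unit
+        # circle, and the gaze is drawn as a ray from that point.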
+        # Get position in spherical coordinates (in radian)
+ id_u = position[0] / self.frameSize[0]
+ id_theta = id_u * 2 * np.pi
+
+        # Flip and shift the angle so the circle layout matches the
+        # orientation seen in the video
+        id_theta = (id_theta * -1) - np.pi
+
+ # ID position on coordinate system
+ id_pos_x = np.cos(id_theta)
+ id_pos_y = np.sin(id_theta)
+
+ x, y = self.get_circle(0.05)
+ circle_x = x + id_pos_x
+ circle_y = y + id_pos_y
+
+ # Add angle - RTGene yaw is in radian
+ id_target = id_theta + np.pi - yaw
+ id_x1_target = np.cos(id_target)
+ id_x2_target = np.sin(id_target)
+
+ # Line
+ line_x = np.array([id_pos_x, id_x1_target])
+ line_y = np.array([id_pos_y, id_x2_target])
+
+ xdata = np.append(circle_x, line_x)
+ ydata = np.append(circle_y, line_y)
+
+ return [xdata, ydata]
+
+ def get_circle(self, radius):
+ theta = np.linspace(0, 2 * np.pi, 100)
+
+ x = radius * np.cos(theta)
+ y = radius * np.sin(theta)
+ return np.array(x), np.array(y)
+
+ @QtCore.pyqtSlot(dict)
+ def onVisualize(self, lst):
+ self.visualize = lst
+
+ @QtCore.pyqtSlot(np.ndarray)
+ def _updateSegments(self, segments):
+ """ Recalculate movement and speaking measures when segment was changed"""
+ # save segments for exporting only wanted timeranges
+ self.segments = segments
+
+ if self.dataMovement is not None:
+ dataMov = self.dataMovement.loc[segments == 1]
+ total = len(dataMov)
+ mostActivity = dict()
+ hand = dict()
+
+ for id_no in range(self.numberIDs):
+ x_mov = [x[0] if x is not None else np.nan for x in dataMov['ID%i_Movement' % id_no]]
+ # Add frames until start of segment to frame number
+ mostActivity[id_no] = np.argmax(np.array(x_mov)) + np.argmax(segments)
+
+ # Frames with both hands tracked
+ tracked = dataMov.loc[dataMov['ID%s_HandsTracked' % id_no] == 2, ['ID%s_HandsTracked' % id_no]].count()
+ high_vel = dataMov.loc[dataMov['ID%i_Velocity' % id_no] > 1]['ID%i_Velocity' % id_no].count()
+ hand[id_no] = [total, tracked[0], high_vel]
+
+ self.signalPoseChangedLabels.emit(mostActivity, hand, self.numberIDs)
+
+ if self.dataSpeaker is not None:
+ diff = len(segments) - len(self.dataSpeaker)
+ dataSpeak = self.dataSpeaker
+ # dataSpeak['Frame'] = dataSpeak.index
+
+ if diff > 0:
+ speakSegments = segments[:-diff]
+ elif diff < 0:
+            speakSegments = np.append(segments, np.zeros(-diff))
+ else:
+ speakSegments = segments
+
+ dataSpeak = self.dataSpeaker.loc[speakSegments == 1]
+
+ speaking_dict = dict()
+ for id_no in range(self.numberIDs):
+ tracked_frames = dataSpeak[dataSpeak.notnull()].count()['ID%i_is_speaker' % id_no]
+ rst = dataSpeak['ID%i_is_speaker' % id_no].sum() / len(dataSpeak)
+ speaking_dict[id_no] = [tracked_frames, rst] # , num_turns, avg_turn, avg_count
+
+ self.signalSpeakChangedLabels.emit(speaking_dict, self.numberIDs)
+
+ def calculateSpeakingMeasures(self):
+ if self.dataSpeaker is None:
+ return
+
+ speaking_dict = dict()
+ total = len(self.dataSpeaker)
+ for id_no in range(self.numberIDs):
+ tracked_frames = self.dataSpeaker[self.dataSpeaker.notnull()].count()['ID%i_is_speaker' % id_no]
+ rst = self.dataSpeaker['ID%i_is_speaker' % id_no].sum() / total
+
+ turns = []
+ counters = []
+ counter = 0
+ turn = 0
+ switch = False
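+            # A speaking turn is an uninterrupted run of speaker frames; only
+            # runs of at least 30 frames are counted as turns, and `counters`
+            # collects the number of completed turns per minute of video.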
+ for frame in sorted(self.dataSpeaker.Frame):
+ if self.dataSpeaker.loc[self.dataSpeaker.Frame == frame, ['ID%i_is_speaker' % id_no]].values:
+ switch = True
+ turn = turn + 1
+ elif switch:
+ if turn >= 30:
+ turns.append(turn)
+ counter = counter + 1
+ turn = 0
+ switch = False
+ if frame % int(self.fps * 60) == 0:
+ counters.append(counter)
+ counter = 0
+
+ avg_turn = np.mean(np.array(turns)) / self.fps
+ avg_count = np.mean(np.array(counters))
+ num_turns = len(turns)
+
+ speaking_dict[id_no] = [tracked_frames, rst, num_turns, avg_turn, avg_count]
+
+ self.signalSpeakerSetInit.emit(speaking_dict, self.colors, self.numberIDs)
+
+ def calculateMovementMeasures(self):
+ """ initial calculation of hand velocity on full data """
+ if self.dataMovement is None:
+ return
+
+ total = len(self.dataMovement)
+ mostActivity = {}
+ for id_no in range(self.numberIDs):
+ x_mov = [np.linalg.norm(x) if x is not None else np.nan for x in self.dataMovement['ID%i_Movement' % id_no]]
+ mostActivity[id_no] = np.argmax(np.array(x_mov))
+ self.movementActivity[id_no] = np.array([*np.zeros(200), *x_mov])
+
+ # Left Wrist and Right Wrist: idx 4, idx 7
+            # Comparing elementwise against None counts the coordinates of each
+            # tracked wrist (3 values per keypoint), yielding 0, 1 or 2 hands.
+            self.dataMovement['ID%i_HandsTracked' % id_no] = self.dataMovement['ID%i_Keypoints' % id_no].map(
+                lambda x: ((np.sum(x[4] != None) + np.sum(x[7] != None)) // 3) if x is not None else None)
+
+ # Pixel position of left and right wrist
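+            # Velocity proxy: relative frame-to-frame change (pct_change) of
+            # each wrist position, averaged over x and y and scaled by 100.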
+ self.dataMovement['ID%i_Hand1_Vel' % id_no] = self.dataMovement['ID%s_Keypoints' % id_no].map(
+ lambda x: x[4] if not np.all(pd.isna(x)) else np.nan).map(
+ lambda x: x[:2].astype(float) if not np.all(pd.isna(x)) else np.nan)
+ self.dataMovement['ID%i_Hand1_Vel' % id_no] = self.dataMovement['ID%i_Hand1_Vel' % id_no].pct_change(1).map(
+ lambda x: np.abs(x.mean()) * 100 if not np.all(pd.isna(x)) else None)
+ self.dataMovement['ID%i_Hand2_Vel' % id_no] = self.dataMovement['ID%s_Keypoints' % id_no].map(
+ lambda x: x[7] if not np.all(pd.isna(x)) else np.nan).map(
+ lambda x: x[:2].astype(float) if not np.all(pd.isna(x)) else np.nan)
+ self.dataMovement['ID%i_Hand2_Vel' % id_no] = self.dataMovement['ID%i_Hand2_Vel' % id_no].pct_change(1).map(
+ lambda x: np.abs(x.mean()) * 100 if not np.all(pd.isna(x)) else None)
+
+ self.dataMovement['ID%i_Velocity' % id_no] = self.dataMovement[
+ ['ID%i_Hand1_Vel' % id_no, 'ID%i_Hand2_Vel' % id_no]].mean(axis=1)
+
+ # Frames with both hands tracked
+ tracked = self.dataMovement.loc[self.dataMovement['ID%s_HandsTracked' %
+ id_no] == 2, ['ID%s_HandsTracked' % id_no]].count()
+ high_vel = self.dataMovement.loc[self.dataMovement[
+ 'ID%i_Velocity' % id_no] > 1]['ID%i_Velocity' % id_no].count()
+
+ self.handActivity[id_no] = [total, tracked[0], high_vel]
+
+ self.signalPoseSetInit.emit(mostActivity, self.handActivity, self.colors, self.numberIDs)
+
+ def calculateGazeMeasures(self):
+ """Initial calculation of gaze measures: dataGazeMeasures """
+ thresh = 15
+ eq_width = self.frameSize[0]
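+        # Another ID counts as a gaze target when the gaze ray's target angle
+        # falls within +/- thresh degrees of that ID's head position.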
+
+ totWatcher = {}
+ lookSomeOne = {}
+ tracked = {}
+ for i in range(self.numberIDs):
+ totWatcher[i] = []
+ lookSomeOne[i] = []
+ tracked[i] = []
+
+ for frame in self.dataRTGene.Frame:
+
+ f = self.dataRTGene.loc[self.dataRTGene.Frame == frame]
+
+ angles = []
+ positions = []
+ targets = []
+ for id_no in range(self.numberIDs):
+ pos = f['ID%i_Head' % id_no].values.flatten()[0]
+ phi = f['ID%i_Phi' % id_no].values.flatten()[0]
+                pos = np.array(pos, dtype=float)
+                phi = np.array(phi, dtype=float)
+
+ if np.any(np.isnan(pos)) or np.any(np.isnan(phi)):
+ positions.append(np.nan)
+ angles.append(np.nan)
+ targets.append(np.nan)
+ tracked[id_no].append(False)
+ continue
+
+ tracked[id_no].append(True)
+                # Get position in spherical coordinates
+ id_u = pos[0] / eq_width
+ id_theta = id_u * 2 * np.pi
+ id_theta = np.rad2deg(id_theta)
+ positions.append(id_theta)
+
+ # Add angle - gaze[1] is yaw
+ angle = np.rad2deg(phi)
+ id_target = id_theta + 180 + angle
+ targets.append(id_target % 360)
+ angles.append(angle)
+
+ # plot_frame_calculated(positions, angles)
+
+ watcher = dict()
+ for i in range(self.numberIDs):
+ watcher[i] = []
+
+ for i, t in enumerate(targets):
+
+ inside_min = np.array([(e - thresh) < targets[i] if not np.isnan(e) else False for e in positions])
+ inside_max = np.array([(e + thresh) > targets[i] if not np.isnan(e) else False for e in positions])
+ # print(inside_min, inside_max)
+
+ if np.any(inside_min) and np.any(inside_max):
+ test = np.logical_and(inside_min, inside_max)
+ idx = np.where(test)[0]
+ for j in range(len(idx)):
+ # ID i watches idx[j]
+ lookSomeOne[i].append([frame, idx[j]])
+ # ID idx[j] is being looked at by i
+ watcher[idx[j]].append(i)
+
+ for k, v in watcher.items():
+ totWatcher[k].append([frame, v])
+
+        df_totWatcher = pd.DataFrame(columns=['Frame'])
+ for i in range(self.numberIDs):
+ df_id = pd.DataFrame.from_dict(totWatcher.get(i))
+ df_id = df_id.rename(columns={0: "Frame", 1: "ID{}_watched_by".format(i)})
+ df_totWatcher = pd.merge(df_totWatcher, df_id, how='outer', on=['Frame'], sort=True)
+
+        df_lookSomeOne = pd.DataFrame(columns=['Frame'])
+ for i in range(self.numberIDs):
+ df_id = pd.DataFrame.from_dict(lookSomeOne.get(i))
+ df_id = df_id.rename(columns={0: "Frame", 1: "ID{}_looks_at".format(i)})
+ df_lookSomeOne = pd.merge(df_lookSomeOne, df_id, how='outer', on=['Frame'], sort=True)
+
+        df_tracked = pd.DataFrame(columns=['Frame'])
+ for i in range(self.numberIDs):
+ df_id = pd.DataFrame.from_dict(tracked.get(i))
+ df_id.index.name = 'Frame'
+ df_id = df_id.rename(columns={0: "ID{}_tracked".format(i)})
+ df_tracked = pd.merge(df_tracked, df_id, how='outer', on=['Frame'], sort=True)
+
+ self.dataGazeMeasures = pd.merge(df_lookSomeOne, df_totWatcher, how='outer', on=['Frame'], sort=True)
+ self.dataGazeMeasures = pd.merge(self.dataGazeMeasures, df_tracked, how='outer', on=['Frame'], sort=True)
+
+ gaze = dict()
+ for id_no in range(self.numberIDs):
+ # print(self.dataGazeMeasures['ID%i_watched_by' % id_no])
+ # ID looked at other people for frames
+ look = self.dataGazeMeasures['ID%i_looks_at' % id_no].dropna().count()
+ # ID was watched by other people for frames
+ watched = self.dataGazeMeasures['ID%i_watched_by' % id_no].map(
+ lambda x: 1 if not np.any(pd.isna(x)) and len(x) > 0 else 0).sum()
+ tracked = self.dataGazeMeasures['ID%i_tracked' % id_no].sum()
+ gaze[id_no] = [look, watched, tracked]
+
+ self.signalGazeSetInit.emit(gaze, self.colors, self.numberIDs)
+
+ def calculateGazeTargets(self):
+ # Compute gaze targets
+
+ for id_no in range(self.numberIDs):
+ # self.dataRTGene['ID%i_Phi' % id_no] = self.dataRTGene['ID%i_Phi' % id_no].rolling(15).mean()
+ self.id_no = id_no
+ self.dataRTGene['ID%i_alpha' % id_no] = self.dataRTGene['ID%i_Phi' % id_no].map(
+ lambda x: np.rad2deg(x) - 180 if x is not None else None)
+ self.dataRTGene['ID%i_beta' % id_no] = self.dataRTGene['ID%i_Theta' % id_no].map(
+ lambda x: 180 - 2 * np.rad2deg(x) if x is not None else None)
+ self.dataRTGene['ID%i_target_spher' % id_no] = self.dataRTGene.apply(self.fun, axis=1)
+ self.dataRTGene[['ID%i_target_x' % id_no, 'ID%i_target_y' % id_no]] = self.dataRTGene.apply(self.fun,
+ axis=1,
+ result_type="expand")
+
+ def fun(self, x):
+ alpha = x['ID%i_alpha' % self.id_no]
+ beta = x['ID%i_beta' % self.id_no]
+ pos = x['ID%i_Head' % self.id_no]
+ # print(pos, pd.isna(pos), type(pos))
+ # Discard frames where not all detected
+ if np.any(pd.isna(pos)) or np.any(pd.isna(alpha)) or np.any(pd.isna(beta)):
+ return None, None
+ # Get position in spherical coordinates
+ theta = np.rad2deg((pos[0] / self.frameSize[0]) * 2 * np.pi)
+ phi = np.rad2deg((pos[1] / self.frameSize[1]) * np.pi)
+
+ # Get position in image frame (equirectangular projection)
+ x, y = sperical2equirec((theta + alpha) % 360, (phi + beta) % 180, self.frameSize[0], self.frameSize[1])
+
+ return x, y
+
+ def calculateTagMeasures(self):
+ if self.dataObjects is None:
+ return
+
+ for tag in self.tags:
+ neutral = self.dataObjects[tag].dropna().iloc[0]
+ # print('Tag #%i Starting point set to: %s' % (tag, str(neutral)))
+ self.dataObjects['%i_Movement' % tag] = self.dataObjects[tag].map(
+ lambda x: np.subtract(x, neutral) if x is not None else None)
+            # Euclidean distance
+ x_mov = [np.linalg.norm(x) if x is not None else None for x in self.dataObjects['%i_Movement' % tag]]
+ self.tagMovement[tag] = np.array([*np.zeros(200), *x_mov])
+
+
+ def readData(self, movieFileName, dataFileName, verbose=False):
+ self.movieFileName = movieFileName
+ self.dataFileName = dataFileName
+ if (verbose):
+ print("## Start Reading Data")
+
+ # Read Video Data
+ f = self.movieFileName
+ print('Reading video from %s' % f)
+ if os.path.isfile(f):
+
+ self.cap = cv2.VideoCapture(f)
+ self.fps = self.cap.get(cv2.CAP_PROP_FPS)
+ self.frameCount = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
+ self.scaledVideoResolution = [self.cap.get(cv2.CAP_PROP_FRAME_WIDTH),
+ self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)]
+
+ if (verbose):
+ print('Video resolution: ', self.scaledVideoResolution)
+ print("Video frameCount %i" % self.frameCount)
+ duration = self.frameCount / self.fps
+ minutes = int(duration / 60)
+ seconds = duration % 60
+ print('Video duration (M:S) = ' + str(minutes) + ':' + str(seconds))
+ else:
+ print("WARNING: no video available.")
+
+ # read data file
+ with open(self.dataFileName, 'rb') as f:
+ data = pkl.load(f)
+
+ if "originalVideoResolution" in data:
+ self.originalVideoResolution = data["originalVideoResolution"]
+ self.videoScale = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH) / self.originalVideoResolution[0]
+ self.frameSize = data["originalVideoResolution"]
+ if verbose:
+ print('Video resolution scale factor: ', self.videoScale)
+
+ # Read RTGene Data
+ if "RTGene" in data:
+ self.dataRTGene = data["RTGene"]
+ self.dataRTGene = self.dataRTGene.where(pd.notnull(self.dataRTGene), None)
+ self.numberIDs = len([col for col in self.dataRTGene.columns if 'Landmarks' in col])
+
+ else:
+ self.signalDeactivateGazeTab.emit(True)
+            print("WARNING: no RTGene data available. Deactivating gaze tab.")
+
+ # Read Movement Data
+ if "BodyMovement" in data:
+ self.dataMovement = data["BodyMovement"]
+
+ if not self.numberIDs:
+ self.numberIDs = len([col for col in self.dataMovement if 'Movement' in col])
+ if verbose:
+ print('Body movement sample count %i' % len(self.dataMovement))
+ else:
+ self.signalDeactivatePoseTab.emit(True)
+ print('WARNING: no body movement data available. Deactivating pose tab.')
+
+ # Read Facial Activity Data
+ if "ActivityUnits" in data:
+ self.dataFace = data["ActivityUnits"]
+ if not self.numberIDs:
+ self.numberIDs = len([col for col in self.dataFace.columns if 'AUs' in col])
+
+ if (verbose):
+ print("Activity Units sample count %i" % len(self.dataFace))
+ else:
+ self.signalDeactivateFaceTab.emit(True)
+ print("WARNING: no face activity data available. Deactivating face tab.")
+
+ # Read Speaker Diarization Data
+ if 'Speaker' in data:
+ self.dataSpeaker = data['Speaker']
+ else:
+ self.signalDeactivateSpeakingTab.emit(True)
+ print('WARNING: no speaking data available. Deactivating speaking tab.')
+
+ # Read AprilTag Data
+ if 'April' in data:
+ self.dataObjects = data['April']
+ self.tags = [col for col in self.dataObjects.columns if type(col) == int]
+ self.tagColors = [tuple(np.random.random(size=3) * 256) for i in range(len(self.tags))]
+ tracked = dict()
+ for tag in self.tags:
+ tracked[tag] = self.dataObjects[tag].dropna().count() / len(self.dataObjects)
+
+ self.signalInitTags.emit(self.tags, self.originalVideoResolution, tracked, self.tagColors)
+ else:
+            self.signalDeactivateObjectTab.emit(True)
+ print('WARNING: no object detection data available. Deactivating object tab.')
+
+ # Set colors: To get visually distinct colors, generate them in HSV space then convert to RGB.
+ hsv = [(i / self.numberIDs, 1, 1.0) for i in range(self.numberIDs)] # 1.0 brightness
+ self.colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
+
+ self.selectedIDs = []
+ for id_no in range(self.numberIDs):
+ self.updateAUs[id_no] = np.zeros(12)
+ self.selectedIDs.append(True)
+
+ self.calculateTagMeasures()
+        if self.dataRTGene is not None:
+            self.calculateGazeTargets()
+            self.calculateGazeMeasures()
+ self.signalInit.emit(self.colors, self.numberIDs)
+
+ def export(self):
+ # get export location
+        fileName = QtWidgets.QFileDialog.getSaveFileName(self, "Export calculations", self.dataFileName.replace(
+ "dat", "json"), "Json File (*.json);;All Files (*)")
+ if fileName[0] == '':
+ return
+
+ # collect all new calculated values
+ data = dict()
+ gaze, speaking, movement, face = self.calculateAllMeasures()
+
+ for id_no in range(self.numberIDs):
+ data['ID%i' % id_no] = {'Eye Gaze': gaze.get(id_no),
+ 'Speaking Activity': speaking.get(id_no),
+ 'Body and Hand Movement': movement.get(id_no),
+ 'Face Activity': face.get(id_no)}
+
+ with open(fileName[0], 'w', encoding='utf-8') as f:
+ json.dump(data, f, ensure_ascii=False, indent=4)
+
+ segment_id = self._get_segment_ids()
+
+ # export all dataframes as csv
+ if self.dataRTGene is not None:
+ if self.segments is None:
+ self.dataRTGene['segment'] = 0
+ self.dataRTGene.to_csv(fileName[0].replace(
+ ".json", "-gaze.csv"), index=True, encoding='utf-8')
+ else:
+ self.dataRTGene['segment'] = segment_id
+ self.dataRTGene[self.segments[1:] == 1].to_csv(fileName[0].replace(
+ ".json", "-gaze.csv"), index=True, encoding='utf-8')
+
+ if self.dataMovement is not None:
+ if self.segments is None:
+ self.dataMovement['segment'] = 0
+ self.dataMovement.to_csv(fileName[0].replace(
+ ".json", "-body-movement.csv"), index=True, encoding='utf-8')
+ else:
+ self.dataMovement['segment'] = segment_id
+ self.dataMovement[self.segments == 1].to_csv(fileName[0].replace(
+ ".json", "-body-movement.csv"), index=True, encoding='utf-8')
+
+
+ if self.dataFace is not None:
+ if self.segments is None:
+ self.dataFace['segment'] = 0
+ self.dataFace.to_csv(fileName[0].replace(
+ ".json", "-facial-activity.csv"), index=True, encoding='utf-8')
+ else:
+ self.dataFace['segment'] = segment_id
+ self.dataFace[self.segments == 1].to_csv(fileName[0].replace(
+ ".json", "-facial-activity.csv"), index=True, encoding='utf-8')
+
+ if self.dataSpeaker is not None:
+ if self.segments is None:
+ self.dataSpeaker['segment'] = 0
+ self.dataSpeaker.to_csv(fileName[0].replace(
+ ".json", "-speaker.csv"), index=True, encoding='utf-8')
+ else:
+ self.dataSpeaker['segment'] = segment_id
+ self.dataSpeaker[self.segments == 1].to_csv(fileName[0].replace(
+ ".json", "-speaker.csv"), index=True, encoding='utf-8')
+
+ if self.dataObjects is not None:
+            if self.segments is None:
+ self.dataObjects['segment'] = 0
+ self.dataObjects.to_csv(fileName[0].replace(
+ ".json", "-objects.csv"), index=True, encoding='utf-8')
+ else:
+ self.dataObjects['segment'] = segment_id
+ self.dataObjects[self.segments == 1].to_csv(fileName[0].replace(
+ ".json", "-objects.csv"), index=True, encoding='utf-8')
+
+
+ print('Exported data to', fileName[0])
+
+ def _get_segment_ids(self):
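+        """Map self.segments (1 = frame selected, 0 = not selected) to a per-frame
+        segment id: each run of selected frames is meant to get the next id,
+        starting at 0, while unselected frames keep -1. Returns None when no
+        segments are defined."""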
+        if self.segments is None:
+ return None
+ segment_id = [-1 for s in self.segments]
+ segment_counter = -1
+
+ old = self.segments[0]
+ segment_id[0] = 0
+ for i, current in enumerate(self.segments[1:]):
+ if current == 1:
+
+ if old != current:
+ segment_counter += 1
+ segment_id[i + 1] = segment_counter
+ old = current
+ return segment_id
+
+ def getColors(self):
+ return self.colors
+
+ def getTags(self):
+ return self.tags
+
+ def getTagColors(self):
+ return self.tagColors
+
+ def getFrameCount(self):
+ return self.frameCount
+
+ def getFrameSize(self):
+ return self.frameSize
+
+ def getFPS(self):
+ return self.fps
+
+ def getVideo(self):
+ return self.movieFileName
+
+ def getGazeData(self):
+ return self.dataGaze
+
+ def getFrame(self, frameIdx):
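+        # note: currently a pass-through that just returns the requested index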
+ return frameIdx
+
+ def getFrameCurrent(self):
+ return 1
+
+ def getNumberIDs(self):
+ return self.numberIDs
+
+ def getMovementData(self):
+ return self.dataMovement
+
+ def setReady(self, ready):
+ self._ready = ready
+
+ def getOriginalVideoResolution(self):
+ return self.originalVideoResolution
+
+
+
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..7566feb
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,5 @@
+PyQt5==5.14.2
+opencv-python==4.2.0.34
+pandas
+pyqtgraph
+matplotlib
+numpy
\ No newline at end of file
diff --git a/uiwidget/__init__.py b/uiwidget/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/uiwidget/widgetfacialexpression.py b/uiwidget/widgetfacialexpression.py
new file mode 100644
index 0000000..e48a4bd
--- /dev/null
+++ b/uiwidget/widgetfacialexpression.py
@@ -0,0 +1,89 @@
+import numpy as np
+
+import pyqtgraph as pg
+from PyQt5 import QtWidgets, QtCore, QtGui
+from PyQt5.QtGui import QColor
+
+lst_aus = ['AU1: Inner Brow Raiser', 'AU2: Outer Brow Raiser', 'AU4: Brow Lowerer', 'AU5: Upper Lid Raiser',
+ 'AU6: Cheek Raiser', 'AU9: Nose Wrinkler', 'AU12: Lip Corner Puller', 'AU15: Lip Corner Depressor',
+ 'AU17: Chin Raiser', 'AU20: Lip Stretcher', 'AU25: Lips Part', 'AU26: Jaw Drop']
+
+class WidgetFacialExpression(QtWidgets.QWidget):
+ def __init__(self, parent=None):
+ super(WidgetFacialExpression, self).__init__(parent)
+
+ self.faceLayout = QtWidgets.QHBoxLayout()
+ self.setLayout(self.faceLayout)
+
+ self.numberIDs = None
+ self.valueLabels = dict()
+ self.imgPlots = dict()
+
+ @QtCore.pyqtSlot(list, int)
+ def setInit(self, colors, numberIDs):
+ self.numberIDs = numberIDs
+
+ for id_no in range(numberIDs):
+ idLayout = QtWidgets.QHBoxLayout()
+ labelNameLayout = QtWidgets.QVBoxLayout()
+ labelValueLayout = QtWidgets.QVBoxLayout()
+ imageLayout = QtWidgets.QVBoxLayout()
+ imageWidget = pg.PlotWidget(background=QColor(53, 53, 53))
+ imageWidget.invertY()
+            imageWidget.hideAxis('bottom')
+            imageWidget.hideAxis('left')
+            imageWidget.setMaximumHeight(150)
+            imageWidget.setMaximumWidth(150)
+ imageWidget.setAspectLocked(True)
+ self.imgPlots[id_no] = imageWidget
+
+ color = tuple([int(a * 255) for a in colors[id_no]])
+
+ labelID = QtWidgets.QLabel('ID%i' % id_no)
+ labelID.setStyleSheet('font: bold 12px; color: black; background-color: rgb(%i,%i,%i)' % color)
+
+
+            # labelID.setFixedWidth(60)
+ labelNameLayout.addWidget(labelID)
+ labelID = QtWidgets.QLabel(' ')
+
+ #labelID.setStyleSheet('background-color: rgb(%i,%i,%i)' % color)
+ labelValueLayout.addWidget(labelID)
+ lst = []
+ for au in lst_aus:
+ nLabel = QtWidgets.QLabel(au)
+ labelNameLayout.addWidget(nLabel)
+
+ vLabel = QtWidgets.QLabel(' ')
+ labelValueLayout.addWidget(vLabel)
+ lst.append(vLabel)
+
+ self.valueLabels[id_no] = lst
+ idLayout.addWidget(imageWidget)
+ idLayout.addLayout(labelNameLayout)
+ idLayout.addLayout(labelValueLayout)
+ self.faceLayout.addLayout(idLayout)
+
+ @QtCore.pyqtSlot(dict, int)
+ def updateImages(self, imgs, id_no):
+        if imgs[id_no] is not None:
+            img = np.moveaxis(imgs[id_no], 0, 1)
+            # drop the previous frame's image before adding the new one, so items do not pile up
+            self.imgPlots[id_no].clear()
+            self.imgPlots[id_no].addItem(pg.ImageItem(img))
+
+
+ @QtCore.pyqtSlot(dict)
+ def updateFrame(self, aus):
+ if self.numberIDs is None:
+ return
+
+ for id_no in range(self.numberIDs):
+ if len(aus[id_no]) > 0:
+ for i, label in enumerate(self.valueLabels[id_no]):
+ if not np.any(np.isnan(np.array(aus[id_no].flatten()[0], dtype=np.float64))):
+ label.setText('%.2f' % aus[id_no].flatten()[0][i])
+
+
+
+
+
diff --git a/uiwidget/widgetgaze.py b/uiwidget/widgetgaze.py
new file mode 100644
index 0000000..5f5cb5f
--- /dev/null
+++ b/uiwidget/widgetgaze.py
@@ -0,0 +1,147 @@
+import numpy as np
+
+import matplotlib
+
+matplotlib.use("Qt5Agg")  # select the Qt backend before any other matplotlib import
+from PyQt5 import QtWidgets, QtGui
+from PyQt5 import QtCore
+import pyqtgraph as pg
+
+
+class WidgetGaze(QtWidgets.QWidget):
+
+ def __init__(self, parent=None):
+ super(WidgetGaze, self).__init__(parent)
+
+ layout = QtWidgets.QGridLayout()
+
+ # Setup gaze graph
+ self.gazeGraph = pg.PlotWidget()
+ self.gazeGraph.setBackground('w')
+ self.gazeGraph.setYRange(-1.25, 1.25, padding=0)
+ self.gazeGraph.setXRange(-1.25, 1.25, padding=0)
+ self.gazeGraph.hideAxis('left')
+ self.gazeGraph.hideAxis('bottom')
+ self.gazeGraph.setAspectLocked()
+ self.gazeGraph.getPlotItem().setTitle(title='Top-down View of Gaze')
+ self.gazeGraphPlots = []
+
+ self.measures = QtWidgets.QWidget()
+ self.measuresLayout = QtWidgets.QHBoxLayout()
+
+ # self.gazeMap = QtWidgets.QWidget()
+ # self.heatmapSlider = HeatmapSlider()
+ # self.heatmapSlider.signalSetThreshold.connect(self.setThreshold)
+ # self.heatmapSlider.signalSaveImage.connect(self.gazeMap.saveImage)
+
+ # row, column, row span, column span
+ layout.addWidget(self.measures, 0, 1, 2, 1)
+ layout.addWidget(self.gazeGraph, 0, 0, 2, 1)
+ layout.setColumnStretch(0, 1)
+ layout.setColumnStretch(1, 1)
+
+ self.setLayout(layout)
+ # layout.addWidget(self.gazeMap, 0, 0, 3, 1)
+ # layout.addWidget(self.heatmapSlider, 3, 0, 1, 1)
+ self.gazeLabels = []
+ self.colors = None
+
+ @QtCore.pyqtSlot(dict, list, int)
+ def setInit(self, measures, colors, numberIDs):
+ """Initialize measure widget with labels for all IDs"""
+ self.colors = colors # Necessary for ID updates
+ idLayout = QtWidgets.QVBoxLayout()
+ labelID = QtWidgets.QLabel(' ')
+ labelID.setFixedWidth(60)
+ labelID.setFixedHeight(20)
+ labelA = QtWidgets.QLabel('LookSomeone: ')
+ labelNoLook = QtWidgets.QLabel('TotalNoLook: ')
+ labelG = QtWidgets.QLabel('TotalWatched: ')
+        labelRatio = QtWidgets.QLabel('RatioWatchedLookSomeone: ')
+ label = QtWidgets.QLabel('Tracked: ')
+ # labelVel = QtWidgets.QLabel('totNoLook: ')
+ idLayout.addWidget(labelID)
+ idLayout.addWidget(labelA)
+ idLayout.addWidget(labelNoLook)
+ idLayout.addWidget(labelG)
+ idLayout.addWidget(labelRatio)
+ idLayout.addWidget(label)
+ # idLayout.addWidget(labelVel)
+ self.measuresLayout.insertLayout(-1, idLayout)
+
+ for id_no in range(numberIDs):
+ idLayout = QtWidgets.QVBoxLayout()
+
+ color = tuple([int(a * 255) for a in colors[id_no]])
+
+ labelID = QtWidgets.QLabel('ID%i' % id_no)
+ labelID.setStyleSheet('font: bold 12px; color: black; background-color: rgb(%i,%i,%i)' % color)
+ labelID.setFixedWidth(60)
+ labelID.setFixedHeight(20)
+ # Look Someone
+ labelA = QtWidgets.QLabel('{:.2%}'.format(measures[id_no][1] / measures[id_no][2]))
+ labelNoLook = QtWidgets.QLabel('{:.2%}'.format((measures[id_no][2] - measures[id_no][1]) / measures[id_no][2]))
+ # Total Watched
+ labelG = QtWidgets.QLabel('{:.2%}'.format(measures[id_no][0] / measures[id_no][2]))
+ # ratio totWatcher / lookSomeone
+ labelRatio = QtWidgets.QLabel('{:.2}'.format(measures[id_no][0] / measures[id_no][1]))
+ label = QtWidgets.QLabel('%i frames' % measures[id_no][2])
+ # labelVel = QtWidgets.QLabel('%.2f' % np.random.uniform(0, 1))
+ idLayout.addWidget(labelID)
+ idLayout.addWidget(labelA)
+ idLayout.addWidget(labelNoLook)
+ idLayout.addWidget(labelG)
+ idLayout.addWidget(labelRatio)
+ idLayout.addWidget(label)
+ # idLayout.addWidget(labelVel)
+ # self.gazeLabels.append(labelVel)
+ self.measuresLayout.insertLayout(-1, idLayout)
+ self.measures.setLayout(self.measuresLayout)
+
+ @QtCore.pyqtSlot(list, int)
+ def initGazeGraph(self, colors, numberIDs):
+ """ initialize gaze graph """
+ # Big circle
+ x1, y1 = self.get_circle(radius=1)
+ self.gazeGraph.addItem(self.gazeGraph.plot(x1, y1, pen=pg.mkPen(0.5)))
+
+ # Camera
+ x2, y2 = self.get_circle(radius=0.02)
+ self.gazeGraph.addItem(self.gazeGraph.plot(x2, y2, pen=pg.mkPen(color=(0, 0, 0), width=3)))
+
+ for id_no in range(numberIDs):
+ color = tuple([int(a * 255) for a in colors[id_no]])
+ plt = self.gazeGraph.plot(x=[], y=[], pen=pg.mkPen(color=color, width=2))
+ self.gazeGraphPlots.append(plt)
+
+ @QtCore.pyqtSlot(dict, int)
+ def updateGazeGraph(self, data, numberIDs):
+ """ frame updates for gaze graph """
+ for id_no in range(numberIDs):
+ if data and id_no in data:
+ self.gazeGraphPlots[id_no].setData(data[id_no][0], data[id_no][1])
+
+ def get_circle(self, radius):
+ """ helper function returns circle to x, y coordinates"""
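+        # note: the same helper also exists as utils.util.get_circle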
+ theta = np.linspace(0, 2 * np.pi, 100)
+ x = radius * np.cos(theta)
+ y = radius * np.sin(theta)
+ return np.array(x), np.array(y)
+
+ @QtCore.pyqtSlot(list)
+ def onSelectedID(self, lst):
+ """Change color to None of gaze graph plot if ID should not be visible"""
+ for i, button in enumerate(lst):
+ if not button.isChecked():
+ self.gazeGraphPlots[i].setPen(None)
+ else:
+ color = tuple([int(a * 255) for a in self.colors[i]])
+ pen = pg.mkPen(color=color)
+ self.gazeGraphPlots[i].setPen(pen)
diff --git a/uiwidget/widgetobject.py b/uiwidget/widgetobject.py
new file mode 100644
index 0000000..39cbbcf
--- /dev/null
+++ b/uiwidget/widgetobject.py
@@ -0,0 +1,81 @@
+import numpy as np
+from PyQt5 import QtWidgets, QtCore
+import pyqtgraph as pg
+
+
+class WidgetObject(QtWidgets.QWidget):
+ def __init__(self, parent=None):
+ super(WidgetObject, self).__init__(parent)
+
+ self.frame = 0
+ self.tagFields = QtWidgets.QWidget()
+ self.tagLayout = QtWidgets.QVBoxLayout()
+
+ # Setup Graph Plot Widget
+ self.tagGraph = pg.PlotWidget()
+ self.tagGraph.setBackground('w')
+ self.tagGraph.setYRange(0, 400, padding=0)
+ self.tagGraph.getPlotItem().getAxis('bottom').setTickSpacing(minor=50, major=100)
+ self.tagGraph.getPlotItem().setTitle(title='Movement of Object Tags')
+ self.tagPlots = dict()
+
+ self.tagTextFields = dict()
+ self.plotText = dict()
+
+ layout = QtWidgets.QGridLayout()
+ layout.addWidget(self.tagGraph, 0, 0)
+ layout.addWidget(self.tagFields, 0, 1)
+ self.setLayout(layout)
+
+ @QtCore.pyqtSlot(list, tuple, dict, list)
+ def setInit(self, tags, frameSize, tracked, colors):
+
+ for i, tag in enumerate(tags):
+ label = QtWidgets.QLabel('Object tag #%i:' % tag)
+ label.setStyleSheet('color: black; background-color: rgb(%i,%i,%i)' % colors[i])
+ label.setFixedHeight(20)
+ field = QtWidgets.QTextEdit()
+ field.setFixedHeight(20)
+ field.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
+ field.textChanged.connect(self.tagTextChanged)
+
+ self.tagTextFields[tag] = field
+ trackedLabel = QtWidgets.QLabel('Tracked: {:.0%}'.format(tracked[tag]))
+ oneTagLayout = QtWidgets.QHBoxLayout()
+ oneTagLayout.addWidget(label)
+ oneTagLayout.addWidget(field)
+ oneTagLayout.addWidget(trackedLabel)
+
+ self.tagLayout.insertLayout(-1, oneTagLayout)
+
+ x = list(range(-200, 0)) # 200 time points
+ y = [0 for _ in range(200)] # 200 data points
+
+ dataLine = self.tagGraph.plot(x, y, pen=pg.mkPen(color=colors[i]))
+ self.tagPlots[tag] = dataLine
+ text = pg.TextItem(text='', color=colors[i])
+ text.setAnchor((1, i + 1))
+
+ self.plotText[tag] = text
+ self.tagGraph.addItem(text)
+
+ self.tagFields.setLayout(self.tagLayout)
+
+ @QtCore.pyqtSlot(dict)
+ def updateTagGraph(self, tagData):
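+        """Update each tag's plot line; tagData maps tag id -> (y values, x frame numbers)."""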
+ for tag, values in tagData.items():
+ self.tagPlots[tag].setData(x=values[1], y=values[0]) # Update the data.
+ if tagData:
+ self.tagGraph.setXRange(np.min(values[1]), np.max(values[1]))
+
+ def tagTextChanged(self):
+ for tag, field in self.tagTextFields.items():
+ self.plotText[tag].setText(field.toPlainText())
+ x, y = self.tagPlots[tag].getData()
+ if len(x) > 0 and len(y) > 0:
+ #print(tag, x[-1], y[-1])
+ self.plotText[tag].setPos(x[-1], y[-1])
+
diff --git a/uiwidget/widgetplayer.py b/uiwidget/widgetplayer.py
new file mode 100644
index 0000000..a322c63
--- /dev/null
+++ b/uiwidget/widgetplayer.py
@@ -0,0 +1,309 @@
+from PyQt5 import QtCore, QtGui, QtWidgets, QtMultimedia, QtMultimediaWidgets, Qt
+
+import os
+import numpy as np
+
+
+
+class WidgetPlayer(QtWidgets.QWidget):
+ updateFrame = QtCore.pyqtSignal(int)
+ # sendFileName = QtCore.pyqtSignal(str)
+ sendState = QtCore.pyqtSignal(QtMultimedia.QMediaPlayer.State)
+ frameAvailable = QtCore.pyqtSignal(QtGui.QImage)
+
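+    # note: these containers are class-level attributes shared across all instances;
+    # safe as long as only a single WidgetPlayer exists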
+ labels = list()
+ colors = list()
+ pose_data = list()
+ gaze_data = list()
+ tag_data = dict()
+ tags = list()
+ tag_colors = dict()
+
+ def __init__(self, parent=None):
+ super(WidgetPlayer, self).__init__(parent)
+ self.root = QtCore.QFileInfo(__file__).absolutePath()
+
+ # mediaplayer for decoding the video
+ self.mediaPlayer = QtMultimedia.QMediaPlayer(self, QtMultimedia.QMediaPlayer.VideoSurface)
+ #self.mediaPlayer.setMuted(True)
+
+        # top = QGraphicsScene, middle = QGraphicsView, bottom = QGraphicsVideoItem, lowest = QGraphicsTextItems, ...
+ self._scene = QtWidgets.QGraphicsScene(self)
+ self._scene.setBackgroundBrush(QtGui.QBrush(QtGui.QColor('black')))
+ self._gv = QtWidgets.QGraphicsView(self._scene)
+ self._videoitem = QtMultimediaWidgets.QGraphicsVideoItem()
+ self._videoitem.setPos(0, 0)
+ self._videoitem.setZValue(-1000)
+ self._scene.addItem(self._videoitem)
+
+ if os.name != 'nt':
+ # grab frames to forward them to facial emotion tab
+ probe = QtMultimedia.QVideoProbe(self)
+ probe.videoFrameProbed.connect(self.on_videoFrameProbed)
+ probe.setSource(self.mediaPlayer)
+
+ # disable scrollbars
+ self._gv.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
+ self._gv.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
+
+ # just a holder for the graphics view to expand to maximum to use full size
+ self.lay = QtWidgets.QVBoxLayout(self)
+ self.lay.setContentsMargins(0, 0, 0, 0)
+ self.lay.addWidget(self._gv)
+
+ self.errorLabel = QtWidgets.QLabel()
+ self.errorLabel.setSizePolicy(QtWidgets.QSizePolicy.Preferred,
+ QtWidgets.QSizePolicy.Maximum)
+
+ self.mediaPlayer.setVideoOutput(self._videoitem)
+ self.mediaPlayer.stateChanged.connect(self.on_stateChanged)
+ self.mediaPlayer.positionChanged.connect(self.mediaChangedPosition)
+ # self.mediaPlayer.durationChanged.connect(self.durationChanged)
+ self.mediaPlayer.error.connect(self.handleError)
+
+ self.movieDir = ''
+ self.duration = 0
+
+ def setInit(self, video, fps, originalVideoResolution, number_ids, colors, tags, tag_colors):
+ self.fps = fps
+ self.originalVideoResolution = originalVideoResolution
+
+ f = os.path.abspath(video)
+ self.mediaPlayer.setMedia(QtMultimedia.QMediaContent(QtCore.QUrl.fromLocalFile(f)))
+ self.mediaPlayer.setNotifyInterval(1000 // self.fps)
+
+ # init pose data
+ for i in range(number_ids):
+ self.pose_data.append(self._scene.addPath(QtGui.QPainterPath()))
+
+ # init gaze data
+ for i in range(number_ids):
+ self.gaze_data.append(self._scene.addPath(QtGui.QPainterPath()))
+
+ # init label data
+ for i in range(number_ids):
+ self.labels.append(self._scene.addPath(QtGui.QPainterPath()))
+
+ # init tag data
+ if tags:
+ for i, tag in enumerate(tags):
+ self.tag_data[tag] = self._scene.addPath(QtGui.QPainterPath())
+ self.tag_colors[tag] = tag_colors[i]
+
+
+ self.number_ids = number_ids
+ self.colors = colors
+ self.tags = tags
+
+ def play(self):
+ if self.mediaPlayer.state() == QtMultimedia.QMediaPlayer.PlayingState:
+ self.mediaPlayer.pause()
+ else:
+ self.mediaPlayer.play()
+ self.sendState.emit(self.mediaPlayer.state())
+
+ def pause(self):
+ if self.mediaPlayer.state() == QtMultimedia.QMediaPlayer.PlayingState:
+ self.mediaPlayer.pause()
+ self.sendState.emit(self.mediaPlayer.state())
+
+ def setFrame(self, frame):
+        # RESPECT FPS! position is time in milliseconds
+ position = int(frame * 1000 / self.fps)
+ # print("Received", position)
+ self.mediaPlayer.setPosition(position)
+
+ def stop(self):
+ self.mediaPlayer.stop()
+
+ @QtCore.pyqtSlot(QtMultimedia.QMediaPlayer.State)
+ def on_stateChanged(self, state):
+ self.focus_on_video()
+
+ def mediaChangedPosition(self, position):
+ frame = int((position / 1000.0) * self.fps)
+ # print("Video Running %i" % frame)
+ self.updateFrame.emit(frame)
+ self._gv.fitInView(self._videoitem, QtCore.Qt.KeepAspectRatio)
+
+ def handleError(self):
+ # self.playButton.setEnabled(False)
+ print("Error: " + self.mediaPlayer.errorString())
+
+ def createButtons(self):
+ iconSize = QtCore.QSize(28, 28)
+
+ openButton = QtWidgets.QToolButton()
+ openButton.setStyleSheet('border: none;')
+ openButton.setIcon(QtGui.QIcon(self.root + '/icons/open.png'))
+ openButton.setIconSize(iconSize)
+ openButton.setToolTip("Open File")
+ # openButton.clicked.connect(self.open)
+
+ self.playButton = QtWidgets.QToolButton()
+ self.playButton.setStyleSheet('border: none;')
+ self.playButton.setIcon(QtGui.QIcon(self.root + '/icons/play.png'))
+ self.playButton.setIconSize(iconSize)
+ self.playButton.setToolTip("Play movie")
+ self.playButton.clicked.connect(self.play)
+ self.playButton.setEnabled(False)
+
+ self.stopButton = QtWidgets.QToolButton()
+ self.stopButton.setStyleSheet('border: none;')
+ self.stopButton.setIcon(QtGui.QIcon(self.root + '/icons/stop.png'))
+ self.stopButton.setIconSize(iconSize)
+ self.stopButton.setToolTip("Stop movie")
+ self.stopButton.clicked.connect(self.stop)
+ self.stopButton.setEnabled(False)
+
+ @QtCore.pyqtSlot(QtMultimedia.QVideoFrame)
+ def on_videoFrameProbed(self, frame):
+ cloneFrame = QtMultimedia.QVideoFrame(frame)
+ cloneFrame.map(QtMultimedia.QAbstractVideoBuffer.ReadOnly)
+ image = QtGui.QImage(cloneFrame.bits(), cloneFrame.width(), cloneFrame.height(), cloneFrame.bytesPerLine(),
+ QtMultimedia.QVideoFrame.imageFormatFromPixelFormat(cloneFrame.pixelFormat()))
+ self.frameAvailable.emit(image)
+ cloneFrame.unmap()
+
+ def focus_on_video(self):
+ native_video_resolution = self.mediaPlayer.metaData("Resolution")
+ # we also update the sceneview to zoom to the video
+ if native_video_resolution is not None:
+ self._videoitem.setSize(QtCore.QSizeF(native_video_resolution.width(), native_video_resolution.height()))
+ self._gv.fitInView(self._videoitem, QtCore.Qt.KeepAspectRatio)
+
+ # set scale of video to bigger size
+ if self.originalVideoResolution is not None:
+ width_ratio = self.originalVideoResolution[0] / native_video_resolution.width()
+ self._videoitem.setScale(width_ratio)
+
+ @QtCore.pyqtSlot()
+ def clear_tags(self):
+ self.focus_on_video()
+ # clear all tags
+ for tag in self.tags:
+ self._scene.removeItem(self.tag_data[tag])
+ self.tag_data[tag] = self._scene.addPath(QtGui.QPainterPath())
+
+ @QtCore.pyqtSlot(int, list, list)
+ def draw_tags(self, tag, lstX, lstY):
+ # this is removing the old tag data
+ self._scene.removeItem(self.tag_data[tag])
+
+ path = QtGui.QPainterPath()
+ path.setFillRule(Qt.Qt.WindingFill)
+        # draw a 100x100 square centred on each tag position
+        for (x, y) in zip(lstX, lstY):
+            path.addRect(x - 50, y - 50, 100, 100)
+
+ # by adding it gets converted into an QGraphicsPathItem
+ # save it for later removal
+ self.tag_data[tag] = self._scene.addPath(path)
+
+ # set colors
+ color = self.tag_colors[tag]
+ pen = QtGui.QPen(QtGui.QColor(color[0], color[1], color[2], 255), 2, QtCore.Qt.SolidLine)
+ self.tag_data[tag].setPen(pen)
+ # fill ellipses - alpha value is set to 50%
+ # self.tag_data[tag].setBrush(QtGui.QColor(color[0], color[1], color[2], int(0.5 * 255)))
+
+ @QtCore.pyqtSlot()
+ def clear_labels(self):
+ self.focus_on_video()
+ # clear all labels
+ for id_no in range(self.number_ids):
+ self._scene.removeItem(self.labels[id_no])
+ self.labels[id_no] = self._scene.addPath(QtGui.QPainterPath())
+
+ @QtCore.pyqtSlot(int, int, int)
+ def draw_labels(self, id_no, x, y):
+ # this is removing the old pose data
+        # this is removing the old label
+
+ path = QtGui.QPainterPath()
+ # then draw text
+ font = QtGui.QFont("Arial", 70)
+ font.setStyleStrategy(QtGui.QFont.ForceOutline)
+        # sadly there is no easy way to calculate the width of the text, so minus 100 is fine but not ideal
+        # also move the text up by 300 so that it does not cover the face
+        path.addText(x - 100, y - 300, font, "ID " + str(id_no))
+
+ # by adding it gets converted into an QGraphicsPathItem
+ # save it for later removal
+ self.labels[id_no] = self._scene.addPath(path)
+
+ # set colors
+ color = tuple([int(a * 255) for a in self.colors[id_no]])
+        # alpha value is set to 90%
+        pen = QtGui.QPen(QtGui.QColor(color[0], color[1], color[2], int(0.9 * 255)), 10, QtCore.Qt.SolidLine)
+ self.labels[id_no].setPen(pen)
+
+ @QtCore.pyqtSlot()
+ def clear_pose(self):
+ # empty pose data
+ for id_no in range(self.number_ids):
+ self._scene.removeItem(self.pose_data[id_no])
+ self.pose_data[id_no] = self._scene.addPath(QtGui.QPainterPath())
+
+ @QtCore.pyqtSlot(int, list, list)
+ def draw_pose(self, id_no, lstX, lstY):
+ # this is removing the old pose data
+ self._scene.removeItem(self.pose_data[id_no])
+
+ if len(lstX) > 0 and len(lstY) > 0:
+ path = QtGui.QPainterPath()
+
+ # set starting points
+ path.moveTo(lstX[0], lstY[0])
+            # then draw the remaining lines
+ for (x, y) in zip(lstX[1:], lstY[1:]):
+ path.lineTo(x, y)
+
+ # by adding it gets converted into an QGraphicsPathItem
+ # save it for later removal
+ self.pose_data[id_no] = self._scene.addPath(path)
+
+ # set colors
+ color = tuple([int(a * 255) for a in self.colors[id_no]])
+ # alpha value is set to 70%
+ pen = QtGui.QPen(QtGui.QColor(color[0], color[1], color[2], int(0.7 * 255)), 10, QtCore.Qt.SolidLine)
+ self.pose_data[id_no].setPen(pen)
+ else:
+ self.pose_data[id_no] = self._scene.addPath(QtGui.QPainterPath())
+
+ @QtCore.pyqtSlot()
+ def clear_gaze(self):
+        # empty gaze data
+ for id_no in range(self.number_ids):
+ self._scene.removeItem(self.gaze_data[id_no])
+ self.gaze_data[id_no] = self._scene.addPath(QtGui.QPainterPath())
+
+ @QtCore.pyqtSlot(int, list, list)
+ def draw_gaze(self, id_no, lstX, lstY):
+        # this is removing the old gaze data
+ self._scene.removeItem(self.gaze_data[id_no])
+
+ path = QtGui.QPainterPath()
+ path.setFillRule(Qt.Qt.WindingFill)
+        # draw a 100x100 ellipse at each gaze point
+        for (x, y) in zip(lstX, lstY):
+            path.addEllipse(x, y, 100, 100)
+
+ # by adding it gets converted into an QGraphicsPathItem
+ # save it for later removal
+ self.gaze_data[id_no] = self._scene.addPath(path)
+
+ # set colors
+ color = tuple([int(a * 255) for a in self.colors[id_no]])
+ # alpha value is set to 50%
+ pen = QtGui.QPen(QtGui.QColor(color[0], color[1], color[2], int(0.5 * 255)), 1, QtCore.Qt.SolidLine)
+ self.gaze_data[id_no].setPen(pen)
+ # fill ellipses
+ self.gaze_data[id_no].setBrush(QtGui.QColor(color[0], color[1], color[2], int(0.5 * 255)))
+
+ @QtCore.pyqtSlot(list)
+ def onSelectedID(self, lst):
+ self.clear_labels()
+ self.clear_gaze()
+ self.clear_pose()
+
diff --git a/uiwidget/widgetpose.py b/uiwidget/widgetpose.py
new file mode 100644
index 0000000..a1e0e35
--- /dev/null
+++ b/uiwidget/widgetpose.py
@@ -0,0 +1,147 @@
+import numpy as np
+import pyqtgraph as pg
+from PyQt5 import QtWidgets, QtGui, QtCore
+
+class WidgetPose(QtWidgets.QWidget):
+
+ video_label_signal = QtCore.pyqtSignal(list)
+
+ def __init__(self, parent=None):
+ super(WidgetPose, self).__init__(parent)
+
+ layout = QtWidgets.QGridLayout()
+ self.poseGraph = QtWidgets.QWidget()
+ self.measures = QtWidgets.QWidget()
+ self.measuresLayout = QtWidgets.QHBoxLayout()
+
+ # Setup Movement Graph Plot Widget
+ self.movementGraph = pg.PlotWidget()
+ self.movementGraph.setBackground('w')
+ self.movementGraph.setYRange(0, 400, padding=0)
+ self.movementGraph.getPlotItem().getAxis('bottom').setTickSpacing(minor=50, major=100)
+ self.movementGraph.getPlotItem().setTitle(title='Body Movement over Time')
+ self.movementPlots = []
+
+ layout.addWidget(self.movementGraph, 1, 0, 1, 1)
+ layout.addWidget(self.measures, 1, 1, 1, 1)
+ layout.setColumnStretch(0, 1)
+ layout.setColumnStretch(1, 1)
+
+ self.setLayout(layout)
+ self.colors = None
+ self.labels = dict()
+
+ @QtCore.pyqtSlot(list)
+ def onSelectedID(self, lst):
+ """Change color of movement graph plot if ID should not be visible"""
+ for i, button in enumerate(lst):
+ if not button.isChecked():
+ self.movementPlots[i].setPen(None)
+ elif self.colors is not None:
+ color = tuple([int(a * 255) for a in self.colors[i]])
+ pen = pg.mkPen(color=color)
+ self.movementPlots[i].setPen(pen)
+
+ @QtCore.pyqtSlot(dict, list, int)
+ def updateMovementGraph(self, data, colors, numberIDs):
+ """Plot ID specific movement data from processing class
+ data[id: (movements, frames)]
+ """
+ # handle NaN https://github.com/pyqtgraph/pyqtgraph/issues/1057
+ # downgrade to 5.13 fixes the issue
+ for id_no in range(numberIDs):
+ if data.get(id_no):
+ if not np.all(np.isnan(data.get(id_no)[0])):
+ self.movementPlots[id_no].setData(data.get(id_no)[1], data.get(id_no)[0]) # Update the data.
+
+ self.movementGraph.setXRange(np.min(data.get(id_no)[1]), np.max(data.get(id_no)[1]))
+
+ @QtCore.pyqtSlot(dict, int)
+ def updateHandVelocity(self, data, numberIDs):
+ """Update Velocity Label
+ data[id: velocity for frame]"""
+ for id_no in range(numberIDs):
+ if data.get(id_no) is not None:
+ self.labels['Velocity'][id_no].setText('%.2f' % data[id_no])
+ else:
+ self.labels['Velocity'][id_no].setText(' ')
+
+ @QtCore.pyqtSlot(list, int)
+ def initMovementGraph(self, colors, numberIDs):
+ """Initialize plot lines with 0
+ colors: plot color for each ID
+ """
+ for i in range(numberIDs):
+            x = list(range(-200, 0))  # 200 time points
+            y = [0 for _ in range(200)]  # 200 data points
+ color = tuple([int(a * 255) for a in colors[i]])
+ pen = pg.mkPen(color=color)
+ dataLine = self.movementGraph.plot(x, y, pen=pen)
+ self.movementPlots.append(dataLine)
+
+ @QtCore.pyqtSlot(dict, dict, list, int)
+ def setInit(self, mostActivity, hand, colors, numberIDs):
+ self.colors = colors
+ idLayout = QtWidgets.QVBoxLayout()
+
+ labelID = QtWidgets.QLabel(' ')
+ labelID.setFixedWidth(60)
+ labelID.setFixedHeight(20)
+ label = QtWidgets.QLabel('Most body activity in frame: ')
+ labelA = QtWidgets.QLabel('Hands above table (relative): ')
+ labelG = QtWidgets.QLabel('Gestures (relative): ')
+ labelVel = QtWidgets.QLabel('Hand velocity: ')
+ idLayout.addWidget(labelID)
+ idLayout.addWidget(label)
+ idLayout.addWidget(labelA)
+ idLayout.addWidget(labelG)
+ idLayout.addWidget(labelVel)
+ self.measuresLayout.insertLayout(-1, idLayout)
+
+ activityLabel = []
+ aboveLabel = []
+ gestureLabel = []
+ velLabel = []
+ for id_no in range(numberIDs):
+ idLayout = QtWidgets.QVBoxLayout()
+
+ [total, tracked, high_vel] = hand.get(id_no)
+
+ color = tuple([int(a * 255) for a in colors[id_no]])
+
+ labelID = QtWidgets.QLabel('ID%i' % id_no)
+ labelID.setStyleSheet('font: bold 12px; color: black; background-color: rgb(%i,%i,%i)' % color)
+ labelID.setFixedWidth(60)
+ labelID.setFixedHeight(20)
+
+ label = QtWidgets.QLabel('%i' % mostActivity.get(id_no))
+ activityLabel.append(label)
+ labelG = QtWidgets.QLabel('%.2f' % (high_vel / total))
+ gestureLabel.append(labelG)
+ labelA = QtWidgets.QLabel('%.2f' % (tracked / total))
+ aboveLabel.append(labelA)
+ labelVel = QtWidgets.QLabel(' ')
+ velLabel.append(labelVel)
+ idLayout.addWidget(labelID)
+ idLayout.addWidget(label)
+ idLayout.addWidget(labelA)
+ idLayout.addWidget(labelG)
+ idLayout.addWidget(labelVel)
+
+ self.measuresLayout.insertLayout(-1, idLayout)
+
+ # Velocity will be updated each frame, rest is updated in _updateLabels
+ self.labels['Velocity'] = velLabel
+ self.labels['Above'] = aboveLabel
+ self.labels['Gesture'] = gestureLabel
+ self.labels['Activity'] = activityLabel
+ self.measures.setLayout(self.measuresLayout)
+
+ @QtCore.pyqtSlot(dict, dict, int)
+ def updateLables(self, mostActivity, hand, numberIDs):
+ """ Update above hands, gestures and most activity labels when segment was changed"""
+ for id_no in range(numberIDs):
+ [total, tracked, high_vel] = hand[id_no]
+ self.labels['Activity'][id_no].setText('%i' % mostActivity[id_no])
+ self.labels['Above'][id_no].setText('%.2f' % (tracked / total))
+ self.labels['Gesture'][id_no].setText('%.2f' % (high_vel / total))
diff --git a/uiwidget/widgetspeaking.py b/uiwidget/widgetspeaking.py
new file mode 100644
index 0000000..5d3fea2
--- /dev/null
+++ b/uiwidget/widgetspeaking.py
@@ -0,0 +1,157 @@
+import numpy as np
+import pyqtgraph as pg
+from PyQt5 import QtWidgets, QtCore
+from utils.util import get_circle
+
+
+class WidgetSpeaking(QtWidgets.QWidget):
+ def __init__(self, parent=None):
+ super(WidgetSpeaking, self).__init__(parent)
+
+ layout = QtWidgets.QGridLayout()
+
+ self.measures = QtWidgets.QWidget()
+ self.measuresLayout = QtWidgets.QHBoxLayout()
+
+ # Setup Speaking Graph Plot Widget
+ self.speakingGraph = pg.PlotWidget()
+ self.speakingGraph.setBackground('w')
+ self.speakingGraph.hideAxis('left')
+ self.speakingGraph.hideAxis('bottom')
+ self.speakingGraph.setAspectLocked()
+ self.speakingGraph.setYRange(-2.25, 2.25, padding=0)
+ self.speakingGraph.setXRange(-2.25, 2.25, padding=0)
+ self.speakingGraph.getPlotItem().setTitle(title='Speaking Distribution')
+
+ self.speakingGraphPlots = []
+
+ layout.addWidget(self.speakingGraph, 1, 0, 1, 1)
+ layout.addWidget(self.measures, 1, 1, 1, 2)
+ layout.setColumnStretch(0, 1)
+ layout.setColumnStretch(1, 2)
+
+ self.setLayout(layout)
+ self.colors = None
+ self.labels = dict()
+ self.speakBlob = None
+ self.positions = dict()
+ self.idBlobs = dict()
+
+ @QtCore.pyqtSlot(dict, int)
+ def updateLables(self, speak, numberIDs):
+ """ Update labels when segment was changed"""
+ for id_no in range(numberIDs):
+ self.labels['RST'][id_no].setText('%.2f' % speak[id_no][1])
+ # self.labels['Turns'][id_no].setText('%i' % speak[id_no][2])
+ # self.labels['TurnLength'][id_no].setText('%.2f sec' % speak[id_no][3])
+ # self.labels['TurnPerMin'][id_no].setText('%.2f /min' % speak[id_no][4])
+
+ @QtCore.pyqtSlot(dict, list, int)
+ def setInit(self, speak, colors, numberIDs):
+ self.colors = colors
+ idLayout = QtWidgets.QVBoxLayout()
+
+ labelID = QtWidgets.QLabel(' ')
+ labelID.setFixedWidth(60)
+ labelID.setFixedHeight(20)
+ labelRST = QtWidgets.QLabel('Relative speaking time:')
+ labelTurn = QtWidgets.QLabel('Number of speaking turns:')
+ labelTurnLen = QtWidgets.QLabel('Average length of turn:')
+ labelTurnMin = QtWidgets.QLabel('Average number of turns:')
+ idLayout.addWidget(labelID)
+ idLayout.addWidget(labelRST)
+ idLayout.addWidget(labelTurn)
+ idLayout.addWidget(labelTurnLen)
+ idLayout.addWidget(labelTurnMin)
+
+ self.measuresLayout.insertLayout(-1, idLayout)
+
+ rstLabels = []
+ turnLabels = []
+ turnLenLabels = []
+ turnMinLabels = []
+ for id_no in range(numberIDs):
+ idLayout = QtWidgets.QVBoxLayout()
+ color = tuple([int(a * 255) for a in colors[id_no]])
+
+ labelID = QtWidgets.QLabel('ID%i' % id_no)
+ labelID.setFixedWidth(60)
+ labelID.setFixedHeight(20)
+ labelID.setStyleSheet('font: bold 12px; color: black; background-color: rgb(%i,%i,%i)' % color)
+
+ labelRST = QtWidgets.QLabel('%.2f' % speak[id_no][1])
+ rstLabels.append(labelRST)
+ labelTurn = QtWidgets.QLabel('%i' % speak[id_no][2])
+ turnLabels.append(labelTurn)
+ labelTurnLen = QtWidgets.QLabel('%.2f sec' % speak[id_no][3])
+ turnLenLabels.append(labelTurnLen)
+ labelTurnMin = QtWidgets.QLabel('%.2f /min' % speak[id_no][4])
+ turnMinLabels.append(labelTurnMin)
+
+ idLayout.addWidget(labelID)
+ idLayout.addWidget(labelRST)
+ idLayout.addWidget(labelTurn)
+ idLayout.addWidget(labelTurnLen)
+ idLayout.addWidget(labelTurnMin)
+
+ self.measuresLayout.insertLayout(-1, idLayout)
+
+ self.labels['RST'] = rstLabels
+ self.labels['Turns'] = turnLabels
+ self.labels['TurnLength'] = turnLenLabels
+ self.labels['TurnPerMin'] = turnMinLabels
+
+ self.measures.setLayout(self.measuresLayout)
+
+ @QtCore.pyqtSlot(list, int)
+ def initSpeakingGraph(self, colors, numberIDs):
+ """ initialize speaking graph """
+
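+        # the white blob marks the group's speaking balance; it sits at the centre
+        # when speaking time is evenly shared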
+ self.speakBlob = pg.ScatterPlotItem([0], [0], size=0.3, pxMode=False,
+ pen=pg.mkPen(color=0.5, width=2), brush=pg.mkBrush(255, 255, 255))
+ self.speakBlob.setZValue(100)
+ self.speakingGraph.addItem(self.speakBlob)
+
+ pos = 360 // numberIDs
+ for id_no in range(numberIDs):
+ color = tuple([int(a * 255) for a in colors[id_no]])
+ idPos = pos * id_no
+ x = 2 * np.cos(np.deg2rad(idPos))
+ y = 2 * np.sin(np.deg2rad(idPos))
+ scatterPlot = pg.ScatterPlotItem([x], [y], size=0.2, pxMode=False,
+ pen=pg.mkPen(color=color, width=2), brush=pg.mkBrush(*color))
+ self.speakingGraph.addItem(scatterPlot)
+ self.positions[id_no] = [x, y]
+ self.idBlobs[id_no] = scatterPlot
+
+ plt = self.speakingGraph.plot(x=[], y=[], pen=pg.mkPen(color=color, width=1))
+ self.speakingGraphPlots.append(plt)
+
+ @QtCore.pyqtSlot(dict, int, int)
+ def updateSpeakingGraph(self, rst, active_speaker, numberIDs):
+        """ frame updates for the speaking graph """
+ blobPos = np.array([0, 0])
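+        # pull the blob toward every participant whose relative speaking time exceeds an equal share (1 / numberIDs)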
+ for id_no in range(numberIDs):
+ diff = rst[id_no] - (1.0 / numberIDs)
+ if diff > 0:
+ blobPos = blobPos + 2 * diff * np.array(self.positions[id_no])
+ self.speakBlob.setData([blobPos[0]], [blobPos[1]])
+
+ for id_no in range(numberIDs):
+ color = tuple([int(a * 255) for a in self.colors[id_no]])
+ pen = pg.mkPen(color=color, width=(rst[id_no]*4))
+
+ x = [self.positions[id_no][0], blobPos[0]]
+ y = [self.positions[id_no][1], blobPos[1]]
+ self.speakingGraphPlots[id_no].setData(x, y)
+ self.speakingGraphPlots[id_no].setPen(pen)
+
+ if id_no == active_speaker:
+ self.idBlobs[id_no].setData([self.positions[id_no][0]], [self.positions[id_no][1]],
+ pen=pg.mkPen(color=(0, 0, 0), width=2))
+ else:
+ self.idBlobs[id_no].setData([self.positions[id_no][0]], [self.positions[id_no][1]],
+ pen=pg.mkPen(color=color, width=2))
+
+
+
diff --git a/uiwidget/widgetstatic.py b/uiwidget/widgetstatic.py
new file mode 100644
index 0000000..1aa5166
--- /dev/null
+++ b/uiwidget/widgetstatic.py
@@ -0,0 +1,15 @@
+from PyQt5 import QtWidgets
+
+
+class WidgetStatic(QtWidgets.QWidget):
+ labels = list()
+
+ def __init__(self, parent=None):
+ super(WidgetStatic, self).__init__(parent)
+
+ self.frame = 0
+
+ layout = QtWidgets.QGridLayout()
diff --git a/uiwidget/widgettimeline.py b/uiwidget/widgettimeline.py
new file mode 100644
index 0000000..ad56432
--- /dev/null
+++ b/uiwidget/widgettimeline.py
@@ -0,0 +1,451 @@
+from PyQt5 import QtWidgets
+from PyQt5 import QtCore
+from PyQt5 import QtGui
+from PyQt5.QtCore import QSize, QFileInfo, pyqtSlot
+from PyQt5.QtGui import QIcon, QKeySequence
+from PyQt5.QtMultimedia import QMediaPlayer
+
+import sys
+
+__all__ = ['WidgetTimeLine', 'QMultiRangeSlider', 'QRangeSliderSegment']
+
+
+def scale(val, src, dst):
+ """
+ Scale the given value from the scale of src to the scale of dst.
+ """
+ return int(((val - src[0]) / float(src[1] - src[0])) * (dst[1] - dst[0]) + dst[0])
+
+
+class WidgetTimeLine(QtWidgets.QWidget):
+
+ signalSetPosition = QtCore.pyqtSignal(int)
+ signalPlay = QtCore.pyqtSignal()
+ signalStop = QtCore.pyqtSignal()
+ signalSelectID = QtCore.pyqtSignal(list)
+
+ def __init__(self, parent=None):
+ super(WidgetTimeLine, self).__init__(parent)
+ self.root = QFileInfo(__file__).absolutePath()
+
+ self.setFixedHeight(110)
+ self.frame = 0
+
+ layout = QtWidgets.QGridLayout()
+
+ iconSize = QSize(28, 28)
+
+ self.playButton = QtWidgets.QToolButton()
+ self.playButton.setStyleSheet('border: none;')
+ self.playButton.setIcon(QIcon(self.root + '/../icons/play.png'))
+ self.playButton.setIconSize(iconSize)
+ self.playButton.setToolTip("Play movie")
+ self.playButton.clicked.connect(self.play)
+
+ # See https://stackoverflow.com/questions/50880660/how-to-change-space-bar-behaviour-in-pyqt5-python3
+ # self.shortcut_play = QtWidgets.QShortcut(QKeySequence(Qt.Key_Space), self)
+ # self.shortcut_play.activated.connect(self.play)
+
+ self.labelFrame = QtWidgets.QLabel('Frame\n')
+ self.labelFrame.setAlignment(QtCore.Qt.AlignRight)
+ self.labelFrame.setFixedWidth(40)
+ self.sl = QtWidgets.QSlider(QtCore.Qt.Horizontal)
+ # Directly connect: Slider value changed to player set position
+ self.sl.valueChanged.connect(self.signalSetPosition.emit)
+
+ self.rangeslider = QMultiRangeSlider()
+ self.rangeslider.setFixedHeight(30)
+ self.rangeslider.setMin(0)
+ self.rangeslider.setMax(200)
+ self.rangeslider.setRanges([(5, 25), (30, 50), (70, 90)])
+ # self.rangeslider.setBackgroundStyle(
+ # 'background: qlineargradient(x1:0, y1:0, x2:0, y2:1, stop:0 #222, stop:1 #333);')
+ # szlf.rangeslider.setHandleStyle(
+ # 'background: qlineargradient(x1:0, y1:0, x2:0, y2:1, stop:0 #289, stop:1 #289);')
+
+ layout.addWidget(self.rangeslider, 1, 1, 1, 6)
+ layout.addWidget(self.labelFrame, 1, 0, 2, 1)
+ layout.addWidget(self.sl, 2, 1, 1, 6)
+ layout.addWidget(self.playButton, 2, 0, 1, 1)
+
+ self.setLayout(layout)
+ self.number_ids = None
+ self.fps = None
+
+ def setInit(self, frames, number_ids, fps):
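+        """Initialize the sliders: the playback slider operates in milliseconds
+        (position = frame * 1000 / fps), the range slider in frames."""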
+ self.fps = fps
+ self.frame = 0
+ self.sl.setMinimum(0)
+ # Use position = (frame * 1000) / FPS for all slider and player values!
+ self.sl.setMaximum(int((frames * 1000) / self.fps))
+ self.sl.setValue(0)
+ self.sl.setTickPosition(QtWidgets.QSlider.TicksBelow)
+ self.sl.setTickInterval(5)
+ self.labelFrame.setText('Frame\n %i' % self.frame)
+ self.playButton.setEnabled(True)
+ self.number_ids = number_ids
+
+ self.rangeslider.setMin(0)
+ self.rangeslider.setMax(frames)
+ self.rangeslider.setRanges([(0, frames)])
+
+ @pyqtSlot(QMediaPlayer.State)
+ def mediaStateChanged(self, state):
+ if state == QMediaPlayer.PlayingState:
+ print('MediaState changed...')
+ self.playButton.setIcon(QIcon(self.root + '/../icons/pause.png'))
+ self.playButton.setToolTip('Pause movie')
+ else:
+ self.playButton.setIcon(QIcon(self.root + '/../icons/play.png'))
+ self.playButton.setToolTip('Play movie')
+
+ def play(self):
+ self.signalPlay.emit()
+
+ def stop(self):
+ self.signalStop.emit()
+
+ def updateSlider(self, position):
+ # Directly connect Player position changed to slider set value
+ frame = int((position / 1000.0) * self.fps)
+ self.frame = frame
+ # Disable the events to prevent updating triggering a setPosition event (can cause stuttering).
+ self.sl.blockSignals(True)
+ self.sl.setValue(position)
+ self.sl.blockSignals(False)
+
+ self.labelFrame.setText('Frame\n %i' % frame)
+
+
+class QMultiRangeSlider(QtWidgets.QWidget):
+
+ minValueChanged = QtCore.pyqtSignal(int)
+ maxValueChanged = QtCore.pyqtSignal(int)
+
+ segmentsChanged = QtCore.pyqtSignal(list, int)
+
+ segments = []
+ last_update_segment = 0
+
+ def __init__(self, parent=None):
+        """Create a new QMultiRangeSlider instance.
+
+        A slider that manages several (start, end) segments: double-click adds a
+        segment in a gap or removes the segment under the cursor, and dragging a
+        green (start) or red (end) handle resizes the nearest segment.
+
+        :param parent: QWidget parent
+        :return: New QMultiRangeSlider instance.
+        """
+ super(QMultiRangeSlider, self).__init__(parent)
+ self.setMinimumSize(1, 30)
+ self.padding = 5
+        self.handle_width = 4
+
+ self.selected_segment = None
+
+ self.setMin(0)
+ self.setMax(99)
+        self.setRanges([(20, 30), (40, 90)])
+
+ def setMin(self, value):
+ """sets minimum value"""
+ assert type(value) is int
+ setattr(self, '__min', value)
+ self.minValueChanged.emit(value)
+ self.segments = []
+
+ def setMax(self, value):
+ """sets maximum value"""
+ assert type(value) is int
+ setattr(self, '__max', value)
+ self.maxValueChanged.emit(value)
+ self.segments = []
+
+ def initSegment(self):
+ self.segments = []
+
+        if self.min() is not None and self.max() is not None:
+            self.setRanges([(self.min(), self.max())])
+
+    def keyPressEvent(self, event):
+        """overrides key press event to move the first range left and right"""
+        key = event.key()
+        if not self.segments:
+            event.ignore()
+            return
+        seg = self.segments[0]
+        if key == QtCore.Qt.Key_Left:
+            s = seg.start() - 1
+            e = seg.end() - 1
+        elif key == QtCore.Qt.Key_Right:
+            s = seg.start() + 1
+            e = seg.end() + 1
+        else:
+            event.ignore()
+            return
+        event.accept()
+        if s >= self.min() and e <= self.max():
+            seg.setRange(s, e)
+            self._trigger_refresh(0)
+
+ def min(self):
+ """:return: minimum value"""
+ return getattr(self, '__min', None)
+
+ def max(self):
+ """:return: maximum value"""
+ return getattr(self, '__max', None)
+
+    def _setStart(self, value):
+        """stores the start value only"""
+        setattr(self, '__start', value)
+        # note: QMultiRangeSlider defines no startValueChanged signal, so none is emitted here
+        # self.segmentChanged.emit(value, self.end())
+
+ def setStart(self, values):
+ """sets the range slider start value"""
+ # assert type(value) is int
+
+ for i, s in enumerate(self.segments):
+ s.setStart(values[i])
+
+ def setEnd(self, values):
+ """set the range slider end value"""
+ # assert type(value) is int
+
+ for i, s in enumerate(self.segments):
+ s.setEnd(values[i])
+
+ def getRanges(self):
+ """:return: the start and end values as a tuple"""
+ ret = []
+ for i, s in enumerate(self.segments):
+ ret.append(s.getRange())
+
+ return ret
+
+ def setRanges(self, values):
+ """set the start and end values"""
+
+ while len(self.segments) < len(values):
+ self.segments.append(QRangeSliderSegment(self))
+
+ while len(self.segments) > len(values):
+            self.segments.pop()
+
+ for i, (s, e) in enumerate(values):
+ self.segments[i].setStart(s)
+ self.segments[i].setEnd(e)
+
+ self._trigger_refresh(0)
+
+ def mouseDoubleClickEvent(self, e):
+ self.selected_segment = None
+
+ d_width = self.width() - (self.padding * 2)
+ step_size = d_width / (self.max() - self.min())
+ pos = (e.x() - (self.padding)) / step_size
+
+ # removing existing segment
+ for (i, s) in enumerate(self.segments):
+ if (s.start() < pos) and (pos < s.end()):
+ # always keep one segment
+ if len(self.segments) > 1:
+ self.segments.remove(s)
+                # since one segment exists all the time the first can not be -1
+ if i == 0:
+ self._trigger_refresh(0)
+ else:
+ self._trigger_refresh(i - 1)
+ return
+
+ # if new segment in front
+ if (pos < self.segments[0].end()):
+ start = 0
+ end = self.segments[0].start()
+ diff = end - start
+ start += .25 * diff
+ end -= .25 * diff
+ start = int(start)
+ end = int(end)
+ seg = QRangeSliderSegment(self)
+ seg.setStart(start)
+ seg.setEnd(end)
+ self.segments.insert(0, seg)
+ self._trigger_refresh(0)
+ return
+
+ # if new segment in back
+ if (pos > self.segments[-1].end()):
+ start = self.segments[-1].end()
+ end = self.max()
+ diff = end - start
+ start += .25 * diff
+ end -= .25 * diff
+ start = int(start)
+ end = int(end)
+ seg = QRangeSliderSegment(self)
+ seg.setStart(start)
+ seg.setEnd(end)
+ self.segments.append(seg)
+ self._trigger_refresh(len(self.segments) - 1)
+ return
+
+ # if new segment in between
+ for i in range(len(self.segments) - 1):
+ if (self.segments[i].end() < pos) and (pos < self.segments[i + 1].start()):
+ start = self.segments[i].end()
+ end = self.segments[i + 1].start()
+ diff = end - start
+ start += .25 * diff
+ end -= .25 * diff
+ start = int(start)
+ end = int(end)
+ seg = QRangeSliderSegment(self)
+ seg.setStart(start)
+ seg.setEnd(end)
+ self.segments.insert(i + 1, seg)
+ self._trigger_refresh(i + 1)
+ return
+
+ def mousePressEvent(self, e):
+ # d_height = painter.device().height() - (self.padding * 2)
+ d_width = self.width() - (self.padding * 2)
+ step_size = d_width / (self.max() - self.min())
+ pos = (e.x() - (self.padding)) / step_size
+
+ distance = sys.maxsize
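+        # select the start/end handle closest to the click, but only if it is within 10 px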
+
+ for i, s in enumerate(self.segments):
+ if (abs(s.start() - pos) < distance):
+ distance = abs(s.start() - pos)
+ if (distance * step_size < 10):
+ self.selected_segment = {"Seg": s, "Type": "start", "Index": i}
+
+ if (abs(s.end() - pos) < distance):
+ distance = abs(s.end() - pos)
+ if (distance * step_size < 10):
+ self.selected_segment = {"Seg": s, "Type": "end", "Index": i}
+
+ self._trigger_refresh(self.last_update_segment)
+
+ def mouseMoveEvent(self, e):
+ d_width = self.width() - (self.padding * 2)
+ step_size = d_width / (self.max() - self.min())
+ pos = int(round((e.x() - (self.padding)) / step_size))
+
+ if self.selected_segment is None:
+ return
+
+ s = self.selected_segment["Seg"]
+        if self.selected_segment["Type"] == "start":
+            if self.selected_segment["Index"] == 0 and 0 <= pos:
+                s.setStart(pos)
+            elif self.segments[self.selected_segment["Index"] - 1].end() < pos - 5 and s.end() > pos + 5:
+                s.setStart(pos)
+        elif self.selected_segment["Type"] == "end":
+            if self.selected_segment["Index"] == len(self.segments) - 1 and pos <= self.max():
+                s.setEnd(pos)
+            elif self.selected_segment["Index"] + 1 < len(self.segments) \
+                    and self.segments[self.selected_segment["Index"] + 1].start() > pos + 5 \
+                    and s.start() < pos - 5:
+                s.setEnd(pos)
+
+ self._trigger_refresh(self.selected_segment["Index"])
+
+ def mouseReleaseEvent(self, e):
+ self.selected_segment = None
+
+ def _trigger_refresh(self, last_update_segment):
+ self.last_update_segment = last_update_segment
+ self.update()
+ self.segmentsChanged.emit(self.getRanges(), last_update_segment)
+
+ def paintEvent(self, e):
+ painter = QtGui.QPainter(self)
+ brush = QtGui.QBrush()
+ brush.setColor(QtGui.QColor(53, 53, 53))
+ brush.setStyle(QtCore.Qt.SolidPattern)
+ rect = QtCore.QRect(0, 0, painter.device().width(), painter.device().height())
+ painter.fillRect(rect, brush)
+
+ # Define our canvas.
+ d_height = painter.device().height() - (self.padding * 2)
+ d_width = painter.device().width() - (self.padding * 2)
+ step_size = d_width / (self.max() - self.min())
+
+ for s in self.segments:
+
+            brush.setColor(QtGui.QColor('white'))
+            rect = QtCore.QRect(
+                int(self.padding + s.start() * step_size),
+                self.padding * 2,
+                int((s.end() - s.start()) * step_size),
+                d_height - 2 * self.padding
+            )
+            painter.fillRect(rect, brush)
+
+            brush.setColor(QtGui.QColor('red'))
+            rect = QtCore.QRect(
+                int(self.padding + s.end() * step_size - (self.handle_width / 2)),
+                self.padding,
+                int(self.handle_width / 2),
+                d_height
+            )
+            painter.fillRect(rect, brush)
+
+            brush.setColor(QtGui.QColor('green'))
+            rect = QtCore.QRect(
+                int(self.padding + s.start() * step_size - (self.handle_width / 2)),
+                self.padding,
+                int(self.handle_width / 2),
+                d_height
+            )
+            painter.fillRect(rect, brush)
+
+
+class QRangeSliderSegment(QtWidgets.QWidget):
+
+ # signals
+ # startValueChanged = QtCore.pyqtSignal(int)
+ # endValueChanged = QtCore.pyqtSignal(int)
+ # segmentChanged = QtCore.pyqtSignal(int, int)
+
+ def __init__(self, parent=None):
+        """Create a new QRangeSliderSegment instance.
+
+        :param parent: QWidget parent
+        :return: New QRangeSliderSegment instance.
+        """
+ super(QRangeSliderSegment, self).__init__(parent)
+
+ def start(self):
+ """:return: range slider start value"""
+ return getattr(self, '__start', None)
+
+ def end(self):
+ """:return: range slider end value"""
+ return getattr(self, '__end', None)
+
+ def getRange(self):
+ """:return: the start and end values as a tuple"""
+ return (self.start(), self.end())
+
+ def setRange(self, start, end):
+ """set the start and end values"""
+ self.setStart(start)
+ self.setEnd(end)
+
+ def _setEnd(self, value):
+ """stores the end value only"""
+ setattr(self, '__end', value)
+ # self.endValueChanged.emit(value)
+ # self.segmentChanged.emit(self.start(), value)
+
+ def setEnd(self, value):
+ """set the range slider end value"""
+ assert type(value) is int
+ self._setEnd(value)
+
+ def _setStart(self, value):
+ """stores the start value only"""
+ setattr(self, '__start', value)
+ # self.startValueChanged.emit(value)
+ # self.segmentChanged.emit(value, self.end())
+
+ def setStart(self, value):
+ """sets the range slider start value"""
+ assert type(value) is int
+ self._setStart(value)
diff --git a/uiwidget/widgetvideosettings.py b/uiwidget/widgetvideosettings.py
new file mode 100644
index 0000000..d680873
--- /dev/null
+++ b/uiwidget/widgetvideosettings.py
@@ -0,0 +1,63 @@
+from PyQt5 import QtWidgets, QtCore, QtGui
+
+
+class WidgetVideoSettings(QtWidgets.QWidget):
+ signalVisualize = QtCore.pyqtSignal(dict)
+ signalSelectID = QtCore.pyqtSignal(list)
+
+ def __init__(self, parent=None):
+ super(WidgetVideoSettings, self).__init__(parent)
+
+ self.setFixedWidth(200)
+
+ layout = QtWidgets.QGridLayout()
+
+ iconSize = QtCore.QSize(28, 28)
+
+ self.checkBoxes = QtWidgets.QWidget()
+ self.checkBoxesLayout = QtWidgets.QVBoxLayout()
+
+ label = QtWidgets.QLabel('Visualize')
+ label.setFixedWidth(100)
+
+ self.checkBoxes.setLayout(self.checkBoxesLayout)
+ self.checkBoxesList = dict()
+
+ for data in ['Pose', 'Gaze', 'Label', 'Tags']:
+ box = QtWidgets.QCheckBox(data)
+ box.setChecked(True)
+            # bind the current box via a default argument (late-binding lambda pitfall)
+            box.stateChanged.connect(lambda _, b=box: self.boxstate(b))
+ self.checkBoxesLayout.addWidget(box)
+ self.checkBoxesList[data] = box
+
+ self.idButtons = QtWidgets.QWidget()
+ self.buttonsLayout = QtWidgets.QVBoxLayout()
+
+ labelID = QtWidgets.QLabel('Showing')
+ labelID.setFixedWidth(100)
+
+ self.idButtons.setLayout(self.buttonsLayout)
+ self.buttonsList = []
+
+ layout.addWidget(label, 0, 0)
+ layout.addWidget(labelID, 0, 1)
+ layout.addWidget(self.checkBoxes, 1, 0)
+ layout.addWidget(self.idButtons, 1, 1)
+ self.setLayout(layout)
+
+ @QtCore.pyqtSlot(list, int)
+ def setInit(self, colors, numberIDs):
+ # Emit initial state
+ self.signalVisualize.emit(self.checkBoxesList)
+ for id_no in range(numberIDs):
+ button = QtWidgets.QCheckBox("ID%i" % id_no)
+ button.setChecked(True)
+            # bind the current button via a default argument (late-binding lambda pitfall)
+            button.stateChanged.connect(lambda _, b=button: self.btnstate(b))
+ self.buttonsLayout.addWidget(button)
+ self.buttonsList.append(button)
+
+ def btnstate(self, b):
+ self.signalSelectID.emit(self.buttonsList)
+
+ def boxstate(self, b):
+ self.signalVisualize.emit(self.checkBoxesList)
diff --git a/utils/__init__.py b/utils/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/utils/colors.py b/utils/colors.py
new file mode 100644
index 0000000..bfbe66b
--- /dev/null
+++ b/utils/colors.py
@@ -0,0 +1,13 @@
+import colorsys
+
+
+def random_colors(N, bright=True):
+ """
+ Generate random colors.
+ To get visually distinct colors, generate them in HSV space then
+ convert to RGB.
+ """
+ brightness = 1.0 if bright else 0.7
+ hsv = [(i / N, 1, brightness) for i in range(N)]
+ colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
+ return colors
diff --git a/utils/util.py b/utils/util.py
new file mode 100644
index 0000000..5c57c8d
--- /dev/null
+++ b/utils/util.py
@@ -0,0 +1,31 @@
+import numpy as np
+
+
+def sperical2equirec(theta, phi, img_w, img_h):
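+    """Project spherical angles (theta = azimuth, phi = polar; both in degrees)
+    onto pixel coordinates of an img_w x img_h equirectangular frame."""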
+ u = np.deg2rad(theta) / (2 * np.pi)
+ v = np.deg2rad(phi) / np.pi
+
+ x = u * img_w
+ y = v * img_h
+ return x, y
+
+
+def equirec2spherical(x, y, img_w, img_h):
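+    """Inverse of sperical2equirec: map equirectangular pixel coordinates back to spherical angles in degrees."""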
+ # Normalize coordinates
+ u = x / img_w
+ v = y / img_h
+
+ # Calculate spherical coordinates from normalized coordinates
+ # theta is horizontal angle, phi is polar angle
+ theta = u * 2 * np.pi
+ phi = v * np.pi
+
+ return np.rad2deg(theta), np.rad2deg(phi)
+
+
+def get_circle(radius):
+ """ helper function returns circle to x, y coordinates"""
+ theta = np.linspace(0, 2 * np.pi, 100)
+ x = radius * np.cos(theta)
+ y = radius * np.sin(theta)
+ return np.array(x), np.array(y)