conan/main.py

#!/usr/bin/env python3
import sys
import os
import json

import numpy as np
import pandas as pd
import cv2
import pyqtgraph as pg

from PyQt5 import QtCore, QtGui, QtWidgets, uic
from PyQt5.QtCore import QSize, QUrl, pyqtSignal, QFile, QTextStream
from PyQt5.QtGui import QIcon, QPalette, QColor
from PyQt5.QtMultimedia import QMediaContent
from PyQt5.QtWidgets import QAction, QFileDialog, QStatusBar

from uiwidget import (widgetplayer, widgettimeline, widgetgaze, widgetspeaking,
                      widgetpose, widgetfacialexpression, widgetobject,
                      widgetvideosettings)
from processing import Processor

print("Qt version {0}".format(QtCore.QT_VERSION_STR))
print("OpenCV version {0}".format(cv2.__version__))

class MainWindow(QtWidgets.QMainWindow):
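    """Main application window: loads the Qt Designer layout, wires the
    Processor's signals to the visualization widgets, and manages the
    playback segments selected on the timeline."""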
sendPath = pyqtSignal(str)
sendSegments = pyqtSignal(np.ndarray)
def __init__(self, verbose=False, parent=None):
super(MainWindow, self).__init__(parent)
self.setWindowTitle('Conversation Analysis')
        uic.loadUi('gui.ui', self)  # Load the .ui file
self.show() # Show the GUI
self.resize(1600, 900)
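        # Video and segment state; most of it is populated once a project is opened.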
self.dataContainer = None
self.verbose = verbose
self.movieDir = ''
self.segments = []
self.frames_bitmask = None
self.frame = None
self.frameCount = None
self.fps = None
self._setupMenuBar()
self.status = QStatusBar()
self.setStatusBar(self.status)
self.status.showMessage('Welcome', 1000)
self.wPlayer = self.findChild(widgetplayer.WidgetPlayer, "video")
self.wTimeLine = self.findChild(widgettimeline.WidgetTimeLine, "time")
self.wTab = self.findChild(QtWidgets.QTabWidget, "tab")
self.wGaze = self.findChild(widgetgaze.WidgetGaze, "gaze")
self.wSpeaking = self.findChild(widgetspeaking.WidgetSpeaking, "speak")
self.wPose = self.findChild(widgetpose.WidgetPose, "pose")
self.wFace = self.findChild(widgetfacialexpression.WidgetFacialExpression, "face")
self.wObject = self.findChild(widgetobject.WidgetObject, "object")
self.wVideoSettings = self.findChild(widgetvideosettings.WidgetVideoSettings, "vsettings")
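        # Keep the media player and the timeline widget in sync.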
self.wPlayer.mediaPlayer.positionChanged.connect(self.wTimeLine.updateSlider)
self.wPlayer.sendState.connect(self.wTimeLine.mediaStateChanged)
self.wTimeLine.signalSetPosition.connect(self.wPlayer.mediaPlayer.setPosition)
self.wTimeLine.signalPlay.connect(self.wPlayer.play)
self.processor = Processor()
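        # Route results from the Processor to the analysis tabs and the video overlays.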
self.processor.signalInit.connect(self.wPose.initMovementGraph)
self.processor.signalInit.connect(self.wSpeaking.initSpeakingGraph)
self.processor.signalInit.connect(self.wGaze.initGazeGraph)
self.processor.signalInitTags.connect(self.wObject.setInit)
self.processor.signalInit.connect(self.wFace.setInit)
self.processor.signalInit.connect(self.wVideoSettings.setInit)
self.wPlayer.frameAvailable.connect(self.processor.saveCurrentFrameData)
self.processor.signalPoseSetInit.connect(self.wPose.setInit)
self.processor.signalGazeSetInit.connect(self.wGaze.setInit)
self.processor.signalSpeakerSetInit.connect(self.wSpeaking.setInit)
self.processor.signalUpdateHandVelocity.connect(self.wPose.updateHandVelocity)
self.processor.signalUpdateMovementGraph.connect(self.wPose.updateMovementGraph)
self.processor.signalUpdateSpeakGraph.connect(self.wSpeaking.updateSpeakingGraph)
self.processor.signalUpdateFaceAus.connect(self.wFace.updateFrame)
self.processor.signalUpdateFaceImgs.connect(self.wFace.updateImages)
self.processor.signalUpdateTagGraph.connect(self.wObject.updateTagGraph)
self.processor.signalVideoLabel.connect(self.wPlayer.draw_labels)
self.processor.signalPosePoints.connect(self.wPlayer.draw_pose)
self.processor.signalPoseChangedLabels.connect(self.wPose.updateLables)
self.processor.signalSpeakChangedLabels.connect(self.wSpeaking.updateLables)
self.processor.signalUpdateGazeGraph.connect(self.wGaze.updateGazeGraph)
self.processor.signalUpdateGazeMap.connect(self.wPlayer.draw_gaze)
self.processor.signalUpdateTags.connect(self.wPlayer.draw_tags)
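        # The video-settings tab controls what is visualized and which ID is selected.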
self.wVideoSettings.signalVisualize.connect(self.processor.onVisualize)
self.wVideoSettings.signalSelectID.connect(self.wPose.onSelectedID)
self.wVideoSettings.signalSelectID.connect(self.wGaze.onSelectedID)
self.wVideoSettings.signalSelectID.connect(self.wPlayer.onSelectedID)
self.wVideoSettings.signalSelectID.connect(self.processor.onSelectedID)
self.processor.signalClearLabels.connect(self.wPlayer.clear_labels)
self.processor.signalClearPose.connect(self.wPlayer.clear_pose)
self.processor.signalClearGaze.connect(self.wPlayer.clear_gaze)
self.processor.signalClearTags.connect(self.wPlayer.clear_tags)
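        # The Processor can enable or disable individual analysis tabs.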
self.processor.signalDeactivatePoseTab.connect(self.togglePoseTab)
self.processor.signalDeactivateFaceTab.connect(self.toggleFaceTab)
self.processor.signalDeactivateGazeTab.connect(self.toggleGazeTab)
self.processor.signalDeactivateSpeakingTab.connect(self.toggleSpeakingTab)
self.processor.signalDeactivateObjectTab.connect(self.toggleObjectTab)
self.wTab.setCurrentIndex(0)
self.wTab.currentChanged.connect(self.processor.tabChanged)
def openProject(self, movie_fileName, data_fileName):
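        """Load a movie and its preprocessed data file, then initialise the player, timeline, and segment state."""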
self.status.showMessage('Reading data...')
self.processor.readData(movie_fileName, data_fileName)
self.processor.calculateMovementMeasures()
self.processor.calculateSpeakingMeasures()
self.wPlayer.mediaPlayer.positionChanged.connect(self.processor.updateFrame)
self.wPlayer.mediaPlayer.positionChanged.connect(self.updateFrame)
self.fps = self.processor.getFPS()
        # Segment state: a boolean mask over all frames marks which frames lie inside the selected segments.
self.frame = 0
self.frameCount = self.processor.getFrameCount()
self.frames_bitmask = np.ones(self.frameCount)
self.segments = [(0, self.frameCount)]
self.wTimeLine.setInit(self.processor.getFrameCount(), self.processor.getNumberIDs(), self.processor.getFPS())
self.wPlayer.setInit(self.processor.getVideo(), self.processor.getFPS(),
self.processor.getOriginalVideoResolution(), self.processor.getNumberIDs(),
self.processor.getColors(), self.processor.getTags(), self.processor.getTagColors())
self.wTimeLine.rangeslider.segmentsChanged.connect(self.segmentsChanged)
self.sendSegments.connect(self.processor._updateSegments)
self.processor.setReady(True)
self.status.showMessage('Ready')
def segmentsChanged(self, segments, last_segment_changed):
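        """Rebuild the frame bitmask from the timeline segments and keep playback inside the last changed segment."""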
self.segments = segments
self.frames_bitmask = np.zeros(self.frameCount)
for segment in segments:
start, end = segment
self.frames_bitmask[start:end] = np.ones((end - start))
self.sendSegments.emit(self.frames_bitmask)
start, end = segments[last_segment_changed]
if start > self.frame:
self.wPlayer.setFrame(start)
if end < self.frame:
self.wPlayer.pause()
self.wPlayer.setFrame(end)
"""
def closeEvent(self, event):
self.wPlayer.stop()
#quit_msg = "Are you sure you want to exit the program?"
# reply = QtGui.QMessageBox.question(self, 'Message',
# quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
#
# if reply == QtGui.QMessageBox.Yes:
# event.accept()
# else:
# event.ignore()
"""
def open(self):
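        """Prompt for a movie file and a preprocessed data file, then open them as a project."""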
        movie_fileName, _ = QFileDialog.getOpenFileName(
            self, "Open Movie File", self.movieDir, "Files (*.mp4)")
        data_fileName, _ = QFileDialog.getOpenFileName(
            self, "Open Preprocessed File", self.movieDir, "Files (*.dat)")
if movie_fileName and data_fileName and os.path.isfile(movie_fileName) and os.path.isfile(data_fileName):
self.status.showMessage('Loading data... ')
self.openProject(movie_fileName, data_fileName)
else:
            print("ERROR: both a movie file and a preprocessed data file must be selected")
def export(self):
self.status.showMessage('Exporting data... This might take some time.')
        # Export is triggered from the main window's menu; the actual work is done by the Processor.
self.processor.export()
self.status.showMessage('Exported data successfully!')
def _setupMenuBar(self):
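        """Create the File menu with Open, Export, and Exit actions."""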
self.mainMenu = self.menuBar()
fileMenu = self.mainMenu.addMenu('&File')
openAct = QAction('&Open', self)
openAct.setStatusTip('Open files')
openAct.setShortcut('Ctrl+O')
openAct.triggered.connect(self.open)
fileMenu.addAction(openAct)
exportAct = QAction('&Export', self)
exportAct.setStatusTip('Export calculations')
exportAct.setShortcut('Ctrl+E')
exportAct.triggered.connect(self.export)
fileMenu.addAction(exportAct)
exitAct = QAction('&Exit', self)
exitAct.setStatusTip('Exit application')
exitAct.setShortcut('Ctrl+Q')
exitAct.triggered.connect(self.close)
fileMenu.addAction(exitAct)
#@QtCore.pyqtSlot(str)
# def statusUpdate(self, message):
# self.status.showMessage(message)
def updateFrame(self, position):
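        """Track the current frame; if it lies outside the selected segments, jump to the next segment."""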
        self.frame = min(int((position / 1000.0) * self.fps), self.frameCount - 1)
if self.frames_bitmask[self.frame] == 0:
for segment in self.segments:
if segment[0] > self.frame:
self.wPlayer.setFrame(segment[0] + 1)
break
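    # Slots that enable or disable the analysis tabs (tab order: gaze, speaking, pose, face, object).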
@QtCore.pyqtSlot(bool)
def togglePoseTab(self, deactivate):
self.wTab.setTabEnabled(2, not deactivate)
@QtCore.pyqtSlot(bool)
def toggleGazeTab(self, deactivate):
self.wTab.setTabEnabled(0, not deactivate)
@QtCore.pyqtSlot(bool)
def toggleFaceTab(self, deactivate):
self.wTab.setTabEnabled(3, not deactivate)
@QtCore.pyqtSlot(bool)
def toggleSpeakingTab(self, deactivate):
self.wTab.setTabEnabled(1, not deactivate)
@QtCore.pyqtSlot(bool)
def toggleObjectTab(self, deactivate):
self.wTab.setTabEnabled(4, not deactivate)
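
# Application entry point: apply the Fusion style with a dark palette and launch the main window.
# Usage: python3 main.py [--verbose]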
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
# print(sys.argv)
app.setWindowIcon(QtGui.QIcon('icons/logo.png'))
app.setStyle("Fusion")
# Now use a palette to switch to dark colors:
palette = QPalette()
palette.setColor(QPalette.Window, QColor(53, 53, 53))
palette.setColor(QPalette.WindowText, QtCore.Qt.white)
palette.setColor(QPalette.Base, QColor(25, 25, 25))
palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))
palette.setColor(QPalette.ToolTipBase, QtCore.Qt.black)
palette.setColor(QPalette.ToolTipText, QtCore.Qt.white)
palette.setColor(QPalette.Text, QtCore.Qt.white)
palette.setColor(QPalette.Button, QColor(53, 53, 53))
palette.setColor(QPalette.ButtonText, QtCore.Qt.white)
palette.setColor(QPalette.BrightText, QtCore.Qt.red)
palette.setColor(QPalette.Link, QColor(42, 130, 218))
palette.setColor(QPalette.Highlight, QColor(42, 130, 218))
palette.setColor(QPalette.HighlightedText, QtCore.Qt.black)
app.setPalette(palette)
    verbose = "--verbose" in sys.argv
    if verbose:
        print("### Verbose ENABLED")
main = MainWindow(verbose=verbose)
main.setWindowTitle('ConAn: A Usable Tool for Multimodal Conversation Analysis')
main.show()
sys.exit(app.exec_())