# conan/uiwidget/widgetplayer.py
from PyQt5 import QtCore, QtGui, QtWidgets, QtMultimedia, QtMultimediaWidgets, Qt
import os
import numpy as np
class WidgetPlayer(QtWidgets.QWidget):
updateFrame = QtCore.pyqtSignal(int)
# sendFileName = QtCore.pyqtSignal(str)
sendState = QtCore.pyqtSignal(QtMultimedia.QMediaPlayer.State)
frameAvailable = QtCore.pyqtSignal(QtGui.QImage)
labels = list()
colors = list()
pose_data = list()
gaze_data = list()
tag_data = dict()
tags = list()
tag_colors = dict()
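# NOTE: the containers above hold the QGraphicsPathItem handles for the drawn overlays;
# they are filled in setInit() and replaced by the draw_*/clear_* slots. Being class
# attributes, they are shared across WidgetPlayer instances - fine for a single player,
# but worth keeping in mind.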
def __init__(self, parent=None):
super(WidgetPlayer, self).__init__(parent)
self.root = QtCore.QFileInfo(__file__).absolutePath()
# mediaplayer for decoding the video
self.mediaPlayer = QtMultimedia.QMediaPlayer(self, QtMultimedia.QMediaPlayer.VideoSurface)
#self.mediaPlayer.setMuted(True)
# hierarchy: the QGraphicsScene is shown through a QGraphicsView; the QGraphicsVideoItem sits at the bottom (z = -1000) and the overlay path/text items are drawn on top of it
self._scene = QtWidgets.QGraphicsScene(self)
self._scene.setBackgroundBrush(QtGui.QBrush(QtGui.QColor('black')))
self._gv = QtWidgets.QGraphicsView(self._scene)
self._videoitem = QtMultimediaWidgets.QGraphicsVideoItem()
self._videoitem.setPos(0, 0)
self._videoitem.setZValue(-1000)
self._scene.addItem(self._videoitem)
if os.name != 'nt':
# grab decoded frames to forward them to the facial emotion tab
probe = QtMultimedia.QVideoProbe(self)
probe.videoFrameProbed.connect(self.on_videoFrameProbed)
probe.setSource(self.mediaPlayer)
# disable scrollbars
self._gv.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self._gv.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
# layout holder so the graphics view can expand to the full widget size
self.lay = QtWidgets.QVBoxLayout(self)
self.lay.setContentsMargins(0, 0, 0, 0)
self.lay.addWidget(self._gv)
self.errorLabel = QtWidgets.QLabel()
self.errorLabel.setSizePolicy(QtWidgets.QSizePolicy.Preferred,
QtWidgets.QSizePolicy.Maximum)
self.mediaPlayer.setVideoOutput(self._videoitem)
self.mediaPlayer.stateChanged.connect(self.on_stateChanged)
self.mediaPlayer.positionChanged.connect(self.mediaChangedPosition)
# self.mediaPlayer.durationChanged.connect(self.durationChanged)
self.mediaPlayer.error.connect(self.handleError)
self.movieDir = ''
self.duration = 0
def setInit(self, video, fps, originalVideoResolution, number_ids, colors, tags, tag_colors):
self.fps = fps
self.originalVideoResolution = originalVideoResolution
f = os.path.abspath(video)
self.mediaPlayer.setMedia(QtMultimedia.QMediaContent(QtCore.QUrl.fromLocalFile(f)))
self.mediaPlayer.setNotifyInterval(1000 // self.fps)
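# positionChanged then fires roughly once per frame, e.g. every 40 ms at 25 fps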
# init pose data
for i in range(number_ids):
self.pose_data.append(self._scene.addPath(QtGui.QPainterPath()))
# init gaze data
for i in range(number_ids):
self.gaze_data.append(self._scene.addPath(QtGui.QPainterPath()))
# init label data
for i in range(number_ids):
self.labels.append(self._scene.addPath(QtGui.QPainterPath()))
# init tag data
if tags:
for i, tag in enumerate(tags):
self.tag_data[tag] = self._scene.addPath(QtGui.QPainterPath())
self.tag_colors[tag] = tag_colors[i]
self.number_ids = number_ids
self.colors = colors
self.tags = tags
def play(self):
if self.mediaPlayer.state() == QtMultimedia.QMediaPlayer.PlayingState:
self.mediaPlayer.pause()
else:
self.mediaPlayer.play()
self.sendState.emit(self.mediaPlayer.state())
def pause(self):
if self.mediaPlayer.state() == QtMultimedia.QMediaPlayer.PlayingState:
self.mediaPlayer.pause()
self.sendState.emit(self.mediaPlayer.state())
def setFrame(self, frame):
# RESPECT FPS! position is time in milliseconds
position = int(frame * 1000 / self.fps)
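# e.g. at 25 fps, frame 50 maps to 50 * 1000 / 25 = 2000 ms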
# print("Received", position)
self.mediaPlayer.setPosition(position)
def stop(self):
self.mediaPlayer.stop()
@QtCore.pyqtSlot(QtMultimedia.QMediaPlayer.State)
def on_stateChanged(self, state):
self.focus_on_video()
def mediaChangedPosition(self, position):
frame = int((position / 1000.0) * self.fps)
# print("Video Running %i" % frame)
self.updateFrame.emit(frame)
self._gv.fitInView(self._videoitem, QtCore.Qt.KeepAspectRatio)
def handleError(self):
# self.playButton.setEnabled(False)
print("Error: " + self.mediaPlayer.errorString())
def createButtons(self):
iconSize = QtCore.QSize(28, 28)
openButton = QtWidgets.QToolButton()
openButton.setStyleSheet('border: none;')
openButton.setIcon(QtGui.QIcon(self.root + '/icons/open.png'))
openButton.setIconSize(iconSize)
openButton.setToolTip("Open File")
# openButton.clicked.connect(self.open)
self.playButton = QtWidgets.QToolButton()
self.playButton.setStyleSheet('border: none;')
self.playButton.setIcon(QtGui.QIcon(self.root + '/icons/play.png'))
self.playButton.setIconSize(iconSize)
self.playButton.setToolTip("Play movie")
self.playButton.clicked.connect(self.play)
self.playButton.setEnabled(False)
self.stopButton = QtWidgets.QToolButton()
self.stopButton.setStyleSheet('border: none;')
self.stopButton.setIcon(QtGui.QIcon(self.root + '/icons/stop.png'))
self.stopButton.setIconSize(iconSize)
self.stopButton.setToolTip("Stop movie")
self.stopButton.clicked.connect(self.stop)
self.stopButton.setEnabled(False)
@QtCore.pyqtSlot(QtMultimedia.QVideoFrame)
def on_videoFrameProbed(self, frame):
cloneFrame = QtMultimedia.QVideoFrame(frame)
cloneFrame.map(QtMultimedia.QAbstractVideoBuffer.ReadOnly)
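# bits()/bytesPerLine() are only valid while the frame is mapped (hence map() above and unmap() below)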
image = QtGui.QImage(cloneFrame.bits(), cloneFrame.width(), cloneFrame.height(), cloneFrame.bytesPerLine(),
QtMultimedia.QVideoFrame.imageFormatFromPixelFormat(cloneFrame.pixelFormat()))
self.frameAvailable.emit(image)
cloneFrame.unmap()
def focus_on_video(self):
native_video_resolution = self.mediaPlayer.metaData("Resolution")
# we also update the scene view so it zooms to the video
if native_video_resolution is not None:
self._videoitem.setSize(QtCore.QSizeF(native_video_resolution.width(), native_video_resolution.height()))
self._gv.fitInView(self._videoitem, QtCore.Qt.KeepAspectRatio)
# scale the video item up to match the original video resolution
if self.originalVideoResolution is not None:
width_ratio = self.originalVideoResolution[0] / native_video_resolution.width()
self._videoitem.setScale(width_ratio)
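# presumably the pose/gaze/tag coordinates are given in pixels of the original
# recording, so scaling the video item by width_ratio keeps the overlays aligned
# with the displayed frame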
@QtCore.pyqtSlot()
def clear_tags(self):
self.focus_on_video()
# clear all tags
for tag in self.tags:
self._scene.removeItem(self.tag_data[tag])
self.tag_data[tag] = self._scene.addPath(QtGui.QPainterPath())
@QtCore.pyqtSlot(int, list, list)
def draw_tags(self, tag, lstX, lstY):
# this is removing the old tag data
self._scene.removeItem(self.tag_data[tag])
path = QtGui.QPainterPath()
path.setFillRule(Qt.Qt.WindingFill)
# add a 100x100 square centred on each (x, y) tag position
for (x, y) in zip(lstX, lstY):
path.addRect(x-50, y-50, 100, 100)
# addPath() converts the path into a QGraphicsPathItem
# save the item for later removal
self.tag_data[tag] = self._scene.addPath(path)
# set colors
color = self.tag_colors[tag]
pen = QtGui.QPen(QtGui.QColor(color[0], color[1], color[2], 255), 2, QtCore.Qt.SolidLine)
self.tag_data[tag].setPen(pen)
# optional fill of the squares (50% alpha), currently disabled:
# self.tag_data[tag].setBrush(QtGui.QColor(color[0], color[1], color[2], int(0.5 * 255)))
@QtCore.pyqtSlot()
def clear_labels(self):
self.focus_on_video()
# clear all labels
for id_no in range(self.number_ids):
self._scene.removeItem(self.labels[id_no])
self.labels[id_no] = self._scene.addPath(QtGui.QPainterPath())
@QtCore.pyqtSlot(int, int, int)
def draw_labels(self, id_no, x, y):
# this is removing the old label item
self._scene.removeItem(self.labels[id_no])
path = QtGui.QPainterPath()
# then draw text
font = QtGui.QFont("Arial", 70)
font.setStyleStrategy(QtGui.QFont.ForceOutline)
# the fixed offset of 100 only roughly centres the text (QFontMetrics could give the exact width)
# also move the text up by 300 so that it does not cover the face
path.addText(x - 100, y - 300, font, "ID " + str(id_no))
# addPath() converts the path into a QGraphicsPathItem
# save the item for later removal
self.labels[id_no] = self._scene.addPath(path)
# set colors
color = tuple([int(a * 255) for a in self.colors[id_no]])
# alpha value is set to 90%
pen = QtGui.QPen(QtGui.QColor(color[0], color[1], color[2], int(0.9 * 255)), 10, QtCore.Qt.SolidLine)
self.labels[id_no].setPen(pen)
@QtCore.pyqtSlot()
def clear_pose(self):
# empty pose data
for id_no in range(self.number_ids):
self._scene.removeItem(self.pose_data[id_no])
self.pose_data[id_no] = self._scene.addPath(QtGui.QPainterPath())
@QtCore.pyqtSlot(int, list, list)
def draw_pose(self, id_no, lstX, lstY):
# this is removing the old pose data
self._scene.removeItem(self.pose_data[id_no])
if len(lstX) > 0 and len(lstY) > 0:
path = QtGui.QPainterPath()
# set starting points
path.moveTo(lstX[0], lstY[0])
# then draw the remaining lines
for (x, y) in zip(lstX[1:], lstY[1:]):
path.lineTo(x, y)
# addPath() converts the path into a QGraphicsPathItem
# save the item for later removal
self.pose_data[id_no] = self._scene.addPath(path)
# set colors
color = tuple([int(a * 255) for a in self.colors[id_no]])
# alpha value is set to 70%
pen = QtGui.QPen(QtGui.QColor(color[0], color[1], color[2], int(0.7 * 255)), 10, QtCore.Qt.SolidLine)
self.pose_data[id_no].setPen(pen)
else:
self.pose_data[id_no] = self._scene.addPath(QtGui.QPainterPath())
@QtCore.pyqtSlot()
def clear_gaze(self):
# empty gaze data
for id_no in range(self.number_ids):
self._scene.removeItem(self.gaze_data[id_no])
self.gaze_data[id_no] = self._scene.addPath(QtGui.QPainterPath())
@QtCore.pyqtSlot(int, list, list)
def draw_gaze(self, id_no, lstX, lstY):
# this is removing the old gaze data
self._scene.removeItem(self.gaze_data[id_no])
path = QtGui.QPainterPath()
path.setFillRule(Qt.Qt.WindingFill)
# add a 100x100 ellipse whose bounding box is anchored at each (x, y)
for (x, y) in zip(lstX, lstY):
path.addEllipse(x, y, 100, 100)
# addPath() converts the path into a QGraphicsPathItem
# save the item for later removal
self.gaze_data[id_no] = self._scene.addPath(path)
# set colors
color = tuple([int(a * 255) for a in self.colors[id_no]])
# alpha value is set to 50%
pen = QtGui.QPen(QtGui.QColor(color[0], color[1], color[2], int(0.5 * 255)), 1, QtCore.Qt.SolidLine)
self.gaze_data[id_no].setPen(pen)
# fill ellipses
self.gaze_data[id_no].setBrush(QtGui.QColor(color[0], color[1], color[2], int(0.5 * 255)))
@QtCore.pyqtSlot(list)
def onSelectedID(self, lst):
self.clear_labels()
self.clear_gaze()
self.clear_pose()
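# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): shows how WidgetPlayer
# might be embedded in an application. The video path, fps, resolution, id count
# and colours below are placeholder values, not values from this project.
if __name__ == "__main__":
    import sys

    app = QtWidgets.QApplication(sys.argv)
    player = WidgetPlayer()
    # hypothetical inputs: colours are RGB tuples in the 0..1 range expected by draw_pose()
    player.setInit(
        video="example.mp4",
        fps=25,
        originalVideoResolution=(1920, 1080),
        number_ids=2,
        colors=[(1.0, 0.0, 0.0), (0.0, 1.0, 0.0)],
        tags=[],
        tag_colors=[],
    )
    player.updateFrame.connect(lambda f: print("frame", f))
    player.resize(960, 540)
    player.show()
    player.play()
    sys.exit(app.exec_())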