import torch
import torch.nn as nn
import torchvision.models as models
from torch_geometric.nn.conv import GCNConv

from .utils import left_bias, right_bias, build_ocr_graph, pose_edge_index
from .base import CNN, MindNetSL


class SLToMnet(nn.Module):
    """
    Speaker-Listener ToMnet: two MindNet branches (left/right agent) whose
    belief predictions modulate each other through a ToM weight.
    """
    def __init__(self, hidden_dim, device, tom_weight, resnet=False, dropout=0.1,
                 mods=['rgb', 'pose', 'gaze', 'ocr', 'bbox']):
        super(SLToMnet, self).__init__()

        self.tom_weight = tom_weight

        # ---- Images ----#
        if resnet:
            # Frozen ResNet-34 backbone (ImageNet weights) as the RGB encoder
            resnet = models.resnet34(weights="IMAGENET1K_V1")
            self.cnn = nn.Sequential(*(list(resnet.children())[:-1]))
            for param in self.cnn.parameters():
                param.requires_grad = False
            self.rgb_ff = nn.Linear(512, hidden_dim)
        else:
            # Lightweight trainable CNN encoder
            self.cnn = CNN(hidden_dim)
            self.rgb_ff = nn.Linear(hidden_dim, hidden_dim)

        # ---- OCR and bbox ----#
        self.ocr_x, self.ocr_edge_index, self.ocr_edge_attr = build_ocr_graph(device)
        self.ocr_gnn = GCNConv(-1, hidden_dim)
        self.bbox_ff = nn.Linear(108, hidden_dim)

        # ---- Others ----#
        self.act = nn.GELU()
        self.dropout = nn.Dropout(dropout)
        self.device = device

        # ---- Mind nets ----#
        self.mind_net_left = MindNetSL(hidden_dim, dropout, mods)
        self.mind_net_right = MindNetSL(hidden_dim, dropout, mods)
        self.left = nn.Linear(hidden_dim, 27)
        self.right = nn.Linear(hidden_dim, 27)
        # Initialise the classifier biases with the log label priors
        self.left.bias.data = torch.tensor(left_bias).log()
        self.right.bias.data = torch.tensor(right_bias).log()

    def forward(self, images, poses, gazes, bboxes, ocr_tensor=None):
        batch_size, sequence_len, channels, height, width = images.shape

        bbox_feat = self.act(self.bbox_ff(bboxes))

        # Encode each frame independently, then stack along the time axis
        rgb_feat = []
        for i in range(sequence_len):
            images_i = images[:, i]
            img_i_feat = self.cnn(images_i)
            img_i_feat = img_i_feat.view(batch_size, -1)
            rgb_feat.append(img_i_feat)
        rgb_feat = torch.stack(rgb_feat, 1)
        rgb_feat = self.dropout(self.act(self.rgb_ff(rgb_feat)))

        # OCR graph embedding, mean-pooled over nodes and broadcast over batch and time
        ocr_feat = self.dropout(self.act(self.ocr_gnn(self.ocr_x, self.ocr_edge_index, self.ocr_edge_attr)))
        ocr_feat = ocr_feat.mean(0).unsqueeze(0).repeat(batch_size, sequence_len, 1)

        out_left, feats_left = self.mind_net_left(rgb_feat, ocr_feat, poses[:, :, 0, :], gazes[:, :, 0, :], bbox_feat)
        left_logits = self.left(out_left)

        out_right, feats_right = self.mind_net_right(rgb_feat, ocr_feat, poses[:, :, 1, :], gazes[:, :, 1, :], bbox_feat)
        right_logits = self.right(out_right)

        left_ranking = torch.log_softmax(left_logits, dim=-1)
        right_ranking = torch.log_softmax(right_logits, dim=-1)

        # Each agent's belief is its own ranking modulated by the other agent's ranking
        left_beliefs = left_ranking + self.tom_weight * right_ranking
        right_beliefs = right_ranking + self.tom_weight * left_ranking

        return left_beliefs, right_beliefs, [out_left, out_right] + feats_left + feats_right


if __name__ == "__main__":
    images = torch.ones(3, 22, 3, 128, 128)
    poses = torch.ones(3, 22, 2, 75)
    gazes = torch.ones(3, 22, 2, 3)
    bboxes = torch.ones(3, 22, 108)
    model = SLToMnet(64, 'cpu', 2.0, False, 0.5)
    out = model(images, poses, gazes, bboxes, None)
    print(out[0].shape)
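
    # --- Illustrative check (not part of the original smoke test) ---
    # Because the rankings are log-softmax outputs, the speaker-listener
    # combination is additive in log space: with tom_weight = 2.0, each
    # agent's belief corresponds (up to normalisation) to p_self * p_other^2.
    # Quick sanity check of the returned structure: two [batch, seq, 27]
    # log-score tensors plus the list of intermediate features.
    left_beliefs, right_beliefs, feats = out
    print(left_beliefs.shape, right_beliefs.shape, len(feats))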