init commit

This commit is contained in:
Yao Wang 2022-05-09 14:32:31 +02:00
commit d5d633b6c7
517 changed files with 18824 additions and 0 deletions

54
README.md Normal file

@@ -0,0 +1,54 @@
# VisRecall: Quantifying Information Visualisation Recallability via Question Answering
*Yao Wang, Chuhan Jiao (Aalto University), Mihai Bâce, and Andreas Bulling*
Submitted to IEEE Transactions on Visualization and Computer Graphics (TVCG), 2022.
This repository contains the dataset and models for predicting visualisation recallability.
```
$Root Directory
│─ README.md —— this file
│─ RecallNet —— Source code of the network to predict infovis recallability
│ │
│ │─ environment.yaml —— conda environments
│ │
│ │─ notebooks
│ │ │
│ │ │─ train_RecallNet.ipynb —— main notebook for training and validation
│ │ │
│ │ └─ massvis_recall.json —— saved recallability scores for MASSVIS dataset
│ │
│ └─ src
│ │
│ │─ singleduration_models.py —— RecallNet model
│ │
│ │─ sal_imp_utilities.py —— image processing utilities
│ │
│ │─ losses_keras2.py —— loss functions
│ │
│ ...
└─ VisRecall —— The dataset
│─ answer_raw —— raw answers from AMT workers
│─ merged
│ │
│ │─ src —— original images
│ │
│ │─ qa —— question annotations
│ │
│ └─ image_annotation —— other metadata annotations
└─ training_data
│─ all —— recallability scores averaged over all question types
└─ X-question —— a specific type of question (T-, FE-, F-, RV-, U-)
```
contact: yao.wang@vis.uni-stuttgart.de
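
For a first look at the data, the saved MASSVIS recallability scores can be loaded directly. A minimal sketch (the exact JSON schema is an assumption until you inspect the file):

```python
import json

# Quick inspection of the saved recallability scores (schema assumed).
with open("RecallNet/notebooks/massvis_recall.json") as f:
    scores = json.load(f)

print(type(scores).__name__, len(scores))
```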

160
RecallNet/environment.yaml Normal file

@@ -0,0 +1,160 @@
name: tf-cuda9
channels:
- conda-forge
- defaults
dependencies:
- _libgcc_mutex=0.1=main
- _tflow_select=2.1.0=gpu
- absl-py=0.11.0=py37h06a4308_0
- argon2-cffi=20.1.0=py37h8f50634_2
- astor=0.8.1=py37_0
- async_generator=1.10=py_0
- attrs=20.2.0=pyh9f0ad1d_0
- backports=1.0=py_2
- backports.functools_lru_cache=1.6.1=py_0
- blas=1.0=mkl
- bleach=3.2.1=pyh9f0ad1d_0
- c-ares=1.16.1=h7b6447c_0
- ca-certificates=2021.4.13=h06a4308_1
- certifi=2020.12.5=py37h06a4308_0
- cffi=1.14.3=py37he30daa8_0
- cudatoolkit=9.0=h13b8566_0
- cudnn=7.3.1=cuda9.0_0
- cupti=9.0.176=0
- decorator=4.4.2=py_0
- defusedxml=0.6.0=py_0
- entrypoints=0.3=py37hc8dfbb8_1002
- gast=0.4.0=py_0
- google-pasta=0.2.0=py_0
- grpcio=1.31.0=py37hf8bcb03_0
- h5py=2.10.0=py37hd6299e0_1
- hdf5=1.10.6=hb1b8bf9_0
- importlib-metadata=2.0.0=py_1
- importlib_metadata=2.0.0=1
- intel-openmp=2020.2=254
- ipykernel=5.3.4=py37hc6149b9_1
- ipython=5.8.0=py37_1
- ipython_genutils=0.2.0=py_1
- ipywidgets=7.5.1=pyh9f0ad1d_1
- jinja2=2.11.2=pyh9f0ad1d_0
- jsonschema=3.2.0=py_2
- jupyter_client=6.1.7=py_0
- jupyter_core=4.6.3=py37hc8dfbb8_2
- jupyterlab_pygments=0.1.2=pyh9f0ad1d_0
- keras=2.3.1=0
- keras-applications=1.0.8=py_1
- keras-base=2.3.1=py37_0
- keras-preprocessing=1.1.0=py_1
- ld_impl_linux-64=2.33.1=h53a641e_7
- libedit=3.1.20191231=h14c3975_1
- libffi=3.3=he6710b0_2
- libgcc-ng=9.1.0=hdf63c60_0
- libgfortran-ng=7.3.0=hdf63c60_0
- libprotobuf=3.13.0.1=hd408876_0
- libsodium=1.0.18=h516909a_1
- libstdcxx-ng=9.1.0=hdf63c60_0
- markdown=3.3.2=py37_0
- markupsafe=1.1.1=py37hb5d75c8_2
- mistune=0.8.4=py37h8f50634_1002
- mkl=2020.2=256
- mkl-service=2.3.0=py37he904b0f_0
- mkl_fft=1.2.0=py37h23d657b_0
- mkl_random=1.1.1=py37h0573a6f_0
- nbclient=0.5.1=py_0
- nbconvert=6.0.7=py37hc8dfbb8_2
- nbformat=5.0.8=py_0
- ncurses=6.2=he6710b0_1
- nest-asyncio=1.4.1=py_0
- notebook=6.1.4=py37hc8dfbb8_1
- numpy=1.19.2=py37h54aff64_0
- numpy-base=1.19.2=py37hfa32c7d_0
- openssl=1.1.1k=h27cfd23_0
- packaging=20.4=pyh9f0ad1d_0
- pandoc=2.11.0.4=hd18ef5c_0
- pandocfilters=1.4.2=py_1
- pexpect=4.8.0=pyh9f0ad1d_2
- pickleshare=0.7.5=py_1003
- pip=20.2.4=py37_0
- prometheus_client=0.8.0=pyh9f0ad1d_0
- prompt_toolkit=1.0.15=py_1
- protobuf=3.13.0.1=py37he6710b0_1
- ptyprocess=0.6.0=py_1001
- pycparser=2.20=pyh9f0ad1d_2
- pygments=2.7.2=py_0
- pyparsing=2.4.7=pyh9f0ad1d_0
- pyrsistent=0.17.3=py37h8f50634_1
- python=3.7.9=h7579374_0
- python-dateutil=2.8.1=py_0
- python_abi=3.7=1_cp37m
- pyyaml=5.3.1=py37h7b6447c_1
- pyzmq=19.0.2=py37hac76be4_2
- readline=8.0=h7b6447c_0
- send2trash=1.5.0=py_0
- setuptools=50.3.0=py37hb0f4dca_1
- simplegeneric=0.8.1=py_1
- six=1.15.0=py_0
- sqlite=3.33.0=h62c20be_0
- tensorboard=1.14.0=py37hf484d3e_0
- tensorflow=1.14.0=gpu_py37hae64822_0
- tensorflow-base=1.14.0=gpu_py37h8f37b9b_0
- tensorflow-estimator=1.14.0=py_0
- tensorflow-gpu=1.14.0=h0d30ee6_0
- termcolor=1.1.0=py37_1
- terminado=0.9.1=py37hc8dfbb8_1
- testpath=0.4.4=py_0
- tk=8.6.10=hbc83047_0
- tornado=6.0.4=py37h8f50634_2
- traitlets=5.0.5=py_0
- wcwidth=0.2.5=pyh9f0ad1d_2
- webencodings=0.5.1=py_1
- werkzeug=1.0.1=py_0
- wheel=0.35.1=py_0
- widgetsnbextension=3.5.1=py37hc8dfbb8_4
- wrapt=1.12.1=py37h7b6447c_1
- xz=5.2.5=h7b6447c_0
- yaml=0.2.5=h7b6447c_0
- zeromq=4.3.3=he1b5a44_2
- zipp=3.4.0=pyhd3eb1b0_0
- zlib=1.2.11=h7b6447c_3
- pip:
- adjusttext==0.7.3
- alignment==0.0.1
- chardet==4.0.0
- cmasher==1.6.2
- colorspacious==1.1.2
- cycler==0.10.0
- e13tools==0.9.6
- editdistance==0.5.3
- fastdtw==0.3.4
- idna==2.10
- imageio==2.9.0
- joblib==1.0.0
- kiwisolver==1.2.0
- mat4py==0.5.0
- matplotlib==3.4.3
- natsort==7.1.1
- networkx==2.5
- opencv-python==4.4.0.44
- pandas==1.2.1
- patsy==0.5.1
- pillow==8.0.1
- pytz==2021.1
- pywavelets==1.1.1
- requests==2.25.1
- researchpy==0.3.2
- salicon==1.0
- scikit-image==0.18.1
- scikit-learn==0.24.1
- scipy==1.2.0
- seaborn==0.11.1
- simplejson==3.17.5
- sklearn==0.0
- spotlight==2.3.1
- statsmodels==0.12.2
- threadpoolctl==2.1.0
- tifffile==2021.3.17
- tqdm==4.51.0
- urllib3==1.26.4
- wget==3.2
prefix: /netpool/homes/wangyo/.conda/envs/tf-cuda9
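
To recreate this environment, `conda env create -f RecallNet/environment.yaml` should suffice; the machine-specific `prefix:` line above is informational and can be deleted on other machines. Note the pinned stack (TensorFlow 1.14 GPU, CUDA 9.0, cuDNN 7.3, Keras 2.3.1, Python 3.7), so a GPU with a CUDA-9-compatible driver is assumed.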

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long


@@ -0,0 +1,183 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n",
"/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
" _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n",
"/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
" _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n",
"/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
" _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n",
"/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
" _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n",
"/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
" _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n",
"/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
" np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n",
"/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
" _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n",
"/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
" _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n",
"/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
" _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n",
"/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
" _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n",
"/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
" _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n",
"/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
" np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n"
]
}
],
"source": [
"from xception_custom import Xception_wrapper"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"from keras.layers import Input"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[[[155 225 83]\n",
" [174 33 86]\n",
" [ 24 223 10]\n",
" ...\n",
" [147 233 79]\n",
" [232 187 173]\n",
" [ 69 126 85]]\n",
"\n",
" [[166 203 47]\n",
" [111 65 37]\n",
" [210 182 244]\n",
" ...\n",
" [154 62 70]\n",
" [ 62 93 101]\n",
" [132 231 126]]\n",
"\n",
" [[ 30 110 125]\n",
" [242 45 71]\n",
" [150 10 217]\n",
" ...\n",
" [ 38 165 128]\n",
" [ 64 58 127]\n",
" [179 174 72]]\n",
"\n",
" ...\n",
"\n",
" [[159 2 99]\n",
" [201 220 158]\n",
" [170 172 13]\n",
" ...\n",
" [ 79 72 65]\n",
" [ 10 228 7]\n",
" [ 99 60 129]]\n",
"\n",
" [[187 249 6]\n",
" [ 57 166 83]\n",
" [187 243 66]\n",
" ...\n",
" [109 184 147]\n",
" [142 158 83]\n",
" [190 61 30]]\n",
"\n",
" [[146 238 74]\n",
" [156 20 43]\n",
" [ 55 217 43]\n",
" ...\n",
" [208 181 141]\n",
" [196 88 15]\n",
" [132 225 63]]]\n"
]
},
{
"ename": "TypeError",
"evalue": "Error converting shape to a TensorShape: only size-1 arrays can be converted to Python scalars.",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorflow/python/eager/execute.py\u001b[0m in \u001b[0;36mmake_shape\u001b[0;34m(v, arg_name)\u001b[0m\n\u001b[1;32m 145\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 146\u001b[0;31m \u001b[0mshape\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtensor_shape\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_shape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mv\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 147\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mTypeError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorflow/python/framework/tensor_shape.py\u001b[0m in \u001b[0;36mas_shape\u001b[0;34m(shape)\u001b[0m\n\u001b[1;32m 1203\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1204\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mTensorShape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1205\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorflow/python/framework/tensor_shape.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, dims)\u001b[0m\n\u001b[1;32m 773\u001b[0m \u001b[0;31m# Got a list of dimensions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 774\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_dims\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mas_dimension\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0md\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0md\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mdims_iter\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 775\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorflow/python/framework/tensor_shape.py\u001b[0m in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 773\u001b[0m \u001b[0;31m# Got a list of dimensions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 774\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_dims\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mas_dimension\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0md\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0md\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mdims_iter\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 775\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorflow/python/framework/tensor_shape.py\u001b[0m in \u001b[0;36mas_dimension\u001b[0;34m(value)\u001b[0m\n\u001b[1;32m 715\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 716\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mDimension\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 717\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorflow/python/framework/tensor_shape.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, value)\u001b[0m\n\u001b[1;32m 184\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 185\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_value\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 186\u001b[0m if (not isinstance(value, compat.bytes_or_text_types) and\n",
"\u001b[0;31mTypeError\u001b[0m: only size-1 arrays can be converted to Python scalars",
"\nDuring handling of the above exception, another exception occurred:\n",
"\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-8-8f06c869009e>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mxception\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mXception_wrapper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minclude_top\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mweights\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'imagenet'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput_tensor\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minp\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpooling\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'xception:'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mxception\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 7\u001b[0;31m \u001b[0mtest_xception_shape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m<ipython-input-8-8f06c869009e>\u001b[0m in \u001b[0;36mtest_xception_shape\u001b[0;34m()\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0minput_\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrandom\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrandint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m256\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;36m240\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m320\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m3\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput_\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m \u001b[0minp\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mInput\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput_\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 5\u001b[0m \u001b[0mxception\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mXception_wrapper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minclude_top\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mweights\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'imagenet'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput_tensor\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minp\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpooling\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'xception:'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mxception\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/keras/engine/input_layer.py\u001b[0m in \u001b[0;36mInput\u001b[0;34m(shape, batch_shape, name, dtype, sparse, tensor)\u001b[0m\n\u001b[1;32m 176\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 177\u001b[0m \u001b[0msparse\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msparse\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 178\u001b[0;31m input_tensor=tensor)\n\u001b[0m\u001b[1;32m 179\u001b[0m \u001b[0;31m# Return tensor including _keras_shape and _keras_history.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 180\u001b[0m \u001b[0;31m# Note that in this case train_output and test_output are the same pointer.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/keras/legacy/interfaces.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 89\u001b[0m warnings.warn('Update your `' + object_name + '` call to the ' +\n\u001b[1;32m 90\u001b[0m 'Keras 2 API: ' + signature, stacklevel=2)\n\u001b[0;32m---> 91\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 92\u001b[0m \u001b[0mwrapper\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_original_function\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 93\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mwrapper\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/keras/engine/input_layer.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, input_shape, batch_size, batch_input_shape, dtype, input_tensor, sparse, name)\u001b[0m\n\u001b[1;32m 85\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 86\u001b[0m \u001b[0msparse\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msparse\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 87\u001b[0;31m name=self.name)\n\u001b[0m\u001b[1;32m 88\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 89\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_placeholder\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py\u001b[0m in \u001b[0;36mplaceholder\u001b[0;34m(shape, ndim, dtype, sparse, name)\u001b[0m\n\u001b[1;32m 734\u001b[0m \u001b[0mdtype\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfloatx\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 735\u001b[0m x = tf_keras_backend.placeholder(\n\u001b[0;32m--> 736\u001b[0;31m shape=shape, ndim=ndim, dtype=dtype, sparse=sparse, name=name)\n\u001b[0m\u001b[1;32m 737\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mshape\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 738\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mndim\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorflow/python/keras/backend.py\u001b[0m in \u001b[0;36mplaceholder\u001b[0;34m(shape, ndim, dtype, sparse, name)\u001b[0m\n\u001b[1;32m 996\u001b[0m \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0marray_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msparse_placeholder\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 997\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 998\u001b[0;31m \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0marray_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mplaceholder\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 999\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mx\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1000\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorflow/python/ops/array_ops.py\u001b[0m in \u001b[0;36mplaceholder\u001b[0;34m(dtype, shape, name)\u001b[0m\n\u001b[1;32m 2141\u001b[0m \"eager execution.\")\n\u001b[1;32m 2142\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2143\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mgen_array_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mplaceholder\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2144\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2145\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorflow/python/ops/gen_array_ops.py\u001b[0m in \u001b[0;36mplaceholder\u001b[0;34m(dtype, shape, name)\u001b[0m\n\u001b[1;32m 6258\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mshape\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6259\u001b[0m \u001b[0mshape\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 6260\u001b[0;31m \u001b[0mshape\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_execute\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmake_shape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"shape\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 6261\u001b[0m _, _, _op = _op_def_lib._apply_op_helper(\n\u001b[1;32m 6262\u001b[0m \"Placeholder\", dtype=dtype, shape=shape, name=name)\n",
"\u001b[0;32m/netpool/homes/wangyo/.conda/envs/tf-cuda9/lib/python3.7/site-packages/tensorflow/python/eager/execute.py\u001b[0m in \u001b[0;36mmake_shape\u001b[0;34m(v, arg_name)\u001b[0m\n\u001b[1;32m 146\u001b[0m \u001b[0mshape\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtensor_shape\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_shape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mv\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 147\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mTypeError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 148\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mTypeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Error converting %s to a TensorShape: %s.\"\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0marg_name\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 149\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mValueError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 150\u001b[0m raise ValueError(\"Error converting %s to a TensorShape: %s.\" % (arg_name,\n",
"\u001b[0;31mTypeError\u001b[0m: Error converting shape to a TensorShape: only size-1 arrays can be converted to Python scalars."
]
}
],
"source": [
"def test_xception_shape():\n",
" input_ = np.random.randint(0,256, (240,320,3))\n",
" print(input_)\n",
" inp = Input(input_)\n",
" xception = Xception_wrapper(include_top=False, weights='imagenet', input_tensor=inp, pooling=None)\n",
" print('xception:',xception.output.shape)\n",
"test_xception_shape()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
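
The traceback recorded in the notebook output above comes from `Input(input_)`: `keras.layers.Input` expects a shape tuple through its `shape` argument, but the cell passes the random image array itself, which TensorFlow then fails to convert to a `TensorShape`. A hedged fix, keeping the notebook's names (assuming the repo-local `xception_custom` module is on the path):

```python
import numpy as np
from keras.layers import Input
from xception_custom import Xception_wrapper  # repo-local module imported above

def test_xception_shape():
    # Random test image; Input() needs the *shape*, not the array itself.
    input_ = np.random.randint(0, 256, (240, 320, 3))
    inp = Input(shape=input_.shape)  # was Input(input_), which raised the TypeError
    xception = Xception_wrapper(include_top=False, weights='imagenet',
                                input_tensor=inp, pooling=None)
    print('xception:', xception.output.shape)

test_xception_shape()
```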

9 binary files not shown.


@@ -0,0 +1,717 @@
# -*- coding: utf-8 -*-
"""Convolutional-recurrent layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from keras import backend as K
from keras import activations
from keras import initializers
from keras import regularizers
from keras import constraints
#from keras.layers.recurrent import _generate_dropout_mask
#from keras.layers.recurrent import _standardize_args
import numpy as np
import warnings
from keras.engine.base_layer import InputSpec, Layer
from keras.utils import conv_utils
#from keras.legacy import interfaces
#from keras.legacy.layers import Recurrent, ConvRecurrent2D
from keras.layers.recurrent import RNN
from keras.utils.generic_utils import has_arg
from keras.utils.generic_utils import to_list
from keras.utils.generic_utils import transpose_shape
from tensorflow.python.keras.layers.convolutional_recurrent import ConvRNN2D
def _generate_dropout_mask(ones, rate, training=None, count=1):
    # Restored from the private keras.layers.recurrent helper that this file
    # originally imported (see the commented-out import above): build `count`
    # dropout masks that are only active during the training phase.
    def dropped_inputs():
        return K.dropout(ones, rate)

    if count > 1:
        return [K.in_train_phase(dropped_inputs, ones, training=training)
                for _ in range(count)]
    return K.in_train_phase(dropped_inputs, ones, training=training)
class AttentiveConvLSTM2DCell(Layer):
def __init__(self,
filters,
attentive_filters,
kernel_size,
attentive_kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation='tanh',
recurrent_activation='hard_sigmoid',
attentive_activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
attentive_initializer='zeros',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
attentive_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
attentive_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
attentive_dropout=0.,
**kwargs):
super(AttentiveConvLSTM2DCell, self).__init__(**kwargs)
self.filters = filters
self.attentive_filters = attentive_filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
self.attentive_kernel_size = conv_utils.normalize_tuple(attentive_kernel_size, 2, 'attentive_kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = K.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2,
'dilation_rate')
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.attentive_activation = activations.get(attentive_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.attentive_initializer = initializers.get(attentive_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.attentive_regularizer = regularizers.get(attentive_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.attentive_constraint = constraints.get(attentive_constraint)
self.bias_constraint = constraints.get(bias_constraint)
if K.backend() == 'theano' and (dropout or recurrent_dropout):
warnings.warn(
'RNN dropout is no longer supported with the Theano backend '
'due to technical limitations. '
'You can either set `dropout` and `recurrent_dropout` to 0, '
'or use the TensorFlow backend.')
dropout = 0.
recurrent_dropout = 0.
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.attentive_dropout = min(1., max(0., attentive_dropout))
self.state_size = (self.filters, self.filters)
self._dropout_mask = None
self._recurrent_dropout_mask = None
self._attentive_dropout_mask = None
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
kernel_shape = self.kernel_size + (input_dim, self.filters * 4)
self.kernel_shape = kernel_shape
print('kernel_shape', kernel_shape)
recurrent_kernel_shape = self.kernel_size + (self.filters, self.filters * 4)
input_attentive_kernel_shape = self.attentive_kernel_size + (input_dim, self.attentive_filters)
hidden_attentive_kernel_shape = self.attentive_kernel_size + (self.filters, self.attentive_filters)
squeeze_attentive_kernel_shape = self.attentive_kernel_size + (self.attentive_filters, 1)
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=recurrent_kernel_shape,
initializer=self.recurrent_initializer,
name='recurrent_kernel',
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
self.input_attentive_kernel = self.add_weight(
shape=input_attentive_kernel_shape,
initializer=self.attentive_initializer,
name='input_attentive_kernel',
regularizer=self.attentive_regularizer,
constraint=self.attentive_constraint)
self.hidden_attentive_kernel = self.add_weight(
shape=hidden_attentive_kernel_shape,
initializer=self.attentive_initializer,
name='hidden_attentive_kernel',
regularizer=self.attentive_regularizer,
constraint=self.attentive_constraint)
self.squeeze_attentive_kernel = self.add_weight(
shape=squeeze_attentive_kernel_shape,
initializer=self.attentive_initializer,
name='squeeze_attentive_kernel',
regularizer=self.attentive_regularizer,
constraint=self.attentive_constraint)
if self.use_bias:
if self.unit_forget_bias:
def bias_initializer(_, *args, **kwargs):
return K.concatenate([
self.bias_initializer((self.filters,), *args, **kwargs),
initializers.Ones()((self.filters,), *args, **kwargs),
self.bias_initializer((self.filters * 2,), *args, **kwargs),
])
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(
shape=(self.filters * 4,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
self.attentive_bias = self.add_weight(
shape=(self.attentive_filters * 2,),
name='attentive_bias',
                initializer=self.bias_initializer,  # not the unit-forget closure above, which is sized for the main (filters * 4) bias
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.kernel_i = self.kernel[:, :, :, :self.filters]
self.recurrent_kernel_i = self.recurrent_kernel[:, :, :, :self.filters]
self.kernel_f = self.kernel[:, :, :, self.filters: self.filters * 2]
self.recurrent_kernel_f = (self.recurrent_kernel[:, :, :, self.filters:
self.filters * 2])
self.kernel_c = self.kernel[:, :, :, self.filters * 2: self.filters * 3]
self.recurrent_kernel_c = (self.recurrent_kernel[:, :, :, self.filters * 2:
self.filters * 3])
self.kernel_o = self.kernel[:, :, :, self.filters * 3:]
self.recurrent_kernel_o = self.recurrent_kernel[:, :, :, self.filters * 3:]
if self.use_bias:
self.bias_i = self.bias[:self.filters]
self.bias_f = self.bias[self.filters: self.filters * 2]
self.bias_c = self.bias[self.filters * 2: self.filters * 3]
self.bias_o = self.bias[self.filters * 3:]
self.bias_wa = self.attentive_bias[:self.attentive_filters ]
self.bias_ua = self.attentive_bias[self.attentive_filters : self.attentive_filters * 2]
        else:
            self.bias_i = None
            self.bias_f = None
            self.bias_c = None
            self.bias_o = None
            # The attention biases are read unconditionally in `call`, so they
            # must also exist (as None) when use_bias=False.
            self.bias_wa = None
            self.bias_ua = None
self.built = True
def call(self, inputs, states, training=None):
if 0 < self.dropout < 1 and self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
K.ones_like(inputs),
self.dropout,
training=training,
count=4)
if (0 < self.recurrent_dropout < 1 and
self._recurrent_dropout_mask is None):
self._recurrent_dropout_mask = _generate_dropout_mask(
K.ones_like(states[1]),
self.recurrent_dropout,
training=training,
count=4)
# if (0 < self.attentive_dropout < 1 and self._attentive_dropout_mask is None):
# self._attentive_dropout_mask = _generate_dropout_mask(
# K.ones_like(inputs),
# self.attentive_dropout,
# training=training,
# count=4)
# dropout matrices for input units
dp_mask = self._dropout_mask
# dropout matrices for recurrent units
rec_dp_mask = self._recurrent_dropout_mask
# dropout matrices for attentive units
# att_dp_mask = self._attentive_dropout_mask
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
        ##### ATTENTION MECHANISM #####
        # Project the previous hidden state and the current input into a shared
        # attentive feature space, squeeze it to a one-channel energy map e,
        # softmax over all spatial positions to obtain the attention map a,
        # then re-weight every input channel by a.
        h_and_x = (self.input_conv(h_tm1, self.hidden_attentive_kernel,
                                   self.bias_ua, padding='same')
                   + self.input_conv(inputs, self.input_attentive_kernel,
                                     self.bias_wa, padding='same'))
        e = self.recurrent_conv(self.attentive_activation(h_and_x),
                                self.squeeze_attentive_kernel)
        a = K.reshape(K.softmax(K.batch_flatten(e)), K.shape(e))
        inputs = inputs * K.repeat_elements(a, inputs.shape[-1], -1)
        ##### END OF ATTENTION MECHANISM #####
if 0 < self.dropout < 1.:
inputs_i = inputs * dp_mask[0]
inputs_f = inputs * dp_mask[1]
inputs_c = inputs * dp_mask[2]
inputs_o = inputs * dp_mask[3]
else:
inputs_i = inputs
inputs_f = inputs
inputs_c = inputs
inputs_o = inputs
if 0 < self.recurrent_dropout < 1.:
h_tm1_i = h_tm1 * rec_dp_mask[0]
h_tm1_f = h_tm1 * rec_dp_mask[1]
h_tm1_c = h_tm1 * rec_dp_mask[2]
h_tm1_o = h_tm1 * rec_dp_mask[3]
else:
h_tm1_i = h_tm1
h_tm1_f = h_tm1
h_tm1_c = h_tm1
h_tm1_o = h_tm1
x_i = self.input_conv(inputs_i, self.kernel_i, self.bias_i,
padding=self.padding)
x_f = self.input_conv(inputs_f, self.kernel_f, self.bias_f,
padding=self.padding)
x_c = self.input_conv(inputs_c, self.kernel_c, self.bias_c,
padding=self.padding)
x_o = self.input_conv(inputs_o, self.kernel_o, self.bias_o,
padding=self.padding)
h_i = self.recurrent_conv(h_tm1_i,
self.recurrent_kernel_i)
h_f = self.recurrent_conv(h_tm1_f,
self.recurrent_kernel_f)
h_c = self.recurrent_conv(h_tm1_c,
self.recurrent_kernel_c)
h_o = self.recurrent_conv(h_tm1_o,
self.recurrent_kernel_o)
i = self.recurrent_activation(x_i + h_i)
f = self.recurrent_activation(x_f + h_f)
c = f * c_tm1 + i * self.activation(x_c + h_c)
o = self.recurrent_activation(x_o + h_o)
h = o * self.activation(c)
if 0 < self.dropout + self.recurrent_dropout:
if training is None:
h._uses_learning_phase = True
return h, [h, c]
def input_conv(self, x, w, b=None, padding='valid'):
conv_out = K.conv2d(x, w, strides=self.strides,
padding=padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if b is not None:
conv_out = K.bias_add(conv_out, b,
data_format=self.data_format)
return conv_out
def recurrent_conv(self, x, w):
conv_out = K.conv2d(x, w, strides=(1, 1),
padding='same',
data_format=self.data_format)
return conv_out
def get_config(self):
config = {'filters': self.filters,
'attentive_filters': self.attentive_filters,
'kernel_size': self.kernel_size,
'attentive_kernel_size': self.attentive_kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(
self.recurrent_activation),
'attentive_activation': activations.serialize(
self.attentive_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(
self.kernel_initializer),
'recurrent_initializer': initializers.serialize(
self.recurrent_initializer),
'attentive_initializer': initializers.serialize(
self.attentive_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(
self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(
self.recurrent_regularizer),
'attentive_regularizer': regularizers.serialize(
self.attentive_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(
self.kernel_constraint),
'recurrent_constraint': constraints.serialize(
self.recurrent_constraint),
'attentive_constraint': constraints.serialize(
self.attentive_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'attentive_dropout': self.attentive_dropout}
base_config = super(AttentiveConvLSTM2DCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class AttentiveConvLSTM2D(ConvRNN2D):
"""Convolutional LSTM.
It is similar to an LSTM layer, but the input transformations
and recurrent transformations are both convolutional.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the strides of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, time, ..., channels)`
while `channels_first` corresponds to
inputs with shape `(batch, time, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Use in combination with `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.]
(http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to.
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
go_backwards: Boolean (default False).
If True, process the input sequence backwards.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
Input shape:
- if data_format='channels_first'
5D tensor with shape:
`(samples, time, channels, rows, cols)`
- if data_format='channels_last'
5D tensor with shape:
`(samples, time, rows, cols, channels)`
Output shape:
- if `return_sequences`
- if data_format='channels_first'
5D tensor with shape:
`(samples, time, filters, output_row, output_col)`
- if data_format='channels_last'
5D tensor with shape:
`(samples, time, output_row, output_col, filters)`
- else
- if data_format ='channels_first'
4D tensor with shape:
`(samples, filters, output_row, output_col)`
- if data_format='channels_last'
4D tensor with shape:
`(samples, output_row, output_col, filters)`
where o_row and o_col depend on the shape of the filter and
the padding
Raises:
ValueError: in case of invalid constructor arguments.
References:
- [Convolutional LSTM Network: A Machine Learning Approach for
Precipitation Nowcasting](http://arxiv.org/abs/1506.04214v1)
The current implementation does not include the feedback loop on the
cells output.
"""
def __init__(self,
filters,
attentive_filters,
kernel_size,
attentive_kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation='tanh',
recurrent_activation='hard_sigmoid',
attentive_activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
attentive_initializer='zeros',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
attentive_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
attentive_constraint=None,
bias_constraint=None,
return_sequences=False,
go_backwards=False,
stateful=False,
dropout=0.,
recurrent_dropout=0.,
attentive_dropout=0.,
**kwargs):
cell = AttentiveConvLSTM2DCell(filters=filters,
attentive_filters=attentive_filters,
kernel_size=kernel_size,
attentive_kernel_size=attentive_kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
recurrent_activation=recurrent_activation,
attentive_activation=attentive_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
attentive_initializer=attentive_initializer,
bias_initializer=bias_initializer,
unit_forget_bias=unit_forget_bias,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
attentive_regularizer=attentive_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
attentive_constraint=attentive_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
attentive_dropout=attentive_dropout)
super(AttentiveConvLSTM2D, self).__init__(cell,
return_sequences=return_sequences,
go_backwards=go_backwards,
stateful=stateful,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
def call(self, inputs, mask=None, training=None, initial_state=None):
return super(AttentiveConvLSTM2D, self).call(inputs,
mask=mask,
training=training,
initial_state=initial_state)
@property
def filters(self):
return self.cell.filters
@property
def attentive_filters(self):
return self.cell.attentive_filters
@property
def kernel_size(self):
return self.cell.kernel_size
@property
def attentive_kernel_size(self):
return self.cell.attentive_kernel_size
@property
def strides(self):
return self.cell.strides
@property
def padding(self):
return self.cell.padding
@property
def data_format(self):
return self.cell.data_format
@property
def dilation_rate(self):
return self.cell.dilation_rate
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def attentive_activation(self):
return self.cell.attentive_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def attentive_initializer(self):
return self.cell.attentive_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def unit_forget_bias(self):
return self.cell.unit_forget_bias
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def attentive_regularizer(self):
return self.cell.attentive_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def attentive_constraint(self):
return self.cell.attentive_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def attentive_dropout(self):
return self.cell.attentive_dropout
def get_config(self):
config = {'filters': self.filters,
'attentive_filters': self.attentive_filters,
'kernel_size': self.kernel_size,
'attentive_kernel_size': self.attentive_kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(
self.recurrent_activation),
'attentive_activation': activations.serialize(
self.attentive_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(
self.kernel_initializer),
'recurrent_initializer': initializers.serialize(
self.recurrent_initializer),
'attentive_initializer': initializers.serialize(
self.attentive_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(
self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(
self.recurrent_regularizer),
'attentive_regularizer': regularizers.serialize(
self.attentive_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(
self.activity_regularizer),
'kernel_constraint': constraints.serialize(
self.kernel_constraint),
'recurrent_constraint': constraints.serialize(
self.recurrent_constraint),
'attentive_constraint': constraints.serialize(
self.attentive_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'attentive_dropout': self.attentive_dropout}
base_config = super(AttentiveConvLSTM2D, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
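

if __name__ == '__main__':
    # Hedged usage sketch (shapes are assumptions, not from the original repo):
    # the layer consumes (batch, time, rows, cols, channels) and, with
    # return_sequences=False, emits (batch, rows, cols, filters).
    from keras.layers import Input
    from keras.models import Model

    seq = Input(shape=(4, 30, 40, 512))
    out = AttentiveConvLSTM2D(filters=256, attentive_filters=256,
                              kernel_size=(3, 3), attentive_kernel_size=(3, 3),
                              padding='same', return_sequences=False)(seq)
    Model(seq, out).summary()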

80
RecallNet/src/cb.py Normal file

@@ -0,0 +1,80 @@
import math
import os

import keras
from keras.callbacks import ModelCheckpoint
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import clear_output
class Unfreeze(keras.callbacks.Callback):
def __init__(self,it_to_unfreeze):
self.it_to_unfreeze = it_to_unfreeze
self.c=0
self.frozen=True
    def on_batch_end(self, batch, logs=None):
        self.c += 1
        # Unfreeze once the iteration budget is reached (the original
        # `not self.c > ...` test fired on the very first batch instead).
        if self.c >= self.it_to_unfreeze and self.frozen:
            print('Iteration %d reached: UNFREEZING ENCODER' % self.c)
            self.frozen = False
            for layer in self.model.layers:
                layer.trainable = True
class InteractivePlot(keras.callbacks.Callback):
def __init__(self):
pass
def on_train_begin(self, logs={}):
self.losses = []
self.logs = []
self.batchnr = 0
self.icount = 0
def on_train_end(self, logs={}):
pass
def on_epoch_end(self, epoch, logs={}):
self.batchnr = 0
loss_train = logs.get('loss')
self.losses.append(loss_train)
self.icount+=1
clear_output(wait=True)
plt.figure(figsize=(14,10))
train_vals = [self.losses]
desc = ['loss']
for i in range(len(train_vals)):
#plt.subplot(2, 3, i+1)
plt.plot(range(self.icount), train_vals[i], label=desc[i])
plt.legend()
#plt.savefig(self.logfile.replace('.txt', '.png'), bbox_inches='tight', format='png')
plt.show()
def on_batch_end(self, batch, logs=None):
self.batchnr+=1
if self.batchnr % 10 == 0:
self.on_epoch_end(epoch=0, logs=logs)
def ckpt_callback(model_name, dataset, l_str, bs, extra_str='',
                  period=1, save_weights_only=True,
                  ckpt_folder_path='../../predimportance_shared/models/ckpt/'):
    path = os.path.join(ckpt_folder_path, model_name)
    if not os.path.exists(path):
        os.makedirs(path)
    filepath = os.path.join(path, model_name + '_' + dataset + '_' + l_str + '_bs' + str(bs)
                            + extra_str + '_ep{epoch:02d}_valloss{val_loss:.4f}.hdf5')
    # Forward `save_weights_only` and `period` instead of hard-coding them,
    # so the function's arguments actually take effect.
    cb_chk = ModelCheckpoint(filepath, monitor='val_loss', verbose=1,
                             save_weights_only=save_weights_only, period=period)
    return cb_chk
def step_decay(init_lr = 0.0001, drop = 0.1, epochs_drop = 3.0):
def inner(epoch):
lrate = init_lr * math.pow(drop, math.floor((1+epoch)/epochs_drop))
if not (epoch+1)%epochs_drop:
print('Reducing lr. New lr is:', lrate)
return lrate
return inner
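

if __name__ == '__main__':
    # Hedged wiring sketch (argument values are assumptions): how these
    # callbacks would typically plug into a Keras training run.
    from keras.callbacks import LearningRateScheduler

    callbacks = [
        LearningRateScheduler(step_decay(init_lr=1e-4, drop=0.1, epochs_drop=3)),
        Unfreeze(it_to_unfreeze=500),  # unfreeze the encoder after 500 batches
        InteractivePlot(),             # live loss curve inside a notebook
        ckpt_callback('RecallNet', 'massvis', 'kl-cc', bs=8),
    ]
    # model.fit(x, y, epochs=20, callbacks=callbacks)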


@@ -0,0 +1,566 @@
import numpy as np
import keras
import sys
import os
from keras.layers import (Layer, Input, Multiply, Dropout, DepthwiseConv2D,
                          TimeDistributed, LSTM, Activation, Lambda, Conv2D,
                          Dense, GlobalAveragePooling2D, MaxPooling2D,
                          ZeroPadding2D, UpSampling2D, BatchNormalization,
                          Concatenate)
import keras.backend as K
from keras.models import Model
import tensorflow as tf
from keras.utils import Sequence
import cv2
import scipy.io
import math
from attentive_convlstm_new import AttentiveConvLSTM2D
from dcn_resnet_new import dcn_resnet
from gaussian_prior_new import LearningPrior
from sal_imp_utilities import *
from multiduration_models import decoder_block_timedist
from xception_custom import Xception_wrapper
from keras.applications import keras_modules_injection
def xception_cl(input_shape = (None, None, 3),
verbose=True,
print_shapes=True,
n_outs=1,
ups=8,
freeze_enc=False,
dil_rate = (2,2),
freeze_cl=True,
append_classif=True,
num_classes=5):
"""Xception with classification capabilities"""
inp = Input(shape=input_shape)
### ENCODER ###
xception = Xception_wrapper(include_top=False, weights='imagenet', input_tensor=inp, pooling=None)
if print_shapes: print('xception output shapes:',xception.output.shape)
if freeze_enc:
for layer in xception.layers:
layer.trainable = False
### CLASSIFIER ###
cl = GlobalAveragePooling2D(name='gap_cl')(xception.output)
cl = Dense(512,name='dense_cl')(cl)
cl = Dropout(0.3, name='dropout_cl')(cl)
cl = Dense(num_classes, activation='softmax', name='dense_cl_out')(cl)
## DECODER ##
outs_dec = decoder_block(xception.output, dil_rate=dil_rate, print_shapes=print_shapes, dec_filt=512, prefix='decoder')
outs_final = [outs_dec]*n_outs
if append_classif:
outs_final.append(cl)
# Building model
m = Model(inp, outs_final) # Last element of outs_final is classification vector
if verbose:
m.summary()
if freeze_cl:
print('Freezing classification dense layers')
m.get_layer('dense_cl').trainable = False
m.get_layer('dense_cl_out').trainable = False
return m
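
# Hedged compile sketch for the two-headed model above (the loss choices are
# assumptions, not the repo's actual training configuration). With n_outs=1
# and append_classif=True the model returns [map, class_probs], so Keras
# expects one loss per output:
#
#   m = xception_cl(input_shape=(240, 320, 3), n_outs=1)
#   m.compile(optimizer='adam',
#             loss=['binary_crossentropy', 'categorical_crossentropy'],
#             loss_weights=[1.0, 0.1])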
def xception_cl_fus(input_shape=(None, None, 3),
verbose=True,
print_shapes=True,
n_outs=1,
ups=8,
dil_rate=(2,2),
freeze_enc=False,
freeze_cl=True,
internal_filts=256,
num_classes=5,
dp=0.3):
"""Xception with classification capabilities that fuses representations from both tasks"""
inp = Input(shape=input_shape)
### ENCODER ###
xception = Xception_wrapper(include_top=False, weights='imagenet', input_tensor=inp, pooling=None)
if print_shapes: print('xception output shapes:',xception.output.shape)
if freeze_enc:
for layer in xception.layers:
layer.trainable = False
### GLOBAL FEATURES ###
g_n = global_net(xception.output, nfilts=internal_filts, dp=dp)
if print_shapes: print('g_n shapes:', g_n.shape)
### CLASSIFIER ###
# We potentially need another layer here
out_classif = Dense(num_classes, activation='softmax', name='out_classif')(g_n)
### ASPP (MID LEVEL FEATURES) ###
aspp_out = app(xception.output, internal_filts)
if print_shapes: print('aspp out shapes:', aspp_out.shape)
### FUSION ###
dense_f = Dense(internal_filts, name = 'dense_fusion')(g_n)
if print_shapes: print('dense_f shapes:', dense_f.shape)
reshap = Lambda(lambda x: K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(x, axis=1), K.int_shape(aspp_out)[2], axis=1), axis=1), K.int_shape(aspp_out)[1], axis=1),
lambda s: (s[0], K.int_shape(aspp_out)[1], K.int_shape(aspp_out)[2], s[1]))(dense_f)
if print_shapes: print('after lambda shapes:', reshap.shape)
conc = Concatenate()([aspp_out,reshap])
### Projection ###
x = Conv2D(internal_filts, (1, 1), padding='same', use_bias=False, name='concat_projection')(conc)
x = BatchNormalization(name='concat_projection_BN', epsilon=1e-5)(x)
x = Activation('relu')(x)
x = Dropout(dp)(x)
### DECODER ###
outs_dec = decoder_block(x, dil_rate=dil_rate, print_shapes=print_shapes, dec_filt=internal_filts, dp=dp)
outs_final = [outs_dec]*n_outs
outs_final.append(out_classif)
# Building model
m = Model(inp, outs_final) # Last element of outs_final is classification vector
if freeze_cl:
m.get_layer('out_classif').trainable = False
# for l in g_n.layers:
# l.trainable=False
if verbose:
m.summary()
return m
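
# The Lambda in xception_cl_fus above broadcasts the global feature vector
# across the spatial grid of aspp_out so the two can be concatenated
# channel-wise. A hedged standalone sketch of the same tiling (shapes are
# assumptions):
#
#   import keras.backend as K
#   g = K.ones((2, 256))                   # (batch, channels)
#   g = K.expand_dims(g, axis=1)           # (batch, 1, channels)
#   g = K.repeat_elements(g, 40, axis=1)   # (batch, cols, channels)
#   g = K.expand_dims(g, axis=1)           # (batch, 1, cols, channels)
#   g = K.repeat_elements(g, 30, axis=1)   # (batch, rows, cols, channels)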
def xception_cl_fus_aspp(input_shape=(None