diff --git a/python/ModelSnapshots/CNN-33767.h5 b/python/ModelSnapshots/CNN-33767.h5
new file mode 100644
index 0000000..a4aa55f
Binary files /dev/null and b/python/ModelSnapshots/CNN-33767.h5 differ
diff --git a/python/ModelSnapshots/LSTM-v1-01605.h5 b/python/ModelSnapshots/LSTM-v1-01605.h5
new file mode 100644
index 0000000..536c217
Binary files /dev/null and b/python/ModelSnapshots/LSTM-v1-01605.h5 differ
diff --git a/python/ModelSnapshots/LSTM-v2-00398.h5 b/python/ModelSnapshots/LSTM-v2-00398.h5
new file mode 100644
index 0000000..f74ae40
Binary files /dev/null and b/python/ModelSnapshots/LSTM-v2-00398.h5 differ
diff --git a/python/Models/CNN.pb b/python/Models/CNN.pb
new file mode 100644
index 0000000..6f968d8
Binary files /dev/null and b/python/Models/CNN.pb differ
diff --git a/python/Models/LSTM.pb b/python/Models/LSTM.pb
new file mode 100644
index 0000000..76d6eab
Binary files /dev/null and b/python/Models/LSTM.pb differ
diff --git a/python/Step_01_UserData.ipynb b/python/Step_01_UserData.ipynb
new file mode 100644
index 0000000..7bc7721
--- /dev/null
+++ b/python/Step_01_UserData.ipynb
@@ -0,0 +1,340 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "\n",
+ "from scipy.odr import *\n",
+ "from scipy.stats import *\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import os\n",
+ "import time\n",
+ "import matplotlib.pyplot as plt\n",
+ "from multiprocessing import Pool"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def cast_to_int(row):\n",
+ " try:\n",
+ " return np.array([a if float(a) >= 0 else 0 for a in row[2:-1]], dtype=np.uint8)\n",
+ " except Exception as e:\n",
+ " return None\n",
+ " \n",
+ "def load_csv(file):\n",
+ " temp_df = pd.read_csv(file, header=None, names = [\"UserID\", \"Age\", \"Gender\"], delimiter=\";\")\n",
+ " return temp_df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 298 ms, sys: 443 ms, total: 741 ms\n",
+ "Wall time: 937 ms\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "pool = Pool(os.cpu_count() - 2)\n",
+ "data_files = [\"DataStudyCollection/%s\" % file for file in os.listdir(\"DataStudyCollection\") if file.endswith(\".csv\") and \"userData\" in file]\n",
+ "df_lst = pool.map(load_csv, data_files)\n",
+ "dfAll = pd.concat(df_lst)\n",
+ "dfAll = dfAll.sort_values(\"UserID\")\n",
+ "dfAll = dfAll.reset_index(drop=True)\n",
+ "pool.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "24.166666666666668"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dfAll.Age.mean()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "1.4245742398014511"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dfAll.Age.std()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "21"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dfAll.Age.min()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "26"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dfAll.Age.max()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "
\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " UserID | \n",
+ " Age | \n",
+ " Gender | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 1 | \n",
+ " 23 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 2 | \n",
+ " 24 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 3 | \n",
+ " 25 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 4 | \n",
+ " 25 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 5 | \n",
+ " 26 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 5 | \n",
+ " 6 | \n",
+ " 23 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 6 | \n",
+ " 7 | \n",
+ " 21 | \n",
+ " female | \n",
+ "
\n",
+ " \n",
+ " 7 | \n",
+ " 8 | \n",
+ " 24 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 8 | \n",
+ " 9 | \n",
+ " 24 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 9 | \n",
+ " 10 | \n",
+ " 24 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 10 | \n",
+ " 11 | \n",
+ " 25 | \n",
+ " female | \n",
+ "
\n",
+ " \n",
+ " 11 | \n",
+ " 12 | \n",
+ " 26 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 12 | \n",
+ " 13 | \n",
+ " 22 | \n",
+ " female | \n",
+ "
\n",
+ " \n",
+ " 13 | \n",
+ " 14 | \n",
+ " 24 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 14 | \n",
+ " 15 | \n",
+ " 24 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 15 | \n",
+ " 16 | \n",
+ " 26 | \n",
+ " female | \n",
+ "
\n",
+ " \n",
+ " 16 | \n",
+ " 17 | \n",
+ " 26 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 17 | \n",
+ " 18 | \n",
+ " 23 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " UserID Age Gender\n",
+ "0 1 23 male\n",
+ "1 2 24 male\n",
+ "2 3 25 male\n",
+ "3 4 25 male\n",
+ "4 5 26 male\n",
+ "5 6 23 male\n",
+ "6 7 21 female\n",
+ "7 8 24 male\n",
+ "8 9 24 male\n",
+ "9 10 24 male\n",
+ "10 11 25 female\n",
+ "11 12 26 male\n",
+ "12 13 22 female\n",
+ "13 14 24 male\n",
+ "14 15 24 male\n",
+ "15 16 26 female\n",
+ "16 17 26 male\n",
+ "17 18 23 male"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dfAll"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_02_ReadData.ipynb b/python/Step_02_ReadData.ipynb
new file mode 100644
index 0000000..b657d6a
--- /dev/null
+++ b/python/Step_02_ReadData.ipynb
@@ -0,0 +1,338 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## This notebook creates one dataframe from all participants data\n",
+ "## It also removes 1% of the data as this is corrupted"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "\n",
+ "from scipy.odr import *\n",
+ "from scipy.stats import *\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import os\n",
+ "import time\n",
+ "import matplotlib.pyplot as plt\n",
+ "import ast\n",
+ "from multiprocessing import Pool, cpu_count\n",
+ "\n",
+ "import scipy\n",
+ "\n",
+ "from IPython import display\n",
+ "from matplotlib.patches import Rectangle\n",
+ "\n",
+ "from sklearn.metrics import mean_squared_error\n",
+ "import json\n",
+ "\n",
+ "import scipy.stats as st\n",
+ "from sklearn.metrics import r2_score\n",
+ "\n",
+ "\n",
+ "from matplotlib import cm\n",
+ "from mpl_toolkits.mplot3d import axes3d\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "import copy\n",
+ "\n",
+ "from sklearn.model_selection import LeaveOneOut, LeavePOut\n",
+ "\n",
+ "from multiprocessing import Pool"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def cast_to_int(row):\n",
+ " try:\n",
+ " return np.array([a if float(a) >= 0 else 0 for a in row[2:-1]], dtype=np.uint8)\n",
+ " except Exception as e:\n",
+ " return None\n",
+ " \n",
+ "def load_csv(file):\n",
+ " temp_df = pd.read_csv(file, delimiter=\";\")\n",
+ " temp_df.Image = temp_df.Image.str.split(',')\n",
+ " temp_df.Image = temp_df.Image.apply(cast_to_int)\n",
+ " return temp_df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "['DataStudyCollection/17_studyData.csv', 'DataStudyCollection/2_studyData.csv', 'DataStudyCollection/12_studyData.csv', 'DataStudyCollection/15_studyData.csv', 'DataStudyCollection/5_studyData.csv', 'DataStudyCollection/1_studyData.csv', 'DataStudyCollection/14_studyData.csv', 'DataStudyCollection/10_studyData.csv', 'DataStudyCollection/13_studyData.csv', 'DataStudyCollection/18_studyData.csv', 'DataStudyCollection/6_studyData.csv', 'DataStudyCollection/16_studyData.csv', 'DataStudyCollection/3_studyData.csv', 'DataStudyCollection/7_studyData.csv', 'DataStudyCollection/8_studyData.csv', 'DataStudyCollection/9_studyData.csv', 'DataStudyCollection/11_studyData.csv', 'DataStudyCollection/4_studyData.csv']\n",
+ "CPU times: user 1.86 s, sys: 1.03 s, total: 2.89 s\n",
+ "Wall time: 17.3 s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "pool = Pool(cpu_count() - 2)\n",
+ "data_files = [\"DataStudyCollection/%s\" % file for file in os.listdir(\"DataStudyCollection\") if file.endswith(\".csv\") and \"studyData\" in file]\n",
+ "print(data_files)\n",
+ "df_lst = pool.map(load_csv, data_files)\n",
+ "dfAll = pd.concat(df_lst)\n",
+ "pool.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "1010014"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df = dfAll[dfAll.Image.notnull()]\n",
+ "len(df)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "loaded 1013841 values\n",
+ "removed 3827 values (thats 0.377%)\n",
+ "new df has size 1010014\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"loaded %s values\" % len(dfAll))\n",
+ "print(\"removed %s values (thats %s%%)\" % (len(dfAll) - len(df), round((len(dfAll) - len(df)) / len(dfAll) * 100, 3)))\n",
+ "print(\"new df has size %s\" % len(df))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df = df.reset_index(drop=True)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " Timestamp | \n",
+ " Current_Task | \n",
+ " Task_amount | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " RepetitionID | \n",
+ " Actual_Data | \n",
+ " Is_Pause | \n",
+ " Image | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 17 | \n",
+ " 1547138602677 | \n",
+ " 0 | \n",
+ " 34 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " False | \n",
+ " False | \n",
+ " [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 17 | \n",
+ " 1547138602697 | \n",
+ " 0 | \n",
+ " 34 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " False | \n",
+ " False | \n",
+ " [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 17 | \n",
+ " 1547138602796 | \n",
+ " 0 | \n",
+ " 34 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " False | \n",
+ " False | \n",
+ " [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 17 | \n",
+ " 1547138602817 | \n",
+ " 0 | \n",
+ " 34 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " False | \n",
+ " False | \n",
+ " [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 17 | \n",
+ " 1547138602863 | \n",
+ " 0 | \n",
+ " 34 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " False | \n",
+ " False | \n",
+ " [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID Timestamp Current_Task Task_amount TaskID VersionID \\\n",
+ "0 17 1547138602677 0 34 0 0 \n",
+ "1 17 1547138602697 0 34 0 0 \n",
+ "2 17 1547138602796 0 34 0 0 \n",
+ "3 17 1547138602817 0 34 0 0 \n",
+ "4 17 1547138602863 0 34 0 0 \n",
+ "\n",
+ " RepetitionID Actual_Data Is_Pause \\\n",
+ "0 0 False False \n",
+ "1 0 False False \n",
+ "2 0 False False \n",
+ "3 0 False False \n",
+ "4 0 False False \n",
+ "\n",
+ " Image \n",
+ "0 [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... \n",
+ "1 [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... \n",
+ "2 [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... \n",
+ "3 [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... \n",
+ "4 [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... "
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.to_pickle(\"DataStudyCollection/AllData.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]"
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "sorted(df.userID.unique())"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_05_CNN_PreprocessData.ipynb b/python/Step_05_CNN_PreprocessData.ipynb
new file mode 100644
index 0000000..bfafe51
--- /dev/null
+++ b/python/Step_05_CNN_PreprocessData.ipynb
@@ -0,0 +1,966 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "\n",
+ "from scipy.odr import *\n",
+ "from scipy.stats import *\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import os\n",
+ "import time\n",
+ "import matplotlib.pyplot as plt\n",
+ "import ast\n",
+ "from multiprocessing import Pool, cpu_count\n",
+ "\n",
+ "import scipy\n",
+ "\n",
+ "from IPython import display\n",
+ "from matplotlib.patches import Rectangle\n",
+ "\n",
+ "from sklearn.metrics import mean_squared_error\n",
+ "import json\n",
+ "\n",
+ "import scipy.stats as st\n",
+ "from sklearn.metrics import r2_score\n",
+ "\n",
+ "\n",
+ "from matplotlib import cm\n",
+ "from mpl_toolkits.mplot3d import axes3d\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "import copy\n",
+ "\n",
+ "from sklearn.model_selection import LeaveOneOut, LeavePOut\n",
+ "\n",
+ "from multiprocessing import Pool\n",
+ "import cv2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " Timestamp | \n",
+ " Current_Task | \n",
+ " Task_amount | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " RepetitionID | \n",
+ " Actual_Data | \n",
+ " Is_Pause | \n",
+ " Image | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 7919 | \n",
+ " 17 | \n",
+ " 1547138928692 | \n",
+ " 1 | \n",
+ " 680 | \n",
+ " 6 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 7920 | \n",
+ " 17 | \n",
+ " 1547138928735 | \n",
+ " 1 | \n",
+ " 680 | \n",
+ " 6 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 7921 | \n",
+ " 17 | \n",
+ " 1547138928773 | \n",
+ " 1 | \n",
+ " 680 | \n",
+ " 6 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 7922 | \n",
+ " 17 | \n",
+ " 1547138928813 | \n",
+ " 1 | \n",
+ " 680 | \n",
+ " 6 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 7923 | \n",
+ " 17 | \n",
+ " 1547138928861 | \n",
+ " 1 | \n",
+ " 680 | \n",
+ " 6 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID Timestamp Current_Task Task_amount TaskID VersionID \\\n",
+ "7919 17 1547138928692 1 680 6 2 \n",
+ "7920 17 1547138928735 1 680 6 2 \n",
+ "7921 17 1547138928773 1 680 6 2 \n",
+ "7922 17 1547138928813 1 680 6 2 \n",
+ "7923 17 1547138928861 1 680 6 2 \n",
+ "\n",
+ " RepetitionID Actual_Data Is_Pause \\\n",
+ "7919 0 True False \n",
+ "7920 0 True False \n",
+ "7921 0 True False \n",
+ "7922 0 True False \n",
+ "7923 0 True False \n",
+ "\n",
+ " Image \n",
+ "7919 [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... \n",
+ "7920 [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... \n",
+ "7921 [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... \n",
+ "7922 [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... \n",
+ "7923 [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... "
+ ]
+ },
+ "execution_count": 10,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dfAll = pd.read_pickle(\"DataStudyCollection/AllData.pkl\")\n",
+ "df = dfAll[(dfAll.Actual_Data == True) & (dfAll.Is_Pause == False)]\n",
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 39 s, sys: 5.78 s, total: 44.8 s\n",
+ "Wall time: 43.3 s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "def is_max(df):\n",
+ " df_temp = df.copy(deep=True)\n",
+ " max_version = df_temp.RepetitionID.max()\n",
+ " df_temp[\"IsMax\"] = np.where(df_temp.RepetitionID == max_version, True, False)\n",
+ " df_temp[\"MaxRepetition\"] = [max_version] * len(df_temp)\n",
+ " return df_temp\n",
+ "\n",
+ "df_grp = df.groupby([df.userID, df.TaskID, df.VersionID])\n",
+ "pool = Pool(cpu_count() - 1)\n",
+ "result_lst = pool.map(is_max, [grp for name, grp in df_grp])\n",
+ "df = pd.concat(result_lst)\n",
+ "pool.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.Image = df.Image.apply(lambda x: x.reshape(27, 15))\n",
+ "df.Image = df.Image.apply(lambda x: x.clip(min=0, max=255))\n",
+ "df.Image = df.Image.apply(lambda x: x.astype(np.uint8))\n",
+ "df[\"ImageSum\"] = df.Image.apply(lambda x: np.sum(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.to_pickle(\"DataStudyCollection/dfFiltered.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "recorded actual: 1010014, used data: 851455\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"recorded actual: %s, used data: %s\" % (len(dfAll), len(df)))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df = pd.read_pickle(\"DataStudyCollection/dfFiltered.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " Timestamp | \n",
+ " Current_Task | \n",
+ " Task_amount | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " RepetitionID | \n",
+ " Actual_Data | \n",
+ " Is_Pause | \n",
+ " Image | \n",
+ " IsMax | \n",
+ " MaxRepetition | \n",
+ " ImageSum | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 291980 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... | \n",
+ " True | \n",
+ " 0 | \n",
+ " 307 | \n",
+ "
\n",
+ " \n",
+ " 291981 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... | \n",
+ " True | \n",
+ " 0 | \n",
+ " 222 | \n",
+ "
\n",
+ " \n",
+ " 291982 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... | \n",
+ " True | \n",
+ " 0 | \n",
+ " 521 | \n",
+ "
\n",
+ " \n",
+ " 291983 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... | \n",
+ " True | \n",
+ " 0 | \n",
+ " 318 | \n",
+ "
\n",
+ " \n",
+ " 291984 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... | \n",
+ " True | \n",
+ " 0 | \n",
+ " 373 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID Timestamp Current_Task Task_amount TaskID VersionID \\\n",
+ "291980 1 1,54515E+12 33 680 0 2 \n",
+ "291981 1 1,54515E+12 33 680 0 2 \n",
+ "291982 1 1,54515E+12 33 680 0 2 \n",
+ "291983 1 1,54515E+12 33 680 0 2 \n",
+ "291984 1 1,54515E+12 33 680 0 2 \n",
+ "\n",
+ " RepetitionID Actual_Data Is_Pause \\\n",
+ "291980 0 True False \n",
+ "291981 0 True False \n",
+ "291982 0 True False \n",
+ "291983 0 True False \n",
+ "291984 0 True False \n",
+ "\n",
+ " Image IsMax \\\n",
+ "291980 [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... True \n",
+ "291981 [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... True \n",
+ "291982 [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... True \n",
+ "291983 [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... True \n",
+ "291984 [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... True \n",
+ "\n",
+ " MaxRepetition ImageSum \n",
+ "291980 0 307 \n",
+ "291981 0 222 \n",
+ "291982 0 521 \n",
+ "291983 0 318 \n",
+ "291984 0 373 "
+ ]
+ },
+ "execution_count": 16,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Label if knuckle or finger\n",
+ "def f(row):\n",
+ " if row['TaskID'] < 17:\n",
+ " #val = \"Knuckle\"\n",
+ " val = 0\n",
+ " elif row['TaskID'] >= 17:\n",
+ " #val = \"Finger\"\n",
+ " val = 1\n",
+ " return val\n",
+ "df['InputMethod'] = df.apply(f, axis=1)\n",
+ "\n",
+ "def f(row):\n",
+ " if row['TaskID'] < 17:\n",
+ " val = \"Knuckle\"\n",
+ " elif row['TaskID'] >= 17:\n",
+ " val = \"Finger\"\n",
+ " return val\n",
+ "df['Input'] = df.apply(f, axis=1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Svens new Blob detection\n",
+ "def detect_blobs(image, task):\n",
+ " #image = e.Image\n",
+ " large = np.ones((29,17), dtype=np.uint8)\n",
+ " large[1:28,1:16] = np.copy(image)\n",
+ " temp, thresh = cv2.threshold(cv2.bitwise_not(large), 200, 255, cv2.THRESH_BINARY)\n",
+ " contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n",
+ " contours = [a for a in contours if cv2.contourArea(a) > 8 and cv2.contourArea(a) < 255]\n",
+ " lstBlob = []\n",
+ " lstMin = []\n",
+ " lstMax = []\n",
+ " count = 0\n",
+ " contours.sort(key=lambda a: cv2.contourArea(a))\n",
+ " if len(contours) > 0:\n",
+ " # if two finger or knuckle\n",
+ " cont_count = 2 if task in [1, 6, 7, 18, 23, 24] and len(contours) > 1 else 1\n",
+ " for i in range(1, cont_count + 1):\n",
+ " max_contour = contours[-1 * i]\n",
+ " xmax, ymax = np.max(max_contour.reshape(len(max_contour),2), axis=0)\n",
+ " xmin, ymin = np.min(max_contour.reshape(len(max_contour),2), axis=0)\n",
+ " #croped_im = np.zeros((27,15))\n",
+ " blob = large[max(ymin - 1, 0):min(ymax + 1, large.shape[0]),max(xmin - 1, 0):min(xmax + 1, large.shape[1])]\n",
+ " #croped_im[0:blob.shape[0],0:blob.shape[1]] = blob\n",
+ " #return (1, [croped_im])\n",
+ " lstBlob.append(blob)\n",
+ " lstMin.append(xmax-xmin)\n",
+ " lstMax.append(ymax-ymin)\n",
+ " count = count + 1\n",
+ " return (count, lstBlob, lstMin, lstMax)\n",
+ " else:\n",
+ " return (0, [np.zeros((29, 19))], 0, 0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 11.9 s, sys: 7.51 s, total: 19.4 s\n",
+ "Wall time: 18.6 s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "pool = Pool(os.cpu_count()-2)\n",
+ "temp_blobs = pool.starmap(detect_blobs, zip(df.Image, df.TaskID))\n",
+ "pool.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"BlobCount\"] = [a[0] for a in temp_blobs]\n",
+ "df[\"BlobImages\"] = [a[1] for a in temp_blobs]\n",
+ "df[\"BlobW\"] = [a[2] for a in temp_blobs]\n",
+ "df[\"BlobH\"] = [a[3] for a in temp_blobs]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0 710145\n",
+ "1 128117\n",
+ "2 13193\n",
+ "Name: BlobCount, dtype: int64"
+ ]
+ },
+ "execution_count": 20,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.BlobCount.value_counts()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dfX = df[(df.BlobCount == 1)].copy(deep=True)\n",
+ "dfX.BlobImages = dfX.BlobImages.apply(lambda x : x[0])\n",
+ "dfX.BlobW = dfX.BlobW.apply(lambda x : x[0])\n",
+ "dfX.BlobH = dfX.BlobH.apply(lambda x : x[0])\n",
+ "\n",
+ "dfY = df[(df.BlobCount == 2)].copy(deep=True)\n",
+ "dfY.BlobImages = dfY.BlobImages.apply(lambda x : x[0])\n",
+ "dfY.BlobW = dfY.BlobW.apply(lambda x : x[0])\n",
+ "dfY.BlobH = dfY.BlobH.apply(lambda x : x[0])\n",
+ "\n",
+ "dfZ = df[(df.BlobCount == 2)].copy(deep=True)\n",
+ "dfZ.BlobImages = dfZ.BlobImages.apply(lambda x : x[1])\n",
+ "dfZ.BlobW = dfZ.BlobW.apply(lambda x : x[1])\n",
+ "dfZ.BlobH = dfZ.BlobH.apply(lambda x : x[1])\n",
+ "\n",
+ "df = dfX.append([dfY, dfZ])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Sample Size not Argumented: 154503\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"Sample Size not Argumented:\", len(df))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"BlobArea\"] = df[\"BlobW\"] * df[\"BlobH\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "count 154503.0\n",
+ "mean 15.8\n",
+ "std 5.1\n",
+ "min 12.0\n",
+ "25% 12.0\n",
+ "50% 16.0\n",
+ "75% 16.0\n",
+ "max 110.0\n",
+ "Name: BlobArea, dtype: float64"
+ ]
+ },
+ "execution_count": 24,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.BlobArea.describe().round(1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " count | \n",
+ " mean | \n",
+ " std | \n",
+ " min | \n",
+ " 25% | \n",
+ " 50% | \n",
+ " 75% | \n",
+ " max | \n",
+ "
\n",
+ " \n",
+ " Input | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " Finger | \n",
+ " 110839.0 | \n",
+ " 16.6 | \n",
+ " 5.3 | \n",
+ " 12.0 | \n",
+ " 12.0 | \n",
+ " 16.0 | \n",
+ " 16.0 | \n",
+ " 110.0 | \n",
+ "
\n",
+ " \n",
+ " Knuckle | \n",
+ " 43664.0 | \n",
+ " 13.7 | \n",
+ " 3.7 | \n",
+ " 12.0 | \n",
+ " 12.0 | \n",
+ " 12.0 | \n",
+ " 16.0 | \n",
+ " 72.0 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " count mean std min 25% 50% 75% max\n",
+ "Input \n",
+ "Finger 110839.0 16.6 5.3 12.0 12.0 16.0 16.0 110.0\n",
+ "Knuckle 43664.0 13.7 3.7 12.0 12.0 12.0 16.0 72.0"
+ ]
+ },
+ "execution_count": 27,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.groupby(\"Input\").BlobArea.describe().round(1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"BlobSum\"] = df.BlobImages.apply(lambda x: np.sum(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.BlobSum.describe()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 27,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYwAAAD8CAYAAABkbJM/AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAG2hJREFUeJzt3X+MXfV55/H3p3YgDgnYhuyV17bWrmIlcvCGwAgcJYpm48bYpIr5I8kaodqwbrxaSJN0LXXNVlorP5DIqpSClNBawcWO0jiUJsUCU9druFrtSjY/AsEYwnoCJh7L4AQb2EmUpJN99o/zDLmMr5mvZ87MnBt9XtLVnPOc7zn3uXeu/Znz496riMDMzGwsvzfdDZiZWW9wYJiZWREHhpmZFXFgmJlZEQeGmZkVcWCYmVkRB4aZmRVxYJiZWREHhpmZFZk53Q2M10UXXRSLFi2a7ja6+vnPf85555033W2MyX3Wpxd6BPdZp17oEd7c5+OPP/6ziHj3uDcWET15u+yyy6KpHn744eluoYj7rE8v9BjhPuvUCz1GvLlP4LGYwP+7PiRlZmZFHBhmZlbEgWFmZkUcGGZmVsSBYWZmRRwYZmZWxIFhZmZFHBhmZlbEgWFmZkWKPhpE0p8CfwwEcBC4HpgH7AQuBB4H/igifi3pXGAHcBnwCvDvI+JIbucmYAPwG+DzEbEn66uA24EZwDcj4pa6HmA3izY/MJmbZ9OyYa7rch9HbvnEpN6vmdlkGnMPQ9J84PNAX0RcTPWf+lrga8BtEfEe4BRVEJA/T2X9thyHpKW53vuBVcA3JM2QNAP4OrAaWApck2PNzKxBSg9JzQRmSZoJvAM4DnwMuDeXbweuzuk1OU8uXyFJWd8ZEb+KiBeAAeDyvA1ExPMR8WuqvZY1E3tYZmZWtzEDIyKOAX8B/IQqKF6jOgT1akQM57BBYH5OzweO5rrDOf7Czvqodc5UNzOzBhnzHIakOVR/8S8GXgX+nuqQ0pSTtBHYCNBqtWi32+PazqZlw2MPmoDWrO73Md5+J8vQ0FDjeuqmF/rshR7BfdapF3qEevssOen9B8ALEfFTAEnfAz4MzJY0M/ciFgDHcvwxYCEwmIewLqA6+T1SH9G5zpnqbxIRW4GtAH19fdHf31/Q/um6nZCu06Zlw9x68PSn9si1/ZN6v2er3W4z3udwKvVCn73QI7jPOvVCj1BvnyXnMH4CLJf0jjwXsQJ4BngY+FSOWQ/cl9O7cp5c/lB+DvsuYK2kcyUtBpYAjwCPAkskLZZ0DtWJ8V0Tf2hmZlanMfcwIuKApHuBHwDDwBNUf+U/AOyU9NWs3ZWr3AV8S9IAcJIqAIiIQ5LuoQqbYeDGiPgNgKTPAXuorsDaFhGH6nuIZmZWh6L3YUTEFmDLqPLzVFc4jR77S+DTZ9jOzcDNXeq7gd0lvZiZ2fTwO73NzKyIA8PMzIo4MMzMrIgDw8zMijgwzMysiAPDzMyKODDMzKyIA8PMzIo4MMzMrIgDw8zMijgwzMysiAPDzMyKODDMzKyIA8PMzIo4MMzMrIgDw8zMiowZGJLeK+nJjtvrkr4oaa6kvZIO5885OV6S7pA0IOkpSZd2bGt9jj8saX1H/TJJB3OdO/KrYM3MrEHGDIyIeC4iLomIS4DLgF8A3wc2A/siYgmwL+cBVlN9X/cSYCNwJ4CkuVTf2ncF1Tf1bRkJmRzz2Y71VtXy6MzMrDZne0hqBfDjiHgRWANsz/p24OqcXgPsiMp+YLakecCVwN6IOBkRp4C9wKpcdn5E7I+IAHZ0bMvMzBribANjLfCdnG5FxPGcfglo5fR84GjHOoNZe6v6YJe6mZk1yMzSgZLOAT4J3DR6WUSEpKizsTP0sJHqMBetVot2uz2u7WxaNlxjV6drzep+H+Ptd7IMDQ01rqdueqHPXugR3GedeqFHqLfP4sCgOjfxg4h4OedfljQv
Io7nYaUTWT8GLOxYb0HWjgH9o+rtrC/oMv40EbEV2ArQ19cX/f393YaN6brND4xrvVKblg1z68HTn9oj1/ZP6v2erXa7zXifw6nUC332Qo/gPuvUCz1CvX2ezSGpa/jt4SiAXcDIlU7rgfs66uvyaqnlwGt56GoPsFLSnDzZvRLYk8tel7Q8r45a17EtMzNriKI9DEnnAR8H/mNH+RbgHkkbgBeBz2R9N3AVMEB1RdX1ABFxUtJXgEdz3Jcj4mRO3wDcDcwCHsybmZk1SFFgRMTPgQtH1V6humpq9NgAbjzDdrYB27rUHwMuLunFzMymh9/pbWZmRRwYZmZWxIFhZmZFHBhmZlbEgWFmZkUcGGZmVsSBYWZmRRwYZmZWxIFhZmZFzubDB22CFk3yhx6+lSO3fGLa7tvMfjd4D8PMzIo4MMzMrIgDw8zMijgwzMysiAPDzMyKODDMzKyIA8PMzIoUBYak2ZLulfQjSc9K+pCkuZL2SjqcP+fkWEm6Q9KApKckXdqxnfU5/rCk9R31yyQdzHXuyO/2NjOzBindw7gd+KeIeB/wAeBZYDOwLyKWAPtyHmA1sCRvG4E7ASTNBbYAVwCXA1tGQibHfLZjvVUTe1hmZla3MQND0gXAR4G7ACLi1xHxKrAG2J7DtgNX5/QaYEdU9gOzJc0DrgT2RsTJiDgF7AVW5bLzI2J/fh/4jo5tmZlZQ5R8NMhi4KfA30r6APA48AWgFRHHc8xLQCun5wNHO9YfzNpb1Qe71E8jaSPVXgutVot2u13Q/uk2LRse13qlWrMm/z7OVrfnamhoaNzP4VTqhT57oUdwn3XqhR6h3j5LAmMmcCnwJxFxQNLt/PbwEwAREZKilo7eQkRsBbYC9PX1RX9//7i2c90kf6bTpmXD3HqwWR/TdeTa/tNq7Xab8T6HU6kX+uyFHsF91qkXeoR6+yw5hzEIDEbEgZy/lypAXs7DSeTPE7n8GLCwY/0FWXur+oIudTMza5AxAyMiXgKOSnpvllYAzwC7gJErndYD9+X0LmBdXi21HHgtD13tAVZKmpMnu1cCe3LZ65KW59VR6zq2ZWZmDVF63ORPgG9LOgd4HrieKmzukbQBeBH4TI7dDVwFDAC/yLFExElJXwEezXFfjoiTOX0DcDcwC3gwb2Zm1iBFgRERTwJ9XRat6DI2gBvPsJ1twLYu9ceAi0t6MTOz6eF3epuZWREHhpmZFXFgmJlZEQeGmZkVcWCYmVkRB4aZmRVxYJiZWREHhpmZFXFgmJlZEQeGmZkVcWCYmVkRB4aZmRVxYJiZWREHhpmZFXFgmJlZEQeGmZkVKQoMSUckHZT0pKTHsjZX0l5Jh/PnnKxL0h2SBiQ9JenSju2sz/GHJa3vqF+W2x/IdVX3AzUzs4k5mz2MfxcRl0TEyDfvbQb2RcQSYF/OA6wGluRtI3AnVAEDbAGuAC4HtoyETI75bMd6q8b9iMzMbFJM5JDUGmB7Tm8Hru6o74jKfmC2pHnAlcDeiDgZEaeAvcCqXHZ+ROzPr3fd0bEtMzNriNLACOCfJT0uaWPWWhFxPKdfAlo5PR842rHuYNbeqj7YpW5mZg0ys3DcRyLimKR/BeyV9KPOhRERkqL+9t4sw2ojQKvVot1uj2s7m5YN19jV6VqzJv8+zla352poaGjcz+FU6oU+e6FHcJ916oUeod4+iwIjIo7lzxOSvk91DuJlSfMi4ngeVjqRw48BCztWX5C1Y0D/qHo76wu6jO/Wx1ZgK0BfX1/09/d3Gzam6zY/MK71Sm1aNsytB0uzeGocubb/tFq73Wa8z+FU6oU+e6FHcJ916oUeod4+xzwkJek8Se8amQZWAk8Du4CRK53WA/fl9C5gXV4ttRx4LQ9d7QFWSpqTJ7tXAnty2euSlufVUes6tmVmZg1R8mdwC/h+Xuk6E/i7iPgnSY8C90jaALwIfCbH7wauAgaAXwDXA0TESUlfAR7NcV+OiJM5fQNwNzALeDBv
ZmbWIGMGRkQ8D3ygS/0VYEWXegA3nmFb24BtXeqPARcX9GtmZtPE7/Q2M7MiDgwzMyviwDAzsyIODDMzK+LAMDOzIg4MMzMr4sAwM7MiDgwzMyviwDAzsyIODDMzK+LAMDOzIg4MMzMr4sAwM7MiDgwzMyviwDAzsyIODDMzK+LAMDOzIsWBIWmGpCck3Z/ziyUdkDQg6buSzsn6uTk/kMsXdWzjpqw/J+nKjvqqrA1I2lzfwzMzs7qczR7GF4BnO+a/BtwWEe8BTgEbsr4BOJX123IckpYCa4H3A6uAb2QIzQC+DqwGlgLX5FgzM2uQosCQtAD4BPDNnBfwMeDeHLIduDqn1+Q8uXxFjl8D7IyIX0XEC8AAcHneBiLi+Yj4NbAzx5qZWYPMLBz3V8CfAe/K+QuBVyNiOOcHgfk5PR84ChARw5Jey/Hzgf0d2+xc5+io+hXdmpC0EdgI0Gq1aLfbhe2/2aZlw2MPmoDWrMm/j7PV7bkaGhoa93M4lXqhz17oEdxnnXqhR6i3zzEDQ9IfAici4nFJ/bXc6zhFxFZgK0BfX1/094+vnes2P1BjV6fbtGyYWw+WZvHUOHJt/2m1drvNeJ/DqdQLffZCj+A+69QLPUK9fZb8r/Zh4JOSrgLeDpwP3A7MljQz9zIWAMdy/DFgITAoaSZwAfBKR31E5zpnqpuZWUOMeQ4jIm6KiAURsYjqpPVDEXEt8DDwqRy2Hrgvp3flPLn8oYiIrK/Nq6gWA0uAR4BHgSV51dU5eR+7anl0ZmZWm4kcN/kvwE5JXwWeAO7K+l3AtyQNACepAoCIOCTpHuAZYBi4MSJ+AyDpc8AeYAawLSIOTaAvMzObBGcVGBHRBto5/TzVFU6jx/wS+PQZ1r8ZuLlLfTew+2x6MTOzqeV3epuZWREHhpmZFXFgmJlZEQeGmZkVcWCYmVkRB4aZmRVxYJiZWREHhpmZFXFgmJlZEQeGmZkVcWCYmVkRB4aZmRVxYJiZWREHhpmZFXFgmJlZEQeGmZkVGTMwJL1d0iOSfijpkKQvZX2xpAOSBiR9N79elfwK1u9m/YCkRR3buinrz0m6sqO+KmsDkjbX/zDNzGyiSvYwfgV8LCI+AFwCrJK0HPgacFtEvAc4BWzI8RuAU1m/LcchaSnV17W+H1gFfEPSDEkzgK8Dq4GlwDU51szMGmTMwIjKUM6+LW8BfAy4N+vbgatzek3Ok8tXSFLWd0bEryLiBWCA6iteLwcGIuL5iPg1sDPHmplZgxSdw8g9gSeBE8Be4MfAqxExnEMGgfk5PR84CpDLXwMu7KyPWudMdTMza5CZJYMi4jfAJZJmA98H3jepXZ2BpI3ARoBWq0W73R7XdjYtGx570AS0Zk3+fZytbs/V0NDQuJ/DqdQLffZCj+A+69QLPUK9fRYFxoiIeFXSw8CHgNmSZuZexALgWA47BiwEBiXNBC4AXumoj+hc50z10fe/FdgK0NfXF/39/WfT/huu2/zAuNYrtWnZMLcePKundtIdubb/tFq73Wa8z+FU6oU+e6FHcJ916oUeod4+S66SenfuWSBpFvBx4FngYeBTOWw9cF9O78p5cvlDERFZX5tXUS0GlgCPAI8CS/Kqq3OoTozvquPBmZlZfUr+DJ4HbM+rmX4PuCci7pf0DLBT0leBJ4C7cvxdwLckDQAnqQKAiDgk6R7gGWAYuDEPdSHpc8AeYAawLSIO1fYIzcysFmMGRkQ8BXywS/15qiucRtd/CXz6DNu6Gbi5S303sLugXzMzmyZ+p7eZmRVxYJiZWREHhpmZFXFgmJlZEQeGmZkVcWCYmVkRB4aZmRVxYJiZWREHhpmZFXFgmJlZEQeGmZkVcWCYmVkRB4aZmRVxYJiZWREHhpmZFXFgmJlZkZKvaF0o6WFJz0g6JOkLWZ8raa+kw/lzTtYl6Q5JA5KeknRpx7bW5/jDktZ31C+TdDDXuUOSJuPBmpnZ+JXsYQwDmyJiKbAcuFHSUmAz
sC8ilgD7ch5gNdX3dS8BNgJ3QhUwwBbgCqpv6tsyEjI55rMd662a+EMzM7M6jRkYEXE8In6Q0/8XeBaYD6wBtuew7cDVOb0G2BGV/cBsSfOAK4G9EXEyIk4Be4FVuez8iNgfEQHs6NiWmZk1xFmdw5C0iOr7vQ8ArYg4noteAlo5PR842rHaYNbeqj7YpW5mZg0ys3SgpHcC/wB8MSJe7zzNEBEhKSahv9E9bKQ6zEWr1aLdbo9rO5uWDdfY1elasyb/Ps5Wt+dqaGho3M/hVOqFPnuhR3CfdeqFHqHePosCQ9LbqMLi2xHxvSy/LGleRBzPw0onsn4MWNix+oKsHQP6R9XbWV/QZfxpImIrsBWgr68v+vv7uw0b03WbHxjXeqU2LRvm1oPFWTwljlzbf1qt3W4z3udwKvVCn73QI7jPOvVCj1BvnyVXSQm4C3g2Iv6yY9EuYORKp/XAfR31dXm11HLgtTx0tQdYKWlOnuxeCezJZa9LWp73ta5jW2Zm1hAlfwZ/GPgj4KCkJ7P2X4FbgHskbQBeBD6Ty3YDVwEDwC+A6wEi4qSkrwCP5rgvR8TJnL4BuBuYBTyYNzMza5AxAyMi/hdwpvdFrOgyPoAbz7CtbcC2LvXHgIvH6sXMzKaP3+ltZmZFHBhmZlbEgWFmZkUcGGZmVsSBYWZmRRwYZmZWxIFhZmZFHBhmZlbEgWFmZkUcGGZmVsSBYWZmRRwYZmZWxIFhZmZFHBhmZlbEgWFmZkUcGGZmVsSBYWZmRUq+03ubpBOSnu6ozZW0V9Lh/Dkn65J0h6QBSU9JurRjnfU5/rCk9R31yyQdzHXuyO/1NjOzhinZw7gbWDWqthnYFxFLgH05D7AaWJK3jcCdUAUMsAW4Argc2DISMjnmsx3rjb4vMzNrgDEDIyL+J3ByVHkNsD2ntwNXd9R3RGU/MFvSPOBKYG9EnIyIU8BeYFUuOz8i9ud3ge/o2JaZmTXIzHGu14qI4zn9EtDK6fnA0Y5xg1l7q/pgl3pXkjZS7bnQarVot9vjan7TsuFxrVeqNWvy7+NsdXuuhoaGxv0cTqVe6LMXegT3Wade6BHq7XO8gfGGiAhJUUczBfe1FdgK0NfXF/39/ePaznWbH6ixq9NtWjbMrQcn/NTW6si1/afV2u02430Op1Iv9NkLPYL7rFMv9Aj19jneq6RezsNJ5M8TWT8GLOwYtyBrb1Vf0KVuZmYNM97A2AWMXOm0Hrivo74ur5ZaDryWh672ACslzcmT3SuBPbnsdUnL8+qodR3bMjOzBhnzuImk7wD9wEWSBqmudroFuEfSBuBF4DM5fDdwFTAA/AK4HiAiTkr6CvBojvtyRIycSL+B6kqsWcCDeTMzs4YZMzAi4pozLFrRZWwAN55hO9uAbV3qjwEXj9WHmZlNL7/T28zMijgwzMysiAPDzMyKODDMzKyIA8PMzIo06+3INmkWdXl3+6Zlw5P+rvcjt3xiUrdvZlPHexhmZlbEgWFmZkUcGGZmVsSBYWZmRRwYZmZWxIFhZmZFHBhmZlbEgWFmZkUcGGZmVsSBYWZmRRrz0SCSVgG3AzOAb0bELdPcktWg20eSnK3xfoSJP5bErF6N2MOQNAP4OrAaWApcI2np9HZlZmadmrKHcTkwEBHPA0jaCawBnpnWrqyn1bF3U6pzL8h7Nva7qimBMR842jE/CFwxTb2YTchUBlUnB5VNtqYERhFJG4GNOTsk6bnp7OdMPg8XAT+b7j7G4j7r04Qe9bWiYdPeZ6Fe6LMXeoQ39/lvJrKhpgTGMWBhx/yCrL1JRGwFtk5VU+Ml6bGI6JvuPsbiPuvTCz2C+6xTL/QI9fbZiJPewKPAEkmLJZ0DrAV2TXNPZmbWoRF7GBExLOlzwB6qy2q3RcShaW7LzMw6NCIwACJiN7B7uvuoSeMPmyX3WZ9e6BHcZ516oUeosU9FRF3bMjOz32FNOYdhZmYN58AoJGmbpBOSnu6ozZW0V9Lh/Dkn
65J0h6QBSU9JurRjnfU5/rCk9TX3uFDSw5KekXRI0hca2ufbJT0i6YfZ55eyvljSgeznu3kBBJLOzfmBXL6oY1s3Zf05SVfW2Wduf4akJyTd3+Aej0g6KOlJSY9lrVG/89z+bEn3SvqRpGclfahpfUp6bz6PI7fXJX2xgX3+af7beVrSd/Lf1OS/NiPCt4Ib8FHgUuDpjtp/Bzbn9Gbgazl9FfAgIGA5cCDrc4Hn8+ecnJ5TY4/zgEtz+l3A/6H6qJWm9SngnTn9NuBA3v89wNqs/zXwn3L6BuCvc3ot8N2cXgr8EDgXWAz8GJhR8+/9PwN/B9yf803s8Qhw0ahao37neR/bgT/O6XOA2U3ss6PfGcBLVO9daEyfVG90fgGY1fGavG4qXpu1P8m/yzdgEW8OjOeAeTk9D3gup/8GuGb0OOAa4G866m8aNwn93gd8vMl9Au8AfkD1zv6fATOz/iFgT07vAT6U0zNznICbgJs6tvXGuJp6WwDsAz4G3J/32agec5tHOD0wGvU7By6g+k9OTe5zVG8rgf/dtD757SdjzM3X2v3AlVPx2vQhqYlpRcTxnH4JaOV0t486mf8W9drlbucHqf56b1yfeajnSeAEsJfqr5tXI2K4y32+0U8ufw24cAr6/Cvgz4D/l/MXNrBHgAD+WdLjqj4NAZr3O18M/BT42zzE901J5zWwz05rge/kdGP6jIhjwF8APwGOU73WHmcKXpsOjJpEFdGNuORM0juBfwC+GBGvdy5rSp8R8ZuIuITqr/jLgfdNc0tvIukPgRMR8fh091LgIxFxKdWnPd8o6aOdCxvyO59JdUj3zoj4IPBzqkM7b2hInwDk8f9PAn8/etl095nnT9ZQhfC/Bs4DVk3FfTswJuZlSfMA8ueJrJ/po06KPgJlIiS9jSosvh0R32tqnyMi4lXgYapd6NmSRt4b1Hmfb/STyy8AXpnkPj8MfFLSEWAn1WGp2xvWI/DGX5xExAng+1QB3LTf+SAwGBEHcv5eqgBpWp8jVgM/iIiXc75Jff4B8EJE/DQi/gX4HtXrddJfmw6MidkFjFz9sJ7qnMFIfV1eQbEceC13Z/cAKyXNyb8SVmatFpIE3AU8GxF/2eA+3y1pdk7PojrP8ixVcHzqDH2O9P8p4KH8K28XsDavAlkMLAEeqaPHiLgpIhZExCKqQxMPRcS1TeoRQNJ5kt41Mk31u3qahv3OI+Il4Kik92ZpBdXXFzSqzw7X8NvDUSP9NKXPnwDLJb0j/82PPJeT/9qcjJNFv4s3qhfPceBfqP5a2kB1HHAfcBj4H8DcHCuqL4T6MXAQ6OvYzn8ABvJ2fc09foRqV/kp4Mm8XdXAPv8t8ET2+TTw37L++/mCHaA6FHBu1t+e8wO5/Pc7tvXn2f9zwOpJ+t3389urpBrVY/bzw7wdAv486436nef2LwEey9/7P1JdPdTEPs+j+gv8go5ao/oEvgT8KP/9fIvqSqdJf236nd5mZlbEh6TMzKyIA8PMzIo4MMzMrIgDw8zMijgwzMysiAPDzMyKODDMzKyIA8PMzIr8fxQhh7kF0BR2AAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "df.BlobSum.hist()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 28,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0"
+ ]
+ },
+ "execution_count": 28,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "#Small / Blobs where the pixels are only a \"little\" hit\n",
+ "dfX = df[df.BlobSum <= 255]\n",
+ "len(dfX)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Augmenting by flipping in both axis (datax4)\n",
+ "df[\"Version\"] = \"Normal\"\n",
+ "dfFlipped = df.copy(deep=True)\n",
+ "dfFlipped.BlobImages = dfFlipped.BlobImages.apply(lambda x: np.flipud(x))\n",
+ "dfFlipped[\"Version\"] = \"FlippedUD\"\n",
+ "df = df.append(dfFlipped)\n",
+ "dfFlipped = df.copy(deep=True)\n",
+ "dfFlipped.BlobImages = dfFlipped.BlobImages.apply(lambda x: np.fliplr(x))\n",
+ "dfFlipped[\"Version\"] = \"FlippedLR\"\n",
+ "df = df.append(dfFlipped)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Sample Size argumented: 618012\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"Sample Size argumented:\", len(df))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 31,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def pasteToEmpty (blob):\n",
+ " croped_im = np.zeros((27,15))\n",
+ " croped_im[0:blob.shape[0],0:blob.shape[1]] = blob\n",
+ " return croped_im"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 32,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"Blobs\"] = df.BlobImages.apply(lambda x: pasteToEmpty(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.to_pickle(\"DataStudyCollection/df_statistics.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 35,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[[\"userID\", \"TaskID\", \"Version\", \"Blobs\", \"InputMethod\"]].to_pickle(\"DataStudyCollection/df_blobs_area.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# display blobs"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 36,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "userID 1\n",
+ "Timestamp 1,54515E+12\n",
+ "Current_Task 121\n",
+ "Task_amount 680\n",
+ "TaskID 0\n",
+ "VersionID 7\n",
+ "RepetitionID 0\n",
+ "Actual_Data True\n",
+ "Is_Pause False\n",
+ "Image [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]...\n",
+ "IsMax True\n",
+ "MaxRepetition 0\n",
+ "ImageSum 1495\n",
+ "BlobCount 1\n",
+ "BlobImages [[2, 2, 11, 11, 2], [2, 9, 40, 42, 9], [4, 13,...\n",
+ "BlobW 3\n",
+ "BlobH 4\n",
+ "BlobArea 12\n",
+ "BlobSum 1071\n",
+ "Version Normal\n",
+ "Blobs [[2.0, 2.0, 11.0, 11.0, 2.0, 0.0, 0.0, 0.0, 0....\n",
+ "InputMethod 0\n",
+ "Input Knuckle\n",
+ "Name: 299548, dtype: object\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAANoAAAFpCAYAAAD6NDa0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAEEtJREFUeJzt3W2MXOV5xvHrwi+AHINfCFsXHBwkHJkS4YCNU2RFriJRg6qYSBECVa0DVE7UIoVPrZugBjWq1LRRKlWKVKEG4kopSVpCsCxScGkS8gUaY/FiAsQO2AqWsQVOMSTUxPjuhznbTk9mvXP2nLl5dvz/SaudOXPveZ6Z8eU5L/fMOCIEYLTOeLcnAJwOCBqQgKABCQgakICgAQkIGpCAoAEJCBqQgKABCQgakGBu5mC2R9rvdcYZo/9/4+TJkyMfA7NLRHi6mtSgSaMNw9lnnz2ydU966623GtUTTEgtNx1tb7T9gu19trd2NSlg3Mw4aLbnSPqKpGslXSrpJtuXdjUxYJy0eUW7StK+iHgxIt6W9A1Jm7qZFjBe2gTtAkk/67v+crWssVi9eqT1J1etUswdfne0aX3T+WjdutOrPmOM0uprPNM3ftr+hKSNEfFH1fU/kLQuIm6r1W2RtKW6eiUHQzBuRn3U8aCk5X3XL6yW1Sdxl6S7pNEf3gdK1ebl5UeSLrH9ftvzJd0oaXs30wLGy4xf0SLihO3bJD0kaY6kuyPi2c5mBoyRGe+jzWgwO9hHw7gpsjOkiTPPPLNR/fLly6cv6jOT0L/00kuN6psGE+OJpmIgAUEDEhA0IAFBAxIQNCBBEUFr3Lu4dm2j+v9etUonli4duv4Xa9bonXPPHdl8iuvLo9ex+/qaos+jNT28f9FFFzWq5/A+ujDMebQiXtGAcUfQgAQEDUhA0IAERX8K1twG73KWpJtvvrlR/fnnn9+oXpK2bm32GUQcDIHEKxqQgqABCQgakICgAQkIGpCgiKA17XU8sW6dTk5MDFX74vLlOrZgwdDrfn5iQr+YP7/RfN5eu1bvNDmCWVpfHr2O3dfXpPc6Njlk3/QzQO64445G9RmH9w8fPtx4DMwu9DoChSBoQAKCBiQgaECC9F7HJh8oOmfOnEbrXrlyZaP6hQsXNqqXpLPOOqvx3wC8ogEJCBqQgKABCQgakICgAQmKCFrTXsd3Vq7UySE/p3HP0qX6ZYO2r6cWLdLRhr2Ox1evbvSd18X15dHr2H19TdGf63jOOec0Wv8999zTqH4mh/dvvfXWRvUHDhxoPAZmF3odgUIQNCABQQMSEDQgQdFv/Gz6JReXX355o/oFDd55PWn37t2N6l977bXGY2B24WAIUAiCBiQgaEACggYkIGhAAoIGJCgiaI2/LH7VqkZNvG9ceqneXrJk6PqjH/ygji9aNHT9r66+Wiff+96h64trgKWpuPv6Gs6jNcR5NNRxHg0oBEEDEhA0IAFBAxIU/Q7rph+gOm/evKbzaVQvScePH29Uf+LEicZjYHbhYAhQiFYfCW57v6Q3JL0j6URErOliUsC46eKz938nIl7tYD3A2GLTEUjQNmgh6WHbT9je0sWEgHHUNmjrI+IKSddK+hPbH6kX2N5ie5ftXVOtpHGv49q1zepXrlQ0+L7qpr2UTedfXF8evY7d19d0dnjf9p2S3oyIL52ihsP7GDsjPbxve4HthZOXJV0jac9M1weMszZHHSck3V+9KsyV9M8R8W+dzAoYM3SGNMSmI+roDAEKkf6KljYYkIRXNKAQBA1IQNCABAQNSEDQgARlBK20PjXqu63PGKO0+hoO7wMtcXgfKARBAxIQNCABQQMSEDQgAUEDEhA0IAFBAxIQNCABQQMSlBG00vrUqO+2PmOM0upr6HUEWqLXESgEQQMSEDQgAUEDEhA0IAFBAxIQNCABQQMSEDQgAUEDEpQRtNL61Kjvtj5jjNLqa+h1
BFqi1xEoBEEDEhA0IAFBAxIQNCABQQMSEDQgAUEDEhA0IAFBAxKUEbTS+tSo77Y+Y4zS6mvodQRaotcRKARBAxIQNCABQQMSEDQgAUEDEhA0IMG0QbN9t+0jtvf0LVtie6ftvdXvxaOdJjC7DfOK9jVJG2vLtkp6JCIukfRIdR3AFKYNWkQ8KulobfEmSduqy9skXd/xvICxMtN9tImIOFRdfkXSxFSFtrfY3mV715RrK61Pjfpu6zPGKK2+ZqheR9srJO2IiMuq6/8VEYv6bv95REy7n0avI8bRKHsdD9teJknV7yMzXA9wWphp0LZL2lxd3izpgW6mA4ynaTcdbd8raYOk8yQdlvR5Sd+R9C1J75N0QNINEVE/YDJoXWw6YuwMs+nI+9GAlng/GlAIggYkIGhAAoIGJCBoQAKCBiQoI2il9alR3219xhil1ddwHg1oifNoQCEIGpCAoAEJCBqQgKABCQgakICgAQkIGpCAoAEJCBqQoIygldanRn239RljlFZfQ68j0BK9jkAhCBqQgKABCQgakICgAQkIGpCAoAEJCBqQgKABCQgakKCMoJXWp0Z9t/UZY5RWX0OvI9ASvY5AIQgakICgAQkIGpCAoAEJCBqQgKABCQgakICgAQkIGpCgjKCV1qdGfbf1GWOUVl9DryPQEr2OQCEIGpCAoAEJCBqQgKABCQgakICgAQmmDZrtu20fsb2nb9mdtg/afrL6uW600wRmt2Fe0b4maeOA5X8XEaurnwe7nRYwXqYNWkQ8KulowlyAsdVmH+02209Xm5aLW82itD416rutzxijtPqaoXodba+QtCMiLquuT0h6VVJI+oKkZRFxyxR/u0XSlurqla1mCxRomF7HGQVt2NsG1NJUjLEzsqZi28v6rn5c0p6pagFIc6crsH2vpA2SzrP9sqTPS9pge7V6m477JX1qhHMEZj3ejwa0xPvRgEIQNCABQQMSEDQgAUEDEhA0IAFBAxKUEbTSGkKp77Y+Y4zS6ms4YQ20xAlroBAEDUhA0IAEBA1IQNCABAQNSEDQgAQEDUhA0IAEBA1IUEbQSutTo77b+owxSquvodcRaIleR6AQBA1IQNCABAQNSEDQgAQEDUhA0IAEBA1IQNCABAQNSFBG0ErrU6O+2/qMMUqrr6HXEWiJXkegEAQNSEDQgAQEDUhA0IAEBA1IQNCABAQNSEDQgAQEDUhQRtBK61Ojvtv6jDFKq6+h1xFoiV5HoBAEDUhA0IAEBA1IQNCABAQNSDBt0Gwvt/092z+2/aztz1TLl9jeaXtv9Xvx6KcLzE7TnkezvUzSsojYbXuhpCckXS/pk5KORsRf294qaXFE/Nk06+I8GsZOJ+fRIuJQROyuLr8h6TlJF0jaJGlbVbZNvfABGKDRPprtFZI+JOlxSRMRcai66RVJE53ODBgjQwfN9nsk3Sfp9og41n9b9LY/B24W2t5ie5ftXVOuvLQ+Neq7rc8Yo7T6mqF6HW3Pk7RD0kMR8eVq2QuSNkTEoWo/7vsR8YFp1sM+GsZOJ/toti3pq5KemwxZZbukzdXlzZIemMkkgdPBMEcd10v6oaRnJJ2sFn9Wvf20b0l6n6QDkm6IiKPTrItXNIydYV7ReJsM0BJvkwEKQdCABAQNSEDQgAQEDUhA0IAEBA1IUEbQSutTo77b+owxSquv4YQ10BInrIFCEDQgAUEDEhA0IAFBAxIQNCABQQMSEDQgAUEDEhA0IEEZQSutT436buszxiitvoZeR6Aleh2BQhA0IAFBAxIQNCABQQMSEDQgAUEDEhA0IAFBAxIQNCBBGUErrU+N+m7rM8Yorb6GXkegJXodgUIQNCABQQMSEDQgAUEDEhA0IAFBAxIQNCABQQMSEDQgQRlBK61Pjfpu6zPGKK2+hl5HoCV6HYFCEDQgAUEDEhA0IAFBAxIQNCABQQMSTBs028ttf8/2j20/a/sz1fI7bR+0/WT1c93opwvMTtOesLa9TNKy
iNhte6GkJyRdL+kGSW9GxJeGHowT1hhDw5ywnjvESg5JOlRdfsP2c5IuaD894PTRaB/N9gpJH5L0eLXoNttP277b9uIZz6K0PjXqu63PGKO0+pqhex1tv0fSDyT9VUR82/aEpFclhaQvqLd5ecuAv9siaUt19cpWswUKNMym41BBsz1P0g5JD0XElwfcvkLSjoi4bJr1sI+GsdNJU7FtS/qqpOf6Q1YdJJn0cUl7ZjJJ4HQwzFHH9ZJ+KOkZSSerxZ+VdJOk1eptOu6X9KnqwMmp1sUrGsZOZ5uOXSFoGEe8Hw0oBEEDEhA0IAFBAxIQNCABQQMSlBG00vrUqO+2PmOM0uprOI8GtMR5NKAQBA1IQNCABAQNSEDQgATTfmZIx16VdGDA8vOq27KdbuO+m2OP67gXDVOUenh/yknYuyJiDeOO79in27h1bDoCCQgakKCUoN3FuGM/9uk27v9TxD4aMO5KeUUDxlpq0GxvtP2C7X22tw64/Uzb36xuf7z6vMi2Yw78ko5azQbbr/d9YcdftB23Wu9+289U69w14Hbb/vvq/j5t+4oOxvxA3/140vYx27fXajq7v9WnVB+xvadv2RLbO23vrX4P/BRr25urmr22N3cw7t/afr56LO+3vWiKvz3l8zISEZHyI2mOpJ9KuljSfElPSbq0VvPHkv6hunyjpG92MO4ySVdUlxdK+smAcTeo9wGwXd/n/ZLOO8Xt10n6riRL+rCkx0fwmL8i6aJR3V9JH5F0haQ9fcv+RtLW6vJWSV8c8HdLJL1Y/V5cXV7cctxrJM2tLn9x0LjDPC+j+Ml8RbtK0r6IeDEi3pb0DUmbajWbJG2rLv+rpI9WH+A6YxFxKCJ2V5ffkFTSl3RskvRP0fOYpEW1D6Zt66OSfhoRg5oEOhERj0o6Wlvc/zxuU+/bh+p+V9LOiDgaET+XtFPSxjbjRsTDEXGiuvqYpAuHXd+oZQbtAkk/67v+sn79H/z/1lQP2OuSlnY1gQFf0tHvt20/Zfu7tn+royFD0sO2n6i+g6BumMekjRsl3TvFbaO4v5Mm4v8+TPcVSRMDakZ9329Rb2thkOmel85lt2C9a6ov6bhP0u0Rcax28271Nq/erL5Q8TuSLulg2PURcdD2+ZJ22n6++p945GzPl/QxSX8+4OZR3d9fExGR/YZf25+TdELS16coSX9eMl/RDkpa3nf9wmrZwBrbcyWdK+m1tgNXX9Jxn6SvR8S367dHxLGIeLO6/KCkebbPaztuRBysfh+RdL96m8/9hnlMZupaSbsj4vCAeY3k/vY5PLkJXP0+MqBmJPfd9icl/Z6k349qh6xuiOelc5lB+5GkS2y/v/rf9kZJ22s12yVNHn36hKT/mOrBGtZUX9JRq/mNyX1B21ep97i0CrjtBe59Q6psL1BvR73+RSDbJf1hdfTxw5Jej2m+v6CBmzTFZuMo7m9N//O4WdIDA2oeknSN7cXVUclrqmUzZnujpD+V9LGI+OUUNcM8L93LPPKi3lG2n6h39PFz1bK/rB4YSTpL0r9I2ifpPyVd3MGY69XbJn9a0pPVz3WSPi3p01XNbZKeVe9I6GOSru5g3Iur9T1VrXvy/vaPa0lfqR6PZySt6ehxXqBecM7tWzaS+6temA9J+pV6+1m3qrdf/YikvZL+XdKSqnaNpH/s+9tbqud6n6SbOxh3n3r7fZPP8+QR7N+U9OCpnpdR/9AZAiSgMwRIQNCABAQNSEDQgAQEDUhA0IAEBA1IQNCABP8DxXjFZ2QGIr8AAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "plt.clf()\n",
+ "plt.figure(figsize=(6, 6))\n",
+ "ax = plt.gca()\n",
+ "data_point = 100\n",
+ "data = df.Blobs.iloc[data_point]\n",
+ "print(df.iloc[data_point])\n",
+ "plt.imshow(data, cmap='gray', vmin=0, vmax=255)\n",
+ "# Loop over data dimensions and create text annotations.\n",
+ "for i in range(0, data.shape[0]):\n",
+ " for j in range(0, data.shape[1]):\n",
+ " text = ax.text(j, i, int(data[i, j]),\n",
+ " ha=\"center\", va=\"center\", color=\"cyan\", fontsize=1)\n",
+ "plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_06_CNN_Baseline.ipynb b/python/Step_06_CNN_Baseline.ipynb
new file mode 100644
index 0000000..6b92955
--- /dev/null
+++ b/python/Step_06_CNN_Baseline.ipynb
@@ -0,0 +1,890 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "## USE for Multi GPU Systems\n",
+ "#import os\n",
+ "#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+ "\n",
+ "%matplotlib inline\n",
+ "\n",
+ "from scipy.odr import *\n",
+ "from scipy.stats import *\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import os\n",
+ "import time\n",
+ "import matplotlib.pyplot as plt\n",
+ "import ast\n",
+ "from multiprocessing import Pool\n",
+ "\n",
+ "import scipy\n",
+ "\n",
+ "from IPython import display\n",
+ "from matplotlib.patches import Rectangle\n",
+ "\n",
+ "from sklearn.metrics import mean_squared_error\n",
+ "import json\n",
+ "\n",
+ "import scipy.stats as st\n",
+ "from sklearn.metrics import r2_score\n",
+ "\n",
+ "\n",
+ "from matplotlib import cm\n",
+ "from mpl_toolkits.mplot3d import axes3d\n",
+ "import matplotlib.pyplot as plt\n",
+ "from matplotlib.patches import Ellipse\n",
+ "\n",
+ "import copy\n",
+ "\n",
+ "from sklearn.model_selection import LeaveOneOut, LeavePOut\n",
+ "\n",
+ "from multiprocessing import Pool\n",
+ "import cv2\n",
+ "\n",
+ "import sklearn\n",
+ "import random\n",
+ "from sklearn import neighbors\n",
+ "from sklearn import svm\n",
+ "from sklearn import tree\n",
+ "from sklearn import ensemble\n",
+ "from sklearn.model_selection import GridSearchCV\n",
+ "from sklearn.metrics import classification_report\n",
+ "\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "import pandas as pd\n",
+ "import math\n",
+ "\n",
+ "# Importing matplotlib to plot images.\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "%matplotlib inline\n",
+ "\n",
+ "# Importing SK-learn to calculate precision and recall\n",
+ "import sklearn\n",
+ "from sklearn import metrics\n",
+ "from sklearn.model_selection import train_test_split, cross_val_score, LeaveOneGroupOut\n",
+ "from sklearn.utils import shuffle\n",
+ "from sklearn.model_selection import GridSearchCV\n",
+ "from sklearn.metrics.pairwise import euclidean_distances\n",
+ "from sklearn.metrics import confusion_matrix\n",
+ "from sklearn.metrics import accuracy_score\n",
+ "\n",
+ "import pickle as pkl\n",
+ "import h5py\n",
+ "\n",
+ "from pathlib import Path\n",
+ "import os.path\n",
+ "import sys\n",
+ "import datetime\n",
+ "import time\n",
+ "\n",
+ "import skimage\n",
+ "\n",
+ "target_names = [\"Knuckle\", \"Finger\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from skimage import measure\n",
+ "from skimage.measure import find_contours, approximate_polygon, \\\n",
+ " subdivide_polygon, EllipseModel, LineModelND"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def getEllipseParams(img):\n",
+ " points = np.argwhere(img > 40)\n",
+ " \n",
+ " contours = skimage.measure.find_contours(img, 40)\n",
+ " points_to_approx = []\n",
+ " highest_val = 0\n",
+ " for n, contour in enumerate(contours):\n",
+ " if (len(contour) > highest_val):\n",
+ " points_to_approx = contour\n",
+ " highest_val = len(contour) \n",
+ " \n",
+ " try:\n",
+ " contour = np.fliplr(points_to_approx)\n",
+ " except Exception as inst:\n",
+ " return [-1, -1, -1, -1, -1]\n",
+ " \n",
+ "\n",
+ " ellipse = skimage.measure.fit.EllipseModel()\n",
+ " ellipse.estimate(contour)\n",
+ " try:\n",
+ " xc, yc, a, b, theta = ellipse.params \n",
+ " except Exception as int:\n",
+ " return [-1, -1, -1, -1, -1]\n",
+ " \n",
+ " return [xc, yc, a, b, theta]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[ 1 2 9 6 4 14 17 16 12 3 10 18 5] [13 8 11 15 7]\n",
+ "13 : 5\n",
+ "0.7222222222222222 : 0.2777777777777778\n"
+ ]
+ }
+ ],
+ "source": [
+ "# the data, split between train and test sets\n",
+ "df = pd.read_pickle(\"DataStudyCollection/df_statistics.pkl\")\n",
+ "\n",
+ "lst = df.userID.unique()\n",
+ "np.random.seed(42)\n",
+ "np.random.shuffle(lst)\n",
+ "test_ids = lst[-5:]\n",
+ "train_ids = lst[:-5]\n",
+ "\n",
+ "df[\"Set\"] = \"Test\"\n",
+ "df.loc[df.userID.isin(train_ids), \"Set\"] = \"Train\"\n",
+ "print(train_ids, test_ids)\n",
+ "print(len(train_ids), \":\", len(test_ids))\n",
+ "print(len(train_ids) / len(lst), \":\", len(test_ids)/ len(lst))\n",
+ "\n",
+ "#df_train = df[df.userID.isin(train_ids)]\n",
+ "#df_test = df[df.userID.isin(test_ids) & (df.Version == \"Normal\")]\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAJ4AAAD8CAYAAACGuR0qAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAADetJREFUeJzt3XuQVPWZxvHvO8AwMAy3gCNBRbCQDV4yuoSoYArvqNkga0K8ZXHXCmrFXbWyu2VlqzS7+YdaNWx243pJQImV6LoaSjZFokhijHeZaBBULuE+AgPKZYIyzDDv/tFnKsNlunu6e/rt6Xk+VVPdfW79UvVwus85/TuvuTsixVYRXYD0TgqehFDwJISCJyEUPAmh4EkIBU9CKHgSQsGTEH2L+WaV1t+rqO58AbO061ufDP9PDrWlnZ3xGo2u4uStid273H1kpuXyCp6ZTQd+APQBfuzuc9MtX0U1X6y4uPPtVVamfb+KmkFp5/uf9qefnyFY3tycdr5k9oI/vSmb5XL+qDWzPsADwOXAROBaM5uY6/akd8nnO95kYJ27r3f3g8CTwIzClCXlLp/gjQa2dHi9NZl2GDObY2bLzWx5C/ook5RuP6p190fcfZK7T+pH/+5+O+kh8gleA3Bih9cnJNNEMsoneG8B481srJlVAtcAiwtTlpS7nE+nuHurmd0GPEfqdMoCd1+VdiWztKdMKgYOTL96htMp1jf9P6d1R2Pa+VI8eZ3Hc/clwJIC1SK9iC6ZSQgFT0IoeBKiqD8SKKR+lX353NljOHnCKCoqjLY2Z/PKzbxfv4HmT1uiy5MMelzwaoYO5Ou3XsSV151L1cCjj5BbDrbyxtKVPPEfv2T9Kp1WLFU9KnjnXnIa3773GqprBgDwx/caWP2HzTR/2kJlVT8mnHEC404bzdQr65h6ZR2/fuZNHvjOU3zSdCC4cjlSaQVv5PBOZ13x9S/yrXtm0Keigpc2bOTe3/2O9xp3HrbMwHpj5OBqbpz2l8w67/NcePVkTvnSeG6b/ywbd+7msw/uTfv2bQcU0GLpEQcXZ08Zzz/860z6VFQw7+VX+Ntnfn5U6Nrt3Lefexe/xNX3Pc77DY2MGTmM+bd+lVHDaopctaRT8sEbMqyaf5w7C4B5r7zKD19/I6v1Nu/aw+wf/g9vrtvCcUMG8dA3Z1IzLM2vn6WoSj541956IcNH1rDirfX8d5aha/fpwVbuePT/WLttF+NqP8Mt/35DN1UpXVXSwauuqWL6174AwIPfW0xbDmMimg40c/ujiznQ0srF10+lbpp+JF0KSjp4508/g6oBlbz96lo2rNme83a2fLSXh5e+DqC9Xoko6eCdOXkcAK++kP5HL9l47MV6Ptq+h7Gnn8iZ5/9F3tuT/JR08M6YNBaAFW9tyHtbrYfaWDL/1wB85ZZL8t6e5Keo5/EMsDRjZ33An69EVPbvy8hRQzl4sJVNDXvwAZWsveHBtNuvbz6Ydv4Pvnoj3/iXv2by5XX0H11LS3PrYfPbtupKR7GU7B6vZnDq6kTT3k8yjofN1kc79vLHVQ30r6rkc2efXJBtSm5KNniDaqoA2N9U2JFpq95aD8CEujEF3a50TckG75NPUh+bAwcVdmTahxtTVzxGjBpa0O1K15Rs8PZ8nLodxdACX23Ys7Mptd0R6cdvSPcq2eC1HGxl7+799O3Xh5HHDynYdv+071MABiXfISVGyQYPYM17HwJwWt1JBdtmdRK4/fqpVKiSDt47b6YOBL4wZXzBtlkzNDWEsmnPJwXbpnRdUc/jOelvFVbRcPi419eefIVv3nkZUy+cyEN3/oSz/+3WtNs/OCT9/fVOOrCasafWArBt/XZcv78LU9J7vA83NFL/m1VUDazk0uvOK8g2zzg3tfdc+dragmxPclPSwQNYPP9FAGb9/WUMqa7Ka1ujxx3HSaeO4tP9B1j7h80FqE5yVfLBe+P5
d1nxyhqGjqjhjplfymtbV825EIDfLlpOa8uhQpQnOSr54AH85z/9lIPNLcw873S+ck5uv6ebcPwILrt+Cm1tbSx6eFmBK5Su6hHB27puB4/c/TQAd19/KeefPrZL6w/o15f7rrmCyv79+OXjL7Np9bbuKFO6oEcED+AXj/6W+c+9Sd8+Fcy7eQbXXXBWVusNGVjFj2+6mlNqP8Om1dt45O7/7eZKJRulNbwxg/969mUM+LvLJvPPX7uAS846lYeXvMbrHxx9oGAGl585gTsvm8LoYUP4cPc+vnfjQ7rLQImwYnboHmzDPV27gYpBGe5/V9kPgClfPovb77+BwcNTy2/ftIt3X1vD9i0f4w4jPzuMuvMnMGrMCADWvbuFe/7mIXatTn8kq3G1+XvBn65390mZlsu3z8VGoAk4BLRm84aF8Mov3ubtF9/nr26axsxbLub4MSM4PglZR7s+3M3j9y/hhafeoC1D8xUprrz2eEnwJrn7rmyWL9Qe77B1Koxxp5/AaeeMZ8iIwZgZ+3bv54P6Dax5ZxNtbX/+97Xt0Z0EultR9niloK3NWbdiC+tWbMH6HR1MKU35HtU68LyZ1ZvZnEIUJL1Dvnu8qe7eYGbHAUvN7AN3f6njAkkg5wBUkf7m2tJ75LXHc/eG5LERWESqzdSRy6jBihwlnyZ61WZW0/4cuBRYWajCpLzl81FbCyxKxsn2BX7m7r/KuFaao+i2pqY8ypGeJJ8GK+uBzxewFulFesy1WikvCp6EUPAkhIInIRQ8CaHgSQgFT0IoeBJCwZMQCp6EUPAkhIInIRQ8CaHgSQgFT0IoeBJCwZMQCp6EUPAkhIInIRQ8CaHgSQgFT0IoeBJCwZMQCp6EUPAkhIInIRQ8CaHgSQgFT0JkDJ6ZLTCzRjNb2WHacDNbamZrk8dh3VumlJts9niPAdOPmHYXsMzdxwPLktciWcsYvOQu7h8fMXkGsDB5vhC4qsB1SZnL9Tterbu3997cTup+yCJZy/vgwlM9qTq9o7aZzTGz5Wa2vIXmfN9OykSuwdthZqMAksfGzhZUnws5llyDtxiYnTyfDTxbmHKkt8jmdMoTwGvABDPbamY3AXOBS8xsLXBx8lokaxn7XLj7tZ3MuqjAtUgvoisXEkLBkxAKnoRQ8CSEgichFDwJoeBJCAVPQih4EkLBkxAKnoRQ8CSEgichFDwJoeBJCAVPQih4EkLBkxAKnoRQ8CSEgichFDwJoeBJCAVPQih4EkLBkxAKnoRQ8CSEgichFDwJoeBJCAVPQuTaYOW7ZtZgZu8kf1d0b5lSbnJtsAIwz93rkr8lhS1Lyl2uDVZE8pLPd7zbzGxF8lHcaS8z9bmQY8k1eA8CpwB1wDbg/s4WVJ8LOZacgufuO9z9kLu3AT8CJhe2LCl3OQWvvatPYiawsrNlRY4lY5+LpMHKNGCEmW0F7gGmmVkdqR5mG4Gbu7FGKUO5NliZ3w21SC+iKxcSQsGTEAqehFDwJISCJyEUPAmh4EkIBU9CKHgSQsGTEAqehFDwJISCJyEUPAmh4EkIBU9CKHgSQsGTEAqehFDwJISCJyEUPAmh4EkIBU9CKHgSQsGTEAqehFDwJISCJyEUPAmh4EmIbPpcnGhmvzGz98xslZndnkwfbmZLzWxt8tjpDbhFjpTNHq8V+La7TwTOAb5lZhOBu4Bl7j4eWJa8FslKNn0utrn775PnTcD7wGhgBrAwWWwhcFV3FSnlJ+OtaDsys5OBs4A3gFp335bM2g7UdrLOHGAOQBUDc61TykzWBxdmNgh4BrjD3fd1nOfuTupG3EdRnws5lqyCZ2b9SIXup+7+82Tyjva2A8ljY/eUKOUom6NaI3WX9/fd/fsdZi0GZifPZwPPFr48KVfZfMebAnwDeNfM3kmmfQeYCzxlZjcBm4BZ3VOilKNs+ly8DFgnsy8qbDnSW+jKhYRQ8CSEgichFDwJoeBJCAVPQih4EkLBkxAKnoRQ8CSEgich
FDwJoeBJCAVPQih4EkLBkxAKnoRQ8CSEgichFDwJoeBJCAVPQih4EkLBkxAKnoRQ8CSEgichFDwJoeBJCAVPQih4EiKfPhffNbMGM3sn+bui+8uVcpHNHUHb+1z83sxqgHozW5rMm+fu93VfeVKusrkj6DZgW/K8ycza+1yI5KxL3/GO6HMBcJuZrTCzBWopJV2RT5+LB4FTgDpSe8T7O1lvjpktN7PlLTQXoGQpBzn3uXD3He5+yN3bgB8Bk4+1rhqsyLHk3OeivblKYiawsvDlSbnKp8/FtWZWR6qV1Ebg5m6pUMpSPn0ulhS+HOktdOVCQih4EkLBkxAKnoRQ8CSEgichFDwJYe5evDcz20mqqXK7EcCuohXQdaVeH5RejWPcfWSmhYoavKPe3Gy5u08KKyCDUq8PekaNx6KPWgmh4EmI6OA9Evz+mZR6fdAzajxK6Hc86b2i93jSS4UEz8ymm9lqM1tnZndF1JCJmW00s3eToZvLS6CeBWbWaGYrO0wbbmZLzWxt8thjxr0UPXhm1gd4ALgcmEjqB6UTi11Hli5w97oSOV3xGDD9iGl3AcvcfTywLHndI0Ts8SYD69x9vbsfBJ4EZgTU0aO4+0vAx0dMngEsTJ4vBK4qalF5iAjeaGBLh9dbKc1xug48b2b1ZjYnuphO1CbjngG2A7WRxXRFNmMuequp7t5gZscBS83sg2SvU5Lc3c2sx5yiiNjjNQAndnh9QjKtpLh7Q/LYCCyik+GbwXa0j/ZLHhuD68laRPDeAsab2VgzqwSuARYH1NEpM6tO7hODmVUDl1KawzcXA7OT57OBZwNr6ZKif9S6e6uZ3QY8B/QBFrj7qmLXkUEtsCg1pJi+wM/c/VeRBZnZE8A0YISZbQXuAeYCT5nZTaR+9TMrrsKu0ZULCaErFxJCwZMQCp6EUPAkhIInIRQ8CaHgSQgFT0L8Pyp94TKMVNfuAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "fig, ax = plt.subplots(1)\n",
+ "img = df.iloc[0].Blobs\n",
+ "xc, yc, a, b, theta = getEllipseParams(img)\n",
+ "ax.imshow(img)\n",
+ "e = Ellipse(xy=[xc,yc], width=a*2, height=b*2, angle=math.degrees(theta), fill=False, lw=2, edgecolor='w')\n",
+ "ax.add_artist(e)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "lst = df.Blobs.apply(lambda x: getEllipseParams(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "lst2 = np.vstack(lst.values)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(618012, 5)"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "lst2.shape"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"XC\"] = lst2[:,0]\n",
+ "df[\"YC\"] = lst2[:,1]\n",
+ "df[\"EllipseW\"] = lst2[:,2]\n",
+ "df[\"EllipseH\"] = lst2[:,3]\n",
+ "df[\"EllipseTheta\"] = lst2[:,4]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"Area\"] = df[\"EllipseW\"] * df[\"EllipseH\"] * np.pi\n",
+ "df[\"AvgCapa\"] = df.Blobs.apply(lambda x: np.mean(x))\n",
+ "df[\"SumCapa\"] = df.Blobs.apply(lambda x: np.sum(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[8, 11, 6, 7, 16, 15, 14, 10, 9, 2, 3, 13, 17, 5, 12, 1, 4]"
+ ]
+ },
+ "execution_count": 11,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "lst = list(range(1, df.userID.max()))\n",
+ "SEED = 42#448\n",
+ "random.seed(SEED)\n",
+ "random.shuffle(lst)\n",
+ "lst"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dfY = df[df.Set == \"Train\"].copy(deep=True)\n",
+ "dfT = df[(df.Set == \"Test\") & (df.Version == \"Normal\")].copy(deep=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "minmax = min(len(dfY[dfY.Input == \"Finger\"]), len(dfY[dfY.Input == \"Knuckle\"]))\n",
+ "dfX = dfY[dfY.Input == \"Finger\"].sample(minmax)\n",
+ "dfZ = dfY[dfY.Input == \"Knuckle\"].sample(minmax)\n",
+ "dfY = pd.concat([dfX,dfZ])\n",
+ "\n",
+ "minmax = min(len(dfT[dfT.Input == \"Finger\"]), len(dfT[dfT.Input == \"Knuckle\"]))\n",
+ "dfX = dfT[dfT.Input == \"Finger\"].sample(minmax)\n",
+ "dfZ = dfT[dfT.Input == \"Knuckle\"].sample(minmax)\n",
+ "dfT = pd.concat([dfX,dfZ])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " Timestamp | \n",
+ " Current_Task | \n",
+ " Task_amount | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " RepetitionID | \n",
+ " Actual_Data | \n",
+ " Is_Pause | \n",
+ " Image | \n",
+ " ... | \n",
+ " InputMethod | \n",
+ " Set | \n",
+ " XC | \n",
+ " YC | \n",
+ " EllipseW | \n",
+ " EllipseH | \n",
+ " EllipseTheta | \n",
+ " Area | \n",
+ " AvgCapa | \n",
+ " SumCapa | \n",
+ "
\n",
+ " \n",
+ " Input | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " Finger | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " ... | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ "
\n",
+ " \n",
+ " Knuckle | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " ... | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
2 rows × 31 columns
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID Timestamp Current_Task Task_amount TaskID VersionID \\\n",
+ "Input \n",
+ "Finger 9421 9421 9421 9421 9421 9421 \n",
+ "Knuckle 9421 9421 9421 9421 9421 9421 \n",
+ "\n",
+ " RepetitionID Actual_Data Is_Pause Image ... InputMethod Set \\\n",
+ "Input ... \n",
+ "Finger 9421 9421 9421 9421 ... 9421 9421 \n",
+ "Knuckle 9421 9421 9421 9421 ... 9421 9421 \n",
+ "\n",
+ " XC YC EllipseW EllipseH EllipseTheta Area AvgCapa SumCapa \n",
+ "Input \n",
+ "Finger 9421 9421 9421 9421 9421 9421 9421 9421 \n",
+ "Knuckle 9421 9421 9421 9421 9421 9421 9421 9421 \n",
+ "\n",
+ "[2 rows x 31 columns]"
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dfT.groupby(\"Input\").count()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# FEATURE SET: sum of capacitance, avg of capacitance, ellipse area, ellipse width, height and theta."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "features = [\"SumCapa\", \"AvgCapa\", \"Area\", \"EllipseW\", \"EllipseH\", \"EllipseTheta\"]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# ZeroR"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dfT[\"InputMethodPred\"] = 1"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[ 0 9421]\n",
+ " [ 0 9421]]\n",
+ "Accuray: 0.50\n",
+ "Recall: 0.50\n",
+ "Precision: 0.50\n",
+ "F1-Score: 0.33\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " Knuckle 0.00 0.00 0.00 9421\n",
+ " Finger 0.50 1.00 0.67 9421\n",
+ "\n",
+ " micro avg 0.50 0.50 0.50 18842\n",
+ " macro avg 0.25 0.50 0.33 18842\n",
+ "weighted avg 0.25 0.50 0.33 18842\n",
+ "\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/usr/local/lib/python3.6/dist-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n",
+ " 'precision', 'predicted', average, warn_for)\n",
+ "/usr/local/lib/python3.6/dist-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples.\n",
+ " 'precision', 'predicted', average, warn_for)\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(confusion_matrix(dfT.InputMethod.values, dfT.InputMethodPred.values, labels=[0, 1]))\n",
+ "print(\"Accuray: %.2f\" % accuracy_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(\"Recall: %.2f\" % metrics.recall_score(dfT.InputMethod.values, dfT.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"Precision: %.2f\" % metrics.average_precision_score(dfT.InputMethod.values, dfT.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"F1-Score: %.2f\" % metrics.f1_score(dfT.InputMethod.values, dfT.InputMethodPred.values, average=\"macro\"))\n",
+ "print(classification_report(dfT.InputMethod.values, dfT.InputMethodPred.values, target_names=target_names))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# DecisionTreeClassifier"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Fitting 5 folds for each of 240 candidates, totalling 1200 fits\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "[Parallel(n_jobs=30)]: Using backend LokyBackend with 30 concurrent workers.\n",
+ "[Parallel(n_jobs=30)]: Done 140 tasks | elapsed: 10.4s\n",
+ "[Parallel(n_jobs=30)]: Done 390 tasks | elapsed: 31.4s\n",
+ "[Parallel(n_jobs=30)]: Done 740 tasks | elapsed: 1.3min\n",
+ "[Parallel(n_jobs=30)]: Done 1200 out of 1200 | elapsed: 2.4min finished\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'max_depth': 22, 'min_samples_split': 2} 0.8120637794585754\n",
+ "[[7409 2012]\n",
+ " [3096 6325]]\n",
+ "Accuray: 0.73\n",
+ "Recall: 0.73\n",
+ "Precision: 0.67\n",
+ "F1-Score: 0.73\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " Knuckle 0.71 0.79 0.74 9421\n",
+ " Finger 0.76 0.67 0.71 9421\n",
+ "\n",
+ " micro avg 0.73 0.73 0.73 18842\n",
+ " macro avg 0.73 0.73 0.73 18842\n",
+ "weighted avg 0.73 0.73 0.73 18842\n",
+ "\n",
+ "CPU times: user 7.26 s, sys: 3.38 s, total: 10.6 s\n",
+ "Wall time: 2min 29s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "param_grid = {'max_depth': range(2,32,1),\n",
+ " 'min_samples_split':range(2,10,1)}\n",
+ "#TODO: Create Baseline for different ML stuff\n",
+ "clf = GridSearchCV(tree.DecisionTreeClassifier(), \n",
+ " param_grid,\n",
+ " cv=5 , n_jobs=os.cpu_count()-2, verbose=1)\n",
+ "clf.fit(dfY[features].values, dfY.InputMethod.values)\n",
+ "print(clf.best_params_, clf.best_score_)\n",
+ "dfT[\"InputMethodPred\"] = clf.predict(dfT[features].values) \n",
+ "\n",
+ "print(confusion_matrix(dfT.InputMethod.values, dfT.InputMethodPred.values, labels=[0, 1]))\n",
+ "print(\"Accuray: %.3f\" % accuracy_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(\"Recall: %.3f\" % metrics.recall_score(dfT.InputMethod.values, dfT.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"Precision: %.3f\" % metrics.average_precision_score(dfT.InputMethod.values, dfT.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"F1-Score: %.3f\" % metrics.f1_score(dfT.InputMethod.values, dfT.InputMethodPred.values, average=\"macro\"))\n",
+ "print(classification_report(dfT.InputMethod.values, dfT.InputMethodPred.values, target_names=target_names))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# RandomForestClassifier"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Fitting 5 folds for each of 180 candidates, totalling 900 fits\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "[Parallel(n_jobs=94)]: Using backend LokyBackend with 94 concurrent workers.\n",
+ "[Parallel(n_jobs=94)]: Done 12 tasks | elapsed: 1.2min\n",
+ "[Parallel(n_jobs=94)]: Done 262 tasks | elapsed: 4.0min\n",
+ "[Parallel(n_jobs=94)]: Done 612 tasks | elapsed: 9.2min\n",
+ "[Parallel(n_jobs=94)]: Done 900 out of 900 | elapsed: 12.8min finished\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'max_depth': 60, 'n_estimators': 63} 0.8669582104371696\n",
+ "[[8175 1246]\n",
+ " [2765 6656]]\n",
+ "Accuray: 0.79\n",
+ "Recall: 0.71\n",
+ "Precision: 0.74\n",
+ "F1-Score: 0.77\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " Knuckle 0.75 0.87 0.80 9421\n",
+ " Finger 0.84 0.71 0.77 9421\n",
+ "\n",
+ " micro avg 0.79 0.79 0.79 18842\n",
+ " macro avg 0.79 0.79 0.79 18842\n",
+ "weighted avg 0.79 0.79 0.79 18842\n",
+ "\n",
+ "CPU times: user 42.1 s, sys: 834 ms, total: 42.9 s\n",
+ "Wall time: 13min 28s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "param_grid = {'n_estimators': range(55,64,1),\n",
+ " 'max_depth': range(50,70,1)}\n",
+ "#TODO: Create Baseline for different ML stuff\n",
+ "clf = GridSearchCV(ensemble.RandomForestClassifier(), \n",
+ " param_grid,\n",
+ " cv=5 , n_jobs=os.cpu_count()-2, verbose=1)\n",
+ "clf.fit(dfY[features].values, dfY.InputMethod.values)\n",
+ "print(clf.best_params_, clf.best_score_)\n",
+ "dfT[\"InputMethodPred\"] = clf.predict(dfT[features].values) \n",
+ "\n",
+ "print(confusion_matrix(dfT.InputMethod.values, dfT.InputMethodPred.values, labels=[0, 1]))\n",
+ "print(\"Accuray: %.2f\" % accuracy_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(\"Recall: %.2f\" % metrics.recall_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(\"Precision: %.2f\" % metrics.average_precision_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(\"F1-Score: %.2f\" % metrics.f1_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(classification_report(dfT.InputMethod.values, dfT.InputMethodPred.values, target_names=target_names))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# kNN"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Fitting 5 folds for each of 62 candidates, totalling 310 fits\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "[Parallel(n_jobs=94)]: Using backend LokyBackend with 94 concurrent workers.\n",
+ "[Parallel(n_jobs=94)]: Done 12 tasks | elapsed: 17.7s\n",
+ "[Parallel(n_jobs=94)]: Done 310 out of 310 | elapsed: 1.5min finished\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'n_neighbors': 2} 0.800546827088748\n",
+ "[[8187 1234]\n",
+ " [4318 5103]]\n",
+ "Accuray: 0.71\n",
+ "Recall: 0.54\n",
+ "Precision: 0.67\n",
+ "F1-Score: 0.65\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " Knuckle 0.65 0.87 0.75 9421\n",
+ " Finger 0.81 0.54 0.65 9421\n",
+ "\n",
+ " micro avg 0.71 0.71 0.71 18842\n",
+ " macro avg 0.73 0.71 0.70 18842\n",
+ "weighted avg 0.73 0.71 0.70 18842\n",
+ "\n",
+ "CPU times: user 1.74 s, sys: 300 ms, total: 2.04 s\n",
+ "Wall time: 1min 30s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "param_grid = {'n_neighbors': range(2,64,1),\n",
+ " #weights': ['uniform', 'distance']\n",
+ " }\n",
+ "#TODO: Create Baseline for different ML stuff\n",
+ "clf = GridSearchCV(neighbors.KNeighborsClassifier(),\n",
+ " param_grid,\n",
+ " cv=5 , n_jobs=os.cpu_count()-2, verbose=1)\n",
+ "clf.fit(dfY[features].values, dfY.InputMethod.values)\n",
+ "print(clf.best_params_, clf.best_score_)\n",
+ "dfT[\"InputMethodPred\"] = clf.predict(dfT[features].values) \n",
+ "\n",
+ "print(confusion_matrix(dfT.InputMethod.values, dfT.InputMethodPred.values, labels=[0, 1]))\n",
+ "print(\"Accuray: %.2f\" % accuracy_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(\"Recall: %.2f\" % metrics.recall_score(dfT.InputMethod.values, dfT.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"Precision: %.2f\" % metrics.average_precision_score(dfT.InputMethod.values, dfT.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"F1-Score: %.2f\" % metrics.f1_score(dfT.InputMethod.values, dfT.InputMethodPred.values, average=\"macro\"))\n",
+ "print(classification_report(dfT.InputMethod.values, dfT.InputMethodPred.values, target_names=target_names))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# SVM"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Fitting 5 folds for each of 9 candidates, totalling 45 fits\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "[Parallel(n_jobs=94)]: Using backend LokyBackend with 94 concurrent workers.\n",
+ "[Parallel(n_jobs=94)]: Done 42 out of 45 | elapsed: 1056.5min remaining: 75.5min\n",
+ "[Parallel(n_jobs=94)]: Done 45 out of 45 | elapsed: 1080.5min finished\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'C': 10.0, 'gamma': 10.0} 0.8256943024851795\n",
+ "CPU times: user 2h 42min 9s, sys: 23.6 s, total: 2h 42min 33s\n",
+ "Wall time: 20h 43min 1s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "C_range = np.logspace(1, 3,3)\n",
+ "gamma_range = np.logspace(-1, 1, 3)\n",
+ "param_grid = dict(gamma=gamma_range, C=C_range)\n",
+ "clf = GridSearchCV(sklearn.svm.SVC(), \n",
+ " param_grid,\n",
+ " cv=5 , n_jobs=os.cpu_count()-2, verbose=1)\n",
+ "clf.fit(dfY[features].values, dfY.InputMethod.values)\n",
+ "print(clf.best_params_, clf.best_score_)\n",
+ "\n",
+ "dfT[\"InputMethodPred\"] = clf.predict(dfT[features].values)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'C': 10.0, 'gamma': 10.0} 0.8256943024851795\n",
+ "[[7106 2315]\n",
+ " [2944 6477]]\n",
+ "Accuray: 0.72\n",
+ "Recall: 0.69\n",
+ "Precision: 0.66\n",
+ "F1-Score: 0.71\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " Knuckle 0.71 0.75 0.73 9421\n",
+ " Finger 0.74 0.69 0.71 9421\n",
+ "\n",
+ " micro avg 0.72 0.72 0.72 18842\n",
+ " macro avg 0.72 0.72 0.72 18842\n",
+ "weighted avg 0.72 0.72 0.72 18842\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(clf.best_params_, clf.best_score_)\n",
+ "print(confusion_matrix(dfT.InputMethod.values, dfT.InputMethodPred.values, labels=[0, 1]))\n",
+ "print(\"Accuray: %.2f\" % accuracy_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(\"Recall: %.2f\" % metrics.recall_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(\"Precision: %.2f\" % metrics.average_precision_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(\"F1-Score: %.2f\" % metrics.f1_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(classification_report(dfT.InputMethod.values, dfT.InputMethodPred.values, target_names=target_names))"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_07_CNN.ipynb b/python/Step_07_CNN.ipynb
new file mode 100644
index 0000000..73c53ba
--- /dev/null
+++ b/python/Step_07_CNN.ipynb
@@ -0,0 +1,13240 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Using TensorFlow backend.\n"
+ ]
+ }
+ ],
+ "source": [
+ "## USE for Multi GPU Systems\n",
+ "#import os\n",
+ "#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+ "\n",
+ "from keras.models import Sequential, load_model\n",
+ "from keras.layers import *\n",
+ "from keras import optimizers\n",
+ "from keras import utils\n",
+ "from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau\n",
+ "import keras\n",
+ "\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "import pandas as pd\n",
+ "import math\n",
+ "\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "# Importing matplotlib to plot images.\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "%matplotlib inline\n",
+ "\n",
+ "# Importing SK-learn to calculate precision and recall\n",
+ "import sklearn\n",
+ "from sklearn import metrics\n",
+ "from sklearn.model_selection import train_test_split, cross_val_score, LeaveOneGroupOut\n",
+ "from sklearn.utils import shuffle \n",
+ "\n",
+ "# Used for graph export\n",
+ "from tensorflow.python.framework import graph_util\n",
+ "from tensorflow.python.framework import graph_io\n",
+ "from keras import backend as K\n",
+ "from keras import regularizers\n",
+ "\n",
+ "import pickle as pkl\n",
+ "import h5py\n",
+ "\n",
+ "from pathlib import Path\n",
+ "import os.path\n",
+ "import sys\n",
+ "import datetime\n",
+ "import time\n",
+ "\n",
+ "from keras.callbacks import Callback"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class LoggingTensorBoard(TensorBoard): \n",
+ "\n",
+ " def __init__(self, log_dir, settings_str_to_log, **kwargs):\n",
+ " super(LoggingTensorBoard, self).__init__(log_dir, **kwargs)\n",
+ "\n",
+ " self.settings_str = settings_str_to_log\n",
+ "\n",
+ " def on_train_begin(self, logs=None):\n",
+ " TensorBoard.on_train_begin(self, logs=logs)\n",
+ "\n",
+ " tensor = tf.convert_to_tensor(self.settings_str)\n",
+ " summary = tf.summary.text (\"Run_Settings\", tensor)\n",
+ "\n",
+ " with tf.Session() as sess:\n",
+ " s = sess.run(summary)\n",
+ " self.writer.add_summary(s)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[ 1 2 9 6 4 14 17 16 12 3 10 18 5] [13 8 11 15 7]\n",
+ "13 : 5\n",
+ "0.7222222222222222 : 0.2777777777777778\n"
+ ]
+ }
+ ],
+ "source": [
+ "# the data, split between train and test sets\n",
+ "dfAll = pd.read_pickle(\"DataStudyCollection/df_blobs_area.pkl\")\n",
+ "\n",
+ "lst = dfAll.userID.unique()\n",
+ "np.random.seed(42)\n",
+ "np.random.shuffle(lst)\n",
+ "test_ids = lst[-5:]\n",
+ "train_ids = lst[:-5]\n",
+ "print(train_ids, test_ids)\n",
+ "print(len(train_ids), \":\", len(test_ids))\n",
+ "print(len(train_ids) / len(lst), \":\", len(test_ids)/ len(lst))\n",
+ "\n",
+ "df_train = dfAll[dfAll.userID.isin(train_ids)]\n",
+ "df_test = dfAll[dfAll.userID.isin(test_ids) & (dfAll.Version == \"Normal\")]\n",
+ "\n",
+ "df_train2 = df_train[['Blobs', 'InputMethod']].copy()\n",
+ "df_test2 = df_test[['Blobs', 'InputMethod']].copy()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "618012"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(dfAll)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x_train = np.vstack(df_train2.Blobs)\n",
+ "x_test = np.vstack(df_test2.Blobs)\n",
+ "y_train = df_train2.InputMethod.values\n",
+ "y_test = df_test2.InputMethod.values\n",
+ "\n",
+ "x_train = x_train.reshape(-1, 27, 15, 1)\n",
+ "x_test = x_test.reshape(-1, 27, 15, 1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# convert class vectors to binary class matrices (one-hot notation)\n",
+ "num_classes = 2\n",
+ "y_train_one_hot = utils.to_categorical(df_train2.InputMethod, num_classes)\n",
+ "y_test_one_hot = utils.to_categorical(df_test2.InputMethod, num_classes)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "Text(0.5, 1.0, 'Label for image 1 is: 0')"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAKEAAAEICAYAAAA3NZQkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAADntJREFUeJzt3XusHPV5xvHv4xsG44SLiQXmYoIQkYuEW1GTKjSBYMChTUxUlYBKZSKo0zQovdAkJG0DSavKikrTpEpRAjg4JAFRKMVNKWAsKIraBkxECAQSE2PAri+AbWyHq+23f8zvpOPD2Yv3ct7js89HWu3Mzvxm3t3znJmd2dn9KSIwyzQhuwAzh9DSOYSWziG0dA6hpXMILd2YCqGkByRd1uu2qnxT0lZJD3VXJUg6VtJOSRO7XdZYkfmc+hJCSWslze/Hsjt0OnA2cHREzOt2YRHxXEQcHBG7uy+tfySdKel+SS9LWtts3l4+J0l/KmmjpO2Slko6oNn8Y2pL2EfHAWsj4hf72lDSpD7UM1p+ASwFPjVaK5R0LnAlcBbV6/5O4AvN2oxqCCUdKul7kl4ou8bvSTp62GwnSHqo/BfdKemwWvt3S/ovSdsk/UjSGW2s81LgeuA3yu7mC+XxP5D0tKQtkpZLOqrWJiR9QtJqYPUIy5xd5plUxh+Q9Deltp2S/k3S4ZK+U57Hw5Jm19p/RdLzZdojkn6zNu1AScvK6/OkpE9LWlebfpSk28tr+IykTzZ67hHxUETcBKxp43Ua/pwukbRG0o6ynt9rtYxiEXBDRDwREVuBvwYuadoiInp+A9YC80d4/HDgd4CDgOnAPwP/Wpv+ALAeOBmYBtwOfLtMmwW8BJxH9c9zdhk/otb2sgb1XAJ8vzb+fuBF4NeAA4B/BB6sTQ9gBXAYcOAIy5td5plUW/fTwAnA24GfAD8D5gOTgG8B36y1v7i8FpOAK4CNwNQybQnwn8ChwNHAY8C6Mm0C8AjweWAK1VZmDXBui7/HfKo9QbN5fvmcymu/HTipTDsS+JUyfCywDTi2wXJ+BHykNj6jLPfwhusezRCOMN9cYOuwEC6pjc8B3gAmAp8BbhrW/h5gUQchvAH4Um38YOBNYHYthO9v5w9WW/df1KZfA/xHbfyDwKNNlrcVOKUM7xUq4LJaCE8DnhvW9rP1gPcwhNuoNhhv+SdssZyfAwtq45PLcmc3ajPau+ODJH1d0rOStgMPAocMOyJ7vjb8LNWTmEH1/uJ3y654m6RtVAccR3ZQylFl2QBExE6qreqsBnW0Y1Nt+NURxg8eGpH052VX+3J5Hm+neo5DtdXXXR8+Djhq2GvwOWDmPtbaVFTvnT8C/CGwQdK/S3pXm813Am+rjQ8N72jUYLQPTK4ATgJOi4i3Ae8tj6s2zzG14WOptlAvUv0xboqIQ2q3aRGxpIM6/pfqD1qtXJpGtXtcX5unL5cXlfd/nwYuAA6NiEOAl/n/12AD1W54SP31eB54ZthrMD0izut1nRFxT0ScTfVP/hRwXZtNnwBOqY2fAmyKiJcaNehnCCdLmlq7TaJ6H/gqsK0ccFw1QruLJc2RdBDwReC2qE4bfBv4oKRzJU0syzxjhAObdtwMfFTS3HL64G+BH0TE2k6e6D6aDuwCXgAmSfo8e285bgU+Ww7iZgGX16Y9BOyQ9JlyADNR0smSfn2kFUmaIGkq1d5E5TWb0qpASTMlLSz/nK9Tbd32tPn8vgVcWv6GhwB/CdzYrEE/Q3gXVeCGblcD/wAcSLVl+x/g7hHa3URV9EZgKvBJgIh4HlhItft5gWqr8Ck6eA4RcR/wV1QHPhuoDigu3NfldOgequf9M6q3BK+x9y73i8A64BngPuA2qiBQ/hl/m+q99DNUr+P1VLvzkbyX6rW/i2qv8ipwbxs1TgD+jGqPsQV4H/Bx2Ouk9rEjNYyIu4EvAfcDz5XnONLG5pdU
3jzaGCXp48CFEfG+7Fr6ZVBOVu83JB0p6T1lV3oS1fvoO7Lr6qf9+dOA8WoK8HXgeKrTJLcA/5RaUZ95d2zpvDu2dKO6O56iA2Iq0xrPIDWeBmhiq6uMmm/VY9eYvuhlXNjB1hcj4oh9adNVCCUtAL5C9bHa9a1OHE9lGqdNPKfx8iY3L2fCIY3ORBS7m4ds95ZtzdvvcUi7dV/c9mzrufbW8e64fNT2NeADVJ/xXiRpTqfLs8HVzXvCecDTEbEmIt6gOopb2JuybJB0E8JZ7H2mfx17XwAAgKTFklZJWvVmdeLfbC99PzqOiG9ExKkRcepkml7lbQOqmxCuZ+8rPI5m76tQzNrSTQgfBk6UdHy5MuNCYHlvyrJB0vEpmojYJelyqqtCJgJLI+KJ1g0bXxE04YAWu+vpTc4xAuxo8T0mn4IZk7o6TxgRd1FdJmTWMX9sZ+kcQkvnEFo6h9DSOYSWziG0dKN6PaEmTGDCgQc2nmFW8+9w//ziGU2nT97R/HrEY766s+n0Pa+80nS69Ye3hJbOIbR0DqGlcwgtnUNo6RxCS+cQWrox9TMg0eJ7xT/96LVdLf+3bvlQ0+l71j7X1fKtM94SWjqH0NI5hJbOIbR0DqGlcwgtnUNo6cbUecIJW7c3nX7yV/+o6fRXjmzey8G7Xm3ZxZsl8JbQ0jmEls4htHQOoaVzCC2dQ2jpHEJLN6rnCWPPHva81vh3q/Xaa03bH3fLuu7W/2rz5VuObvsxWUvVo/duYFdEnNqLomyw9GJLeGZEvNiD5diA8ntCS9dtCAO4V9Ijkhb3oiAbPN3ujk+PiPWS3gGskPRURDxYn6GEczHAVA7qcnU2HnW1JYyI9eV+M1Xv5PNGmMed6VhT3XSwOE3S9KFh4Bzg8V4VZoOjm93xTOAOVX0UTwK+GxF3t2zVpC+R3S9tad621XTbL3XTmc4a4JQe1mIDyqdoLJ1DaOkcQkvnEFo6h9DSOYSWziG0dA6hpXMILZ1DaOkcQkvnEFo6h9DSOYSWziG0dA6hpXMILZ1DaOkcQkvnEFo6h9DSOYSWziG0dA6hpXMILZ1DaOkcQkvnEFo6h9DSOYSWziG0dA6hpWsZQklLJW2W9HjtscMkrZC0utwf2t8ybTxrZ0t4I7Bg2GNXAisj4kRgZRk360jLEJYuIYb/WPRCYFkZXgac3+O6bIB0+pvVMyNiQxneSPUj6iNyPybWStcHJhERVD07NZrufkysqU5DuEnSkQDlfnPvSrJB02kIlwOLyvAi4M7elGODqJ1TNDcD/w2cJGmdpEuBJcDZklYD88u4WUdaHphExEUNJp3V41psQPkTE0vnEFo6h9DSOYSWziG0dA6hpXMILZ1DaOkcQkvnEFo6h9DSOYSWziG0dA6hpXMILZ1DaOkcQkvnEFo6h9DSOYSWziG0dA6hpXMILZ1DaOkcQkvnEFo6h9DSOYSWziG0dA6hpXMILV2n/ZhcLWm9pEfL7bz+lmnjWaf9mAB8OSLmlttdvS3LBkmn/ZiY9Uw37wkvl/RY2V037FZM0mJJqyStepPXu1idjVedhvBa4ARgLrABuKbRjO7HxFrpKIQRsSkidkfEHuA6YF5vy7JB0lEIhzrSKT4MPN5oXrNWWnYhUfoxOQOYIWkdcBVwhqS5VN2JrQU+1scabZzrtB+TG/pQiw0of2Ji6RxCS+cQWjqH0NI5hJbOIbR0DqGlcwgtnUNo6RxCS+cQWjqH0NI5hJbOIbR0DqGlcwgtnUNo6RxCS+cQWjqH0NI5hJbOIbR0DqGlcwgtnUNo6RxCS+cQWjqH0NI5hJbOIbR0DqGla6cfk2Mk3S/pJ5KekPTH5fHDJK2QtLrcN/zxdLNm2tkS7gKuiIg5wLuBT0iaA1wJrIyIE4GVZdxsn7XTj8mGiPhhGd4BPAnMAhYCy8psy4Dz+1WkjW8tfy64TtJs4FeBHwAzI2JDmbQRmNmgzWJgMcBUDuq0ThvH2j4wkXQwcDvwJxGx
vT4tIoLqR9Tfwv2YWCtthVDSZKoAfici/qU8vGmoK4lyv7k/Jdp4187Rsah+rf/JiPj72qTlwKIyvAi4s/fl2SBo5z3he4DfB34s6dHy2OeAJcCtki4FngUu6E+JNt6104/J9wE1mHxWb8uxQeRPTCydQ2jpHEJL5xBaOofQ0jmEls4htHQOoaVzCC2dQ2jpHEJL5xBaOofQ0jmEls4htHQOoaVzCC2dQ2jpHEJL5xBaOofQ0jmEls4htHQOoaVzCC2dQ2jpHEJL5xBaOofQ0jmEls4htHQOoaXrpjOdqyWtl/RouZ3X/3JtPGrn54KHOtP5oaTpwCOSVpRpX46Iv+tfeTYI2vm54A3AhjK8Q9JQZzpmPbFP7wmHdaYDcLmkxyQtbdS3naTFklZJWvUmr3dVrI1P3XSmcy1wAjCXakt5zUjt3JmOtdJxZzoRsSkidkfEHuA6YF7/yrTxrOPOdIZ6cyo+DDze+/JsEHTTmc5FkuZS9Wm3FvhYXyq0ca+bznTu6n05Noj8iYmlcwgtnUNo6RxCS+cQWjqH0NIpIkZvZdILVB10D5kBvDhqBey7sV4fjL0aj4uII/alwaiG8C0rl1ZFxKlpBbQw1uuD/aPGVrw7tnQOoaXLDuE3ktffylivD/aPGptKfU9oBvlbQjOH0PKlhFDSAkk/lfS0pCszamhF0lpJPy5fZ101BupZKmmzpMdrjx0maYWk1eV+xO/5jHWjHkJJE4GvAR8A5lBdHDtntOto05kRMXeMnIe7EVgw7LErgZURcSKwsozvdzK2hPOApyNiTUS8AdwCLEyoY78SEQ8CW4Y9vBBYVoaXAeePalE9khHCWcDztfF1jM3vMQdwr6RHJC3OLqaBmeV74QAbgZmZxXSqne+YDKrTI2K9pHcAKyQ9VbZGY1JEhKT98nxbxpZwPXBMbfzo8tiYEhHry/1m4A7G5ldaNw1967Hcb06upyMZIXwYOFHS8ZKmABcCyxPqaEjStPK7O0iaBpzD2PxK63JgURleBNyZWEvHRn13HBG7JF0O3ANMBJZGxBOjXUcLM4E7qq9cMwn4bkTcnVmQpJuBM4AZktYBVwFLgFslXUp1idwFeRV2zh/bWTp/YmLpHEJL5xBaOofQ0jmEls4htHQOoaX7P9Bb5SQoQkqOAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "i = 1\n",
+ "plt.imshow(x_train[i].reshape(27, 15)) #np.sqrt(784) = 28\n",
+ "plt.title(\"Label for image %i is: %s\" % (i, y_train[i]))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# If GPU is not available: \n",
+ "# GPU_USE = '/cpu:0'\n",
+ "#config = tf.ConfigProto(device_count = {\"GPU\": 1})\n",
+ "\n",
+ "\n",
+ "# If GPU is available: \n",
+ "config = tf.ConfigProto()\n",
+ "config.log_device_placement = True\n",
+ "config.allow_soft_placement = True\n",
+ "config.gpu_options.allow_growth=True\n",
+ "config.gpu_options.allocator_type = 'BFC'\n",
+ "\n",
+ "# Limit the maximum memory used\n",
+ "config.gpu_options.per_process_gpu_memory_fraction = 0.2\n",
+ "\n",
+ "# set session config\n",
+ "tf.keras.backend.set_session(tf.Session(config=config))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "Colocations handled automatically by placer.\n",
+ "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
+ "CNN\n",
+ "_________________________________________________________________\n",
+ "Layer (type) Output Shape Param # \n",
+ "=================================================================\n",
+ "conv2d_1 (Conv2D) (None, 27, 15, 128) 1280 \n",
+ "_________________________________________________________________\n",
+ "batch_normalization_1 (Batch (None, 27, 15, 128) 512 \n",
+ "_________________________________________________________________\n",
+ "conv2d_2 (Conv2D) (None, 27, 15, 64) 73792 \n",
+ "_________________________________________________________________\n",
+ "batch_normalization_2 (Batch (None, 27, 15, 64) 256 \n",
+ "_________________________________________________________________\n",
+ "max_pooling2d_1 (MaxPooling2 (None, 14, 8, 64) 0 \n",
+ "_________________________________________________________________\n",
+ "dropout_1 (Dropout) (None, 14, 8, 64) 0 \n",
+ "_________________________________________________________________\n",
+ "conv2d_3 (Conv2D) (None, 14, 8, 64) 36928 \n",
+ "_________________________________________________________________\n",
+ "batch_normalization_3 (Batch (None, 14, 8, 64) 256 \n",
+ "_________________________________________________________________\n",
+ "conv2d_4 (Conv2D) (None, 14, 8, 32) 18464 \n",
+ "_________________________________________________________________\n",
+ "batch_normalization_4 (Batch (None, 14, 8, 32) 128 \n",
+ "_________________________________________________________________\n",
+ "max_pooling2d_2 (MaxPooling2 (None, 7, 4, 32) 0 \n",
+ "_________________________________________________________________\n",
+ "dropout_2 (Dropout) (None, 7, 4, 32) 0 \n",
+ "_________________________________________________________________\n",
+ "flatten_1 (Flatten) (None, 896) 0 \n",
+ "_________________________________________________________________\n",
+ "dense_1 (Dense) (None, 140) 125580 \n",
+ "_________________________________________________________________\n",
+ "dropout_3 (Dropout) (None, 140) 0 \n",
+ "_________________________________________________________________\n",
+ "dense_2 (Dense) (None, 70) 9870 \n",
+ "_________________________________________________________________\n",
+ "dropout_4 (Dropout) (None, 70) 0 \n",
+ "_________________________________________________________________\n",
+ "dense_3 (Dense) (None, 2) 142 \n",
+ "=================================================================\n",
+ "Total params: 267,208\n",
+ "Trainable params: 266,632\n",
+ "Non-trainable params: 576\n",
+ "_________________________________________________________________\n",
+ "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "Use tf.cast instead.\n",
+ "Train on 465844 samples, validate on 38042 samples\n",
+ "Epoch 1/100000\n",
+ " - 22s - loss: 10.6384 - acc: 0.8243 - val_loss: 1.4424 - val_acc: 0.2476\n",
+ "\n",
+ "Epoch 00001: val_acc improved from -inf to 0.24765, saving model to ./ModelSnapshots/CNN-001.h5\n",
+ "Epoch 2/100000\n",
+ " - 19s - loss: 0.6649 - acc: 0.9111 - val_loss: 0.6993 - val_acc: 0.9254\n",
+ "\n",
+ "Epoch 00002: val_acc improved from 0.24765 to 0.92542, saving model to ./ModelSnapshots/CNN-002.h5\n",
+ "Epoch 3/100000\n",
+ " - 19s - loss: 0.5491 - acc: 0.9172 - val_loss: 0.4966 - val_acc: 0.9273\n",
+ "\n",
+ "Epoch 00003: val_acc improved from 0.92542 to 0.92732, saving model to ./ModelSnapshots/CNN-003.h5\n",
+ "Epoch 4/100000\n",
+ " - 20s - loss: 0.5172 - acc: 0.9224 - val_loss: 0.4912 - val_acc: 0.9173\n",
+ "\n",
+ "Epoch 00004: val_acc did not improve from 0.92732\n",
+ "Epoch 5/100000\n",
+ " - 19s - loss: 0.5110 - acc: 0.9224 - val_loss: 0.6533 - val_acc: 0.7959\n",
+ "\n",
+ "Epoch 00005: val_acc did not improve from 0.92732\n",
+ "Epoch 6/100000\n",
+ " - 19s - loss: 0.5013 - acc: 0.9244 - val_loss: 0.4590 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 00006: val_acc improved from 0.92732 to 0.93449, saving model to ./ModelSnapshots/CNN-006.h5\n",
+ "Epoch 7/100000\n",
+ " - 19s - loss: 0.4876 - acc: 0.9258 - val_loss: 0.8813 - val_acc: 0.7317\n",
+ "\n",
+ "Epoch 00007: val_acc did not improve from 0.93449\n",
+ "Epoch 8/100000\n",
+ " - 19s - loss: 0.4874 - acc: 0.9272 - val_loss: 0.4632 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 00008: val_acc did not improve from 0.93449\n",
+ "Epoch 9/100000\n",
+ " - 18s - loss: 0.4859 - acc: 0.9262 - val_loss: 0.4528 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00009: val_acc did not improve from 0.93449\n",
+ "Epoch 10/100000\n",
+ " - 19s - loss: 0.4795 - acc: 0.9274 - val_loss: 0.5400 - val_acc: 0.8862\n",
+ "\n",
+ "Epoch 00010: val_acc did not improve from 0.93449\n",
+ "Epoch 11/100000\n",
+ " - 18s - loss: 0.4747 - acc: 0.9271 - val_loss: 0.4750 - val_acc: 0.9164\n",
+ "\n",
+ "Epoch 00011: val_acc did not improve from 0.93449\n",
+ "Epoch 12/100000\n",
+ " - 18s - loss: 0.4673 - acc: 0.9282 - val_loss: 0.6994 - val_acc: 0.8365\n",
+ "\n",
+ "Epoch 00012: val_acc did not improve from 0.93449\n",
+ "Epoch 13/100000\n",
+ " - 18s - loss: 0.4585 - acc: 0.9291 - val_loss: 0.4540 - val_acc: 0.9170\n",
+ "\n",
+ "Epoch 00013: val_acc did not improve from 0.93449\n",
+ "Epoch 14/100000\n",
+ " - 18s - loss: 0.4556 - acc: 0.9278 - val_loss: 0.4327 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 00014: val_acc did not improve from 0.93449\n",
+ "Epoch 15/100000\n",
+ " - 19s - loss: 0.4564 - acc: 0.9284 - val_loss: 0.4179 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 00015: val_acc did not improve from 0.93449\n",
+ "Epoch 16/100000\n",
+ " - 19s - loss: 0.4476 - acc: 0.9295 - val_loss: 0.4111 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 00016: val_acc did not improve from 0.93449\n",
+ "Epoch 17/100000\n",
+ " - 19s - loss: 0.4486 - acc: 0.9287 - val_loss: 0.4264 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 00017: val_acc did not improve from 0.93449\n",
+ "Epoch 18/100000\n",
+ " - 20s - loss: 0.4490 - acc: 0.9292 - val_loss: 0.4275 - val_acc: 0.9255\n",
+ "\n",
+ "Epoch 00018: val_acc did not improve from 0.93449\n",
+ "Epoch 19/100000\n",
+ " - 19s - loss: 0.4426 - acc: 0.9300 - val_loss: 0.4185 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 00019: val_acc did not improve from 0.93449\n",
+ "Epoch 20/100000\n",
+ " - 19s - loss: 0.4419 - acc: 0.9299 - val_loss: 0.4256 - val_acc: 0.9255\n",
+ "\n",
+ "Epoch 00020: val_acc did not improve from 0.93449\n",
+ "Epoch 21/100000\n",
+ " - 19s - loss: 0.4459 - acc: 0.9296 - val_loss: 0.4994 - val_acc: 0.9231\n",
+ "\n",
+ "Epoch 00021: val_acc did not improve from 0.93449\n",
+ "Epoch 22/100000\n",
+ " - 19s - loss: 0.4388 - acc: 0.9309 - val_loss: 0.4264 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 00022: val_acc did not improve from 0.93449\n",
+ "Epoch 23/100000\n",
+ " - 19s - loss: 0.4330 - acc: 0.9315 - val_loss: 0.3966 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 00023: val_acc improved from 0.93449 to 0.93675, saving model to ./ModelSnapshots/CNN-023.h5\n",
+ "Epoch 24/100000\n",
+ " - 19s - loss: 0.4331 - acc: 0.9308 - val_loss: 0.4026 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 00024: val_acc did not improve from 0.93675\n",
+ "Epoch 25/100000\n",
+ " - 20s - loss: 0.4322 - acc: 0.9312 - val_loss: 0.4607 - val_acc: 0.9141\n",
+ "\n",
+ "Epoch 00025: val_acc did not improve from 0.93675\n",
+ "Epoch 26/100000\n",
+ " - 19s - loss: 0.4345 - acc: 0.9300 - val_loss: 0.4082 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 00026: val_acc did not improve from 0.93675\n",
+ "Epoch 27/100000\n",
+ " - 19s - loss: 0.4337 - acc: 0.9312 - val_loss: 0.4118 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 00027: val_acc did not improve from 0.93675\n",
+ "Epoch 28/100000\n",
+ " - 19s - loss: 0.4307 - acc: 0.9316 - val_loss: 0.4559 - val_acc: 0.9112\n",
+ "\n",
+ "Epoch 00028: val_acc did not improve from 0.93675\n",
+ "Epoch 29/100000\n",
+ " - 18s - loss: 0.4348 - acc: 0.9312 - val_loss: 0.4312 - val_acc: 0.9201\n",
+ "\n",
+ "Epoch 00029: val_acc did not improve from 0.93675\n",
+ "Epoch 30/100000\n",
+ " - 19s - loss: 0.4290 - acc: 0.9310 - val_loss: 0.4249 - val_acc: 0.9152\n",
+ "\n",
+ "Epoch 00030: val_acc did not improve from 0.93675\n",
+ "Epoch 31/100000\n",
+ " - 19s - loss: 0.4317 - acc: 0.9312 - val_loss: 0.4365 - val_acc: 0.9219\n",
+ "\n",
+ "Epoch 00031: val_acc did not improve from 0.93675\n",
+ "Epoch 32/100000\n",
+ " - 19s - loss: 0.4269 - acc: 0.9315 - val_loss: 0.3956 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 00032: val_acc did not improve from 0.93675\n",
+ "Epoch 33/100000\n",
+ " - 19s - loss: 0.4252 - acc: 0.9313 - val_loss: 0.4402 - val_acc: 0.9201\n",
+ "\n",
+ "Epoch 00033: val_acc did not improve from 0.93675\n",
+ "Epoch 34/100000\n",
+ " - 19s - loss: 0.4258 - acc: 0.9323 - val_loss: 0.3936 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 00034: val_acc did not improve from 0.93675\n",
+ "Epoch 35/100000\n",
+ " - 19s - loss: 0.4266 - acc: 0.9311 - val_loss: 0.4042 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00035: val_acc did not improve from 0.93675\n",
+ "Epoch 36/100000\n",
+ " - 19s - loss: 0.4259 - acc: 0.9322 - val_loss: 0.4084 - val_acc: 0.9268\n",
+ "\n",
+ "Epoch 00036: val_acc did not improve from 0.93675\n",
+ "Epoch 37/100000\n",
+ " - 19s - loss: 0.4240 - acc: 0.9318 - val_loss: 0.4365 - val_acc: 0.9198\n",
+ "\n",
+ "Epoch 00037: val_acc did not improve from 0.93675\n",
+ "Epoch 38/100000\n",
+ " - 19s - loss: 0.4194 - acc: 0.9327 - val_loss: 0.4481 - val_acc: 0.8921\n",
+ "\n",
+ "Epoch 00038: val_acc did not improve from 0.93675\n",
+ "Epoch 39/100000\n",
+ " - 19s - loss: 0.4211 - acc: 0.9312 - val_loss: 0.3893 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 00039: val_acc did not improve from 0.93675\n",
+ "Epoch 40/100000\n",
+ " - 19s - loss: 0.4197 - acc: 0.9324 - val_loss: 0.4019 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 00040: val_acc did not improve from 0.93675\n",
+ "Epoch 41/100000\n",
+ " - 19s - loss: 0.4215 - acc: 0.9311 - val_loss: 0.4159 - val_acc: 0.9282\n",
+ "\n",
+ "Epoch 00041: val_acc did not improve from 0.93675\n",
+ "Epoch 42/100000\n",
+ " - 19s - loss: 0.4193 - acc: 0.9319 - val_loss: 0.6113 - val_acc: 0.8096\n",
+ "\n",
+ "Epoch 00042: val_acc did not improve from 0.93675\n",
+ "Epoch 43/100000\n",
+ " - 19s - loss: 0.4186 - acc: 0.9320 - val_loss: 1.0623 - val_acc: 0.7866\n",
+ "\n",
+ "Epoch 00043: val_acc did not improve from 0.93675\n",
+ "Epoch 44/100000\n",
+ " - 19s - loss: 0.4189 - acc: 0.9330 - val_loss: 0.4174 - val_acc: 0.9302\n",
+ "\n",
+ "Epoch 00044: val_acc did not improve from 0.93675\n",
+ "Epoch 45/100000\n",
+ " - 19s - loss: 0.4156 - acc: 0.9320 - val_loss: 0.4213 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 00045: val_acc did not improve from 0.93675\n",
+ "Epoch 46/100000\n",
+ " - 19s - loss: 0.4156 - acc: 0.9320 - val_loss: 0.4395 - val_acc: 0.9141\n",
+ "\n",
+ "Epoch 00046: val_acc did not improve from 0.93675\n",
+ "Epoch 47/100000\n",
+ " - 19s - loss: 0.4143 - acc: 0.9324 - val_loss: 0.3903 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 00047: val_acc did not improve from 0.93675\n",
+ "Epoch 48/100000\n",
+ " - 19s - loss: 0.4120 - acc: 0.9323 - val_loss: 0.4650 - val_acc: 0.8933\n",
+ "\n",
+ "Epoch 00048: val_acc did not improve from 0.93675\n",
+ "Epoch 49/100000\n",
+ " - 19s - loss: 0.4158 - acc: 0.9317 - val_loss: 0.4139 - val_acc: 0.9272\n",
+ "\n",
+ "Epoch 00049: val_acc did not improve from 0.93675\n",
+ "Epoch 50/100000\n",
+ " - 19s - loss: 0.4154 - acc: 0.9321 - val_loss: 0.4007 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 00050: val_acc did not improve from 0.93675\n",
+ "Epoch 51/100000\n",
+ " - 19s - loss: 0.4142 - acc: 0.9324 - val_loss: 0.4773 - val_acc: 0.8966\n",
+ "\n",
+ "Epoch 00051: val_acc did not improve from 0.93675\n",
+ "Epoch 52/100000\n",
+ " - 19s - loss: 0.4123 - acc: 0.9323 - val_loss: 0.9801 - val_acc: 0.8088\n",
+ "\n",
+ "Epoch 00052: val_acc did not improve from 0.93675\n",
+ "Epoch 53/100000\n",
+ " - 19s - loss: 0.4135 - acc: 0.9315 - val_loss: 0.4215 - val_acc: 0.9125\n",
+ "\n",
+ "Epoch 00053: val_acc did not improve from 0.93675\n",
+ "Epoch 54/100000\n",
+ " - 19s - loss: 0.4129 - acc: 0.9327 - val_loss: 0.3969 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 00054: val_acc did not improve from 0.93675\n",
+ "Epoch 55/100000\n",
+ " - 19s - loss: 0.4087 - acc: 0.9330 - val_loss: 0.6396 - val_acc: 0.8209\n",
+ "\n",
+ "Epoch 00055: val_acc did not improve from 0.93675\n",
+ "Epoch 56/100000\n",
+ " - 19s - loss: 0.4110 - acc: 0.9319 - val_loss: 0.3968 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 00056: val_acc did not improve from 0.93675\n",
+ "Epoch 57/100000\n",
+ " - 19s - loss: 0.4119 - acc: 0.9323 - val_loss: 0.4063 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 00057: val_acc did not improve from 0.93675\n",
+ "Epoch 58/100000\n",
+ " - 19s - loss: 0.4097 - acc: 0.9324 - val_loss: 0.4219 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 00058: val_acc did not improve from 0.93675\n",
+ "Epoch 59/100000\n",
+ " - 19s - loss: 0.4094 - acc: 0.9327 - val_loss: 0.4067 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 00059: val_acc did not improve from 0.93675\n",
+ "Epoch 60/100000\n",
+ " - 19s - loss: 0.4070 - acc: 0.9337 - val_loss: 0.3934 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 00060: val_acc did not improve from 0.93675\n",
+ "Epoch 61/100000\n",
+ " - 19s - loss: 0.4090 - acc: 0.9326 - val_loss: 0.4402 - val_acc: 0.9150\n",
+ "\n",
+ "Epoch 00061: val_acc did not improve from 0.93675\n",
+ "Epoch 62/100000\n",
+ " - 19s - loss: 0.4079 - acc: 0.9330 - val_loss: 0.4429 - val_acc: 0.9156\n",
+ "\n",
+ "Epoch 00062: val_acc did not improve from 0.93675\n",
+ "Epoch 63/100000\n",
+ " - 19s - loss: 0.4093 - acc: 0.9323 - val_loss: 0.4007 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 00063: val_acc did not improve from 0.93675\n",
+ "Epoch 64/100000\n",
+ " - 19s - loss: 0.4081 - acc: 0.9333 - val_loss: 0.4042 - val_acc: 0.9201\n",
+ "\n",
+ "Epoch 00064: val_acc did not improve from 0.93675\n",
+ "Epoch 65/100000\n",
+ " - 19s - loss: 0.4104 - acc: 0.9319 - val_loss: 0.3883 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 00065: val_acc did not improve from 0.93675\n",
+ "Epoch 66/100000\n",
+ " - 19s - loss: 0.4095 - acc: 0.9326 - val_loss: 0.3803 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 00066: val_acc did not improve from 0.93675\n",
+ "Epoch 67/100000\n",
+ " - 19s - loss: 0.4099 - acc: 0.9329 - val_loss: 0.3824 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 00067: val_acc improved from 0.93675 to 0.93723, saving model to ./ModelSnapshots/CNN-067.h5\n",
+ "Epoch 68/100000\n",
+ " - 19s - loss: 0.4103 - acc: 0.9330 - val_loss: 0.3824 - val_acc: 0.9377\n",
+ "\n",
+ "Epoch 00068: val_acc improved from 0.93723 to 0.93773, saving model to ./ModelSnapshots/CNN-068.h5\n",
+ "Epoch 69/100000\n",
+ " - 19s - loss: 0.4068 - acc: 0.9335 - val_loss: 0.3933 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 00069: val_acc did not improve from 0.93773\n",
+ "Epoch 70/100000\n",
+ " - 19s - loss: 0.4102 - acc: 0.9331 - val_loss: 0.4003 - val_acc: 0.9268\n",
+ "\n",
+ "Epoch 00070: val_acc did not improve from 0.93773\n",
+ "Epoch 71/100000\n",
+ " - 19s - loss: 0.4072 - acc: 0.9332 - val_loss: 0.3831 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 00071: val_acc did not improve from 0.93773\n",
+ "Epoch 72/100000\n",
+ " - 19s - loss: 0.4106 - acc: 0.9330 - val_loss: 0.4424 - val_acc: 0.9048\n",
+ "\n",
+ "Epoch 00072: val_acc did not improve from 0.93773\n",
+ "Epoch 73/100000\n",
+ " - 19s - loss: 0.4077 - acc: 0.9335 - val_loss: 0.4400 - val_acc: 0.9091\n",
+ "\n",
+ "Epoch 00073: val_acc did not improve from 0.93773\n",
+ "Epoch 74/100000\n",
+ " - 19s - loss: 0.4126 - acc: 0.9320 - val_loss: 0.4159 - val_acc: 0.9256\n",
+ "\n",
+ "Epoch 00074: val_acc did not improve from 0.93773\n",
+ "Epoch 75/100000\n",
+ " - 19s - loss: 0.4082 - acc: 0.9329 - val_loss: 0.4983 - val_acc: 0.8585\n",
+ "\n",
+ "Epoch 00075: val_acc did not improve from 0.93773\n",
+ "Epoch 76/100000\n",
+ " - 19s - loss: 0.4097 - acc: 0.9330 - val_loss: 0.4032 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00076: val_acc did not improve from 0.93773\n",
+ "Epoch 77/100000\n",
+ " - 19s - loss: 0.4094 - acc: 0.9335 - val_loss: 0.4593 - val_acc: 0.8998\n",
+ "\n",
+ "Epoch 00077: val_acc did not improve from 0.93773\n",
+ "Epoch 78/100000\n",
+ " - 19s - loss: 0.4086 - acc: 0.9326 - val_loss: 0.5056 - val_acc: 0.8838\n",
+ "\n",
+ "Epoch 00078: val_acc did not improve from 0.93773\n",
+ "Epoch 79/100000\n",
+ " - 19s - loss: 0.4061 - acc: 0.9329 - val_loss: 0.4159 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 00079: val_acc did not improve from 0.93773\n",
+ "Epoch 80/100000\n",
+ " - 19s - loss: 0.4060 - acc: 0.9338 - val_loss: 0.4024 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 00080: val_acc did not improve from 0.93773\n",
+ "Epoch 81/100000\n",
+ " - 19s - loss: 0.4033 - acc: 0.9339 - val_loss: 0.3978 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 00081: val_acc did not improve from 0.93773\n",
+ "Epoch 82/100000\n",
+ " - 19s - loss: 0.4089 - acc: 0.9327 - val_loss: 0.4198 - val_acc: 0.9193\n",
+ "\n",
+ "Epoch 00082: val_acc did not improve from 0.93773\n",
+ "Epoch 83/100000\n",
+ " - 19s - loss: 0.4096 - acc: 0.9330 - val_loss: 0.3905 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 00083: val_acc did not improve from 0.93773\n",
+ "Epoch 84/100000\n",
+ " - 19s - loss: 0.4063 - acc: 0.9332 - val_loss: 0.4277 - val_acc: 0.9073\n",
+ "\n",
+ "Epoch 00084: val_acc did not improve from 0.93773\n",
+ "Epoch 85/100000\n",
+ " - 19s - loss: 0.4064 - acc: 0.9324 - val_loss: 0.4027 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 00085: val_acc did not improve from 0.93773\n",
+ "Epoch 86/100000\n",
+ " - 19s - loss: 0.4039 - acc: 0.9337 - val_loss: 0.4472 - val_acc: 0.9010\n",
+ "\n",
+ "Epoch 00086: val_acc did not improve from 0.93773\n",
+ "Epoch 87/100000\n",
+ " - 19s - loss: 0.4078 - acc: 0.9336 - val_loss: 0.3936 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 00087: val_acc did not improve from 0.93773\n",
+ "Epoch 88/100000\n",
+ " - 18s - loss: 0.4056 - acc: 0.9333 - val_loss: 0.4580 - val_acc: 0.9131\n",
+ "\n",
+ "Epoch 00088: val_acc did not improve from 0.93773\n",
+ "Epoch 89/100000\n",
+ " - 19s - loss: 0.4075 - acc: 0.9333 - val_loss: 0.3881 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 00089: val_acc did not improve from 0.93773\n",
+ "Epoch 90/100000\n",
+ " - 19s - loss: 0.4054 - acc: 0.9340 - val_loss: 0.3874 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 00090: val_acc did not improve from 0.93773\n",
+ "Epoch 91/100000\n",
+ " - 19s - loss: 0.4079 - acc: 0.9332 - val_loss: 0.3935 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00091: val_acc did not improve from 0.93773\n",
+ "Epoch 92/100000\n",
+ " - 19s - loss: 0.4062 - acc: 0.9340 - val_loss: 0.4137 - val_acc: 0.9250\n",
+ "\n",
+ "Epoch 00092: val_acc did not improve from 0.93773\n",
+ "Epoch 93/100000\n",
+ " - 19s - loss: 0.4079 - acc: 0.9328 - val_loss: 0.6338 - val_acc: 0.8380\n",
+ "\n",
+ "Epoch 00093: val_acc did not improve from 0.93773\n",
+ "Epoch 94/100000\n",
+ " - 19s - loss: 0.4096 - acc: 0.9331 - val_loss: 0.3798 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 00094: val_acc improved from 0.93773 to 0.93817, saving model to ./ModelSnapshots/CNN-094.h5\n",
+ "Epoch 95/100000\n",
+ " - 19s - loss: 0.4054 - acc: 0.9335 - val_loss: 0.3844 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 00095: val_acc did not improve from 0.93817\n",
+ "Epoch 96/100000\n",
+ " - 19s - loss: 0.4075 - acc: 0.9331 - val_loss: 0.4462 - val_acc: 0.9086\n",
+ "\n",
+ "Epoch 00096: val_acc did not improve from 0.93817\n",
+ "Epoch 97/100000\n",
+ " - 19s - loss: 0.4097 - acc: 0.9325 - val_loss: 0.4847 - val_acc: 0.8882\n",
+ "\n",
+ "Epoch 00097: val_acc did not improve from 0.93817\n",
+ "Epoch 98/100000\n",
+ " - 20s - loss: 0.4076 - acc: 0.9339 - val_loss: 0.4225 - val_acc: 0.9201\n",
+ "\n",
+ "Epoch 00098: val_acc did not improve from 0.93817\n",
+ "Epoch 99/100000\n",
+ " - 19s - loss: 0.4072 - acc: 0.9334 - val_loss: 0.3894 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 00099: val_acc did not improve from 0.93817\n",
+ "Epoch 100/100000\n",
+ " - 19s - loss: 0.4018 - acc: 0.9346 - val_loss: 0.3839 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 00100: val_acc did not improve from 0.93817\n",
+ "Epoch 101/100000\n",
+ " - 19s - loss: 0.4070 - acc: 0.9328 - val_loss: 0.3996 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 00101: val_acc did not improve from 0.93817\n",
+ "Epoch 102/100000\n",
+ " - 19s - loss: 0.4043 - acc: 0.9342 - val_loss: 0.4956 - val_acc: 0.9097\n",
+ "\n",
+ "Epoch 00102: val_acc did not improve from 0.93817\n",
+ "Epoch 103/100000\n",
+ " - 19s - loss: 0.4066 - acc: 0.9332 - val_loss: 0.4098 - val_acc: 0.9266\n",
+ "\n",
+ "Epoch 00103: val_acc did not improve from 0.93817\n",
+ "Epoch 104/100000\n",
+ " - 19s - loss: 0.4034 - acc: 0.9344 - val_loss: 0.4489 - val_acc: 0.9175\n",
+ "\n",
+ "Epoch 00104: val_acc did not improve from 0.93817\n",
+ "Epoch 105/100000\n",
+ " - 19s - loss: 0.4046 - acc: 0.9340 - val_loss: 0.4827 - val_acc: 0.8891\n",
+ "\n",
+ "Epoch 00105: val_acc did not improve from 0.93817\n",
+ "Epoch 106/100000\n",
+ " - 19s - loss: 0.4058 - acc: 0.9343 - val_loss: 0.3853 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 00106: val_acc did not improve from 0.93817\n",
+ "Epoch 107/100000\n",
+ " - 19s - loss: 0.4066 - acc: 0.9336 - val_loss: 0.4043 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 00107: val_acc did not improve from 0.93817\n",
+ "Epoch 108/100000\n",
+ " - 19s - loss: 0.4074 - acc: 0.9341 - val_loss: 0.3846 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 00108: val_acc did not improve from 0.93817\n",
+ "Epoch 109/100000\n",
+ " - 19s - loss: 0.4036 - acc: 0.9355 - val_loss: 0.4847 - val_acc: 0.8968\n",
+ "\n",
+ "Epoch 00109: val_acc did not improve from 0.93817\n",
+ "Epoch 110/100000\n",
+ " - 19s - loss: 0.4043 - acc: 0.9343 - val_loss: 0.4023 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 00110: val_acc did not improve from 0.93817\n",
+ "Epoch 111/100000\n",
+ " - 19s - loss: 0.4075 - acc: 0.9342 - val_loss: 0.3929 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00111: val_acc did not improve from 0.93817\n",
+ "Epoch 112/100000\n",
+ " - 19s - loss: 0.4058 - acc: 0.9347 - val_loss: 0.4070 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 00112: val_acc did not improve from 0.93817\n",
+ "Epoch 113/100000\n",
+ " - 19s - loss: 0.4045 - acc: 0.9348 - val_loss: 0.4018 - val_acc: 0.9273\n",
+ "\n",
+ "Epoch 00113: val_acc did not improve from 0.93817\n",
+ "Epoch 114/100000\n",
+ " - 19s - loss: 0.4049 - acc: 0.9341 - val_loss: 0.3926 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 00114: val_acc did not improve from 0.93817\n",
+ "Epoch 115/100000\n",
+ " - 19s - loss: 0.4055 - acc: 0.9339 - val_loss: 0.3904 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 00115: val_acc did not improve from 0.93817\n",
+ "Epoch 116/100000\n",
+ " - 19s - loss: 0.4059 - acc: 0.9332 - val_loss: 0.4077 - val_acc: 0.9229\n",
+ "\n",
+ "Epoch 00116: val_acc did not improve from 0.93817\n",
+ "Epoch 117/100000\n",
+ " - 19s - loss: 0.4051 - acc: 0.9333 - val_loss: 0.4226 - val_acc: 0.9146\n",
+ "\n",
+ "Epoch 00117: val_acc did not improve from 0.93817\n",
+ "Epoch 118/100000\n",
+ " - 19s - loss: 0.4034 - acc: 0.9341 - val_loss: 0.3989 - val_acc: 0.9256\n",
+ "\n",
+ "Epoch 00118: val_acc did not improve from 0.93817\n",
+ "Epoch 119/100000\n",
+ " - 19s - loss: 0.4010 - acc: 0.9349 - val_loss: 0.3932 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 00119: val_acc did not improve from 0.93817\n",
+ "Epoch 120/100000\n",
+ " - 19s - loss: 0.4042 - acc: 0.9342 - val_loss: 0.7033 - val_acc: 0.8155\n",
+ "\n",
+ "Epoch 00120: val_acc did not improve from 0.93817\n",
+ "Epoch 121/100000\n",
+ " - 19s - loss: 0.4066 - acc: 0.9339 - val_loss: 0.4062 - val_acc: 0.9259\n",
+ "\n",
+ "Epoch 00121: val_acc did not improve from 0.93817\n",
+ "Epoch 122/100000\n",
+ " - 19s - loss: 0.4026 - acc: 0.9342 - val_loss: 0.3889 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 00122: val_acc did not improve from 0.93817\n",
+ "Epoch 123/100000\n",
+ " - 19s - loss: 0.4031 - acc: 0.9350 - val_loss: 0.4046 - val_acc: 0.9289\n",
+ "\n",
+ "Epoch 00123: val_acc did not improve from 0.93817\n",
+ "Epoch 124/100000\n",
+ " - 19s - loss: 0.4050 - acc: 0.9346 - val_loss: 0.3852 - val_acc: 0.9400\n",
+ "\n",
+ "Epoch 00124: val_acc improved from 0.93817 to 0.94001, saving model to ./ModelSnapshots/CNN-124.h5\n",
+ "Epoch 125/100000\n",
+ " - 19s - loss: 0.4052 - acc: 0.9337 - val_loss: 0.3885 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 00125: val_acc did not improve from 0.94001\n",
+ "Epoch 126/100000\n",
+ " - 19s - loss: 0.4039 - acc: 0.9345 - val_loss: 0.3948 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00126: val_acc did not improve from 0.94001\n",
+ "Epoch 127/100000\n",
+ " - 19s - loss: 0.4060 - acc: 0.9342 - val_loss: 0.4544 - val_acc: 0.9040\n",
+ "\n",
+ "Epoch 00127: val_acc did not improve from 0.94001\n",
+ "Epoch 128/100000\n",
+ " - 19s - loss: 0.4082 - acc: 0.9342 - val_loss: 0.4008 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 00128: val_acc did not improve from 0.94001\n",
+ "Epoch 129/100000\n",
+ " - 20s - loss: 0.4062 - acc: 0.9346 - val_loss: 0.4203 - val_acc: 0.9238\n",
+ "\n",
+ "Epoch 00129: val_acc did not improve from 0.94001\n",
+ "Epoch 130/100000\n",
+ " - 20s - loss: 0.4027 - acc: 0.9348 - val_loss: 0.3881 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 00130: val_acc did not improve from 0.94001\n",
+ "Epoch 131/100000\n",
+ " - 19s - loss: 0.4049 - acc: 0.9349 - val_loss: 0.3957 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 00131: val_acc did not improve from 0.94001\n",
+ "Epoch 132/100000\n",
+ " - 19s - loss: 0.4051 - acc: 0.9347 - val_loss: 0.4183 - val_acc: 0.9228\n",
+ "\n",
+ "Epoch 00132: val_acc did not improve from 0.94001\n",
+ "Epoch 133/100000\n",
+ " - 19s - loss: 0.4037 - acc: 0.9346 - val_loss: 0.4063 - val_acc: 0.9268\n",
+ "\n",
+ "Epoch 00133: val_acc did not improve from 0.94001\n",
+ "Epoch 134/100000\n",
+ " - 19s - loss: 0.4056 - acc: 0.9343 - val_loss: 0.4185 - val_acc: 0.9207\n",
+ "\n",
+ "Epoch 00134: val_acc did not improve from 0.94001\n",
+ "Epoch 135/100000\n",
+ " - 19s - loss: 0.4050 - acc: 0.9345 - val_loss: 0.4389 - val_acc: 0.9144\n",
+ "\n",
+ "Epoch 00135: val_acc did not improve from 0.94001\n",
+ "Epoch 136/100000\n",
+ " - 19s - loss: 0.4048 - acc: 0.9348 - val_loss: 0.3881 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 00136: val_acc did not improve from 0.94001\n",
+ "Epoch 137/100000\n",
+ " - 19s - loss: 0.4058 - acc: 0.9343 - val_loss: 0.4193 - val_acc: 0.9176\n",
+ "\n",
+ "Epoch 00137: val_acc did not improve from 0.94001\n",
+ "Epoch 138/100000\n",
+ " - 19s - loss: 0.4031 - acc: 0.9353 - val_loss: 0.4260 - val_acc: 0.9274\n",
+ "\n",
+ "Epoch 00138: val_acc did not improve from 0.94001\n",
+ "Epoch 139/100000\n",
+ " - 19s - loss: 0.4023 - acc: 0.9349 - val_loss: 0.3853 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 00139: val_acc did not improve from 0.94001\n",
+ "Epoch 140/100000\n",
+ " - 19s - loss: 0.4062 - acc: 0.9345 - val_loss: 0.3913 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 00140: val_acc did not improve from 0.94001\n",
+ "Epoch 141/100000\n",
+ " - 19s - loss: 0.4038 - acc: 0.9350 - val_loss: 0.3837 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 00141: val_acc did not improve from 0.94001\n",
+ "Epoch 142/100000\n",
+ " - 19s - loss: 0.4040 - acc: 0.9346 - val_loss: 0.3926 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 00142: val_acc did not improve from 0.94001\n",
+ "Epoch 143/100000\n",
+ " - 19s - loss: 0.4034 - acc: 0.9347 - val_loss: 0.6062 - val_acc: 0.8273\n",
+ "\n",
+ "Epoch 00143: val_acc did not improve from 0.94001\n",
+ "Epoch 144/100000\n",
+ " - 19s - loss: 0.4063 - acc: 0.9343 - val_loss: 0.3798 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 00144: val_acc did not improve from 0.94001\n",
+ "Epoch 145/100000\n",
+ " - 19s - loss: 0.4047 - acc: 0.9352 - val_loss: 0.3861 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 00145: val_acc did not improve from 0.94001\n",
+ "Epoch 146/100000\n",
+ " - 19s - loss: 0.4028 - acc: 0.9352 - val_loss: 0.7171 - val_acc: 0.7992\n",
+ "\n",
+ "Epoch 00146: val_acc did not improve from 0.94001\n",
+ "Epoch 147/100000\n",
+ " - 19s - loss: 0.4042 - acc: 0.9352 - val_loss: 0.4404 - val_acc: 0.9158\n",
+ "\n",
+ "Epoch 00147: val_acc did not improve from 0.94001\n",
+ "Epoch 148/100000\n",
+ " - 20s - loss: 0.4044 - acc: 0.9348 - val_loss: 0.3975 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 00148: val_acc did not improve from 0.94001\n",
+ "Epoch 149/100000\n",
+ " - 19s - loss: 0.4013 - acc: 0.9355 - val_loss: 0.3931 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 00149: val_acc did not improve from 0.94001\n",
+ "Epoch 150/100000\n",
+ " - 19s - loss: 0.4066 - acc: 0.9340 - val_loss: 0.4519 - val_acc: 0.9091\n",
+ "\n",
+ "Epoch 00150: val_acc did not improve from 0.94001\n",
+ "Epoch 151/100000\n",
+ " - 19s - loss: 0.4036 - acc: 0.9349 - val_loss: 0.3926 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 00151: val_acc did not improve from 0.94001\n",
+ "Epoch 152/100000\n",
+ " - 18s - loss: 0.4053 - acc: 0.9345 - val_loss: 0.4576 - val_acc: 0.8998\n",
+ "\n",
+ "Epoch 00152: val_acc did not improve from 0.94001\n",
+ "Epoch 153/100000\n",
+ " - 19s - loss: 0.4064 - acc: 0.9338 - val_loss: 0.3841 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 00153: val_acc did not improve from 0.94001\n",
+ "Epoch 154/100000\n",
+ " - 19s - loss: 0.4039 - acc: 0.9346 - val_loss: 0.3943 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 00154: val_acc did not improve from 0.94001\n",
+ "Epoch 155/100000\n",
+ " - 19s - loss: 0.4018 - acc: 0.9355 - val_loss: 0.4134 - val_acc: 0.9281\n",
+ "\n",
+ "Epoch 00155: val_acc did not improve from 0.94001\n",
+ "Epoch 156/100000\n",
+ " - 19s - loss: 0.4061 - acc: 0.9344 - val_loss: 0.3877 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 00156: val_acc did not improve from 0.94001\n",
+ "Epoch 157/100000\n",
+ " - 19s - loss: 0.4050 - acc: 0.9347 - val_loss: 0.4376 - val_acc: 0.9163\n",
+ "\n",
+ "Epoch 00157: val_acc did not improve from 0.94001\n",
+ "Epoch 158/100000\n",
+ " - 19s - loss: 0.4067 - acc: 0.9346 - val_loss: 0.4217 - val_acc: 0.9178\n",
+ "\n",
+ "Epoch 00158: val_acc did not improve from 0.94001\n",
+ "Epoch 159/100000\n",
+ " - 18s - loss: 0.4059 - acc: 0.9350 - val_loss: 0.3867 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 00159: val_acc did not improve from 0.94001\n",
+ "Epoch 160/100000\n",
+ " - 19s - loss: 0.4033 - acc: 0.9346 - val_loss: 0.4266 - val_acc: 0.9223\n",
+ "\n",
+ "Epoch 00160: val_acc did not improve from 0.94001\n",
+ "Epoch 161/100000\n",
+ " - 19s - loss: 0.4081 - acc: 0.9344 - val_loss: 0.4022 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 00161: val_acc did not improve from 0.94001\n",
+ "Epoch 162/100000\n",
+ " - 19s - loss: 0.4047 - acc: 0.9348 - val_loss: 0.3967 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 00162: val_acc did not improve from 0.94001\n",
+ "Epoch 163/100000\n",
+ " - 19s - loss: 0.4053 - acc: 0.9340 - val_loss: 0.4013 - val_acc: 0.9269\n",
+ "\n",
+ "Epoch 00163: val_acc did not improve from 0.94001\n",
+ "Epoch 164/100000\n",
+ " - 19s - loss: 0.4024 - acc: 0.9351 - val_loss: 0.3972 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 00164: val_acc did not improve from 0.94001\n",
+ "Epoch 165/100000\n",
+ " - 19s - loss: 0.4046 - acc: 0.9351 - val_loss: 0.3931 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 00165: val_acc did not improve from 0.94001\n",
+ "Epoch 166/100000\n",
+ " - 19s - loss: 0.4044 - acc: 0.9351 - val_loss: 0.3920 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 00166: val_acc did not improve from 0.94001\n",
+ "Epoch 167/100000\n",
+ " - 19s - loss: 0.4069 - acc: 0.9351 - val_loss: 0.3930 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 00167: val_acc did not improve from 0.94001\n",
+ "Epoch 168/100000\n",
+ " - 19s - loss: 0.4038 - acc: 0.9362 - val_loss: 0.3903 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 00168: val_acc did not improve from 0.94001\n",
+ "Epoch 169/100000\n",
+ " - 19s - loss: 0.4116 - acc: 0.9342 - val_loss: 0.4017 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 00169: val_acc did not improve from 0.94001\n",
+ "Epoch 170/100000\n",
+ " - 19s - loss: 0.4063 - acc: 0.9350 - val_loss: 0.5106 - val_acc: 0.8779\n",
+ "\n",
+ "Epoch 00170: val_acc did not improve from 0.94001\n",
+ "Epoch 171/100000\n",
+ " - 19s - loss: 0.4066 - acc: 0.9343 - val_loss: 0.3971 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 00171: val_acc did not improve from 0.94001\n",
+ "Epoch 172/100000\n",
+ " - 19s - loss: 0.4063 - acc: 0.9344 - val_loss: 0.3943 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 00172: val_acc did not improve from 0.94001\n",
+ "Epoch 173/100000\n",
+ " - 19s - loss: 0.4046 - acc: 0.9355 - val_loss: 0.4040 - val_acc: 0.9304\n",
+ "\n",
+ "Epoch 00173: val_acc did not improve from 0.94001\n",
+ "Epoch 174/100000\n",
+ " - 19s - loss: 0.4047 - acc: 0.9351 - val_loss: 0.3935 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 00174: val_acc did not improve from 0.94001\n",
+ "Epoch 175/100000\n",
+ " - 19s - loss: 0.4063 - acc: 0.9345 - val_loss: 0.4480 - val_acc: 0.9054\n",
+ "\n",
+ "Epoch 00175: val_acc did not improve from 0.94001\n",
+ "Epoch 176/100000\n",
+ " - 19s - loss: 0.4079 - acc: 0.9340 - val_loss: 0.3912 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 00176: val_acc did not improve from 0.94001\n",
+ "Epoch 177/100000\n",
+ " - 19s - loss: 0.4044 - acc: 0.9352 - val_loss: 0.4139 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 00177: val_acc did not improve from 0.94001\n",
+ "Epoch 178/100000\n",
+ " - 19s - loss: 0.4071 - acc: 0.9350 - val_loss: 0.5654 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00178: val_acc did not improve from 0.94001\n",
+ "Epoch 179/100000\n",
+ " - 19s - loss: 0.4083 - acc: 0.9349 - val_loss: 0.4514 - val_acc: 0.9079\n",
+ "\n",
+ "Epoch 00179: val_acc did not improve from 0.94001\n",
+ "Epoch 180/100000\n",
+ " - 19s - loss: 0.4057 - acc: 0.9348 - val_loss: 0.3918 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 00180: val_acc did not improve from 0.94001\n",
+ "Epoch 181/100000\n",
+ " - 18s - loss: 0.4035 - acc: 0.9356 - val_loss: 0.4328 - val_acc: 0.9174\n",
+ "\n",
+ "Epoch 00181: val_acc did not improve from 0.94001\n",
+ "Epoch 182/100000\n",
+ " - 19s - loss: 0.4064 - acc: 0.9345 - val_loss: 0.4011 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 00182: val_acc did not improve from 0.94001\n",
+ "Epoch 183/100000\n",
+ " - 18s - loss: 0.4069 - acc: 0.9349 - val_loss: 0.4212 - val_acc: 0.9218\n",
+ "\n",
+ "Epoch 00183: val_acc did not improve from 0.94001\n",
+ "Epoch 184/100000\n",
+ " - 18s - loss: 0.4054 - acc: 0.9346 - val_loss: 0.4712 - val_acc: 0.9023\n",
+ "\n",
+ "Epoch 00184: val_acc did not improve from 0.94001\n",
+ "Epoch 185/100000\n",
+ " - 19s - loss: 0.4077 - acc: 0.9348 - val_loss: 0.4396 - val_acc: 0.9240\n",
+ "\n",
+ "Epoch 00185: val_acc did not improve from 0.94001\n",
+ "Epoch 186/100000\n",
+ " - 19s - loss: 0.4068 - acc: 0.9348 - val_loss: 0.3882 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 00186: val_acc did not improve from 0.94001\n",
+ "Epoch 187/100000\n",
+ " - 18s - loss: 0.4056 - acc: 0.9346 - val_loss: 0.4323 - val_acc: 0.9222\n",
+ "\n",
+ "Epoch 00187: val_acc did not improve from 0.94001\n",
+ "Epoch 188/100000\n",
+ " - 19s - loss: 0.4043 - acc: 0.9350 - val_loss: 0.4514 - val_acc: 0.9051\n",
+ "\n",
+ "Epoch 00188: val_acc did not improve from 0.94001\n",
+ "Epoch 189/100000\n",
+ " - 19s - loss: 0.4041 - acc: 0.9353 - val_loss: 0.4244 - val_acc: 0.9177\n",
+ "\n",
+ "Epoch 00189: val_acc did not improve from 0.94001\n",
+ "Epoch 190/100000\n",
+ " - 18s - loss: 0.4080 - acc: 0.9352 - val_loss: 0.3979 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 00190: val_acc did not improve from 0.94001\n",
+ "Epoch 191/100000\n",
+ " - 19s - loss: 0.4092 - acc: 0.9340 - val_loss: 0.4226 - val_acc: 0.9241\n",
+ "\n",
+ "Epoch 00191: val_acc did not improve from 0.94001\n",
+ "Epoch 192/100000\n",
+ " - 18s - loss: 0.4050 - acc: 0.9345 - val_loss: 0.4195 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 00192: val_acc did not improve from 0.94001\n",
+ "Epoch 193/100000\n",
+ " - 19s - loss: 0.4084 - acc: 0.9343 - val_loss: 0.4304 - val_acc: 0.9150\n",
+ "\n",
+ "Epoch 00193: val_acc did not improve from 0.94001\n",
+ "Epoch 194/100000\n",
+ " - 19s - loss: 0.4068 - acc: 0.9352 - val_loss: 0.4290 - val_acc: 0.9168\n",
+ "\n",
+ "Epoch 00194: val_acc did not improve from 0.94001\n",
+ "Epoch 195/100000\n",
+ " - 18s - loss: 0.4046 - acc: 0.9353 - val_loss: 0.3933 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 00195: val_acc did not improve from 0.94001\n",
+ "Epoch 196/100000\n",
+ " - 19s - loss: 0.4057 - acc: 0.9354 - val_loss: 0.3930 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00196: val_acc did not improve from 0.94001\n",
+ "Epoch 197/100000\n",
+ " - 19s - loss: 0.4042 - acc: 0.9353 - val_loss: 0.3876 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 00197: val_acc did not improve from 0.94001\n",
+ "Epoch 198/100000\n",
+ " - 19s - loss: 0.4050 - acc: 0.9355 - val_loss: 0.4373 - val_acc: 0.9078\n",
+ "\n",
+ "Epoch 00198: val_acc did not improve from 0.94001\n",
+ "Epoch 199/100000\n",
+ " - 19s - loss: 0.4090 - acc: 0.9347 - val_loss: 0.3874 - val_acc: 0.9386\n",
+ "\n",
+ "Epoch 00199: val_acc did not improve from 0.94001\n",
+ "Epoch 200/100000\n",
+ " - 19s - loss: 0.4077 - acc: 0.9345 - val_loss: 0.3863 - val_acc: 0.9386\n",
+ "\n",
+ "Epoch 00200: val_acc did not improve from 0.94001\n",
+ "Epoch 201/100000\n",
+ " - 19s - loss: 0.4075 - acc: 0.9353 - val_loss: 0.4163 - val_acc: 0.9216\n",
+ "\n",
+ "Epoch 00201: val_acc did not improve from 0.94001\n",
+ "Epoch 202/100000\n",
+ " - 19s - loss: 0.4082 - acc: 0.9346 - val_loss: 0.4323 - val_acc: 0.9231\n",
+ "\n",
+ "Epoch 00202: val_acc did not improve from 0.94001\n",
+ "Epoch 203/100000\n",
+ " - 18s - loss: 0.4042 - acc: 0.9353 - val_loss: 0.4121 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 00203: val_acc did not improve from 0.94001\n",
+ "Epoch 204/100000\n",
+ " - 19s - loss: 0.4067 - acc: 0.9346 - val_loss: 0.4085 - val_acc: 0.9257\n",
+ "\n",
+ "Epoch 00204: val_acc did not improve from 0.94001\n",
+ "Epoch 205/100000\n",
+ " - 18s - loss: 0.4089 - acc: 0.9341 - val_loss: 0.4019 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 00205: val_acc did not improve from 0.94001\n",
+ "Epoch 206/100000\n",
+ " - 19s - loss: 0.4062 - acc: 0.9346 - val_loss: 0.3963 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 00206: val_acc did not improve from 0.94001\n",
+ "Epoch 207/100000\n",
+ " - 19s - loss: 0.4070 - acc: 0.9352 - val_loss: 0.3899 - val_acc: 0.9398\n",
+ "\n",
+ "Epoch 00207: val_acc did not improve from 0.94001\n",
+ "Epoch 208/100000\n",
+ " - 19s - loss: 0.4096 - acc: 0.9352 - val_loss: 0.3982 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 00208: val_acc did not improve from 0.94001\n",
+ "Epoch 209/100000\n",
+ " - 19s - loss: 0.4100 - acc: 0.9337 - val_loss: 0.4350 - val_acc: 0.9122\n",
+ "\n",
+ "Epoch 00209: val_acc did not improve from 0.94001\n",
+ "Epoch 210/100000\n",
+ " - 19s - loss: 0.4054 - acc: 0.9352 - val_loss: 0.5032 - val_acc: 0.8937\n",
+ "\n",
+ "Epoch 00210: val_acc did not improve from 0.94001\n",
+ "Epoch 211/100000\n",
+ " - 19s - loss: 0.4053 - acc: 0.9352 - val_loss: 0.4182 - val_acc: 0.9237\n",
+ "\n",
+ "Epoch 00211: val_acc did not improve from 0.94001\n",
+ "Epoch 212/100000\n",
+ " - 18s - loss: 0.4083 - acc: 0.9349 - val_loss: 0.3891 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 00212: val_acc did not improve from 0.94001\n",
+ "Epoch 213/100000\n",
+ " - 19s - loss: 0.4043 - acc: 0.9358 - val_loss: 0.4356 - val_acc: 0.9087\n",
+ "\n",
+ "Epoch 00213: val_acc did not improve from 0.94001\n",
+ "Epoch 214/100000\n",
+ " - 18s - loss: 0.4056 - acc: 0.9347 - val_loss: 0.4195 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 00214: val_acc did not improve from 0.94001\n",
+ "Epoch 215/100000\n",
+ " - 19s - loss: 0.4049 - acc: 0.9344 - val_loss: 0.6084 - val_acc: 0.8258\n",
+ "\n",
+ "Epoch 00215: val_acc did not improve from 0.94001\n",
+ "Epoch 216/100000\n",
+ " - 19s - loss: 0.4108 - acc: 0.9343 - val_loss: 0.4112 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 00216: val_acc did not improve from 0.94001\n",
+ "Epoch 217/100000\n",
+ " - 18s - loss: 0.4115 - acc: 0.9340 - val_loss: 0.3907 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 00217: val_acc did not improve from 0.94001\n",
+ "Epoch 218/100000\n",
+ " - 19s - loss: 0.4093 - acc: 0.9350 - val_loss: 0.5302 - val_acc: 0.8689\n",
+ "\n",
+ "Epoch 00218: val_acc did not improve from 0.94001\n",
+ "Epoch 219/100000\n",
+ " - 18s - loss: 0.4070 - acc: 0.9349 - val_loss: 0.4213 - val_acc: 0.9148\n",
+ "\n",
+ "Epoch 00219: val_acc did not improve from 0.94001\n",
+ "Epoch 220/100000\n",
+ " - 19s - loss: 0.4057 - acc: 0.9344 - val_loss: 0.4425 - val_acc: 0.9089\n",
+ "\n",
+ "Epoch 00220: val_acc did not improve from 0.94001\n",
+ "Epoch 221/100000\n",
+ " - 18s - loss: 0.4078 - acc: 0.9340 - val_loss: 0.5564 - val_acc: 0.8558\n",
+ "\n",
+ "Epoch 00221: val_acc did not improve from 0.94001\n",
+ "Epoch 222/100000\n",
+ " - 19s - loss: 0.4050 - acc: 0.9345 - val_loss: 0.4350 - val_acc: 0.9088\n",
+ "\n",
+ "Epoch 00222: val_acc did not improve from 0.94001\n",
+ "Epoch 223/100000\n",
+ " - 19s - loss: 0.4054 - acc: 0.9348 - val_loss: 0.4403 - val_acc: 0.9155\n",
+ "\n",
+ "Epoch 00223: val_acc did not improve from 0.94001\n",
+ "Epoch 224/100000\n",
+ " - 19s - loss: 0.4058 - acc: 0.9351 - val_loss: 0.4111 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 00224: val_acc did not improve from 0.94001\n",
+ "Epoch 225/100000\n",
+ " - 18s - loss: 0.4075 - acc: 0.9342 - val_loss: 0.4473 - val_acc: 0.9033\n",
+ "\n",
+ "Epoch 00225: val_acc did not improve from 0.94001\n",
+ "Epoch 226/100000\n",
+ " - 19s - loss: 0.4074 - acc: 0.9340 - val_loss: 0.4620 - val_acc: 0.9073\n",
+ "\n",
+ "Epoch 00226: val_acc did not improve from 0.94001\n",
+ "Epoch 227/100000\n",
+ " - 19s - loss: 0.4047 - acc: 0.9353 - val_loss: 0.3895 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 00227: val_acc did not improve from 0.94001\n",
+ "Epoch 228/100000\n",
+ " - 18s - loss: 0.4057 - acc: 0.9345 - val_loss: 0.3968 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 00228: val_acc did not improve from 0.94001\n",
+ "Epoch 229/100000\n",
+ " - 19s - loss: 0.4063 - acc: 0.9340 - val_loss: 0.5012 - val_acc: 0.8788\n",
+ "\n",
+ "Epoch 00229: val_acc did not improve from 0.94001\n",
+ "Epoch 230/100000\n",
+ " - 19s - loss: 0.4055 - acc: 0.9351 - val_loss: 0.3875 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 00230: val_acc did not improve from 0.94001\n",
+ "Epoch 231/100000\n",
+ " - 19s - loss: 0.4070 - acc: 0.9348 - val_loss: 0.4155 - val_acc: 0.9180\n",
+ "\n",
+ "Epoch 00231: val_acc did not improve from 0.94001\n",
+ "Epoch 232/100000\n",
+ " - 19s - loss: 0.4087 - acc: 0.9341 - val_loss: 0.4521 - val_acc: 0.9038\n",
+ "\n",
+ "Epoch 00232: val_acc did not improve from 0.94001\n",
+ "Epoch 233/100000\n",
+ " - 19s - loss: 0.4078 - acc: 0.9341 - val_loss: 0.4023 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 00233: val_acc did not improve from 0.94001\n",
+ "Epoch 234/100000\n",
+ " - 19s - loss: 0.4039 - acc: 0.9347 - val_loss: 0.3896 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 00234: val_acc did not improve from 0.94001\n",
+ "\n",
+ "Epoch 00234: ReduceLROnPlateau reducing learning rate to 0.0009500000451225787.\n",
+ "Epoch 235/100000\n",
+ " - 18s - loss: 0.3999 - acc: 0.9338 - val_loss: 0.3818 - val_acc: 0.9386\n",
+ "\n",
+ "Epoch 00235: val_acc did not improve from 0.94001\n",
+ "Epoch 236/100000\n",
+ " - 19s - loss: 0.3963 - acc: 0.9350 - val_loss: 0.4515 - val_acc: 0.8875\n",
+ "\n",
+ "Epoch 00236: val_acc did not improve from 0.94001\n",
+ "Epoch 237/100000\n",
+ " - 19s - loss: 0.3957 - acc: 0.9354 - val_loss: 0.4109 - val_acc: 0.9161\n",
+ "\n",
+ "Epoch 00237: val_acc did not improve from 0.94001\n",
+ "Epoch 238/100000\n",
+ " - 20s - loss: 0.3957 - acc: 0.9353 - val_loss: 0.3963 - val_acc: 0.9250\n",
+ "\n",
+ "Epoch 00238: val_acc did not improve from 0.94001\n",
+ "Epoch 239/100000\n",
+ " - 19s - loss: 0.3949 - acc: 0.9346 - val_loss: 0.3813 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 00239: val_acc did not improve from 0.94001\n",
+ "Epoch 240/100000\n",
+ " - 19s - loss: 0.3967 - acc: 0.9353 - val_loss: 0.4712 - val_acc: 0.8910\n",
+ "\n",
+ "Epoch 00240: val_acc did not improve from 0.94001\n",
+ "Epoch 241/100000\n",
+ " - 18s - loss: 0.3932 - acc: 0.9360 - val_loss: 0.4536 - val_acc: 0.8974\n",
+ "\n",
+ "Epoch 00241: val_acc did not improve from 0.94001\n",
+ "Epoch 242/100000\n",
+ " - 19s - loss: 0.3959 - acc: 0.9354 - val_loss: 0.4080 - val_acc: 0.9175\n",
+ "\n",
+ "Epoch 00242: val_acc did not improve from 0.94001\n",
+ "Epoch 243/100000\n",
+ " - 19s - loss: 0.3991 - acc: 0.9340 - val_loss: 0.3888 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 00243: val_acc did not improve from 0.94001\n",
+ "Epoch 244/100000\n",
+ " - 19s - loss: 0.3988 - acc: 0.9338 - val_loss: 0.3851 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 00244: val_acc did not improve from 0.94001\n",
+ "Epoch 245/100000\n",
+ " - 19s - loss: 0.3952 - acc: 0.9355 - val_loss: 0.4672 - val_acc: 0.8829\n",
+ "\n",
+ "Epoch 00245: val_acc did not improve from 0.94001\n",
+ "Epoch 246/100000\n",
+ " - 19s - loss: 0.3952 - acc: 0.9356 - val_loss: 0.3993 - val_acc: 0.9198\n",
+ "\n",
+ "Epoch 00246: val_acc did not improve from 0.94001\n",
+ "Epoch 247/100000\n",
+ " - 19s - loss: 0.3936 - acc: 0.9355 - val_loss: 0.3858 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00247: val_acc did not improve from 0.94001\n",
+ "Epoch 248/100000\n",
+ " - 19s - loss: 0.3959 - acc: 0.9349 - val_loss: 0.4098 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 00248: val_acc did not improve from 0.94001\n",
+ "Epoch 249/100000\n",
+ " - 19s - loss: 0.3936 - acc: 0.9351 - val_loss: 0.7285 - val_acc: 0.7529\n",
+ "\n",
+ "Epoch 00249: val_acc did not improve from 0.94001\n",
+ "Epoch 250/100000\n",
+ " - 19s - loss: 0.3956 - acc: 0.9344 - val_loss: 0.5881 - val_acc: 0.8154\n",
+ "\n",
+ "Epoch 00250: val_acc did not improve from 0.94001\n",
+ "Epoch 251/100000\n",
+ " - 19s - loss: 0.3964 - acc: 0.9343 - val_loss: 0.4024 - val_acc: 0.9281\n",
+ "\n",
+ "Epoch 00251: val_acc did not improve from 0.94001\n",
+ "Epoch 252/100000\n",
+ " - 18s - loss: 0.3950 - acc: 0.9346 - val_loss: 0.4116 - val_acc: 0.9261\n",
+ "\n",
+ "Epoch 00252: val_acc did not improve from 0.94001\n",
+ "Epoch 253/100000\n",
+ " - 19s - loss: 0.3923 - acc: 0.9358 - val_loss: 0.3834 - val_acc: 0.9323\n",
+ "\n",
+ "Epoch 00253: val_acc did not improve from 0.94001\n",
+ "Epoch 254/100000\n",
+ " - 19s - loss: 0.3927 - acc: 0.9358 - val_loss: 0.3759 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 00254: val_acc did not improve from 0.94001\n",
+ "Epoch 255/100000\n",
+ " - 19s - loss: 0.3951 - acc: 0.9345 - val_loss: 0.6460 - val_acc: 0.8246\n",
+ "\n",
+ "Epoch 00255: val_acc did not improve from 0.94001\n",
+ "Epoch 256/100000\n",
+ " - 19s - loss: 0.3931 - acc: 0.9351 - val_loss: 0.3781 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 00256: val_acc did not improve from 0.94001\n",
+ "Epoch 257/100000\n",
+ " - 19s - loss: 0.3945 - acc: 0.9349 - val_loss: 0.3822 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 00257: val_acc did not improve from 0.94001\n",
+ "Epoch 258/100000\n",
+ " - 19s - loss: 0.3909 - acc: 0.9350 - val_loss: 0.3879 - val_acc: 0.9309\n",
+ "\n",
+ "Epoch 00258: val_acc did not improve from 0.94001\n",
+ "Epoch 259/100000\n",
+ " - 19s - loss: 0.3951 - acc: 0.9346 - val_loss: 0.4041 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 00259: val_acc did not improve from 0.94001\n",
+ "Epoch 260/100000\n",
+ " - 19s - loss: 0.3962 - acc: 0.9344 - val_loss: 0.3822 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 00260: val_acc did not improve from 0.94001\n",
+ "Epoch 261/100000\n",
+ " - 19s - loss: 0.3928 - acc: 0.9355 - val_loss: 0.4050 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 00261: val_acc did not improve from 0.94001\n",
+ "Epoch 262/100000\n",
+ " - 19s - loss: 0.3973 - acc: 0.9340 - val_loss: 0.3784 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 00262: val_acc did not improve from 0.94001\n",
+ "Epoch 263/100000\n",
+ " - 19s - loss: 0.3965 - acc: 0.9344 - val_loss: 0.3824 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 00263: val_acc did not improve from 0.94001\n",
+ "Epoch 264/100000\n",
+ " - 19s - loss: 0.3953 - acc: 0.9341 - val_loss: 0.3855 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00264: val_acc did not improve from 0.94001\n",
+ "Epoch 265/100000\n",
+ " - 19s - loss: 0.3925 - acc: 0.9350 - val_loss: 0.3810 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 00265: val_acc did not improve from 0.94001\n",
+ "Epoch 266/100000\n",
+ " - 19s - loss: 0.3953 - acc: 0.9344 - val_loss: 0.3863 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 00266: val_acc did not improve from 0.94001\n",
+ "Epoch 267/100000\n",
+ " - 19s - loss: 0.3946 - acc: 0.9345 - val_loss: 0.3758 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 00267: val_acc did not improve from 0.94001\n",
+ "Epoch 268/100000\n",
+ " - 19s - loss: 0.3936 - acc: 0.9353 - val_loss: 0.3837 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 00268: val_acc did not improve from 0.94001\n",
+ "Epoch 269/100000\n",
+ " - 19s - loss: 0.3935 - acc: 0.9351 - val_loss: 0.3979 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 00269: val_acc did not improve from 0.94001\n",
+ "Epoch 270/100000\n",
+ " - 19s - loss: 0.3910 - acc: 0.9350 - val_loss: 0.3745 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 00270: val_acc did not improve from 0.94001\n",
+ "Epoch 271/100000\n",
+ " - 19s - loss: 0.3931 - acc: 0.9346 - val_loss: 0.3989 - val_acc: 0.9223\n",
+ "\n",
+ "Epoch 00271: val_acc did not improve from 0.94001\n",
+ "Epoch 272/100000\n",
+ " - 19s - loss: 0.3926 - acc: 0.9347 - val_loss: 0.5013 - val_acc: 0.8768\n",
+ "\n",
+ "Epoch 00272: val_acc did not improve from 0.94001\n",
+ "Epoch 273/100000\n",
+ " - 19s - loss: 0.3951 - acc: 0.9344 - val_loss: 0.3885 - val_acc: 0.9395\n",
+ "\n",
+ "Epoch 00273: val_acc did not improve from 0.94001\n",
+ "Epoch 274/100000\n",
+ " - 19s - loss: 0.3927 - acc: 0.9351 - val_loss: 0.4362 - val_acc: 0.8934\n",
+ "\n",
+ "Epoch 00274: val_acc did not improve from 0.94001\n",
+ "Epoch 275/100000\n",
+ " - 19s - loss: 0.3941 - acc: 0.9343 - val_loss: 0.3803 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 00275: val_acc did not improve from 0.94001\n",
+ "Epoch 276/100000\n",
+ " - 19s - loss: 0.3939 - acc: 0.9344 - val_loss: 0.3769 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 00276: val_acc did not improve from 0.94001\n",
+ "Epoch 277/100000\n",
+ " - 19s - loss: 0.3940 - acc: 0.9347 - val_loss: 0.4209 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 00277: val_acc did not improve from 0.94001\n",
+ "Epoch 278/100000\n",
+ " - 19s - loss: 0.3951 - acc: 0.9351 - val_loss: 0.3839 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 00278: val_acc did not improve from 0.94001\n",
+ "Epoch 279/100000\n",
+ " - 19s - loss: 0.3933 - acc: 0.9353 - val_loss: 0.4531 - val_acc: 0.8966\n",
+ "\n",
+ "Epoch 00279: val_acc did not improve from 0.94001\n",
+ "Epoch 280/100000\n",
+ " - 19s - loss: 0.3966 - acc: 0.9338 - val_loss: 0.4798 - val_acc: 0.9050\n",
+ "\n",
+ "Epoch 00280: val_acc did not improve from 0.94001\n",
+ "Epoch 281/100000\n",
+ " - 19s - loss: 0.3929 - acc: 0.9349 - val_loss: 0.3854 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00281: val_acc did not improve from 0.94001\n",
+ "Epoch 282/100000\n",
+ " - 19s - loss: 0.3959 - acc: 0.9346 - val_loss: 0.3824 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 00282: val_acc did not improve from 0.94001\n",
+ "Epoch 283/100000\n",
+ " - 19s - loss: 0.3933 - acc: 0.9348 - val_loss: 0.4169 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 00283: val_acc did not improve from 0.94001\n",
+ "Epoch 284/100000\n",
+ " - 19s - loss: 0.3967 - acc: 0.9346 - val_loss: 0.3699 - val_acc: 0.9394\n",
+ "\n",
+ "Epoch 00284: val_acc did not improve from 0.94001\n",
+ "Epoch 285/100000\n",
+ " - 19s - loss: 0.3967 - acc: 0.9342 - val_loss: 0.3907 - val_acc: 0.9323\n",
+ "\n",
+ "Epoch 00285: val_acc did not improve from 0.94001\n",
+ "Epoch 286/100000\n",
+ " - 19s - loss: 0.3922 - acc: 0.9353 - val_loss: 0.3721 - val_acc: 0.9386\n",
+ "\n",
+ "Epoch 00286: val_acc did not improve from 0.94001\n",
+ "Epoch 287/100000\n",
+ " - 19s - loss: 0.3950 - acc: 0.9348 - val_loss: 0.3805 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 00287: val_acc did not improve from 0.94001\n",
+ "Epoch 288/100000\n",
+ " - 19s - loss: 0.3988 - acc: 0.9337 - val_loss: 0.4103 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 00288: val_acc did not improve from 0.94001\n",
+ "Epoch 289/100000\n",
+ " - 19s - loss: 0.3923 - acc: 0.9350 - val_loss: 0.6771 - val_acc: 0.7532\n",
+ "\n",
+ "Epoch 00289: val_acc did not improve from 0.94001\n",
+ "Epoch 290/100000\n",
+ " - 19s - loss: 0.3953 - acc: 0.9342 - val_loss: 0.3949 - val_acc: 0.9258\n",
+ "\n",
+ "Epoch 00290: val_acc did not improve from 0.94001\n",
+ "Epoch 291/100000\n",
+ " - 19s - loss: 0.3964 - acc: 0.9346 - val_loss: 0.3964 - val_acc: 0.9282\n",
+ "\n",
+ "Epoch 00291: val_acc did not improve from 0.94001\n",
+ "Epoch 292/100000\n",
+ " - 19s - loss: 0.3964 - acc: 0.9346 - val_loss: 0.4180 - val_acc: 0.9215\n",
+ "\n",
+ "Epoch 00292: val_acc did not improve from 0.94001\n",
+ "Epoch 293/100000\n",
+ " - 19s - loss: 0.3980 - acc: 0.9333 - val_loss: 0.3960 - val_acc: 0.9231\n",
+ "\n",
+ "Epoch 00293: val_acc did not improve from 0.94001\n",
+ "Epoch 294/100000\n",
+ " - 19s - loss: 0.3964 - acc: 0.9341 - val_loss: 0.5895 - val_acc: 0.8239\n",
+ "\n",
+ "Epoch 00294: val_acc did not improve from 0.94001\n",
+ "Epoch 295/100000\n",
+ " - 19s - loss: 0.3968 - acc: 0.9340 - val_loss: 0.4205 - val_acc: 0.9074\n",
+ "\n",
+ "Epoch 00295: val_acc did not improve from 0.94001\n",
+ "Epoch 296/100000\n",
+ " - 18s - loss: 0.3944 - acc: 0.9349 - val_loss: 0.3864 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 00296: val_acc did not improve from 0.94001\n",
+ "Epoch 297/100000\n",
+ " - 19s - loss: 0.3945 - acc: 0.9343 - val_loss: 0.4374 - val_acc: 0.9185\n",
+ "\n",
+ "Epoch 00297: val_acc did not improve from 0.94001\n",
+ "Epoch 298/100000\n",
+ " - 19s - loss: 0.3960 - acc: 0.9348 - val_loss: 0.3795 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 00298: val_acc did not improve from 0.94001\n",
+ "Epoch 299/100000\n",
+ " - 19s - loss: 0.3981 - acc: 0.9342 - val_loss: 0.3761 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 00299: val_acc did not improve from 0.94001\n",
+ "Epoch 300/100000\n",
+ " - 18s - loss: 0.3946 - acc: 0.9354 - val_loss: 0.3748 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 00300: val_acc did not improve from 0.94001\n",
+ "Epoch 301/100000\n",
+ " - 19s - loss: 0.3937 - acc: 0.9351 - val_loss: 0.4768 - val_acc: 0.8688\n",
+ "\n",
+ "Epoch 00301: val_acc did not improve from 0.94001\n",
+ "Epoch 302/100000\n",
+ " - 19s - loss: 0.3950 - acc: 0.9350 - val_loss: 0.3795 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 00302: val_acc did not improve from 0.94001\n",
+ "Epoch 303/100000\n",
+ " - 19s - loss: 0.3967 - acc: 0.9351 - val_loss: 0.5872 - val_acc: 0.8118\n",
+ "\n",
+ "Epoch 00303: val_acc did not improve from 0.94001\n",
+ "Epoch 304/100000\n",
+ " - 19s - loss: 0.3973 - acc: 0.9352 - val_loss: 0.4045 - val_acc: 0.9153\n",
+ "\n",
+ "Epoch 00304: val_acc did not improve from 0.94001\n",
+ "Epoch 305/100000\n",
+ " - 19s - loss: 0.3947 - acc: 0.9346 - val_loss: 0.3971 - val_acc: 0.9232\n",
+ "\n",
+ "Epoch 00305: val_acc did not improve from 0.94001\n",
+ "Epoch 306/100000\n",
+ " - 19s - loss: 0.3961 - acc: 0.9339 - val_loss: 0.4000 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 00306: val_acc did not improve from 0.94001\n",
+ "Epoch 307/100000\n",
+ " - 19s - loss: 0.3961 - acc: 0.9343 - val_loss: 0.3832 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 00307: val_acc did not improve from 0.94001\n",
+ "Epoch 308/100000\n",
+ " - 19s - loss: 0.3940 - acc: 0.9352 - val_loss: 0.3918 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 00308: val_acc did not improve from 0.94001\n",
+ "Epoch 309/100000\n",
+ " - 18s - loss: 0.3968 - acc: 0.9337 - val_loss: 0.3899 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00309: val_acc did not improve from 0.94001\n",
+ "Epoch 310/100000\n",
+ " - 19s - loss: 0.3944 - acc: 0.9354 - val_loss: 0.3990 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 00310: val_acc did not improve from 0.94001\n",
+ "Epoch 311/100000\n",
+ " - 18s - loss: 0.3946 - acc: 0.9346 - val_loss: 0.3739 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 00311: val_acc did not improve from 0.94001\n",
+ "Epoch 312/100000\n",
+ " - 19s - loss: 0.3966 - acc: 0.9340 - val_loss: 0.4089 - val_acc: 0.9274\n",
+ "\n",
+ "Epoch 00312: val_acc did not improve from 0.94001\n",
+ "Epoch 313/100000\n",
+ " - 18s - loss: 0.3938 - acc: 0.9348 - val_loss: 0.4106 - val_acc: 0.9172\n",
+ "\n",
+ "Epoch 00313: val_acc did not improve from 0.94001\n",
+ "Epoch 314/100000\n",
+ " - 19s - loss: 0.3967 - acc: 0.9338 - val_loss: 0.3881 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 00314: val_acc did not improve from 0.94001\n",
+ "Epoch 315/100000\n",
+ " - 19s - loss: 0.3963 - acc: 0.9345 - val_loss: 0.4076 - val_acc: 0.9191\n",
+ "\n",
+ "Epoch 00315: val_acc did not improve from 0.94001\n",
+ "Epoch 316/100000\n",
+ " - 18s - loss: 0.3965 - acc: 0.9346 - val_loss: 0.3757 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 00316: val_acc did not improve from 0.94001\n",
+ "Epoch 317/100000\n",
+ " - 19s - loss: 0.3945 - acc: 0.9345 - val_loss: 0.3892 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 00317: val_acc did not improve from 0.94001\n",
+ "Epoch 318/100000\n",
+ " - 18s - loss: 0.3951 - acc: 0.9349 - val_loss: 0.5040 - val_acc: 0.8924\n",
+ "\n",
+ "Epoch 00318: val_acc did not improve from 0.94001\n",
+ "Epoch 319/100000\n",
+ " - 19s - loss: 0.3951 - acc: 0.9347 - val_loss: 0.3891 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 00319: val_acc did not improve from 0.94001\n",
+ "Epoch 320/100000\n",
+ " - 19s - loss: 0.3968 - acc: 0.9343 - val_loss: 0.3818 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 00320: val_acc did not improve from 0.94001\n",
+ "Epoch 321/100000\n",
+ " - 18s - loss: 0.3981 - acc: 0.9340 - val_loss: 0.3808 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 00321: val_acc did not improve from 0.94001\n",
+ "Epoch 322/100000\n",
+ " - 18s - loss: 0.3976 - acc: 0.9346 - val_loss: 0.3913 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 00322: val_acc did not improve from 0.94001\n",
+ "Epoch 323/100000\n",
+ " - 19s - loss: 0.3951 - acc: 0.9347 - val_loss: 0.4147 - val_acc: 0.9132\n",
+ "\n",
+ "Epoch 00323: val_acc did not improve from 0.94001\n",
+ "Epoch 324/100000\n",
+ " - 18s - loss: 0.3959 - acc: 0.9348 - val_loss: 0.4095 - val_acc: 0.9180\n",
+ "\n",
+ "Epoch 00324: val_acc did not improve from 0.94001\n",
+ "Epoch 325/100000\n",
+ " - 19s - loss: 0.3998 - acc: 0.9346 - val_loss: 0.3957 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 00325: val_acc did not improve from 0.94001\n",
+ "Epoch 326/100000\n",
+ " - 18s - loss: 0.3982 - acc: 0.9344 - val_loss: 0.3826 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 00326: val_acc did not improve from 0.94001\n",
+ "Epoch 327/100000\n",
+ " - 18s - loss: 0.3960 - acc: 0.9347 - val_loss: 0.3958 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 00327: val_acc did not improve from 0.94001\n",
+ "Epoch 328/100000\n",
+ " - 19s - loss: 0.3934 - acc: 0.9357 - val_loss: 0.5840 - val_acc: 0.8825\n",
+ "\n",
+ "Epoch 00328: val_acc did not improve from 0.94001\n",
+ "Epoch 329/100000\n",
+ " - 18s - loss: 0.3960 - acc: 0.9343 - val_loss: 0.3772 - val_acc: 0.9393\n",
+ "\n",
+ "Epoch 00329: val_acc did not improve from 0.94001\n",
+ "Epoch 330/100000\n",
+ " - 19s - loss: 0.3957 - acc: 0.9349 - val_loss: 0.3994 - val_acc: 0.9233\n",
+ "\n",
+ "Epoch 00330: val_acc did not improve from 0.94001\n",
+ "Epoch 331/100000\n",
+ " - 19s - loss: 0.3977 - acc: 0.9338 - val_loss: 0.4528 - val_acc: 0.8848\n",
+ "\n",
+ "Epoch 00331: val_acc did not improve from 0.94001\n",
+ "Epoch 332/100000\n",
+ " - 19s - loss: 0.3950 - acc: 0.9349 - val_loss: 0.3940 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 00332: val_acc did not improve from 0.94001\n",
+ "Epoch 333/100000\n",
+ " - 19s - loss: 0.4000 - acc: 0.9333 - val_loss: 0.4483 - val_acc: 0.9236\n",
+ "\n",
+ "Epoch 00333: val_acc did not improve from 0.94001\n",
+ "Epoch 334/100000\n",
+ " - 19s - loss: 0.3962 - acc: 0.9344 - val_loss: 0.3880 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 00334: val_acc did not improve from 0.94001\n",
+ "Epoch 335/100000\n",
+ " - 19s - loss: 0.3970 - acc: 0.9340 - val_loss: 0.4581 - val_acc: 0.9046\n",
+ "\n",
+ "Epoch 00335: val_acc did not improve from 0.94001\n",
+ "Epoch 336/100000\n",
+ " - 19s - loss: 0.3953 - acc: 0.9348 - val_loss: 0.3917 - val_acc: 0.9267\n",
+ "\n",
+ "Epoch 00336: val_acc did not improve from 0.94001\n",
+ "Epoch 337/100000\n",
+ " - 19s - loss: 0.3937 - acc: 0.9352 - val_loss: 0.4428 - val_acc: 0.8998\n",
+ "\n",
+ "Epoch 00337: val_acc did not improve from 0.94001\n",
+ "Epoch 338/100000\n",
+ " - 19s - loss: 0.3956 - acc: 0.9347 - val_loss: 0.3827 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 00338: val_acc did not improve from 0.94001\n",
+ "Epoch 339/100000\n",
+ " - 19s - loss: 0.3988 - acc: 0.9343 - val_loss: 0.3867 - val_acc: 0.9309\n",
+ "\n",
+ "Epoch 00339: val_acc did not improve from 0.94001\n",
+ "Epoch 340/100000\n",
+ " - 19s - loss: 0.3958 - acc: 0.9342 - val_loss: 0.3905 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 00340: val_acc did not improve from 0.94001\n",
+ "Epoch 341/100000\n",
+ " - 19s - loss: 0.3951 - acc: 0.9344 - val_loss: 0.4146 - val_acc: 0.9179\n",
+ "\n",
+ "Epoch 00341: val_acc did not improve from 0.94001\n",
+ "Epoch 342/100000\n",
+ " - 19s - loss: 0.3957 - acc: 0.9340 - val_loss: 0.4159 - val_acc: 0.9167\n",
+ "\n",
+ "Epoch 00342: val_acc did not improve from 0.94001\n",
+ "Epoch 343/100000\n",
+ " - 19s - loss: 0.3945 - acc: 0.9353 - val_loss: 0.3909 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 00343: val_acc did not improve from 0.94001\n",
+ "Epoch 344/100000\n",
+ " - 19s - loss: 0.3969 - acc: 0.9347 - val_loss: 0.3812 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 00344: val_acc did not improve from 0.94001\n",
+ "Epoch 345/100000\n",
+ " - 18s - loss: 0.3976 - acc: 0.9345 - val_loss: 0.3926 - val_acc: 0.9269\n",
+ "\n",
+ "Epoch 00345: val_acc did not improve from 0.94001\n",
+ "Epoch 346/100000\n",
+ " - 19s - loss: 0.3954 - acc: 0.9349 - val_loss: 0.3950 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00346: val_acc did not improve from 0.94001\n",
+ "Epoch 347/100000\n",
+ " - 18s - loss: 0.3981 - acc: 0.9338 - val_loss: 0.4860 - val_acc: 0.9099\n",
+ "\n",
+ "Epoch 00347: val_acc did not improve from 0.94001\n",
+ "Epoch 348/100000\n",
+ " - 18s - loss: 0.3988 - acc: 0.9344 - val_loss: 0.3988 - val_acc: 0.9282\n",
+ "\n",
+ "Epoch 00348: val_acc did not improve from 0.94001\n",
+ "Epoch 349/100000\n",
+ " - 19s - loss: 0.3965 - acc: 0.9343 - val_loss: 0.5226 - val_acc: 0.8434\n",
+ "\n",
+ "Epoch 00349: val_acc did not improve from 0.94001\n",
+ "Epoch 350/100000\n",
+ " - 18s - loss: 0.3960 - acc: 0.9346 - val_loss: 0.4704 - val_acc: 0.8918\n",
+ "\n",
+ "Epoch 00350: val_acc did not improve from 0.94001\n",
+ "Epoch 351/100000\n",
+ " - 19s - loss: 0.3937 - acc: 0.9347 - val_loss: 0.3920 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 00351: val_acc did not improve from 0.94001\n",
+ "Epoch 352/100000\n",
+ " - 18s - loss: 0.3948 - acc: 0.9349 - val_loss: 0.4805 - val_acc: 0.8873\n",
+ "\n",
+ "Epoch 00352: val_acc did not improve from 0.94001\n",
+ "Epoch 353/100000\n",
+ " - 19s - loss: 0.3989 - acc: 0.9339 - val_loss: 0.3707 - val_acc: 0.9422\n",
+ "\n",
+ "Epoch 00353: val_acc improved from 0.94001 to 0.94225, saving model to ./ModelSnapshots/CNN-353.h5\n",
+ "Epoch 354/100000\n",
+ " - 19s - loss: 0.3969 - acc: 0.9347 - val_loss: 0.3850 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 00354: val_acc did not improve from 0.94225\n",
+ "Epoch 355/100000\n",
+ " - 19s - loss: 0.3982 - acc: 0.9344 - val_loss: 0.3779 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 00355: val_acc did not improve from 0.94225\n",
+ "Epoch 356/100000\n",
+ " - 19s - loss: 0.3979 - acc: 0.9341 - val_loss: 0.4924 - val_acc: 0.8875\n",
+ "\n",
+ "Epoch 00356: val_acc did not improve from 0.94225\n",
+ "Epoch 357/100000\n",
+ " - 19s - loss: 0.3966 - acc: 0.9347 - val_loss: 0.4153 - val_acc: 0.9132\n",
+ "\n",
+ "Epoch 00357: val_acc did not improve from 0.94225\n",
+ "Epoch 358/100000\n",
+ " - 19s - loss: 0.3974 - acc: 0.9344 - val_loss: 0.4070 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 00358: val_acc did not improve from 0.94225\n",
+ "Epoch 359/100000\n",
+ " - 19s - loss: 0.3972 - acc: 0.9346 - val_loss: 0.3953 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 00359: val_acc did not improve from 0.94225\n",
+ "Epoch 360/100000\n",
+ " - 19s - loss: 0.3957 - acc: 0.9347 - val_loss: 0.3962 - val_acc: 0.9309\n",
+ "\n",
+ "Epoch 00360: val_acc did not improve from 0.94225\n",
+ "Epoch 361/100000\n",
+ " - 19s - loss: 0.3957 - acc: 0.9347 - val_loss: 0.4077 - val_acc: 0.9202\n",
+ "\n",
+ "Epoch 00361: val_acc did not improve from 0.94225\n",
+ "Epoch 362/100000\n",
+ " - 19s - loss: 0.3961 - acc: 0.9345 - val_loss: 0.6163 - val_acc: 0.7919\n",
+ "\n",
+ "Epoch 00362: val_acc did not improve from 0.94225\n",
+ "Epoch 363/100000\n",
+ " - 19s - loss: 0.3992 - acc: 0.9335 - val_loss: 0.4250 - val_acc: 0.9272\n",
+ "\n",
+ "Epoch 00363: val_acc did not improve from 0.94225\n",
+ "Epoch 364/100000\n",
+ " - 19s - loss: 0.3949 - acc: 0.9352 - val_loss: 0.3871 - val_acc: 0.9270\n",
+ "\n",
+ "Epoch 00364: val_acc did not improve from 0.94225\n",
+ "Epoch 365/100000\n",
+ " - 19s - loss: 0.3974 - acc: 0.9344 - val_loss: 0.3863 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 00365: val_acc did not improve from 0.94225\n",
+ "Epoch 366/100000\n",
+ " - 19s - loss: 0.3988 - acc: 0.9340 - val_loss: 0.4407 - val_acc: 0.9095\n",
+ "\n",
+ "Epoch 00366: val_acc did not improve from 0.94225\n",
+ "Epoch 367/100000\n",
+ " - 18s - loss: 0.3930 - acc: 0.9349 - val_loss: 0.4103 - val_acc: 0.9203\n",
+ "\n",
+ "Epoch 00367: val_acc did not improve from 0.94225\n",
+ "Epoch 368/100000\n",
+ " - 19s - loss: 0.3969 - acc: 0.9346 - val_loss: 0.3766 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 00368: val_acc did not improve from 0.94225\n",
+ "Epoch 369/100000\n",
+ " - 18s - loss: 0.3956 - acc: 0.9348 - val_loss: 0.4009 - val_acc: 0.9281\n",
+ "\n",
+ "Epoch 00369: val_acc did not improve from 0.94225\n",
+ "Epoch 370/100000\n",
+ " - 19s - loss: 0.3948 - acc: 0.9347 - val_loss: 0.3784 - val_acc: 0.9383\n",
+ "\n",
+ "Epoch 00370: val_acc did not improve from 0.94225\n",
+ "Epoch 371/100000\n",
+ " - 18s - loss: 0.3975 - acc: 0.9337 - val_loss: 0.4197 - val_acc: 0.9266\n",
+ "\n",
+ "Epoch 00371: val_acc did not improve from 0.94225\n",
+ "Epoch 372/100000\n",
+ " - 19s - loss: 0.3972 - acc: 0.9345 - val_loss: 0.3916 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 00372: val_acc did not improve from 0.94225\n",
+ "Epoch 373/100000\n",
+ " - 18s - loss: 0.3983 - acc: 0.9340 - val_loss: 0.4921 - val_acc: 0.8681\n",
+ "\n",
+ "Epoch 00373: val_acc did not improve from 0.94225\n",
+ "Epoch 374/100000\n",
+ " - 19s - loss: 0.3991 - acc: 0.9347 - val_loss: 0.4028 - val_acc: 0.9305\n",
+ "\n",
+ "Epoch 00374: val_acc did not improve from 0.94225\n",
+ "Epoch 375/100000\n",
+ " - 19s - loss: 0.3969 - acc: 0.9340 - val_loss: 0.3990 - val_acc: 0.9242\n",
+ "\n",
+ "Epoch 00375: val_acc did not improve from 0.94225\n",
+ "Epoch 376/100000\n",
+ " - 19s - loss: 0.3990 - acc: 0.9343 - val_loss: 0.4141 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 00376: val_acc did not improve from 0.94225\n",
+ "Epoch 377/100000\n",
+ " - 19s - loss: 0.3959 - acc: 0.9345 - val_loss: 0.4142 - val_acc: 0.9103\n",
+ "\n",
+ "Epoch 00377: val_acc did not improve from 0.94225\n",
+ "Epoch 378/100000\n",
+ " - 19s - loss: 0.3974 - acc: 0.9341 - val_loss: 0.3783 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 00378: val_acc did not improve from 0.94225\n",
+ "Epoch 379/100000\n",
+ " - 18s - loss: 0.3963 - acc: 0.9343 - val_loss: 0.3905 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 00379: val_acc did not improve from 0.94225\n",
+ "Epoch 380/100000\n",
+ " - 19s - loss: 0.3949 - acc: 0.9351 - val_loss: 0.3983 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 00380: val_acc did not improve from 0.94225\n",
+ "Epoch 381/100000\n",
+ " - 19s - loss: 0.3978 - acc: 0.9345 - val_loss: 0.4146 - val_acc: 0.9204\n",
+ "\n",
+ "Epoch 00381: val_acc did not improve from 0.94225\n",
+ "Epoch 382/100000\n",
+ " - 18s - loss: 0.3962 - acc: 0.9348 - val_loss: 0.3957 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 00382: val_acc did not improve from 0.94225\n",
+ "Epoch 383/100000\n",
+ " - 19s - loss: 0.3955 - acc: 0.9342 - val_loss: 0.3834 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 00383: val_acc did not improve from 0.94225\n",
+ "Epoch 384/100000\n",
+ " - 18s - loss: 0.3956 - acc: 0.9343 - val_loss: 0.3966 - val_acc: 0.9236\n",
+ "\n",
+ "Epoch 00384: val_acc did not improve from 0.94225\n",
+ "Epoch 385/100000\n",
+ " - 19s - loss: 0.3942 - acc: 0.9350 - val_loss: 0.4539 - val_acc: 0.8910\n",
+ "\n",
+ "Epoch 00385: val_acc did not improve from 0.94225\n",
+ "Epoch 386/100000\n",
+ " - 18s - loss: 0.3998 - acc: 0.9334 - val_loss: 0.4513 - val_acc: 0.8985\n",
+ "\n",
+ "Epoch 00386: val_acc did not improve from 0.94225\n",
+ "Epoch 387/100000\n",
+ " - 18s - loss: 0.3960 - acc: 0.9342 - val_loss: 0.4464 - val_acc: 0.9122\n",
+ "\n",
+ "Epoch 00387: val_acc did not improve from 0.94225\n",
+ "Epoch 388/100000\n",
+ " - 19s - loss: 0.3954 - acc: 0.9344 - val_loss: 0.5134 - val_acc: 0.8496\n",
+ "\n",
+ "Epoch 00388: val_acc did not improve from 0.94225\n",
+ "Epoch 389/100000\n",
+ " - 19s - loss: 0.3965 - acc: 0.9345 - val_loss: 0.3970 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 00389: val_acc did not improve from 0.94225\n",
+ "Epoch 390/100000\n",
+ " - 19s - loss: 0.3975 - acc: 0.9341 - val_loss: 0.3818 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 00390: val_acc did not improve from 0.94225\n",
+ "Epoch 391/100000\n",
+ " - 19s - loss: 0.3958 - acc: 0.9345 - val_loss: 0.3894 - val_acc: 0.9275\n",
+ "\n",
+ "Epoch 00391: val_acc did not improve from 0.94225\n",
+ "Epoch 392/100000\n",
+ " - 18s - loss: 0.3951 - acc: 0.9342 - val_loss: 0.3928 - val_acc: 0.9265\n",
+ "\n",
+ "Epoch 00392: val_acc did not improve from 0.94225\n",
+ "Epoch 393/100000\n",
+ " - 19s - loss: 0.3982 - acc: 0.9346 - val_loss: 0.4041 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 00393: val_acc did not improve from 0.94225\n",
+ "Epoch 394/100000\n",
+ " - 18s - loss: 0.3956 - acc: 0.9343 - val_loss: 0.4016 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 00394: val_acc did not improve from 0.94225\n",
+ "Epoch 395/100000\n",
+ " - 19s - loss: 0.3951 - acc: 0.9346 - val_loss: 0.4112 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 00395: val_acc did not improve from 0.94225\n",
+ "Epoch 396/100000\n",
+ " - 18s - loss: 0.3957 - acc: 0.9352 - val_loss: 0.4031 - val_acc: 0.9238\n",
+ "\n",
+ "Epoch 00396: val_acc did not improve from 0.94225\n",
+ "Epoch 397/100000\n",
+ " - 19s - loss: 0.3960 - acc: 0.9349 - val_loss: 0.4604 - val_acc: 0.9044\n",
+ "\n",
+ "Epoch 00397: val_acc did not improve from 0.94225\n",
+ "Epoch 398/100000\n",
+ " - 19s - loss: 0.3996 - acc: 0.9339 - val_loss: 0.4998 - val_acc: 0.8609\n",
+ "\n",
+ "Epoch 00398: val_acc did not improve from 0.94225\n",
+ "Epoch 399/100000\n",
+ " - 18s - loss: 0.3953 - acc: 0.9351 - val_loss: 0.3926 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 00399: val_acc did not improve from 0.94225\n",
+ "Epoch 400/100000\n",
+ " - 19s - loss: 0.4040 - acc: 0.9328 - val_loss: 0.3926 - val_acc: 0.9264\n",
+ "\n",
+ "Epoch 00400: val_acc did not improve from 0.94225\n",
+ "Epoch 401/100000\n",
+ " - 19s - loss: 0.3983 - acc: 0.9335 - val_loss: 0.4238 - val_acc: 0.9105\n",
+ "\n",
+ "Epoch 00401: val_acc did not improve from 0.94225\n",
+ "Epoch 402/100000\n",
+ " - 19s - loss: 0.3954 - acc: 0.9351 - val_loss: 0.5009 - val_acc: 0.8659\n",
+ "\n",
+ "Epoch 00402: val_acc did not improve from 0.94225\n",
+ "Epoch 403/100000\n",
+ " - 19s - loss: 0.3988 - acc: 0.9343 - val_loss: 0.4153 - val_acc: 0.9237\n",
+ "\n",
+ "Epoch 00403: val_acc did not improve from 0.94225\n",
+ "Epoch 404/100000\n",
+ " - 19s - loss: 0.3961 - acc: 0.9341 - val_loss: 0.4492 - val_acc: 0.9110\n",
+ "\n",
+ "Epoch 00404: val_acc did not improve from 0.94225\n",
+ "Epoch 405/100000\n",
+ " - 19s - loss: 0.4000 - acc: 0.9335 - val_loss: 0.4161 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 00405: val_acc did not improve from 0.94225\n",
+ "Epoch 406/100000\n",
+ " - 18s - loss: 0.3975 - acc: 0.9341 - val_loss: 0.3899 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00406: val_acc did not improve from 0.94225\n",
+ "Epoch 407/100000\n",
+ " - 19s - loss: 0.3998 - acc: 0.9331 - val_loss: 0.4004 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 00407: val_acc did not improve from 0.94225\n",
+ "Epoch 408/100000\n",
+ " - 18s - loss: 0.3968 - acc: 0.9341 - val_loss: 0.4631 - val_acc: 0.9163\n",
+ "\n",
+ "Epoch 00408: val_acc did not improve from 0.94225\n",
+ "Epoch 409/100000\n",
+ " - 19s - loss: 0.3943 - acc: 0.9343 - val_loss: 0.5393 - val_acc: 0.8576\n",
+ "\n",
+ "Epoch 00409: val_acc did not improve from 0.94225\n",
+ "Epoch 410/100000\n",
+ " - 19s - loss: 0.3970 - acc: 0.9345 - val_loss: 0.3905 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 00410: val_acc did not improve from 0.94225\n",
+ "Epoch 411/100000\n",
+ " - 18s - loss: 0.3971 - acc: 0.9347 - val_loss: 0.3771 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 00411: val_acc did not improve from 0.94225\n",
+ "Epoch 412/100000\n",
+ " - 19s - loss: 0.3975 - acc: 0.9340 - val_loss: 0.4578 - val_acc: 0.9153\n",
+ "\n",
+ "Epoch 00412: val_acc did not improve from 0.94225\n",
+ "Epoch 413/100000\n",
+ " - 19s - loss: 0.3981 - acc: 0.9340 - val_loss: 0.4280 - val_acc: 0.9183\n",
+ "\n",
+ "Epoch 00418: val_acc did not improve from 0.94225\n",
+ "Epoch 419/100000\n",
+ " - 19s - loss: 0.3979 - acc: 0.9340 - val_loss: 0.3753 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 00419: val_acc did not improve from 0.94225\n",
+ "Epoch 420/100000\n",
+ " - 19s - loss: 0.3985 - acc: 0.9334 - val_loss: 0.4093 - val_acc: 0.9185\n",
+ "\n",
+ "Epoch 00420: val_acc did not improve from 0.94225\n",
+ "Epoch 421/100000\n",
+ " - 19s - loss: 0.3973 - acc: 0.9341 - val_loss: 0.4216 - val_acc: 0.9139\n",
+ "\n",
+ "Epoch 00421: val_acc did not improve from 0.94225\n",
+ "Epoch 422/100000\n",
+ " - 19s - loss: 0.3983 - acc: 0.9345 - val_loss: 0.3978 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 00422: val_acc did not improve from 0.94225\n",
+ "Epoch 423/100000\n",
+ " - 19s - loss: 0.3984 - acc: 0.9348 - val_loss: 0.3900 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 00423: val_acc did not improve from 0.94225\n",
+ "Epoch 424/100000\n",
+ " - 19s - loss: 0.3988 - acc: 0.9341 - val_loss: 0.4112 - val_acc: 0.9127\n",
+ "\n",
+ "Epoch 00424: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 00424: ReduceLROnPlateau reducing learning rate to 0.0009025000152178108.\n",
+ "Epoch 425/100000\n",
+ " - 19s - loss: 0.3903 - acc: 0.9339 - val_loss: 0.4279 - val_acc: 0.9038\n",
+ "\n",
+ "Epoch 00425: val_acc did not improve from 0.94225\n",
+ "Epoch 426/100000\n",
+ " - 18s - loss: 0.3873 - acc: 0.9352 - val_loss: 0.3781 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00426: val_acc did not improve from 0.94225\n",
+ "Epoch 427/100000\n",
+ " - 19s - loss: 0.3891 - acc: 0.9344 - val_loss: 0.4225 - val_acc: 0.9219\n",
+ "\n",
+ "Epoch 00427: val_acc did not improve from 0.94225\n",
+ "Epoch 428/100000\n",
+ " - 19s - loss: 0.3854 - acc: 0.9352 - val_loss: 0.6165 - val_acc: 0.8236\n",
+ "\n",
+ "Epoch 00428: val_acc did not improve from 0.94225\n",
+ "Epoch 429/100000\n",
+ " - 19s - loss: 0.3911 - acc: 0.9331 - val_loss: 0.5023 - val_acc: 0.8787\n",
+ "\n",
+ "Epoch 00429: val_acc did not improve from 0.94225\n",
+ "Epoch 430/100000\n",
+ " - 19s - loss: 0.3876 - acc: 0.9348 - val_loss: 0.5066 - val_acc: 0.8492\n",
+ "\n",
+ "Epoch 00430: val_acc did not improve from 0.94225\n",
+ "Epoch 431/100000\n",
+ " - 19s - loss: 0.3878 - acc: 0.9342 - val_loss: 0.3706 - val_acc: 0.9410\n",
+ "\n",
+ "Epoch 00431: val_acc did not improve from 0.94225\n",
+ "Epoch 432/100000\n",
+ " - 18s - loss: 0.3916 - acc: 0.9337 - val_loss: 0.3999 - val_acc: 0.9116\n",
+ "\n",
+ "Epoch 00432: val_acc did not improve from 0.94225\n",
+ "Epoch 433/100000\n",
+ " - 18s - loss: 0.3887 - acc: 0.9338 - val_loss: 0.4852 - val_acc: 0.8689\n",
+ "\n",
+ "Epoch 00433: val_acc did not improve from 0.94225\n",
+ "Epoch 434/100000\n",
+ " - 19s - loss: 0.3842 - acc: 0.9355 - val_loss: 0.4900 - val_acc: 0.9019\n",
+ "\n",
+ "Epoch 00434: val_acc did not improve from 0.94225\n",
+ "Epoch 435/100000\n",
+ " - 19s - loss: 0.3870 - acc: 0.9350 - val_loss: 0.5658 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00435: val_acc did not improve from 0.94225\n",
+ "Epoch 436/100000\n",
+ " - 18s - loss: 0.3904 - acc: 0.9339 - val_loss: 0.3719 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 00436: val_acc did not improve from 0.94225\n",
+ "Epoch 437/100000\n",
+ " - 19s - loss: 0.3926 - acc: 0.9327 - val_loss: 0.3680 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 00437: val_acc did not improve from 0.94225\n",
+ "Epoch 438/100000\n",
+ " - 18s - loss: 0.3857 - acc: 0.9353 - val_loss: 0.3827 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 00438: val_acc did not improve from 0.94225\n",
+ "Epoch 439/100000\n",
+ " - 19s - loss: 0.3866 - acc: 0.9351 - val_loss: 0.4315 - val_acc: 0.9057\n",
+ "\n",
+ "Epoch 00439: val_acc did not improve from 0.94225\n",
+ "Epoch 440/100000\n",
+ " - 18s - loss: 0.3884 - acc: 0.9341 - val_loss: 0.3742 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 00440: val_acc did not improve from 0.94225\n",
+ "Epoch 441/100000\n",
+ " - 19s - loss: 0.3892 - acc: 0.9344 - val_loss: 0.3746 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 00441: val_acc did not improve from 0.94225\n",
+ "Epoch 442/100000\n",
+ " - 19s - loss: 0.3875 - acc: 0.9345 - val_loss: 0.4422 - val_acc: 0.8977\n",
+ "\n",
+ "Epoch 00442: val_acc did not improve from 0.94225\n",
+ "Epoch 443/100000\n",
+ " - 19s - loss: 0.3891 - acc: 0.9339 - val_loss: 0.3710 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 00443: val_acc did not improve from 0.94225\n",
+ "Epoch 444/100000\n",
+ " - 18s - loss: 0.3869 - acc: 0.9352 - val_loss: 0.4202 - val_acc: 0.9119\n",
+ "\n",
+ "Epoch 00444: val_acc did not improve from 0.94225\n",
+ "Epoch 445/100000\n",
+ " - 19s - loss: 0.3880 - acc: 0.9342 - val_loss: 0.4037 - val_acc: 0.9171\n",
+ "\n",
+ "Epoch 00445: val_acc did not improve from 0.94225\n",
+ "Epoch 446/100000\n",
+ " - 19s - loss: 0.3894 - acc: 0.9348 - val_loss: 0.3825 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 00446: val_acc did not improve from 0.94225\n",
+ "Epoch 447/100000\n",
+ " - 19s - loss: 0.3883 - acc: 0.9342 - val_loss: 0.3763 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00447: val_acc did not improve from 0.94225\n",
+ "Epoch 448/100000\n",
+ " - 19s - loss: 0.3861 - acc: 0.9343 - val_loss: 0.4339 - val_acc: 0.9250\n",
+ "\n",
+ "Epoch 00448: val_acc did not improve from 0.94225\n",
+ "Epoch 449/100000\n",
+ " - 19s - loss: 0.3891 - acc: 0.9337 - val_loss: 0.3682 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 00449: val_acc did not improve from 0.94225\n",
+ "Epoch 450/100000\n",
+ " - 19s - loss: 0.3899 - acc: 0.9343 - val_loss: 0.3819 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 00450: val_acc did not improve from 0.94225\n",
+ "Epoch 451/100000\n",
+ " - 19s - loss: 0.3861 - acc: 0.9348 - val_loss: 0.4300 - val_acc: 0.9040\n",
+ "\n",
+ "Epoch 00451: val_acc did not improve from 0.94225\n",
+ "Epoch 452/100000\n",
+ " - 19s - loss: 0.3881 - acc: 0.9336 - val_loss: 0.3857 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 00452: val_acc did not improve from 0.94225\n",
+ "Epoch 453/100000\n",
+ " - 18s - loss: 0.3847 - acc: 0.9352 - val_loss: 0.3745 - val_acc: 0.9304\n",
+ "\n",
+ "Epoch 00453: val_acc did not improve from 0.94225\n",
+ "Epoch 454/100000\n",
+ " - 19s - loss: 0.3858 - acc: 0.9345 - val_loss: 0.3701 - val_acc: 0.9377\n",
+ "\n",
+ "Epoch 00454: val_acc did not improve from 0.94225\n",
+ "Epoch 455/100000\n",
+ " - 18s - loss: 0.3853 - acc: 0.9345 - val_loss: 0.4384 - val_acc: 0.9218\n",
+ "\n",
+ "Epoch 00455: val_acc did not improve from 0.94225\n",
+ "Epoch 456/100000\n",
+ " - 19s - loss: 0.3889 - acc: 0.9342 - val_loss: 0.4469 - val_acc: 0.9017\n",
+ "\n",
+ "Epoch 00456: val_acc did not improve from 0.94225\n",
+ "Epoch 457/100000\n",
+ " - 19s - loss: 0.3877 - acc: 0.9349 - val_loss: 0.3737 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 00457: val_acc did not improve from 0.94225\n",
+ "Epoch 458/100000\n",
+ " - 19s - loss: 0.3918 - acc: 0.9333 - val_loss: 0.4124 - val_acc: 0.9175\n",
+ "\n",
+ "Epoch 00458: val_acc did not improve from 0.94225\n",
+ "Epoch 459/100000\n",
+ " - 19s - loss: 0.3857 - acc: 0.9352 - val_loss: 0.3875 - val_acc: 0.9305\n",
+ "\n",
+ "Epoch 00459: val_acc did not improve from 0.94225\n",
+ "Epoch 460/100000\n",
+ " - 19s - loss: 0.3884 - acc: 0.9342 - val_loss: 0.4006 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 00460: val_acc did not improve from 0.94225\n",
+ "Epoch 461/100000\n",
+ " - 19s - loss: 0.3885 - acc: 0.9345 - val_loss: 0.3791 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 00461: val_acc did not improve from 0.94225\n",
+ "Epoch 462/100000\n",
+ " - 19s - loss: 0.3882 - acc: 0.9349 - val_loss: 0.4018 - val_acc: 0.9286\n",
+ "\n",
+ "Epoch 00462: val_acc did not improve from 0.94225\n",
+ "Epoch 463/100000\n",
+ " - 19s - loss: 0.3898 - acc: 0.9341 - val_loss: 0.3822 - val_acc: 0.9274\n",
+ "\n",
+ "Epoch 00463: val_acc did not improve from 0.94225\n",
+ "Epoch 464/100000\n",
+ " - 19s - loss: 0.3886 - acc: 0.9343 - val_loss: 0.3959 - val_acc: 0.9243\n",
+ "\n",
+ "Epoch 00464: val_acc did not improve from 0.94225\n",
+ "Epoch 465/100000\n",
+ " - 18s - loss: 0.3904 - acc: 0.9340 - val_loss: 0.3818 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 00465: val_acc did not improve from 0.94225\n",
+ "Epoch 466/100000\n",
+ " - 19s - loss: 0.3878 - acc: 0.9346 - val_loss: 0.3951 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 00466: val_acc did not improve from 0.94225\n",
+ "Epoch 467/100000\n",
+ " - 18s - loss: 0.3900 - acc: 0.9335 - val_loss: 0.3747 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 00467: val_acc did not improve from 0.94225\n",
+ "Epoch 468/100000\n",
+ " - 19s - loss: 0.3896 - acc: 0.9346 - val_loss: 0.3802 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 00468: val_acc did not improve from 0.94225\n",
+ "Epoch 469/100000\n",
+ " - 18s - loss: 0.3893 - acc: 0.9340 - val_loss: 0.3893 - val_acc: 0.9273\n",
+ "\n",
+ "Epoch 00469: val_acc did not improve from 0.94225\n",
+ "Epoch 470/100000\n",
+ " - 18s - loss: 0.3892 - acc: 0.9344 - val_loss: 0.4014 - val_acc: 0.9155\n",
+ "\n",
+ "Epoch 00470: val_acc did not improve from 0.94225\n",
+ "Epoch 471/100000\n",
+ " - 19s - loss: 0.3885 - acc: 0.9348 - val_loss: 0.3714 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 00471: val_acc did not improve from 0.94225\n",
+ "Epoch 472/100000\n",
+ " - 18s - loss: 0.3882 - acc: 0.9349 - val_loss: 0.3981 - val_acc: 0.9246\n",
+ "\n",
+ "Epoch 00472: val_acc did not improve from 0.94225\n",
+ "Epoch 473/100000\n",
+ " - 19s - loss: 0.3850 - acc: 0.9349 - val_loss: 0.3910 - val_acc: 0.9223\n",
+ "\n",
+ "Epoch 00473: val_acc did not improve from 0.94225\n",
+ "Epoch 474/100000\n",
+ " - 19s - loss: 0.3878 - acc: 0.9345 - val_loss: 0.4567 - val_acc: 0.8801\n",
+ "\n",
+ "Epoch 00474: val_acc did not improve from 0.94225\n",
+ "Epoch 475/100000\n",
+ " - 18s - loss: 0.3892 - acc: 0.9351 - val_loss: 0.3816 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 00475: val_acc did not improve from 0.94225\n",
+ "Epoch 476/100000\n",
+ " - 19s - loss: 0.3904 - acc: 0.9338 - val_loss: 0.3739 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 00476: val_acc did not improve from 0.94225\n",
+ "Epoch 477/100000\n",
+ " - 18s - loss: 0.3913 - acc: 0.9344 - val_loss: 0.4849 - val_acc: 0.8807\n",
+ "\n",
+ "Epoch 00477: val_acc did not improve from 0.94225\n",
+ "Epoch 478/100000\n",
+ " - 19s - loss: 0.3912 - acc: 0.9345 - val_loss: 0.3807 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 00478: val_acc did not improve from 0.94225\n",
+ "Epoch 479/100000\n",
+ " - 19s - loss: 0.3887 - acc: 0.9345 - val_loss: 0.3844 - val_acc: 0.9225\n",
+ "\n",
+ "Epoch 00479: val_acc did not improve from 0.94225\n",
+ "Epoch 480/100000\n",
+ " - 19s - loss: 0.3876 - acc: 0.9352 - val_loss: 0.3716 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 00480: val_acc did not improve from 0.94225\n",
+ "Epoch 481/100000\n",
+ " - 19s - loss: 0.3880 - acc: 0.9348 - val_loss: 0.3883 - val_acc: 0.9248\n",
+ "\n",
+ "Epoch 00481: val_acc did not improve from 0.94225\n",
+ "Epoch 482/100000\n",
+ " - 19s - loss: 0.3882 - acc: 0.9342 - val_loss: 0.3689 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 00482: val_acc did not improve from 0.94225\n",
+ "Epoch 483/100000\n",
+ " - 19s - loss: 0.3893 - acc: 0.9338 - val_loss: 0.3841 - val_acc: 0.9282\n",
+ "\n",
+ "Epoch 00483: val_acc did not improve from 0.94225\n",
+ "Epoch 484/100000\n",
+ " - 19s - loss: 0.3870 - acc: 0.9343 - val_loss: 0.3907 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 00484: val_acc did not improve from 0.94225\n",
+ "Epoch 485/100000\n",
+ " - 19s - loss: 0.3912 - acc: 0.9337 - val_loss: 0.3848 - val_acc: 0.9285\n",
+ "\n",
+ "Epoch 00485: val_acc did not improve from 0.94225\n",
+ "Epoch 486/100000\n",
+ " - 19s - loss: 0.3901 - acc: 0.9346 - val_loss: 0.3797 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 00486: val_acc did not improve from 0.94225\n",
+ "Epoch 487/100000\n",
+ " - 19s - loss: 0.3894 - acc: 0.9337 - val_loss: 0.4391 - val_acc: 0.8926\n",
+ "\n",
+ "Epoch 00487: val_acc did not improve from 0.94225\n",
+ "Epoch 488/100000\n",
+ " - 18s - loss: 0.3860 - acc: 0.9350 - val_loss: 0.3975 - val_acc: 0.9171\n",
+ "\n",
+ "Epoch 00488: val_acc did not improve from 0.94225\n",
+ "Epoch 489/100000\n",
+ " - 19s - loss: 0.3867 - acc: 0.9348 - val_loss: 0.3714 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 00489: val_acc did not improve from 0.94225\n",
+ "Epoch 490/100000\n",
+ " - 19s - loss: 0.3890 - acc: 0.9337 - val_loss: 0.4128 - val_acc: 0.9181\n",
+ "\n",
+ "Epoch 00490: val_acc did not improve from 0.94225\n",
+ "Epoch 491/100000\n",
+ " - 19s - loss: 0.3918 - acc: 0.9331 - val_loss: 0.5274 - val_acc: 0.8722\n",
+ "\n",
+ "Epoch 00491: val_acc did not improve from 0.94225\n",
+ "Epoch 492/100000\n",
+ " - 19s - loss: 0.3885 - acc: 0.9347 - val_loss: 0.3829 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 00492: val_acc did not improve from 0.94225\n",
+ "Epoch 493/100000\n",
+ " - 19s - loss: 0.3897 - acc: 0.9348 - val_loss: 0.3878 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 00493: val_acc did not improve from 0.94225\n",
+ "Epoch 494/100000\n",
+ " - 19s - loss: 0.3904 - acc: 0.9340 - val_loss: 0.3814 - val_acc: 0.9273\n",
+ "\n",
+ "Epoch 00494: val_acc did not improve from 0.94225\n",
+ "Epoch 495/100000\n",
+ " - 18s - loss: 0.3881 - acc: 0.9350 - val_loss: 0.3973 - val_acc: 0.9255\n",
+ "\n",
+ "Epoch 00495: val_acc did not improve from 0.94225\n",
+ "Epoch 496/100000\n",
+ " - 19s - loss: 0.3894 - acc: 0.9341 - val_loss: 0.3813 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 00496: val_acc did not improve from 0.94225\n",
+ "Epoch 497/100000\n",
+ " - 18s - loss: 0.3891 - acc: 0.9340 - val_loss: 0.3759 - val_acc: 0.9390\n",
+ "\n",
+ "Epoch 00497: val_acc did not improve from 0.94225\n",
+ "Epoch 498/100000\n",
+ " - 19s - loss: 0.3914 - acc: 0.9332 - val_loss: 0.3937 - val_acc: 0.9267\n",
+ "\n",
+ "Epoch 00498: val_acc did not improve from 0.94225\n",
+ "Epoch 499/100000\n",
+ " - 19s - loss: 0.3878 - acc: 0.9348 - val_loss: 0.4182 - val_acc: 0.9042\n",
+ "\n",
+ "Epoch 00499: val_acc did not improve from 0.94225\n",
+ "Epoch 500/100000\n",
+ " - 18s - loss: 0.3901 - acc: 0.9348 - val_loss: 0.4485 - val_acc: 0.9114\n",
+ "\n",
+ "Epoch 00500: val_acc did not improve from 0.94225\n",
+ "Epoch 501/100000\n",
+ " - 19s - loss: 0.3908 - acc: 0.9344 - val_loss: 0.5070 - val_acc: 0.8653\n",
+ "\n",
+ "Epoch 00501: val_acc did not improve from 0.94225\n",
+ "Epoch 502/100000\n",
+ " - 18s - loss: 0.3889 - acc: 0.9345 - val_loss: 0.3704 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 00502: val_acc did not improve from 0.94225\n",
+ "Epoch 503/100000\n",
+ " - 19s - loss: 0.3891 - acc: 0.9344 - val_loss: 0.3717 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 00503: val_acc did not improve from 0.94225\n",
+ "Epoch 504/100000\n",
+ " - 18s - loss: 0.3907 - acc: 0.9339 - val_loss: 0.3746 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 00504: val_acc did not improve from 0.94225\n",
+ "Epoch 505/100000\n",
+ " - 19s - loss: 0.3897 - acc: 0.9342 - val_loss: 0.4514 - val_acc: 0.8870\n",
+ "\n",
+ "Epoch 00505: val_acc did not improve from 0.94225\n",
+ "Epoch 506/100000\n",
+ " - 19s - loss: 0.3895 - acc: 0.9337 - val_loss: 0.3828 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 00506: val_acc did not improve from 0.94225\n",
+ "Epoch 507/100000\n",
+ " - 19s - loss: 0.3919 - acc: 0.9334 - val_loss: 0.3786 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00507: val_acc did not improve from 0.94225\n",
+ "Epoch 508/100000\n",
+ " - 19s - loss: 0.3893 - acc: 0.9347 - val_loss: 0.3916 - val_acc: 0.9272\n",
+ "\n",
+ "Epoch 00508: val_acc did not improve from 0.94225\n",
+ "Epoch 509/100000\n",
+ " - 19s - loss: 0.3862 - acc: 0.9350 - val_loss: 0.4134 - val_acc: 0.9084\n",
+ "\n",
+ "Epoch 00509: val_acc did not improve from 0.94225\n",
+ "Epoch 510/100000\n",
+ " - 18s - loss: 0.3901 - acc: 0.9338 - val_loss: 0.3840 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 00510: val_acc did not improve from 0.94225\n",
+ "Epoch 511/100000\n",
+ " - 19s - loss: 0.3891 - acc: 0.9340 - val_loss: 0.3769 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 00511: val_acc did not improve from 0.94225\n",
+ "Epoch 512/100000\n",
+ " - 18s - loss: 0.3912 - acc: 0.9340 - val_loss: 0.3935 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 00512: val_acc did not improve from 0.94225\n",
+ "Epoch 513/100000\n",
+ " - 19s - loss: 0.3886 - acc: 0.9351 - val_loss: 0.5641 - val_acc: 0.8408\n",
+ "\n",
+ "Epoch 00513: val_acc did not improve from 0.94225\n",
+ "Epoch 514/100000\n",
+ " - 18s - loss: 0.3890 - acc: 0.9346 - val_loss: 0.4265 - val_acc: 0.9001\n",
+ "\n",
+ "Epoch 00514: val_acc did not improve from 0.94225\n",
+ "Epoch 515/100000\n",
+ " - 20s - loss: 0.3876 - acc: 0.9345 - val_loss: 0.3850 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 00515: val_acc did not improve from 0.94225\n",
+ "Epoch 516/100000\n",
+ " - 19s - loss: 0.3865 - acc: 0.9347 - val_loss: 0.4356 - val_acc: 0.8957\n",
+ "\n",
+ "Epoch 00516: val_acc did not improve from 0.94225\n",
+ "Epoch 517/100000\n",
+ " - 18s - loss: 0.3890 - acc: 0.9341 - val_loss: 0.3715 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 00517: val_acc did not improve from 0.94225\n",
+ "Epoch 518/100000\n",
+ " - 19s - loss: 0.3864 - acc: 0.9343 - val_loss: 0.4276 - val_acc: 0.9041\n",
+ "\n",
+ "Epoch 00518: val_acc did not improve from 0.94225\n",
+ "Epoch 519/100000\n",
+ " - 18s - loss: 0.3878 - acc: 0.9349 - val_loss: 0.3802 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 00519: val_acc did not improve from 0.94225\n",
+ "Epoch 520/100000\n",
+ " - 19s - loss: 0.3894 - acc: 0.9343 - val_loss: 0.3798 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 00520: val_acc did not improve from 0.94225\n",
+ "Epoch 521/100000\n",
+ " - 19s - loss: 0.3915 - acc: 0.9336 - val_loss: 0.3790 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 00521: val_acc did not improve from 0.94225\n",
+ "Epoch 522/100000\n",
+ " - 18s - loss: 0.3897 - acc: 0.9343 - val_loss: 0.3886 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 00522: val_acc did not improve from 0.94225\n",
+ "Epoch 523/100000\n",
+ " - 19s - loss: 0.3906 - acc: 0.9340 - val_loss: 0.4196 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 00523: val_acc did not improve from 0.94225\n",
+ "Epoch 524/100000\n",
+ " - 18s - loss: 0.3910 - acc: 0.9341 - val_loss: 0.3871 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 00524: val_acc did not improve from 0.94225\n",
+ "Epoch 525/100000\n",
+ " - 19s - loss: 0.3872 - acc: 0.9344 - val_loss: 0.3840 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 00525: val_acc did not improve from 0.94225\n",
+ "Epoch 526/100000\n",
+ " - 19s - loss: 0.3905 - acc: 0.9341 - val_loss: 0.4848 - val_acc: 0.8520\n",
+ "\n",
+ "Epoch 00526: val_acc did not improve from 0.94225\n",
+ "Epoch 527/100000\n",
+ " - 19s - loss: 0.3883 - acc: 0.9350 - val_loss: 0.3856 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 00527: val_acc did not improve from 0.94225\n",
+ "Epoch 528/100000\n",
+ " - 19s - loss: 0.3892 - acc: 0.9342 - val_loss: 0.3745 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 00528: val_acc did not improve from 0.94225\n",
+ "Epoch 529/100000\n",
+ " - 19s - loss: 0.3896 - acc: 0.9343 - val_loss: 0.4346 - val_acc: 0.9039\n",
+ "\n",
+ "Epoch 00529: val_acc did not improve from 0.94225\n",
+ "Epoch 530/100000\n",
+ " - 19s - loss: 0.3925 - acc: 0.9332 - val_loss: 0.4347 - val_acc: 0.9019\n",
+ "\n",
+ "Epoch 00530: val_acc did not improve from 0.94225\n",
+ "Epoch 531/100000\n",
+ " - 18s - loss: 0.3917 - acc: 0.9337 - val_loss: 0.3870 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 00531: val_acc did not improve from 0.94225\n",
+ "Epoch 532/100000\n",
+ " - 19s - loss: 0.3893 - acc: 0.9342 - val_loss: 0.4613 - val_acc: 0.8869\n",
+ "\n",
+ "Epoch 00532: val_acc did not improve from 0.94225\n",
+ "Epoch 533/100000\n",
+ " - 18s - loss: 0.3895 - acc: 0.9340 - val_loss: 0.3790 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 00533: val_acc did not improve from 0.94225\n",
+ "Epoch 534/100000\n",
+ " - 19s - loss: 0.3904 - acc: 0.9344 - val_loss: 0.4047 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 00534: val_acc did not improve from 0.94225\n",
+ "Epoch 535/100000\n",
+ " - 19s - loss: 0.3873 - acc: 0.9356 - val_loss: 0.3876 - val_acc: 0.9243\n",
+ "\n",
+ "Epoch 00535: val_acc did not improve from 0.94225\n",
+ "Epoch 536/100000\n",
+ " - 19s - loss: 0.3886 - acc: 0.9345 - val_loss: 0.3754 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 00536: val_acc did not improve from 0.94225\n",
+ "Epoch 537/100000\n",
+ " - 19s - loss: 0.3903 - acc: 0.9345 - val_loss: 0.3870 - val_acc: 0.9290\n",
+ "\n",
+ "Epoch 00537: val_acc did not improve from 0.94225\n",
+ "Epoch 538/100000\n",
+ " - 19s - loss: 0.3897 - acc: 0.9340 - val_loss: 0.3874 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 00538: val_acc did not improve from 0.94225\n",
+ "Epoch 539/100000\n",
+ " - 19s - loss: 0.3896 - acc: 0.9339 - val_loss: 0.3879 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00539: val_acc did not improve from 0.94225\n",
+ "Epoch 540/100000\n",
+ " - 19s - loss: 0.3866 - acc: 0.9344 - val_loss: 0.4102 - val_acc: 0.9175\n",
+ "\n",
+ "Epoch 00540: val_acc did not improve from 0.94225\n",
+ "Epoch 541/100000\n",
+ " - 19s - loss: 0.3909 - acc: 0.9338 - val_loss: 0.5077 - val_acc: 0.8792\n",
+ "\n",
+ "Epoch 00541: val_acc did not improve from 0.94225\n",
+ "Epoch 542/100000\n",
+ " - 18s - loss: 0.3932 - acc: 0.9332 - val_loss: 0.4151 - val_acc: 0.9098\n",
+ "\n",
+ "Epoch 00542: val_acc did not improve from 0.94225\n",
+ "Epoch 543/100000\n",
+ " - 19s - loss: 0.3863 - acc: 0.9349 - val_loss: 0.4328 - val_acc: 0.8954\n",
+ "\n",
+ "Epoch 00543: val_acc did not improve from 0.94225\n",
+ "Epoch 544/100000\n",
+ " - 19s - loss: 0.3888 - acc: 0.9342 - val_loss: 0.3837 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 00544: val_acc did not improve from 0.94225\n",
+ "Epoch 545/100000\n",
+ " - 18s - loss: 0.3895 - acc: 0.9344 - val_loss: 0.3835 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 00545: val_acc did not improve from 0.94225\n",
+ "Epoch 546/100000\n",
+ " - 19s - loss: 0.3916 - acc: 0.9337 - val_loss: 0.4144 - val_acc: 0.9085\n",
+ "\n",
+ "Epoch 00546: val_acc did not improve from 0.94225\n",
+ "Epoch 547/100000\n",
+ " - 19s - loss: 0.3896 - acc: 0.9343 - val_loss: 0.4783 - val_acc: 0.8717\n",
+ "\n",
+ "Epoch 00547: val_acc did not improve from 0.94225\n",
+ "Epoch 548/100000\n",
+ " - 19s - loss: 0.3905 - acc: 0.9342 - val_loss: 0.3820 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 00548: val_acc did not improve from 0.94225\n",
+ "Epoch 549/100000\n",
+ " - 19s - loss: 0.3896 - acc: 0.9345 - val_loss: 0.3798 - val_acc: 0.9286\n",
+ "\n",
+ "Epoch 00549: val_acc did not improve from 0.94225\n",
+ "Epoch 550/100000\n",
+ " - 19s - loss: 0.3896 - acc: 0.9342 - val_loss: 0.4307 - val_acc: 0.9131\n",
+ "\n",
+ "Epoch 00550: val_acc did not improve from 0.94225\n",
+ "Epoch 551/100000\n",
+ " - 18s - loss: 0.3885 - acc: 0.9342 - val_loss: 0.3731 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 00551: val_acc did not improve from 0.94225\n",
+ "Epoch 552/100000\n",
+ " - 19s - loss: 0.3883 - acc: 0.9349 - val_loss: 0.4225 - val_acc: 0.9234\n",
+ "\n",
+ "Epoch 00552: val_acc did not improve from 0.94225\n",
+ "Epoch 553/100000\n",
+ " - 18s - loss: 0.3888 - acc: 0.9343 - val_loss: 0.4251 - val_acc: 0.9084\n",
+ "\n",
+ "Epoch 00553: val_acc did not improve from 0.94225\n",
+ "Epoch 554/100000\n",
+ " - 19s - loss: 0.3901 - acc: 0.9339 - val_loss: 0.4113 - val_acc: 0.9138\n",
+ "\n",
+ "Epoch 00554: val_acc did not improve from 0.94225\n",
+ "Epoch 555/100000\n",
+ " - 19s - loss: 0.3866 - acc: 0.9342 - val_loss: 0.3780 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 00555: val_acc did not improve from 0.94225\n",
+ "Epoch 556/100000\n",
+ " - 18s - loss: 0.3869 - acc: 0.9343 - val_loss: 0.4453 - val_acc: 0.9016\n",
+ "\n",
+ "Epoch 00556: val_acc did not improve from 0.94225\n",
+ "Epoch 557/100000\n",
+ " - 19s - loss: 0.3896 - acc: 0.9344 - val_loss: 0.3972 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 00557: val_acc did not improve from 0.94225\n",
+ "Epoch 558/100000\n",
+ " - 19s - loss: 0.3883 - acc: 0.9344 - val_loss: 0.4326 - val_acc: 0.9168\n",
+ "\n",
+ "Epoch 00558: val_acc did not improve from 0.94225\n",
+ "Epoch 559/100000\n",
+ " - 19s - loss: 0.3909 - acc: 0.9337 - val_loss: 0.3951 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 00559: val_acc did not improve from 0.94225\n",
+ "Epoch 560/100000\n",
+ " - 19s - loss: 0.3896 - acc: 0.9341 - val_loss: 0.4256 - val_acc: 0.9271\n",
+ "\n",
+ "Epoch 00560: val_acc did not improve from 0.94225\n",
+ "Epoch 561/100000\n",
+ " - 19s - loss: 0.3913 - acc: 0.9334 - val_loss: 0.3812 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 00561: val_acc did not improve from 0.94225\n",
+ "Epoch 562/100000\n",
+ " - 18s - loss: 0.3881 - acc: 0.9344 - val_loss: 0.4252 - val_acc: 0.8997\n",
+ "\n",
+ "Epoch 00562: val_acc did not improve from 0.94225\n",
+ "Epoch 563/100000\n",
+ " - 19s - loss: 0.3892 - acc: 0.9341 - val_loss: 0.5081 - val_acc: 0.8627\n",
+ "\n",
+ "Epoch 00563: val_acc did not improve from 0.94225\n",
+ "Epoch 564/100000\n",
+ " - 19s - loss: 0.3879 - acc: 0.9343 - val_loss: 0.4423 - val_acc: 0.8970\n",
+ "\n",
+ "Epoch 00564: val_acc did not improve from 0.94225\n",
+ "Epoch 565/100000\n",
+ " - 19s - loss: 0.3898 - acc: 0.9335 - val_loss: 0.3721 - val_acc: 0.9377\n",
+ "\n",
+ "Epoch 00565: val_acc did not improve from 0.94225\n",
+ "Epoch 566/100000\n",
+ " - 19s - loss: 0.3875 - acc: 0.9347 - val_loss: 0.4689 - val_acc: 0.8928\n",
+ "\n",
+ "Epoch 00566: val_acc did not improve from 0.94225\n",
+ "Epoch 567/100000\n",
+ " - 19s - loss: 0.3887 - acc: 0.9346 - val_loss: 0.3980 - val_acc: 0.9271\n",
+ "\n",
+ "Epoch 00567: val_acc did not improve from 0.94225\n",
+ "Epoch 568/100000\n",
+ " - 19s - loss: 0.3889 - acc: 0.9344 - val_loss: 0.3769 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 00568: val_acc did not improve from 0.94225\n",
+ "Epoch 569/100000\n",
+ " - 19s - loss: 0.3871 - acc: 0.9344 - val_loss: 0.3833 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 00569: val_acc did not improve from 0.94225\n",
+ "Epoch 570/100000\n",
+ " - 19s - loss: 0.3891 - acc: 0.9336 - val_loss: 0.3943 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 00570: val_acc did not improve from 0.94225\n",
+ "Epoch 571/100000\n",
+ " - 19s - loss: 0.3913 - acc: 0.9336 - val_loss: 0.4071 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 00571: val_acc did not improve from 0.94225\n",
+ "Epoch 572/100000\n",
+ " - 19s - loss: 0.3891 - acc: 0.9340 - val_loss: 0.3988 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 00572: val_acc did not improve from 0.94225\n",
+ "Epoch 573/100000\n",
+ " - 19s - loss: 0.3924 - acc: 0.9335 - val_loss: 0.4232 - val_acc: 0.9095\n",
+ "\n",
+ "Epoch 00573: val_acc did not improve from 0.94225\n",
+ "Epoch 574/100000\n",
+ " - 18s - loss: 0.3889 - acc: 0.9337 - val_loss: 0.3809 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 00574: val_acc did not improve from 0.94225\n",
+ "Epoch 575/100000\n",
+ " - 18s - loss: 0.3882 - acc: 0.9345 - val_loss: 0.4112 - val_acc: 0.9159\n",
+ "\n",
+ "Epoch 00575: val_acc did not improve from 0.94225\n",
+ "Epoch 576/100000\n",
+ " - 19s - loss: 0.3882 - acc: 0.9338 - val_loss: 0.4809 - val_acc: 0.8983\n",
+ "\n",
+ "Epoch 00576: val_acc did not improve from 0.94225\n",
+ "Epoch 577/100000\n",
+ " - 18s - loss: 0.3882 - acc: 0.9341 - val_loss: 0.3886 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 00577: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 00577: ReduceLROnPlateau reducing learning rate to 0.0008573750033974647.\n",
+ "Epoch 578/100000\n",
+ " - 19s - loss: 0.3809 - acc: 0.9339 - val_loss: 0.3703 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 00578: val_acc did not improve from 0.94225\n",
+ "Epoch 579/100000\n",
+ " - 18s - loss: 0.3780 - acc: 0.9343 - val_loss: 0.3910 - val_acc: 0.9226\n",
+ "\n",
+ "Epoch 00579: val_acc did not improve from 0.94225\n",
+ "Epoch 580/100000\n",
+ " - 19s - loss: 0.3765 - acc: 0.9348 - val_loss: 0.3937 - val_acc: 0.9092\n",
+ "\n",
+ "Epoch 00580: val_acc did not improve from 0.94225\n",
+ "Epoch 581/100000\n",
+ " - 19s - loss: 0.3816 - acc: 0.9336 - val_loss: 0.3745 - val_acc: 0.9305\n",
+ "\n",
+ "Epoch 00581: val_acc did not improve from 0.94225\n",
+ "Epoch 582/100000\n",
+ " - 18s - loss: 0.3781 - acc: 0.9345 - val_loss: 0.3646 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 00582: val_acc did not improve from 0.94225\n",
+ "Epoch 583/100000\n",
+ " - 19s - loss: 0.3812 - acc: 0.9333 - val_loss: 0.5008 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00583: val_acc did not improve from 0.94225\n",
+ "Epoch 584/100000\n",
+ " - 19s - loss: 0.3791 - acc: 0.9345 - val_loss: 0.3701 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 00584: val_acc did not improve from 0.94225\n",
+ "Epoch 585/100000\n",
+ " - 19s - loss: 0.3755 - acc: 0.9352 - val_loss: 0.3850 - val_acc: 0.9211\n",
+ "\n",
+ "Epoch 00585: val_acc did not improve from 0.94225\n",
+ "Epoch 586/100000\n",
+ " - 19s - loss: 0.3790 - acc: 0.9338 - val_loss: 0.3616 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 00586: val_acc did not improve from 0.94225\n",
+ "Epoch 587/100000\n",
+ " - 18s - loss: 0.3754 - acc: 0.9355 - val_loss: 0.4552 - val_acc: 0.8937\n",
+ "\n",
+ "Epoch 00587: val_acc did not improve from 0.94225\n",
+ "Epoch 588/100000\n",
+ " - 19s - loss: 0.3775 - acc: 0.9339 - val_loss: 0.3803 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 00588: val_acc did not improve from 0.94225\n",
+ "Epoch 589/100000\n",
+ " - 19s - loss: 0.3807 - acc: 0.9342 - val_loss: 0.3644 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 00589: val_acc did not improve from 0.94225\n",
+ "Epoch 590/100000\n",
+ " - 18s - loss: 0.3784 - acc: 0.9348 - val_loss: 0.3641 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 00590: val_acc did not improve from 0.94225\n",
+ "Epoch 591/100000\n",
+ " - 19s - loss: 0.3788 - acc: 0.9343 - val_loss: 0.6189 - val_acc: 0.7987\n",
+ "\n",
+ "Epoch 00591: val_acc did not improve from 0.94225\n",
+ "Epoch 592/100000\n",
+ " - 19s - loss: 0.3766 - acc: 0.9348 - val_loss: 0.3623 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 00592: val_acc did not improve from 0.94225\n",
+ "Epoch 593/100000\n",
+ " - 18s - loss: 0.3801 - acc: 0.9345 - val_loss: 0.3660 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 00593: val_acc did not improve from 0.94225\n",
+ "Epoch 594/100000\n",
+ " - 19s - loss: 0.3796 - acc: 0.9339 - val_loss: 0.8390 - val_acc: 0.8398\n",
+ "\n",
+ "Epoch 00594: val_acc did not improve from 0.94225\n",
+ "Epoch 595/100000\n",
+ " - 18s - loss: 0.3793 - acc: 0.9342 - val_loss: 0.3632 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 00595: val_acc did not improve from 0.94225\n",
+ "Epoch 596/100000\n",
+ " - 19s - loss: 0.3811 - acc: 0.9339 - val_loss: 0.3732 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 00596: val_acc did not improve from 0.94225\n",
+ "Epoch 597/100000\n",
+ " - 18s - loss: 0.3763 - acc: 0.9347 - val_loss: 0.3957 - val_acc: 0.9145\n",
+ "\n",
+ "Epoch 00597: val_acc did not improve from 0.94225\n",
+ "Epoch 598/100000\n",
+ " - 19s - loss: 0.3770 - acc: 0.9349 - val_loss: 0.3944 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 00598: val_acc did not improve from 0.94225\n",
+ "Epoch 599/100000\n",
+ " - 19s - loss: 0.3790 - acc: 0.9343 - val_loss: 0.3731 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 00599: val_acc did not improve from 0.94225\n",
+ "Epoch 600/100000\n",
+ " - 18s - loss: 0.3783 - acc: 0.9345 - val_loss: 0.7100 - val_acc: 0.8460\n",
+ "\n",
+ "Epoch 00600: val_acc did not improve from 0.94225\n",
+ "Epoch 601/100000\n",
+ " - 19s - loss: 0.3777 - acc: 0.9348 - val_loss: 0.3732 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 00601: val_acc did not improve from 0.94225\n",
+ "Epoch 602/100000\n",
+ " - 18s - loss: 0.3771 - acc: 0.9350 - val_loss: 0.3764 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 00602: val_acc did not improve from 0.94225\n",
+ "Epoch 603/100000\n",
+ " - 19s - loss: 0.3766 - acc: 0.9354 - val_loss: 0.3885 - val_acc: 0.9259\n",
+ "\n",
+ "Epoch 00603: val_acc did not improve from 0.94225\n",
+ "Epoch 604/100000\n",
+ " - 18s - loss: 0.3793 - acc: 0.9341 - val_loss: 0.4155 - val_acc: 0.9179\n",
+ "\n",
+ "Epoch 00604: val_acc did not improve from 0.94225\n",
+ "Epoch 605/100000\n",
+ " - 19s - loss: 0.3784 - acc: 0.9343 - val_loss: 0.4017 - val_acc: 0.9163\n",
+ "\n",
+ "Epoch 00605: val_acc did not improve from 0.94225\n",
+ "Epoch 606/100000\n",
+ " - 18s - loss: 0.3770 - acc: 0.9354 - val_loss: 0.3908 - val_acc: 0.9168\n",
+ "\n",
+ "Epoch 00606: val_acc did not improve from 0.94225\n",
+ "Epoch 607/100000\n",
+ " - 19s - loss: 0.3778 - acc: 0.9348 - val_loss: 0.3622 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 00607: val_acc did not improve from 0.94225\n",
+ "Epoch 608/100000\n",
+ " - 19s - loss: 0.3781 - acc: 0.9343 - val_loss: 0.3821 - val_acc: 0.9274\n",
+ "\n",
+ "Epoch 00608: val_acc did not improve from 0.94225\n",
+ "Epoch 609/100000\n",
+ " - 19s - loss: 0.3781 - acc: 0.9343 - val_loss: 0.3940 - val_acc: 0.9141\n",
+ "\n",
+ "Epoch 00609: val_acc did not improve from 0.94225\n",
+ "Epoch 610/100000\n",
+ " - 19s - loss: 0.3791 - acc: 0.9346 - val_loss: 0.4071 - val_acc: 0.9129\n",
+ "\n",
+ "Epoch 00610: val_acc did not improve from 0.94225\n",
+ "Epoch 611/100000\n",
+ " - 19s - loss: 0.3786 - acc: 0.9346 - val_loss: 0.3791 - val_acc: 0.9264\n",
+ "\n",
+ "Epoch 00611: val_acc did not improve from 0.94225\n",
+ "Epoch 612/100000\n",
+ " - 19s - loss: 0.3804 - acc: 0.9343 - val_loss: 0.3829 - val_acc: 0.9204\n",
+ "\n",
+ "Epoch 00612: val_acc did not improve from 0.94225\n",
+ "Epoch 613/100000\n",
+ " - 19s - loss: 0.3789 - acc: 0.9345 - val_loss: 0.3635 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 00613: val_acc did not improve from 0.94225\n",
+ "Epoch 614/100000\n",
+ " - 19s - loss: 0.3774 - acc: 0.9348 - val_loss: 0.3549 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 00614: val_acc did not improve from 0.94225\n",
+ "Epoch 615/100000\n",
+ " - 19s - loss: 0.3770 - acc: 0.9351 - val_loss: 0.3810 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 00615: val_acc did not improve from 0.94225\n",
+ "Epoch 616/100000\n",
+ " - 18s - loss: 0.3794 - acc: 0.9341 - val_loss: 0.3658 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 00616: val_acc did not improve from 0.94225\n",
+ "Epoch 617/100000\n",
+ " - 19s - loss: 0.3810 - acc: 0.9341 - val_loss: 0.4580 - val_acc: 0.8960\n",
+ "\n",
+ "Epoch 00617: val_acc did not improve from 0.94225\n",
+ "Epoch 618/100000\n",
+ " - 19s - loss: 0.3796 - acc: 0.9345 - val_loss: 0.3745 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 00618: val_acc did not improve from 0.94225\n",
+ "Epoch 619/100000\n",
+ " - 18s - loss: 0.3794 - acc: 0.9347 - val_loss: 0.3897 - val_acc: 0.9227\n",
+ "\n",
+ "Epoch 00619: val_acc did not improve from 0.94225\n",
+ "Epoch 620/100000\n",
+ " - 19s - loss: 0.3794 - acc: 0.9349 - val_loss: 0.4037 - val_acc: 0.9246\n",
+ "\n",
+ "Epoch 00620: val_acc did not improve from 0.94225\n",
+ "Epoch 621/100000\n",
+ " - 18s - loss: 0.3792 - acc: 0.9347 - val_loss: 0.3791 - val_acc: 0.9254\n",
+ "\n",
+ "Epoch 00621: val_acc did not improve from 0.94225\n",
+ "Epoch 622/100000\n",
+ " - 18s - loss: 0.3835 - acc: 0.9338 - val_loss: 0.3653 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 00622: val_acc did not improve from 0.94225\n",
+ "Epoch 623/100000\n",
+ " - 18s - loss: 0.3840 - acc: 0.9334 - val_loss: 0.3737 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 00623: val_acc did not improve from 0.94225\n",
+ "Epoch 624/100000\n",
+ " - 19s - loss: 0.3805 - acc: 0.9342 - val_loss: 0.3861 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00624: val_acc did not improve from 0.94225\n",
+ "Epoch 625/100000\n",
+ " - 18s - loss: 0.3805 - acc: 0.9343 - val_loss: 0.3730 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 00625: val_acc did not improve from 0.94225\n",
+ "Epoch 626/100000\n",
+ " - 19s - loss: 0.3799 - acc: 0.9348 - val_loss: 0.4311 - val_acc: 0.9044\n",
+ "\n",
+ "Epoch 00626: val_acc did not improve from 0.94225\n",
+ "Epoch 627/100000\n",
+ " - 18s - loss: 0.3803 - acc: 0.9341 - val_loss: 0.3959 - val_acc: 0.9181\n",
+ "\n",
+ "Epoch 00627: val_acc did not improve from 0.94225\n",
+ "Epoch 628/100000\n",
+ " - 18s - loss: 0.3775 - acc: 0.9352 - val_loss: 0.3884 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 00628: val_acc did not improve from 0.94225\n",
+ "Epoch 629/100000\n",
+ " - 19s - loss: 0.3782 - acc: 0.9353 - val_loss: 0.3964 - val_acc: 0.9152\n",
+ "\n",
+ "Epoch 00629: val_acc did not improve from 0.94225\n",
+ "Epoch 630/100000\n",
+ " - 19s - loss: 0.3792 - acc: 0.9343 - val_loss: 0.4396 - val_acc: 0.9041\n",
+ "\n",
+ "Epoch 00630: val_acc did not improve from 0.94225\n",
+ "Epoch 631/100000\n",
+ " - 19s - loss: 0.3807 - acc: 0.9336 - val_loss: 0.3772 - val_acc: 0.9270\n",
+ "\n",
+ "Epoch 00631: val_acc did not improve from 0.94225\n",
+ "Epoch 632/100000\n",
+ " - 19s - loss: 0.3786 - acc: 0.9347 - val_loss: 0.4404 - val_acc: 0.9050\n",
+ "\n",
+ "Epoch 00632: val_acc did not improve from 0.94225\n",
+ "Epoch 633/100000\n",
+ " - 19s - loss: 0.3806 - acc: 0.9340 - val_loss: 0.3706 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 00633: val_acc did not improve from 0.94225\n",
+ "Epoch 634/100000\n",
+ " - 19s - loss: 0.3813 - acc: 0.9346 - val_loss: 0.3819 - val_acc: 0.9262\n",
+ "\n",
+ "Epoch 00634: val_acc did not improve from 0.94225\n",
+ "Epoch 635/100000\n",
+ " - 19s - loss: 0.3813 - acc: 0.9344 - val_loss: 0.3713 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 00635: val_acc did not improve from 0.94225\n",
+ "Epoch 636/100000\n",
+ " - 19s - loss: 0.3829 - acc: 0.9332 - val_loss: 0.4335 - val_acc: 0.8970\n",
+ "\n",
+ "Epoch 00636: val_acc did not improve from 0.94225\n",
+ "Epoch 637/100000\n",
+ " - 19s - loss: 0.3778 - acc: 0.9346 - val_loss: 0.3979 - val_acc: 0.9265\n",
+ "\n",
+ "Epoch 00637: val_acc did not improve from 0.94225\n",
+ "Epoch 638/100000\n",
+ " - 18s - loss: 0.3779 - acc: 0.9345 - val_loss: 0.3819 - val_acc: 0.9269\n",
+ "\n",
+ "Epoch 00638: val_acc did not improve from 0.94225\n",
+ "Epoch 639/100000\n",
+ " - 19s - loss: 0.3801 - acc: 0.9343 - val_loss: 0.3714 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 00639: val_acc did not improve from 0.94225\n",
+ "Epoch 640/100000\n",
+ " - 19s - loss: 0.3760 - acc: 0.9350 - val_loss: 0.4263 - val_acc: 0.9100\n",
+ "\n",
+ "Epoch 00640: val_acc did not improve from 0.94225\n",
+ "Epoch 641/100000\n",
+ " - 19s - loss: 0.3783 - acc: 0.9347 - val_loss: 0.3699 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 00641: val_acc did not improve from 0.94225\n",
+ "Epoch 642/100000\n",
+ " - 18s - loss: 0.3790 - acc: 0.9345 - val_loss: 0.3690 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 00642: val_acc did not improve from 0.94225\n",
+ "Epoch 643/100000\n",
+ " - 19s - loss: 0.3820 - acc: 0.9337 - val_loss: 0.3767 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 00643: val_acc did not improve from 0.94225\n",
+ "Epoch 644/100000\n",
+ " - 19s - loss: 0.3788 - acc: 0.9356 - val_loss: 0.3741 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 00644: val_acc did not improve from 0.94225\n",
+ "Epoch 645/100000\n",
+ " - 19s - loss: 0.3841 - acc: 0.9338 - val_loss: 0.3770 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 00645: val_acc did not improve from 0.94225\n",
+ "Epoch 646/100000\n",
+ " - 19s - loss: 0.3827 - acc: 0.9347 - val_loss: 0.3673 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 00646: val_acc did not improve from 0.94225\n",
+ "Epoch 647/100000\n",
+ " - 19s - loss: 0.3804 - acc: 0.9351 - val_loss: 0.3602 - val_acc: 0.9399\n",
+ "\n",
+ "Epoch 00647: val_acc did not improve from 0.94225\n",
+ "Epoch 648/100000\n",
+ " - 18s - loss: 0.3780 - acc: 0.9344 - val_loss: 0.4046 - val_acc: 0.9208\n",
+ "\n",
+ "Epoch 00648: val_acc did not improve from 0.94225\n",
+ "Epoch 649/100000\n",
+ " - 19s - loss: 0.3787 - acc: 0.9341 - val_loss: 0.3771 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00649: val_acc did not improve from 0.94225\n",
+ "Epoch 650/100000\n",
+ " - 18s - loss: 0.3788 - acc: 0.9347 - val_loss: 0.4698 - val_acc: 0.8820\n",
+ "\n",
+ "Epoch 00650: val_acc did not improve from 0.94225\n",
+ "Epoch 651/100000\n",
+ " - 19s - loss: 0.3798 - acc: 0.9342 - val_loss: 0.3757 - val_acc: 0.9280\n",
+ "\n",
+ "Epoch 00651: val_acc did not improve from 0.94225\n",
+ "Epoch 652/100000\n",
+ " - 18s - loss: 0.3789 - acc: 0.9349 - val_loss: 0.4087 - val_acc: 0.9172\n",
+ "\n",
+ "Epoch 00652: val_acc did not improve from 0.94225\n",
+ "Epoch 653/100000\n",
+ " - 19s - loss: 0.3786 - acc: 0.9351 - val_loss: 0.4727 - val_acc: 0.9054\n",
+ "\n",
+ "Epoch 00653: val_acc did not improve from 0.94225\n",
+ "Epoch 654/100000\n",
+ " - 19s - loss: 0.3786 - acc: 0.9346 - val_loss: 0.4045 - val_acc: 0.9207\n",
+ "\n",
+ "Epoch 00654: val_acc did not improve from 0.94225\n",
+ "Epoch 655/100000\n",
+ " - 19s - loss: 0.3807 - acc: 0.9338 - val_loss: 0.5151 - val_acc: 0.8732\n",
+ "\n",
+ "Epoch 00655: val_acc did not improve from 0.94225\n",
+ "Epoch 656/100000\n",
+ " - 18s - loss: 0.3773 - acc: 0.9346 - val_loss: 0.4580 - val_acc: 0.8963\n",
+ "\n",
+ "Epoch 00656: val_acc did not improve from 0.94225\n",
+ "Epoch 657/100000\n",
+ " - 19s - loss: 0.3773 - acc: 0.9351 - val_loss: 0.3740 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 00657: val_acc did not improve from 0.94225\n",
+ "Epoch 658/100000\n",
+ " - 18s - loss: 0.3808 - acc: 0.9339 - val_loss: 0.4696 - val_acc: 0.8698\n",
+ "\n",
+ "Epoch 00658: val_acc did not improve from 0.94225\n",
+ "Epoch 659/100000\n",
+ " - 19s - loss: 0.4057 - acc: 0.9326 - val_loss: 0.6827 - val_acc: 0.8110\n",
+ "\n",
+ "Epoch 00659: val_acc did not improve from 0.94225\n",
+ "Epoch 660/100000\n",
+ " - 18s - loss: 0.3829 - acc: 0.9345 - val_loss: 0.3905 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 00660: val_acc did not improve from 0.94225\n",
+ "Epoch 661/100000\n",
+ " - 18s - loss: 0.3809 - acc: 0.9351 - val_loss: 0.4302 - val_acc: 0.9010\n",
+ "\n",
+ "Epoch 00661: val_acc did not improve from 0.94225\n",
+ "Epoch 662/100000\n",
+ " - 18s - loss: 0.3830 - acc: 0.9341 - val_loss: 0.4423 - val_acc: 0.9229\n",
+ "\n",
+ "Epoch 00662: val_acc did not improve from 0.94225\n",
+ "Epoch 663/100000\n",
+ " - 18s - loss: 0.3843 - acc: 0.9337 - val_loss: 0.3998 - val_acc: 0.9192\n",
+ "\n",
+ "Epoch 00663: val_acc did not improve from 0.94225\n",
+ "Epoch 664/100000\n",
+ " - 19s - loss: 0.3801 - acc: 0.9341 - val_loss: 0.4312 - val_acc: 0.9040\n",
+ "\n",
+ "Epoch 00664: val_acc did not improve from 0.94225\n",
+ "Epoch 665/100000\n",
+ " - 18s - loss: 0.3826 - acc: 0.9334 - val_loss: 0.4027 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 00665: val_acc did not improve from 0.94225\n",
+ "Epoch 666/100000\n",
+ " - 19s - loss: 0.3770 - acc: 0.9350 - val_loss: 0.3937 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 00666: val_acc did not improve from 0.94225\n",
+ "Epoch 667/100000\n",
+ " - 18s - loss: 0.3772 - acc: 0.9351 - val_loss: 0.3842 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00667: val_acc did not improve from 0.94225\n",
+ "Epoch 668/100000\n",
+ " - 18s - loss: 0.3809 - acc: 0.9339 - val_loss: 0.3844 - val_acc: 0.9309\n",
+ "\n",
+ "Epoch 00668: val_acc did not improve from 0.94225\n",
+ "Epoch 669/100000\n",
+ " - 19s - loss: 0.3804 - acc: 0.9349 - val_loss: 0.3713 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 00669: val_acc did not improve from 0.94225\n",
+ "Epoch 670/100000\n",
+ " - 18s - loss: 0.3795 - acc: 0.9344 - val_loss: 0.3684 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 00670: val_acc did not improve from 0.94225\n",
+ "Epoch 671/100000\n",
+ " - 19s - loss: 0.3793 - acc: 0.9338 - val_loss: 0.3813 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 00671: val_acc did not improve from 0.94225\n",
+ "Epoch 672/100000\n",
+ " - 19s - loss: 0.3776 - acc: 0.9344 - val_loss: 0.4438 - val_acc: 0.8856\n",
+ "\n",
+ "Epoch 00672: val_acc did not improve from 0.94225\n",
+ "Epoch 673/100000\n",
+ " - 18s - loss: 0.3779 - acc: 0.9344 - val_loss: 0.3848 - val_acc: 0.9225\n",
+ "\n",
+ "Epoch 00673: val_acc did not improve from 0.94225\n",
+ "Epoch 674/100000\n",
+ " - 19s - loss: 0.3765 - acc: 0.9349 - val_loss: 0.4575 - val_acc: 0.8904\n",
+ "\n",
+ "Epoch 00674: val_acc did not improve from 0.94225\n",
+ "Epoch 675/100000\n",
+ " - 18s - loss: 0.3755 - acc: 0.9351 - val_loss: 0.4283 - val_acc: 0.9026\n",
+ "\n",
+ "Epoch 00675: val_acc did not improve from 0.94225\n",
+ "Epoch 676/100000\n",
+ " - 19s - loss: 0.3775 - acc: 0.9347 - val_loss: 0.3565 - val_acc: 0.9392\n",
+ "\n",
+ "Epoch 00676: val_acc did not improve from 0.94225\n",
+ "Epoch 677/100000\n",
+ " - 19s - loss: 0.3818 - acc: 0.9340 - val_loss: 0.3740 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 00677: val_acc did not improve from 0.94225\n",
+ "Epoch 678/100000\n",
+ " - 19s - loss: 0.3809 - acc: 0.9342 - val_loss: 0.3740 - val_acc: 0.9407\n",
+ "\n",
+ "Epoch 00678: val_acc did not improve from 0.94225\n",
+ "Epoch 679/100000\n",
+ " - 19s - loss: 0.3790 - acc: 0.9345 - val_loss: 0.3687 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 00679: val_acc did not improve from 0.94225\n",
+ "Epoch 680/100000\n",
+ " - 19s - loss: 0.3814 - acc: 0.9344 - val_loss: 0.4969 - val_acc: 0.8720\n",
+ "\n",
+ "Epoch 00680: val_acc did not improve from 0.94225\n",
+ "Epoch 681/100000\n",
+ " - 19s - loss: 0.3800 - acc: 0.9345 - val_loss: 0.3764 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 00681: val_acc did not improve from 0.94225\n",
+ "Epoch 682/100000\n",
+ " - 19s - loss: 0.3781 - acc: 0.9353 - val_loss: 0.3978 - val_acc: 0.9203\n",
+ "\n",
+ "Epoch 00682: val_acc did not improve from 0.94225\n",
+ "Epoch 683/100000\n",
+ " - 19s - loss: 0.3776 - acc: 0.9346 - val_loss: 0.3640 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 00683: val_acc did not improve from 0.94225\n",
+ "Epoch 684/100000\n",
+ " - 19s - loss: 0.3788 - acc: 0.9346 - val_loss: 0.3716 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 00684: val_acc did not improve from 0.94225\n",
+ "Epoch 685/100000\n",
+ " - 19s - loss: 0.3757 - acc: 0.9347 - val_loss: 0.4724 - val_acc: 0.8705\n",
+ "\n",
+ "Epoch 00685: val_acc did not improve from 0.94225\n",
+ "Epoch 686/100000\n",
+ " - 19s - loss: 0.3757 - acc: 0.9352 - val_loss: 0.4218 - val_acc: 0.9005\n",
+ "\n",
+ "Epoch 00686: val_acc did not improve from 0.94225\n",
+ "Epoch 687/100000\n",
+ " - 19s - loss: 0.3751 - acc: 0.9354 - val_loss: 0.3734 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 00687: val_acc did not improve from 0.94225\n",
+ "Epoch 688/100000\n",
+ " - 19s - loss: 0.3787 - acc: 0.9342 - val_loss: 0.4124 - val_acc: 0.9099\n",
+ "\n",
+ "Epoch 00688: val_acc did not improve from 0.94225\n",
+ "Epoch 689/100000\n",
+ " - 19s - loss: 0.3804 - acc: 0.9340 - val_loss: 0.4480 - val_acc: 0.8794\n",
+ "\n",
+ "Epoch 00689: val_acc did not improve from 0.94225\n",
+ "Epoch 690/100000\n",
+ " - 19s - loss: 0.3807 - acc: 0.9339 - val_loss: 0.3826 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 00690: val_acc did not improve from 0.94225\n",
+ "Epoch 691/100000\n",
+ " - 19s - loss: 0.3803 - acc: 0.9342 - val_loss: 0.3705 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 00691: val_acc did not improve from 0.94225\n",
+ "Epoch 692/100000\n",
+ " - 19s - loss: 0.3779 - acc: 0.9348 - val_loss: 0.3717 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 00692: val_acc did not improve from 0.94225\n",
+ "Epoch 693/100000\n",
+ " - 19s - loss: 0.3777 - acc: 0.9354 - val_loss: 0.4409 - val_acc: 0.8942\n",
+ "\n",
+ "Epoch 00693: val_acc did not improve from 0.94225\n",
+ "Epoch 694/100000\n",
+ " - 19s - loss: 0.3798 - acc: 0.9343 - val_loss: 0.3922 - val_acc: 0.9222\n",
+ "\n",
+ "Epoch 00694: val_acc did not improve from 0.94225\n",
+ "Epoch 695/100000\n",
+ " - 19s - loss: 0.3771 - acc: 0.9348 - val_loss: 0.3740 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 00695: val_acc did not improve from 0.94225\n",
+ "Epoch 696/100000\n",
+ " - 19s - loss: 0.3790 - acc: 0.9342 - val_loss: 0.3680 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 00696: val_acc did not improve from 0.94225\n",
+ "Epoch 697/100000\n",
+ " - 19s - loss: 0.3761 - acc: 0.9349 - val_loss: 0.7643 - val_acc: 0.7615\n",
+ "\n",
+ "Epoch 00697: val_acc did not improve from 0.94225\n",
+ "Epoch 698/100000\n",
+ " - 19s - loss: 0.3778 - acc: 0.9352 - val_loss: 0.4694 - val_acc: 0.8857\n",
+ "\n",
+ "Epoch 00698: val_acc did not improve from 0.94225\n",
+ "Epoch 699/100000\n",
+ " - 19s - loss: 0.3781 - acc: 0.9343 - val_loss: 0.3801 - val_acc: 0.9232\n",
+ "\n",
+ "Epoch 00699: val_acc did not improve from 0.94225\n",
+ "Epoch 700/100000\n",
+ " - 18s - loss: 0.3782 - acc: 0.9351 - val_loss: 0.3661 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 00700: val_acc did not improve from 0.94225\n",
+ "Epoch 701/100000\n",
+ " - 19s - loss: 0.3770 - acc: 0.9351 - val_loss: 0.3751 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 00701: val_acc did not improve from 0.94225\n",
+ "Epoch 702/100000\n",
+ " - 18s - loss: 0.3818 - acc: 0.9346 - val_loss: 0.3845 - val_acc: 0.9235\n",
+ "\n",
+ "Epoch 00702: val_acc did not improve from 0.94225\n",
+ "Epoch 703/100000\n",
+ " - 18s - loss: 0.3801 - acc: 0.9343 - val_loss: 0.3782 - val_acc: 0.9285\n",
+ "\n",
+ "Epoch 00703: val_acc did not improve from 0.94225\n",
+ "Epoch 704/100000\n",
+ " - 19s - loss: 0.3767 - acc: 0.9353 - val_loss: 0.3924 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 00704: val_acc did not improve from 0.94225\n",
+ "Epoch 705/100000\n",
+ " - 19s - loss: 0.3774 - acc: 0.9344 - val_loss: 0.3659 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00705: val_acc did not improve from 0.94225\n",
+ "Epoch 706/100000\n",
+ " - 20s - loss: 0.3777 - acc: 0.9349 - val_loss: 0.4487 - val_acc: 0.8984\n",
+ "\n",
+ "Epoch 00706: val_acc did not improve from 0.94225\n",
+ "Epoch 707/100000\n",
+ " - 19s - loss: 0.3790 - acc: 0.9346 - val_loss: 0.3673 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00707: val_acc did not improve from 0.94225\n",
+ "Epoch 708/100000\n",
+ " - 19s - loss: 0.3776 - acc: 0.9347 - val_loss: 0.4078 - val_acc: 0.9167\n",
+ "\n",
+ "Epoch 00708: val_acc did not improve from 0.94225\n",
+ "Epoch 709/100000\n",
+ " - 19s - loss: 0.3796 - acc: 0.9343 - val_loss: 0.3727 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 00709: val_acc did not improve from 0.94225\n",
+ "Epoch 710/100000\n",
+ " - 19s - loss: 0.3769 - acc: 0.9351 - val_loss: 0.3802 - val_acc: 0.9302\n",
+ "\n",
+ "Epoch 00710: val_acc did not improve from 0.94225\n",
+ "Epoch 711/100000\n",
+ " - 19s - loss: 0.3801 - acc: 0.9348 - val_loss: 0.3875 - val_acc: 0.9267\n",
+ "\n",
+ "Epoch 00711: val_acc did not improve from 0.94225\n",
+ "Epoch 712/100000\n",
+ " - 19s - loss: 0.3786 - acc: 0.9345 - val_loss: 0.4152 - val_acc: 0.9036\n",
+ "\n",
+ "Epoch 00712: val_acc did not improve from 0.94225\n",
+ "Epoch 713/100000\n",
+ " - 19s - loss: 0.3796 - acc: 0.9340 - val_loss: 0.3615 - val_acc: 0.9399\n",
+ "\n",
+ "Epoch 00713: val_acc did not improve from 0.94225\n",
+ "Epoch 714/100000\n",
+ " - 19s - loss: 0.3792 - acc: 0.9351 - val_loss: 0.3711 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 00714: val_acc did not improve from 0.94225\n",
+ "Epoch 715/100000\n",
+ " - 18s - loss: 0.3822 - acc: 0.9342 - val_loss: 0.4784 - val_acc: 0.8695\n",
+ "\n",
+ "Epoch 00715: val_acc did not improve from 0.94225\n",
+ "Epoch 716/100000\n",
+ " - 19s - loss: 0.3792 - acc: 0.9354 - val_loss: 0.3992 - val_acc: 0.9137\n",
+ "\n",
+ "Epoch 00716: val_acc did not improve from 0.94225\n",
+ "Epoch 717/100000\n",
+ " - 18s - loss: 0.3787 - acc: 0.9343 - val_loss: 0.4005 - val_acc: 0.9248\n",
+ "\n",
+ "Epoch 00717: val_acc did not improve from 0.94225\n",
+ "Epoch 718/100000\n",
+ " - 18s - loss: 0.3802 - acc: 0.9337 - val_loss: 0.3626 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 00718: val_acc did not improve from 0.94225\n",
+ "Epoch 719/100000\n",
+ " - 19s - loss: 0.3782 - acc: 0.9345 - val_loss: 0.3770 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 00719: val_acc did not improve from 0.94225\n",
+ "Epoch 720/100000\n",
+ " - 18s - loss: 0.3778 - acc: 0.9354 - val_loss: 0.3678 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 00720: val_acc did not improve from 0.94225\n",
+ "Epoch 721/100000\n",
+ " - 18s - loss: 0.3776 - acc: 0.9348 - val_loss: 0.3646 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00721: val_acc did not improve from 0.94225\n",
+ "Epoch 722/100000\n",
+ " - 18s - loss: 0.3790 - acc: 0.9347 - val_loss: 0.3897 - val_acc: 0.9181\n",
+ "\n",
+ "Epoch 00722: val_acc did not improve from 0.94225\n",
+ "Epoch 723/100000\n",
+ " - 19s - loss: 0.3793 - acc: 0.9349 - val_loss: 0.3649 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 00723: val_acc did not improve from 0.94225\n",
+ "Epoch 724/100000\n",
+ " - 19s - loss: 0.3795 - acc: 0.9343 - val_loss: 0.3645 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 00724: val_acc did not improve from 0.94225\n",
+ "Epoch 725/100000\n",
+ " - 18s - loss: 0.3767 - acc: 0.9351 - val_loss: 0.3979 - val_acc: 0.9236\n",
+ "\n",
+ "Epoch 00725: val_acc did not improve from 0.94225\n",
+ "Epoch 726/100000\n",
+ " - 19s - loss: 0.3785 - acc: 0.9342 - val_loss: 0.4071 - val_acc: 0.9076\n",
+ "\n",
+ "Epoch 00726: val_acc did not improve from 0.94225\n",
+ "Epoch 727/100000\n",
+ " - 19s - loss: 0.3787 - acc: 0.9339 - val_loss: 0.4158 - val_acc: 0.9085\n",
+ "\n",
+ "Epoch 00727: val_acc did not improve from 0.94225\n",
+ "Epoch 728/100000\n",
+ " - 19s - loss: 0.3781 - acc: 0.9347 - val_loss: 0.4179 - val_acc: 0.9229\n",
+ "\n",
+ "Epoch 00728: val_acc did not improve from 0.94225\n",
+ "Epoch 729/100000\n",
+ " - 19s - loss: 0.3788 - acc: 0.9346 - val_loss: 0.3907 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 00729: val_acc did not improve from 0.94225\n",
+ "Epoch 730/100000\n",
+ " - 19s - loss: 0.3787 - acc: 0.9352 - val_loss: 0.3907 - val_acc: 0.9237\n",
+ "\n",
+ "Epoch 00730: val_acc did not improve from 0.94225\n",
+ "Epoch 731/100000\n",
+ " - 18s - loss: 0.3781 - acc: 0.9349 - val_loss: 0.3692 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 00731: val_acc did not improve from 0.94225\n",
+ "Epoch 732/100000\n",
+ " - 19s - loss: 0.3805 - acc: 0.9337 - val_loss: 0.4081 - val_acc: 0.9055\n",
+ "\n",
+ "Epoch 00732: val_acc did not improve from 0.94225\n",
+ "Epoch 733/100000\n",
+ " - 19s - loss: 0.3785 - acc: 0.9344 - val_loss: 0.3692 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00733: val_acc did not improve from 0.94225\n",
+ "Epoch 734/100000\n",
+ " - 19s - loss: 0.3797 - acc: 0.9350 - val_loss: 0.3772 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 00734: val_acc did not improve from 0.94225\n",
+ "Epoch 735/100000\n",
+ " - 18s - loss: 0.3807 - acc: 0.9345 - val_loss: 0.3739 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 00735: val_acc did not improve from 0.94225\n",
+ "Epoch 736/100000\n",
+ " - 19s - loss: 0.3767 - acc: 0.9350 - val_loss: 0.3630 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 00736: val_acc did not improve from 0.94225\n",
+ "Epoch 737/100000\n",
+ " - 18s - loss: 0.3769 - acc: 0.9347 - val_loss: 0.4213 - val_acc: 0.9074\n",
+ "\n",
+ "Epoch 00737: val_acc did not improve from 0.94225\n",
+ "Epoch 738/100000\n",
+ " - 19s - loss: 0.3807 - acc: 0.9336 - val_loss: 0.3637 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 00738: val_acc did not improve from 0.94225\n",
+ "Epoch 739/100000\n",
+ " - 19s - loss: 0.3765 - acc: 0.9351 - val_loss: 0.3808 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 00739: val_acc did not improve from 0.94225\n",
+ "Epoch 740/100000\n",
+ " - 18s - loss: 0.3774 - acc: 0.9346 - val_loss: 0.3783 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 00740: val_acc did not improve from 0.94225\n",
+ "Epoch 741/100000\n",
+ " - 19s - loss: 0.3792 - acc: 0.9350 - val_loss: 0.3681 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 00741: val_acc did not improve from 0.94225\n",
+ "Epoch 742/100000\n",
+ " - 18s - loss: 0.3800 - acc: 0.9342 - val_loss: 0.3957 - val_acc: 0.9172\n",
+ "\n",
+ "Epoch 00742: val_acc did not improve from 0.94225\n",
+ "Epoch 743/100000\n",
+ " - 19s - loss: 0.3777 - acc: 0.9348 - val_loss: 0.3887 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 00743: val_acc did not improve from 0.94225\n",
+ "Epoch 744/100000\n",
+ " - 18s - loss: 0.3780 - acc: 0.9340 - val_loss: 0.3671 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 00744: val_acc did not improve from 0.94225\n",
+ "Epoch 745/100000\n",
+ " - 19s - loss: 0.3772 - acc: 0.9347 - val_loss: 0.3989 - val_acc: 0.9130\n",
+ "\n",
+ "Epoch 00745: val_acc did not improve from 0.94225\n",
+ "Epoch 746/100000\n",
+ " - 19s - loss: 0.3803 - acc: 0.9341 - val_loss: 0.3751 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 00746: val_acc did not improve from 0.94225\n",
+ "Epoch 747/100000\n",
+ " - 18s - loss: 0.3765 - acc: 0.9351 - val_loss: 0.4008 - val_acc: 0.9113\n",
+ "\n",
+ "Epoch 00747: val_acc did not improve from 0.94225\n",
+ "Epoch 748/100000\n",
+ " - 19s - loss: 0.3793 - acc: 0.9350 - val_loss: 0.4131 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 00748: val_acc did not improve from 0.94225\n",
+ "Epoch 749/100000\n",
+ " - 18s - loss: 0.3781 - acc: 0.9348 - val_loss: 0.4171 - val_acc: 0.9108\n",
+ "\n",
+ "Epoch 00749: val_acc did not improve from 0.94225\n",
+ "Epoch 750/100000\n",
+ " - 19s - loss: 0.3776 - acc: 0.9348 - val_loss: 0.3574 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 00750: val_acc did not improve from 0.94225\n",
+ "Epoch 751/100000\n",
+ " - 19s - loss: 0.3760 - acc: 0.9349 - val_loss: 0.3613 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 00751: val_acc did not improve from 0.94225\n",
+ "Epoch 752/100000\n",
+ " - 19s - loss: 0.3798 - acc: 0.9343 - val_loss: 0.3728 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 00752: val_acc did not improve from 0.94225\n",
+ "Epoch 753/100000\n",
+ " - 18s - loss: 0.3758 - acc: 0.9352 - val_loss: 0.3673 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 00753: val_acc did not improve from 0.94225\n",
+ "Epoch 754/100000\n",
+ " - 19s - loss: 0.3793 - acc: 0.9345 - val_loss: 0.3728 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 00754: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 00754: ReduceLROnPlateau reducing learning rate to 0.0008145062311086804.\n",
+ "Epoch 755/100000\n",
+ " - 19s - loss: 0.3713 - acc: 0.9343 - val_loss: 0.3890 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 00755: val_acc did not improve from 0.94225\n",
+ "Epoch 756/100000\n",
+ " - 18s - loss: 0.3683 - acc: 0.9347 - val_loss: 0.3545 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 00756: val_acc did not improve from 0.94225\n",
+ "Epoch 757/100000\n",
+ " - 18s - loss: 0.3679 - acc: 0.9350 - val_loss: 0.4251 - val_acc: 0.9188\n",
+ "\n",
+ "Epoch 00757: val_acc did not improve from 0.94225\n",
+ "Epoch 758/100000\n",
+ " - 19s - loss: 0.3691 - acc: 0.9353 - val_loss: 0.3651 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 00758: val_acc did not improve from 0.94225\n",
+ "Epoch 759/100000\n",
+ " - 19s - loss: 0.3676 - acc: 0.9355 - val_loss: 0.3806 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 00759: val_acc did not improve from 0.94225\n",
+ "Epoch 760/100000\n",
+ " - 19s - loss: 0.3665 - acc: 0.9357 - val_loss: 0.3723 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 00760: val_acc did not improve from 0.94225\n",
+ "Epoch 761/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9347 - val_loss: 0.3615 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 00761: val_acc did not improve from 0.94225\n",
+ "Epoch 762/100000\n",
+ " - 18s - loss: 0.3696 - acc: 0.9352 - val_loss: 0.3999 - val_acc: 0.9171\n",
+ "\n",
+ "Epoch 00762: val_acc did not improve from 0.94225\n",
+ "Epoch 763/100000\n",
+ " - 19s - loss: 0.3677 - acc: 0.9357 - val_loss: 0.4208 - val_acc: 0.9109\n",
+ "\n",
+ "Epoch 00763: val_acc did not improve from 0.94225\n",
+ "Epoch 764/100000\n",
+ " - 18s - loss: 0.3675 - acc: 0.9352 - val_loss: 0.3548 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 00764: val_acc did not improve from 0.94225\n",
+ "Epoch 765/100000\n",
+ " - 19s - loss: 0.3658 - acc: 0.9356 - val_loss: 0.3548 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 00765: val_acc did not improve from 0.94225\n",
+ "Epoch 766/100000\n",
+ " - 19s - loss: 0.3701 - acc: 0.9341 - val_loss: 0.3799 - val_acc: 0.9205\n",
+ "\n",
+ "Epoch 00766: val_acc did not improve from 0.94225\n",
+ "Epoch 767/100000\n",
+ " - 18s - loss: 0.3678 - acc: 0.9352 - val_loss: 0.3727 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 00767: val_acc did not improve from 0.94225\n",
+ "Epoch 768/100000\n",
+ " - 18s - loss: 0.3695 - acc: 0.9346 - val_loss: 0.3752 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 00768: val_acc did not improve from 0.94225\n",
+ "Epoch 769/100000\n",
+ " - 19s - loss: 0.3677 - acc: 0.9353 - val_loss: 0.3650 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 00769: val_acc did not improve from 0.94225\n",
+ "Epoch 770/100000\n",
+ " - 18s - loss: 0.3683 - acc: 0.9355 - val_loss: 0.3740 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 00770: val_acc did not improve from 0.94225\n",
+ "Epoch 771/100000\n",
+ " - 19s - loss: 0.3668 - acc: 0.9355 - val_loss: 0.4004 - val_acc: 0.9144\n",
+ "\n",
+ "Epoch 00771: val_acc did not improve from 0.94225\n",
+ "Epoch 772/100000\n",
+ " - 18s - loss: 0.3708 - acc: 0.9345 - val_loss: 0.3804 - val_acc: 0.9221\n",
+ "\n",
+ "Epoch 00772: val_acc did not improve from 0.94225\n",
+ "Epoch 773/100000\n",
+ " - 18s - loss: 0.3683 - acc: 0.9355 - val_loss: 0.3922 - val_acc: 0.9177\n",
+ "\n",
+ "Epoch 00773: val_acc did not improve from 0.94225\n",
+ "Epoch 774/100000\n",
+ " - 19s - loss: 0.3687 - acc: 0.9353 - val_loss: 0.3916 - val_acc: 0.9178\n",
+ "\n",
+ "Epoch 00774: val_acc did not improve from 0.94225\n",
+ "Epoch 775/100000\n",
+ " - 19s - loss: 0.3706 - acc: 0.9349 - val_loss: 0.3752 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 00775: val_acc did not improve from 0.94225\n",
+ "Epoch 776/100000\n",
+ " - 18s - loss: 0.3695 - acc: 0.9352 - val_loss: 0.3545 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 00776: val_acc did not improve from 0.94225\n",
+ "Epoch 777/100000\n",
+ " - 19s - loss: 0.3671 - acc: 0.9352 - val_loss: 0.4058 - val_acc: 0.9146\n",
+ "\n",
+ "Epoch 00777: val_acc did not improve from 0.94225\n",
+ "Epoch 778/100000\n",
+ " - 19s - loss: 0.3674 - acc: 0.9352 - val_loss: 0.3854 - val_acc: 0.9218\n",
+ "\n",
+ "Epoch 00778: val_acc did not improve from 0.94225\n",
+ "Epoch 779/100000\n",
+ " - 18s - loss: 0.3666 - acc: 0.9359 - val_loss: 0.3708 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 00779: val_acc did not improve from 0.94225\n",
+ "Epoch 780/100000\n",
+ " - 19s - loss: 0.3717 - acc: 0.9346 - val_loss: 0.4261 - val_acc: 0.9064\n",
+ "\n",
+ "Epoch 00780: val_acc did not improve from 0.94225\n",
+ "Epoch 781/100000\n",
+ " - 18s - loss: 0.3703 - acc: 0.9350 - val_loss: 0.3795 - val_acc: 0.9210\n",
+ "\n",
+ "Epoch 00781: val_acc did not improve from 0.94225\n",
+ "Epoch 782/100000\n",
+ " - 18s - loss: 0.3649 - acc: 0.9363 - val_loss: 0.4089 - val_acc: 0.9240\n",
+ "\n",
+ "Epoch 00782: val_acc did not improve from 0.94225\n",
+ "Epoch 783/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9345 - val_loss: 0.5078 - val_acc: 0.8677\n",
+ "\n",
+ "Epoch 00783: val_acc did not improve from 0.94225\n",
+ "Epoch 784/100000\n",
+ " - 19s - loss: 0.3679 - acc: 0.9350 - val_loss: 0.3764 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 00784: val_acc did not improve from 0.94225\n",
+ "Epoch 785/100000\n",
+ " - 19s - loss: 0.3681 - acc: 0.9355 - val_loss: 0.3577 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 00785: val_acc did not improve from 0.94225\n",
+ "Epoch 786/100000\n",
+ " - 19s - loss: 0.3695 - acc: 0.9353 - val_loss: 0.3614 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 00786: val_acc did not improve from 0.94225\n",
+ "Epoch 787/100000\n",
+ " - 18s - loss: 0.3695 - acc: 0.9354 - val_loss: 0.3585 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 00787: val_acc did not improve from 0.94225\n",
+ "Epoch 788/100000\n",
+ " - 19s - loss: 0.3709 - acc: 0.9352 - val_loss: 0.3749 - val_acc: 0.9282\n",
+ "\n",
+ "Epoch 00788: val_acc did not improve from 0.94225\n",
+ "Epoch 789/100000\n",
+ " - 19s - loss: 0.3670 - acc: 0.9356 - val_loss: 0.3633 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 00789: val_acc did not improve from 0.94225\n",
+ "Epoch 790/100000\n",
+ " - 18s - loss: 0.3695 - acc: 0.9357 - val_loss: 0.3663 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 00790: val_acc did not improve from 0.94225\n",
+ "Epoch 791/100000\n",
+ " - 19s - loss: 0.3705 - acc: 0.9345 - val_loss: 0.3555 - val_acc: 0.9392\n",
+ "\n",
+ "Epoch 00791: val_acc did not improve from 0.94225\n",
+ "Epoch 792/100000\n",
+ " - 18s - loss: 0.3698 - acc: 0.9345 - val_loss: 0.4211 - val_acc: 0.9135\n",
+ "\n",
+ "Epoch 00792: val_acc did not improve from 0.94225\n",
+ "Epoch 793/100000\n",
+ " - 19s - loss: 0.3690 - acc: 0.9351 - val_loss: 0.3914 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 00793: val_acc did not improve from 0.94225\n",
+ "Epoch 794/100000\n",
+ " - 18s - loss: 0.3662 - acc: 0.9358 - val_loss: 0.3840 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 00794: val_acc did not improve from 0.94225\n",
+ "Epoch 795/100000\n",
+ " - 19s - loss: 0.3676 - acc: 0.9353 - val_loss: 0.3589 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 00795: val_acc did not improve from 0.94225\n",
+ "Epoch 796/100000\n",
+ " - 19s - loss: 0.3832 - acc: 0.9347 - val_loss: 0.3726 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 00796: val_acc did not improve from 0.94225\n",
+ "Epoch 797/100000\n",
+ " - 18s - loss: 0.3701 - acc: 0.9358 - val_loss: 0.3664 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 00797: val_acc did not improve from 0.94225\n",
+ "Epoch 798/100000\n",
+ " - 19s - loss: 0.3716 - acc: 0.9347 - val_loss: 0.3745 - val_acc: 0.9304\n",
+ "\n",
+ "Epoch 00798: val_acc did not improve from 0.94225\n",
+ "Epoch 799/100000\n",
+ " - 18s - loss: 0.3690 - acc: 0.9349 - val_loss: 0.3630 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 00799: val_acc did not improve from 0.94225\n",
+ "Epoch 800/100000\n",
+ " - 19s - loss: 0.3707 - acc: 0.9348 - val_loss: 0.3807 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 00800: val_acc did not improve from 0.94225\n",
+ "Epoch 801/100000\n",
+ " - 18s - loss: 0.3699 - acc: 0.9350 - val_loss: 0.3994 - val_acc: 0.9158\n",
+ "\n",
+ "Epoch 00801: val_acc did not improve from 0.94225\n",
+ "Epoch 802/100000\n",
+ " - 19s - loss: 0.3686 - acc: 0.9353 - val_loss: 0.3653 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 00802: val_acc did not improve from 0.94225\n",
+ "Epoch 803/100000\n",
+ " - 19s - loss: 0.3681 - acc: 0.9354 - val_loss: 0.3653 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 00803: val_acc did not improve from 0.94225\n",
+ "Epoch 804/100000\n",
+ " - 19s - loss: 0.3674 - acc: 0.9350 - val_loss: 0.3856 - val_acc: 0.9192\n",
+ "\n",
+ "Epoch 00804: val_acc did not improve from 0.94225\n",
+ "Epoch 805/100000\n",
+ " - 19s - loss: 0.3688 - acc: 0.9347 - val_loss: 0.3610 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 00805: val_acc did not improve from 0.94225\n",
+ "Epoch 806/100000\n",
+ " - 19s - loss: 0.3667 - acc: 0.9351 - val_loss: 0.4244 - val_acc: 0.9028\n",
+ "\n",
+ "Epoch 00806: val_acc did not improve from 0.94225\n",
+ "Epoch 807/100000\n",
+ " - 19s - loss: 0.3660 - acc: 0.9362 - val_loss: 0.3534 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 00807: val_acc did not improve from 0.94225\n",
+ "Epoch 808/100000\n",
+ " - 18s - loss: 0.3693 - acc: 0.9349 - val_loss: 0.4597 - val_acc: 0.8820\n",
+ "\n",
+ "Epoch 00808: val_acc did not improve from 0.94225\n",
+ "Epoch 809/100000\n",
+ " - 19s - loss: 0.3694 - acc: 0.9350 - val_loss: 0.3918 - val_acc: 0.9227\n",
+ "\n",
+ "Epoch 00809: val_acc did not improve from 0.94225\n",
+ "Epoch 810/100000\n",
+ " - 18s - loss: 0.3702 - acc: 0.9345 - val_loss: 0.3701 - val_acc: 0.9280\n",
+ "\n",
+ "Epoch 00810: val_acc did not improve from 0.94225\n",
+ "Epoch 811/100000\n",
+ " - 19s - loss: 0.3720 - acc: 0.9345 - val_loss: 0.3940 - val_acc: 0.9175\n",
+ "\n",
+ "Epoch 00811: val_acc did not improve from 0.94225\n",
+ "Epoch 812/100000\n",
+ " - 19s - loss: 0.3686 - acc: 0.9347 - val_loss: 0.4201 - val_acc: 0.9053\n",
+ "\n",
+ "Epoch 00812: val_acc did not improve from 0.94225\n",
+ "Epoch 813/100000\n",
+ " - 19s - loss: 0.3684 - acc: 0.9347 - val_loss: 0.4689 - val_acc: 0.9188\n",
+ "\n",
+ "Epoch 00813: val_acc did not improve from 0.94225\n",
+ "Epoch 814/100000\n",
+ " - 18s - loss: 0.3714 - acc: 0.9352 - val_loss: 0.3492 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 00814: val_acc did not improve from 0.94225\n",
+ "Epoch 815/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9348 - val_loss: 0.3726 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 00815: val_acc did not improve from 0.94225\n",
+ "Epoch 816/100000\n",
+ " - 18s - loss: 0.3685 - acc: 0.9349 - val_loss: 0.3745 - val_acc: 0.9278\n",
+ "\n",
+ "Epoch 00816: val_acc did not improve from 0.94225\n",
+ "Epoch 817/100000\n",
+ " - 18s - loss: 0.3696 - acc: 0.9348 - val_loss: 0.3696 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 00817: val_acc did not improve from 0.94225\n",
+ "Epoch 818/100000\n",
+ " - 18s - loss: 0.3682 - acc: 0.9360 - val_loss: 0.4135 - val_acc: 0.9065\n",
+ "\n",
+ "Epoch 00818: val_acc did not improve from 0.94225\n",
+ "Epoch 819/100000\n",
+ " - 19s - loss: 0.3686 - acc: 0.9358 - val_loss: 0.4224 - val_acc: 0.9000\n",
+ "\n",
+ "Epoch 00819: val_acc did not improve from 0.94225\n",
+ "Epoch 820/100000\n",
+ " - 18s - loss: 0.3702 - acc: 0.9351 - val_loss: 0.4221 - val_acc: 0.9104\n",
+ "\n",
+ "Epoch 00820: val_acc did not improve from 0.94225\n",
+ "Epoch 821/100000\n",
+ " - 19s - loss: 0.3701 - acc: 0.9348 - val_loss: 0.3642 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 00821: val_acc did not improve from 0.94225\n",
+ "Epoch 822/100000\n",
+ " - 19s - loss: 0.3716 - acc: 0.9336 - val_loss: 0.3812 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 00822: val_acc did not improve from 0.94225\n",
+ "Epoch 823/100000\n",
+ " - 19s - loss: 0.3678 - acc: 0.9352 - val_loss: 0.4046 - val_acc: 0.9226\n",
+ "\n",
+ "Epoch 00823: val_acc did not improve from 0.94225\n",
+ "Epoch 824/100000\n",
+ " - 18s - loss: 0.3693 - acc: 0.9348 - val_loss: 0.3854 - val_acc: 0.9211\n",
+ "\n",
+ "Epoch 00824: val_acc did not improve from 0.94225\n",
+ "Epoch 825/100000\n",
+ " - 19s - loss: 0.3699 - acc: 0.9350 - val_loss: 0.4669 - val_acc: 0.8761\n",
+ "\n",
+ "Epoch 00825: val_acc did not improve from 0.94225\n",
+ "Epoch 826/100000\n",
+ " - 19s - loss: 0.3718 - acc: 0.9346 - val_loss: 0.5650 - val_acc: 0.8329\n",
+ "\n",
+ "Epoch 00826: val_acc did not improve from 0.94225\n",
+ "Epoch 827/100000\n",
+ " - 18s - loss: 0.3696 - acc: 0.9358 - val_loss: 0.3839 - val_acc: 0.9282\n",
+ "\n",
+ "Epoch 00827: val_acc did not improve from 0.94225\n",
+ "Epoch 828/100000\n",
+ " - 19s - loss: 0.3731 - acc: 0.9352 - val_loss: 0.3533 - val_acc: 0.9397\n",
+ "\n",
+ "Epoch 00828: val_acc did not improve from 0.94225\n",
+ "Epoch 829/100000\n",
+ " - 18s - loss: 0.3722 - acc: 0.9349 - val_loss: 0.3716 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 00829: val_acc did not improve from 0.94225\n",
+ "Epoch 830/100000\n",
+ " - 19s - loss: 0.3673 - acc: 0.9359 - val_loss: 0.3555 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 00830: val_acc did not improve from 0.94225\n",
+ "Epoch 831/100000\n",
+ " - 19s - loss: 0.3688 - acc: 0.9345 - val_loss: 0.3586 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 00831: val_acc did not improve from 0.94225\n",
+ "Epoch 832/100000\n",
+ " - 19s - loss: 0.3683 - acc: 0.9353 - val_loss: 0.3840 - val_acc: 0.9207\n",
+ "\n",
+ "Epoch 00832: val_acc did not improve from 0.94225\n",
+ "Epoch 833/100000\n",
+ " - 19s - loss: 0.3674 - acc: 0.9352 - val_loss: 0.4149 - val_acc: 0.9095\n",
+ "\n",
+ "Epoch 00833: val_acc did not improve from 0.94225\n",
+ "Epoch 834/100000\n",
+ " - 19s - loss: 0.3661 - acc: 0.9360 - val_loss: 0.4385 - val_acc: 0.8936\n",
+ "\n",
+ "Epoch 00834: val_acc did not improve from 0.94225\n",
+ "Epoch 835/100000\n",
+ " - 19s - loss: 0.3715 - acc: 0.9341 - val_loss: 0.5857 - val_acc: 0.8354\n",
+ "\n",
+ "Epoch 00835: val_acc did not improve from 0.94225\n",
+ "Epoch 836/100000\n",
+ " - 19s - loss: 0.3706 - acc: 0.9348 - val_loss: 0.4281 - val_acc: 0.9051\n",
+ "\n",
+ "Epoch 00836: val_acc did not improve from 0.94225\n",
+ "Epoch 837/100000\n",
+ " - 19s - loss: 0.3688 - acc: 0.9355 - val_loss: 0.3857 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 00837: val_acc did not improve from 0.94225\n",
+ "Epoch 838/100000\n",
+ " - 19s - loss: 0.3699 - acc: 0.9350 - val_loss: 0.3686 - val_acc: 0.9396\n",
+ "\n",
+ "Epoch 00838: val_acc did not improve from 0.94225\n",
+ "Epoch 839/100000\n",
+ " - 19s - loss: 0.3702 - acc: 0.9346 - val_loss: 0.3547 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 00839: val_acc did not improve from 0.94225\n",
+ "Epoch 840/100000\n",
+ " - 18s - loss: 0.3677 - acc: 0.9356 - val_loss: 0.3726 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00840: val_acc did not improve from 0.94225\n",
+ "Epoch 841/100000\n",
+ " - 19s - loss: 0.3678 - acc: 0.9349 - val_loss: 0.3826 - val_acc: 0.9220\n",
+ "\n",
+ "Epoch 00841: val_acc did not improve from 0.94225\n",
+ "Epoch 842/100000\n",
+ " - 19s - loss: 0.3696 - acc: 0.9346 - val_loss: 0.3728 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00842: val_acc did not improve from 0.94225\n",
+ "Epoch 843/100000\n",
+ " - 19s - loss: 0.3719 - acc: 0.9346 - val_loss: 0.4048 - val_acc: 0.9100\n",
+ "\n",
+ "Epoch 00843: val_acc did not improve from 0.94225\n",
+ "Epoch 844/100000\n",
+ " - 19s - loss: 0.3709 - acc: 0.9351 - val_loss: 0.4000 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 00844: val_acc did not improve from 0.94225\n",
+ "Epoch 845/100000\n",
+ " - 19s - loss: 0.3708 - acc: 0.9351 - val_loss: 0.3479 - val_acc: 0.9405\n",
+ "\n",
+ "Epoch 00845: val_acc did not improve from 0.94225\n",
+ "Epoch 846/100000\n",
+ " - 18s - loss: 0.3708 - acc: 0.9354 - val_loss: 0.4133 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 00846: val_acc did not improve from 0.94225\n",
+ "Epoch 847/100000\n",
+ " - 19s - loss: 0.3733 - acc: 0.9349 - val_loss: 0.3578 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 00847: val_acc did not improve from 0.94225\n",
+ "Epoch 848/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9355 - val_loss: 0.4061 - val_acc: 0.9177\n",
+ "\n",
+ "Epoch 00848: val_acc did not improve from 0.94225\n",
+ "Epoch 849/100000\n",
+ " - 19s - loss: 0.3685 - acc: 0.9348 - val_loss: 0.4407 - val_acc: 0.9061\n",
+ "\n",
+ "Epoch 00849: val_acc did not improve from 0.94225\n",
+ "Epoch 850/100000\n",
+ " - 19s - loss: 0.3691 - acc: 0.9350 - val_loss: 0.3587 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 00850: val_acc did not improve from 0.94225\n",
+ "Epoch 851/100000\n",
+ " - 19s - loss: 0.3695 - acc: 0.9348 - val_loss: 0.4356 - val_acc: 0.8978\n",
+ "\n",
+ "Epoch 00851: val_acc did not improve from 0.94225\n",
+ "Epoch 852/100000\n",
+ " - 19s - loss: 0.3699 - acc: 0.9347 - val_loss: 0.3590 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 00852: val_acc did not improve from 0.94225\n",
+ "Epoch 853/100000\n",
+ " - 19s - loss: 0.3714 - acc: 0.9348 - val_loss: 0.3570 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00853: val_acc did not improve from 0.94225\n",
+ "Epoch 854/100000\n",
+ " - 19s - loss: 0.3685 - acc: 0.9358 - val_loss: 0.3602 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 00854: val_acc did not improve from 0.94225\n",
+ "Epoch 855/100000\n",
+ " - 18s - loss: 0.3713 - acc: 0.9346 - val_loss: 0.3806 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 00855: val_acc did not improve from 0.94225\n",
+ "Epoch 856/100000\n",
+ " - 19s - loss: 0.3712 - acc: 0.9352 - val_loss: 0.4086 - val_acc: 0.9274\n",
+ "\n",
+ "Epoch 00856: val_acc did not improve from 0.94225\n",
+ "Epoch 857/100000\n",
+ " - 18s - loss: 0.3693 - acc: 0.9352 - val_loss: 0.3585 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 00857: val_acc did not improve from 0.94225\n",
+ "Epoch 858/100000\n",
+ " - 19s - loss: 0.3677 - acc: 0.9351 - val_loss: 0.3653 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 00858: val_acc did not improve from 0.94225\n",
+ "Epoch 859/100000\n",
+ " - 19s - loss: 0.3697 - acc: 0.9349 - val_loss: 0.3805 - val_acc: 0.9272\n",
+ "\n",
+ "Epoch 00859: val_acc did not improve from 0.94225\n",
+ "Epoch 860/100000\n",
+ " - 19s - loss: 0.3703 - acc: 0.9356 - val_loss: 0.3670 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 00860: val_acc did not improve from 0.94225\n",
+ "Epoch 861/100000\n",
+ " - 19s - loss: 0.3698 - acc: 0.9354 - val_loss: 0.3690 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 00861: val_acc did not improve from 0.94225\n",
+ "Epoch 862/100000\n",
+ " - 18s - loss: 0.3721 - acc: 0.9357 - val_loss: 0.3699 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 00862: val_acc did not improve from 0.94225\n",
+ "Epoch 863/100000\n",
+ " - 19s - loss: 0.3787 - acc: 0.9351 - val_loss: 0.3664 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 00863: val_acc did not improve from 0.94225\n",
+ "Epoch 864/100000\n",
+ " - 18s - loss: 0.3719 - acc: 0.9348 - val_loss: 0.3663 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 00864: val_acc did not improve from 0.94225\n",
+ "Epoch 865/100000\n",
+ " - 18s - loss: 0.3698 - acc: 0.9355 - val_loss: 0.4007 - val_acc: 0.9042\n",
+ "\n",
+ "Epoch 00865: val_acc did not improve from 0.94225\n",
+ "Epoch 866/100000\n",
+ " - 18s - loss: 0.3682 - acc: 0.9358 - val_loss: 0.4400 - val_acc: 0.8985\n",
+ "\n",
+ "Epoch 00866: val_acc did not improve from 0.94225\n",
+ "Epoch 867/100000\n",
+ " - 19s - loss: 0.3717 - acc: 0.9342 - val_loss: 0.5341 - val_acc: 0.8577\n",
+ "\n",
+ "Epoch 00867: val_acc did not improve from 0.94225\n",
+ "Epoch 868/100000\n",
+ " - 19s - loss: 0.3695 - acc: 0.9348 - val_loss: 0.3563 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 00868: val_acc did not improve from 0.94225\n",
+ "Epoch 869/100000\n",
+ " - 18s - loss: 0.3690 - acc: 0.9349 - val_loss: 0.3616 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 00869: val_acc did not improve from 0.94225\n",
+ "Epoch 870/100000\n",
+ " - 19s - loss: 0.3677 - acc: 0.9357 - val_loss: 0.3499 - val_acc: 0.9406\n",
+ "\n",
+ "Epoch 00870: val_acc did not improve from 0.94225\n",
+ "Epoch 871/100000\n",
+ " - 19s - loss: 0.3688 - acc: 0.9352 - val_loss: 0.4537 - val_acc: 0.8939\n",
+ "\n",
+ "Epoch 00871: val_acc did not improve from 0.94225\n",
+ "Epoch 872/100000\n",
+ " - 18s - loss: 0.3685 - acc: 0.9349 - val_loss: 0.3643 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 00872: val_acc did not improve from 0.94225\n",
+ "Epoch 873/100000\n",
+ " - 19s - loss: 0.3702 - acc: 0.9351 - val_loss: 0.4030 - val_acc: 0.9173\n",
+ "\n",
+ "Epoch 00873: val_acc did not improve from 0.94225\n",
+ "Epoch 874/100000\n",
+ " - 19s - loss: 0.3694 - acc: 0.9351 - val_loss: 0.3518 - val_acc: 0.9393\n",
+ "\n",
+ "Epoch 00874: val_acc did not improve from 0.94225\n",
+ "Epoch 875/100000\n",
+ " - 19s - loss: 0.3690 - acc: 0.9348 - val_loss: 0.3889 - val_acc: 0.9244\n",
+ "\n",
+ "Epoch 00875: val_acc did not improve from 0.94225\n",
+ "Epoch 876/100000\n",
+ " - 19s - loss: 0.3704 - acc: 0.9348 - val_loss: 0.4140 - val_acc: 0.9079\n",
+ "\n",
+ "Epoch 00876: val_acc did not improve from 0.94225\n",
+ "Epoch 877/100000\n",
+ " - 18s - loss: 0.3655 - acc: 0.9362 - val_loss: 0.3707 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 00877: val_acc did not improve from 0.94225\n",
+ "Epoch 878/100000\n",
+ " - 19s - loss: 0.3677 - acc: 0.9356 - val_loss: 0.3727 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00878: val_acc did not improve from 0.94225\n",
+ "Epoch 879/100000\n",
+ " - 18s - loss: 0.3679 - acc: 0.9352 - val_loss: 0.3842 - val_acc: 0.9272\n",
+ "\n",
+ "Epoch 00879: val_acc did not improve from 0.94225\n",
+ "Epoch 880/100000\n",
+ " - 19s - loss: 0.3690 - acc: 0.9350 - val_loss: 0.3659 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 00880: val_acc did not improve from 0.94225\n",
+ "Epoch 881/100000\n",
+ " - 18s - loss: 0.3689 - acc: 0.9360 - val_loss: 0.3472 - val_acc: 0.9410\n",
+ "\n",
+ "Epoch 00881: val_acc did not improve from 0.94225\n",
+ "Epoch 882/100000\n",
+ " - 19s - loss: 0.3709 - acc: 0.9351 - val_loss: 0.3796 - val_acc: 0.9240\n",
+ "\n",
+ "Epoch 00882: val_acc did not improve from 0.94225\n",
+ "Epoch 883/100000\n",
+ " - 18s - loss: 0.3703 - acc: 0.9354 - val_loss: 0.3553 - val_acc: 0.9386\n",
+ "\n",
+ "Epoch 00883: val_acc did not improve from 0.94225\n",
+ "Epoch 884/100000\n",
+ " - 19s - loss: 0.3679 - acc: 0.9360 - val_loss: 0.3717 - val_acc: 0.9386\n",
+ "\n",
+ "Epoch 00884: val_acc did not improve from 0.94225\n",
+ "Epoch 885/100000\n",
+ " - 18s - loss: 0.3733 - acc: 0.9347 - val_loss: 0.3841 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 00885: val_acc did not improve from 0.94225\n",
+ "Epoch 886/100000\n",
+ " - 19s - loss: 0.3725 - acc: 0.9347 - val_loss: 0.3570 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 00886: val_acc did not improve from 0.94225\n",
+ "Epoch 887/100000\n",
+ " - 18s - loss: 0.3687 - acc: 0.9351 - val_loss: 0.3621 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 00887: val_acc did not improve from 0.94225\n",
+ "Epoch 888/100000\n",
+ " - 19s - loss: 0.3679 - acc: 0.9349 - val_loss: 0.3540 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 00888: val_acc did not improve from 0.94225\n",
+ "Epoch 889/100000\n",
+ " - 19s - loss: 0.3691 - acc: 0.9351 - val_loss: 0.3810 - val_acc: 0.9213\n",
+ "\n",
+ "Epoch 00889: val_acc did not improve from 0.94225\n",
+ "Epoch 890/100000\n",
+ " - 19s - loss: 0.3720 - acc: 0.9339 - val_loss: 0.4381 - val_acc: 0.9199\n",
+ "\n",
+ "Epoch 00890: val_acc did not improve from 0.94225\n",
+ "Epoch 891/100000\n",
+ " - 19s - loss: 0.3700 - acc: 0.9356 - val_loss: 0.4386 - val_acc: 0.8918\n",
+ "\n",
+ "Epoch 00891: val_acc did not improve from 0.94225\n",
+ "Epoch 892/100000\n",
+ " - 19s - loss: 0.3788 - acc: 0.9352 - val_loss: 0.3775 - val_acc: 0.9302\n",
+ "\n",
+ "Epoch 00892: val_acc did not improve from 0.94225\n",
+ "Epoch 893/100000\n",
+ " - 19s - loss: 0.3737 - acc: 0.9349 - val_loss: 0.3662 - val_acc: 0.9290\n",
+ "\n",
+ "Epoch 00893: val_acc did not improve from 0.94225\n",
+ "Epoch 894/100000\n",
+ " - 19s - loss: 0.3704 - acc: 0.9355 - val_loss: 0.5097 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00894: val_acc did not improve from 0.94225\n",
+ "Epoch 895/100000\n",
+ " - 19s - loss: 0.3683 - acc: 0.9352 - val_loss: 0.3650 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 00895: val_acc did not improve from 0.94225\n",
+ "Epoch 896/100000\n",
+ " - 19s - loss: 0.3705 - acc: 0.9345 - val_loss: 0.4798 - val_acc: 0.8715\n",
+ "\n",
+ "Epoch 00896: val_acc did not improve from 0.94225\n",
+ "Epoch 897/100000\n",
+ " - 19s - loss: 0.3685 - acc: 0.9353 - val_loss: 0.3714 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 00897: val_acc did not improve from 0.94225\n",
+ "Epoch 898/100000\n",
+ " - 19s - loss: 0.3696 - acc: 0.9353 - val_loss: 0.4371 - val_acc: 0.8925\n",
+ "\n",
+ "Epoch 00898: val_acc did not improve from 0.94225\n",
+ "Epoch 899/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9355 - val_loss: 0.3752 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 00899: val_acc did not improve from 0.94225\n",
+ "Epoch 900/100000\n",
+ " - 18s - loss: 0.3683 - acc: 0.9357 - val_loss: 0.3795 - val_acc: 0.9271\n",
+ "\n",
+ "Epoch 00900: val_acc did not improve from 0.94225\n",
+ "Epoch 901/100000\n",
+ " - 19s - loss: 0.3688 - acc: 0.9354 - val_loss: 0.3578 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 00901: val_acc did not improve from 0.94225\n",
+ "Epoch 902/100000\n",
+ " - 19s - loss: 0.3711 - acc: 0.9342 - val_loss: 0.3597 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 00902: val_acc did not improve from 0.94225\n",
+ "Epoch 903/100000\n",
+ " - 19s - loss: 0.3700 - acc: 0.9351 - val_loss: 0.3584 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 00903: val_acc did not improve from 0.94225\n",
+ "Epoch 904/100000\n",
+ " - 19s - loss: 0.3694 - acc: 0.9356 - val_loss: 0.3729 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 00904: val_acc did not improve from 0.94225\n",
+ "Epoch 905/100000\n",
+ " - 19s - loss: 0.3691 - acc: 0.9354 - val_loss: 0.3595 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 00905: val_acc did not improve from 0.94225\n",
+ "Epoch 906/100000\n",
+ " - 18s - loss: 0.3675 - acc: 0.9353 - val_loss: 0.3856 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 00906: val_acc did not improve from 0.94225\n",
+ "Epoch 907/100000\n",
+ " - 19s - loss: 0.3710 - acc: 0.9339 - val_loss: 0.3753 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 00907: val_acc did not improve from 0.94225\n",
+ "Epoch 908/100000\n",
+ " - 19s - loss: 0.3676 - acc: 0.9356 - val_loss: 0.3600 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 00908: val_acc did not improve from 0.94225\n",
+ "Epoch 909/100000\n",
+ " - 19s - loss: 0.3701 - acc: 0.9353 - val_loss: 0.3770 - val_acc: 0.9267\n",
+ "\n",
+ "Epoch 00909: val_acc did not improve from 0.94225\n",
+ "Epoch 910/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9347 - val_loss: 0.3586 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 00910: val_acc did not improve from 0.94225\n",
+ "Epoch 911/100000\n",
+ " - 19s - loss: 0.3777 - acc: 0.9345 - val_loss: 0.3757 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 00911: val_acc did not improve from 0.94225\n",
+ "Epoch 912/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9354 - val_loss: 0.3734 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 00912: val_acc did not improve from 0.94225\n",
+ "Epoch 913/100000\n",
+ " - 19s - loss: 0.3698 - acc: 0.9354 - val_loss: 0.3631 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 00913: val_acc did not improve from 0.94225\n",
+ "Epoch 914/100000\n",
+ " - 18s - loss: 0.3670 - acc: 0.9361 - val_loss: 0.3696 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 00914: val_acc did not improve from 0.94225\n",
+ "Epoch 915/100000\n",
+ " - 19s - loss: 0.3666 - acc: 0.9358 - val_loss: 0.3705 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 00915: val_acc did not improve from 0.94225\n",
+ "Epoch 916/100000\n",
+ " - 19s - loss: 0.3715 - acc: 0.9343 - val_loss: 0.3684 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 00916: val_acc did not improve from 0.94225\n",
+ "Epoch 917/100000\n",
+ " - 19s - loss: 0.3690 - acc: 0.9350 - val_loss: 0.4192 - val_acc: 0.9105\n",
+ "\n",
+ "Epoch 00917: val_acc did not improve from 0.94225\n",
+ "Epoch 918/100000\n",
+ " - 19s - loss: 0.3682 - acc: 0.9354 - val_loss: 0.3831 - val_acc: 0.9201\n",
+ "\n",
+ "Epoch 00918: val_acc did not improve from 0.94225\n",
+ "Epoch 919/100000\n",
+ " - 19s - loss: 0.3700 - acc: 0.9348 - val_loss: 0.3727 - val_acc: 0.9273\n",
+ "\n",
+ "Epoch 00919: val_acc did not improve from 0.94225\n",
+ "Epoch 920/100000\n",
+ " - 18s - loss: 0.3711 - acc: 0.9355 - val_loss: 0.3746 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00920: val_acc did not improve from 0.94225\n",
+ "Epoch 921/100000\n",
+ " - 18s - loss: 0.3710 - acc: 0.9345 - val_loss: 0.5080 - val_acc: 0.8611\n",
+ "\n",
+ "Epoch 00921: val_acc did not improve from 0.94225\n",
+ "Epoch 922/100000\n",
+ " - 18s - loss: 0.3692 - acc: 0.9350 - val_loss: 0.3991 - val_acc: 0.9195\n",
+ "\n",
+ "Epoch 00922: val_acc did not improve from 0.94225\n",
+ "Epoch 923/100000\n",
+ " - 19s - loss: 0.3711 - acc: 0.9355 - val_loss: 0.3512 - val_acc: 0.9410\n",
+ "\n",
+ "Epoch 00923: val_acc did not improve from 0.94225\n",
+ "Epoch 924/100000\n",
+ " - 19s - loss: 0.3702 - acc: 0.9351 - val_loss: 0.3697 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 00924: val_acc did not improve from 0.94225\n",
+ "Epoch 925/100000\n",
+ " - 18s - loss: 0.3703 - acc: 0.9348 - val_loss: 0.3726 - val_acc: 0.9257\n",
+ "\n",
+ "Epoch 00925: val_acc did not improve from 0.94225\n",
+ "Epoch 926/100000\n",
+ " - 19s - loss: 0.3722 - acc: 0.9347 - val_loss: 0.3712 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 00926: val_acc did not improve from 0.94225\n",
+ "Epoch 927/100000\n",
+ " - 18s - loss: 0.3684 - acc: 0.9355 - val_loss: 0.4057 - val_acc: 0.9200\n",
+ "\n",
+ "Epoch 00927: val_acc did not improve from 0.94225\n",
+ "Epoch 928/100000\n",
+ " - 19s - loss: 0.3702 - acc: 0.9346 - val_loss: 0.4334 - val_acc: 0.9022\n",
+ "\n",
+ "Epoch 00928: val_acc did not improve from 0.94225\n",
+ "Epoch 929/100000\n",
+ " - 18s - loss: 0.3699 - acc: 0.9349 - val_loss: 0.4640 - val_acc: 0.9127\n",
+ "\n",
+ "Epoch 00929: val_acc did not improve from 0.94225\n",
+ "Epoch 930/100000\n",
+ " - 19s - loss: 0.3703 - acc: 0.9351 - val_loss: 0.3636 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 00930: val_acc did not improve from 0.94225\n",
+ "Epoch 931/100000\n",
+ " - 18s - loss: 0.3701 - acc: 0.9346 - val_loss: 0.3531 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 00931: val_acc did not improve from 0.94225\n",
+ "Epoch 932/100000\n",
+ " - 19s - loss: 0.3676 - acc: 0.9357 - val_loss: 0.3653 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 00932: val_acc did not improve from 0.94225\n",
+ "Epoch 933/100000\n",
+ " - 18s - loss: 0.3756 - acc: 0.9345 - val_loss: 0.3529 - val_acc: 0.9394\n",
+ "\n",
+ "Epoch 00933: val_acc did not improve from 0.94225\n",
+ "Epoch 934/100000\n",
+ " - 19s - loss: 0.3711 - acc: 0.9356 - val_loss: 0.4170 - val_acc: 0.9113\n",
+ "\n",
+ "Epoch 00934: val_acc did not improve from 0.94225\n",
+ "Epoch 935/100000\n",
+ " - 19s - loss: 0.3702 - acc: 0.9355 - val_loss: 0.3608 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 00935: val_acc did not improve from 0.94225\n",
+ "Epoch 936/100000\n",
+ " - 18s - loss: 0.3688 - acc: 0.9356 - val_loss: 0.4597 - val_acc: 0.8720\n",
+ "\n",
+ "Epoch 00936: val_acc did not improve from 0.94225\n",
+ "Epoch 937/100000\n",
+ " - 19s - loss: 0.3703 - acc: 0.9348 - val_loss: 0.3580 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 00937: val_acc did not improve from 0.94225\n",
+ "Epoch 938/100000\n",
+ " - 19s - loss: 0.3664 - acc: 0.9359 - val_loss: 0.4087 - val_acc: 0.9040\n",
+ "\n",
+ "Epoch 00938: val_acc did not improve from 0.94225\n",
+ "Epoch 939/100000\n",
+ " - 19s - loss: 0.3741 - acc: 0.9349 - val_loss: 0.3821 - val_acc: 0.9242\n",
+ "\n",
+ "Epoch 00939: val_acc did not improve from 0.94225\n",
+ "Epoch 940/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9348 - val_loss: 0.3700 - val_acc: 0.9323\n",
+ "\n",
+ "Epoch 00940: val_acc did not improve from 0.94225\n",
+ "Epoch 941/100000\n",
+ " - 19s - loss: 0.3694 - acc: 0.9345 - val_loss: 0.3679 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 00941: val_acc did not improve from 0.94225\n",
+ "Epoch 942/100000\n",
+ " - 18s - loss: 0.3711 - acc: 0.9349 - val_loss: 0.3549 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 00942: val_acc did not improve from 0.94225\n",
+ "Epoch 943/100000\n",
+ " - 19s - loss: 0.3654 - acc: 0.9361 - val_loss: 0.3527 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 00943: val_acc did not improve from 0.94225\n",
+ "Epoch 944/100000\n",
+ " - 19s - loss: 0.3694 - acc: 0.9351 - val_loss: 0.3573 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 00944: val_acc did not improve from 0.94225\n",
+ "Epoch 945/100000\n",
+ " - 19s - loss: 0.3687 - acc: 0.9358 - val_loss: 0.3673 - val_acc: 0.9304\n",
+ "\n",
+ "Epoch 00950: val_acc did not improve from 0.94225\n",
+ "Epoch 951/100000\n",
+ " - 19s - loss: 0.3675 - acc: 0.9357 - val_loss: 0.3832 - val_acc: 0.9180\n",
+ "\n",
+ "Epoch 00951: val_acc did not improve from 0.94225\n",
+ "Epoch 952/100000\n",
+ " - 19s - loss: 0.3693 - acc: 0.9354 - val_loss: 0.4486 - val_acc: 0.8881\n",
+ "\n",
+ "Epoch 00952: val_acc did not improve from 0.94225\n",
+ "Epoch 953/100000\n",
+ " - 19s - loss: 0.3696 - acc: 0.9348 - val_loss: 0.4675 - val_acc: 0.8906\n",
+ "\n",
+ "Epoch 00953: val_acc did not improve from 0.94225\n",
+ "Epoch 954/100000\n",
+ " - 19s - loss: 0.3689 - acc: 0.9347 - val_loss: 0.3679 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 00954: val_acc did not improve from 0.94225\n",
+ "Epoch 955/100000\n",
+ " - 18s - loss: 0.3666 - acc: 0.9361 - val_loss: 0.3597 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 00955: val_acc did not improve from 0.94225\n",
+ "Epoch 956/100000\n",
+ " - 19s - loss: 0.3726 - acc: 0.9349 - val_loss: 0.3635 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 00956: val_acc did not improve from 0.94225\n",
+ "Epoch 957/100000\n",
+ " - 19s - loss: 0.3707 - acc: 0.9356 - val_loss: 0.4386 - val_acc: 0.9002\n",
+ "\n",
+ "Epoch 00957: val_acc did not improve from 0.94225\n",
+ "Epoch 958/100000\n",
+ " - 19s - loss: 0.3684 - acc: 0.9353 - val_loss: 0.3702 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 00958: val_acc did not improve from 0.94225\n",
+ "Epoch 959/100000\n",
+ " - 19s - loss: 0.3669 - acc: 0.9360 - val_loss: 0.3556 - val_acc: 0.9383\n",
+ "\n",
+ "Epoch 00959: val_acc did not improve from 0.94225\n",
+ "Epoch 960/100000\n",
+ " - 19s - loss: 0.3710 - acc: 0.9350 - val_loss: 0.3578 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 00960: val_acc did not improve from 0.94225\n",
+ "Epoch 961/100000\n",
+ " - 19s - loss: 0.3681 - acc: 0.9352 - val_loss: 0.3766 - val_acc: 0.9243\n",
+ "\n",
+ "Epoch 00961: val_acc did not improve from 0.94225\n",
+ "Epoch 962/100000\n",
+ " - 19s - loss: 0.3706 - acc: 0.9346 - val_loss: 0.3936 - val_acc: 0.9216\n",
+ "\n",
+ "Epoch 00962: val_acc did not improve from 0.94225\n",
+ "Epoch 963/100000\n",
+ " - 19s - loss: 0.3680 - acc: 0.9353 - val_loss: 0.3806 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00963: val_acc did not improve from 0.94225\n",
+ "Epoch 964/100000\n",
+ " - 18s - loss: 0.3697 - acc: 0.9355 - val_loss: 0.3882 - val_acc: 0.9182\n",
+ "\n",
+ "Epoch 00964: val_acc did not improve from 0.94225\n",
+ "Epoch 965/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9355 - val_loss: 0.4790 - val_acc: 0.8930\n",
+ "\n",
+ "Epoch 00965: val_acc did not improve from 0.94225\n",
+ "Epoch 966/100000\n",
+ " - 18s - loss: 0.3680 - acc: 0.9356 - val_loss: 0.3589 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 00966: val_acc did not improve from 0.94225\n",
+ "Epoch 967/100000\n",
+ " - 19s - loss: 0.3679 - acc: 0.9358 - val_loss: 0.5199 - val_acc: 0.8695\n",
+ "\n",
+ "Epoch 00967: val_acc did not improve from 0.94225\n",
+ "Epoch 968/100000\n",
+ " - 19s - loss: 0.3671 - acc: 0.9356 - val_loss: 0.3737 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 00968: val_acc did not improve from 0.94225\n",
+ "Epoch 969/100000\n",
+ " - 18s - loss: 0.3689 - acc: 0.9351 - val_loss: 0.3668 - val_acc: 0.9390\n",
+ "\n",
+ "Epoch 00969: val_acc did not improve from 0.94225\n",
+ "Epoch 970/100000\n",
+ " - 19s - loss: 0.3769 - acc: 0.9355 - val_loss: 0.3705 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 00970: val_acc did not improve from 0.94225\n",
+ "Epoch 971/100000\n",
+ " - 18s - loss: 0.3741 - acc: 0.9342 - val_loss: 0.4023 - val_acc: 0.9199\n",
+ "\n",
+ "Epoch 00971: val_acc did not improve from 0.94225\n",
+ "Epoch 972/100000\n",
+ " - 19s - loss: 0.3709 - acc: 0.9355 - val_loss: 0.3594 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 00972: val_acc did not improve from 0.94225\n",
+ "Epoch 973/100000\n",
+ " - 18s - loss: 0.3688 - acc: 0.9348 - val_loss: 0.3753 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 00973: val_acc did not improve from 0.94225\n",
+ "Epoch 974/100000\n",
+ " - 18s - loss: 0.3688 - acc: 0.9351 - val_loss: 0.4787 - val_acc: 0.8756\n",
+ "\n",
+ "Epoch 00974: val_acc did not improve from 0.94225\n",
+ "Epoch 975/100000\n",
+ " - 19s - loss: 0.3675 - acc: 0.9353 - val_loss: 0.3528 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 00975: val_acc did not improve from 0.94225\n",
+ "Epoch 976/100000\n",
+ " - 18s - loss: 0.3698 - acc: 0.9355 - val_loss: 0.3602 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 00976: val_acc did not improve from 0.94225\n",
+ "Epoch 977/100000\n",
+ " - 19s - loss: 0.3671 - acc: 0.9355 - val_loss: 0.3692 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 00977: val_acc did not improve from 0.94225\n",
+ "Epoch 978/100000\n",
+ " - 18s - loss: 0.3673 - acc: 0.9353 - val_loss: 0.3905 - val_acc: 0.9179\n",
+ "\n",
+ "Epoch 00978: val_acc did not improve from 0.94225\n",
+ "Epoch 979/100000\n",
+ " - 19s - loss: 0.3662 - acc: 0.9355 - val_loss: 0.5235 - val_acc: 0.8545\n",
+ "\n",
+ "Epoch 00979: val_acc did not improve from 0.94225\n",
+ "Epoch 980/100000\n",
+ " - 19s - loss: 0.3711 - acc: 0.9344 - val_loss: 0.3732 - val_acc: 0.9262\n",
+ "\n",
+ "Epoch 00980: val_acc did not improve from 0.94225\n",
+ "Epoch 981/100000\n",
+ " - 19s - loss: 0.3689 - acc: 0.9350 - val_loss: 0.3575 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 00981: val_acc did not improve from 0.94225\n",
+ "Epoch 982/100000\n",
+ " - 19s - loss: 0.3659 - acc: 0.9362 - val_loss: 0.3526 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00982: val_acc did not improve from 0.94225\n",
+ "Epoch 983/100000\n",
+ " - 19s - loss: 0.3649 - acc: 0.9355 - val_loss: 0.3654 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 00983: val_acc did not improve from 0.94225\n",
+ "Epoch 984/100000\n",
+ " - 18s - loss: 0.3696 - acc: 0.9349 - val_loss: 0.3543 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 00984: val_acc did not improve from 0.94225\n",
+ "Epoch 985/100000\n",
+ " - 19s - loss: 0.3678 - acc: 0.9355 - val_loss: 0.3865 - val_acc: 0.9215\n",
+ "\n",
+ "Epoch 00985: val_acc did not improve from 0.94225\n",
+ "Epoch 986/100000\n",
+ " - 18s - loss: 0.3683 - acc: 0.9355 - val_loss: 0.3663 - val_acc: 0.9261\n",
+ "\n",
+ "Epoch 00986: val_acc did not improve from 0.94225\n",
+ "Epoch 987/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9351 - val_loss: 0.3641 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 00987: val_acc did not improve from 0.94225\n",
+ "Epoch 988/100000\n",
+ " - 18s - loss: 0.3711 - acc: 0.9350 - val_loss: 0.3741 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 00988: val_acc did not improve from 0.94225\n",
+ "Epoch 989/100000\n",
+ " - 19s - loss: 0.3714 - acc: 0.9352 - val_loss: 0.3934 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 00989: val_acc did not improve from 0.94225\n",
+ "Epoch 990/100000\n",
+ " - 18s - loss: 0.3706 - acc: 0.9352 - val_loss: 0.3845 - val_acc: 0.9182\n",
+ "\n",
+ "Epoch 00990: val_acc did not improve from 0.94225\n",
+ "Epoch 991/100000\n",
+ " - 19s - loss: 0.3686 - acc: 0.9357 - val_loss: 0.4132 - val_acc: 0.9096\n",
+ "\n",
+ "Epoch 00991: val_acc did not improve from 0.94225\n",
+ "Epoch 992/100000\n",
+ " - 18s - loss: 0.3685 - acc: 0.9353 - val_loss: 0.4026 - val_acc: 0.9117\n",
+ "\n",
+ "Epoch 00992: val_acc did not improve from 0.94225\n",
+ "Epoch 993/100000\n",
+ " - 19s - loss: 0.3671 - acc: 0.9361 - val_loss: 0.4523 - val_acc: 0.8918\n",
+ "\n",
+ "Epoch 00993: val_acc did not improve from 0.94225\n",
+ "Epoch 994/100000\n",
+ " - 19s - loss: 0.3655 - acc: 0.9353 - val_loss: 0.3633 - val_acc: 0.9392\n",
+ "\n",
+ "Epoch 00994: val_acc did not improve from 0.94225\n",
+ "Epoch 995/100000\n",
+ " - 18s - loss: 0.3707 - acc: 0.9350 - val_loss: 0.3904 - val_acc: 0.9223\n",
+ "\n",
+ "Epoch 00995: val_acc did not improve from 0.94225\n",
+ "Epoch 996/100000\n",
+ " - 19s - loss: 0.3713 - acc: 0.9350 - val_loss: 0.3561 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 00996: val_acc did not improve from 0.94225\n",
+ "Epoch 997/100000\n",
+ " - 18s - loss: 0.3665 - acc: 0.9359 - val_loss: 0.3723 - val_acc: 0.9282\n",
+ "\n",
+ "Epoch 00997: val_acc did not improve from 0.94225\n",
+ "Epoch 998/100000\n",
+ " - 19s - loss: 0.3659 - acc: 0.9362 - val_loss: 0.3898 - val_acc: 0.9191\n",
+ "\n",
+ "Epoch 00998: val_acc did not improve from 0.94225\n",
+ "Epoch 999/100000\n",
+ " - 19s - loss: 0.3689 - acc: 0.9345 - val_loss: 0.3863 - val_acc: 0.9168\n",
+ "\n",
+ "Epoch 00999: val_acc did not improve from 0.94225\n",
+ "Epoch 1000/100000\n",
+ " - 19s - loss: 0.3655 - acc: 0.9360 - val_loss: 0.3560 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01000: val_acc did not improve from 0.94225\n",
+ "Epoch 1001/100000\n",
+ " - 19s - loss: 0.3689 - acc: 0.9357 - val_loss: 0.4617 - val_acc: 0.8773\n",
+ "\n",
+ "Epoch 01001: val_acc did not improve from 0.94225\n",
+ "Epoch 1002/100000\n",
+ " - 19s - loss: 0.3702 - acc: 0.9354 - val_loss: 0.4845 - val_acc: 0.8792\n",
+ "\n",
+ "Epoch 01002: val_acc did not improve from 0.94225\n",
+ "Epoch 1003/100000\n",
+ " - 18s - loss: 0.3726 - acc: 0.9343 - val_loss: 0.4135 - val_acc: 0.9026\n",
+ "\n",
+ "Epoch 01003: val_acc did not improve from 0.94225\n",
+ "Epoch 1004/100000\n",
+ " - 19s - loss: 0.3658 - acc: 0.9359 - val_loss: 0.3567 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 01004: val_acc did not improve from 0.94225\n",
+ "Epoch 1005/100000\n",
+ " - 18s - loss: 0.3694 - acc: 0.9351 - val_loss: 0.3876 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 01005: val_acc did not improve from 0.94225\n",
+ "Epoch 1006/100000\n",
+ " - 20s - loss: 0.3688 - acc: 0.9342 - val_loss: 0.4080 - val_acc: 0.9221\n",
+ "\n",
+ "Epoch 01006: val_acc did not improve from 0.94225\n",
+ "Epoch 1007/100000\n",
+ " - 19s - loss: 0.3708 - acc: 0.9352 - val_loss: 0.3818 - val_acc: 0.9192\n",
+ "\n",
+ "Epoch 01007: val_acc did not improve from 0.94225\n",
+ "Epoch 1008/100000\n",
+ " - 19s - loss: 0.3699 - acc: 0.9350 - val_loss: 0.3908 - val_acc: 0.9166\n",
+ "\n",
+ "Epoch 01008: val_acc did not improve from 0.94225\n",
+ "Epoch 1009/100000\n",
+ " - 19s - loss: 0.3729 - acc: 0.9347 - val_loss: 0.4244 - val_acc: 0.9037\n",
+ "\n",
+ "Epoch 01009: val_acc did not improve from 0.94225\n",
+ "Epoch 1010/100000\n",
+ " - 18s - loss: 0.3700 - acc: 0.9356 - val_loss: 0.3676 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 01010: val_acc did not improve from 0.94225\n",
+ "Epoch 1011/100000\n",
+ " - 19s - loss: 0.3694 - acc: 0.9349 - val_loss: 0.3629 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 01011: val_acc did not improve from 0.94225\n",
+ "Epoch 1012/100000\n",
+ " - 18s - loss: 0.3699 - acc: 0.9352 - val_loss: 0.3848 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 01012: val_acc did not improve from 0.94225\n",
+ "Epoch 1013/100000\n",
+ " - 19s - loss: 0.3672 - acc: 0.9358 - val_loss: 0.4152 - val_acc: 0.9199\n",
+ "\n",
+ "Epoch 01013: val_acc did not improve from 0.94225\n",
+ "Epoch 1014/100000\n",
+ " - 18s - loss: 0.3681 - acc: 0.9350 - val_loss: 0.4354 - val_acc: 0.8935\n",
+ "\n",
+ "Epoch 01014: val_acc did not improve from 0.94225\n",
+ "Epoch 1015/100000\n",
+ " - 18s - loss: 0.3664 - acc: 0.9353 - val_loss: 0.3524 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 01015: val_acc did not improve from 0.94225\n",
+ "Epoch 1016/100000\n",
+ " - 19s - loss: 0.3688 - acc: 0.9359 - val_loss: 0.4288 - val_acc: 0.9082\n",
+ "\n",
+ "Epoch 01016: val_acc did not improve from 0.94225\n",
+ "Epoch 1017/100000\n",
+ " - 19s - loss: 0.3750 - acc: 0.9343 - val_loss: 0.3659 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 01017: val_acc did not improve from 0.94225\n",
+ "Epoch 1018/100000\n",
+ " - 19s - loss: 0.3671 - acc: 0.9364 - val_loss: 0.3569 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 01018: val_acc did not improve from 0.94225\n",
+ "Epoch 1019/100000\n",
+ " - 19s - loss: 0.3704 - acc: 0.9347 - val_loss: 0.3696 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01019: val_acc did not improve from 0.94225\n",
+ "Epoch 1020/100000\n",
+ " - 19s - loss: 0.3664 - acc: 0.9361 - val_loss: 0.3648 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 01020: val_acc did not improve from 0.94225\n",
+ "Epoch 1021/100000\n",
+ " - 19s - loss: 0.3674 - acc: 0.9356 - val_loss: 0.3922 - val_acc: 0.9088\n",
+ "\n",
+ "Epoch 01021: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 01021: ReduceLROnPlateau reducing learning rate to 0.0007737808919046074.\n",
+ "Epoch 1022/100000\n",
+ " - 19s - loss: 0.3610 - acc: 0.9354 - val_loss: 0.3905 - val_acc: 0.9103\n",
+ "\n",
+ "Epoch 01022: val_acc did not improve from 0.94225\n",
+ "Epoch 1023/100000\n",
+ " - 19s - loss: 0.3582 - acc: 0.9358 - val_loss: 0.3477 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 01023: val_acc did not improve from 0.94225\n",
+ "Epoch 1024/100000\n",
+ " - 19s - loss: 0.3586 - acc: 0.9362 - val_loss: 0.4202 - val_acc: 0.9004\n",
+ "\n",
+ "Epoch 01024: val_acc did not improve from 0.94225\n",
+ "Epoch 1025/100000\n",
+ " - 19s - loss: 0.3600 - acc: 0.9349 - val_loss: 0.3616 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 01025: val_acc did not improve from 0.94225\n",
+ "Epoch 1026/100000\n",
+ " - 18s - loss: 0.3586 - acc: 0.9364 - val_loss: 0.3522 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01026: val_acc did not improve from 0.94225\n",
+ "Epoch 1027/100000\n",
+ " - 19s - loss: 0.3590 - acc: 0.9356 - val_loss: 0.3971 - val_acc: 0.9078\n",
+ "\n",
+ "Epoch 01027: val_acc did not improve from 0.94225\n",
+ "Epoch 1028/100000\n",
+ " - 19s - loss: 0.3606 - acc: 0.9352 - val_loss: 0.3717 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 01028: val_acc did not improve from 0.94225\n",
+ "Epoch 1029/100000\n",
+ " - 18s - loss: 0.3629 - acc: 0.9355 - val_loss: 0.3563 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 01029: val_acc did not improve from 0.94225\n",
+ "Epoch 1030/100000\n",
+ " - 18s - loss: 0.3625 - acc: 0.9353 - val_loss: 0.3926 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 01030: val_acc did not improve from 0.94225\n",
+ "Epoch 1031/100000\n",
+ " - 19s - loss: 0.3589 - acc: 0.9354 - val_loss: 0.3511 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 01031: val_acc did not improve from 0.94225\n",
+ "Epoch 1032/100000\n",
+ " - 19s - loss: 0.3600 - acc: 0.9356 - val_loss: 0.3683 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 01032: val_acc did not improve from 0.94225\n",
+ "Epoch 1033/100000\n",
+ " - 18s - loss: 0.3598 - acc: 0.9362 - val_loss: 0.3531 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01033: val_acc did not improve from 0.94225\n",
+ "Epoch 1034/100000\n",
+ " - 19s - loss: 0.3602 - acc: 0.9353 - val_loss: 0.3663 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 01034: val_acc did not improve from 0.94225\n",
+ "Epoch 1035/100000\n",
+ " - 18s - loss: 0.3640 - acc: 0.9360 - val_loss: 0.3815 - val_acc: 0.9166\n",
+ "\n",
+ "Epoch 01035: val_acc did not improve from 0.94225\n",
+ "Epoch 1036/100000\n",
+ " - 19s - loss: 0.3620 - acc: 0.9357 - val_loss: 0.3550 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 01036: val_acc did not improve from 0.94225\n",
+ "Epoch 1037/100000\n",
+ " - 19s - loss: 0.3655 - acc: 0.9343 - val_loss: 0.4479 - val_acc: 0.8834\n",
+ "\n",
+ "Epoch 01037: val_acc did not improve from 0.94225\n",
+ "Epoch 1038/100000\n",
+ " - 19s - loss: 0.3594 - acc: 0.9357 - val_loss: 0.4071 - val_acc: 0.9060\n",
+ "\n",
+ "Epoch 01038: val_acc did not improve from 0.94225\n",
+ "Epoch 1039/100000\n",
+ " - 18s - loss: 0.3608 - acc: 0.9356 - val_loss: 0.3546 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01039: val_acc did not improve from 0.94225\n",
+ "Epoch 1040/100000\n",
+ " - 19s - loss: 0.3588 - acc: 0.9354 - val_loss: 0.3529 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 01040: val_acc did not improve from 0.94225\n",
+ "Epoch 1041/100000\n",
+ " - 19s - loss: 0.3620 - acc: 0.9354 - val_loss: 0.3611 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 01041: val_acc did not improve from 0.94225\n",
+ "Epoch 1042/100000\n",
+ " - 19s - loss: 0.3586 - acc: 0.9356 - val_loss: 0.3821 - val_acc: 0.9175\n",
+ "\n",
+ "Epoch 01042: val_acc did not improve from 0.94225\n",
+ "Epoch 1043/100000\n",
+ " - 19s - loss: 0.3590 - acc: 0.9360 - val_loss: 0.3599 - val_acc: 0.9237\n",
+ "\n",
+ "Epoch 01043: val_acc did not improve from 0.94225\n",
+ "Epoch 1044/100000\n",
+ " - 18s - loss: 0.3586 - acc: 0.9359 - val_loss: 0.4377 - val_acc: 0.8873\n",
+ "\n",
+ "Epoch 01044: val_acc did not improve from 0.94225\n",
+ "Epoch 1045/100000\n",
+ " - 19s - loss: 0.3599 - acc: 0.9367 - val_loss: 0.3597 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01045: val_acc did not improve from 0.94225\n",
+ "Epoch 1046/100000\n",
+ " - 19s - loss: 0.3620 - acc: 0.9354 - val_loss: 0.3557 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 01046: val_acc did not improve from 0.94225\n",
+ "Epoch 1047/100000\n",
+ " - 19s - loss: 0.3574 - acc: 0.9366 - val_loss: 0.4169 - val_acc: 0.9222\n",
+ "\n",
+ "Epoch 01047: val_acc did not improve from 0.94225\n",
+ "Epoch 1048/100000\n",
+ " - 19s - loss: 0.3611 - acc: 0.9353 - val_loss: 0.3805 - val_acc: 0.9238\n",
+ "\n",
+ "Epoch 01048: val_acc did not improve from 0.94225\n",
+ "Epoch 1049/100000\n",
+ " - 19s - loss: 0.3616 - acc: 0.9369 - val_loss: 0.3614 - val_acc: 0.9269\n",
+ "\n",
+ "Epoch 01049: val_acc did not improve from 0.94225\n",
+ "Epoch 1050/100000\n",
+ " - 19s - loss: 0.3596 - acc: 0.9362 - val_loss: 0.3883 - val_acc: 0.9196\n",
+ "\n",
+ "Epoch 01050: val_acc did not improve from 0.94225\n",
+ "Epoch 1051/100000\n",
+ " - 19s - loss: 0.3592 - acc: 0.9362 - val_loss: 0.3851 - val_acc: 0.9248\n",
+ "\n",
+ "Epoch 01051: val_acc did not improve from 0.94225\n",
+ "Epoch 1052/100000\n",
+ " - 19s - loss: 0.3614 - acc: 0.9350 - val_loss: 0.3693 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 01052: val_acc did not improve from 0.94225\n",
+ "Epoch 1053/100000\n",
+ " - 19s - loss: 0.3604 - acc: 0.9357 - val_loss: 0.3481 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 01053: val_acc did not improve from 0.94225\n",
+ "Epoch 1054/100000\n",
+ " - 19s - loss: 0.3616 - acc: 0.9358 - val_loss: 0.3653 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 01054: val_acc did not improve from 0.94225\n",
+ "Epoch 1055/100000\n",
+ " - 19s - loss: 0.3610 - acc: 0.9362 - val_loss: 0.3789 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 01055: val_acc did not improve from 0.94225\n",
+ "Epoch 1056/100000\n",
+ " - 18s - loss: 0.3603 - acc: 0.9357 - val_loss: 0.3585 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 01056: val_acc did not improve from 0.94225\n",
+ "Epoch 1057/100000\n",
+ " - 18s - loss: 0.3579 - acc: 0.9354 - val_loss: 0.3861 - val_acc: 0.9181\n",
+ "\n",
+ "Epoch 01057: val_acc did not improve from 0.94225\n",
+ "Epoch 1058/100000\n",
+ " - 19s - loss: 0.3603 - acc: 0.9352 - val_loss: 0.3684 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 01058: val_acc did not improve from 0.94225\n",
+ "Epoch 1059/100000\n",
+ " - 18s - loss: 0.3592 - acc: 0.9366 - val_loss: 0.3538 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 01059: val_acc did not improve from 0.94225\n",
+ "Epoch 1060/100000\n",
+ " - 19s - loss: 0.3617 - acc: 0.9355 - val_loss: 0.3526 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 01060: val_acc did not improve from 0.94225\n",
+ "Epoch 1061/100000\n",
+ " - 19s - loss: 0.3620 - acc: 0.9354 - val_loss: 0.4439 - val_acc: 0.9052\n",
+ "\n",
+ "Epoch 01061: val_acc did not improve from 0.94225\n",
+ "Epoch 1062/100000\n",
+ " - 19s - loss: 0.3597 - acc: 0.9361 - val_loss: 0.3466 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 01062: val_acc did not improve from 0.94225\n",
+ "Epoch 1063/100000\n",
+ " - 19s - loss: 0.3620 - acc: 0.9349 - val_loss: 0.3642 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 01063: val_acc did not improve from 0.94225\n",
+ "Epoch 1064/100000\n",
+ " - 19s - loss: 0.3596 - acc: 0.9355 - val_loss: 0.3436 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 01064: val_acc did not improve from 0.94225\n",
+ "Epoch 1065/100000\n",
+ " - 19s - loss: 0.3580 - acc: 0.9367 - val_loss: 0.3443 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 01065: val_acc did not improve from 0.94225\n",
+ "Epoch 1066/100000\n",
+ " - 19s - loss: 0.3603 - acc: 0.9358 - val_loss: 0.3527 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 01066: val_acc did not improve from 0.94225\n",
+ "Epoch 1067/100000\n",
+ " - 19s - loss: 0.3625 - acc: 0.9363 - val_loss: 0.3588 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 01067: val_acc did not improve from 0.94225\n",
+ "Epoch 1068/100000\n",
+ " - 19s - loss: 0.3609 - acc: 0.9352 - val_loss: 0.3745 - val_acc: 0.9270\n",
+ "\n",
+ "Epoch 01068: val_acc did not improve from 0.94225\n",
+ "Epoch 1069/100000\n",
+ " - 19s - loss: 0.3591 - acc: 0.9362 - val_loss: 0.3635 - val_acc: 0.9271\n",
+ "\n",
+ "Epoch 01069: val_acc did not improve from 0.94225\n",
+ "Epoch 1070/100000\n",
+ " - 19s - loss: 0.3607 - acc: 0.9359 - val_loss: 0.3800 - val_acc: 0.9260\n",
+ "\n",
+ "Epoch 01070: val_acc did not improve from 0.94225\n",
+ "Epoch 1071/100000\n",
+ " - 19s - loss: 0.3626 - acc: 0.9359 - val_loss: 0.3877 - val_acc: 0.9150\n",
+ "\n",
+ "Epoch 01071: val_acc did not improve from 0.94225\n",
+ "Epoch 1072/100000\n",
+ " - 19s - loss: 0.3612 - acc: 0.9355 - val_loss: 0.3566 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 01072: val_acc did not improve from 0.94225\n",
+ "Epoch 1073/100000\n",
+ " - 18s - loss: 0.3633 - acc: 0.9351 - val_loss: 0.3567 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 01073: val_acc did not improve from 0.94225\n",
+ "Epoch 1074/100000\n",
+ " - 19s - loss: 0.3589 - acc: 0.9356 - val_loss: 0.3495 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 01074: val_acc did not improve from 0.94225\n",
+ "Epoch 1075/100000\n",
+ " - 19s - loss: 0.3580 - acc: 0.9355 - val_loss: 0.3666 - val_acc: 0.9309\n",
+ "\n",
+ "Epoch 01075: val_acc did not improve from 0.94225\n",
+ "Epoch 1076/100000\n",
+ " - 19s - loss: 0.3577 - acc: 0.9362 - val_loss: 0.3483 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 01076: val_acc did not improve from 0.94225\n",
+ "Epoch 1077/100000\n",
+ " - 19s - loss: 0.3592 - acc: 0.9358 - val_loss: 0.3499 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 01077: val_acc did not improve from 0.94225\n",
+ "Epoch 1078/100000\n",
+ " - 18s - loss: 0.3560 - acc: 0.9368 - val_loss: 0.4120 - val_acc: 0.9058\n",
+ "\n",
+ "Epoch 01078: val_acc did not improve from 0.94225\n",
+ "Epoch 1079/100000\n",
+ " - 19s - loss: 0.3602 - acc: 0.9357 - val_loss: 0.3637 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01079: val_acc did not improve from 0.94225\n",
+ "Epoch 1080/100000\n",
+ " - 18s - loss: 0.3582 - acc: 0.9359 - val_loss: 0.3686 - val_acc: 0.9272\n",
+ "\n",
+ "Epoch 01080: val_acc did not improve from 0.94225\n",
+ "Epoch 1081/100000\n",
+ " - 19s - loss: 0.3609 - acc: 0.9353 - val_loss: 0.3763 - val_acc: 0.9190\n",
+ "\n",
+ "Epoch 01081: val_acc did not improve from 0.94225\n",
+ "Epoch 1082/100000\n",
+ " - 19s - loss: 0.3597 - acc: 0.9355 - val_loss: 0.3695 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 01082: val_acc did not improve from 0.94225\n",
+ "Epoch 1083/100000\n",
+ " - 19s - loss: 0.3629 - acc: 0.9355 - val_loss: 0.3737 - val_acc: 0.9273\n",
+ "\n",
+ "Epoch 01083: val_acc did not improve from 0.94225\n",
+ "Epoch 1084/100000\n",
+ " - 18s - loss: 0.3574 - acc: 0.9369 - val_loss: 0.3719 - val_acc: 0.9184\n",
+ "\n",
+ "Epoch 01084: val_acc did not improve from 0.94225\n",
+ "Epoch 1085/100000\n",
+ " - 19s - loss: 0.3601 - acc: 0.9356 - val_loss: 0.3503 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 01085: val_acc did not improve from 0.94225\n",
+ "Epoch 1086/100000\n",
+ " - 19s - loss: 0.3589 - acc: 0.9359 - val_loss: 0.3958 - val_acc: 0.9151\n",
+ "\n",
+ "Epoch 01086: val_acc did not improve from 0.94225\n",
+ "Epoch 1087/100000\n",
+ " - 19s - loss: 0.3596 - acc: 0.9356 - val_loss: 0.3657 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 01087: val_acc did not improve from 0.94225\n",
+ "Epoch 1088/100000\n",
+ " - 19s - loss: 0.3619 - acc: 0.9356 - val_loss: 0.3708 - val_acc: 0.9258\n",
+ "\n",
+ "Epoch 01088: val_acc did not improve from 0.94225\n",
+ "Epoch 1089/100000\n",
+ " - 19s - loss: 0.3614 - acc: 0.9355 - val_loss: 0.3594 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 01089: val_acc did not improve from 0.94225\n",
+ "Epoch 1090/100000\n",
+ " - 18s - loss: 0.3632 - acc: 0.9355 - val_loss: 0.3885 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 01090: val_acc did not improve from 0.94225\n",
+ "Epoch 1091/100000\n",
+ " - 19s - loss: 0.3611 - acc: 0.9352 - val_loss: 0.3528 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 01091: val_acc did not improve from 0.94225\n",
+ "Epoch 1092/100000\n",
+ " - 19s - loss: 0.3596 - acc: 0.9357 - val_loss: 0.4545 - val_acc: 0.8758\n",
+ "\n",
+ "Epoch 01092: val_acc did not improve from 0.94225\n",
+ "Epoch 1093/100000\n",
+ " - 18s - loss: 0.3593 - acc: 0.9356 - val_loss: 0.3640 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 01093: val_acc did not improve from 0.94225\n",
+ "Epoch 1094/100000\n",
+ " - 19s - loss: 0.3573 - acc: 0.9365 - val_loss: 0.3825 - val_acc: 0.9177\n",
+ "\n",
+ "Epoch 01094: val_acc did not improve from 0.94225\n",
+ "Epoch 1095/100000\n",
+ " - 18s - loss: 0.3582 - acc: 0.9358 - val_loss: 0.3581 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 01095: val_acc did not improve from 0.94225\n",
+ "Epoch 1096/100000\n",
+ " - 19s - loss: 0.3564 - acc: 0.9358 - val_loss: 0.3449 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 01096: val_acc did not improve from 0.94225\n",
+ "Epoch 1097/100000\n",
+ " - 18s - loss: 0.3602 - acc: 0.9354 - val_loss: 0.3632 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01097: val_acc did not improve from 0.94225\n",
+ "Epoch 1098/100000\n",
+ " - 19s - loss: 0.3576 - acc: 0.9361 - val_loss: 0.4003 - val_acc: 0.9083\n",
+ "\n",
+ "Epoch 01098: val_acc did not improve from 0.94225\n",
+ "Epoch 1099/100000\n",
+ " - 19s - loss: 0.3591 - acc: 0.9355 - val_loss: 0.3667 - val_acc: 0.9323\n",
+ "\n",
+ "Epoch 01099: val_acc did not improve from 0.94225\n",
+ "Epoch 1100/100000\n",
+ " - 19s - loss: 0.3607 - acc: 0.9351 - val_loss: 0.4439 - val_acc: 0.9078\n",
+ "\n",
+ "Epoch 01100: val_acc did not improve from 0.94225\n",
+ "Epoch 1101/100000\n",
+ " - 18s - loss: 0.3569 - acc: 0.9362 - val_loss: 0.3593 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 01101: val_acc did not improve from 0.94225\n",
+ "Epoch 1102/100000\n",
+ " - 18s - loss: 0.3618 - acc: 0.9352 - val_loss: 0.4063 - val_acc: 0.9079\n",
+ "\n",
+ "Epoch 01102: val_acc did not improve from 0.94225\n",
+ "Epoch 1103/100000\n",
+ " - 19s - loss: 0.3613 - acc: 0.9356 - val_loss: 0.3780 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 01103: val_acc did not improve from 0.94225\n",
+ "Epoch 1104/100000\n",
+ " - 18s - loss: 0.3572 - acc: 0.9363 - val_loss: 0.3644 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 01104: val_acc did not improve from 0.94225\n",
+ "Epoch 1105/100000\n",
+ " - 19s - loss: 0.3599 - acc: 0.9351 - val_loss: 0.3491 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 01105: val_acc did not improve from 0.94225\n",
+ "Epoch 1106/100000\n",
+ " - 19s - loss: 0.3601 - acc: 0.9358 - val_loss: 0.4023 - val_acc: 0.9279\n",
+ "\n",
+ "Epoch 01106: val_acc did not improve from 0.94225\n",
+ "Epoch 1107/100000\n",
+ " - 19s - loss: 0.3575 - acc: 0.9367 - val_loss: 0.3596 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01107: val_acc did not improve from 0.94225\n",
+ "Epoch 1108/100000\n",
+ " - 19s - loss: 0.3593 - acc: 0.9354 - val_loss: 0.3698 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01108: val_acc did not improve from 0.94225\n",
+ "Epoch 1109/100000\n",
+ " - 19s - loss: 0.3579 - acc: 0.9368 - val_loss: 0.3541 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 01109: val_acc did not improve from 0.94225\n",
+ "Epoch 1110/100000\n",
+ " - 19s - loss: 0.3609 - acc: 0.9358 - val_loss: 0.3623 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01110: val_acc did not improve from 0.94225\n",
+ "Epoch 1111/100000\n",
+ " - 19s - loss: 0.3579 - acc: 0.9358 - val_loss: 0.4410 - val_acc: 0.9088\n",
+ "\n",
+ "Epoch 01111: val_acc did not improve from 0.94225\n",
+ "Epoch 1112/100000\n",
+ " - 19s - loss: 0.3606 - acc: 0.9348 - val_loss: 0.3723 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01112: val_acc did not improve from 0.94225\n",
+ "Epoch 1113/100000\n",
+ " - 19s - loss: 0.3601 - acc: 0.9366 - val_loss: 0.3540 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01113: val_acc did not improve from 0.94225\n",
+ "Epoch 1114/100000\n",
+ " - 19s - loss: 0.3583 - acc: 0.9360 - val_loss: 0.3552 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 01114: val_acc did not improve from 0.94225\n",
+ "Epoch 1115/100000\n",
+ " - 18s - loss: 0.3563 - acc: 0.9367 - val_loss: 0.3485 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01115: val_acc did not improve from 0.94225\n",
+ "Epoch 1116/100000\n",
+ " - 19s - loss: 0.3575 - acc: 0.9362 - val_loss: 0.3540 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 01116: val_acc did not improve from 0.94225\n",
+ "Epoch 1117/100000\n",
+ " - 18s - loss: 0.3630 - acc: 0.9350 - val_loss: 0.4921 - val_acc: 0.8713\n",
+ "\n",
+ "Epoch 01117: val_acc did not improve from 0.94225\n",
+ "Epoch 1118/100000\n",
+ " - 18s - loss: 0.3612 - acc: 0.9363 - val_loss: 0.3579 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 01118: val_acc did not improve from 0.94225\n",
+ "Epoch 1119/100000\n",
+ " - 19s - loss: 0.3641 - acc: 0.9357 - val_loss: 0.3490 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 01119: val_acc did not improve from 0.94225\n",
+ "Epoch 1120/100000\n",
+ " - 19s - loss: 0.3560 - acc: 0.9368 - val_loss: 0.3551 - val_acc: 0.9309\n",
+ "\n",
+ "Epoch 01120: val_acc did not improve from 0.94225\n",
+ "Epoch 1121/100000\n",
+ " - 19s - loss: 0.3597 - acc: 0.9358 - val_loss: 0.3585 - val_acc: 0.9309\n",
+ "\n",
+ "Epoch 01121: val_acc did not improve from 0.94225\n",
+ "Epoch 1122/100000\n",
+ " - 19s - loss: 0.3572 - acc: 0.9366 - val_loss: 0.3454 - val_acc: 0.9396\n",
+ "\n",
+ "Epoch 01122: val_acc did not improve from 0.94225\n",
+ "Epoch 1123/100000\n",
+ " - 19s - loss: 0.3565 - acc: 0.9365 - val_loss: 0.3447 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 01123: val_acc did not improve from 0.94225\n",
+ "Epoch 1124/100000\n",
+ " - 19s - loss: 0.3571 - acc: 0.9358 - val_loss: 0.3658 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 01124: val_acc did not improve from 0.94225\n",
+ "Epoch 1125/100000\n",
+ " - 18s - loss: 0.3639 - acc: 0.9360 - val_loss: 0.3565 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 01125: val_acc did not improve from 0.94225\n",
+ "Epoch 1126/100000\n",
+ " - 19s - loss: 0.3652 - acc: 0.9359 - val_loss: 0.4050 - val_acc: 0.9264\n",
+ "\n",
+ "Epoch 01126: val_acc did not improve from 0.94225\n",
+ "Epoch 1127/100000\n",
+ " - 19s - loss: 0.3605 - acc: 0.9361 - val_loss: 0.3539 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 01127: val_acc did not improve from 0.94225\n",
+ "Epoch 1128/100000\n",
+ " - 18s - loss: 0.3621 - acc: 0.9366 - val_loss: 0.3543 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 01128: val_acc did not improve from 0.94225\n",
+ "Epoch 1129/100000\n",
+ " - 19s - loss: 0.3603 - acc: 0.9362 - val_loss: 0.3643 - val_acc: 0.9265\n",
+ "\n",
+ "Epoch 01129: val_acc did not improve from 0.94225\n",
+ "Epoch 1130/100000\n",
+ " - 19s - loss: 0.3581 - acc: 0.9364 - val_loss: 0.3483 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 01130: val_acc did not improve from 0.94225\n",
+ "Epoch 1131/100000\n",
+ " - 19s - loss: 0.3589 - acc: 0.9351 - val_loss: 0.4475 - val_acc: 0.8930\n",
+ "\n",
+ "Epoch 01131: val_acc did not improve from 0.94225\n",
+ "Epoch 1132/100000\n",
+ " - 18s - loss: 0.3563 - acc: 0.9364 - val_loss: 0.3564 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01132: val_acc did not improve from 0.94225\n",
+ "Epoch 1133/100000\n",
+ " - 19s - loss: 0.3603 - acc: 0.9357 - val_loss: 0.3730 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 01133: val_acc did not improve from 0.94225\n",
+ "Epoch 1134/100000\n",
+ " - 19s - loss: 0.3575 - acc: 0.9366 - val_loss: 0.4411 - val_acc: 0.8936\n",
+ "\n",
+ "Epoch 01134: val_acc did not improve from 0.94225\n",
+ "Epoch 1135/100000\n",
+ " - 18s - loss: 0.3608 - acc: 0.9361 - val_loss: 0.4857 - val_acc: 0.8861\n",
+ "\n",
+ "Epoch 01135: val_acc did not improve from 0.94225\n",
+ "Epoch 1136/100000\n",
+ " - 18s - loss: 0.3579 - acc: 0.9358 - val_loss: 0.4125 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 01136: val_acc did not improve from 0.94225\n",
+ "Epoch 1137/100000\n",
+ " - 18s - loss: 0.3558 - acc: 0.9361 - val_loss: 0.3608 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 01137: val_acc did not improve from 0.94225\n",
+ "Epoch 1138/100000\n",
+ " - 19s - loss: 0.3575 - acc: 0.9359 - val_loss: 0.3970 - val_acc: 0.9120\n",
+ "\n",
+ "Epoch 01138: val_acc did not improve from 0.94225\n",
+ "Epoch 1139/100000\n",
+ " - 18s - loss: 0.3571 - acc: 0.9358 - val_loss: 0.4211 - val_acc: 0.8972\n",
+ "\n",
+ "Epoch 01139: val_acc did not improve from 0.94225\n",
+ "Epoch 1140/100000\n",
+ " - 19s - loss: 0.3610 - acc: 0.9353 - val_loss: 0.3916 - val_acc: 0.9168\n",
+ "\n",
+ "Epoch 01140: val_acc did not improve from 0.94225\n",
+ "Epoch 1141/100000\n",
+ " - 18s - loss: 0.3594 - acc: 0.9355 - val_loss: 0.3521 - val_acc: 0.9401\n",
+ "\n",
+ "Epoch 01141: val_acc did not improve from 0.94225\n",
+ "Epoch 1142/100000\n",
+ " - 19s - loss: 0.3593 - acc: 0.9366 - val_loss: 0.3431 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 01142: val_acc did not improve from 0.94225\n",
+ "Epoch 1143/100000\n",
+ " - 18s - loss: 0.3612 - acc: 0.9358 - val_loss: 0.3640 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 01143: val_acc did not improve from 0.94225\n",
+ "Epoch 1144/100000\n",
+ " - 19s - loss: 0.3584 - acc: 0.9358 - val_loss: 0.3558 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 01144: val_acc did not improve from 0.94225\n",
+ "Epoch 1145/100000\n",
+ " - 18s - loss: 0.3606 - acc: 0.9360 - val_loss: 0.3875 - val_acc: 0.9244\n",
+ "\n",
+ "Epoch 01145: val_acc did not improve from 0.94225\n",
+ "Epoch 1146/100000\n",
+ " - 19s - loss: 0.3598 - acc: 0.9367 - val_loss: 0.3411 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 01146: val_acc did not improve from 0.94225\n",
+ "Epoch 1147/100000\n",
+ " - 18s - loss: 0.3593 - acc: 0.9358 - val_loss: 0.3730 - val_acc: 0.9281\n",
+ "\n",
+ "Epoch 01147: val_acc did not improve from 0.94225\n",
+ "Epoch 1148/100000\n",
+ " - 18s - loss: 0.3572 - acc: 0.9367 - val_loss: 0.3561 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 01148: val_acc did not improve from 0.94225\n",
+ "Epoch 1149/100000\n",
+ " - 19s - loss: 0.3627 - acc: 0.9357 - val_loss: 0.3520 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01149: val_acc did not improve from 0.94225\n",
+ "Epoch 1150/100000\n",
+ " - 19s - loss: 0.3573 - acc: 0.9361 - val_loss: 0.3520 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 01150: val_acc did not improve from 0.94225\n",
+ "Epoch 1151/100000\n",
+ " - 18s - loss: 0.3613 - acc: 0.9353 - val_loss: 0.3449 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 01151: val_acc did not improve from 0.94225\n",
+ "Epoch 1152/100000\n",
+ " - 19s - loss: 0.3597 - acc: 0.9361 - val_loss: 0.4182 - val_acc: 0.9023\n",
+ "\n",
+ "Epoch 01152: val_acc did not improve from 0.94225\n",
+ "Epoch 1153/100000\n",
+ " - 18s - loss: 0.3603 - acc: 0.9359 - val_loss: 0.3829 - val_acc: 0.9202\n",
+ "\n",
+ "Epoch 01153: val_acc did not improve from 0.94225\n",
+ "Epoch 1154/100000\n",
+ " - 19s - loss: 0.3611 - acc: 0.9352 - val_loss: 0.3667 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 01154: val_acc did not improve from 0.94225\n",
+ "Epoch 1155/100000\n",
+ " - 18s - loss: 0.3588 - acc: 0.9361 - val_loss: 0.3589 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01155: val_acc did not improve from 0.94225\n",
+ "Epoch 1156/100000\n",
+ " - 19s - loss: 0.3581 - acc: 0.9361 - val_loss: 0.4061 - val_acc: 0.9257\n",
+ "\n",
+ "Epoch 01156: val_acc did not improve from 0.94225\n",
+ "Epoch 1157/100000\n",
+ " - 19s - loss: 0.3615 - acc: 0.9355 - val_loss: 0.3540 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01157: val_acc did not improve from 0.94225\n",
+ "Epoch 1158/100000\n",
+ " - 19s - loss: 0.3584 - acc: 0.9359 - val_loss: 0.3480 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 01158: val_acc did not improve from 0.94225\n",
+ "Epoch 1159/100000\n",
+ " - 19s - loss: 0.3590 - acc: 0.9365 - val_loss: 0.6041 - val_acc: 0.8105\n",
+ "\n",
+ "Epoch 01159: val_acc did not improve from 0.94225\n",
+ "Epoch 1160/100000\n",
+ " - 19s - loss: 0.3589 - acc: 0.9364 - val_loss: 0.3692 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 01160: val_acc did not improve from 0.94225\n",
+ "Epoch 1161/100000\n",
+ " - 19s - loss: 0.3609 - acc: 0.9356 - val_loss: 0.3574 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01161: val_acc did not improve from 0.94225\n",
+ "Epoch 1162/100000\n",
+ " - 19s - loss: 0.3600 - acc: 0.9359 - val_loss: 0.3821 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 01162: val_acc did not improve from 0.94225\n",
+ "Epoch 1163/100000\n",
+ " - 19s - loss: 0.3597 - acc: 0.9360 - val_loss: 0.3619 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 01163: val_acc did not improve from 0.94225\n",
+ "Epoch 1164/100000\n",
+ " - 19s - loss: 0.3723 - acc: 0.9346 - val_loss: 0.3601 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 01164: val_acc did not improve from 0.94225\n",
+ "Epoch 1165/100000\n",
+ " - 18s - loss: 0.3625 - acc: 0.9358 - val_loss: 0.3693 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01165: val_acc did not improve from 0.94225\n",
+ "Epoch 1166/100000\n",
+ " - 19s - loss: 0.3606 - acc: 0.9363 - val_loss: 0.3890 - val_acc: 0.9250\n",
+ "\n",
+ "Epoch 01166: val_acc did not improve from 0.94225\n",
+ "Epoch 1167/100000\n",
+ " - 18s - loss: 0.3588 - acc: 0.9366 - val_loss: 0.3762 - val_acc: 0.9238\n",
+ "\n",
+ "Epoch 01167: val_acc did not improve from 0.94225\n",
+ "Epoch 1168/100000\n",
+ " - 19s - loss: 0.3590 - acc: 0.9358 - val_loss: 0.3602 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 01168: val_acc did not improve from 0.94225\n",
+ "Epoch 1169/100000\n",
+ " - 18s - loss: 0.3548 - acc: 0.9369 - val_loss: 0.3650 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 01169: val_acc did not improve from 0.94225\n",
+ "Epoch 1170/100000\n",
+ " - 19s - loss: 0.3600 - acc: 0.9356 - val_loss: 0.3917 - val_acc: 0.9190\n",
+ "\n",
+ "Epoch 01170: val_acc did not improve from 0.94225\n",
+ "Epoch 1171/100000\n",
+ " - 19s - loss: 0.3592 - acc: 0.9357 - val_loss: 0.5134 - val_acc: 0.8695\n",
+ "\n",
+ "Epoch 01171: val_acc did not improve from 0.94225\n",
+ "Epoch 1172/100000\n",
+ " - 18s - loss: 0.3580 - acc: 0.9363 - val_loss: 0.3624 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01172: val_acc did not improve from 0.94225\n",
+ "Epoch 1173/100000\n",
+ " - 19s - loss: 0.3577 - acc: 0.9356 - val_loss: 0.3523 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01173: val_acc did not improve from 0.94225\n",
+ "Epoch 1174/100000\n",
+ " - 19s - loss: 0.3579 - acc: 0.9360 - val_loss: 0.3914 - val_acc: 0.9199\n",
+ "\n",
+ "Epoch 01174: val_acc did not improve from 0.94225\n",
+ "Epoch 1175/100000\n",
+ " - 18s - loss: 0.3570 - acc: 0.9364 - val_loss: 0.4204 - val_acc: 0.9030\n",
+ "\n",
+ "Epoch 01175: val_acc did not improve from 0.94225\n",
+ "Epoch 1176/100000\n",
+ " - 19s - loss: 0.3591 - acc: 0.9358 - val_loss: 0.3784 - val_acc: 0.9281\n",
+ "\n",
+ "Epoch 01176: val_acc did not improve from 0.94225\n",
+ "Epoch 1177/100000\n",
+ " - 19s - loss: 0.3616 - acc: 0.9355 - val_loss: 0.3564 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 01177: val_acc did not improve from 0.94225\n",
+ "Epoch 1178/100000\n",
+ " - 18s - loss: 0.3605 - acc: 0.9352 - val_loss: 0.3502 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 01178: val_acc did not improve from 0.94225\n",
+ "Epoch 1179/100000\n",
+ " - 19s - loss: 0.3600 - acc: 0.9356 - val_loss: 0.4329 - val_acc: 0.9030\n",
+ "\n",
+ "Epoch 01179: val_acc did not improve from 0.94225\n",
+ "Epoch 1180/100000\n",
+ " - 18s - loss: 0.3597 - acc: 0.9359 - val_loss: 0.3773 - val_acc: 0.9279\n",
+ "\n",
+ "Epoch 01180: val_acc did not improve from 0.94225\n",
+ "Epoch 1181/100000\n",
+ " - 19s - loss: 0.3578 - acc: 0.9365 - val_loss: 0.3541 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 01181: val_acc did not improve from 0.94225\n",
+ "Epoch 1182/100000\n",
+ " - 19s - loss: 0.3647 - acc: 0.9351 - val_loss: 0.3621 - val_acc: 0.9267\n",
+ "\n",
+ "Epoch 01182: val_acc did not improve from 0.94225\n",
+ "Epoch 1183/100000\n",
+ " - 19s - loss: 0.3602 - acc: 0.9362 - val_loss: 0.3877 - val_acc: 0.9190\n",
+ "\n",
+ "Epoch 01183: val_acc did not improve from 0.94225\n",
+ "Epoch 1184/100000\n",
+ " - 19s - loss: 0.3580 - acc: 0.9358 - val_loss: 0.3559 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 01184: val_acc did not improve from 0.94225\n",
+ "Epoch 1185/100000\n",
+ " - 18s - loss: 0.3565 - acc: 0.9370 - val_loss: 0.4044 - val_acc: 0.9096\n",
+ "\n",
+ "Epoch 01185: val_acc did not improve from 0.94225\n",
+ "Epoch 1186/100000\n",
+ " - 19s - loss: 0.3580 - acc: 0.9352 - val_loss: 0.3478 - val_acc: 0.9392\n",
+ "\n",
+ "Epoch 01186: val_acc did not improve from 0.94225\n",
+ "Epoch 1187/100000\n",
+ " - 19s - loss: 0.3560 - acc: 0.9363 - val_loss: 0.3496 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01187: val_acc did not improve from 0.94225\n",
+ "Epoch 1188/100000\n",
+ " - 19s - loss: 0.3588 - acc: 0.9356 - val_loss: 0.3934 - val_acc: 0.9141\n",
+ "\n",
+ "Epoch 01188: val_acc did not improve from 0.94225\n",
+ "Epoch 1189/100000\n",
+ " - 19s - loss: 0.3583 - acc: 0.9361 - val_loss: 0.4334 - val_acc: 0.8921\n",
+ "\n",
+ "Epoch 01189: val_acc did not improve from 0.94225\n",
+ "Epoch 1190/100000\n",
+ " - 19s - loss: 0.3598 - acc: 0.9358 - val_loss: 0.3973 - val_acc: 0.9169\n",
+ "\n",
+ "Epoch 01190: val_acc did not improve from 0.94225\n",
+ "Epoch 1191/100000\n",
+ " - 19s - loss: 0.3587 - acc: 0.9362 - val_loss: 0.3464 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 01191: val_acc did not improve from 0.94225\n",
+ "Epoch 1192/100000\n",
+ " - 19s - loss: 0.3596 - acc: 0.9361 - val_loss: 0.3521 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 01192: val_acc did not improve from 0.94225\n",
+ "Epoch 1193/100000\n",
+ " - 19s - loss: 0.3621 - acc: 0.9354 - val_loss: 0.4276 - val_acc: 0.9015\n",
+ "\n",
+ "Epoch 01193: val_acc did not improve from 0.94225\n",
+ "Epoch 1194/100000\n",
+ " - 19s - loss: 0.3568 - acc: 0.9363 - val_loss: 0.3681 - val_acc: 0.9269\n",
+ "\n",
+ "Epoch 01194: val_acc did not improve from 0.94225\n",
+ "Epoch 1195/100000\n",
+ " - 19s - loss: 0.3581 - acc: 0.9353 - val_loss: 0.3727 - val_acc: 0.9246\n",
+ "\n",
+ "Epoch 01195: val_acc did not improve from 0.94225\n",
+ "Epoch 1196/100000\n",
+ " - 19s - loss: 0.3595 - acc: 0.9354 - val_loss: 0.3542 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 01196: val_acc did not improve from 0.94225\n",
+ "Epoch 1197/100000\n",
+ " - 19s - loss: 0.3567 - acc: 0.9366 - val_loss: 0.3694 - val_acc: 0.9229\n",
+ "\n",
+ "Epoch 01197: val_acc did not improve from 0.94225\n",
+ "Epoch 1198/100000\n",
+ " - 19s - loss: 0.3587 - acc: 0.9361 - val_loss: 0.3853 - val_acc: 0.9106\n",
+ "\n",
+ "Epoch 01198: val_acc did not improve from 0.94225\n",
+ "Epoch 1199/100000\n",
+ " - 19s - loss: 0.3594 - acc: 0.9362 - val_loss: 0.3656 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 01199: val_acc did not improve from 0.94225\n",
+ "Epoch 1200/100000\n",
+ " - 19s - loss: 0.3636 - acc: 0.9342 - val_loss: 0.3509 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 01200: val_acc did not improve from 0.94225\n",
+ "Epoch 1201/100000\n",
+ " - 18s - loss: 0.3594 - acc: 0.9356 - val_loss: 0.4513 - val_acc: 0.9011\n",
+ "\n",
+ "Epoch 01201: val_acc did not improve from 0.94225\n",
+ "Epoch 1202/100000\n",
+ " - 19s - loss: 0.3575 - acc: 0.9359 - val_loss: 0.3671 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 01202: val_acc did not improve from 0.94225\n",
+ "Epoch 1203/100000\n",
+ " - 18s - loss: 0.3581 - acc: 0.9360 - val_loss: 0.4738 - val_acc: 0.8876\n",
+ "\n",
+ "Epoch 01203: val_acc did not improve from 0.94225\n",
+ "Epoch 1204/100000\n",
+ " - 19s - loss: 0.3630 - acc: 0.9355 - val_loss: 0.3852 - val_acc: 0.9197\n",
+ "\n",
+ "Epoch 01204: val_acc did not improve from 0.94225\n",
+ "Epoch 1205/100000\n",
+ " - 18s - loss: 0.3604 - acc: 0.9362 - val_loss: 0.4052 - val_acc: 0.9134\n",
+ "\n",
+ "Epoch 01205: val_acc did not improve from 0.94225\n",
+ "Epoch 1206/100000\n",
+ " - 19s - loss: 0.3593 - acc: 0.9352 - val_loss: 0.4701 - val_acc: 0.8816\n",
+ "\n",
+ "Epoch 01206: val_acc did not improve from 0.94225\n",
+ "Epoch 1207/100000\n",
+ " - 19s - loss: 0.3571 - acc: 0.9364 - val_loss: 0.3683 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 01207: val_acc did not improve from 0.94225\n",
+ "Epoch 1208/100000\n",
+ " - 18s - loss: 0.3577 - acc: 0.9354 - val_loss: 0.4464 - val_acc: 0.8817\n",
+ "\n",
+ "Epoch 01208: val_acc did not improve from 0.94225\n",
+ "Epoch 1209/100000\n",
+ " - 19s - loss: 0.3583 - acc: 0.9357 - val_loss: 0.3472 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 01209: val_acc did not improve from 0.94225\n",
+ "Epoch 1210/100000\n",
+ " - 19s - loss: 0.3601 - acc: 0.9356 - val_loss: 0.3925 - val_acc: 0.9152\n",
+ "\n",
+ "Epoch 01210: val_acc did not improve from 0.94225\n",
+ "Epoch 1211/100000\n",
+ " - 19s - loss: 0.3618 - acc: 0.9353 - val_loss: 0.3874 - val_acc: 0.9238\n",
+ "\n",
+ "Epoch 01211: val_acc did not improve from 0.94225\n",
+ "Epoch 1212/100000\n",
+ " - 19s - loss: 0.3610 - acc: 0.9359 - val_loss: 0.3558 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 01212: val_acc did not improve from 0.94225\n",
+ "Epoch 1213/100000\n",
+ " - 19s - loss: 0.3574 - acc: 0.9365 - val_loss: 0.3701 - val_acc: 0.9213\n",
+ "\n",
+ "Epoch 01213: val_acc did not improve from 0.94225\n",
+ "Epoch 1214/100000\n",
+ " - 19s - loss: 0.3567 - acc: 0.9364 - val_loss: 0.3620 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01214: val_acc did not improve from 0.94225\n",
+ "Epoch 1215/100000\n",
+ " - 19s - loss: 0.3576 - acc: 0.9362 - val_loss: 0.3854 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 01215: val_acc did not improve from 0.94225\n",
+ "Epoch 1216/100000\n",
+ " - 19s - loss: 0.3570 - acc: 0.9363 - val_loss: 0.3704 - val_acc: 0.9280\n",
+ "\n",
+ "Epoch 01216: val_acc did not improve from 0.94225\n",
+ "Epoch 1217/100000\n",
+ " - 19s - loss: 0.3601 - acc: 0.9358 - val_loss: 0.3468 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 01217: val_acc did not improve from 0.94225\n",
+ "Epoch 1218/100000\n",
+ " - 19s - loss: 0.3587 - acc: 0.9353 - val_loss: 0.3458 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 01218: val_acc did not improve from 0.94225\n",
+ "Epoch 1219/100000\n",
+ " - 18s - loss: 0.3593 - acc: 0.9363 - val_loss: 0.3549 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 01219: val_acc did not improve from 0.94225\n",
+ "Epoch 1220/100000\n",
+ " - 19s - loss: 0.3592 - acc: 0.9364 - val_loss: 0.3557 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 01220: val_acc did not improve from 0.94225\n",
+ "Epoch 1221/100000\n",
+ " - 18s - loss: 0.3588 - acc: 0.9360 - val_loss: 0.3977 - val_acc: 0.9138\n",
+ "\n",
+ "Epoch 01221: val_acc did not improve from 0.94225\n",
+ "Epoch 1222/100000\n",
+ " - 18s - loss: 0.3586 - acc: 0.9362 - val_loss: 0.3557 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 01222: val_acc did not improve from 0.94225\n",
+ "Epoch 1223/100000\n",
+ " - 19s - loss: 0.3636 - acc: 0.9364 - val_loss: 0.3560 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01223: val_acc did not improve from 0.94225\n",
+ "Epoch 1224/100000\n",
+ " - 18s - loss: 0.3623 - acc: 0.9364 - val_loss: 0.3531 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 01224: val_acc did not improve from 0.94225\n",
+ "Epoch 1225/100000\n",
+ " - 19s - loss: 0.3585 - acc: 0.9361 - val_loss: 0.3497 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 01225: val_acc did not improve from 0.94225\n",
+ "Epoch 1226/100000\n",
+ " - 18s - loss: 0.3593 - acc: 0.9351 - val_loss: 0.3582 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 01226: val_acc did not improve from 0.94225\n",
+ "Epoch 1227/100000\n",
+ " - 18s - loss: 0.3575 - acc: 0.9360 - val_loss: 0.3502 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 01227: val_acc did not improve from 0.94225\n",
+ "Epoch 1228/100000\n",
+ " - 18s - loss: 0.3587 - acc: 0.9359 - val_loss: 0.3858 - val_acc: 0.9169\n",
+ "\n",
+ "Epoch 01228: val_acc did not improve from 0.94225\n",
+ "Epoch 1229/100000\n",
+ " - 18s - loss: 0.3568 - acc: 0.9363 - val_loss: 0.5091 - val_acc: 0.8639\n",
+ "\n",
+ "Epoch 01229: val_acc did not improve from 0.94225\n",
+ "Epoch 1230/100000\n",
+ " - 19s - loss: 0.3594 - acc: 0.9354 - val_loss: 0.3561 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 01230: val_acc did not improve from 0.94225\n",
+ "Epoch 1231/100000\n",
+ " - 19s - loss: 0.3598 - acc: 0.9356 - val_loss: 0.3674 - val_acc: 0.9269\n",
+ "\n",
+ "Epoch 01231: val_acc did not improve from 0.94225\n",
+ "Epoch 1232/100000\n",
+ " - 18s - loss: 0.3609 - acc: 0.9355 - val_loss: 0.3586 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 01232: val_acc did not improve from 0.94225\n",
+ "Epoch 1233/100000\n",
+ " - 18s - loss: 0.3581 - acc: 0.9361 - val_loss: 0.3548 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 01233: val_acc did not improve from 0.94225\n",
+ "Epoch 1234/100000\n",
+ " - 19s - loss: 0.3586 - acc: 0.9361 - val_loss: 0.4034 - val_acc: 0.9129\n",
+ "\n",
+ "Epoch 01234: val_acc did not improve from 0.94225\n",
+ "Epoch 1235/100000\n",
+ " - 18s - loss: 0.3548 - acc: 0.9369 - val_loss: 0.3567 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 01235: val_acc did not improve from 0.94225\n",
+ "Epoch 1236/100000\n",
+ " - 19s - loss: 0.3588 - acc: 0.9359 - val_loss: 0.3822 - val_acc: 0.9181\n",
+ "\n",
+ "Epoch 01236: val_acc did not improve from 0.94225\n",
+ "Epoch 1237/100000\n",
+ " - 19s - loss: 0.3576 - acc: 0.9356 - val_loss: 0.4058 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 01237: val_acc did not improve from 0.94225\n",
+ "Epoch 1238/100000\n",
+ " - 19s - loss: 0.3600 - acc: 0.9361 - val_loss: 0.3624 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 01238: val_acc did not improve from 0.94225\n",
+ "Epoch 1239/100000\n",
+ " - 19s - loss: 0.3645 - acc: 0.9360 - val_loss: 0.3573 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 01239: val_acc did not improve from 0.94225\n",
+ "Epoch 1240/100000\n",
+ " - 19s - loss: 0.3621 - acc: 0.9357 - val_loss: 0.4141 - val_acc: 0.9023\n",
+ "\n",
+ "Epoch 01240: val_acc did not improve from 0.94225\n",
+ "Epoch 1241/100000\n",
+ " - 19s - loss: 0.3652 - acc: 0.9360 - val_loss: 0.3802 - val_acc: 0.9290\n",
+ "\n",
+ "Epoch 01241: val_acc did not improve from 0.94225\n",
+ "Epoch 1242/100000\n",
+ " - 19s - loss: 0.3644 - acc: 0.9354 - val_loss: 0.3492 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 01242: val_acc did not improve from 0.94225\n",
+ "Epoch 1243/100000\n",
+ " - 19s - loss: 0.3598 - acc: 0.9357 - val_loss: 0.3558 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 01243: val_acc did not improve from 0.94225\n",
+ "Epoch 1244/100000\n",
+ " - 19s - loss: 0.3570 - acc: 0.9363 - val_loss: 0.3535 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 01244: val_acc did not improve from 0.94225\n",
+ "Epoch 1245/100000\n",
+ " - 19s - loss: 0.3564 - acc: 0.9358 - val_loss: 0.3835 - val_acc: 0.9206\n",
+ "\n",
+ "Epoch 01245: val_acc did not improve from 0.94225\n",
+ "Epoch 1246/100000\n",
+ " - 19s - loss: 0.3597 - acc: 0.9356 - val_loss: 0.4218 - val_acc: 0.8994\n",
+ "\n",
+ "Epoch 01246: val_acc did not improve from 0.94225\n",
+ "Epoch 1247/100000\n",
+ " - 19s - loss: 0.3579 - acc: 0.9356 - val_loss: 0.3473 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 01247: val_acc did not improve from 0.94225\n",
+ "Epoch 1248/100000\n",
+ " - 19s - loss: 0.3587 - acc: 0.9352 - val_loss: 0.3813 - val_acc: 0.9178\n",
+ "\n",
+ "Epoch 01248: val_acc did not improve from 0.94225\n",
+ "Epoch 1249/100000\n",
+ " - 19s - loss: 0.3558 - acc: 0.9365 - val_loss: 0.3492 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01249: val_acc did not improve from 0.94225\n",
+ "Epoch 1250/100000\n",
+ " - 19s - loss: 0.3608 - acc: 0.9362 - val_loss: 0.3969 - val_acc: 0.9146\n",
+ "\n",
+ "Epoch 01250: val_acc did not improve from 0.94225\n",
+ "Epoch 1251/100000\n",
+ " - 19s - loss: 0.3637 - acc: 0.9357 - val_loss: 0.3599 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 01251: val_acc did not improve from 0.94225\n",
+ "Epoch 1252/100000\n",
+ " - 18s - loss: 0.3621 - acc: 0.9354 - val_loss: 0.3641 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 01252: val_acc did not improve from 0.94225\n",
+ "Epoch 1253/100000\n",
+ " - 19s - loss: 0.3582 - acc: 0.9358 - val_loss: 0.3521 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 01253: val_acc did not improve from 0.94225\n",
+ "Epoch 1254/100000\n",
+ " - 18s - loss: 0.3621 - acc: 0.9345 - val_loss: 0.3537 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 01254: val_acc did not improve from 0.94225\n",
+ "Epoch 1255/100000\n",
+ " - 18s - loss: 0.3571 - acc: 0.9357 - val_loss: 0.3619 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01255: val_acc did not improve from 0.94225\n",
+ "Epoch 1256/100000\n",
+ " - 18s - loss: 0.3549 - acc: 0.9362 - val_loss: 0.3444 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 01256: val_acc did not improve from 0.94225\n",
+ "Epoch 1257/100000\n",
+ " - 18s - loss: 0.3567 - acc: 0.9360 - val_loss: 0.3557 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 01257: val_acc did not improve from 0.94225\n",
+ "Epoch 1258/100000\n",
+ " - 19s - loss: 0.3602 - acc: 0.9359 - val_loss: 0.3829 - val_acc: 0.9226\n",
+ "\n",
+ "Epoch 01258: val_acc did not improve from 0.94225\n",
+ "Epoch 1259/100000\n",
+ " - 19s - loss: 0.3605 - acc: 0.9357 - val_loss: 0.3874 - val_acc: 0.9197\n",
+ "\n",
+ "Epoch 01259: val_acc did not improve from 0.94225\n",
+ "Epoch 1260/100000\n",
+ " - 19s - loss: 0.3556 - acc: 0.9366 - val_loss: 0.4037 - val_acc: 0.9101\n",
+ "\n",
+ "Epoch 01260: val_acc did not improve from 0.94225\n",
+ "Epoch 1261/100000\n",
+ " - 19s - loss: 0.3573 - acc: 0.9354 - val_loss: 0.3705 - val_acc: 0.9196\n",
+ "\n",
+ "Epoch 01261: val_acc did not improve from 0.94225\n",
+ "Epoch 1262/100000\n",
+ " - 18s - loss: 0.3577 - acc: 0.9354 - val_loss: 0.3475 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 01262: val_acc did not improve from 0.94225\n",
+ "Epoch 1263/100000\n",
+ " - 19s - loss: 0.3581 - acc: 0.9355 - val_loss: 0.3562 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 01263: val_acc did not improve from 0.94225\n",
+ "Epoch 1264/100000\n",
+ " - 19s - loss: 0.3579 - acc: 0.9351 - val_loss: 0.3526 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 01264: val_acc did not improve from 0.94225\n",
+ "Epoch 1265/100000\n",
+ " - 19s - loss: 0.3563 - acc: 0.9352 - val_loss: 0.5401 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 01265: val_acc did not improve from 0.94225\n",
+ "Epoch 1266/100000\n",
+ " - 18s - loss: 0.3538 - acc: 0.9368 - val_loss: 0.3755 - val_acc: 0.9176\n",
+ "\n",
+ "Epoch 01266: val_acc did not improve from 0.94225\n",
+ "Epoch 1267/100000\n",
+ " - 19s - loss: 0.3590 - acc: 0.9358 - val_loss: 0.3439 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 01267: val_acc did not improve from 0.94225\n",
+ "Epoch 1268/100000\n",
+ " - 20s - loss: 0.3589 - acc: 0.9359 - val_loss: 0.3707 - val_acc: 0.9289\n",
+ "\n",
+ "Epoch 01268: val_acc did not improve from 0.94225\n",
+ "Epoch 1269/100000\n",
+ " - 19s - loss: 0.3557 - acc: 0.9364 - val_loss: 0.3529 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 01269: val_acc did not improve from 0.94225\n",
+ "Epoch 1270/100000\n",
+ " - 19s - loss: 0.3576 - acc: 0.9351 - val_loss: 0.3546 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 01270: val_acc did not improve from 0.94225\n",
+ "Epoch 1271/100000\n",
+ " - 18s - loss: 0.3602 - acc: 0.9360 - val_loss: 0.3715 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01271: val_acc did not improve from 0.94225\n",
+ "Epoch 1272/100000\n",
+ " - 19s - loss: 0.3581 - acc: 0.9358 - val_loss: 0.3762 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 01272: val_acc did not improve from 0.94225\n",
+ "Epoch 1273/100000\n",
+ " - 19s - loss: 0.3562 - acc: 0.9365 - val_loss: 0.3410 - val_acc: 0.9407\n",
+ "\n",
+ "Epoch 01273: val_acc did not improve from 0.94225\n",
+ "Epoch 1274/100000\n",
+ " - 18s - loss: 0.3565 - acc: 0.9356 - val_loss: 0.3539 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 01274: val_acc did not improve from 0.94225\n",
+ "Epoch 1275/100000\n",
+ " - 19s - loss: 0.3545 - acc: 0.9364 - val_loss: 0.3632 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 01275: val_acc did not improve from 0.94225\n",
+ "Epoch 1276/100000\n",
+ " - 19s - loss: 0.3568 - acc: 0.9360 - val_loss: 0.4181 - val_acc: 0.9032\n",
+ "\n",
+ "Epoch 01276: val_acc did not improve from 0.94225\n",
+ "Epoch 1277/100000\n",
+ " - 18s - loss: 0.3574 - acc: 0.9364 - val_loss: 0.4111 - val_acc: 0.9107\n",
+ "\n",
+ "Epoch 01277: val_acc did not improve from 0.94225\n",
+ "Epoch 1278/100000\n",
+ " - 18s - loss: 0.3598 - acc: 0.9349 - val_loss: 0.5373 - val_acc: 0.8367\n",
+ "\n",
+ "Epoch 01278: val_acc did not improve from 0.94225\n",
+ "Epoch 1279/100000\n",
+ " - 19s - loss: 0.3550 - acc: 0.9369 - val_loss: 0.5288 - val_acc: 0.8457\n",
+ "\n",
+ "Epoch 01279: val_acc did not improve from 0.94225\n",
+ "Epoch 1280/100000\n",
+ " - 18s - loss: 0.3568 - acc: 0.9362 - val_loss: 0.3934 - val_acc: 0.9126\n",
+ "\n",
+ "Epoch 01280: val_acc did not improve from 0.94225\n",
+ "Epoch 1281/100000\n",
+ " - 20s - loss: 0.3569 - acc: 0.9364 - val_loss: 0.3653 - val_acc: 0.9251\n",
+ "\n",
+ "Epoch 01281: val_acc did not improve from 0.94225\n",
+ "Epoch 1282/100000\n",
+ " - 19s - loss: 0.3610 - acc: 0.9351 - val_loss: 0.3441 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 01282: val_acc did not improve from 0.94225\n",
+ "Epoch 1283/100000\n",
+ " - 19s - loss: 0.3596 - acc: 0.9360 - val_loss: 0.4125 - val_acc: 0.9167\n",
+ "\n",
+ "Epoch 01283: val_acc did not improve from 0.94225\n",
+ "Epoch 1284/100000\n",
+ " - 19s - loss: 0.3595 - acc: 0.9356 - val_loss: 0.3550 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 01284: val_acc did not improve from 0.94225\n",
+ "Epoch 1285/100000\n",
+ " - 19s - loss: 0.3580 - acc: 0.9361 - val_loss: 0.3451 - val_acc: 0.9409\n",
+ "\n",
+ "Epoch 01285: val_acc did not improve from 0.94225\n",
+ "Epoch 1286/100000\n",
+ " - 19s - loss: 0.3580 - acc: 0.9358 - val_loss: 0.4535 - val_acc: 0.8897\n",
+ "\n",
+ "Epoch 01286: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 01286: ReduceLROnPlateau reducing learning rate to 0.000735091819660738.\n",
+ "Epoch 1287/100000\n",
+ " - 19s - loss: 0.3520 - acc: 0.9362 - val_loss: 0.3907 - val_acc: 0.9232\n",
+ "\n",
+ "Epoch 01287: val_acc did not improve from 0.94225\n",
+ "Epoch 1288/100000\n",
+ " - 19s - loss: 0.3477 - acc: 0.9366 - val_loss: 0.3336 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 01288: val_acc did not improve from 0.94225\n",
+ "Epoch 1289/100000\n",
+ " - 18s - loss: 0.3484 - acc: 0.9362 - val_loss: 0.3497 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 01289: val_acc did not improve from 0.94225\n",
+ "Epoch 1290/100000\n",
+ " - 19s - loss: 0.3471 - acc: 0.9368 - val_loss: 0.3646 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 01290: val_acc did not improve from 0.94225\n",
+ "Epoch 1291/100000\n",
+ " - 18s - loss: 0.3487 - acc: 0.9361 - val_loss: 0.3363 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 01291: val_acc did not improve from 0.94225\n",
+ "Epoch 1292/100000\n",
+ " - 19s - loss: 0.3510 - acc: 0.9361 - val_loss: 0.3603 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 01292: val_acc did not improve from 0.94225\n",
+ "Epoch 1293/100000\n",
+ " - 19s - loss: 0.3490 - acc: 0.9367 - val_loss: 0.4813 - val_acc: 0.8736\n",
+ "\n",
+ "Epoch 01293: val_acc did not improve from 0.94225\n",
+ "Epoch 1294/100000\n",
+ " - 19s - loss: 0.3482 - acc: 0.9363 - val_loss: 0.3447 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01294: val_acc did not improve from 0.94225\n",
+ "Epoch 1295/100000\n",
+ " - 19s - loss: 0.3506 - acc: 0.9364 - val_loss: 0.3877 - val_acc: 0.9054\n",
+ "\n",
+ "Epoch 01295: val_acc did not improve from 0.94225\n",
+ "Epoch 1296/100000\n",
+ " - 19s - loss: 0.3538 - acc: 0.9353 - val_loss: 0.4202 - val_acc: 0.8993\n",
+ "\n",
+ "Epoch 01296: val_acc did not improve from 0.94225\n",
+ "Epoch 1297/100000\n",
+ " - 19s - loss: 0.3494 - acc: 0.9369 - val_loss: 0.3981 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 01297: val_acc did not improve from 0.94225\n",
+ "Epoch 1298/100000\n",
+ " - 19s - loss: 0.3486 - acc: 0.9365 - val_loss: 0.3663 - val_acc: 0.9203\n",
+ "\n",
+ "Epoch 01298: val_acc did not improve from 0.94225\n",
+ "Epoch 1299/100000\n",
+ " - 19s - loss: 0.3506 - acc: 0.9359 - val_loss: 0.3596 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 01299: val_acc did not improve from 0.94225\n",
+ "Epoch 1300/100000\n",
+ " - 19s - loss: 0.3494 - acc: 0.9360 - val_loss: 0.3594 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 01300: val_acc did not improve from 0.94225\n",
+ "Epoch 1301/100000\n",
+ " - 19s - loss: 0.3512 - acc: 0.9353 - val_loss: 0.3475 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 01301: val_acc did not improve from 0.94225\n",
+ "Epoch 1302/100000\n",
+ " - 19s - loss: 0.3484 - acc: 0.9368 - val_loss: 0.4905 - val_acc: 0.8686\n",
+ "\n",
+ "Epoch 01302: val_acc did not improve from 0.94225\n",
+ "Epoch 1303/100000\n",
+ " - 19s - loss: 0.3524 - acc: 0.9360 - val_loss: 0.3535 - val_acc: 0.9271\n",
+ "\n",
+ "Epoch 01303: val_acc did not improve from 0.94225\n",
+ "Epoch 1304/100000\n",
+ " - 19s - loss: 0.3479 - acc: 0.9365 - val_loss: 0.4334 - val_acc: 0.8951\n",
+ "\n",
+ "Epoch 01304: val_acc did not improve from 0.94225\n",
+ "Epoch 1305/100000\n",
+ " - 19s - loss: 0.3511 - acc: 0.9361 - val_loss: 0.3498 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 01305: val_acc did not improve from 0.94225\n",
+ "Epoch 1306/100000\n",
+ " - 19s - loss: 0.3513 - acc: 0.9365 - val_loss: 0.3516 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 01306: val_acc did not improve from 0.94225\n",
+ "Epoch 1307/100000\n",
+ " - 19s - loss: 0.3503 - acc: 0.9365 - val_loss: 0.4595 - val_acc: 0.8725\n",
+ "\n",
+ "Epoch 01307: val_acc did not improve from 0.94225\n",
+ "Epoch 1308/100000\n",
+ " - 18s - loss: 0.3502 - acc: 0.9364 - val_loss: 0.3387 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 01308: val_acc did not improve from 0.94225\n",
+ "Epoch 1309/100000\n",
+ " - 19s - loss: 0.3493 - acc: 0.9358 - val_loss: 0.3703 - val_acc: 0.9181\n",
+ "\n",
+ "Epoch 01309: val_acc did not improve from 0.94225\n",
+ "Epoch 1310/100000\n",
+ " - 19s - loss: 0.3502 - acc: 0.9359 - val_loss: 0.3550 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 01310: val_acc did not improve from 0.94225\n",
+ "Epoch 1311/100000\n",
+ " - 18s - loss: 0.3490 - acc: 0.9361 - val_loss: 0.3437 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01311: val_acc did not improve from 0.94225\n",
+ "Epoch 1312/100000\n",
+ " - 19s - loss: 0.3495 - acc: 0.9362 - val_loss: 0.5847 - val_acc: 0.8269\n",
+ "\n",
+ "Epoch 01312: val_acc did not improve from 0.94225\n",
+ "Epoch 1313/100000\n",
+ " - 19s - loss: 0.3522 - acc: 0.9358 - val_loss: 0.4976 - val_acc: 0.8589\n",
+ "\n",
+ "Epoch 01313: val_acc did not improve from 0.94225\n",
+ "Epoch 1314/100000\n",
+ " - 19s - loss: 0.3546 - acc: 0.9355 - val_loss: 0.3692 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 01314: val_acc did not improve from 0.94225\n",
+ "Epoch 1315/100000\n",
+ " - 19s - loss: 0.3506 - acc: 0.9359 - val_loss: 0.3653 - val_acc: 0.9217\n",
+ "\n",
+ "Epoch 01315: val_acc did not improve from 0.94225\n",
+ "Epoch 1316/100000\n",
+ " - 19s - loss: 0.3601 - acc: 0.9355 - val_loss: 0.3492 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01316: val_acc did not improve from 0.94225\n",
+ "Epoch 1317/100000\n",
+ " - 19s - loss: 0.3491 - acc: 0.9371 - val_loss: 0.3431 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 01317: val_acc did not improve from 0.94225\n",
+ "Epoch 1318/100000\n",
+ " - 19s - loss: 0.3510 - acc: 0.9359 - val_loss: 0.3548 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 01318: val_acc did not improve from 0.94225\n",
+ "Epoch 1319/100000\n",
+ " - 19s - loss: 0.3505 - acc: 0.9360 - val_loss: 0.3685 - val_acc: 0.9247\n",
+ "\n",
+ "Epoch 01319: val_acc did not improve from 0.94225\n",
+ "Epoch 1320/100000\n",
+ " - 19s - loss: 0.3478 - acc: 0.9367 - val_loss: 0.4238 - val_acc: 0.9019\n",
+ "\n",
+ "Epoch 01320: val_acc did not improve from 0.94225\n",
+ "Epoch 1321/100000\n",
+ " - 19s - loss: 0.3495 - acc: 0.9361 - val_loss: 0.3664 - val_acc: 0.9227\n",
+ "\n",
+ "Epoch 01321: val_acc did not improve from 0.94225\n",
+ "Epoch 1322/100000\n",
+ " - 19s - loss: 0.3485 - acc: 0.9366 - val_loss: 0.3918 - val_acc: 0.9128\n",
+ "\n",
+ "Epoch 01322: val_acc did not improve from 0.94225\n",
+ "Epoch 1323/100000\n",
+ " - 19s - loss: 0.3471 - acc: 0.9362 - val_loss: 0.3484 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01323: val_acc did not improve from 0.94225\n",
+ "Epoch 1324/100000\n",
+ " - 19s - loss: 0.3509 - acc: 0.9363 - val_loss: 0.3639 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 01324: val_acc did not improve from 0.94225\n",
+ "Epoch 1325/100000\n",
+ " - 18s - loss: 0.3497 - acc: 0.9361 - val_loss: 0.3365 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 01325: val_acc did not improve from 0.94225\n",
+ "Epoch 1326/100000\n",
+ " - 19s - loss: 0.3508 - acc: 0.9354 - val_loss: 0.3898 - val_acc: 0.9152\n",
+ "\n",
+ "Epoch 01326: val_acc did not improve from 0.94225\n",
+ "Epoch 1327/100000\n",
+ " - 19s - loss: 0.3467 - acc: 0.9368 - val_loss: 0.3324 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 01327: val_acc did not improve from 0.94225\n",
+ "Epoch 1328/100000\n",
+ " - 19s - loss: 0.3472 - acc: 0.9366 - val_loss: 0.4140 - val_acc: 0.9076\n",
+ "\n",
+ "Epoch 01328: val_acc did not improve from 0.94225\n",
+ "Epoch 1329/100000\n",
+ " - 18s - loss: 0.3501 - acc: 0.9362 - val_loss: 0.3408 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 01329: val_acc did not improve from 0.94225\n",
+ "Epoch 1330/100000\n",
+ " - 18s - loss: 0.3471 - acc: 0.9369 - val_loss: 0.3520 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 01330: val_acc did not improve from 0.94225\n",
+ "Epoch 1331/100000\n",
+ " - 19s - loss: 0.3495 - acc: 0.9358 - val_loss: 0.3488 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01331: val_acc did not improve from 0.94225\n",
+ "Epoch 1332/100000\n",
+ " - 19s - loss: 0.3477 - acc: 0.9366 - val_loss: 0.3378 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 01332: val_acc did not improve from 0.94225\n",
+ "Epoch 1333/100000\n",
+ " - 19s - loss: 0.3461 - acc: 0.9375 - val_loss: 0.3621 - val_acc: 0.9226\n",
+ "\n",
+ "Epoch 01333: val_acc did not improve from 0.94225\n",
+ "Epoch 1334/100000\n",
+ " - 19s - loss: 0.3496 - acc: 0.9362 - val_loss: 0.3540 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 01334: val_acc did not improve from 0.94225\n",
+ "Epoch 1335/100000\n",
+ " - 18s - loss: 0.3498 - acc: 0.9358 - val_loss: 0.3483 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 01335: val_acc did not improve from 0.94225\n",
+ "Epoch 1336/100000\n",
+ " - 19s - loss: 0.3528 - acc: 0.9359 - val_loss: 0.3635 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 01336: val_acc did not improve from 0.94225\n",
+ "Epoch 1337/100000\n",
+ " - 19s - loss: 0.3522 - acc: 0.9358 - val_loss: 0.3385 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 01337: val_acc did not improve from 0.94225\n",
+ "Epoch 1338/100000\n",
+ " - 19s - loss: 0.3488 - acc: 0.9373 - val_loss: 0.3531 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 01338: val_acc did not improve from 0.94225\n",
+ "Epoch 1339/100000\n",
+ " - 18s - loss: 0.3497 - acc: 0.9363 - val_loss: 0.3799 - val_acc: 0.9136\n",
+ "\n",
+ "Epoch 01339: val_acc did not improve from 0.94225\n",
+ "Epoch 1340/100000\n",
+ " - 19s - loss: 0.3504 - acc: 0.9361 - val_loss: 0.3649 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 01340: val_acc did not improve from 0.94225\n",
+ "Epoch 1341/100000\n",
+ " - 19s - loss: 0.3517 - acc: 0.9361 - val_loss: 0.3956 - val_acc: 0.9161\n",
+ "\n",
+ "Epoch 01341: val_acc did not improve from 0.94225\n",
+ "Epoch 1342/100000\n",
+ " - 18s - loss: 0.3510 - acc: 0.9367 - val_loss: 0.3512 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 01342: val_acc did not improve from 0.94225\n",
+ "Epoch 1343/100000\n",
+ " - 18s - loss: 0.3492 - acc: 0.9368 - val_loss: 0.3510 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 01343: val_acc did not improve from 0.94225\n",
+ "Epoch 1344/100000\n",
+ " - 19s - loss: 0.3507 - acc: 0.9363 - val_loss: 0.3877 - val_acc: 0.9195\n",
+ "\n",
+ "Epoch 01344: val_acc did not improve from 0.94225\n",
+ "Epoch 1345/100000\n",
+ " - 18s - loss: 0.3487 - acc: 0.9362 - val_loss: 0.3785 - val_acc: 0.9118\n",
+ "\n",
+ "Epoch 01345: val_acc did not improve from 0.94225\n",
+ "Epoch 1346/100000\n",
+ " - 19s - loss: 0.3520 - acc: 0.9361 - val_loss: 0.3456 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 01346: val_acc did not improve from 0.94225\n",
+ "Epoch 1347/100000\n",
+ " - 18s - loss: 0.3491 - acc: 0.9365 - val_loss: 0.3323 - val_acc: 0.9415\n",
+ "\n",
+ "Epoch 01347: val_acc did not improve from 0.94225\n",
+ "Epoch 1348/100000\n",
+ " - 19s - loss: 0.3467 - acc: 0.9373 - val_loss: 0.3378 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 01348: val_acc did not improve from 0.94225\n",
+ "Epoch 1349/100000\n",
+ " - 19s - loss: 0.3503 - acc: 0.9358 - val_loss: 0.3460 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 01349: val_acc did not improve from 0.94225\n",
+ "Epoch 1350/100000\n",
+ " - 19s - loss: 0.3487 - acc: 0.9357 - val_loss: 0.3574 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01350: val_acc did not improve from 0.94225\n",
+ "Epoch 1351/100000\n",
+ " - 19s - loss: 0.3495 - acc: 0.9361 - val_loss: 0.3418 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 01351: val_acc did not improve from 0.94225\n",
+ "Epoch 1352/100000\n",
+ " - 19s - loss: 0.3471 - acc: 0.9365 - val_loss: 0.3556 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 01352: val_acc did not improve from 0.94225\n",
+ "Epoch 1353/100000\n",
+ " - 18s - loss: 0.3534 - acc: 0.9354 - val_loss: 0.4185 - val_acc: 0.9064\n",
+ "\n",
+ "Epoch 01353: val_acc did not improve from 0.94225\n",
+ "Epoch 1354/100000\n",
+ " - 19s - loss: 0.3492 - acc: 0.9369 - val_loss: 0.3780 - val_acc: 0.9179\n",
+ "\n",
+ "Epoch 01354: val_acc did not improve from 0.94225\n",
+ "Epoch 1355/100000\n",
+ " - 19s - loss: 0.3530 - acc: 0.9364 - val_loss: 0.3556 - val_acc: 0.9267\n",
+ "\n",
+ "Epoch 01355: val_acc did not improve from 0.94225\n",
+ "Epoch 1356/100000\n",
+ " - 18s - loss: 0.3495 - acc: 0.9364 - val_loss: 0.3535 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 01356: val_acc did not improve from 0.94225\n",
+ "Epoch 1357/100000\n",
+ " - 20s - loss: 0.3484 - acc: 0.9370 - val_loss: 0.4011 - val_acc: 0.9021\n",
+ "\n",
+ "Epoch 01357: val_acc did not improve from 0.94225\n",
+ "Epoch 1358/100000\n",
+ " - 19s - loss: 0.3522 - acc: 0.9360 - val_loss: 0.3770 - val_acc: 0.9220\n",
+ "\n",
+ "Epoch 01358: val_acc did not improve from 0.94225\n",
+ "Epoch 1359/100000\n",
+ " - 19s - loss: 0.3494 - acc: 0.9363 - val_loss: 0.3472 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 01359: val_acc did not improve from 0.94225\n",
+ "Epoch 1360/100000\n",
+ " - 19s - loss: 0.3486 - acc: 0.9367 - val_loss: 0.3615 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 01360: val_acc did not improve from 0.94225\n",
+ "Epoch 1361/100000\n",
+ " - 19s - loss: 0.3492 - acc: 0.9362 - val_loss: 0.4447 - val_acc: 0.8936\n",
+ "\n",
+ "Epoch 01361: val_acc did not improve from 0.94225\n",
+ "Epoch 1362/100000\n",
+ " - 19s - loss: 0.3480 - acc: 0.9365 - val_loss: 0.3556 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 01362: val_acc did not improve from 0.94225\n",
+ "Epoch 1363/100000\n",
+ " - 19s - loss: 0.3500 - acc: 0.9361 - val_loss: 0.3450 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 01363: val_acc did not improve from 0.94225\n",
+ "Epoch 1364/100000\n",
+ " - 18s - loss: 0.3499 - acc: 0.9366 - val_loss: 0.3697 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 01364: val_acc did not improve from 0.94225\n",
+ "Epoch 1365/100000\n",
+ " - 19s - loss: 0.3481 - acc: 0.9362 - val_loss: 0.3614 - val_acc: 0.9202\n",
+ "\n",
+ "Epoch 01365: val_acc did not improve from 0.94225\n",
+ "Epoch 1366/100000\n",
+ " - 18s - loss: 0.3520 - acc: 0.9356 - val_loss: 0.3464 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 01366: val_acc did not improve from 0.94225\n",
+ "Epoch 1367/100000\n",
+ " - 19s - loss: 0.3573 - acc: 0.9357 - val_loss: 0.4283 - val_acc: 0.8999\n",
+ "\n",
+ "Epoch 01367: val_acc did not improve from 0.94225\n",
+ "Epoch 1368/100000\n",
+ " - 19s - loss: 0.3488 - acc: 0.9369 - val_loss: 0.3501 - val_acc: 0.9258\n",
+ "\n",
+ "Epoch 01368: val_acc did not improve from 0.94225\n",
+ "Epoch 1369/100000\n",
+ " - 18s - loss: 0.3482 - acc: 0.9367 - val_loss: 0.4369 - val_acc: 0.8915\n",
+ "\n",
+ "Epoch 01369: val_acc did not improve from 0.94225\n",
+ "Epoch 1370/100000\n",
+ " - 19s - loss: 0.3479 - acc: 0.9363 - val_loss: 0.4671 - val_acc: 0.8727\n",
+ "\n",
+ "Epoch 01370: val_acc did not improve from 0.94225\n",
+ "Epoch 1371/100000\n",
+ " - 18s - loss: 0.3489 - acc: 0.9359 - val_loss: 0.3383 - val_acc: 0.9377\n",
+ "\n",
+ "Epoch 01371: val_acc did not improve from 0.94225\n",
+ "Epoch 1372/100000\n",
+ " - 19s - loss: 0.3481 - acc: 0.9365 - val_loss: 0.3334 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 01372: val_acc did not improve from 0.94225\n",
+ "Epoch 1373/100000\n",
+ " - 18s - loss: 0.3484 - acc: 0.9359 - val_loss: 0.3782 - val_acc: 0.9281\n",
+ "\n",
+ "Epoch 01373: val_acc did not improve from 0.94225\n",
+ "Epoch 1374/100000\n",
+ " - 19s - loss: 0.3489 - acc: 0.9372 - val_loss: 0.3432 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 01374: val_acc did not improve from 0.94225\n",
+ "Epoch 1375/100000\n",
+ " - 18s - loss: 0.3507 - acc: 0.9366 - val_loss: 0.4488 - val_acc: 0.8877\n",
+ "\n",
+ "Epoch 01375: val_acc did not improve from 0.94225\n",
+ "Epoch 1376/100000\n",
+ " - 19s - loss: 0.3525 - acc: 0.9355 - val_loss: 0.3381 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 01376: val_acc did not improve from 0.94225\n",
+ "Epoch 1377/100000\n",
+ " - 18s - loss: 0.3502 - acc: 0.9362 - val_loss: 0.3431 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 01377: val_acc did not improve from 0.94225\n",
+ "Epoch 1378/100000\n",
+ " - 19s - loss: 0.3481 - acc: 0.9369 - val_loss: 0.4166 - val_acc: 0.9187\n",
+ "\n",
+ "Epoch 01378: val_acc did not improve from 0.94225\n",
+ "Epoch 1379/100000\n",
+ " - 18s - loss: 0.3491 - acc: 0.9363 - val_loss: 0.3777 - val_acc: 0.9246\n",
+ "\n",
+ "Epoch 01379: val_acc did not improve from 0.94225\n",
+ "Epoch 1380/100000\n",
+ " - 19s - loss: 0.3493 - acc: 0.9365 - val_loss: 0.3459 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 01380: val_acc did not improve from 0.94225\n",
+ "Epoch 1381/100000\n",
+ " - 19s - loss: 0.3553 - acc: 0.9357 - val_loss: 0.3466 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01381: val_acc did not improve from 0.94225\n",
+ "Epoch 1382/100000\n",
+ " - 18s - loss: 0.3523 - acc: 0.9364 - val_loss: 0.3564 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 01382: val_acc did not improve from 0.94225\n",
+ "Epoch 1383/100000\n",
+ " - 19s - loss: 0.3486 - acc: 0.9366 - val_loss: 0.4778 - val_acc: 0.8678\n",
+ "\n",
+ "Epoch 01383: val_acc did not improve from 0.94225\n",
+ "Epoch 1384/100000\n",
+ " - 18s - loss: 0.3506 - acc: 0.9362 - val_loss: 0.4096 - val_acc: 0.9019\n",
+ "\n",
+ "Epoch 01384: val_acc did not improve from 0.94225\n",
+ "Epoch 1385/100000\n",
+ " - 18s - loss: 0.3499 - acc: 0.9352 - val_loss: 0.3587 - val_acc: 0.9236\n",
+ "\n",
+ "Epoch 01385: val_acc did not improve from 0.94225\n",
+ "Epoch 1386/100000\n",
+ " - 19s - loss: 0.3494 - acc: 0.9369 - val_loss: 0.3566 - val_acc: 0.9256\n",
+ "\n",
+ "Epoch 01386: val_acc did not improve from 0.94225\n",
+ "Epoch 1387/100000\n",
+ " - 18s - loss: 0.3481 - acc: 0.9369 - val_loss: 0.4287 - val_acc: 0.9017\n",
+ "\n",
+ "Epoch 01387: val_acc did not improve from 0.94225\n",
+ "Epoch 1388/100000\n",
+ " - 19s - loss: 0.3513 - acc: 0.9354 - val_loss: 0.3424 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 01388: val_acc did not improve from 0.94225\n",
+ "Epoch 1389/100000\n",
+ " - 19s - loss: 0.3523 - acc: 0.9361 - val_loss: 0.3537 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 01389: val_acc did not improve from 0.94225\n",
+ "Epoch 1390/100000\n",
+ " - 19s - loss: 0.3468 - acc: 0.9367 - val_loss: 0.3287 - val_acc: 0.9418\n",
+ "\n",
+ "Epoch 01390: val_acc did not improve from 0.94225\n",
+ "Epoch 1391/100000\n",
+ " - 19s - loss: 0.3535 - acc: 0.9348 - val_loss: 0.3419 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 01391: val_acc did not improve from 0.94225\n",
+ "Epoch 1392/100000\n",
+ " - 19s - loss: 0.3498 - acc: 0.9371 - val_loss: 0.3676 - val_acc: 0.9271\n",
+ "\n",
+ "Epoch 01392: val_acc did not improve from 0.94225\n",
+ "Epoch 1393/100000\n",
+ " - 19s - loss: 0.3536 - acc: 0.9356 - val_loss: 0.3657 - val_acc: 0.9239\n",
+ "\n",
+ "Epoch 01393: val_acc did not improve from 0.94225\n",
+ "Epoch 1394/100000\n",
+ " - 18s - loss: 0.3476 - acc: 0.9364 - val_loss: 0.3571 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 01394: val_acc did not improve from 0.94225\n",
+ "Epoch 1395/100000\n",
+ " - 18s - loss: 0.3513 - acc: 0.9362 - val_loss: 0.4587 - val_acc: 0.8906\n",
+ "\n",
+ "Epoch 01395: val_acc did not improve from 0.94225\n",
+ "Epoch 1396/100000\n",
+ " - 18s - loss: 0.3477 - acc: 0.9367 - val_loss: 0.3422 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 01396: val_acc did not improve from 0.94225\n",
+ "Epoch 1397/100000\n",
+ " - 18s - loss: 0.3479 - acc: 0.9364 - val_loss: 0.3876 - val_acc: 0.9124\n",
+ "\n",
+ "Epoch 01397: val_acc did not improve from 0.94225\n",
+ "Epoch 1398/100000\n",
+ " - 18s - loss: 0.3481 - acc: 0.9367 - val_loss: 0.3598 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 01398: val_acc did not improve from 0.94225\n",
+ "Epoch 1399/100000\n",
+ " - 18s - loss: 0.3494 - acc: 0.9362 - val_loss: 0.3834 - val_acc: 0.9194\n",
+ "\n",
+ "Epoch 01399: val_acc did not improve from 0.94225\n",
+ "Epoch 1400/100000\n",
+ " - 18s - loss: 0.3488 - acc: 0.9366 - val_loss: 0.3538 - val_acc: 0.9265\n",
+ "\n",
+ "Epoch 01400: val_acc did not improve from 0.94225\n",
+ "Epoch 1401/100000\n",
+ " - 19s - loss: 0.3506 - acc: 0.9352 - val_loss: 0.3365 - val_acc: 0.9393\n",
+ "\n",
+ "Epoch 01401: val_acc did not improve from 0.94225\n",
+ "Epoch 1402/100000\n",
+ " - 19s - loss: 0.3538 - acc: 0.9358 - val_loss: 0.3517 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 01402: val_acc did not improve from 0.94225\n",
+ "Epoch 1403/100000\n",
+ " - 19s - loss: 0.3511 - acc: 0.9363 - val_loss: 0.3398 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 01403: val_acc did not improve from 0.94225\n",
+ "Epoch 1404/100000\n",
+ " - 19s - loss: 0.3514 - acc: 0.9360 - val_loss: 0.3674 - val_acc: 0.9240\n",
+ "\n",
+ "Epoch 01404: val_acc did not improve from 0.94225\n",
+ "Epoch 1405/100000\n",
+ " - 18s - loss: 0.3513 - acc: 0.9357 - val_loss: 0.3635 - val_acc: 0.9268\n",
+ "\n",
+ "Epoch 01405: val_acc did not improve from 0.94225\n",
+ "Epoch 1406/100000\n",
+ " - 19s - loss: 0.3516 - acc: 0.9358 - val_loss: 0.5827 - val_acc: 0.8352\n",
+ "\n",
+ "Epoch 01406: val_acc did not improve from 0.94225\n",
+ "Epoch 1407/100000\n",
+ " - 18s - loss: 0.3533 - acc: 0.9349 - val_loss: 0.3791 - val_acc: 0.9178\n",
+ "\n",
+ "Epoch 01407: val_acc did not improve from 0.94225\n",
+ "Epoch 1408/100000\n",
+ " - 19s - loss: 0.3493 - acc: 0.9364 - val_loss: 0.3378 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 01408: val_acc did not improve from 0.94225\n",
+ "Epoch 1409/100000\n",
+ " - 19s - loss: 0.3515 - acc: 0.9359 - val_loss: 0.4120 - val_acc: 0.9102\n",
+ "\n",
+ "Epoch 01409: val_acc did not improve from 0.94225\n",
+ "Epoch 1410/100000\n",
+ " - 18s - loss: 0.3529 - acc: 0.9359 - val_loss: 0.3996 - val_acc: 0.9085\n",
+ "\n",
+ "Epoch 01410: val_acc did not improve from 0.94225\n",
+ "Epoch 1411/100000\n",
+ " - 19s - loss: 0.3528 - acc: 0.9361 - val_loss: 0.3415 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 01411: val_acc did not improve from 0.94225\n",
+ "Epoch 1412/100000\n",
+ " - 19s - loss: 0.3492 - acc: 0.9364 - val_loss: 0.3477 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 01412: val_acc did not improve from 0.94225\n",
+ "Epoch 1413/100000\n",
+ " - 19s - loss: 0.3496 - acc: 0.9363 - val_loss: 0.4005 - val_acc: 0.9243\n",
+ "\n",
+ "Epoch 01413: val_acc did not improve from 0.94225\n",
+ "Epoch 1414/100000\n",
+ " - 18s - loss: 0.3503 - acc: 0.9366 - val_loss: 0.3504 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 01414: val_acc did not improve from 0.94225\n",
+ "Epoch 1415/100000\n",
+ " - 19s - loss: 0.3462 - acc: 0.9375 - val_loss: 0.3837 - val_acc: 0.9134\n",
+ "\n",
+ "Epoch 01415: val_acc did not improve from 0.94225\n",
+ "Epoch 1416/100000\n",
+ " - 19s - loss: 0.3509 - acc: 0.9360 - val_loss: 0.4320 - val_acc: 0.8960\n",
+ "\n",
+ "Epoch 01416: val_acc did not improve from 0.94225\n",
+ "Epoch 1417/100000\n",
+ " - 19s - loss: 0.3505 - acc: 0.9356 - val_loss: 0.3397 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 01417: val_acc did not improve from 0.94225\n",
+ "Epoch 1418/100000\n",
+ " - 19s - loss: 0.3493 - acc: 0.9361 - val_loss: 0.4234 - val_acc: 0.9027\n",
+ "\n",
+ "Epoch 01418: val_acc did not improve from 0.94225\n",
+ "Epoch 1419/100000\n",
+ " - 19s - loss: 0.3484 - acc: 0.9362 - val_loss: 0.3763 - val_acc: 0.9196\n",
+ "\n",
+ "Epoch 01419: val_acc did not improve from 0.94225\n",
+ "Epoch 1420/100000\n",
+ " - 19s - loss: 0.3476 - acc: 0.9363 - val_loss: 0.3391 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 01420: val_acc did not improve from 0.94225\n",
+ "Epoch 1421/100000\n",
+ " - 19s - loss: 0.3495 - acc: 0.9367 - val_loss: 0.3594 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 01421: val_acc did not improve from 0.94225\n",
+ "Epoch 1422/100000\n",
+ " - 19s - loss: 0.3533 - acc: 0.9362 - val_loss: 0.3419 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 01422: val_acc did not improve from 0.94225\n",
+ "Epoch 1423/100000\n",
+ " - 19s - loss: 0.3535 - acc: 0.9368 - val_loss: 0.3701 - val_acc: 0.9259\n",
+ "\n",
+ "Epoch 01423: val_acc did not improve from 0.94225\n",
+ "Epoch 1424/100000\n",
+ " - 18s - loss: 0.3520 - acc: 0.9358 - val_loss: 0.3560 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01424: val_acc did not improve from 0.94225\n",
+ "Epoch 1425/100000\n",
+ " - 19s - loss: 0.3510 - acc: 0.9367 - val_loss: 0.3446 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 01425: val_acc did not improve from 0.94225\n",
+ "Epoch 1426/100000\n",
+ " - 18s - loss: 0.3493 - acc: 0.9359 - val_loss: 0.3683 - val_acc: 0.9305\n",
+ "\n",
+ "Epoch 01426: val_acc did not improve from 0.94225\n",
+ "Epoch 1427/100000\n",
+ " - 19s - loss: 0.3475 - acc: 0.9370 - val_loss: 0.3341 - val_acc: 0.9406\n",
+ "\n",
+ "Epoch 01427: val_acc did not improve from 0.94225\n",
+ "Epoch 1428/100000\n",
+ " - 18s - loss: 0.3489 - acc: 0.9359 - val_loss: 0.3567 - val_acc: 0.9302\n",
+ "\n",
+ "Epoch 01428: val_acc did not improve from 0.94225\n",
+ "Epoch 1429/100000\n",
+ " - 19s - loss: 0.3495 - acc: 0.9361 - val_loss: 0.3644 - val_acc: 0.9285\n",
+ "\n",
+ "Epoch 01429: val_acc did not improve from 0.94225\n",
+ "Epoch 1430/100000\n",
+ " - 18s - loss: 0.3497 - acc: 0.9361 - val_loss: 0.3904 - val_acc: 0.9063\n",
+ "\n",
+ "Epoch 01430: val_acc did not improve from 0.94225\n",
+ "Epoch 1431/100000\n",
+ " - 19s - loss: 0.3455 - acc: 0.9368 - val_loss: 0.3572 - val_acc: 0.9268\n",
+ "\n",
+ "Epoch 01431: val_acc did not improve from 0.94225\n",
+ "Epoch 1432/100000\n",
+ " - 18s - loss: 0.3555 - acc: 0.9355 - val_loss: 0.3418 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 01432: val_acc did not improve from 0.94225\n",
+ "Epoch 1433/100000\n",
+ " - 19s - loss: 0.3512 - acc: 0.9365 - val_loss: 0.3448 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 01433: val_acc did not improve from 0.94225\n",
+ "Epoch 1434/100000\n",
+ " - 19s - loss: 0.3500 - acc: 0.9359 - val_loss: 0.4012 - val_acc: 0.9196\n",
+ "\n",
+ "Epoch 01434: val_acc did not improve from 0.94225\n",
+ "Epoch 1435/100000\n",
+ " - 19s - loss: 0.3531 - acc: 0.9357 - val_loss: 0.3553 - val_acc: 0.9280\n",
+ "\n",
+ "Epoch 01435: val_acc did not improve from 0.94225\n",
+ "Epoch 1436/100000\n",
+ " - 19s - loss: 0.3511 - acc: 0.9357 - val_loss: 0.3551 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01436: val_acc did not improve from 0.94225\n",
+ "Epoch 1437/100000\n",
+ " - 19s - loss: 0.3511 - acc: 0.9358 - val_loss: 0.3416 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01437: val_acc did not improve from 0.94225\n",
+ "Epoch 1438/100000\n",
+ " - 19s - loss: 0.3495 - acc: 0.9365 - val_loss: 0.3846 - val_acc: 0.9108\n",
+ "\n",
+ "Epoch 01438: val_acc did not improve from 0.94225\n",
+ "Epoch 1439/100000\n",
+ " - 19s - loss: 0.3509 - acc: 0.9361 - val_loss: 0.3707 - val_acc: 0.9206\n",
+ "\n",
+ "Epoch 01439: val_acc did not improve from 0.94225\n",
+ "Epoch 1440/100000\n",
+ " - 19s - loss: 0.3494 - acc: 0.9361 - val_loss: 0.3389 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 01440: val_acc did not improve from 0.94225\n",
+ "Epoch 1441/100000\n",
+ " - 19s - loss: 0.3493 - acc: 0.9361 - val_loss: 0.3400 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 01441: val_acc did not improve from 0.94225\n",
+ "Epoch 1442/100000\n",
+ " - 18s - loss: 0.3519 - acc: 0.9353 - val_loss: 0.3477 - val_acc: 0.9272\n",
+ "\n",
+ "Epoch 01442: val_acc did not improve from 0.94225\n",
+ "Epoch 1443/100000\n",
+ " - 19s - loss: 0.3501 - acc: 0.9366 - val_loss: 0.4107 - val_acc: 0.9003\n",
+ "\n",
+ "Epoch 01443: val_acc did not improve from 0.94225\n",
+ "Epoch 1444/100000\n",
+ " - 18s - loss: 0.3545 - acc: 0.9353 - val_loss: 0.3454 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 01444: val_acc did not improve from 0.94225\n",
+ "Epoch 1445/100000\n",
+ " - 19s - loss: 0.3500 - acc: 0.9368 - val_loss: 0.4156 - val_acc: 0.9098\n",
+ "\n",
+ "Epoch 01445: val_acc did not improve from 0.94225\n",
+ "Epoch 1446/100000\n",
+ " - 19s - loss: 0.3478 - acc: 0.9366 - val_loss: 0.4297 - val_acc: 0.8980\n",
+ "\n",
+ "Epoch 01446: val_acc did not improve from 0.94225\n",
+ "Epoch 1447/100000\n",
+ " - 18s - loss: 0.3505 - acc: 0.9360 - val_loss: 0.3440 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 01447: val_acc did not improve from 0.94225\n",
+ "Epoch 1448/100000\n",
+ " - 19s - loss: 0.3486 - acc: 0.9366 - val_loss: 0.3783 - val_acc: 0.9264\n",
+ "\n",
+ "Epoch 01448: val_acc did not improve from 0.94225\n",
+ "Epoch 1449/100000\n",
+ " - 18s - loss: 0.3545 - acc: 0.9357 - val_loss: 0.3403 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 01449: val_acc did not improve from 0.94225\n",
+ "Epoch 1450/100000\n",
+ " - 19s - loss: 0.3494 - acc: 0.9364 - val_loss: 0.3599 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 01450: val_acc did not improve from 0.94225\n",
+ "Epoch 1451/100000\n",
+ " - 18s - loss: 0.3495 - acc: 0.9360 - val_loss: 0.3365 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01451: val_acc did not improve from 0.94225\n",
+ "Epoch 1452/100000\n",
+ " - 19s - loss: 0.3513 - acc: 0.9361 - val_loss: 0.3477 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 01452: val_acc did not improve from 0.94225\n",
+ "Epoch 1453/100000\n",
+ " - 18s - loss: 0.3509 - acc: 0.9364 - val_loss: 0.3647 - val_acc: 0.9243\n",
+ "\n",
+ "Epoch 01453: val_acc did not improve from 0.94225\n",
+ "Epoch 1454/100000\n",
+ " - 19s - loss: 0.3491 - acc: 0.9362 - val_loss: 0.3393 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 01454: val_acc did not improve from 0.94225\n",
+ "Epoch 1455/100000\n",
+ " - 18s - loss: 0.3517 - acc: 0.9364 - val_loss: 0.3953 - val_acc: 0.9145\n",
+ "\n",
+ "Epoch 01455: val_acc did not improve from 0.94225\n",
+ "Epoch 1456/100000\n",
+ " - 18s - loss: 0.3523 - acc: 0.9358 - val_loss: 0.3341 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 01456: val_acc did not improve from 0.94225\n",
+ "Epoch 1457/100000\n",
+ " - 19s - loss: 0.3508 - acc: 0.9365 - val_loss: 0.3426 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01457: val_acc did not improve from 0.94225\n",
+ "Epoch 1458/100000\n",
+ " - 18s - loss: 0.3485 - acc: 0.9364 - val_loss: 0.3435 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 01458: val_acc did not improve from 0.94225\n",
+ "Epoch 1459/100000\n",
+ " - 19s - loss: 0.3508 - acc: 0.9364 - val_loss: 0.3481 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01459: val_acc did not improve from 0.94225\n",
+ "Epoch 1460/100000\n",
+ " - 18s - loss: 0.3535 - acc: 0.9361 - val_loss: 0.3494 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 01460: val_acc did not improve from 0.94225\n",
+ "Epoch 1461/100000\n",
+ " - 19s - loss: 0.3509 - acc: 0.9364 - val_loss: 0.3750 - val_acc: 0.9236\n",
+ "\n",
+ "Epoch 01461: val_acc did not improve from 0.94225\n",
+ "Epoch 1462/100000\n",
+ " - 19s - loss: 0.3516 - acc: 0.9369 - val_loss: 0.3636 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 01462: val_acc did not improve from 0.94225\n",
+ "Epoch 1463/100000\n",
+ " - 18s - loss: 0.3530 - acc: 0.9357 - val_loss: 0.3581 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 01463: val_acc did not improve from 0.94225\n",
+ "Epoch 1464/100000\n",
+ " - 18s - loss: 0.3494 - acc: 0.9363 - val_loss: 0.6312 - val_acc: 0.8087\n",
+ "\n",
+ "Epoch 01464: val_acc did not improve from 0.94225\n",
+ "Epoch 1465/100000\n",
+ " - 19s - loss: 0.3493 - acc: 0.9358 - val_loss: 0.4659 - val_acc: 0.8841\n",
+ "\n",
+ "Epoch 01465: val_acc did not improve from 0.94225\n",
+ "Epoch 1466/100000\n",
+ " - 18s - loss: 0.3462 - acc: 0.9370 - val_loss: 0.3509 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 01466: val_acc did not improve from 0.94225\n",
+ "Epoch 1467/100000\n",
+ " - 19s - loss: 0.3486 - acc: 0.9369 - val_loss: 0.3432 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 01467: val_acc did not improve from 0.94225\n",
+ "Epoch 1468/100000\n",
+ " - 19s - loss: 0.3515 - acc: 0.9360 - val_loss: 0.3659 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 01468: val_acc did not improve from 0.94225\n",
+ "Epoch 1469/100000\n",
+ " - 19s - loss: 0.3500 - acc: 0.9361 - val_loss: 0.3613 - val_acc: 0.9203\n",
+ "\n",
+ "Epoch 01469: val_acc did not improve from 0.94225\n",
+ "Epoch 1470/100000\n",
+ " - 19s - loss: 0.3490 - acc: 0.9365 - val_loss: 0.3663 - val_acc: 0.9218\n",
+ "\n",
+ "Epoch 01470: val_acc did not improve from 0.94225\n",
+ "Epoch 1471/100000\n",
+ " - 19s - loss: 0.3492 - acc: 0.9367 - val_loss: 0.3404 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 01471: val_acc did not improve from 0.94225\n",
+ "Epoch 1472/100000\n",
+ " - 19s - loss: 0.3519 - acc: 0.9363 - val_loss: 0.3744 - val_acc: 0.9211\n",
+ "\n",
+ "Epoch 01472: val_acc did not improve from 0.94225\n",
+ "Epoch 1473/100000\n",
+ " - 19s - loss: 0.3515 - acc: 0.9362 - val_loss: 0.3351 - val_acc: 0.9409\n",
+ "\n",
+ "Epoch 01473: val_acc did not improve from 0.94225\n",
+ "Epoch 1474/100000\n",
+ " - 19s - loss: 0.3569 - acc: 0.9359 - val_loss: 0.3683 - val_acc: 0.9215\n",
+ "\n",
+ "Epoch 01474: val_acc did not improve from 0.94225\n",
+ "Epoch 1475/100000\n",
+ " - 19s - loss: 0.3504 - acc: 0.9362 - val_loss: 0.4214 - val_acc: 0.8992\n",
+ "\n",
+ "Epoch 01475: val_acc did not improve from 0.94225\n",
+ "Epoch 1476/100000\n",
+ " - 19s - loss: 0.3469 - acc: 0.9367 - val_loss: 0.3385 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 01476: val_acc did not improve from 0.94225\n",
+ "Epoch 1477/100000\n",
+ " - 19s - loss: 0.3515 - acc: 0.9360 - val_loss: 0.3513 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 01477: val_acc did not improve from 0.94225\n",
+ "Epoch 1478/100000\n",
+ " - 19s - loss: 0.3521 - acc: 0.9364 - val_loss: 0.3562 - val_acc: 0.9304\n",
+ "\n",
+ "Epoch 01478: val_acc did not improve from 0.94225\n",
+ "Epoch 1479/100000\n",
+ " - 19s - loss: 0.3509 - acc: 0.9364 - val_loss: 0.3768 - val_acc: 0.9202\n",
+ "\n",
+ "Epoch 01479: val_acc did not improve from 0.94225\n",
+ "Epoch 1480/100000\n",
+ " - 19s - loss: 0.3501 - acc: 0.9364 - val_loss: 0.3593 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 01480: val_acc did not improve from 0.94225\n",
+ "Epoch 1481/100000\n",
+ " - 19s - loss: 0.3502 - acc: 0.9361 - val_loss: 0.3468 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 01481: val_acc did not improve from 0.94225\n",
+ "Epoch 1482/100000\n",
+ " - 18s - loss: 0.3479 - acc: 0.9367 - val_loss: 0.3384 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 01482: val_acc did not improve from 0.94225\n",
+ "Epoch 1483/100000\n",
+ " - 19s - loss: 0.3488 - acc: 0.9365 - val_loss: 0.4288 - val_acc: 0.8928\n",
+ "\n",
+ "Epoch 01483: val_acc did not improve from 0.94225\n",
+ "Epoch 1484/100000\n",
+ " - 19s - loss: 0.3481 - acc: 0.9369 - val_loss: 0.4086 - val_acc: 0.8979\n",
+ "\n",
+ "Epoch 01484: val_acc did not improve from 0.94225\n",
+ "Epoch 1485/100000\n",
+ " - 19s - loss: 0.3510 - acc: 0.9358 - val_loss: 0.3734 - val_acc: 0.9304\n",
+ "\n",
+ "Epoch 01485: val_acc did not improve from 0.94225\n",
+ "Epoch 1486/100000\n",
+ " - 19s - loss: 0.3470 - acc: 0.9365 - val_loss: 0.3493 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 01486: val_acc did not improve from 0.94225\n",
+ "Epoch 1487/100000\n",
+ " - 19s - loss: 0.3515 - acc: 0.9356 - val_loss: 0.3544 - val_acc: 0.9286\n",
+ "\n",
+ "Epoch 01487: val_acc did not improve from 0.94225\n",
+ "Epoch 1488/100000\n",
+ " - 19s - loss: 0.3498 - acc: 0.9361 - val_loss: 0.4364 - val_acc: 0.8996\n",
+ "\n",
+ "Epoch 01488: val_acc did not improve from 0.94225\n",
+ "Epoch 1489/100000\n",
+ " - 19s - loss: 0.3591 - acc: 0.9352 - val_loss: 0.4778 - val_acc: 0.8883\n",
+ "\n",
+ "Epoch 01489: val_acc did not improve from 0.94225\n",
+ "Epoch 1490/100000\n",
+ " - 19s - loss: 0.3585 - acc: 0.9358 - val_loss: 0.3359 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 01490: val_acc did not improve from 0.94225\n",
+ "Epoch 1491/100000\n",
+ " - 19s - loss: 0.3500 - acc: 0.9367 - val_loss: 0.3578 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 01491: val_acc did not improve from 0.94225\n",
+ "Epoch 1492/100000\n",
+ " - 19s - loss: 0.3529 - acc: 0.9355 - val_loss: 0.3451 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01492: val_acc did not improve from 0.94225\n",
+ "Epoch 1493/100000\n",
+ " - 18s - loss: 0.3507 - acc: 0.9362 - val_loss: 0.3988 - val_acc: 0.9051\n",
+ "\n",
+ "Epoch 01493: val_acc did not improve from 0.94225\n",
+ "Epoch 1494/100000\n",
+ " - 19s - loss: 0.3505 - acc: 0.9363 - val_loss: 0.5826 - val_acc: 0.8449\n",
+ "\n",
+ "Epoch 01494: val_acc did not improve from 0.94225\n",
+ "Epoch 1495/100000\n",
+ " - 18s - loss: 0.3493 - acc: 0.9362 - val_loss: 0.3989 - val_acc: 0.9055\n",
+ "\n",
+ "Epoch 01495: val_acc did not improve from 0.94225\n",
+ "Epoch 1496/100000\n",
+ " - 19s - loss: 0.3509 - acc: 0.9356 - val_loss: 0.3461 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 01496: val_acc did not improve from 0.94225\n",
+ "Epoch 1497/100000\n",
+ " - 19s - loss: 0.3501 - acc: 0.9360 - val_loss: 0.3541 - val_acc: 0.9257\n",
+ "\n",
+ "Epoch 01497: val_acc did not improve from 0.94225\n",
+ "Epoch 1498/100000\n",
+ " - 19s - loss: 0.3477 - acc: 0.9372 - val_loss: 0.4306 - val_acc: 0.8910\n",
+ "\n",
+ "Epoch 01498: val_acc did not improve from 0.94225\n",
+ "Epoch 1499/100000\n",
+ " - 19s - loss: 0.3475 - acc: 0.9372 - val_loss: 0.3435 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 01499: val_acc did not improve from 0.94225\n",
+ "Epoch 1500/100000\n",
+ " - 19s - loss: 0.3486 - acc: 0.9362 - val_loss: 0.3405 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 01500: val_acc did not improve from 0.94225\n",
+ "Epoch 1501/100000\n",
+ " - 19s - loss: 0.3475 - acc: 0.9361 - val_loss: 0.4006 - val_acc: 0.9228\n",
+ "\n",
+ "Epoch 01501: val_acc did not improve from 0.94225\n",
+ "Epoch 1502/100000\n",
+ " - 19s - loss: 0.3476 - acc: 0.9373 - val_loss: 0.3332 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 01502: val_acc did not improve from 0.94225\n",
+ "Epoch 1503/100000\n",
+ " - 19s - loss: 0.3776 - acc: 0.9333 - val_loss: 0.5306 - val_acc: 0.9109\n",
+ "\n",
+ "Epoch 01503: val_acc did not improve from 0.94225\n",
+ "Epoch 1504/100000\n",
+ " - 19s - loss: 0.3992 - acc: 0.9342 - val_loss: 0.3567 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 01504: val_acc did not improve from 0.94225\n",
+ "Epoch 1505/100000\n",
+ " - 18s - loss: 0.3694 - acc: 0.9332 - val_loss: 0.3941 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 01505: val_acc did not improve from 0.94225\n",
+ "Epoch 1506/100000\n",
+ " - 19s - loss: 0.3810 - acc: 0.9341 - val_loss: 0.4813 - val_acc: 0.8712\n",
+ "\n",
+ "Epoch 01506: val_acc did not improve from 0.94225\n",
+ "Epoch 1507/100000\n",
+ " - 18s - loss: 0.3643 - acc: 0.9347 - val_loss: 0.3607 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 01507: val_acc did not improve from 0.94225\n",
+ "Epoch 1508/100000\n",
+ " - 19s - loss: 0.3639 - acc: 0.9346 - val_loss: 0.3479 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 01508: val_acc did not improve from 0.94225\n",
+ "Epoch 1509/100000\n",
+ " - 18s - loss: 0.3628 - acc: 0.9343 - val_loss: 0.4007 - val_acc: 0.9044\n",
+ "\n",
+ "Epoch 01509: val_acc did not improve from 0.94225\n",
+ "Epoch 1510/100000\n",
+ " - 18s - loss: 0.3621 - acc: 0.9346 - val_loss: 0.3396 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 01510: val_acc did not improve from 0.94225\n",
+ "Epoch 1511/100000\n",
+ " - 19s - loss: 0.3599 - acc: 0.9352 - val_loss: 0.3535 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 01511: val_acc did not improve from 0.94225\n",
+ "Epoch 1512/100000\n",
+ " - 19s - loss: 0.3616 - acc: 0.9350 - val_loss: 0.4083 - val_acc: 0.8999\n",
+ "\n",
+ "Epoch 01512: val_acc did not improve from 0.94225\n",
+ "Epoch 1513/100000\n",
+ " - 19s - loss: 0.3603 - acc: 0.9345 - val_loss: 0.3352 - val_acc: 0.9394\n",
+ "\n",
+ "Epoch 01513: val_acc did not improve from 0.94225\n",
+ "Epoch 1514/100000\n",
+ " - 18s - loss: 0.3585 - acc: 0.9350 - val_loss: 0.3534 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 01514: val_acc did not improve from 0.94225\n",
+ "Epoch 1515/100000\n",
+ " - 19s - loss: 0.3614 - acc: 0.9337 - val_loss: 0.3539 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 01515: val_acc did not improve from 0.94225\n",
+ "Epoch 1516/100000\n",
+ " - 19s - loss: 0.3582 - acc: 0.9345 - val_loss: 0.4412 - val_acc: 0.8963\n",
+ "\n",
+ "Epoch 01516: val_acc did not improve from 0.94225\n",
+ "Epoch 1517/100000\n",
+ " - 19s - loss: 0.3604 - acc: 0.9345 - val_loss: 0.3486 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 01517: val_acc did not improve from 0.94225\n",
+ "Epoch 1518/100000\n",
+ " - 19s - loss: 0.3601 - acc: 0.9341 - val_loss: 0.3562 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 01518: val_acc did not improve from 0.94225\n",
+ "Epoch 1519/100000\n",
+ " - 18s - loss: 0.3559 - acc: 0.9347 - val_loss: 0.3548 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 01519: val_acc did not improve from 0.94225\n",
+ "Epoch 1520/100000\n",
+ " - 19s - loss: 0.3577 - acc: 0.9343 - val_loss: 0.3877 - val_acc: 0.9121\n",
+ "\n",
+ "Epoch 01520: val_acc did not improve from 0.94225\n",
+ "Epoch 1521/100000\n",
+ " - 19s - loss: 0.3589 - acc: 0.9342 - val_loss: 0.3806 - val_acc: 0.9134\n",
+ "\n",
+ "Epoch 01521: val_acc did not improve from 0.94225\n",
+ "Epoch 1522/100000\n",
+ " - 19s - loss: 0.3570 - acc: 0.9346 - val_loss: 0.4517 - val_acc: 0.8753\n",
+ "\n",
+ "Epoch 01522: val_acc did not improve from 0.94225\n",
+ "Epoch 1523/100000\n",
+ " - 19s - loss: 0.3562 - acc: 0.9345 - val_loss: 0.3592 - val_acc: 0.9226\n",
+ "\n",
+ "Epoch 01523: val_acc did not improve from 0.94225\n",
+ "Epoch 1524/100000\n",
+ " - 19s - loss: 0.3584 - acc: 0.9343 - val_loss: 0.3479 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01524: val_acc did not improve from 0.94225\n",
+ "Epoch 1525/100000\n",
+ " - 19s - loss: 0.3581 - acc: 0.9340 - val_loss: 0.3395 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 01525: val_acc did not improve from 0.94225\n",
+ "Epoch 1526/100000\n",
+ " - 19s - loss: 0.3541 - acc: 0.9350 - val_loss: 0.3697 - val_acc: 0.9290\n",
+ "\n",
+ "Epoch 01526: val_acc did not improve from 0.94225\n",
+ "Epoch 1527/100000\n",
+ " - 18s - loss: 0.3566 - acc: 0.9343 - val_loss: 0.5285 - val_acc: 0.8809\n",
+ "\n",
+ "Epoch 01527: val_acc did not improve from 0.94225\n",
+ "Epoch 1528/100000\n",
+ " - 19s - loss: 0.3588 - acc: 0.9340 - val_loss: 0.3725 - val_acc: 0.9172\n",
+ "\n",
+ "Epoch 01528: val_acc did not improve from 0.94225\n",
+ "Epoch 1529/100000\n",
+ " - 19s - loss: 0.3604 - acc: 0.9343 - val_loss: 0.3467 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01529: val_acc did not improve from 0.94225\n",
+ "Epoch 1530/100000\n",
+ " - 19s - loss: 0.3602 - acc: 0.9336 - val_loss: 0.3526 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 01530: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 01530: ReduceLROnPlateau reducing learning rate to 0.0006983372120885178.\n",
+ "Epoch 1531/100000\n",
+ " - 19s - loss: 0.3486 - acc: 0.9353 - val_loss: 0.3665 - val_acc: 0.9218\n",
+ "\n",
+ "Epoch 01531: val_acc did not improve from 0.94225\n",
+ "Epoch 1532/100000\n",
+ " - 18s - loss: 0.3486 - acc: 0.9353 - val_loss: 0.3354 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 01532: val_acc did not improve from 0.94225\n",
+ "Epoch 1533/100000\n",
+ " - 19s - loss: 0.3492 - acc: 0.9350 - val_loss: 0.3638 - val_acc: 0.9227\n",
+ "\n",
+ "Epoch 01533: val_acc did not improve from 0.94225\n",
+ "Epoch 1534/100000\n",
+ " - 18s - loss: 0.3462 - acc: 0.9355 - val_loss: 0.3396 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01534: val_acc did not improve from 0.94225\n",
+ "Epoch 1535/100000\n",
+ " - 18s - loss: 0.3471 - acc: 0.9351 - val_loss: 0.3609 - val_acc: 0.9216\n",
+ "\n",
+ "Epoch 01535: val_acc did not improve from 0.94225\n",
+ "Epoch 1536/100000\n",
+ " - 19s - loss: 0.3483 - acc: 0.9350 - val_loss: 0.3394 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01536: val_acc did not improve from 0.94225\n",
+ "Epoch 1537/100000\n",
+ " - 18s - loss: 0.3518 - acc: 0.9342 - val_loss: 0.3750 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 01537: val_acc did not improve from 0.94225\n",
+ "Epoch 1538/100000\n",
+ " - 19s - loss: 0.3488 - acc: 0.9351 - val_loss: 0.3339 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 01538: val_acc did not improve from 0.94225\n",
+ "Epoch 1539/100000\n",
+ " - 18s - loss: 0.3491 - acc: 0.9345 - val_loss: 0.3946 - val_acc: 0.9078\n",
+ "\n",
+ "Epoch 01539: val_acc did not improve from 0.94225\n",
+ "Epoch 1540/100000\n",
+ " - 19s - loss: 0.3539 - acc: 0.9330 - val_loss: 0.3448 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 01540: val_acc did not improve from 0.94225\n",
+ "Epoch 1541/100000\n",
+ " - 18s - loss: 0.3482 - acc: 0.9349 - val_loss: 0.3439 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01541: val_acc did not improve from 0.94225\n",
+ "Epoch 1542/100000\n",
+ " - 19s - loss: 0.3475 - acc: 0.9355 - val_loss: 0.4212 - val_acc: 0.8824\n",
+ "\n",
+ "Epoch 01542: val_acc did not improve from 0.94225\n",
+ "Epoch 1543/100000\n",
+ " - 19s - loss: 0.3481 - acc: 0.9351 - val_loss: 0.3338 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 01543: val_acc did not improve from 0.94225\n",
+ "Epoch 1544/100000\n",
+ " - 19s - loss: 0.3493 - acc: 0.9351 - val_loss: 0.4214 - val_acc: 0.9045\n",
+ "\n",
+ "Epoch 01544: val_acc did not improve from 0.94225\n",
+ "Epoch 1545/100000\n",
+ " - 18s - loss: 0.3503 - acc: 0.9352 - val_loss: 0.4191 - val_acc: 0.8949\n",
+ "\n",
+ "Epoch 01545: val_acc did not improve from 0.94225\n",
+ "Epoch 1546/100000\n",
+ " - 19s - loss: 0.3474 - acc: 0.9351 - val_loss: 0.3665 - val_acc: 0.9228\n",
+ "\n",
+ "Epoch 01546: val_acc did not improve from 0.94225\n",
+ "Epoch 1547/100000\n",
+ " - 19s - loss: 0.3488 - acc: 0.9345 - val_loss: 0.3359 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 01547: val_acc did not improve from 0.94225\n",
+ "Epoch 1548/100000\n",
+ " - 18s - loss: 0.3481 - acc: 0.9353 - val_loss: 0.3314 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 01548: val_acc did not improve from 0.94225\n",
+ "Epoch 1549/100000\n",
+ " - 19s - loss: 0.3481 - acc: 0.9343 - val_loss: 0.3400 - val_acc: 0.9323\n",
+ "\n",
+ "Epoch 01549: val_acc did not improve from 0.94225\n",
+ "Epoch 1550/100000\n",
+ " - 19s - loss: 0.3479 - acc: 0.9347 - val_loss: 0.3540 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01550: val_acc did not improve from 0.94225\n",
+ "Epoch 1551/100000\n",
+ " - 18s - loss: 0.3486 - acc: 0.9345 - val_loss: 0.3953 - val_acc: 0.9038\n",
+ "\n",
+ "Epoch 01551: val_acc did not improve from 0.94225\n",
+ "Epoch 1552/100000\n",
+ " - 18s - loss: 0.3487 - acc: 0.9353 - val_loss: 0.3591 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 01552: val_acc did not improve from 0.94225\n",
+ "Epoch 1553/100000\n",
+ " - 18s - loss: 0.3469 - acc: 0.9354 - val_loss: 0.4223 - val_acc: 0.8981\n",
+ "\n",
+ "Epoch 01553: val_acc did not improve from 0.94225\n",
+ "Epoch 1554/100000\n",
+ " - 18s - loss: 0.3462 - acc: 0.9353 - val_loss: 0.3439 - val_acc: 0.9255\n",
+ "\n",
+ "Epoch 01554: val_acc did not improve from 0.94225\n",
+ "Epoch 1555/100000\n",
+ " - 19s - loss: 0.3490 - acc: 0.9344 - val_loss: 0.3527 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 01555: val_acc did not improve from 0.94225\n",
+ "Epoch 1556/100000\n",
+ " - 19s - loss: 0.3501 - acc: 0.9345 - val_loss: 0.3484 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 01556: val_acc did not improve from 0.94225\n",
+ "Epoch 1557/100000\n",
+ " - 19s - loss: 0.3490 - acc: 0.9348 - val_loss: 0.3360 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 01557: val_acc did not improve from 0.94225\n",
+ "Epoch 1558/100000\n",
+ " - 18s - loss: 0.3495 - acc: 0.9351 - val_loss: 0.3474 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 01558: val_acc did not improve from 0.94225\n",
+ "Epoch 1559/100000\n",
+ " - 19s - loss: 0.3465 - acc: 0.9358 - val_loss: 0.3691 - val_acc: 0.9205\n",
+ "\n",
+ "Epoch 01559: val_acc did not improve from 0.94225\n",
+ "Epoch 1560/100000\n",
+ " - 18s - loss: 0.3494 - acc: 0.9343 - val_loss: 0.3717 - val_acc: 0.9190\n",
+ "\n",
+ "Epoch 01560: val_acc did not improve from 0.94225\n",
+ "Epoch 1561/100000\n",
+ " - 19s - loss: 0.3487 - acc: 0.9361 - val_loss: 0.3438 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01561: val_acc did not improve from 0.94225\n",
+ "Epoch 1562/100000\n",
+ " - 19s - loss: 0.3483 - acc: 0.9345 - val_loss: 0.3556 - val_acc: 0.9269\n",
+ "\n",
+ "Epoch 01562: val_acc did not improve from 0.94225\n",
+ "Epoch 1563/100000\n",
+ " - 18s - loss: 0.3465 - acc: 0.9356 - val_loss: 0.3443 - val_acc: 0.9305\n",
+ "\n",
+ "Epoch 01563: val_acc did not improve from 0.94225\n",
+ "Epoch 1564/100000\n",
+ " - 19s - loss: 0.3486 - acc: 0.9347 - val_loss: 0.3411 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01564: val_acc did not improve from 0.94225\n",
+ "Epoch 1565/100000\n",
+ " - 18s - loss: 0.3485 - acc: 0.9349 - val_loss: 0.3609 - val_acc: 0.9210\n",
+ "\n",
+ "Epoch 01565: val_acc did not improve from 0.94225\n",
+ "Epoch 1566/100000\n",
+ " - 19s - loss: 0.3501 - acc: 0.9346 - val_loss: 0.3521 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 01566: val_acc did not improve from 0.94225\n",
+ "Epoch 1567/100000\n",
+ " - 19s - loss: 0.3483 - acc: 0.9349 - val_loss: 0.3436 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 01567: val_acc did not improve from 0.94225\n",
+ "Epoch 1568/100000\n",
+ " - 19s - loss: 0.3475 - acc: 0.9351 - val_loss: 0.3788 - val_acc: 0.9105\n",
+ "\n",
+ "Epoch 01568: val_acc did not improve from 0.94225\n",
+ "Epoch 1569/100000\n",
+ " - 19s - loss: 0.3482 - acc: 0.9347 - val_loss: 0.5227 - val_acc: 0.8554\n",
+ "\n",
+ "Epoch 01569: val_acc did not improve from 0.94225\n",
+ "Epoch 1570/100000\n",
+ " - 19s - loss: 0.3483 - acc: 0.9351 - val_loss: 0.3496 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 01570: val_acc did not improve from 0.94225\n",
+ "Epoch 1571/100000\n",
+ " - 19s - loss: 0.3493 - acc: 0.9350 - val_loss: 0.3829 - val_acc: 0.9273\n",
+ "\n",
+ "Epoch 01571: val_acc did not improve from 0.94225\n",
+ "Epoch 1572/100000\n",
+ " - 18s - loss: 0.3498 - acc: 0.9344 - val_loss: 0.3900 - val_acc: 0.9012\n",
+ "\n",
+ "Epoch 01572: val_acc did not improve from 0.94225\n",
+ "Epoch 1573/100000\n",
+ " - 18s - loss: 0.3479 - acc: 0.9351 - val_loss: 0.3395 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 01573: val_acc did not improve from 0.94225\n",
+ "Epoch 1574/100000\n",
+ " - 19s - loss: 0.3490 - acc: 0.9352 - val_loss: 0.3400 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 01574: val_acc did not improve from 0.94225\n",
+ "Epoch 1575/100000\n",
+ " - 18s - loss: 0.3497 - acc: 0.9352 - val_loss: 0.3423 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 01575: val_acc did not improve from 0.94225\n",
+ "Epoch 1576/100000\n",
+ " - 18s - loss: 0.3481 - acc: 0.9352 - val_loss: 0.3398 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01576: val_acc did not improve from 0.94225\n",
+ "Epoch 1577/100000\n",
+ " - 19s - loss: 0.3498 - acc: 0.9345 - val_loss: 0.3477 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 01577: val_acc did not improve from 0.94225\n",
+ "Epoch 1578/100000\n",
+ " - 19s - loss: 0.3491 - acc: 0.9352 - val_loss: 0.3490 - val_acc: 0.9248\n",
+ "\n",
+ "Epoch 01578: val_acc did not improve from 0.94225\n",
+ "Epoch 1579/100000\n",
+ " - 19s - loss: 0.3465 - acc: 0.9351 - val_loss: 0.3394 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 01579: val_acc did not improve from 0.94225\n",
+ "Epoch 1580/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9359 - val_loss: 0.3401 - val_acc: 0.9304\n",
+ "\n",
+ "Epoch 01580: val_acc did not improve from 0.94225\n",
+ "Epoch 1581/100000\n",
+ " - 19s - loss: 0.3468 - acc: 0.9353 - val_loss: 0.3471 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 01581: val_acc did not improve from 0.94225\n",
+ "Epoch 1582/100000\n",
+ " - 18s - loss: 0.3449 - acc: 0.9359 - val_loss: 0.3564 - val_acc: 0.9222\n",
+ "\n",
+ "Epoch 01582: val_acc did not improve from 0.94225\n",
+ "Epoch 1583/100000\n",
+ " - 19s - loss: 0.3483 - acc: 0.9348 - val_loss: 0.3328 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 01583: val_acc did not improve from 0.94225\n",
+ "Epoch 1584/100000\n",
+ " - 18s - loss: 0.3464 - acc: 0.9360 - val_loss: 0.3413 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 01584: val_acc did not improve from 0.94225\n",
+ "Epoch 1585/100000\n",
+ " - 19s - loss: 0.3462 - acc: 0.9362 - val_loss: 0.3646 - val_acc: 0.9200\n",
+ "\n",
+ "Epoch 01585: val_acc did not improve from 0.94225\n",
+ "Epoch 1586/100000\n",
+ " - 18s - loss: 0.3485 - acc: 0.9352 - val_loss: 0.3585 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 01586: val_acc did not improve from 0.94225\n",
+ "Epoch 1587/100000\n",
+ " - 19s - loss: 0.3474 - acc: 0.9353 - val_loss: 0.3410 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 01587: val_acc did not improve from 0.94225\n",
+ "Epoch 1588/100000\n",
+ " - 18s - loss: 0.3488 - acc: 0.9350 - val_loss: 0.4182 - val_acc: 0.9034\n",
+ "\n",
+ "Epoch 01588: val_acc did not improve from 0.94225\n",
+ "Epoch 1589/100000\n",
+ " - 19s - loss: 0.3477 - acc: 0.9351 - val_loss: 0.3368 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 01589: val_acc did not improve from 0.94225\n",
+ "Epoch 1590/100000\n",
+ " - 18s - loss: 0.3435 - acc: 0.9362 - val_loss: 0.3360 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01590: val_acc did not improve from 0.94225\n",
+ "Epoch 1591/100000\n",
+ " - 18s - loss: 0.3445 - acc: 0.9356 - val_loss: 0.3583 - val_acc: 0.9234\n",
+ "\n",
+ "Epoch 01591: val_acc did not improve from 0.94225\n",
+ "Epoch 1592/100000\n",
+ " - 18s - loss: 0.3474 - acc: 0.9354 - val_loss: 0.3424 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01592: val_acc did not improve from 0.94225\n",
+ "Epoch 1593/100000\n",
+ " - 19s - loss: 0.3489 - acc: 0.9349 - val_loss: 0.3690 - val_acc: 0.9242\n",
+ "\n",
+ "Epoch 01593: val_acc did not improve from 0.94225\n",
+ "Epoch 1594/100000\n",
+ " - 18s - loss: 0.3464 - acc: 0.9350 - val_loss: 0.3367 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 01594: val_acc did not improve from 0.94225\n",
+ "Epoch 1595/100000\n",
+ " - 19s - loss: 0.3434 - acc: 0.9362 - val_loss: 0.3363 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 01595: val_acc did not improve from 0.94225\n",
+ "Epoch 1596/100000\n",
+ " - 19s - loss: 0.3446 - acc: 0.9355 - val_loss: 0.3364 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 01596: val_acc did not improve from 0.94225\n",
+ "Epoch 1597/100000\n",
+ " - 19s - loss: 0.3443 - acc: 0.9358 - val_loss: 0.3494 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01597: val_acc did not improve from 0.94225\n",
+ "Epoch 1598/100000\n",
+ " - 19s - loss: 0.3447 - acc: 0.9358 - val_loss: 0.3393 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 01598: val_acc did not improve from 0.94225\n",
+ "Epoch 1599/100000\n",
+ " - 18s - loss: 0.3464 - acc: 0.9351 - val_loss: 0.3433 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 01599: val_acc did not improve from 0.94225\n",
+ "Epoch 1600/100000\n",
+ " - 18s - loss: 0.3488 - acc: 0.9344 - val_loss: 0.3429 - val_acc: 0.9394\n",
+ "\n",
+ "Epoch 01600: val_acc did not improve from 0.94225\n",
+ "Epoch 1601/100000\n",
+ " - 19s - loss: 0.3443 - acc: 0.9362 - val_loss: 0.4774 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 01601: val_acc did not improve from 0.94225\n",
+ "Epoch 1602/100000\n",
+ " - 18s - loss: 0.3483 - acc: 0.9346 - val_loss: 0.3416 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01602: val_acc did not improve from 0.94225\n",
+ "Epoch 1603/100000\n",
+ " - 19s - loss: 0.3445 - acc: 0.9357 - val_loss: 0.3289 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 01603: val_acc did not improve from 0.94225\n",
+ "Epoch 1604/100000\n",
+ " - 18s - loss: 0.3447 - acc: 0.9366 - val_loss: 0.3392 - val_acc: 0.9394\n",
+ "\n",
+ "Epoch 01604: val_acc did not improve from 0.94225\n",
+ "Epoch 1605/100000\n",
+ " - 19s - loss: 0.3473 - acc: 0.9351 - val_loss: 0.3371 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 01605: val_acc did not improve from 0.94225\n",
+ "Epoch 1606/100000\n",
+ " - 18s - loss: 0.3465 - acc: 0.9350 - val_loss: 0.3333 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 01606: val_acc did not improve from 0.94225\n",
+ "Epoch 1607/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9355 - val_loss: 0.3370 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01607: val_acc did not improve from 0.94225\n",
+ "Epoch 1608/100000\n",
+ " - 19s - loss: 0.3440 - acc: 0.9354 - val_loss: 0.3343 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 01608: val_acc did not improve from 0.94225\n",
+ "Epoch 1609/100000\n",
+ " - 19s - loss: 0.3466 - acc: 0.9356 - val_loss: 0.3300 - val_acc: 0.9390\n",
+ "\n",
+ "Epoch 01609: val_acc did not improve from 0.94225\n",
+ "Epoch 1610/100000\n",
+ " - 19s - loss: 0.3487 - acc: 0.9358 - val_loss: 0.3603 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 01610: val_acc did not improve from 0.94225\n",
+ "Epoch 1611/100000\n",
+ " - 18s - loss: 0.3464 - acc: 0.9362 - val_loss: 0.3817 - val_acc: 0.9193\n",
+ "\n",
+ "Epoch 01611: val_acc did not improve from 0.94225\n",
+ "Epoch 1612/100000\n",
+ " - 19s - loss: 0.3451 - acc: 0.9362 - val_loss: 0.3258 - val_acc: 0.9417\n",
+ "\n",
+ "Epoch 01612: val_acc did not improve from 0.94225\n",
+ "Epoch 1613/100000\n",
+ " - 19s - loss: 0.3459 - acc: 0.9360 - val_loss: 0.3432 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 01613: val_acc did not improve from 0.94225\n",
+ "Epoch 1614/100000\n",
+ " - 19s - loss: 0.3455 - acc: 0.9356 - val_loss: 0.3327 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 01614: val_acc did not improve from 0.94225\n",
+ "Epoch 1615/100000\n",
+ " - 18s - loss: 0.3446 - acc: 0.9359 - val_loss: 0.3321 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 01615: val_acc did not improve from 0.94225\n",
+ "Epoch 1616/100000\n",
+ " - 19s - loss: 0.3469 - acc: 0.9355 - val_loss: 0.3439 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 01616: val_acc did not improve from 0.94225\n",
+ "Epoch 1617/100000\n",
+ " - 19s - loss: 0.3447 - acc: 0.9355 - val_loss: 0.3449 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 01617: val_acc did not improve from 0.94225\n",
+ "Epoch 1618/100000\n",
+ " - 19s - loss: 0.3466 - acc: 0.9346 - val_loss: 0.3747 - val_acc: 0.9158\n",
+ "\n",
+ "Epoch 01618: val_acc did not improve from 0.94225\n",
+ "Epoch 1619/100000\n",
+ " - 18s - loss: 0.3439 - acc: 0.9360 - val_loss: 0.3319 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 01619: val_acc did not improve from 0.94225\n",
+ "Epoch 1620/100000\n",
+ " - 19s - loss: 0.3460 - acc: 0.9355 - val_loss: 0.3611 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 01620: val_acc did not improve from 0.94225\n",
+ "Epoch 1621/100000\n",
+ " - 18s - loss: 0.3443 - acc: 0.9361 - val_loss: 0.3344 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 01621: val_acc did not improve from 0.94225\n",
+ "Epoch 1622/100000\n",
+ " - 19s - loss: 0.3442 - acc: 0.9357 - val_loss: 0.3352 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 01622: val_acc did not improve from 0.94225\n",
+ "Epoch 1623/100000\n",
+ " - 19s - loss: 0.3438 - acc: 0.9354 - val_loss: 0.3325 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 01623: val_acc did not improve from 0.94225\n",
+ "Epoch 1624/100000\n",
+ " - 19s - loss: 0.3481 - acc: 0.9346 - val_loss: 0.3577 - val_acc: 0.9231\n",
+ "\n",
+ "Epoch 01624: val_acc did not improve from 0.94225\n",
+ "Epoch 1625/100000\n",
+ " - 19s - loss: 0.3495 - acc: 0.9352 - val_loss: 0.3412 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 01625: val_acc did not improve from 0.94225\n",
+ "Epoch 1626/100000\n",
+ " - 19s - loss: 0.3481 - acc: 0.9362 - val_loss: 0.3484 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 01626: val_acc did not improve from 0.94225\n",
+ "Epoch 1627/100000\n",
+ " - 18s - loss: 0.3444 - acc: 0.9361 - val_loss: 0.3314 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01627: val_acc did not improve from 0.94225\n",
+ "Epoch 1628/100000\n",
+ " - 19s - loss: 0.3439 - acc: 0.9360 - val_loss: 0.3709 - val_acc: 0.9179\n",
+ "\n",
+ "Epoch 01628: val_acc did not improve from 0.94225\n",
+ "Epoch 1629/100000\n",
+ " - 18s - loss: 0.3443 - acc: 0.9358 - val_loss: 0.3754 - val_acc: 0.9152\n",
+ "\n",
+ "Epoch 01629: val_acc did not improve from 0.94225\n",
+ "Epoch 1630/100000\n",
+ " - 19s - loss: 0.3447 - acc: 0.9360 - val_loss: 0.3456 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 01630: val_acc did not improve from 0.94225\n",
+ "Epoch 1631/100000\n",
+ " - 19s - loss: 0.3471 - acc: 0.9352 - val_loss: 0.3426 - val_acc: 0.9323\n",
+ "\n",
+ "Epoch 01631: val_acc did not improve from 0.94225\n",
+ "Epoch 1632/100000\n",
+ " - 18s - loss: 0.3438 - acc: 0.9361 - val_loss: 0.3754 - val_acc: 0.9179\n",
+ "\n",
+ "Epoch 01632: val_acc did not improve from 0.94225\n",
+ "Epoch 1633/100000\n",
+ " - 19s - loss: 0.3450 - acc: 0.9358 - val_loss: 0.3397 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01633: val_acc did not improve from 0.94225\n",
+ "Epoch 1634/100000\n",
+ " - 19s - loss: 0.3453 - acc: 0.9359 - val_loss: 0.4063 - val_acc: 0.8991\n",
+ "\n",
+ "Epoch 01634: val_acc did not improve from 0.94225\n",
+ "Epoch 1635/100000\n",
+ " - 19s - loss: 0.3429 - acc: 0.9365 - val_loss: 0.3493 - val_acc: 0.9257\n",
+ "\n",
+ "Epoch 01635: val_acc did not improve from 0.94225\n",
+ "Epoch 1636/100000\n",
+ " - 19s - loss: 0.3440 - acc: 0.9354 - val_loss: 0.3527 - val_acc: 0.9253\n",
+ "\n",
+ "Epoch 01636: val_acc did not improve from 0.94225\n",
+ "Epoch 1637/100000\n",
+ " - 19s - loss: 0.3425 - acc: 0.9363 - val_loss: 0.3715 - val_acc: 0.9200\n",
+ "\n",
+ "Epoch 01637: val_acc did not improve from 0.94225\n",
+ "Epoch 1638/100000\n",
+ " - 18s - loss: 0.3465 - acc: 0.9350 - val_loss: 0.3477 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 01638: val_acc did not improve from 0.94225\n",
+ "Epoch 1639/100000\n",
+ " - 19s - loss: 0.3435 - acc: 0.9357 - val_loss: 0.3380 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 01639: val_acc did not improve from 0.94225\n",
+ "Epoch 1640/100000\n",
+ " - 19s - loss: 0.3440 - acc: 0.9361 - val_loss: 0.3594 - val_acc: 0.9247\n",
+ "\n",
+ "Epoch 01640: val_acc did not improve from 0.94225\n",
+ "Epoch 1641/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9358 - val_loss: 0.3258 - val_acc: 0.9397\n",
+ "\n",
+ "Epoch 01641: val_acc did not improve from 0.94225\n",
+ "Epoch 1642/100000\n",
+ " - 18s - loss: 0.3408 - acc: 0.9363 - val_loss: 0.3678 - val_acc: 0.9222\n",
+ "\n",
+ "Epoch 01642: val_acc did not improve from 0.94225\n",
+ "Epoch 1643/100000\n",
+ " - 18s - loss: 0.3461 - acc: 0.9348 - val_loss: 0.3350 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 01643: val_acc did not improve from 0.94225\n",
+ "Epoch 1644/100000\n",
+ " - 19s - loss: 0.3451 - acc: 0.9355 - val_loss: 0.3369 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 01644: val_acc did not improve from 0.94225\n",
+ "Epoch 1645/100000\n",
+ " - 19s - loss: 0.3438 - acc: 0.9364 - val_loss: 0.3309 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 01645: val_acc did not improve from 0.94225\n",
+ "Epoch 1646/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9361 - val_loss: 0.3353 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 01646: val_acc did not improve from 0.94225\n",
+ "Epoch 1647/100000\n",
+ " - 19s - loss: 0.3414 - acc: 0.9366 - val_loss: 0.3882 - val_acc: 0.9058\n",
+ "\n",
+ "Epoch 01647: val_acc did not improve from 0.94225\n",
+ "Epoch 1648/100000\n",
+ " - 19s - loss: 0.3422 - acc: 0.9358 - val_loss: 0.3841 - val_acc: 0.9138\n",
+ "\n",
+ "Epoch 01648: val_acc did not improve from 0.94225\n",
+ "Epoch 1649/100000\n",
+ " - 18s - loss: 0.3428 - acc: 0.9357 - val_loss: 0.3569 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01649: val_acc did not improve from 0.94225\n",
+ "Epoch 1650/100000\n",
+ " - 19s - loss: 0.3447 - acc: 0.9355 - val_loss: 0.4562 - val_acc: 0.8792\n",
+ "\n",
+ "Epoch 01650: val_acc did not improve from 0.94225\n",
+ "Epoch 1651/100000\n",
+ " - 19s - loss: 0.3428 - acc: 0.9360 - val_loss: 0.3273 - val_acc: 0.9394\n",
+ "\n",
+ "Epoch 01651: val_acc did not improve from 0.94225\n",
+ "Epoch 1652/100000\n",
+ " - 19s - loss: 0.3444 - acc: 0.9355 - val_loss: 0.3486 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01652: val_acc did not improve from 0.94225\n",
+ "Epoch 1653/100000\n",
+ " - 19s - loss: 0.3439 - acc: 0.9355 - val_loss: 0.3413 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 01653: val_acc did not improve from 0.94225\n",
+ "Epoch 1654/100000\n",
+ " - 19s - loss: 0.3462 - acc: 0.9356 - val_loss: 0.3870 - val_acc: 0.9082\n",
+ "\n",
+ "Epoch 01654: val_acc did not improve from 0.94225\n",
+ "Epoch 1655/100000\n",
+ " - 19s - loss: 0.3412 - acc: 0.9367 - val_loss: 0.4030 - val_acc: 0.9109\n",
+ "\n",
+ "Epoch 01655: val_acc did not improve from 0.94225\n",
+ "Epoch 1656/100000\n",
+ " - 19s - loss: 0.3430 - acc: 0.9360 - val_loss: 0.3620 - val_acc: 0.9208\n",
+ "\n",
+ "Epoch 01656: val_acc did not improve from 0.94225\n",
+ "Epoch 1657/100000\n",
+ " - 18s - loss: 0.3476 - acc: 0.9351 - val_loss: 0.3447 - val_acc: 0.9286\n",
+ "\n",
+ "Epoch 01657: val_acc did not improve from 0.94225\n",
+ "Epoch 1658/100000\n",
+ " - 18s - loss: 0.3434 - acc: 0.9354 - val_loss: 0.3915 - val_acc: 0.9119\n",
+ "\n",
+ "Epoch 01658: val_acc did not improve from 0.94225\n",
+ "Epoch 1659/100000\n",
+ " - 19s - loss: 0.3440 - acc: 0.9361 - val_loss: 0.4365 - val_acc: 0.9025\n",
+ "\n",
+ "Epoch 01659: val_acc did not improve from 0.94225\n",
+ "Epoch 1660/100000\n",
+ " - 19s - loss: 0.3437 - acc: 0.9366 - val_loss: 0.3494 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 01660: val_acc did not improve from 0.94225\n",
+ "Epoch 1661/100000\n",
+ " - 18s - loss: 0.3466 - acc: 0.9356 - val_loss: 0.3381 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 01661: val_acc did not improve from 0.94225\n",
+ "Epoch 1662/100000\n",
+ " - 19s - loss: 0.3439 - acc: 0.9357 - val_loss: 0.3647 - val_acc: 0.9226\n",
+ "\n",
+ "Epoch 01662: val_acc did not improve from 0.94225\n",
+ "Epoch 1663/100000\n",
+ " - 18s - loss: 0.3427 - acc: 0.9363 - val_loss: 0.3479 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 01663: val_acc did not improve from 0.94225\n",
+ "Epoch 1664/100000\n",
+ " - 19s - loss: 0.3469 - acc: 0.9354 - val_loss: 0.3681 - val_acc: 0.9180\n",
+ "\n",
+ "Epoch 01664: val_acc did not improve from 0.94225\n",
+ "Epoch 1665/100000\n",
+ " - 19s - loss: 0.3438 - acc: 0.9361 - val_loss: 0.3655 - val_acc: 0.9196\n",
+ "\n",
+ "Epoch 01665: val_acc did not improve from 0.94225\n",
+ "Epoch 1666/100000\n",
+ " - 18s - loss: 0.3465 - acc: 0.9355 - val_loss: 0.3244 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 01666: val_acc did not improve from 0.94225\n",
+ "Epoch 1667/100000\n",
+ " - 18s - loss: 0.3452 - acc: 0.9353 - val_loss: 0.3734 - val_acc: 0.9197\n",
+ "\n",
+ "Epoch 01667: val_acc did not improve from 0.94225\n",
+ "Epoch 1668/100000\n",
+ " - 19s - loss: 0.3427 - acc: 0.9359 - val_loss: 0.3355 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 01668: val_acc did not improve from 0.94225\n",
+ "Epoch 1669/100000\n",
+ " - 18s - loss: 0.3443 - acc: 0.9362 - val_loss: 0.3602 - val_acc: 0.9214\n",
+ "\n",
+ "Epoch 01669: val_acc did not improve from 0.94225\n",
+ "Epoch 1670/100000\n",
+ " - 18s - loss: 0.3449 - acc: 0.9359 - val_loss: 0.3741 - val_acc: 0.9289\n",
+ "\n",
+ "Epoch 01670: val_acc did not improve from 0.94225\n",
+ "Epoch 1671/100000\n",
+ " - 18s - loss: 0.3432 - acc: 0.9362 - val_loss: 0.3407 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 01671: val_acc did not improve from 0.94225\n",
+ "Epoch 1672/100000\n",
+ " - 19s - loss: 0.3441 - acc: 0.9361 - val_loss: 0.4221 - val_acc: 0.8875\n",
+ "\n",
+ "Epoch 01672: val_acc did not improve from 0.94225\n",
+ "Epoch 1673/100000\n",
+ " - 18s - loss: 0.3485 - acc: 0.9346 - val_loss: 0.3327 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 01673: val_acc did not improve from 0.94225\n",
+ "Epoch 1674/100000\n",
+ " - 19s - loss: 0.3444 - acc: 0.9360 - val_loss: 0.3337 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 01674: val_acc did not improve from 0.94225\n",
+ "Epoch 1675/100000\n",
+ " - 18s - loss: 0.3420 - acc: 0.9367 - val_loss: 0.3317 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 01675: val_acc did not improve from 0.94225\n",
+ "Epoch 1676/100000\n",
+ " - 19s - loss: 0.3458 - acc: 0.9359 - val_loss: 0.3403 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 01676: val_acc did not improve from 0.94225\n",
+ "Epoch 1677/100000\n",
+ " - 19s - loss: 0.3442 - acc: 0.9363 - val_loss: 0.3326 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01677: val_acc did not improve from 0.94225\n",
+ "Epoch 1678/100000\n",
+ " - 19s - loss: 0.3440 - acc: 0.9359 - val_loss: 0.4178 - val_acc: 0.8981\n",
+ "\n",
+ "Epoch 01678: val_acc did not improve from 0.94225\n",
+ "Epoch 1679/100000\n",
+ " - 18s - loss: 0.3452 - acc: 0.9360 - val_loss: 0.3279 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 01679: val_acc did not improve from 0.94225\n",
+ "Epoch 1680/100000\n",
+ " - 19s - loss: 0.3480 - acc: 0.9356 - val_loss: 0.3342 - val_acc: 0.9407\n",
+ "\n",
+ "Epoch 01680: val_acc did not improve from 0.94225\n",
+ "Epoch 1681/100000\n",
+ " - 19s - loss: 0.3467 - acc: 0.9359 - val_loss: 0.3473 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 01681: val_acc did not improve from 0.94225\n",
+ "Epoch 1682/100000\n",
+ " - 19s - loss: 0.3464 - acc: 0.9359 - val_loss: 0.3478 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 01682: val_acc did not improve from 0.94225\n",
+ "Epoch 1683/100000\n",
+ " - 18s - loss: 0.3445 - acc: 0.9359 - val_loss: 0.3420 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 01683: val_acc did not improve from 0.94225\n",
+ "Epoch 1684/100000\n",
+ " - 19s - loss: 0.3421 - acc: 0.9362 - val_loss: 0.3478 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 01684: val_acc did not improve from 0.94225\n",
+ "Epoch 1685/100000\n",
+ " - 19s - loss: 0.3470 - acc: 0.9355 - val_loss: 0.3705 - val_acc: 0.9248\n",
+ "\n",
+ "Epoch 01685: val_acc did not improve from 0.94225\n",
+ "Epoch 1686/100000\n",
+ " - 19s - loss: 0.3464 - acc: 0.9354 - val_loss: 0.3491 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 01686: val_acc did not improve from 0.94225\n",
+ "Epoch 1687/100000\n",
+ " - 19s - loss: 0.3437 - acc: 0.9358 - val_loss: 0.3414 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 01687: val_acc did not improve from 0.94225\n",
+ "Epoch 1688/100000\n",
+ " - 19s - loss: 0.3431 - acc: 0.9353 - val_loss: 0.3396 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01688: val_acc did not improve from 0.94225\n",
+ "Epoch 1689/100000\n",
+ " - 19s - loss: 0.3455 - acc: 0.9359 - val_loss: 0.3668 - val_acc: 0.9242\n",
+ "\n",
+ "Epoch 01689: val_acc did not improve from 0.94225\n",
+ "Epoch 1690/100000\n",
+ " - 18s - loss: 0.3441 - acc: 0.9361 - val_loss: 0.3416 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 01690: val_acc did not improve from 0.94225\n",
+ "Epoch 1691/100000\n",
+ " - 19s - loss: 0.3458 - acc: 0.9357 - val_loss: 0.3389 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01691: val_acc did not improve from 0.94225\n",
+ "Epoch 1692/100000\n",
+ " - 19s - loss: 0.3459 - acc: 0.9351 - val_loss: 0.3362 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 01692: val_acc did not improve from 0.94225\n",
+ "Epoch 1693/100000\n",
+ " - 18s - loss: 0.3433 - acc: 0.9363 - val_loss: 0.3345 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01693: val_acc did not improve from 0.94225\n",
+ "Epoch 1694/100000\n",
+ " - 19s - loss: 0.3441 - acc: 0.9359 - val_loss: 0.4655 - val_acc: 0.8590\n",
+ "\n",
+ "Epoch 01694: val_acc did not improve from 0.94225\n",
+ "Epoch 1695/100000\n",
+ " - 18s - loss: 0.3440 - acc: 0.9354 - val_loss: 0.3398 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01695: val_acc did not improve from 0.94225\n",
+ "Epoch 1696/100000\n",
+ " - 19s - loss: 0.3441 - acc: 0.9355 - val_loss: 0.3628 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 01696: val_acc did not improve from 0.94225\n",
+ "Epoch 1697/100000\n",
+ " - 19s - loss: 0.3441 - acc: 0.9363 - val_loss: 0.3391 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 01697: val_acc did not improve from 0.94225\n",
+ "Epoch 1698/100000\n",
+ " - 18s - loss: 0.3475 - acc: 0.9351 - val_loss: 0.3350 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01698: val_acc did not improve from 0.94225\n",
+ "Epoch 1699/100000\n",
+ " - 19s - loss: 0.3451 - acc: 0.9350 - val_loss: 0.3376 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 01699: val_acc did not improve from 0.94225\n",
+ "Epoch 1700/100000\n",
+ " - 18s - loss: 0.3490 - acc: 0.9349 - val_loss: 0.3493 - val_acc: 0.9280\n",
+ "\n",
+ "Epoch 01700: val_acc did not improve from 0.94225\n",
+ "Epoch 1701/100000\n",
+ " - 19s - loss: 0.3430 - acc: 0.9363 - val_loss: 0.3804 - val_acc: 0.9165\n",
+ "\n",
+ "Epoch 01701: val_acc did not improve from 0.94225\n",
+ "Epoch 1702/100000\n",
+ " - 19s - loss: 0.3431 - acc: 0.9354 - val_loss: 0.3645 - val_acc: 0.9149\n",
+ "\n",
+ "Epoch 01702: val_acc did not improve from 0.94225\n",
+ "Epoch 1703/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9354 - val_loss: 0.3316 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 01703: val_acc did not improve from 0.94225\n",
+ "Epoch 1704/100000\n",
+ " - 19s - loss: 0.3448 - acc: 0.9358 - val_loss: 0.3767 - val_acc: 0.9126\n",
+ "\n",
+ "Epoch 01704: val_acc did not improve from 0.94225\n",
+ "Epoch 1705/100000\n",
+ " - 19s - loss: 0.3428 - acc: 0.9358 - val_loss: 0.3345 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 01705: val_acc did not improve from 0.94225\n",
+ "Epoch 1706/100000\n",
+ " - 19s - loss: 0.3441 - acc: 0.9359 - val_loss: 0.3294 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 01706: val_acc did not improve from 0.94225\n",
+ "Epoch 1707/100000\n",
+ " - 19s - loss: 0.3434 - acc: 0.9363 - val_loss: 0.5831 - val_acc: 0.8240\n",
+ "\n",
+ "Epoch 01707: val_acc did not improve from 0.94225\n",
+ "Epoch 1708/100000\n",
+ " - 19s - loss: 0.3443 - acc: 0.9357 - val_loss: 0.3494 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 01708: val_acc did not improve from 0.94225\n",
+ "Epoch 1709/100000\n",
+ " - 18s - loss: 0.3461 - acc: 0.9349 - val_loss: 0.3415 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 01709: val_acc did not improve from 0.94225\n",
+ "Epoch 1710/100000\n",
+ " - 19s - loss: 0.3421 - acc: 0.9365 - val_loss: 0.3345 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 01710: val_acc did not improve from 0.94225\n",
+ "Epoch 1711/100000\n",
+ " - 19s - loss: 0.3448 - acc: 0.9355 - val_loss: 0.3742 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 01711: val_acc did not improve from 0.94225\n",
+ "Epoch 1712/100000\n",
+ " - 18s - loss: 0.3457 - acc: 0.9356 - val_loss: 0.3552 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 01712: val_acc did not improve from 0.94225\n",
+ "Epoch 1713/100000\n",
+ " - 19s - loss: 0.3476 - acc: 0.9357 - val_loss: 0.3498 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 01713: val_acc did not improve from 0.94225\n",
+ "Epoch 1714/100000\n",
+ " - 19s - loss: 0.3438 - acc: 0.9357 - val_loss: 0.3344 - val_acc: 0.9392\n",
+ "\n",
+ "Epoch 01714: val_acc did not improve from 0.94225\n",
+ "Epoch 1715/100000\n",
+ " - 19s - loss: 0.3450 - acc: 0.9355 - val_loss: 0.3500 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 01715: val_acc did not improve from 0.94225\n",
+ "Epoch 1716/100000\n",
+ " - 18s - loss: 0.3433 - acc: 0.9358 - val_loss: 0.3420 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 01716: val_acc did not improve from 0.94225\n",
+ "Epoch 1717/100000\n",
+ " - 19s - loss: 0.3440 - acc: 0.9361 - val_loss: 0.3423 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 01717: val_acc did not improve from 0.94225\n",
+ "Epoch 1718/100000\n",
+ " - 19s - loss: 0.3441 - acc: 0.9359 - val_loss: 0.3352 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 01718: val_acc did not improve from 0.94225\n",
+ "Epoch 1719/100000\n",
+ " - 18s - loss: 0.3438 - acc: 0.9365 - val_loss: 0.3395 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01719: val_acc did not improve from 0.94225\n",
+ "Epoch 1720/100000\n",
+ " - 19s - loss: 0.3472 - acc: 0.9348 - val_loss: 0.3764 - val_acc: 0.9224\n",
+ "\n",
+ "Epoch 01720: val_acc did not improve from 0.94225\n",
+ "Epoch 1721/100000\n",
+ " - 19s - loss: 0.3417 - acc: 0.9370 - val_loss: 0.3564 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 01721: val_acc did not improve from 0.94225\n",
+ "Epoch 1722/100000\n",
+ " - 18s - loss: 0.3439 - acc: 0.9359 - val_loss: 0.3345 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01722: val_acc did not improve from 0.94225\n",
+ "Epoch 1723/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9353 - val_loss: 0.3339 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01723: val_acc did not improve from 0.94225\n",
+ "Epoch 1724/100000\n",
+ " - 19s - loss: 0.3426 - acc: 0.9361 - val_loss: 0.3722 - val_acc: 0.9200\n",
+ "\n",
+ "Epoch 01724: val_acc did not improve from 0.94225\n",
+ "Epoch 1725/100000\n",
+ " - 19s - loss: 0.3446 - acc: 0.9360 - val_loss: 0.3671 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 01725: val_acc did not improve from 0.94225\n",
+ "Epoch 1726/100000\n",
+ " - 19s - loss: 0.3447 - acc: 0.9360 - val_loss: 0.3396 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 01726: val_acc did not improve from 0.94225\n",
+ "Epoch 1727/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9357 - val_loss: 0.5618 - val_acc: 0.8532\n",
+ "\n",
+ "Epoch 01727: val_acc did not improve from 0.94225\n",
+ "Epoch 1728/100000\n",
+ " - 19s - loss: 0.3446 - acc: 0.9367 - val_loss: 0.3351 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 01728: val_acc did not improve from 0.94225\n",
+ "Epoch 1729/100000\n",
+ " - 19s - loss: 0.3472 - acc: 0.9349 - val_loss: 0.3545 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 01729: val_acc did not improve from 0.94225\n",
+ "Epoch 1730/100000\n",
+ " - 18s - loss: 0.3458 - acc: 0.9359 - val_loss: 0.3827 - val_acc: 0.9148\n",
+ "\n",
+ "Epoch 01730: val_acc did not improve from 0.94225\n",
+ "Epoch 1731/100000\n",
+ " - 19s - loss: 0.3466 - acc: 0.9350 - val_loss: 0.4218 - val_acc: 0.8952\n",
+ "\n",
+ "Epoch 01731: val_acc did not improve from 0.94225\n",
+ "Epoch 1732/100000\n",
+ " - 19s - loss: 0.3463 - acc: 0.9361 - val_loss: 0.3861 - val_acc: 0.9171\n",
+ "\n",
+ "Epoch 01732: val_acc did not improve from 0.94225\n",
+ "Epoch 1733/100000\n",
+ " - 19s - loss: 0.3462 - acc: 0.9349 - val_loss: 0.3724 - val_acc: 0.9179\n",
+ "\n",
+ "Epoch 01733: val_acc did not improve from 0.94225\n",
+ "Epoch 1734/100000\n",
+ " - 19s - loss: 0.3442 - acc: 0.9362 - val_loss: 0.3649 - val_acc: 0.9167\n",
+ "\n",
+ "Epoch 01734: val_acc did not improve from 0.94225\n",
+ "Epoch 1735/100000\n",
+ " - 19s - loss: 0.3457 - acc: 0.9361 - val_loss: 0.3591 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 01735: val_acc did not improve from 0.94225\n",
+ "Epoch 1736/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9363 - val_loss: 0.3331 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 01736: val_acc did not improve from 0.94225\n",
+ "Epoch 1737/100000\n",
+ " - 18s - loss: 0.3444 - acc: 0.9356 - val_loss: 0.3535 - val_acc: 0.9275\n",
+ "\n",
+ "Epoch 01737: val_acc did not improve from 0.94225\n",
+ "Epoch 1738/100000\n",
+ " - 18s - loss: 0.3451 - acc: 0.9352 - val_loss: 0.3405 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 01738: val_acc did not improve from 0.94225\n",
+ "Epoch 1739/100000\n",
+ " - 18s - loss: 0.3450 - acc: 0.9360 - val_loss: 0.3644 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 01739: val_acc did not improve from 0.94225\n",
+ "Epoch 1740/100000\n",
+ " - 19s - loss: 0.3475 - acc: 0.9354 - val_loss: 0.3492 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 01740: val_acc did not improve from 0.94225\n",
+ "Epoch 1741/100000\n",
+ " - 19s - loss: 0.3438 - acc: 0.9364 - val_loss: 0.3353 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 01741: val_acc did not improve from 0.94225\n",
+ "Epoch 1742/100000\n",
+ " - 19s - loss: 0.3432 - acc: 0.9358 - val_loss: 0.3533 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 01742: val_acc did not improve from 0.94225\n",
+ "Epoch 1743/100000\n",
+ " - 19s - loss: 0.3495 - acc: 0.9349 - val_loss: 0.3384 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 01743: val_acc did not improve from 0.94225\n",
+ "Epoch 1744/100000\n",
+ " - 19s - loss: 0.3461 - acc: 0.9351 - val_loss: 0.3420 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 01744: val_acc did not improve from 0.94225\n",
+ "Epoch 1745/100000\n",
+ " - 19s - loss: 0.3434 - acc: 0.9356 - val_loss: 0.5406 - val_acc: 0.8353\n",
+ "\n",
+ "Epoch 01745: val_acc did not improve from 0.94225\n",
+ "Epoch 1746/100000\n",
+ " - 19s - loss: 0.3448 - acc: 0.9356 - val_loss: 0.3458 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 01746: val_acc did not improve from 0.94225\n",
+ "Epoch 1747/100000\n",
+ " - 19s - loss: 0.3433 - acc: 0.9362 - val_loss: 0.3567 - val_acc: 0.9282\n",
+ "\n",
+ "Epoch 01747: val_acc did not improve from 0.94225\n",
+ "Epoch 1748/100000\n",
+ " - 18s - loss: 0.3457 - acc: 0.9356 - val_loss: 0.3527 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 01748: val_acc did not improve from 0.94225\n",
+ "Epoch 1749/100000\n",
+ " - 19s - loss: 0.3443 - acc: 0.9360 - val_loss: 0.3767 - val_acc: 0.9055\n",
+ "\n",
+ "Epoch 01749: val_acc did not improve from 0.94225\n",
+ "Epoch 1750/100000\n",
+ " - 19s - loss: 0.3460 - acc: 0.9357 - val_loss: 0.3426 - val_acc: 0.9266\n",
+ "\n",
+ "Epoch 01750: val_acc did not improve from 0.94225\n",
+ "Epoch 1751/100000\n",
+ " - 18s - loss: 0.3447 - acc: 0.9355 - val_loss: 0.3402 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 01751: val_acc did not improve from 0.94225\n",
+ "Epoch 1752/100000\n",
+ " - 19s - loss: 0.3440 - acc: 0.9366 - val_loss: 0.3238 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 01752: val_acc did not improve from 0.94225\n",
+ "Epoch 1753/100000\n",
+ " - 18s - loss: 0.3451 - acc: 0.9357 - val_loss: 0.4722 - val_acc: 0.8796\n",
+ "\n",
+ "Epoch 01753: val_acc did not improve from 0.94225\n",
+ "Epoch 1754/100000\n",
+ " - 19s - loss: 0.3444 - acc: 0.9354 - val_loss: 0.3667 - val_acc: 0.9242\n",
+ "\n",
+ "Epoch 01754: val_acc did not improve from 0.94225\n",
+ "Epoch 1755/100000\n",
+ " - 19s - loss: 0.3422 - acc: 0.9365 - val_loss: 0.3503 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 01755: val_acc did not improve from 0.94225\n",
+ "Epoch 1756/100000\n",
+ " - 18s - loss: 0.3440 - acc: 0.9365 - val_loss: 0.3486 - val_acc: 0.9304\n",
+ "\n",
+ "Epoch 01756: val_acc did not improve from 0.94225\n",
+ "Epoch 1757/100000\n",
+ " - 19s - loss: 0.3459 - acc: 0.9360 - val_loss: 0.3721 - val_acc: 0.9223\n",
+ "\n",
+ "Epoch 01757: val_acc did not improve from 0.94225\n",
+ "Epoch 1758/100000\n",
+ " - 19s - loss: 0.3422 - acc: 0.9370 - val_loss: 0.3434 - val_acc: 0.9302\n",
+ "\n",
+ "Epoch 01758: val_acc did not improve from 0.94225\n",
+ "Epoch 1759/100000\n",
+ " - 18s - loss: 0.3469 - acc: 0.9354 - val_loss: 0.3970 - val_acc: 0.9048\n",
+ "\n",
+ "Epoch 01759: val_acc did not improve from 0.94225\n",
+ "Epoch 1760/100000\n",
+ " - 18s - loss: 0.3462 - acc: 0.9358 - val_loss: 0.4063 - val_acc: 0.9033\n",
+ "\n",
+ "Epoch 01760: val_acc did not improve from 0.94225\n",
+ "Epoch 1761/100000\n",
+ " - 19s - loss: 0.3471 - acc: 0.9356 - val_loss: 0.3360 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 01761: val_acc did not improve from 0.94225\n",
+ "Epoch 1762/100000\n",
+ " - 18s - loss: 0.3458 - acc: 0.9357 - val_loss: 0.3582 - val_acc: 0.9275\n",
+ "\n",
+ "Epoch 01762: val_acc did not improve from 0.94225\n",
+ "Epoch 1763/100000\n",
+ " - 19s - loss: 0.3464 - acc: 0.9352 - val_loss: 0.3693 - val_acc: 0.9172\n",
+ "\n",
+ "Epoch 01763: val_acc did not improve from 0.94225\n",
+ "Epoch 1764/100000\n",
+ " - 19s - loss: 0.3433 - acc: 0.9360 - val_loss: 0.3368 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 01764: val_acc did not improve from 0.94225\n",
+ "Epoch 1765/100000\n",
+ " - 19s - loss: 0.3435 - acc: 0.9361 - val_loss: 0.3365 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 01765: val_acc did not improve from 0.94225\n",
+ "Epoch 1766/100000\n",
+ " - 19s - loss: 0.3482 - acc: 0.9354 - val_loss: 0.3367 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 01766: val_acc did not improve from 0.94225\n",
+ "Epoch 1767/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9360 - val_loss: 0.3322 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 01767: val_acc did not improve from 0.94225\n",
+ "Epoch 1768/100000\n",
+ " - 19s - loss: 0.3461 - acc: 0.9358 - val_loss: 0.5011 - val_acc: 0.8784\n",
+ "\n",
+ "Epoch 01768: val_acc did not improve from 0.94225\n",
+ "Epoch 1769/100000\n",
+ " - 18s - loss: 0.3445 - acc: 0.9361 - val_loss: 0.3607 - val_acc: 0.9216\n",
+ "\n",
+ "Epoch 01769: val_acc did not improve from 0.94225\n",
+ "Epoch 1770/100000\n",
+ " - 19s - loss: 0.3439 - acc: 0.9361 - val_loss: 0.3617 - val_acc: 0.9244\n",
+ "\n",
+ "Epoch 01770: val_acc did not improve from 0.94225\n",
+ "Epoch 1771/100000\n",
+ " - 19s - loss: 0.3460 - acc: 0.9355 - val_loss: 0.3482 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 01771: val_acc did not improve from 0.94225\n",
+ "Epoch 1772/100000\n",
+ " - 19s - loss: 0.3451 - acc: 0.9363 - val_loss: 0.3517 - val_acc: 0.9241\n",
+ "\n",
+ "Epoch 01772: val_acc did not improve from 0.94225\n",
+ "Epoch 1773/100000\n",
+ " - 19s - loss: 0.3476 - acc: 0.9357 - val_loss: 0.3437 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 01773: val_acc did not improve from 0.94225\n",
+ "Epoch 1774/100000\n",
+ " - 18s - loss: 0.3475 - acc: 0.9351 - val_loss: 0.3953 - val_acc: 0.9081\n",
+ "\n",
+ "Epoch 01774: val_acc did not improve from 0.94225\n",
+ "Epoch 1775/100000\n",
+ " - 19s - loss: 0.3455 - acc: 0.9353 - val_loss: 0.3874 - val_acc: 0.9141\n",
+ "\n",
+ "Epoch 01775: val_acc did not improve from 0.94225\n",
+ "Epoch 1776/100000\n",
+ " - 19s - loss: 0.3438 - acc: 0.9358 - val_loss: 0.3388 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 01776: val_acc did not improve from 0.94225\n",
+ "Epoch 1777/100000\n",
+ " - 19s - loss: 0.3433 - acc: 0.9360 - val_loss: 0.3924 - val_acc: 0.9108\n",
+ "\n",
+ "Epoch 01777: val_acc did not improve from 0.94225\n",
+ "Epoch 1778/100000\n",
+ " - 19s - loss: 0.3443 - acc: 0.9354 - val_loss: 0.3360 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 01778: val_acc did not improve from 0.94225\n",
+ "Epoch 1779/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9362 - val_loss: 0.4493 - val_acc: 0.9014\n",
+ "\n",
+ "Epoch 01779: val_acc did not improve from 0.94225\n",
+ "Epoch 1780/100000\n",
+ " - 19s - loss: 0.3450 - acc: 0.9362 - val_loss: 0.3414 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 01780: val_acc did not improve from 0.94225\n",
+ "Epoch 1781/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9357 - val_loss: 0.3748 - val_acc: 0.9180\n",
+ "\n",
+ "Epoch 01781: val_acc did not improve from 0.94225\n",
+ "Epoch 1782/100000\n",
+ " - 18s - loss: 0.3441 - acc: 0.9363 - val_loss: 0.3718 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 01782: val_acc did not improve from 0.94225\n",
+ "Epoch 1783/100000\n",
+ " - 19s - loss: 0.3457 - acc: 0.9358 - val_loss: 0.3416 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 01783: val_acc did not improve from 0.94225\n",
+ "Epoch 1784/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9360 - val_loss: 0.4069 - val_acc: 0.9051\n",
+ "\n",
+ "Epoch 01784: val_acc did not improve from 0.94225\n",
+ "Epoch 1785/100000\n",
+ " - 19s - loss: 0.3436 - acc: 0.9367 - val_loss: 0.3453 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 01785: val_acc did not improve from 0.94225\n",
+ "Epoch 1786/100000\n",
+ " - 18s - loss: 0.3436 - acc: 0.9363 - val_loss: 0.3952 - val_acc: 0.9058\n",
+ "\n",
+ "Epoch 01786: val_acc did not improve from 0.94225\n",
+ "Epoch 1787/100000\n",
+ " - 19s - loss: 0.3453 - acc: 0.9362 - val_loss: 0.3863 - val_acc: 0.9085\n",
+ "\n",
+ "Epoch 01787: val_acc did not improve from 0.94225\n",
+ "Epoch 1788/100000\n",
+ " - 18s - loss: 0.3433 - acc: 0.9366 - val_loss: 0.3388 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 01788: val_acc did not improve from 0.94225\n",
+ "Epoch 1789/100000\n",
+ " - 19s - loss: 0.3472 - acc: 0.9351 - val_loss: 0.3632 - val_acc: 0.9232\n",
+ "\n",
+ "Epoch 01789: val_acc did not improve from 0.94225\n",
+ "Epoch 1790/100000\n",
+ " - 18s - loss: 0.3439 - acc: 0.9360 - val_loss: 0.3549 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 01790: val_acc did not improve from 0.94225\n",
+ "Epoch 1791/100000\n",
+ " - 19s - loss: 0.3456 - acc: 0.9351 - val_loss: 0.3600 - val_acc: 0.9195\n",
+ "\n",
+ "Epoch 01791: val_acc did not improve from 0.94225\n",
+ "Epoch 1792/100000\n",
+ " - 19s - loss: 0.3457 - acc: 0.9356 - val_loss: 0.3565 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 01792: val_acc did not improve from 0.94225\n",
+ "Epoch 1793/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9359 - val_loss: 0.3614 - val_acc: 0.9238\n",
+ "\n",
+ "Epoch 01793: val_acc did not improve from 0.94225\n",
+ "Epoch 1794/100000\n",
+ " - 19s - loss: 0.3473 - acc: 0.9358 - val_loss: 0.3562 - val_acc: 0.9309\n",
+ "\n",
+ "Epoch 01794: val_acc did not improve from 0.94225\n",
+ "Epoch 1795/100000\n",
+ " - 19s - loss: 0.3457 - acc: 0.9357 - val_loss: 0.3765 - val_acc: 0.9223\n",
+ "\n",
+ "Epoch 01795: val_acc did not improve from 0.94225\n",
+ "Epoch 1796/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9355 - val_loss: 0.3620 - val_acc: 0.9217\n",
+ "\n",
+ "Epoch 01796: val_acc did not improve from 0.94225\n",
+ "Epoch 1797/100000\n",
+ " - 19s - loss: 0.3472 - acc: 0.9349 - val_loss: 0.3366 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 01797: val_acc did not improve from 0.94225\n",
+ "Epoch 1798/100000\n",
+ " - 19s - loss: 0.3437 - acc: 0.9369 - val_loss: 0.3318 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 01798: val_acc did not improve from 0.94225\n",
+ "Epoch 1799/100000\n",
+ " - 19s - loss: 0.3499 - acc: 0.9347 - val_loss: 0.3550 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 01799: val_acc did not improve from 0.94225\n",
+ "Epoch 1800/100000\n",
+ " - 19s - loss: 0.3434 - acc: 0.9360 - val_loss: 0.3579 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 01800: val_acc did not improve from 0.94225\n",
+ "Epoch 1801/100000\n",
+ " - 18s - loss: 0.3461 - acc: 0.9351 - val_loss: 0.3316 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 01801: val_acc did not improve from 0.94225\n",
+ "Epoch 1802/100000\n",
+ " - 19s - loss: 0.3431 - acc: 0.9370 - val_loss: 0.3450 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 01802: val_acc did not improve from 0.94225\n",
+ "Epoch 1803/100000\n",
+ " - 19s - loss: 0.3478 - acc: 0.9351 - val_loss: 0.3380 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 01803: val_acc did not improve from 0.94225\n",
+ "Epoch 1804/100000\n",
+ " - 19s - loss: 0.3470 - acc: 0.9360 - val_loss: 0.3379 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 01804: val_acc did not improve from 0.94225\n",
+ "Epoch 1805/100000\n",
+ " - 18s - loss: 0.3482 - acc: 0.9349 - val_loss: 0.3516 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01805: val_acc did not improve from 0.94225\n",
+ "Epoch 1806/100000\n",
+ " - 19s - loss: 0.3464 - acc: 0.9354 - val_loss: 0.3631 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 01806: val_acc did not improve from 0.94225\n",
+ "Epoch 1807/100000\n",
+ " - 19s - loss: 0.3455 - acc: 0.9356 - val_loss: 0.3397 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 01807: val_acc did not improve from 0.94225\n",
+ "Epoch 1808/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9356 - val_loss: 0.4644 - val_acc: 0.8650\n",
+ "\n",
+ "Epoch 01808: val_acc did not improve from 0.94225\n",
+ "Epoch 1809/100000\n",
+ " - 19s - loss: 0.3421 - acc: 0.9365 - val_loss: 0.4945 - val_acc: 0.8542\n",
+ "\n",
+ "Epoch 01809: val_acc did not improve from 0.94225\n",
+ "Epoch 1810/100000\n",
+ " - 19s - loss: 0.3462 - acc: 0.9360 - val_loss: 0.3433 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 01810: val_acc did not improve from 0.94225\n",
+ "Epoch 1811/100000\n",
+ " - 18s - loss: 0.3458 - acc: 0.9351 - val_loss: 0.3511 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 01811: val_acc did not improve from 0.94225\n",
+ "Epoch 1812/100000\n",
+ " - 19s - loss: 0.3450 - acc: 0.9354 - val_loss: 0.3510 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 01812: val_acc did not improve from 0.94225\n",
+ "Epoch 1813/100000\n",
+ " - 19s - loss: 0.3432 - acc: 0.9359 - val_loss: 0.3347 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01813: val_acc did not improve from 0.94225\n",
+ "Epoch 1814/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9358 - val_loss: 0.3679 - val_acc: 0.9173\n",
+ "\n",
+ "Epoch 01814: val_acc did not improve from 0.94225\n",
+ "Epoch 1815/100000\n",
+ " - 18s - loss: 0.3439 - acc: 0.9360 - val_loss: 0.3394 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01815: val_acc did not improve from 0.94225\n",
+ "Epoch 1816/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9356 - val_loss: 0.4141 - val_acc: 0.8937\n",
+ "\n",
+ "Epoch 01816: val_acc did not improve from 0.94225\n",
+ "Epoch 1817/100000\n",
+ " - 19s - loss: 0.3465 - acc: 0.9359 - val_loss: 0.3329 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 01817: val_acc did not improve from 0.94225\n",
+ "Epoch 1818/100000\n",
+ " - 18s - loss: 0.3493 - acc: 0.9352 - val_loss: 0.3462 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 01818: val_acc did not improve from 0.94225\n",
+ "Epoch 1819/100000\n",
+ " - 19s - loss: 0.3462 - acc: 0.9366 - val_loss: 0.3872 - val_acc: 0.9099\n",
+ "\n",
+ "Epoch 01819: val_acc did not improve from 0.94225\n",
+ "Epoch 1820/100000\n",
+ " - 18s - loss: 0.3465 - acc: 0.9351 - val_loss: 0.4943 - val_acc: 0.8578\n",
+ "\n",
+ "Epoch 01820: val_acc did not improve from 0.94225\n",
+ "Epoch 1821/100000\n",
+ " - 19s - loss: 0.3467 - acc: 0.9355 - val_loss: 0.3535 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 01821: val_acc did not improve from 0.94225\n",
+ "Epoch 1822/100000\n",
+ " - 18s - loss: 0.3455 - acc: 0.9355 - val_loss: 0.3351 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01822: val_acc did not improve from 0.94225\n",
+ "Epoch 1823/100000\n",
+ " - 19s - loss: 0.3469 - acc: 0.9359 - val_loss: 0.3380 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 01823: val_acc did not improve from 0.94225\n",
+ "Epoch 1824/100000\n",
+ " - 18s - loss: 0.3442 - acc: 0.9356 - val_loss: 0.3514 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 01824: val_acc did not improve from 0.94225\n",
+ "Epoch 1825/100000\n",
+ " - 19s - loss: 0.3450 - acc: 0.9359 - val_loss: 0.4265 - val_acc: 0.8963\n",
+ "\n",
+ "Epoch 01825: val_acc did not improve from 0.94225\n",
+ "Epoch 1826/100000\n",
+ " - 19s - loss: 0.3453 - acc: 0.9362 - val_loss: 0.3394 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01826: val_acc did not improve from 0.94225\n",
+ "Epoch 1827/100000\n",
+ " - 18s - loss: 0.3455 - acc: 0.9353 - val_loss: 0.3601 - val_acc: 0.9244\n",
+ "\n",
+ "Epoch 01827: val_acc did not improve from 0.94225\n",
+ "Epoch 1828/100000\n",
+ " - 19s - loss: 0.3453 - acc: 0.9359 - val_loss: 0.3357 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 01828: val_acc did not improve from 0.94225\n",
+ "Epoch 1829/100000\n",
+ " - 19s - loss: 0.3459 - acc: 0.9356 - val_loss: 0.3413 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 01829: val_acc did not improve from 0.94225\n",
+ "Epoch 1830/100000\n",
+ " - 19s - loss: 0.3445 - acc: 0.9353 - val_loss: 0.3349 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 01830: val_acc did not improve from 0.94225\n",
+ "Epoch 1831/100000\n",
+ " - 18s - loss: 0.3448 - acc: 0.9364 - val_loss: 0.3533 - val_acc: 0.9257\n",
+ "\n",
+ "Epoch 01831: val_acc did not improve from 0.94225\n",
+ "Epoch 1832/100000\n",
+ " - 19s - loss: 0.3459 - acc: 0.9356 - val_loss: 0.3687 - val_acc: 0.9172\n",
+ "\n",
+ "Epoch 01832: val_acc did not improve from 0.94225\n",
+ "Epoch 1833/100000\n",
+ " - 19s - loss: 0.3490 - acc: 0.9345 - val_loss: 0.3467 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 01833: val_acc did not improve from 0.94225\n",
+ "Epoch 1834/100000\n",
+ " - 19s - loss: 0.3482 - acc: 0.9358 - val_loss: 0.3371 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 01834: val_acc did not improve from 0.94225\n",
+ "Epoch 1835/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9363 - val_loss: 0.3371 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 01835: val_acc did not improve from 0.94225\n",
+ "Epoch 1836/100000\n",
+ " - 19s - loss: 0.3487 - acc: 0.9353 - val_loss: 0.4199 - val_acc: 0.8958\n",
+ "\n",
+ "Epoch 01836: val_acc did not improve from 0.94225\n",
+ "Epoch 1837/100000\n",
+ " - 19s - loss: 0.3425 - acc: 0.9361 - val_loss: 0.4083 - val_acc: 0.9120\n",
+ "\n",
+ "Epoch 01837: val_acc did not improve from 0.94225\n",
+ "Epoch 1838/100000\n",
+ " - 19s - loss: 0.3433 - acc: 0.9363 - val_loss: 0.3328 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01838: val_acc did not improve from 0.94225\n",
+ "Epoch 1839/100000\n",
+ " - 19s - loss: 0.3430 - acc: 0.9363 - val_loss: 0.3676 - val_acc: 0.9196\n",
+ "\n",
+ "Epoch 01839: val_acc did not improve from 0.94225\n",
+ "Epoch 1840/100000\n",
+ " - 19s - loss: 0.3451 - acc: 0.9357 - val_loss: 0.3266 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 01840: val_acc did not improve from 0.94225\n",
+ "Epoch 1841/100000\n",
+ " - 19s - loss: 0.3429 - acc: 0.9358 - val_loss: 0.4048 - val_acc: 0.8994\n",
+ "\n",
+ "Epoch 01841: val_acc did not improve from 0.94225\n",
+ "Epoch 1842/100000\n",
+ " - 19s - loss: 0.3487 - acc: 0.9354 - val_loss: 0.3389 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 01842: val_acc did not improve from 0.94225\n",
+ "Epoch 1843/100000\n",
+ " - 19s - loss: 0.3438 - acc: 0.9361 - val_loss: 0.3264 - val_acc: 0.9400\n",
+ "\n",
+ "Epoch 01843: val_acc did not improve from 0.94225\n",
+ "Epoch 1844/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9353 - val_loss: 0.3441 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 01844: val_acc did not improve from 0.94225\n",
+ "Epoch 1845/100000\n",
+ " - 19s - loss: 0.3437 - acc: 0.9360 - val_loss: 0.3494 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 01845: val_acc did not improve from 0.94225\n",
+ "Epoch 1846/100000\n",
+ " - 19s - loss: 0.3463 - acc: 0.9351 - val_loss: 0.3424 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 01846: val_acc did not improve from 0.94225\n",
+ "Epoch 1847/100000\n",
+ " - 19s - loss: 0.3446 - acc: 0.9356 - val_loss: 0.3768 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 01847: val_acc did not improve from 0.94225\n",
+ "Epoch 1848/100000\n",
+ " - 19s - loss: 0.3474 - acc: 0.9363 - val_loss: 0.3597 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 01848: val_acc did not improve from 0.94225\n",
+ "Epoch 1849/100000\n",
+ " - 19s - loss: 0.3456 - acc: 0.9359 - val_loss: 0.3336 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 01849: val_acc did not improve from 0.94225\n",
+ "Epoch 1850/100000\n",
+ " - 19s - loss: 0.3447 - acc: 0.9362 - val_loss: 0.3817 - val_acc: 0.9098\n",
+ "\n",
+ "Epoch 01850: val_acc did not improve from 0.94225\n",
+ "Epoch 1851/100000\n",
+ " - 19s - loss: 0.3429 - acc: 0.9366 - val_loss: 0.3690 - val_acc: 0.9146\n",
+ "\n",
+ "Epoch 01851: val_acc did not improve from 0.94225\n",
+ "Epoch 1852/100000\n",
+ " - 19s - loss: 0.3431 - acc: 0.9359 - val_loss: 0.3440 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 01852: val_acc did not improve from 0.94225\n",
+ "Epoch 1853/100000\n",
+ " - 19s - loss: 0.3442 - acc: 0.9356 - val_loss: 0.3455 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 01853: val_acc did not improve from 0.94225\n",
+ "Epoch 1854/100000\n",
+ " - 18s - loss: 0.3458 - acc: 0.9353 - val_loss: 0.3345 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01854: val_acc did not improve from 0.94225\n",
+ "Epoch 1855/100000\n",
+ " - 19s - loss: 0.3426 - acc: 0.9366 - val_loss: 0.3273 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 01855: val_acc did not improve from 0.94225\n",
+ "Epoch 1856/100000\n",
+ " - 19s - loss: 0.3475 - acc: 0.9348 - val_loss: 0.3643 - val_acc: 0.9217\n",
+ "\n",
+ "Epoch 01856: val_acc did not improve from 0.94225\n",
+ "Epoch 1857/100000\n",
+ " - 18s - loss: 0.3485 - acc: 0.9351 - val_loss: 0.3886 - val_acc: 0.9213\n",
+ "\n",
+ "Epoch 01857: val_acc did not improve from 0.94225\n",
+ "Epoch 1858/100000\n",
+ " - 19s - loss: 0.3481 - acc: 0.9356 - val_loss: 0.3429 - val_acc: 0.9258\n",
+ "\n",
+ "Epoch 01858: val_acc did not improve from 0.94225\n",
+ "Epoch 1859/100000\n",
+ " - 19s - loss: 0.3457 - acc: 0.9355 - val_loss: 0.3502 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 01859: val_acc did not improve from 0.94225\n",
+ "Epoch 1860/100000\n",
+ " - 19s - loss: 0.3444 - acc: 0.9357 - val_loss: 0.3297 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 01860: val_acc did not improve from 0.94225\n",
+ "Epoch 1861/100000\n",
+ " - 18s - loss: 0.3473 - acc: 0.9359 - val_loss: 0.3376 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01861: val_acc did not improve from 0.94225\n",
+ "Epoch 1862/100000\n",
+ " - 19s - loss: 0.3442 - acc: 0.9366 - val_loss: 0.3787 - val_acc: 0.9207\n",
+ "\n",
+ "Epoch 01862: val_acc did not improve from 0.94225\n",
+ "Epoch 1863/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9356 - val_loss: 0.3275 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 01863: val_acc did not improve from 0.94225\n",
+ "Epoch 1864/100000\n",
+ " - 19s - loss: 0.3443 - acc: 0.9361 - val_loss: 0.3474 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01864: val_acc did not improve from 0.94225\n",
+ "Epoch 1865/100000\n",
+ " - 19s - loss: 0.3452 - acc: 0.9356 - val_loss: 0.4720 - val_acc: 0.8775\n",
+ "\n",
+ "Epoch 01865: val_acc did not improve from 0.94225\n",
+ "Epoch 1866/100000\n",
+ " - 19s - loss: 0.3463 - acc: 0.9364 - val_loss: 0.3485 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 01866: val_acc did not improve from 0.94225\n",
+ "Epoch 1867/100000\n",
+ " - 19s - loss: 0.3443 - acc: 0.9360 - val_loss: 0.3654 - val_acc: 0.9213\n",
+ "\n",
+ "Epoch 01867: val_acc did not improve from 0.94225\n",
+ "Epoch 1868/100000\n",
+ " - 19s - loss: 0.3439 - acc: 0.9363 - val_loss: 0.4282 - val_acc: 0.8903\n",
+ "\n",
+ "Epoch 01868: val_acc did not improve from 0.94225\n",
+ "Epoch 1869/100000\n",
+ " - 19s - loss: 0.3466 - acc: 0.9356 - val_loss: 0.3756 - val_acc: 0.9195\n",
+ "\n",
+ "Epoch 01869: val_acc did not improve from 0.94225\n",
+ "Epoch 1870/100000\n",
+ " - 19s - loss: 0.3430 - acc: 0.9366 - val_loss: 0.3423 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 01870: val_acc did not improve from 0.94225\n",
+ "Epoch 1871/100000\n",
+ " - 19s - loss: 0.3438 - acc: 0.9356 - val_loss: 0.3364 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 01871: val_acc did not improve from 0.94225\n",
+ "Epoch 1872/100000\n",
+ " - 19s - loss: 0.3428 - acc: 0.9361 - val_loss: 0.3392 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 01872: val_acc did not improve from 0.94225\n",
+ "Epoch 1873/100000\n",
+ " - 19s - loss: 0.3440 - acc: 0.9363 - val_loss: 0.3449 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01873: val_acc did not improve from 0.94225\n",
+ "Epoch 1874/100000\n",
+ " - 19s - loss: 0.3462 - acc: 0.9351 - val_loss: 0.3953 - val_acc: 0.9092\n",
+ "\n",
+ "Epoch 01874: val_acc did not improve from 0.94225\n",
+ "Epoch 1875/100000\n",
+ " - 19s - loss: 0.3439 - acc: 0.9367 - val_loss: 0.3386 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 01875: val_acc did not improve from 0.94225\n",
+ "Epoch 1876/100000\n",
+ " - 19s - loss: 0.3451 - acc: 0.9361 - val_loss: 0.3408 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01876: val_acc did not improve from 0.94225\n",
+ "Epoch 1877/100000\n",
+ " - 19s - loss: 0.3465 - acc: 0.9359 - val_loss: 0.3338 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 01877: val_acc did not improve from 0.94225\n",
+ "Epoch 1878/100000\n",
+ " - 19s - loss: 0.3448 - acc: 0.9360 - val_loss: 0.3281 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 01878: val_acc did not improve from 0.94225\n",
+ "Epoch 1879/100000\n",
+ " - 19s - loss: 0.3501 - acc: 0.9347 - val_loss: 0.3600 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 01879: val_acc did not improve from 0.94225\n",
+ "Epoch 1880/100000\n",
+ " - 19s - loss: 0.3444 - acc: 0.9362 - val_loss: 0.3468 - val_acc: 0.9281\n",
+ "\n",
+ "Epoch 01880: val_acc did not improve from 0.94225\n",
+ "Epoch 1881/100000\n",
+ " - 19s - loss: 0.3463 - acc: 0.9356 - val_loss: 0.3463 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01881: val_acc did not improve from 0.94225\n",
+ "Epoch 1882/100000\n",
+ " - 19s - loss: 0.3456 - acc: 0.9362 - val_loss: 0.3378 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01882: val_acc did not improve from 0.94225\n",
+ "Epoch 1883/100000\n",
+ " - 19s - loss: 0.3486 - acc: 0.9348 - val_loss: 0.4030 - val_acc: 0.9203\n",
+ "\n",
+ "Epoch 01883: val_acc did not improve from 0.94225\n",
+ "Epoch 1884/100000\n",
+ " - 18s - loss: 0.3457 - acc: 0.9350 - val_loss: 0.3446 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 01884: val_acc did not improve from 0.94225\n",
+ "Epoch 1885/100000\n",
+ " - 19s - loss: 0.3457 - acc: 0.9359 - val_loss: 0.3403 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 01885: val_acc did not improve from 0.94225\n",
+ "Epoch 1886/100000\n",
+ " - 18s - loss: 0.3436 - acc: 0.9359 - val_loss: 0.3296 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 01886: val_acc did not improve from 0.94225\n",
+ "Epoch 1887/100000\n",
+ " - 19s - loss: 0.3444 - acc: 0.9356 - val_loss: 0.3730 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 01887: val_acc did not improve from 0.94225\n",
+ "Epoch 1888/100000\n",
+ " - 18s - loss: 0.3432 - acc: 0.9364 - val_loss: 0.3392 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 01888: val_acc did not improve from 0.94225\n",
+ "Epoch 1889/100000\n",
+ " - 19s - loss: 0.3470 - acc: 0.9346 - val_loss: 0.3721 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 01889: val_acc did not improve from 0.94225\n",
+ "Epoch 1890/100000\n",
+ " - 18s - loss: 0.3470 - acc: 0.9361 - val_loss: 0.4126 - val_acc: 0.9019\n",
+ "\n",
+ "Epoch 01890: val_acc did not improve from 0.94225\n",
+ "Epoch 1891/100000\n",
+ " - 19s - loss: 0.3465 - acc: 0.9352 - val_loss: 0.3599 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 01891: val_acc did not improve from 0.94225\n",
+ "Epoch 1892/100000\n",
+ " - 18s - loss: 0.3444 - acc: 0.9359 - val_loss: 0.3766 - val_acc: 0.9184\n",
+ "\n",
+ "Epoch 01892: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 01892: ReduceLROnPlateau reducing learning rate to 0.0006634203542489559.\n",
+ "Epoch 1893/100000\n",
+ " - 18s - loss: 0.3396 - acc: 0.9357 - val_loss: 0.3297 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01893: val_acc did not improve from 0.94225\n",
+ "Epoch 1894/100000\n",
+ " - 18s - loss: 0.3397 - acc: 0.9355 - val_loss: 0.3516 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 01894: val_acc did not improve from 0.94225\n",
+ "Epoch 1895/100000\n",
+ " - 18s - loss: 0.3364 - acc: 0.9359 - val_loss: 0.3380 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 01895: val_acc did not improve from 0.94225\n",
+ "Epoch 1896/100000\n",
+ " - 19s - loss: 0.3376 - acc: 0.9362 - val_loss: 0.3287 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01896: val_acc did not improve from 0.94225\n",
+ "Epoch 1897/100000\n",
+ " - 18s - loss: 0.3398 - acc: 0.9353 - val_loss: 0.5274 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 01897: val_acc did not improve from 0.94225\n",
+ "Epoch 1898/100000\n",
+ " - 19s - loss: 0.3387 - acc: 0.9359 - val_loss: 0.6311 - val_acc: 0.8186\n",
+ "\n",
+ "Epoch 01898: val_acc did not improve from 0.94225\n",
+ "Epoch 1899/100000\n",
+ " - 19s - loss: 0.3411 - acc: 0.9353 - val_loss: 0.3286 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 01899: val_acc did not improve from 0.94225\n",
+ "Epoch 1900/100000\n",
+ " - 19s - loss: 0.3371 - acc: 0.9370 - val_loss: 0.3265 - val_acc: 0.9394\n",
+ "\n",
+ "Epoch 01900: val_acc did not improve from 0.94225\n",
+ "Epoch 1901/100000\n",
+ " - 19s - loss: 0.3371 - acc: 0.9364 - val_loss: 0.3699 - val_acc: 0.9204\n",
+ "\n",
+ "Epoch 01901: val_acc did not improve from 0.94225\n",
+ "Epoch 1902/100000\n",
+ " - 19s - loss: 0.3402 - acc: 0.9352 - val_loss: 0.3211 - val_acc: 0.9392\n",
+ "\n",
+ "Epoch 01902: val_acc did not improve from 0.94225\n",
+ "Epoch 1903/100000\n",
+ " - 19s - loss: 0.3367 - acc: 0.9364 - val_loss: 0.3658 - val_acc: 0.9219\n",
+ "\n",
+ "Epoch 01903: val_acc did not improve from 0.94225\n",
+ "Epoch 1904/100000\n",
+ " - 18s - loss: 0.3389 - acc: 0.9361 - val_loss: 0.3663 - val_acc: 0.9204\n",
+ "\n",
+ "Epoch 01904: val_acc did not improve from 0.94225\n",
+ "Epoch 1905/100000\n",
+ " - 19s - loss: 0.3407 - acc: 0.9355 - val_loss: 0.3689 - val_acc: 0.9154\n",
+ "\n",
+ "Epoch 01905: val_acc did not improve from 0.94225\n",
+ "Epoch 1906/100000\n",
+ " - 18s - loss: 0.3377 - acc: 0.9362 - val_loss: 0.3263 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 01906: val_acc did not improve from 0.94225\n",
+ "Epoch 1907/100000\n",
+ " - 18s - loss: 0.3376 - acc: 0.9362 - val_loss: 0.4354 - val_acc: 0.8946\n",
+ "\n",
+ "Epoch 01907: val_acc did not improve from 0.94225\n",
+ "Epoch 1908/100000\n",
+ " - 19s - loss: 0.3376 - acc: 0.9363 - val_loss: 0.3607 - val_acc: 0.9279\n",
+ "\n",
+ "Epoch 01908: val_acc did not improve from 0.94225\n",
+ "Epoch 1909/100000\n",
+ " - 19s - loss: 0.3379 - acc: 0.9358 - val_loss: 0.3790 - val_acc: 0.9100\n",
+ "\n",
+ "Epoch 01909: val_acc did not improve from 0.94225\n",
+ "Epoch 1910/100000\n",
+ " - 18s - loss: 0.3392 - acc: 0.9357 - val_loss: 0.3260 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 01910: val_acc did not improve from 0.94225\n",
+ "Epoch 1911/100000\n",
+ " - 19s - loss: 0.3397 - acc: 0.9354 - val_loss: 0.3240 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 01911: val_acc did not improve from 0.94225\n",
+ "Epoch 1912/100000\n",
+ " - 19s - loss: 0.3392 - acc: 0.9358 - val_loss: 0.3316 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 01912: val_acc did not improve from 0.94225\n",
+ "Epoch 1913/100000\n",
+ " - 19s - loss: 0.3367 - acc: 0.9365 - val_loss: 0.3337 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 01913: val_acc did not improve from 0.94225\n",
+ "Epoch 1914/100000\n",
+ " - 19s - loss: 0.3390 - acc: 0.9362 - val_loss: 0.3576 - val_acc: 0.9208\n",
+ "\n",
+ "Epoch 01914: val_acc did not improve from 0.94225\n",
+ "Epoch 1915/100000\n",
+ " - 19s - loss: 0.3398 - acc: 0.9358 - val_loss: 0.3367 - val_acc: 0.9393\n",
+ "\n",
+ "Epoch 01915: val_acc did not improve from 0.94225\n",
+ "Epoch 1916/100000\n",
+ " - 18s - loss: 0.3396 - acc: 0.9354 - val_loss: 0.3309 - val_acc: 0.9377\n",
+ "\n",
+ "Epoch 01916: val_acc did not improve from 0.94225\n",
+ "Epoch 1917/100000\n",
+ " - 19s - loss: 0.3388 - acc: 0.9361 - val_loss: 0.3215 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 01917: val_acc did not improve from 0.94225\n",
+ "Epoch 1918/100000\n",
+ " - 18s - loss: 0.3384 - acc: 0.9360 - val_loss: 0.3281 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 01918: val_acc did not improve from 0.94225\n",
+ "Epoch 1919/100000\n",
+ " - 19s - loss: 0.3367 - acc: 0.9371 - val_loss: 0.3318 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01919: val_acc did not improve from 0.94225\n",
+ "Epoch 1920/100000\n",
+ " - 18s - loss: 0.3412 - acc: 0.9350 - val_loss: 0.3378 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01920: val_acc did not improve from 0.94225\n",
+ "Epoch 1921/100000\n",
+ " - 19s - loss: 0.3380 - acc: 0.9365 - val_loss: 0.3630 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 01921: val_acc did not improve from 0.94225\n",
+ "Epoch 1922/100000\n",
+ " - 18s - loss: 0.3374 - acc: 0.9363 - val_loss: 0.3540 - val_acc: 0.9206\n",
+ "\n",
+ "Epoch 01922: val_acc did not improve from 0.94225\n",
+ "Epoch 1923/100000\n",
+ " - 19s - loss: 0.3382 - acc: 0.9356 - val_loss: 0.3264 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 01923: val_acc did not improve from 0.94225\n",
+ "Epoch 1924/100000\n",
+ " - 19s - loss: 0.3390 - acc: 0.9351 - val_loss: 0.3745 - val_acc: 0.9175\n",
+ "\n",
+ "Epoch 01924: val_acc did not improve from 0.94225\n",
+ "Epoch 1925/100000\n",
+ " - 18s - loss: 0.3367 - acc: 0.9365 - val_loss: 0.4982 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 01925: val_acc did not improve from 0.94225\n",
+ "Epoch 1926/100000\n",
+ " - 19s - loss: 0.3383 - acc: 0.9354 - val_loss: 0.3614 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 01926: val_acc did not improve from 0.94225\n",
+ "Epoch 1927/100000\n",
+ " - 19s - loss: 0.3390 - acc: 0.9359 - val_loss: 0.4017 - val_acc: 0.9062\n",
+ "\n",
+ "Epoch 01927: val_acc did not improve from 0.94225\n",
+ "Epoch 1928/100000\n",
+ " - 19s - loss: 0.3574 - acc: 0.9344 - val_loss: 0.3459 - val_acc: 0.9274\n",
+ "\n",
+ "Epoch 01928: val_acc did not improve from 0.94225\n",
+ "Epoch 1929/100000\n",
+ " - 19s - loss: 0.3433 - acc: 0.9359 - val_loss: 0.3590 - val_acc: 0.9246\n",
+ "\n",
+ "Epoch 01929: val_acc did not improve from 0.94225\n",
+ "Epoch 1930/100000\n",
+ " - 19s - loss: 0.3396 - acc: 0.9363 - val_loss: 0.3248 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 01930: val_acc did not improve from 0.94225\n",
+ "Epoch 1931/100000\n",
+ " - 18s - loss: 0.3386 - acc: 0.9363 - val_loss: 0.3324 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 01931: val_acc did not improve from 0.94225\n",
+ "Epoch 1932/100000\n",
+ " - 19s - loss: 0.3385 - acc: 0.9357 - val_loss: 0.3623 - val_acc: 0.9247\n",
+ "\n",
+ "Epoch 01932: val_acc did not improve from 0.94225\n",
+ "Epoch 1933/100000\n",
+ " - 19s - loss: 0.3369 - acc: 0.9367 - val_loss: 0.3405 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 01933: val_acc did not improve from 0.94225\n",
+ "Epoch 1934/100000\n",
+ " - 19s - loss: 0.3373 - acc: 0.9365 - val_loss: 0.3684 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 01934: val_acc did not improve from 0.94225\n",
+ "Epoch 1935/100000\n",
+ " - 19s - loss: 0.3375 - acc: 0.9365 - val_loss: 0.3464 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 01935: val_acc did not improve from 0.94225\n",
+ "Epoch 1936/100000\n",
+ " - 19s - loss: 0.3357 - acc: 0.9368 - val_loss: 0.4412 - val_acc: 0.8824\n",
+ "\n",
+ "Epoch 01936: val_acc did not improve from 0.94225\n",
+ "Epoch 1937/100000\n",
+ " - 19s - loss: 0.3405 - acc: 0.9347 - val_loss: 0.3646 - val_acc: 0.9215\n",
+ "\n",
+ "Epoch 01937: val_acc did not improve from 0.94225\n",
+ "Epoch 1938/100000\n",
+ " - 19s - loss: 0.3385 - acc: 0.9350 - val_loss: 0.3396 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 01938: val_acc did not improve from 0.94225\n",
+ "Epoch 1939/100000\n",
+ " - 19s - loss: 0.3375 - acc: 0.9359 - val_loss: 0.3470 - val_acc: 0.9251\n",
+ "\n",
+ "Epoch 01939: val_acc did not improve from 0.94225\n",
+ "Epoch 1940/100000\n",
+ " - 19s - loss: 0.3379 - acc: 0.9358 - val_loss: 0.3424 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 01940: val_acc did not improve from 0.94225\n",
+ "Epoch 1941/100000\n",
+ " - 18s - loss: 0.3372 - acc: 0.9357 - val_loss: 0.3254 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 01941: val_acc did not improve from 0.94225\n",
+ "Epoch 1942/100000\n",
+ " - 19s - loss: 0.3348 - acc: 0.9366 - val_loss: 0.4281 - val_acc: 0.8955\n",
+ "\n",
+ "Epoch 01942: val_acc did not improve from 0.94225\n",
+ "Epoch 1943/100000\n",
+ " - 19s - loss: 0.3380 - acc: 0.9364 - val_loss: 0.4161 - val_acc: 0.8874\n",
+ "\n",
+ "Epoch 01943: val_acc did not improve from 0.94225\n",
+ "Epoch 1944/100000\n",
+ " - 18s - loss: 0.3364 - acc: 0.9367 - val_loss: 0.3362 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 01944: val_acc did not improve from 0.94225\n",
+ "Epoch 1945/100000\n",
+ " - 19s - loss: 0.3383 - acc: 0.9357 - val_loss: 0.3365 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 01945: val_acc did not improve from 0.94225\n",
+ "Epoch 1946/100000\n",
+ " - 18s - loss: 0.3394 - acc: 0.9355 - val_loss: 0.3554 - val_acc: 0.9262\n",
+ "\n",
+ "Epoch 01946: val_acc did not improve from 0.94225\n",
+ "Epoch 1947/100000\n",
+ " - 18s - loss: 0.3407 - acc: 0.9352 - val_loss: 0.3323 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 01947: val_acc did not improve from 0.94225\n",
+ "Epoch 1948/100000\n",
+ " - 18s - loss: 0.3358 - acc: 0.9365 - val_loss: 0.3352 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 01948: val_acc did not improve from 0.94225\n",
+ "Epoch 1949/100000\n",
+ " - 18s - loss: 0.3373 - acc: 0.9364 - val_loss: 0.3615 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 01949: val_acc did not improve from 0.94225\n",
+ "Epoch 1950/100000\n",
+ " - 18s - loss: 0.3383 - acc: 0.9360 - val_loss: 0.4481 - val_acc: 0.8908\n",
+ "\n",
+ "Epoch 01950: val_acc did not improve from 0.94225\n",
+ "Epoch 1951/100000\n",
+ " - 18s - loss: 0.3353 - acc: 0.9367 - val_loss: 0.3681 - val_acc: 0.9222\n",
+ "\n",
+ "Epoch 01951: val_acc did not improve from 0.94225\n",
+ "Epoch 1952/100000\n",
+ " - 19s - loss: 0.3383 - acc: 0.9358 - val_loss: 0.3576 - val_acc: 0.9182\n",
+ "\n",
+ "Epoch 01952: val_acc did not improve from 0.94225\n",
+ "Epoch 1953/100000\n",
+ " - 19s - loss: 0.3391 - acc: 0.9357 - val_loss: 0.3268 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 01953: val_acc did not improve from 0.94225\n",
+ "Epoch 1954/100000\n",
+ " - 19s - loss: 0.3412 - acc: 0.9361 - val_loss: 0.3746 - val_acc: 0.9137\n",
+ "\n",
+ "Epoch 01954: val_acc did not improve from 0.94225\n",
+ "Epoch 1955/100000\n",
+ " - 18s - loss: 0.3406 - acc: 0.9357 - val_loss: 0.3281 - val_acc: 0.9412\n",
+ "\n",
+ "Epoch 01955: val_acc did not improve from 0.94225\n",
+ "Epoch 1956/100000\n",
+ " - 19s - loss: 0.3355 - acc: 0.9367 - val_loss: 0.3335 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 01956: val_acc did not improve from 0.94225\n",
+ "Epoch 1957/100000\n",
+ " - 19s - loss: 0.3379 - acc: 0.9359 - val_loss: 0.3946 - val_acc: 0.8980\n",
+ "\n",
+ "Epoch 01957: val_acc did not improve from 0.94225\n",
+ "Epoch 1958/100000\n",
+ " - 19s - loss: 0.3383 - acc: 0.9357 - val_loss: 0.3344 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 01958: val_acc did not improve from 0.94225\n",
+ "Epoch 1959/100000\n",
+ " - 19s - loss: 0.3366 - acc: 0.9359 - val_loss: 0.3317 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 01959: val_acc did not improve from 0.94225\n",
+ "Epoch 1960/100000\n",
+ " - 19s - loss: 0.3396 - acc: 0.9348 - val_loss: 0.3438 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 01960: val_acc did not improve from 0.94225\n",
+ "Epoch 1961/100000\n",
+ " - 18s - loss: 0.3386 - acc: 0.9361 - val_loss: 0.3336 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01961: val_acc did not improve from 0.94225\n",
+ "Epoch 1962/100000\n",
+ " - 19s - loss: 0.3856 - acc: 0.9325 - val_loss: 0.3397 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 01962: val_acc did not improve from 0.94225\n",
+ "Epoch 1963/100000\n",
+ " - 19s - loss: 0.3484 - acc: 0.9358 - val_loss: 0.3832 - val_acc: 0.9013\n",
+ "\n",
+ "Epoch 01963: val_acc did not improve from 0.94225\n",
+ "Epoch 1964/100000\n",
+ " - 18s - loss: 0.3457 - acc: 0.9352 - val_loss: 0.3273 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 01964: val_acc did not improve from 0.94225\n",
+ "Epoch 1965/100000\n",
+ " - 19s - loss: 0.3445 - acc: 0.9350 - val_loss: 0.3479 - val_acc: 0.9305\n",
+ "\n",
+ "Epoch 01965: val_acc did not improve from 0.94225\n",
+ "Epoch 1966/100000\n",
+ " - 19s - loss: 0.3433 - acc: 0.9355 - val_loss: 0.3806 - val_acc: 0.9073\n",
+ "\n",
+ "Epoch 01966: val_acc did not improve from 0.94225\n",
+ "Epoch 1967/100000\n",
+ " - 19s - loss: 0.3463 - acc: 0.9341 - val_loss: 0.3320 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01967: val_acc did not improve from 0.94225\n",
+ "Epoch 1968/100000\n",
+ " - 19s - loss: 0.3429 - acc: 0.9354 - val_loss: 0.3834 - val_acc: 0.9122\n",
+ "\n",
+ "Epoch 01968: val_acc did not improve from 0.94225\n",
+ "Epoch 1969/100000\n",
+ " - 19s - loss: 0.3416 - acc: 0.9362 - val_loss: 0.6219 - val_acc: 0.7903\n",
+ "\n",
+ "Epoch 01969: val_acc did not improve from 0.94225\n",
+ "Epoch 1970/100000\n",
+ " - 18s - loss: 0.3480 - acc: 0.9336 - val_loss: 0.3623 - val_acc: 0.9226\n",
+ "\n",
+ "Epoch 01970: val_acc did not improve from 0.94225\n",
+ "Epoch 1971/100000\n",
+ " - 19s - loss: 0.3420 - acc: 0.9356 - val_loss: 0.3406 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01971: val_acc did not improve from 0.94225\n",
+ "Epoch 1972/100000\n",
+ " - 18s - loss: 0.3414 - acc: 0.9354 - val_loss: 0.4540 - val_acc: 0.8788\n",
+ "\n",
+ "Epoch 01972: val_acc did not improve from 0.94225\n",
+ "Epoch 1973/100000\n",
+ " - 18s - loss: 0.3437 - acc: 0.9353 - val_loss: 0.4030 - val_acc: 0.8900\n",
+ "\n",
+ "Epoch 01973: val_acc did not improve from 0.94225\n",
+ "Epoch 1974/100000\n",
+ " - 18s - loss: 0.3437 - acc: 0.9354 - val_loss: 0.3295 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 01974: val_acc did not improve from 0.94225\n",
+ "Epoch 1975/100000\n",
+ " - 18s - loss: 0.3416 - acc: 0.9357 - val_loss: 0.3305 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 01975: val_acc did not improve from 0.94225\n",
+ "Epoch 1976/100000\n",
+ " - 19s - loss: 0.3437 - acc: 0.9355 - val_loss: 0.3444 - val_acc: 0.9259\n",
+ "\n",
+ "Epoch 01976: val_acc did not improve from 0.94225\n",
+ "Epoch 1977/100000\n",
+ " - 19s - loss: 0.3402 - acc: 0.9362 - val_loss: 0.4115 - val_acc: 0.8915\n",
+ "\n",
+ "Epoch 01977: val_acc did not improve from 0.94225\n",
+ "Epoch 1978/100000\n",
+ " - 19s - loss: 0.3410 - acc: 0.9358 - val_loss: 0.3272 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 01978: val_acc did not improve from 0.94225\n",
+ "Epoch 1979/100000\n",
+ " - 19s - loss: 0.3505 - acc: 0.9347 - val_loss: 0.3568 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 01979: val_acc did not improve from 0.94225\n",
+ "Epoch 1980/100000\n",
+ " - 19s - loss: 0.3437 - acc: 0.9361 - val_loss: 0.3264 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 01980: val_acc did not improve from 0.94225\n",
+ "Epoch 1981/100000\n",
+ " - 18s - loss: 0.3440 - acc: 0.9354 - val_loss: 0.3464 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 01981: val_acc did not improve from 0.94225\n",
+ "Epoch 1982/100000\n",
+ " - 19s - loss: 0.3444 - acc: 0.9356 - val_loss: 0.4236 - val_acc: 0.8851\n",
+ "\n",
+ "Epoch 01982: val_acc did not improve from 0.94225\n",
+ "Epoch 1983/100000\n",
+ " - 19s - loss: 0.3433 - acc: 0.9356 - val_loss: 0.3553 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 01983: val_acc did not improve from 0.94225\n",
+ "Epoch 1984/100000\n",
+ " - 19s - loss: 0.3427 - acc: 0.9357 - val_loss: 0.3376 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01984: val_acc did not improve from 0.94225\n",
+ "Epoch 1985/100000\n",
+ " - 19s - loss: 0.3416 - acc: 0.9357 - val_loss: 0.3321 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01985: val_acc did not improve from 0.94225\n",
+ "Epoch 1986/100000\n",
+ " - 18s - loss: 0.3424 - acc: 0.9353 - val_loss: 0.3612 - val_acc: 0.9177\n",
+ "\n",
+ "Epoch 01986: val_acc did not improve from 0.94225\n",
+ "Epoch 1987/100000\n",
+ " - 18s - loss: 0.3426 - acc: 0.9347 - val_loss: 0.4087 - val_acc: 0.8853\n",
+ "\n",
+ "Epoch 01987: val_acc did not improve from 0.94225\n",
+ "Epoch 1988/100000\n",
+ " - 19s - loss: 0.3441 - acc: 0.9350 - val_loss: 0.5892 - val_acc: 0.8060\n",
+ "\n",
+ "Epoch 01988: val_acc did not improve from 0.94225\n",
+ "Epoch 1989/100000\n",
+ " - 18s - loss: 0.3434 - acc: 0.9350 - val_loss: 0.3648 - val_acc: 0.9098\n",
+ "\n",
+ "Epoch 01989: val_acc did not improve from 0.94225\n",
+ "Epoch 1990/100000\n",
+ " - 19s - loss: 0.3408 - acc: 0.9360 - val_loss: 0.4497 - val_acc: 0.8646\n",
+ "\n",
+ "Epoch 01990: val_acc did not improve from 0.94225\n",
+ "Epoch 1991/100000\n",
+ " - 18s - loss: 0.3425 - acc: 0.9353 - val_loss: 0.3556 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 01991: val_acc did not improve from 0.94225\n",
+ "Epoch 1992/100000\n",
+ " - 19s - loss: 0.3443 - acc: 0.9344 - val_loss: 0.3330 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01992: val_acc did not improve from 0.94225\n",
+ "Epoch 1993/100000\n",
+ " - 19s - loss: 0.3420 - acc: 0.9350 - val_loss: 0.3585 - val_acc: 0.9165\n",
+ "\n",
+ "Epoch 01993: val_acc did not improve from 0.94225\n",
+ "Epoch 1994/100000\n",
+ " - 19s - loss: 0.3430 - acc: 0.9345 - val_loss: 0.3227 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 01994: val_acc did not improve from 0.94225\n",
+ "Epoch 1995/100000\n",
+ " - 19s - loss: 0.3397 - acc: 0.9355 - val_loss: 0.3269 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 01995: val_acc did not improve from 0.94225\n",
+ "Epoch 1996/100000\n",
+ " - 19s - loss: 0.3406 - acc: 0.9354 - val_loss: 0.3438 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 01996: val_acc did not improve from 0.94225\n",
+ "Epoch 1997/100000\n",
+ " - 19s - loss: 0.3442 - acc: 0.9349 - val_loss: 0.3937 - val_acc: 0.8923\n",
+ "\n",
+ "Epoch 01997: val_acc did not improve from 0.94225\n",
+ "Epoch 1998/100000\n",
+ " - 19s - loss: 0.3396 - acc: 0.9359 - val_loss: 0.4087 - val_acc: 0.8817\n",
+ "\n",
+ "Epoch 01998: val_acc did not improve from 0.94225\n",
+ "Epoch 1999/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9340 - val_loss: 0.3427 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01999: val_acc did not improve from 0.94225\n",
+ "Epoch 2000/100000\n",
+ " - 19s - loss: 0.3418 - acc: 0.9354 - val_loss: 0.3387 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 02000: val_acc did not improve from 0.94225\n",
+ "Epoch 2001/100000\n",
+ " - 18s - loss: 0.3411 - acc: 0.9349 - val_loss: 0.3382 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 02001: val_acc did not improve from 0.94225\n",
+ "Epoch 2002/100000\n",
+ " - 19s - loss: 0.3417 - acc: 0.9355 - val_loss: 0.3714 - val_acc: 0.9135\n",
+ "\n",
+ "Epoch 02002: val_acc did not improve from 0.94225\n",
+ "Epoch 2003/100000\n",
+ " - 19s - loss: 0.3412 - acc: 0.9353 - val_loss: 0.3347 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 02003: val_acc did not improve from 0.94225\n",
+ "Epoch 2004/100000\n",
+ " - 18s - loss: 0.3393 - acc: 0.9359 - val_loss: 0.3619 - val_acc: 0.9132\n",
+ "\n",
+ "Epoch 02004: val_acc did not improve from 0.94225\n",
+ "Epoch 2005/100000\n",
+ " - 19s - loss: 0.3427 - acc: 0.9346 - val_loss: 0.3287 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02005: val_acc did not improve from 0.94225\n",
+ "Epoch 2006/100000\n",
+ " - 18s - loss: 0.3422 - acc: 0.9347 - val_loss: 0.3251 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 02006: val_acc did not improve from 0.94225\n",
+ "Epoch 2007/100000\n",
+ " - 19s - loss: 0.3412 - acc: 0.9352 - val_loss: 0.3373 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 02007: val_acc did not improve from 0.94225\n",
+ "Epoch 2008/100000\n",
+ " - 19s - loss: 0.3386 - acc: 0.9358 - val_loss: 0.3450 - val_acc: 0.9257\n",
+ "\n",
+ "Epoch 02008: val_acc did not improve from 0.94225\n",
+ "Epoch 2009/100000\n",
+ " - 19s - loss: 0.3408 - acc: 0.9353 - val_loss: 0.4146 - val_acc: 0.9035\n",
+ "\n",
+ "Epoch 02009: val_acc did not improve from 0.94225\n",
+ "Epoch 2010/100000\n",
+ " - 19s - loss: 0.3421 - acc: 0.9343 - val_loss: 0.3258 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 02010: val_acc did not improve from 0.94225\n",
+ "Epoch 2011/100000\n",
+ " - 19s - loss: 0.3419 - acc: 0.9351 - val_loss: 0.3459 - val_acc: 0.9268\n",
+ "\n",
+ "Epoch 02011: val_acc did not improve from 0.94225\n",
+ "Epoch 2012/100000\n",
+ " - 19s - loss: 0.3410 - acc: 0.9352 - val_loss: 0.3782 - val_acc: 0.9057\n",
+ "\n",
+ "Epoch 02012: val_acc did not improve from 0.94225\n",
+ "Epoch 2013/100000\n",
+ " - 19s - loss: 0.3426 - acc: 0.9344 - val_loss: 0.4218 - val_acc: 0.8894\n",
+ "\n",
+ "Epoch 02013: val_acc did not improve from 0.94225\n",
+ "Epoch 2014/100000\n",
+ " - 19s - loss: 0.3398 - acc: 0.9348 - val_loss: 0.4696 - val_acc: 0.8771\n",
+ "\n",
+ "Epoch 02014: val_acc did not improve from 0.94225\n",
+ "Epoch 2015/100000\n",
+ " - 18s - loss: 0.3421 - acc: 0.9348 - val_loss: 0.3225 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 02015: val_acc did not improve from 0.94225\n",
+ "Epoch 2016/100000\n",
+ " - 19s - loss: 0.3427 - acc: 0.9345 - val_loss: 0.5538 - val_acc: 0.8742\n",
+ "\n",
+ "Epoch 02016: val_acc did not improve from 0.94225\n",
+ "Epoch 2017/100000\n",
+ " - 18s - loss: 0.3411 - acc: 0.9353 - val_loss: 0.3358 - val_acc: 0.9273\n",
+ "\n",
+ "Epoch 02017: val_acc did not improve from 0.94225\n",
+ "Epoch 2018/100000\n",
+ " - 19s - loss: 0.3414 - acc: 0.9354 - val_loss: 0.3666 - val_acc: 0.9091\n",
+ "\n",
+ "Epoch 02018: val_acc did not improve from 0.94225\n",
+ "Epoch 2019/100000\n",
+ " - 18s - loss: 0.3423 - acc: 0.9341 - val_loss: 0.3280 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 02019: val_acc did not improve from 0.94225\n",
+ "Epoch 2020/100000\n",
+ " - 19s - loss: 0.3418 - acc: 0.9349 - val_loss: 0.3397 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 02020: val_acc did not improve from 0.94225\n",
+ "Epoch 2021/100000\n",
+ " - 19s - loss: 0.3422 - acc: 0.9355 - val_loss: 0.3634 - val_acc: 0.9163\n",
+ "\n",
+ "Epoch 02021: val_acc did not improve from 0.94225\n",
+ "Epoch 2022/100000\n",
+ " - 19s - loss: 0.3421 - acc: 0.9351 - val_loss: 0.3620 - val_acc: 0.9134\n",
+ "\n",
+ "Epoch 02022: val_acc did not improve from 0.94225\n",
+ "Epoch 2023/100000\n",
+ " - 18s - loss: 0.3420 - acc: 0.9350 - val_loss: 0.3409 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 02023: val_acc did not improve from 0.94225\n",
+ "Epoch 2024/100000\n",
+ " - 19s - loss: 0.3410 - acc: 0.9350 - val_loss: 0.3271 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02024: val_acc did not improve from 0.94225\n",
+ "Epoch 2025/100000\n",
+ " - 19s - loss: 0.3422 - acc: 0.9347 - val_loss: 0.3480 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 02025: val_acc did not improve from 0.94225\n",
+ "Epoch 2026/100000\n",
+ " - 19s - loss: 0.3420 - acc: 0.9351 - val_loss: 0.3223 - val_acc: 0.9410\n",
+ "\n",
+ "Epoch 02026: val_acc did not improve from 0.94225\n",
+ "Epoch 2027/100000\n",
+ " - 19s - loss: 0.3437 - acc: 0.9338 - val_loss: 0.3394 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 02027: val_acc did not improve from 0.94225\n",
+ "Epoch 2028/100000\n",
+ " - 19s - loss: 0.3388 - acc: 0.9358 - val_loss: 0.3579 - val_acc: 0.9178\n",
+ "\n",
+ "Epoch 02028: val_acc did not improve from 0.94225\n",
+ "Epoch 2029/100000\n",
+ " - 19s - loss: 0.3411 - acc: 0.9353 - val_loss: 0.3750 - val_acc: 0.9064\n",
+ "\n",
+ "Epoch 02029: val_acc did not improve from 0.94225\n",
+ "Epoch 2030/100000\n",
+ " - 19s - loss: 0.3413 - acc: 0.9354 - val_loss: 0.3302 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 02030: val_acc did not improve from 0.94225\n",
+ "Epoch 2031/100000\n",
+ " - 18s - loss: 0.3403 - acc: 0.9355 - val_loss: 0.4689 - val_acc: 0.8633\n",
+ "\n",
+ "Epoch 02031: val_acc did not improve from 0.94225\n",
+ "Epoch 2032/100000\n",
+ " - 18s - loss: 0.3428 - acc: 0.9344 - val_loss: 0.3881 - val_acc: 0.9258\n",
+ "\n",
+ "Epoch 02032: val_acc did not improve from 0.94225\n",
+ "Epoch 2033/100000\n",
+ " - 19s - loss: 0.3427 - acc: 0.9350 - val_loss: 0.3290 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02033: val_acc did not improve from 0.94225\n",
+ "Epoch 2034/100000\n",
+ " - 18s - loss: 0.3427 - acc: 0.9347 - val_loss: 0.3256 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 02034: val_acc did not improve from 0.94225\n",
+ "Epoch 2035/100000\n",
+ " - 19s - loss: 0.3438 - acc: 0.9341 - val_loss: 0.3846 - val_acc: 0.9193\n",
+ "\n",
+ "Epoch 02035: val_acc did not improve from 0.94225\n",
+ "Epoch 2036/100000\n",
+ " - 19s - loss: 0.3393 - acc: 0.9358 - val_loss: 0.3229 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 02036: val_acc did not improve from 0.94225\n",
+ "Epoch 2037/100000\n",
+ " - 19s - loss: 0.3443 - acc: 0.9348 - val_loss: 0.3326 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 02037: val_acc did not improve from 0.94225\n",
+ "Epoch 2038/100000\n",
+ " - 19s - loss: 0.3406 - acc: 0.9351 - val_loss: 0.3303 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 02038: val_acc did not improve from 0.94225\n",
+ "Epoch 2039/100000\n",
+ " - 19s - loss: 0.3396 - acc: 0.9357 - val_loss: 0.3359 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 02039: val_acc did not improve from 0.94225\n",
+ "Epoch 2040/100000\n",
+ " - 19s - loss: 0.3399 - acc: 0.9355 - val_loss: 0.3266 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02040: val_acc did not improve from 0.94225\n",
+ "Epoch 2041/100000\n",
+ " - 18s - loss: 0.3422 - acc: 0.9348 - val_loss: 0.3240 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 02041: val_acc did not improve from 0.94225\n",
+ "Epoch 2042/100000\n",
+ " - 19s - loss: 0.3404 - acc: 0.9354 - val_loss: 0.3493 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 02042: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 02042: ReduceLROnPlateau reducing learning rate to 0.0006302493420662358.\n",
+ "Epoch 2043/100000\n",
+ " - 19s - loss: 0.3349 - acc: 0.9356 - val_loss: 0.3276 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 02043: val_acc did not improve from 0.94225\n",
+ "Epoch 2044/100000\n",
+ " - 19s - loss: 0.3355 - acc: 0.9348 - val_loss: 0.3959 - val_acc: 0.8978\n",
+ "\n",
+ "Epoch 02044: val_acc did not improve from 0.94225\n",
+ "Epoch 2045/100000\n",
+ " - 19s - loss: 0.3337 - acc: 0.9356 - val_loss: 0.3602 - val_acc: 0.9113\n",
+ "\n",
+ "Epoch 02045: val_acc did not improve from 0.94225\n",
+ "Epoch 2046/100000\n",
+ " - 19s - loss: 0.3352 - acc: 0.9351 - val_loss: 0.3152 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02046: val_acc did not improve from 0.94225\n",
+ "Epoch 2047/100000\n",
+ " - 19s - loss: 0.3325 - acc: 0.9355 - val_loss: 0.3465 - val_acc: 0.9179\n",
+ "\n",
+ "Epoch 02047: val_acc did not improve from 0.94225\n",
+ "Epoch 2048/100000\n",
+ " - 19s - loss: 0.3347 - acc: 0.9350 - val_loss: 0.3155 - val_acc: 0.9377\n",
+ "\n",
+ "Epoch 02048: val_acc did not improve from 0.94225\n",
+ "Epoch 2049/100000\n",
+ " - 19s - loss: 0.3343 - acc: 0.9358 - val_loss: 0.3084 - val_acc: 0.9406\n",
+ "\n",
+ "Epoch 02049: val_acc did not improve from 0.94225\n",
+ "Epoch 2050/100000\n",
+ " - 18s - loss: 0.3321 - acc: 0.9359 - val_loss: 0.3933 - val_acc: 0.9154\n",
+ "\n",
+ "Epoch 02050: val_acc did not improve from 0.94225\n",
+ "Epoch 2051/100000\n",
+ " - 19s - loss: 0.3356 - acc: 0.9350 - val_loss: 0.3647 - val_acc: 0.9146\n",
+ "\n",
+ "Epoch 02051: val_acc did not improve from 0.94225\n",
+ "Epoch 2052/100000\n",
+ " - 18s - loss: 0.3357 - acc: 0.9350 - val_loss: 0.3320 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 02052: val_acc did not improve from 0.94225\n",
+ "Epoch 2053/100000\n",
+ " - 19s - loss: 0.3345 - acc: 0.9350 - val_loss: 0.3236 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 02053: val_acc did not improve from 0.94225\n",
+ "Epoch 2054/100000\n",
+ " - 19s - loss: 0.3340 - acc: 0.9352 - val_loss: 0.3183 - val_acc: 0.9390\n",
+ "\n",
+ "Epoch 02054: val_acc did not improve from 0.94225\n",
+ "Epoch 2055/100000\n",
+ " - 18s - loss: 0.3337 - acc: 0.9354 - val_loss: 0.3158 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 02055: val_acc did not improve from 0.94225\n",
+ "Epoch 2056/100000\n",
+ " - 19s - loss: 0.3347 - acc: 0.9352 - val_loss: 0.3342 - val_acc: 0.9255\n",
+ "\n",
+ "Epoch 02056: val_acc did not improve from 0.94225\n",
+ "Epoch 2057/100000\n",
+ " - 18s - loss: 0.3335 - acc: 0.9357 - val_loss: 0.3153 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 02057: val_acc did not improve from 0.94225\n",
+ "Epoch 2058/100000\n",
+ " - 19s - loss: 0.3341 - acc: 0.9350 - val_loss: 0.3229 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 02058: val_acc did not improve from 0.94225\n",
+ "Epoch 2059/100000\n",
+ " - 19s - loss: 0.3336 - acc: 0.9355 - val_loss: 0.3492 - val_acc: 0.9208\n",
+ "\n",
+ "Epoch 02059: val_acc did not improve from 0.94225\n",
+ "Epoch 2060/100000\n",
+ " - 19s - loss: 0.3361 - acc: 0.9350 - val_loss: 0.3216 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 02060: val_acc did not improve from 0.94225\n",
+ "Epoch 2061/100000\n",
+ " - 19s - loss: 0.3322 - acc: 0.9365 - val_loss: 0.3327 - val_acc: 0.9239\n",
+ "\n",
+ "Epoch 02061: val_acc did not improve from 0.94225\n",
+ "Epoch 2062/100000\n",
+ " - 18s - loss: 0.3359 - acc: 0.9346 - val_loss: 0.3320 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02062: val_acc did not improve from 0.94225\n",
+ "Epoch 2063/100000\n",
+ " - 19s - loss: 0.3367 - acc: 0.9352 - val_loss: 0.3330 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02063: val_acc did not improve from 0.94225\n",
+ "Epoch 2064/100000\n",
+ " - 19s - loss: 0.3350 - acc: 0.9345 - val_loss: 0.4152 - val_acc: 0.8842\n",
+ "\n",
+ "Epoch 02064: val_acc did not improve from 0.94225\n",
+ "Epoch 2065/100000\n",
+ " - 18s - loss: 0.3340 - acc: 0.9350 - val_loss: 0.3929 - val_acc: 0.9132\n",
+ "\n",
+ "Epoch 02065: val_acc did not improve from 0.94225\n",
+ "Epoch 2066/100000\n",
+ " - 19s - loss: 0.3336 - acc: 0.9348 - val_loss: 0.3201 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02066: val_acc did not improve from 0.94225\n",
+ "Epoch 2067/100000\n",
+ " - 19s - loss: 0.3312 - acc: 0.9359 - val_loss: 0.3360 - val_acc: 0.9258\n",
+ "\n",
+ "Epoch 02067: val_acc did not improve from 0.94225\n",
+ "Epoch 2068/100000\n",
+ " - 19s - loss: 0.3324 - acc: 0.9355 - val_loss: 0.3958 - val_acc: 0.8971\n",
+ "\n",
+ "Epoch 02068: val_acc did not improve from 0.94225\n",
+ "Epoch 2069/100000\n",
+ " - 19s - loss: 0.3340 - acc: 0.9351 - val_loss: 0.3209 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02069: val_acc did not improve from 0.94225\n",
+ "Epoch 2070/100000\n",
+ " - 19s - loss: 0.3334 - acc: 0.9355 - val_loss: 0.3228 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 02070: val_acc did not improve from 0.94225\n",
+ "Epoch 2071/100000\n",
+ " - 19s - loss: 0.3325 - acc: 0.9360 - val_loss: 0.3383 - val_acc: 0.9236\n",
+ "\n",
+ "Epoch 02071: val_acc did not improve from 0.94225\n",
+ "Epoch 2072/100000\n",
+ " - 19s - loss: 0.3358 - acc: 0.9345 - val_loss: 0.3357 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 02072: val_acc did not improve from 0.94225\n",
+ "Epoch 2073/100000\n",
+ " - 19s - loss: 0.3335 - acc: 0.9358 - val_loss: 0.3274 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 02073: val_acc did not improve from 0.94225\n",
+ "Epoch 2074/100000\n",
+ " - 19s - loss: 0.3382 - acc: 0.9341 - val_loss: 0.3464 - val_acc: 0.9255\n",
+ "\n",
+ "Epoch 02074: val_acc did not improve from 0.94225\n",
+ "Epoch 2075/100000\n",
+ " - 19s - loss: 0.3333 - acc: 0.9356 - val_loss: 0.3558 - val_acc: 0.9148\n",
+ "\n",
+ "Epoch 02075: val_acc did not improve from 0.94225\n",
+ "Epoch 2076/100000\n",
+ " - 19s - loss: 0.3348 - acc: 0.9354 - val_loss: 0.3221 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 02076: val_acc did not improve from 0.94225\n",
+ "Epoch 2077/100000\n",
+ " - 19s - loss: 0.3346 - acc: 0.9353 - val_loss: 0.4634 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02077: val_acc did not improve from 0.94225\n",
+ "Epoch 2078/100000\n",
+ " - 19s - loss: 0.3338 - acc: 0.9352 - val_loss: 0.3210 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02078: val_acc did not improve from 0.94225\n",
+ "Epoch 2079/100000\n",
+ " - 19s - loss: 0.3304 - acc: 0.9362 - val_loss: 0.3238 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 02079: val_acc did not improve from 0.94225\n",
+ "Epoch 2080/100000\n",
+ " - 19s - loss: 0.3341 - acc: 0.9348 - val_loss: 0.3165 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 02080: val_acc did not improve from 0.94225\n",
+ "Epoch 2081/100000\n",
+ " - 19s - loss: 0.3335 - acc: 0.9348 - val_loss: 0.3676 - val_acc: 0.9168\n",
+ "\n",
+ "Epoch 02081: val_acc did not improve from 0.94225\n",
+ "Epoch 2082/100000\n",
+ " - 19s - loss: 0.3326 - acc: 0.9357 - val_loss: 0.3365 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02082: val_acc did not improve from 0.94225\n",
+ "Epoch 2083/100000\n",
+ " - 18s - loss: 0.3384 - acc: 0.9333 - val_loss: 0.3670 - val_acc: 0.9261\n",
+ "\n",
+ "Epoch 02083: val_acc did not improve from 0.94225\n",
+ "Epoch 2084/100000\n",
+ " - 19s - loss: 0.3318 - acc: 0.9358 - val_loss: 0.3263 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 02084: val_acc did not improve from 0.94225\n",
+ "Epoch 2085/100000\n",
+ " - 18s - loss: 0.3329 - acc: 0.9358 - val_loss: 0.3370 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 02085: val_acc did not improve from 0.94225\n",
+ "Epoch 2086/100000\n",
+ " - 19s - loss: 0.3322 - acc: 0.9357 - val_loss: 0.3138 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 02086: val_acc did not improve from 0.94225\n",
+ "Epoch 2087/100000\n",
+ " - 19s - loss: 0.3337 - acc: 0.9353 - val_loss: 0.3397 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 02087: val_acc did not improve from 0.94225\n",
+ "Epoch 2088/100000\n",
+ " - 19s - loss: 0.3302 - acc: 0.9364 - val_loss: 0.3245 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 02088: val_acc did not improve from 0.94225\n",
+ "Epoch 2089/100000\n",
+ " - 19s - loss: 0.3316 - acc: 0.9352 - val_loss: 0.3207 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 02089: val_acc did not improve from 0.94225\n",
+ "Epoch 2090/100000\n",
+ " - 19s - loss: 0.3321 - acc: 0.9350 - val_loss: 0.3192 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 02090: val_acc did not improve from 0.94225\n",
+ "Epoch 2091/100000\n",
+ " - 19s - loss: 0.3344 - acc: 0.9348 - val_loss: 0.3396 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 02091: val_acc did not improve from 0.94225\n",
+ "Epoch 2092/100000\n",
+ " - 19s - loss: 0.3313 - acc: 0.9360 - val_loss: 0.3140 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 02092: val_acc did not improve from 0.94225\n",
+ "Epoch 2093/100000\n",
+ " - 19s - loss: 0.3338 - acc: 0.9348 - val_loss: 0.3243 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 02093: val_acc did not improve from 0.94225\n",
+ "Epoch 2094/100000\n",
+ " - 19s - loss: 0.3327 - acc: 0.9353 - val_loss: 0.3118 - val_acc: 0.9399\n",
+ "\n",
+ "Epoch 02094: val_acc did not improve from 0.94225\n",
+ "Epoch 2095/100000\n",
+ " - 18s - loss: 0.3335 - acc: 0.9358 - val_loss: 0.3522 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 02095: val_acc did not improve from 0.94225\n",
+ "Epoch 2096/100000\n",
+ " - 19s - loss: 0.3348 - acc: 0.9350 - val_loss: 0.3496 - val_acc: 0.9165\n",
+ "\n",
+ "Epoch 02096: val_acc did not improve from 0.94225\n",
+ "Epoch 2097/100000\n",
+ " - 19s - loss: 0.3309 - acc: 0.9363 - val_loss: 0.3159 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 02097: val_acc did not improve from 0.94225\n",
+ "Epoch 2098/100000\n",
+ " - 18s - loss: 0.3341 - acc: 0.9352 - val_loss: 0.3640 - val_acc: 0.9134\n",
+ "\n",
+ "Epoch 02098: val_acc did not improve from 0.94225\n",
+ "Epoch 2099/100000\n",
+ " - 19s - loss: 0.3348 - acc: 0.9352 - val_loss: 0.3843 - val_acc: 0.9035\n",
+ "\n",
+ "Epoch 02099: val_acc did not improve from 0.94225\n",
+ "Epoch 2100/100000\n",
+ " - 18s - loss: 0.3299 - acc: 0.9361 - val_loss: 0.3456 - val_acc: 0.9207\n",
+ "\n",
+ "Epoch 02100: val_acc did not improve from 0.94225\n",
+ "Epoch 2101/100000\n",
+ " - 19s - loss: 0.3311 - acc: 0.9357 - val_loss: 0.3218 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 02101: val_acc did not improve from 0.94225\n",
+ "Epoch 2102/100000\n",
+ " - 19s - loss: 0.3331 - acc: 0.9357 - val_loss: 0.3317 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 02102: val_acc did not improve from 0.94225\n",
+ "Epoch 2103/100000\n",
+ " - 19s - loss: 0.3341 - acc: 0.9353 - val_loss: 0.3162 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02103: val_acc did not improve from 0.94225\n",
+ "Epoch 2104/100000\n",
+ " - 19s - loss: 0.3323 - acc: 0.9356 - val_loss: 0.3803 - val_acc: 0.9099\n",
+ "\n",
+ "Epoch 02104: val_acc did not improve from 0.94225\n",
+ "Epoch 2105/100000\n",
+ " - 18s - loss: 0.3338 - acc: 0.9351 - val_loss: 0.3132 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02105: val_acc did not improve from 0.94225\n",
+ "Epoch 2106/100000\n",
+ " - 19s - loss: 0.3333 - acc: 0.9353 - val_loss: 0.3960 - val_acc: 0.9018\n",
+ "\n",
+ "Epoch 02106: val_acc did not improve from 0.94225\n",
+ "Epoch 2107/100000\n",
+ " - 19s - loss: 0.3334 - acc: 0.9353 - val_loss: 0.3250 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 02107: val_acc did not improve from 0.94225\n",
+ "Epoch 2108/100000\n",
+ " - 19s - loss: 0.3332 - acc: 0.9356 - val_loss: 0.3377 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 02108: val_acc did not improve from 0.94225\n",
+ "Epoch 2109/100000\n",
+ " - 19s - loss: 0.3351 - acc: 0.9353 - val_loss: 0.3141 - val_acc: 0.9390\n",
+ "\n",
+ "Epoch 02109: val_acc did not improve from 0.94225\n",
+ "Epoch 2110/100000\n",
+ " - 19s - loss: 0.3337 - acc: 0.9350 - val_loss: 0.3303 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 02110: val_acc did not improve from 0.94225\n",
+ "Epoch 2111/100000\n",
+ " - 19s - loss: 0.3336 - acc: 0.9349 - val_loss: 0.3149 - val_acc: 0.9404\n",
+ "\n",
+ "Epoch 02111: val_acc did not improve from 0.94225\n",
+ "Epoch 2112/100000\n",
+ " - 19s - loss: 0.3338 - acc: 0.9358 - val_loss: 0.3876 - val_acc: 0.9032\n",
+ "\n",
+ "Epoch 02112: val_acc did not improve from 0.94225\n",
+ "Epoch 2113/100000\n",
+ " - 20s - loss: 0.3332 - acc: 0.9353 - val_loss: 0.3361 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 02113: val_acc did not improve from 0.94225\n",
+ "Epoch 2114/100000\n",
+ " - 19s - loss: 0.3330 - acc: 0.9353 - val_loss: 0.3229 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 02114: val_acc did not improve from 0.94225\n",
+ "Epoch 2115/100000\n",
+ " - 19s - loss: 0.3331 - acc: 0.9355 - val_loss: 0.3429 - val_acc: 0.9243\n",
+ "\n",
+ "Epoch 02115: val_acc did not improve from 0.94225\n",
+ "Epoch 2116/100000\n",
+ " - 19s - loss: 0.3329 - acc: 0.9353 - val_loss: 0.3966 - val_acc: 0.9144\n",
+ "\n",
+ "Epoch 02116: val_acc did not improve from 0.94225\n",
+ "Epoch 2117/100000\n",
+ " - 19s - loss: 0.3345 - acc: 0.9349 - val_loss: 0.3314 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 02117: val_acc did not improve from 0.94225\n",
+ "Epoch 2118/100000\n",
+ " - 19s - loss: 0.3329 - acc: 0.9353 - val_loss: 0.3228 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 02118: val_acc did not improve from 0.94225\n",
+ "Epoch 2119/100000\n",
+ " - 19s - loss: 0.3315 - acc: 0.9360 - val_loss: 0.3308 - val_acc: 0.9290\n",
+ "\n",
+ "Epoch 02119: val_acc did not improve from 0.94225\n",
+ "Epoch 2120/100000\n",
+ " - 19s - loss: 0.3343 - acc: 0.9349 - val_loss: 0.3178 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 02120: val_acc did not improve from 0.94225\n",
+ "Epoch 2121/100000\n",
+ " - 19s - loss: 0.3323 - acc: 0.9357 - val_loss: 0.3650 - val_acc: 0.9106\n",
+ "\n",
+ "Epoch 02121: val_acc did not improve from 0.94225\n",
+ "Epoch 2122/100000\n",
+ " - 19s - loss: 0.3313 - acc: 0.9357 - val_loss: 0.3353 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 02122: val_acc did not improve from 0.94225\n",
+ "Epoch 2123/100000\n",
+ " - 19s - loss: 0.3340 - acc: 0.9348 - val_loss: 0.3137 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 02123: val_acc did not improve from 0.94225\n",
+ "Epoch 2124/100000\n",
+ " - 19s - loss: 0.3331 - acc: 0.9353 - val_loss: 0.3365 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 02124: val_acc did not improve from 0.94225\n",
+ "Epoch 2125/100000\n",
+ " - 19s - loss: 0.3340 - acc: 0.9352 - val_loss: 0.3208 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02125: val_acc did not improve from 0.94225\n",
+ "Epoch 2126/100000\n",
+ " - 19s - loss: 0.3335 - acc: 0.9356 - val_loss: 0.3218 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 02126: val_acc did not improve from 0.94225\n",
+ "Epoch 2127/100000\n",
+ " - 19s - loss: 0.3366 - acc: 0.9343 - val_loss: 0.3370 - val_acc: 0.9265\n",
+ "\n",
+ "Epoch 02127: val_acc did not improve from 0.94225\n",
+ "Epoch 2128/100000\n",
+ " - 18s - loss: 0.3328 - acc: 0.9355 - val_loss: 0.3392 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 02128: val_acc did not improve from 0.94225\n",
+ "Epoch 2129/100000\n",
+ " - 19s - loss: 0.3349 - acc: 0.9344 - val_loss: 0.3334 - val_acc: 0.9267\n",
+ "\n",
+ "Epoch 02129: val_acc did not improve from 0.94225\n",
+ "Epoch 2130/100000\n",
+ " - 19s - loss: 0.3356 - acc: 0.9343 - val_loss: 0.3244 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02130: val_acc did not improve from 0.94225\n",
+ "Epoch 2131/100000\n",
+ " - 19s - loss: 0.3363 - acc: 0.9344 - val_loss: 0.3163 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 02131: val_acc did not improve from 0.94225\n",
+ "Epoch 2132/100000\n",
+ " - 19s - loss: 0.3312 - acc: 0.9360 - val_loss: 0.3229 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02132: val_acc did not improve from 0.94225\n",
+ "Epoch 2133/100000\n",
+ " - 19s - loss: 0.3326 - acc: 0.9351 - val_loss: 0.3677 - val_acc: 0.9062\n",
+ "\n",
+ "Epoch 02133: val_acc did not improve from 0.94225\n",
+ "Epoch 2134/100000\n",
+ " - 19s - loss: 0.3345 - acc: 0.9343 - val_loss: 0.3764 - val_acc: 0.9234\n",
+ "\n",
+ "Epoch 02134: val_acc did not improve from 0.94225\n",
+ "Epoch 2135/100000\n",
+ " - 19s - loss: 0.3361 - acc: 0.9348 - val_loss: 0.3705 - val_acc: 0.9189\n",
+ "\n",
+ "Epoch 02135: val_acc did not improve from 0.94225\n",
+ "Epoch 2136/100000\n",
+ " - 18s - loss: 0.3336 - acc: 0.9351 - val_loss: 0.3457 - val_acc: 0.9196\n",
+ "\n",
+ "Epoch 02136: val_acc did not improve from 0.94225\n",
+ "Epoch 2137/100000\n",
+ " - 19s - loss: 0.3343 - acc: 0.9345 - val_loss: 0.3550 - val_acc: 0.9164\n",
+ "\n",
+ "Epoch 02137: val_acc did not improve from 0.94225\n",
+ "Epoch 2138/100000\n",
+ " - 19s - loss: 0.3326 - acc: 0.9353 - val_loss: 0.3245 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 02138: val_acc did not improve from 0.94225\n",
+ "Epoch 2139/100000\n",
+ " - 18s - loss: 0.3331 - acc: 0.9356 - val_loss: 0.3215 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 02139: val_acc did not improve from 0.94225\n",
+ "Epoch 2140/100000\n",
+ " - 19s - loss: 0.3338 - acc: 0.9353 - val_loss: 0.4185 - val_acc: 0.8827\n",
+ "\n",
+ "Epoch 02140: val_acc did not improve from 0.94225\n",
+ "Epoch 2141/100000\n",
+ " - 19s - loss: 0.3352 - acc: 0.9347 - val_loss: 0.3284 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 02141: val_acc did not improve from 0.94225\n",
+ "Epoch 2142/100000\n",
+ " - 18s - loss: 0.3326 - acc: 0.9354 - val_loss: 0.4411 - val_acc: 0.8780\n",
+ "\n",
+ "Epoch 02142: val_acc did not improve from 0.94225\n",
+ "Epoch 2143/100000\n",
+ " - 19s - loss: 0.3345 - acc: 0.9349 - val_loss: 0.3459 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 02143: val_acc did not improve from 0.94225\n",
+ "Epoch 2144/100000\n",
+ " - 18s - loss: 0.3340 - acc: 0.9352 - val_loss: 0.3508 - val_acc: 0.9195\n",
+ "\n",
+ "Epoch 02144: val_acc did not improve from 0.94225\n",
+ "Epoch 2145/100000\n",
+ " - 19s - loss: 0.3319 - acc: 0.9361 - val_loss: 0.4259 - val_acc: 0.8833\n",
+ "\n",
+ "Epoch 02145: val_acc did not improve from 0.94225\n",
+ "Epoch 2146/100000\n",
+ " - 18s - loss: 0.3373 - acc: 0.9346 - val_loss: 0.3129 - val_acc: 0.9396\n",
+ "\n",
+ "Epoch 02146: val_acc did not improve from 0.94225\n",
+ "Epoch 2147/100000\n",
+ " - 19s - loss: 0.3336 - acc: 0.9355 - val_loss: 0.3498 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 02147: val_acc did not improve from 0.94225\n",
+ "Epoch 2148/100000\n",
+ " - 18s - loss: 0.3338 - acc: 0.9354 - val_loss: 0.3130 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 02148: val_acc did not improve from 0.94225\n",
+ "Epoch 2149/100000\n",
+ " - 19s - loss: 0.3344 - acc: 0.9354 - val_loss: 0.3293 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 02149: val_acc did not improve from 0.94225\n",
+ "Epoch 2150/100000\n",
+ " - 19s - loss: 0.3339 - acc: 0.9351 - val_loss: 0.3223 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 02150: val_acc did not improve from 0.94225\n",
+ "Epoch 2151/100000\n",
+ " - 19s - loss: 0.3337 - acc: 0.9357 - val_loss: 0.3223 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 02151: val_acc did not improve from 0.94225\n",
+ "Epoch 2152/100000\n",
+ " - 19s - loss: 0.3345 - acc: 0.9352 - val_loss: 0.3645 - val_acc: 0.9019\n",
+ "\n",
+ "Epoch 02152: val_acc did not improve from 0.94225\n",
+ "Epoch 2153/100000\n",
+ " - 19s - loss: 0.3333 - acc: 0.9362 - val_loss: 0.3544 - val_acc: 0.9199\n",
+ "\n",
+ "Epoch 02153: val_acc did not improve from 0.94225\n",
+ "Epoch 2154/100000\n",
+ " - 19s - loss: 0.3363 - acc: 0.9344 - val_loss: 0.4285 - val_acc: 0.8861\n",
+ "\n",
+ "Epoch 02154: val_acc did not improve from 0.94225\n",
+ "Epoch 2155/100000\n",
+ " - 18s - loss: 0.3359 - acc: 0.9345 - val_loss: 0.3354 - val_acc: 0.9269\n",
+ "\n",
+ "Epoch 02155: val_acc did not improve from 0.94225\n",
+ "Epoch 2156/100000\n",
+ " - 19s - loss: 0.3338 - acc: 0.9353 - val_loss: 0.3603 - val_acc: 0.9184\n",
+ "\n",
+ "Epoch 02156: val_acc did not improve from 0.94225\n",
+ "Epoch 2157/100000\n",
+ " - 19s - loss: 0.3315 - acc: 0.9361 - val_loss: 0.3314 - val_acc: 0.9289\n",
+ "\n",
+ "Epoch 02157: val_acc did not improve from 0.94225\n",
+ "Epoch 2158/100000\n",
+ " - 19s - loss: 0.3335 - acc: 0.9357 - val_loss: 0.3251 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 02158: val_acc did not improve from 0.94225\n",
+ "Epoch 2159/100000\n",
+ " - 19s - loss: 0.3326 - acc: 0.9355 - val_loss: 0.3649 - val_acc: 0.9255\n",
+ "\n",
+ "Epoch 02159: val_acc did not improve from 0.94225\n",
+ "Epoch 2160/100000\n",
+ " - 19s - loss: 0.3340 - acc: 0.9345 - val_loss: 0.3337 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 02160: val_acc did not improve from 0.94225\n",
+ "Epoch 2161/100000\n",
+ " - 19s - loss: 0.3348 - acc: 0.9353 - val_loss: 0.3137 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 02161: val_acc did not improve from 0.94225\n",
+ "Epoch 2162/100000\n",
+ " - 19s - loss: 0.3328 - acc: 0.9355 - val_loss: 0.3173 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 02162: val_acc did not improve from 0.94225\n",
+ "Epoch 2163/100000\n",
+ " - 19s - loss: 0.3330 - acc: 0.9356 - val_loss: 0.3783 - val_acc: 0.8888\n",
+ "\n",
+ "Epoch 02163: val_acc did not improve from 0.94225\n",
+ "Epoch 2164/100000\n",
+ " - 19s - loss: 0.3317 - acc: 0.9361 - val_loss: 0.3583 - val_acc: 0.9126\n",
+ "\n",
+ "Epoch 02164: val_acc did not improve from 0.94225\n",
+ "Epoch 2165/100000\n",
+ " - 18s - loss: 0.3353 - acc: 0.9346 - val_loss: 0.3442 - val_acc: 0.9234\n",
+ "\n",
+ "Epoch 02165: val_acc did not improve from 0.94225\n",
+ "Epoch 2166/100000\n",
+ " - 19s - loss: 0.3340 - acc: 0.9351 - val_loss: 0.3311 - val_acc: 0.9279\n",
+ "\n",
+ "Epoch 02166: val_acc did not improve from 0.94225\n",
+ "Epoch 2167/100000\n",
+ " - 19s - loss: 0.3344 - acc: 0.9354 - val_loss: 0.3444 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 02167: val_acc did not improve from 0.94225\n",
+ "Epoch 2168/100000\n",
+ " - 19s - loss: 0.3349 - acc: 0.9351 - val_loss: 0.3745 - val_acc: 0.9083\n",
+ "\n",
+ "Epoch 02168: val_acc did not improve from 0.94225\n",
+ "Epoch 2169/100000\n",
+ " - 19s - loss: 0.3332 - acc: 0.9354 - val_loss: 0.3176 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 02169: val_acc did not improve from 0.94225\n",
+ "Epoch 2170/100000\n",
+ " - 19s - loss: 0.3341 - acc: 0.9354 - val_loss: 0.3231 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 02170: val_acc did not improve from 0.94225\n",
+ "Epoch 2171/100000\n",
+ " - 19s - loss: 0.3356 - acc: 0.9346 - val_loss: 0.3619 - val_acc: 0.9135\n",
+ "\n",
+ "Epoch 02171: val_acc did not improve from 0.94225\n",
+ "Epoch 2172/100000\n",
+ " - 19s - loss: 0.3368 - acc: 0.9346 - val_loss: 0.3787 - val_acc: 0.9063\n",
+ "\n",
+ "Epoch 02172: val_acc did not improve from 0.94225\n",
+ "Epoch 2173/100000\n",
+ " - 19s - loss: 0.3337 - acc: 0.9358 - val_loss: 0.3144 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 02173: val_acc did not improve from 0.94225\n",
+ "Epoch 2174/100000\n",
+ " - 19s - loss: 0.3376 - acc: 0.9341 - val_loss: 0.3377 - val_acc: 0.9227\n",
+ "\n",
+ "Epoch 02174: val_acc did not improve from 0.94225\n",
+ "Epoch 2175/100000\n",
+ " - 19s - loss: 0.3340 - acc: 0.9352 - val_loss: 0.3946 - val_acc: 0.8952\n",
+ "\n",
+ "Epoch 02175: val_acc did not improve from 0.94225\n",
+ "Epoch 2176/100000\n",
+ " - 19s - loss: 0.3347 - acc: 0.9352 - val_loss: 0.3259 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 02176: val_acc did not improve from 0.94225\n",
+ "Epoch 2177/100000\n",
+ " - 19s - loss: 0.3369 - acc: 0.9346 - val_loss: 0.3830 - val_acc: 0.9172\n",
+ "\n",
+ "Epoch 02177: val_acc did not improve from 0.94225\n",
+ "Epoch 2178/100000\n",
+ " - 19s - loss: 0.3362 - acc: 0.9347 - val_loss: 0.3291 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 02178: val_acc did not improve from 0.94225\n",
+ "Epoch 2179/100000\n",
+ " - 19s - loss: 0.3322 - acc: 0.9353 - val_loss: 0.3336 - val_acc: 0.9265\n",
+ "\n",
+ "Epoch 02179: val_acc did not improve from 0.94225\n",
+ "Epoch 2180/100000\n",
+ " - 19s - loss: 0.3343 - acc: 0.9353 - val_loss: 0.3778 - val_acc: 0.9091\n",
+ "\n",
+ "Epoch 02180: val_acc did not improve from 0.94225\n",
+ "Epoch 2181/100000\n",
+ " - 19s - loss: 0.3366 - acc: 0.9343 - val_loss: 0.3327 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 02181: val_acc did not improve from 0.94225\n",
+ "Epoch 2182/100000\n",
+ " - 19s - loss: 0.3346 - acc: 0.9363 - val_loss: 0.3593 - val_acc: 0.9164\n",
+ "\n",
+ "Epoch 02182: val_acc did not improve from 0.94225\n",
+ "Epoch 2183/100000\n",
+ " - 19s - loss: 0.3353 - acc: 0.9342 - val_loss: 0.3701 - val_acc: 0.9267\n",
+ "\n",
+ "Epoch 02183: val_acc did not improve from 0.94225\n",
+ "Epoch 2184/100000\n",
+ " - 19s - loss: 0.3355 - acc: 0.9351 - val_loss: 0.3306 - val_acc: 0.9264\n",
+ "\n",
+ "Epoch 02184: val_acc did not improve from 0.94225\n",
+ "Epoch 2185/100000\n",
+ " - 18s - loss: 0.3349 - acc: 0.9352 - val_loss: 0.3295 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 02185: val_acc did not improve from 0.94225\n",
+ "Epoch 2186/100000\n",
+ " - 19s - loss: 0.3340 - acc: 0.9357 - val_loss: 0.3553 - val_acc: 0.9258\n",
+ "\n",
+ "Epoch 02186: val_acc did not improve from 0.94225\n",
+ "Epoch 2187/100000\n",
+ " - 18s - loss: 0.3374 - acc: 0.9347 - val_loss: 0.3646 - val_acc: 0.9104\n",
+ "\n",
+ "Epoch 02187: val_acc did not improve from 0.94225\n",
+ "Epoch 2188/100000\n",
+ " - 19s - loss: 0.3342 - acc: 0.9354 - val_loss: 0.3216 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 02188: val_acc did not improve from 0.94225\n",
+ "Epoch 2189/100000\n",
+ " - 18s - loss: 0.3359 - acc: 0.9350 - val_loss: 0.3492 - val_acc: 0.9187\n",
+ "\n",
+ "Epoch 02189: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 02189: ReduceLROnPlateau reducing learning rate to 0.0005987368611386045.\n",
+ "Epoch 2190/100000\n",
+ " - 18s - loss: 0.3301 - acc: 0.9345 - val_loss: 0.3098 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 02190: val_acc did not improve from 0.94225\n",
+ "Epoch 2191/100000\n",
+ " - 18s - loss: 0.3264 - acc: 0.9355 - val_loss: 0.3180 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 02191: val_acc did not improve from 0.94225\n",
+ "Epoch 2192/100000\n",
+ " - 19s - loss: 0.3246 - acc: 0.9365 - val_loss: 0.4054 - val_acc: 0.8870\n",
+ "\n",
+ "Epoch 02192: val_acc did not improve from 0.94225\n",
+ "Epoch 2193/100000\n",
+ " - 19s - loss: 0.3266 - acc: 0.9360 - val_loss: 0.3161 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 02193: val_acc did not improve from 0.94225\n",
+ "Epoch 2194/100000\n",
+ " - 19s - loss: 0.3276 - acc: 0.9360 - val_loss: 0.6201 - val_acc: 0.7837\n",
+ "\n",
+ "Epoch 02194: val_acc did not improve from 0.94225\n",
+ "Epoch 2195/100000\n",
+ " - 19s - loss: 0.3267 - acc: 0.9361 - val_loss: 0.3205 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 02195: val_acc did not improve from 0.94225\n",
+ "Epoch 2196/100000\n",
+ " - 19s - loss: 0.3260 - acc: 0.9360 - val_loss: 0.3317 - val_acc: 0.9274\n",
+ "\n",
+ "Epoch 02196: val_acc did not improve from 0.94225\n",
+ "Epoch 2197/100000\n",
+ " - 19s - loss: 0.3260 - acc: 0.9361 - val_loss: 0.3308 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 02197: val_acc did not improve from 0.94225\n",
+ "Epoch 2198/100000\n",
+ " - 18s - loss: 0.3301 - acc: 0.9348 - val_loss: 0.3132 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 02198: val_acc did not improve from 0.94225\n",
+ "Epoch 2199/100000\n",
+ " - 18s - loss: 0.3252 - acc: 0.9362 - val_loss: 0.3166 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 02199: val_acc did not improve from 0.94225\n",
+ "Epoch 2200/100000\n",
+ " - 19s - loss: 0.3265 - acc: 0.9358 - val_loss: 0.3651 - val_acc: 0.9142\n",
+ "\n",
+ "Epoch 02200: val_acc did not improve from 0.94225\n",
+ "Epoch 2201/100000\n",
+ " - 19s - loss: 0.3280 - acc: 0.9360 - val_loss: 0.3169 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 02201: val_acc did not improve from 0.94225\n",
+ "Epoch 2202/100000\n",
+ " - 19s - loss: 0.3272 - acc: 0.9358 - val_loss: 0.3498 - val_acc: 0.9144\n",
+ "\n",
+ "Epoch 02202: val_acc did not improve from 0.94225\n",
+ "Epoch 2203/100000\n",
+ " - 19s - loss: 0.3267 - acc: 0.9364 - val_loss: 0.3128 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 02203: val_acc did not improve from 0.94225\n",
+ "Epoch 2204/100000\n",
+ " - 18s - loss: 0.3266 - acc: 0.9358 - val_loss: 0.3199 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02204: val_acc did not improve from 0.94225\n",
+ "Epoch 2205/100000\n",
+ " - 19s - loss: 0.3291 - acc: 0.9351 - val_loss: 0.3252 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02205: val_acc did not improve from 0.94225\n",
+ "Epoch 2206/100000\n",
+ " - 18s - loss: 0.3269 - acc: 0.9360 - val_loss: 0.3469 - val_acc: 0.9120\n",
+ "\n",
+ "Epoch 02206: val_acc did not improve from 0.94225\n",
+ "Epoch 2207/100000\n",
+ " - 19s - loss: 0.3250 - acc: 0.9365 - val_loss: 0.3283 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 02207: val_acc did not improve from 0.94225\n",
+ "Epoch 2208/100000\n",
+ " - 18s - loss: 0.3274 - acc: 0.9353 - val_loss: 0.3119 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 02208: val_acc did not improve from 0.94225\n",
+ "Epoch 2209/100000\n",
+ " - 19s - loss: 0.3260 - acc: 0.9363 - val_loss: 0.3488 - val_acc: 0.9109\n",
+ "\n",
+ "Epoch 02209: val_acc did not improve from 0.94225\n",
+ "Epoch 2210/100000\n",
+ " - 18s - loss: 0.3271 - acc: 0.9357 - val_loss: 0.3175 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 02210: val_acc did not improve from 0.94225\n",
+ "Epoch 2211/100000\n",
+ " - 19s - loss: 0.3286 - acc: 0.9359 - val_loss: 0.3201 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 02211: val_acc did not improve from 0.94225\n",
+ "Epoch 2212/100000\n",
+ " - 18s - loss: 0.3238 - acc: 0.9369 - val_loss: 0.3060 - val_acc: 0.9383\n",
+ "\n",
+ "Epoch 02212: val_acc did not improve from 0.94225\n",
+ "Epoch 2213/100000\n",
+ " - 19s - loss: 0.3243 - acc: 0.9368 - val_loss: 0.3233 - val_acc: 0.9236\n",
+ "\n",
+ "Epoch 02213: val_acc did not improve from 0.94225\n",
+ "Epoch 2214/100000\n",
+ " - 19s - loss: 0.3254 - acc: 0.9358 - val_loss: 0.3237 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 02214: val_acc did not improve from 0.94225\n",
+ "Epoch 2215/100000\n",
+ " - 19s - loss: 0.3264 - acc: 0.9360 - val_loss: 0.3171 - val_acc: 0.9377\n",
+ "\n",
+ "Epoch 02215: val_acc did not improve from 0.94225\n",
+ "Epoch 2216/100000\n",
+ " - 18s - loss: 0.3257 - acc: 0.9364 - val_loss: 0.3694 - val_acc: 0.9097\n",
+ "\n",
+ "Epoch 02216: val_acc did not improve from 0.94225\n",
+ "Epoch 2217/100000\n",
+ " - 19s - loss: 0.3265 - acc: 0.9360 - val_loss: 0.3070 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 02217: val_acc did not improve from 0.94225\n",
+ "Epoch 2218/100000\n",
+ " - 18s - loss: 0.3261 - acc: 0.9361 - val_loss: 0.3175 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02218: val_acc did not improve from 0.94225\n",
+ "Epoch 2219/100000\n",
+ " - 19s - loss: 0.3253 - acc: 0.9365 - val_loss: 0.3220 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 02219: val_acc did not improve from 0.94225\n",
+ "Epoch 2220/100000\n",
+ " - 18s - loss: 0.3263 - acc: 0.9360 - val_loss: 0.3304 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 02220: val_acc did not improve from 0.94225\n",
+ "Epoch 2221/100000\n",
+ " - 19s - loss: 0.3271 - acc: 0.9354 - val_loss: 0.3224 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 02221: val_acc did not improve from 0.94225\n",
+ "Epoch 2222/100000\n",
+ " - 19s - loss: 0.3256 - acc: 0.9362 - val_loss: 0.3245 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 02222: val_acc did not improve from 0.94225\n",
+ "Epoch 2223/100000\n",
+ " - 18s - loss: 0.3256 - acc: 0.9364 - val_loss: 0.3130 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 02223: val_acc did not improve from 0.94225\n",
+ "Epoch 2224/100000\n",
+ " - 19s - loss: 0.3252 - acc: 0.9366 - val_loss: 0.3106 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 02224: val_acc did not improve from 0.94225\n",
+ "Epoch 2225/100000\n",
+ " - 19s - loss: 0.3244 - acc: 0.9365 - val_loss: 0.3203 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 02225: val_acc did not improve from 0.94225\n",
+ "Epoch 2226/100000\n",
+ " - 19s - loss: 0.3255 - acc: 0.9365 - val_loss: 0.3763 - val_acc: 0.9112\n",
+ "\n",
+ "Epoch 02226: val_acc did not improve from 0.94225\n",
+ "Epoch 2227/100000\n",
+ " - 18s - loss: 0.3283 - acc: 0.9358 - val_loss: 0.5042 - val_acc: 0.8577\n",
+ "\n",
+ "Epoch 02227: val_acc did not improve from 0.94225\n",
+ "Epoch 2228/100000\n",
+ " - 19s - loss: 0.3245 - acc: 0.9363 - val_loss: 0.3263 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 02228: val_acc did not improve from 0.94225\n",
+ "Epoch 2229/100000\n",
+ " - 19s - loss: 0.3252 - acc: 0.9362 - val_loss: 0.5468 - val_acc: 0.8286\n",
+ "\n",
+ "Epoch 02229: val_acc did not improve from 0.94225\n",
+ "Epoch 2230/100000\n",
+ " - 19s - loss: 0.3283 - acc: 0.9355 - val_loss: 0.3467 - val_acc: 0.9214\n",
+ "\n",
+ "Epoch 02230: val_acc did not improve from 0.94225\n",
+ "Epoch 2231/100000\n",
+ " - 19s - loss: 0.3251 - acc: 0.9364 - val_loss: 0.4078 - val_acc: 0.8927\n",
+ "\n",
+ "Epoch 02231: val_acc did not improve from 0.94225\n",
+ "Epoch 2232/100000\n",
+ " - 19s - loss: 0.3257 - acc: 0.9366 - val_loss: 0.3183 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 02232: val_acc did not improve from 0.94225\n",
+ "Epoch 2233/100000\n",
+ " - 19s - loss: 0.3280 - acc: 0.9355 - val_loss: 0.3069 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 02233: val_acc did not improve from 0.94225\n",
+ "Epoch 2234/100000\n",
+ " - 19s - loss: 0.3249 - acc: 0.9363 - val_loss: 0.3451 - val_acc: 0.9168\n",
+ "\n",
+ "Epoch 02234: val_acc did not improve from 0.94225\n",
+ "Epoch 2235/100000\n",
+ " - 19s - loss: 0.3280 - acc: 0.9354 - val_loss: 0.3080 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 02235: val_acc did not improve from 0.94225\n",
+ "Epoch 2236/100000\n",
+ " - 19s - loss: 0.3246 - acc: 0.9365 - val_loss: 0.3322 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 02236: val_acc did not improve from 0.94225\n",
+ "Epoch 2237/100000\n",
+ " - 19s - loss: 0.3262 - acc: 0.9361 - val_loss: 0.3407 - val_acc: 0.9208\n",
+ "\n",
+ "Epoch 02237: val_acc did not improve from 0.94225\n",
+ "Epoch 2238/100000\n",
+ " - 19s - loss: 0.3246 - acc: 0.9370 - val_loss: 0.5896 - val_acc: 0.8032\n",
+ "\n",
+ "Epoch 02238: val_acc did not improve from 0.94225\n",
+ "Epoch 2239/100000\n",
+ " - 18s - loss: 0.3257 - acc: 0.9364 - val_loss: 0.3138 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 02239: val_acc did not improve from 0.94225\n",
+ "Epoch 2240/100000\n",
+ " - 19s - loss: 0.3262 - acc: 0.9363 - val_loss: 0.3226 - val_acc: 0.9275\n",
+ "\n",
+ "Epoch 02240: val_acc did not improve from 0.94225\n",
+ "Epoch 2241/100000\n",
+ " - 18s - loss: 0.3257 - acc: 0.9362 - val_loss: 0.3218 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02241: val_acc did not improve from 0.94225\n",
+ "Epoch 2242/100000\n",
+ " - 19s - loss: 0.3248 - acc: 0.9368 - val_loss: 0.3115 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 02242: val_acc did not improve from 0.94225\n",
+ "Epoch 2243/100000\n",
+ " - 18s - loss: 0.3263 - acc: 0.9357 - val_loss: 0.3314 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 02243: val_acc did not improve from 0.94225\n",
+ "Epoch 2244/100000\n",
+ " - 18s - loss: 0.3253 - acc: 0.9362 - val_loss: 0.3391 - val_acc: 0.9203\n",
+ "\n",
+ "Epoch 02244: val_acc did not improve from 0.94225\n",
+ "Epoch 2245/100000\n",
+ " - 18s - loss: 0.3257 - acc: 0.9363 - val_loss: 0.3027 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 02245: val_acc did not improve from 0.94225\n",
+ "Epoch 2246/100000\n",
+ " - 18s - loss: 0.3241 - acc: 0.9366 - val_loss: 0.3634 - val_acc: 0.9248\n",
+ "\n",
+ "Epoch 02246: val_acc did not improve from 0.94225\n",
+ "Epoch 2247/100000\n",
+ " - 19s - loss: 0.3254 - acc: 0.9365 - val_loss: 0.3143 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02247: val_acc did not improve from 0.94225\n",
+ "Epoch 2248/100000\n",
+ " - 18s - loss: 0.3246 - acc: 0.9364 - val_loss: 0.3112 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 02248: val_acc did not improve from 0.94225\n",
+ "Epoch 2249/100000\n",
+ " - 18s - loss: 0.3267 - acc: 0.9356 - val_loss: 0.3113 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 02249: val_acc did not improve from 0.94225\n",
+ "Epoch 2250/100000\n",
+ " - 18s - loss: 0.3253 - acc: 0.9366 - val_loss: 0.3163 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 02250: val_acc did not improve from 0.94225\n",
+ "Epoch 2251/100000\n",
+ " - 19s - loss: 0.3284 - acc: 0.9353 - val_loss: 0.3277 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 02251: val_acc did not improve from 0.94225\n",
+ "Epoch 2252/100000\n",
+ " - 18s - loss: 0.3253 - acc: 0.9360 - val_loss: 0.3213 - val_acc: 0.9290\n",
+ "\n",
+ "Epoch 02252: val_acc did not improve from 0.94225\n",
+ "Epoch 2253/100000\n",
+ " - 19s - loss: 0.3253 - acc: 0.9361 - val_loss: 0.4819 - val_acc: 0.8655\n",
+ "\n",
+ "Epoch 02253: val_acc did not improve from 0.94225\n",
+ "Epoch 2254/100000\n",
+ " - 18s - loss: 0.3273 - acc: 0.9364 - val_loss: 0.3747 - val_acc: 0.9248\n",
+ "\n",
+ "Epoch 02254: val_acc did not improve from 0.94225\n",
+ "Epoch 2255/100000\n",
+ " - 18s - loss: 0.3272 - acc: 0.9360 - val_loss: 0.3189 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02255: val_acc did not improve from 0.94225\n",
+ "Epoch 2256/100000\n",
+ " - 18s - loss: 0.3273 - acc: 0.9357 - val_loss: 0.3492 - val_acc: 0.9180\n",
+ "\n",
+ "Epoch 02256: val_acc did not improve from 0.94225\n",
+ "Epoch 2257/100000\n",
+ " - 18s - loss: 0.3248 - acc: 0.9366 - val_loss: 0.3142 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02257: val_acc did not improve from 0.94225\n",
+ "Epoch 2258/100000\n",
+ " - 18s - loss: 0.3250 - acc: 0.9365 - val_loss: 0.3086 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 02258: val_acc did not improve from 0.94225\n",
+ "Epoch 2259/100000\n",
+ " - 19s - loss: 0.3236 - acc: 0.9364 - val_loss: 0.3166 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02259: val_acc did not improve from 0.94225\n",
+ "Epoch 2260/100000\n",
+ " - 18s - loss: 0.3239 - acc: 0.9363 - val_loss: 0.3211 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 02260: val_acc did not improve from 0.94225\n",
+ "Epoch 2261/100000\n",
+ " - 19s - loss: 0.3245 - acc: 0.9361 - val_loss: 0.3495 - val_acc: 0.9156\n",
+ "\n",
+ "Epoch 02261: val_acc did not improve from 0.94225\n",
+ "Epoch 2262/100000\n",
+ " - 18s - loss: 0.3262 - acc: 0.9357 - val_loss: 0.3503 - val_acc: 0.9152\n",
+ "\n",
+ "Epoch 02262: val_acc did not improve from 0.94225\n",
+ "Epoch 2263/100000\n",
+ " - 19s - loss: 0.3250 - acc: 0.9361 - val_loss: 0.3151 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 02263: val_acc did not improve from 0.94225\n",
+ "Epoch 2264/100000\n",
+ " - 19s - loss: 0.3240 - acc: 0.9365 - val_loss: 0.3147 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 02264: val_acc did not improve from 0.94225\n",
+ "Epoch 2265/100000\n",
+ " - 19s - loss: 0.3277 - acc: 0.9354 - val_loss: 0.3428 - val_acc: 0.9234\n",
+ "\n",
+ "Epoch 02265: val_acc did not improve from 0.94225\n",
+ "Epoch 2266/100000\n",
+ " - 19s - loss: 0.3243 - acc: 0.9365 - val_loss: 0.4584 - val_acc: 0.8730\n",
+ "\n",
+ "Epoch 02266: val_acc did not improve from 0.94225\n",
+ "Epoch 2267/100000\n",
+ " - 19s - loss: 0.3261 - acc: 0.9353 - val_loss: 0.3240 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 02267: val_acc did not improve from 0.94225\n",
+ "Epoch 2268/100000\n",
+ " - 19s - loss: 0.3260 - acc: 0.9363 - val_loss: 0.3135 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02268: val_acc did not improve from 0.94225\n",
+ "Epoch 2269/100000\n",
+ " - 18s - loss: 0.3240 - acc: 0.9362 - val_loss: 0.3363 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 02269: val_acc did not improve from 0.94225\n",
+ "Epoch 2270/100000\n",
+ " - 19s - loss: 0.3262 - acc: 0.9362 - val_loss: 0.3340 - val_acc: 0.9264\n",
+ "\n",
+ "Epoch 02270: val_acc did not improve from 0.94225\n",
+ "Epoch 2271/100000\n",
+ " - 18s - loss: 0.3245 - acc: 0.9363 - val_loss: 0.3231 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 02271: val_acc did not improve from 0.94225\n",
+ "Epoch 2272/100000\n",
+ " - 18s - loss: 0.3251 - acc: 0.9366 - val_loss: 0.3156 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02272: val_acc did not improve from 0.94225\n",
+ "Epoch 2273/100000\n",
+ " - 19s - loss: 0.3243 - acc: 0.9365 - val_loss: 0.3711 - val_acc: 0.9091\n",
+ "\n",
+ "Epoch 02273: val_acc did not improve from 0.94225\n",
+ "Epoch 2274/100000\n",
+ " - 18s - loss: 0.3255 - acc: 0.9361 - val_loss: 0.3116 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02274: val_acc did not improve from 0.94225\n",
+ "Epoch 2275/100000\n",
+ " - 18s - loss: 0.3261 - acc: 0.9360 - val_loss: 0.3327 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 02275: val_acc did not improve from 0.94225\n",
+ "Epoch 2276/100000\n",
+ " - 18s - loss: 0.3263 - acc: 0.9357 - val_loss: 0.6150 - val_acc: 0.7694\n",
+ "\n",
+ "Epoch 02276: val_acc did not improve from 0.94225\n",
+ "Epoch 2277/100000\n",
+ " - 18s - loss: 0.3228 - acc: 0.9369 - val_loss: 0.3367 - val_acc: 0.9247\n",
+ "\n",
+ "Epoch 02277: val_acc did not improve from 0.94225\n",
+ "Epoch 2278/100000\n",
+ " - 19s - loss: 0.3251 - acc: 0.9364 - val_loss: 0.3398 - val_acc: 0.9247\n",
+ "\n",
+ "Epoch 02278: val_acc did not improve from 0.94225\n",
+ "Epoch 2279/100000\n",
+ " - 18s - loss: 0.3250 - acc: 0.9363 - val_loss: 0.3728 - val_acc: 0.9084\n",
+ "\n",
+ "Epoch 02279: val_acc did not improve from 0.94225\n",
+ "Epoch 2280/100000\n",
+ " - 19s - loss: 0.3272 - acc: 0.9358 - val_loss: 0.4430 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02280: val_acc did not improve from 0.94225\n",
+ "Epoch 2281/100000\n",
+ " - 18s - loss: 0.3249 - acc: 0.9362 - val_loss: 0.3073 - val_acc: 0.9383\n",
+ "\n",
+ "Epoch 02281: val_acc did not improve from 0.94225\n",
+ "Epoch 2282/100000\n",
+ " - 19s - loss: 0.3243 - acc: 0.9362 - val_loss: 0.3218 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 02282: val_acc did not improve from 0.94225\n",
+ "Epoch 2283/100000\n",
+ " - 18s - loss: 0.3241 - acc: 0.9367 - val_loss: 0.3333 - val_acc: 0.9244\n",
+ "\n",
+ "Epoch 02283: val_acc did not improve from 0.94225\n",
+ "Epoch 2284/100000\n",
+ " - 19s - loss: 0.3290 - acc: 0.9350 - val_loss: 0.3161 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 02284: val_acc did not improve from 0.94225\n",
+ "Epoch 2285/100000\n",
+ " - 18s - loss: 0.3266 - acc: 0.9359 - val_loss: 0.3252 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 02285: val_acc did not improve from 0.94225\n",
+ "Epoch 2286/100000\n",
+ " - 19s - loss: 0.3233 - acc: 0.9367 - val_loss: 0.3043 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 02286: val_acc did not improve from 0.94225\n",
+ "Epoch 2287/100000\n",
+ " - 18s - loss: 0.3261 - acc: 0.9361 - val_loss: 0.3320 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 02287: val_acc did not improve from 0.94225\n",
+ "Epoch 2288/100000\n",
+ " - 19s - loss: 0.3236 - acc: 0.9368 - val_loss: 0.3539 - val_acc: 0.9150\n",
+ "\n",
+ "Epoch 02288: val_acc did not improve from 0.94225\n",
+ "Epoch 2289/100000\n",
+ " - 18s - loss: 0.3229 - acc: 0.9367 - val_loss: 0.3393 - val_acc: 0.9224\n",
+ "\n",
+ "Epoch 02289: val_acc did not improve from 0.94225\n",
+ "Epoch 2290/100000\n",
+ " - 19s - loss: 0.3246 - acc: 0.9358 - val_loss: 0.3263 - val_acc: 0.9266\n",
+ "\n",
+ "Epoch 02290: val_acc did not improve from 0.94225\n",
+ "Epoch 2291/100000\n",
+ " - 18s - loss: 0.3262 - acc: 0.9358 - val_loss: 0.3188 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 02291: val_acc did not improve from 0.94225\n",
+ "Epoch 2292/100000\n",
+ " - 18s - loss: 0.3229 - acc: 0.9372 - val_loss: 0.3220 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 02292: val_acc did not improve from 0.94225\n",
+ "Epoch 2293/100000\n",
+ " - 19s - loss: 0.3246 - acc: 0.9367 - val_loss: 0.3219 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 02293: val_acc did not improve from 0.94225\n",
+ "Epoch 2294/100000\n",
+ " - 18s - loss: 0.3245 - acc: 0.9363 - val_loss: 0.3592 - val_acc: 0.9103\n",
+ "\n",
+ "Epoch 02294: val_acc did not improve from 0.94225\n",
+ "Epoch 2295/100000\n",
+ " - 19s - loss: 0.3267 - acc: 0.9357 - val_loss: 0.3187 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 02295: val_acc did not improve from 0.94225\n",
+ "Epoch 2296/100000\n",
+ " - 18s - loss: 0.3265 - acc: 0.9361 - val_loss: 0.3323 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02296: val_acc did not improve from 0.94225\n",
+ "Epoch 2297/100000\n",
+ " - 19s - loss: 0.3239 - acc: 0.9367 - val_loss: 0.3172 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 02297: val_acc did not improve from 0.94225\n",
+ "Epoch 2298/100000\n",
+ " - 19s - loss: 0.3233 - acc: 0.9372 - val_loss: 0.3212 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 02298: val_acc did not improve from 0.94225\n",
+ "Epoch 2299/100000\n",
+ " - 19s - loss: 0.3235 - acc: 0.9369 - val_loss: 0.3246 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 02299: val_acc did not improve from 0.94225\n",
+ "Epoch 2300/100000\n",
+ " - 18s - loss: 0.3239 - acc: 0.9364 - val_loss: 0.3143 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02300: val_acc did not improve from 0.94225\n",
+ "Epoch 2301/100000\n",
+ " - 19s - loss: 0.3275 - acc: 0.9354 - val_loss: 0.3141 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 02301: val_acc did not improve from 0.94225\n",
+ "Epoch 2302/100000\n",
+ " - 18s - loss: 0.3221 - acc: 0.9374 - val_loss: 0.3295 - val_acc: 0.9305\n",
+ "\n",
+ "Epoch 02302: val_acc did not improve from 0.94225\n",
+ "Epoch 2303/100000\n",
+ " - 19s - loss: 0.3243 - acc: 0.9363 - val_loss: 0.3076 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 02303: val_acc did not improve from 0.94225\n",
+ "Epoch 2304/100000\n",
+ " - 19s - loss: 0.3261 - acc: 0.9357 - val_loss: 0.3140 - val_acc: 0.9400\n",
+ "\n",
+ "Epoch 02304: val_acc did not improve from 0.94225\n",
+ "Epoch 2305/100000\n",
+ " - 18s - loss: 0.3247 - acc: 0.9362 - val_loss: 0.3223 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 02305: val_acc did not improve from 0.94225\n",
+ "Epoch 2306/100000\n",
+ " - 19s - loss: 0.3234 - acc: 0.9367 - val_loss: 0.3293 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 02306: val_acc did not improve from 0.94225\n",
+ "Epoch 2307/100000\n",
+ " - 18s - loss: 0.3234 - acc: 0.9367 - val_loss: 0.3402 - val_acc: 0.9196\n",
+ "\n",
+ "Epoch 02307: val_acc did not improve from 0.94225\n",
+ "Epoch 2308/100000\n",
+ " - 19s - loss: 0.3262 - acc: 0.9363 - val_loss: 0.3314 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 02308: val_acc did not improve from 0.94225\n",
+ "Epoch 2309/100000\n",
+ " - 19s - loss: 0.3245 - acc: 0.9369 - val_loss: 0.3466 - val_acc: 0.9214\n",
+ "\n",
+ "Epoch 02309: val_acc did not improve from 0.94225\n",
+ "Epoch 2310/100000\n",
+ " - 19s - loss: 0.3229 - acc: 0.9369 - val_loss: 0.3236 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 02310: val_acc did not improve from 0.94225\n",
+ "Epoch 2311/100000\n",
+ " - 19s - loss: 0.3258 - acc: 0.9362 - val_loss: 0.3756 - val_acc: 0.9017\n",
+ "\n",
+ "Epoch 02311: val_acc did not improve from 0.94225\n",
+ "Epoch 2312/100000\n",
+ " - 19s - loss: 0.3257 - acc: 0.9359 - val_loss: 0.3144 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02312: val_acc did not improve from 0.94225\n",
+ "Epoch 2313/100000\n",
+ " - 19s - loss: 0.3256 - acc: 0.9365 - val_loss: 0.3100 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 02313: val_acc did not improve from 0.94225\n",
+ "Epoch 2314/100000\n",
+ " - 18s - loss: 0.3224 - acc: 0.9373 - val_loss: 0.3446 - val_acc: 0.9193\n",
+ "\n",
+ "Epoch 02314: val_acc did not improve from 0.94225\n",
+ "Epoch 2315/100000\n",
+ " - 19s - loss: 0.3252 - acc: 0.9364 - val_loss: 0.3087 - val_acc: 0.9397\n",
+ "\n",
+ "Epoch 02315: val_acc did not improve from 0.94225\n",
+ "Epoch 2316/100000\n",
+ " - 18s - loss: 0.3267 - acc: 0.9363 - val_loss: 0.4227 - val_acc: 0.8733\n",
+ "\n",
+ "Epoch 02316: val_acc did not improve from 0.94225\n",
+ "Epoch 2317/100000\n",
+ " - 19s - loss: 0.3268 - acc: 0.9362 - val_loss: 0.3213 - val_acc: 0.9323\n",
+ "\n",
+ "Epoch 02317: val_acc did not improve from 0.94225\n",
+ "Epoch 2318/100000\n",
+ " - 18s - loss: 0.3254 - acc: 0.9362 - val_loss: 0.3268 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 02318: val_acc did not improve from 0.94225\n",
+ "Epoch 2319/100000\n",
+ " - 19s - loss: 0.3248 - acc: 0.9362 - val_loss: 0.3260 - val_acc: 0.9279\n",
+ "\n",
+ "Epoch 02319: val_acc did not improve from 0.94225\n",
+ "Epoch 2320/100000\n",
+ " - 18s - loss: 0.3230 - acc: 0.9368 - val_loss: 0.3252 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 02320: val_acc did not improve from 0.94225\n",
+ "Epoch 2321/100000\n",
+ " - 19s - loss: 0.3257 - acc: 0.9357 - val_loss: 0.3084 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 02321: val_acc did not improve from 0.94225\n",
+ "Epoch 2322/100000\n",
+ " - 19s - loss: 0.3262 - acc: 0.9355 - val_loss: 0.3238 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 02322: val_acc did not improve from 0.94225\n",
+ "Epoch 2323/100000\n",
+ " - 19s - loss: 0.3236 - acc: 0.9371 - val_loss: 0.3112 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 02323: val_acc did not improve from 0.94225\n",
+ "Epoch 2324/100000\n",
+ " - 18s - loss: 0.3230 - acc: 0.9371 - val_loss: 0.3043 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 02324: val_acc did not improve from 0.94225\n",
+ "Epoch 2325/100000\n",
+ " - 19s - loss: 0.3250 - acc: 0.9359 - val_loss: 0.3364 - val_acc: 0.9232\n",
+ "\n",
+ "Epoch 02325: val_acc did not improve from 0.94225\n",
+ "Epoch 2326/100000\n",
+ " - 19s - loss: 0.3265 - acc: 0.9364 - val_loss: 0.3259 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02326: val_acc did not improve from 0.94225\n",
+ "Epoch 2327/100000\n",
+ " - 18s - loss: 0.3257 - acc: 0.9362 - val_loss: 0.3227 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 02327: val_acc did not improve from 0.94225\n",
+ "Epoch 2328/100000\n",
+ " - 19s - loss: 0.3260 - acc: 0.9357 - val_loss: 0.3348 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 02328: val_acc did not improve from 0.94225\n",
+ "Epoch 2329/100000\n",
+ " - 18s - loss: 0.3231 - acc: 0.9369 - val_loss: 0.3246 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 02329: val_acc did not improve from 0.94225\n",
+ "Epoch 2330/100000\n",
+ " - 19s - loss: 0.3257 - acc: 0.9357 - val_loss: 0.3276 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 02330: val_acc did not improve from 0.94225\n",
+ "Epoch 2331/100000\n",
+ " - 19s - loss: 0.3261 - acc: 0.9360 - val_loss: 0.3266 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 02331: val_acc did not improve from 0.94225\n",
+ "Epoch 2332/100000\n",
+ " - 18s - loss: 0.3264 - acc: 0.9364 - val_loss: 0.3534 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 02332: val_acc did not improve from 0.94225\n",
+ "Epoch 2333/100000\n",
+ " - 19s - loss: 0.3251 - acc: 0.9368 - val_loss: 0.3533 - val_acc: 0.9096\n",
+ "\n",
+ "Epoch 02333: val_acc did not improve from 0.94225\n",
+ "Epoch 2334/100000\n",
+ " - 18s - loss: 0.3258 - acc: 0.9359 - val_loss: 0.3143 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 02334: val_acc did not improve from 0.94225\n",
+ "Epoch 2335/100000\n",
+ " - 18s - loss: 0.3267 - acc: 0.9359 - val_loss: 0.3557 - val_acc: 0.9145\n",
+ "\n",
+ "Epoch 02335: val_acc did not improve from 0.94225\n",
+ "Epoch 2336/100000\n",
+ " - 19s - loss: 0.3214 - acc: 0.9375 - val_loss: 0.3133 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02336: val_acc did not improve from 0.94225\n",
+ "Epoch 2337/100000\n",
+ " - 19s - loss: 0.3257 - acc: 0.9362 - val_loss: 0.3200 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 02337: val_acc did not improve from 0.94225\n",
+ "Epoch 2338/100000\n",
+ " - 18s - loss: 0.3242 - acc: 0.9362 - val_loss: 0.3723 - val_acc: 0.9087\n",
+ "\n",
+ "Epoch 02338: val_acc did not improve from 0.94225\n",
+ "Epoch 2339/100000\n",
+ " - 19s - loss: 0.3249 - acc: 0.9361 - val_loss: 0.3482 - val_acc: 0.9206\n",
+ "\n",
+ "Epoch 02339: val_acc did not improve from 0.94225\n",
+ "Epoch 2340/100000\n",
+ " - 19s - loss: 0.3254 - acc: 0.9363 - val_loss: 0.3215 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 02340: val_acc did not improve from 0.94225\n",
+ "Epoch 2341/100000\n",
+ " - 18s - loss: 0.3265 - acc: 0.9360 - val_loss: 0.3111 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 02341: val_acc did not improve from 0.94225\n",
+ "Epoch 2342/100000\n",
+ " - 19s - loss: 0.3239 - acc: 0.9366 - val_loss: 0.3118 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 02342: val_acc did not improve from 0.94225\n",
+ "Epoch 2343/100000\n",
+ " - 20s - loss: 0.3243 - acc: 0.9367 - val_loss: 0.3158 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 02343: val_acc did not improve from 0.94225\n",
+ "Epoch 2344/100000\n",
+ " - 19s - loss: 0.3267 - acc: 0.9359 - val_loss: 0.3128 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 02344: val_acc did not improve from 0.94225\n",
+ "Epoch 2345/100000\n",
+ " - 18s - loss: 0.3246 - acc: 0.9370 - val_loss: 0.3161 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 02345: val_acc did not improve from 0.94225\n",
+ "Epoch 2346/100000\n",
+ " - 19s - loss: 0.3221 - acc: 0.9374 - val_loss: 0.3173 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02346: val_acc did not improve from 0.94225\n",
+ "Epoch 2347/100000\n",
+ " - 19s - loss: 0.3257 - acc: 0.9362 - val_loss: 0.3353 - val_acc: 0.9237\n",
+ "\n",
+ "Epoch 02347: val_acc did not improve from 0.94225\n",
+ "Epoch 2348/100000\n",
+ " - 19s - loss: 0.3236 - acc: 0.9370 - val_loss: 0.3107 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 02348: val_acc did not improve from 0.94225\n",
+ "Epoch 2349/100000\n",
+ " - 19s - loss: 0.3250 - acc: 0.9357 - val_loss: 0.3488 - val_acc: 0.9271\n",
+ "\n",
+ "Epoch 02349: val_acc did not improve from 0.94225\n",
+ "Epoch 2350/100000\n",
+ " - 18s - loss: 0.3247 - acc: 0.9365 - val_loss: 0.3054 - val_acc: 0.9396\n",
+ "\n",
+ "Epoch 02350: val_acc did not improve from 0.94225\n",
+ "Epoch 2351/100000\n",
+ " - 19s - loss: 0.3237 - acc: 0.9369 - val_loss: 0.3227 - val_acc: 0.9279\n",
+ "\n",
+ "Epoch 02351: val_acc did not improve from 0.94225\n",
+ "Epoch 2352/100000\n",
+ " - 18s - loss: 0.3249 - acc: 0.9360 - val_loss: 0.3290 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02352: val_acc did not improve from 0.94225\n",
+ "Epoch 2353/100000\n",
+ " - 18s - loss: 0.3254 - acc: 0.9365 - val_loss: 0.3071 - val_acc: 0.9409\n",
+ "\n",
+ "Epoch 02353: val_acc did not improve from 0.94225\n",
+ "Epoch 2354/100000\n",
+ " - 18s - loss: 0.3245 - acc: 0.9365 - val_loss: 0.3834 - val_acc: 0.9012\n",
+ "\n",
+ "Epoch 02354: val_acc did not improve from 0.94225\n",
+ "Epoch 2355/100000\n",
+ " - 18s - loss: 0.3241 - acc: 0.9366 - val_loss: 0.3500 - val_acc: 0.9158\n",
+ "\n",
+ "Epoch 02355: val_acc did not improve from 0.94225\n",
+ "Epoch 2356/100000\n",
+ " - 18s - loss: 0.3258 - acc: 0.9366 - val_loss: 0.3230 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 02356: val_acc did not improve from 0.94225\n",
+ "Epoch 2357/100000\n",
+ " - 18s - loss: 0.3265 - acc: 0.9366 - val_loss: 0.3845 - val_acc: 0.9014\n",
+ "\n",
+ "Epoch 02357: val_acc did not improve from 0.94225\n",
+ "Epoch 2358/100000\n",
+ " - 19s - loss: 0.3254 - acc: 0.9368 - val_loss: 0.3185 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 02358: val_acc did not improve from 0.94225\n",
+ "Epoch 2359/100000\n",
+ " - 19s - loss: 0.3279 - acc: 0.9357 - val_loss: 0.3255 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 02359: val_acc did not improve from 0.94225\n",
+ "Epoch 2360/100000\n",
+ " - 19s - loss: 0.3230 - acc: 0.9366 - val_loss: 0.3089 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 02360: val_acc did not improve from 0.94225\n",
+ "Epoch 2361/100000\n",
+ " - 19s - loss: 0.3274 - acc: 0.9355 - val_loss: 0.3252 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 02361: val_acc did not improve from 0.94225\n",
+ "Epoch 2362/100000\n",
+ " - 18s - loss: 0.3240 - acc: 0.9369 - val_loss: 0.3307 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02362: val_acc did not improve from 0.94225\n",
+ "Epoch 2363/100000\n",
+ " - 19s - loss: 0.3254 - acc: 0.9358 - val_loss: 0.3076 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 02363: val_acc did not improve from 0.94225\n",
+ "Epoch 2364/100000\n",
+ " - 18s - loss: 0.3226 - acc: 0.9369 - val_loss: 0.3125 - val_acc: 0.9383\n",
+ "\n",
+ "Epoch 02364: val_acc did not improve from 0.94225\n",
+ "Epoch 2365/100000\n",
+ " - 19s - loss: 0.3251 - acc: 0.9369 - val_loss: 0.3119 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 02365: val_acc did not improve from 0.94225\n",
+ "Epoch 2366/100000\n",
+ " - 18s - loss: 0.3257 - acc: 0.9361 - val_loss: 0.3053 - val_acc: 0.9398\n",
+ "\n",
+ "Epoch 02366: val_acc did not improve from 0.94225\n",
+ "Epoch 2367/100000\n",
+ " - 19s - loss: 0.3239 - acc: 0.9366 - val_loss: 0.3149 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02367: val_acc did not improve from 0.94225\n",
+ "Epoch 2368/100000\n",
+ " - 18s - loss: 0.3247 - acc: 0.9362 - val_loss: 0.3220 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 02368: val_acc did not improve from 0.94225\n",
+ "Epoch 2369/100000\n",
+ " - 18s - loss: 0.3241 - acc: 0.9360 - val_loss: 0.4030 - val_acc: 0.8970\n",
+ "\n",
+ "Epoch 02369: val_acc did not improve from 0.94225\n",
+ "Epoch 2370/100000\n",
+ " - 19s - loss: 0.3237 - acc: 0.9370 - val_loss: 0.3140 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 02370: val_acc did not improve from 0.94225\n",
+ "Epoch 2371/100000\n",
+ " - 18s - loss: 0.3258 - acc: 0.9368 - val_loss: 0.3783 - val_acc: 0.8999\n",
+ "\n",
+ "Epoch 02371: val_acc did not improve from 0.94225\n",
+ "Epoch 2372/100000\n",
+ " - 18s - loss: 0.3260 - acc: 0.9358 - val_loss: 0.3245 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 02372: val_acc did not improve from 0.94225\n",
+ "Epoch 2373/100000\n",
+ " - 18s - loss: 0.3240 - acc: 0.9367 - val_loss: 0.3442 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 02373: val_acc did not improve from 0.94225\n",
+ "Epoch 2374/100000\n",
+ " - 18s - loss: 0.3256 - acc: 0.9365 - val_loss: 0.3210 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 02374: val_acc did not improve from 0.94225\n",
+ "Epoch 2375/100000\n",
+ " - 19s - loss: 0.3242 - acc: 0.9371 - val_loss: 0.4090 - val_acc: 0.8915\n",
+ "\n",
+ "Epoch 02375: val_acc did not improve from 0.94225\n",
+ "Epoch 2376/100000\n",
+ " - 19s - loss: 0.3238 - acc: 0.9366 - val_loss: 0.3131 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 02376: val_acc did not improve from 0.94225\n",
+ "Epoch 2377/100000\n",
+ " - 19s - loss: 0.3254 - acc: 0.9362 - val_loss: 0.3064 - val_acc: 0.9395\n",
+ "\n",
+ "Epoch 02377: val_acc did not improve from 0.94225\n",
+ "Epoch 2378/100000\n",
+ " - 19s - loss: 0.3244 - acc: 0.9360 - val_loss: 0.3132 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 02378: val_acc did not improve from 0.94225\n",
+ "Epoch 2379/100000\n",
+ " - 18s - loss: 0.3265 - acc: 0.9359 - val_loss: 0.3118 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 02379: val_acc did not improve from 0.94225\n",
+ "Epoch 2380/100000\n",
+ " - 19s - loss: 0.3250 - acc: 0.9368 - val_loss: 0.4455 - val_acc: 0.8694\n",
+ "\n",
+ "Epoch 02380: val_acc did not improve from 0.94225\n",
+ "Epoch 2381/100000\n",
+ " - 19s - loss: 0.3244 - acc: 0.9367 - val_loss: 0.3198 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 02381: val_acc did not improve from 0.94225\n",
+ "Epoch 2382/100000\n",
+ " - 19s - loss: 0.3260 - acc: 0.9361 - val_loss: 0.3162 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 02382: val_acc did not improve from 0.94225\n",
+ "Epoch 2383/100000\n",
+ " - 19s - loss: 0.3260 - acc: 0.9361 - val_loss: 0.3094 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 02383: val_acc did not improve from 0.94225\n",
+ "Epoch 2384/100000\n",
+ " - 19s - loss: 0.3239 - acc: 0.9370 - val_loss: 0.4408 - val_acc: 0.8690\n",
+ "\n",
+ "Epoch 02384: val_acc did not improve from 0.94225\n",
+ "Epoch 2385/100000\n",
+ " - 19s - loss: 0.3251 - acc: 0.9367 - val_loss: 0.3213 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 02385: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 02385: ReduceLROnPlateau reducing learning rate to 0.0005688000208465382.\n",
+ "Epoch 2386/100000\n",
+ " - 19s - loss: 0.3189 - acc: 0.9370 - val_loss: 0.3104 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 02386: val_acc did not improve from 0.94225\n",
+ "Epoch 2387/100000\n",
+ " - 19s - loss: 0.3187 - acc: 0.9371 - val_loss: 0.3102 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02387: val_acc did not improve from 0.94225\n",
+ "Epoch 2388/100000\n",
+ " - 18s - loss: 0.3234 - acc: 0.9357 - val_loss: 0.3174 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 02388: val_acc did not improve from 0.94225\n",
+ "Epoch 2389/100000\n",
+ " - 19s - loss: 0.3186 - acc: 0.9369 - val_loss: 0.3173 - val_acc: 0.9302\n",
+ "\n",
+ "Epoch 02389: val_acc did not improve from 0.94225\n",
+ "Epoch 2390/100000\n",
+ " - 19s - loss: 0.3207 - acc: 0.9365 - val_loss: 0.3082 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 02390: val_acc did not improve from 0.94225\n",
+ "Epoch 2391/100000\n",
+ " - 19s - loss: 0.3176 - acc: 0.9369 - val_loss: 0.3425 - val_acc: 0.9195\n",
+ "\n",
+ "Epoch 02391: val_acc did not improve from 0.94225\n",
+ "Epoch 2392/100000\n",
+ " - 19s - loss: 0.3192 - acc: 0.9362 - val_loss: 0.3515 - val_acc: 0.9172\n",
+ "\n",
+ "Epoch 02392: val_acc did not improve from 0.94225\n",
+ "Epoch 2393/100000\n",
+ " - 19s - loss: 0.3163 - acc: 0.9377 - val_loss: 0.3416 - val_acc: 0.9173\n",
+ "\n",
+ "Epoch 02393: val_acc did not improve from 0.94225\n",
+ "Epoch 2394/100000\n",
+ " - 19s - loss: 0.3180 - acc: 0.9365 - val_loss: 0.3120 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 02394: val_acc did not improve from 0.94225\n",
+ "Epoch 2395/100000\n",
+ " - 19s - loss: 0.3167 - acc: 0.9370 - val_loss: 0.3163 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 02395: val_acc did not improve from 0.94225\n",
+ "Epoch 2396/100000\n",
+ " - 19s - loss: 0.3163 - acc: 0.9374 - val_loss: 0.3053 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 02396: val_acc did not improve from 0.94225\n",
+ "Epoch 2397/100000\n",
+ " - 19s - loss: 0.3163 - acc: 0.9368 - val_loss: 0.3204 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 02397: val_acc did not improve from 0.94225\n",
+ "Epoch 2398/100000\n",
+ " - 19s - loss: 0.3176 - acc: 0.9365 - val_loss: 0.3200 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 02398: val_acc did not improve from 0.94225\n",
+ "Epoch 2399/100000\n",
+ " - 19s - loss: 0.3192 - acc: 0.9374 - val_loss: 0.3495 - val_acc: 0.9135\n",
+ "\n",
+ "Epoch 02399: val_acc did not improve from 0.94225\n",
+ "Epoch 2400/100000\n",
+ " - 18s - loss: 0.3227 - acc: 0.9357 - val_loss: 0.3385 - val_acc: 0.9238\n",
+ "\n",
+ "Epoch 02400: val_acc did not improve from 0.94225\n",
+ "Epoch 2401/100000\n",
+ " - 19s - loss: 0.3177 - acc: 0.9372 - val_loss: 0.5859 - val_acc: 0.7594\n",
+ "\n",
+ "Epoch 02401: val_acc did not improve from 0.94225\n",
+ "Epoch 2402/100000\n",
+ " - 18s - loss: 0.3167 - acc: 0.9375 - val_loss: 0.3520 - val_acc: 0.9121\n",
+ "\n",
+ "Epoch 02402: val_acc did not improve from 0.94225\n",
+ "Epoch 2403/100000\n",
+ " - 19s - loss: 0.3191 - acc: 0.9365 - val_loss: 0.3167 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 02403: val_acc did not improve from 0.94225\n",
+ "Epoch 2404/100000\n",
+ " - 19s - loss: 0.3185 - acc: 0.9367 - val_loss: 0.3530 - val_acc: 0.9152\n",
+ "\n",
+ "Epoch 02404: val_acc did not improve from 0.94225\n",
+ "Epoch 2405/100000\n",
+ " - 19s - loss: 0.3190 - acc: 0.9366 - val_loss: 0.5352 - val_acc: 0.8307\n",
+ "\n",
+ "Epoch 02405: val_acc did not improve from 0.94225\n",
+ "Epoch 2406/100000\n",
+ " - 19s - loss: 0.3207 - acc: 0.9368 - val_loss: 0.3196 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 02406: val_acc did not improve from 0.94225\n",
+ "Epoch 2407/100000\n",
+ " - 19s - loss: 0.3210 - acc: 0.9362 - val_loss: 0.3283 - val_acc: 0.9199\n",
+ "\n",
+ "Epoch 02407: val_acc did not improve from 0.94225\n",
+ "Epoch 2408/100000\n",
+ " - 19s - loss: 0.3196 - acc: 0.9366 - val_loss: 0.3149 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 02408: val_acc did not improve from 0.94225\n",
+ "Epoch 2409/100000\n",
+ " - 19s - loss: 0.3188 - acc: 0.9366 - val_loss: 0.3287 - val_acc: 0.9233\n",
+ "\n",
+ "Epoch 02409: val_acc did not improve from 0.94225\n",
+ "Epoch 2410/100000\n",
+ " - 20s - loss: 0.3207 - acc: 0.9361 - val_loss: 0.3108 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02410: val_acc did not improve from 0.94225\n",
+ "Epoch 2411/100000\n",
+ " - 19s - loss: 0.3188 - acc: 0.9369 - val_loss: 0.3259 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 02411: val_acc did not improve from 0.94225\n",
+ "Epoch 2412/100000\n",
+ " - 18s - loss: 0.3179 - acc: 0.9373 - val_loss: 0.3297 - val_acc: 0.9210\n",
+ "\n",
+ "Epoch 02412: val_acc did not improve from 0.94225\n",
+ "Epoch 2413/100000\n",
+ " - 19s - loss: 0.3207 - acc: 0.9360 - val_loss: 0.3523 - val_acc: 0.9188\n",
+ "\n",
+ "Epoch 02413: val_acc did not improve from 0.94225\n",
+ "Epoch 2414/100000\n",
+ " - 19s - loss: 0.3189 - acc: 0.9370 - val_loss: 0.3439 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 02414: val_acc did not improve from 0.94225\n",
+ "Epoch 2415/100000\n",
+ " - 18s - loss: 0.3197 - acc: 0.9366 - val_loss: 0.3244 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 02415: val_acc did not improve from 0.94225\n",
+ "Epoch 2416/100000\n",
+ " - 19s - loss: 0.3214 - acc: 0.9362 - val_loss: 0.3253 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 02416: val_acc did not improve from 0.94225\n",
+ "Epoch 2417/100000\n",
+ " - 18s - loss: 0.3189 - acc: 0.9369 - val_loss: 0.3104 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02417: val_acc did not improve from 0.94225\n",
+ "Epoch 2418/100000\n",
+ " - 19s - loss: 0.3179 - acc: 0.9370 - val_loss: 0.3061 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 02418: val_acc did not improve from 0.94225\n",
+ "Epoch 2419/100000\n",
+ " - 18s - loss: 0.3197 - acc: 0.9362 - val_loss: 0.3178 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 02419: val_acc did not improve from 0.94225\n",
+ "Epoch 2420/100000\n",
+ " - 19s - loss: 0.3219 - acc: 0.9356 - val_loss: 0.3272 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 02420: val_acc did not improve from 0.94225\n",
+ "Epoch 2421/100000\n",
+ " - 18s - loss: 0.3170 - acc: 0.9371 - val_loss: 0.5105 - val_acc: 0.8481\n",
+ "\n",
+ "Epoch 02421: val_acc did not improve from 0.94225\n",
+ "Epoch 2422/100000\n",
+ " - 19s - loss: 0.3178 - acc: 0.9370 - val_loss: 0.3109 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 02422: val_acc did not improve from 0.94225\n",
+ "Epoch 2423/100000\n",
+ " - 18s - loss: 0.3189 - acc: 0.9363 - val_loss: 0.3007 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 02423: val_acc did not improve from 0.94225\n",
+ "Epoch 2424/100000\n",
+ " - 19s - loss: 0.3183 - acc: 0.9369 - val_loss: 0.3033 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 02424: val_acc did not improve from 0.94225\n",
+ "Epoch 2425/100000\n",
+ " - 19s - loss: 0.3178 - acc: 0.9371 - val_loss: 0.3358 - val_acc: 0.9195\n",
+ "\n",
+ "Epoch 02425: val_acc did not improve from 0.94225\n",
+ "Epoch 2426/100000\n",
+ " - 19s - loss: 0.3187 - acc: 0.9367 - val_loss: 0.3137 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 02426: val_acc did not improve from 0.94225\n",
+ "Epoch 2427/100000\n",
+ " - 18s - loss: 0.3193 - acc: 0.9362 - val_loss: 0.3361 - val_acc: 0.9217\n",
+ "\n",
+ "Epoch 02427: val_acc did not improve from 0.94225\n",
+ "Epoch 2428/100000\n",
+ " - 19s - loss: 0.3202 - acc: 0.9360 - val_loss: 0.3120 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 02428: val_acc did not improve from 0.94225\n",
+ "Epoch 2429/100000\n",
+ " - 19s - loss: 0.3164 - acc: 0.9376 - val_loss: 0.3783 - val_acc: 0.8925\n",
+ "\n",
+ "Epoch 02429: val_acc did not improve from 0.94225\n",
+ "Epoch 2430/100000\n",
+ " - 18s - loss: 0.3187 - acc: 0.9373 - val_loss: 0.3086 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 02430: val_acc did not improve from 0.94225\n",
+ "Epoch 2431/100000\n",
+ " - 19s - loss: 0.3205 - acc: 0.9363 - val_loss: 0.3177 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 02431: val_acc did not improve from 0.94225\n",
+ "Epoch 2432/100000\n",
+ " - 19s - loss: 0.3164 - acc: 0.9372 - val_loss: 0.3266 - val_acc: 0.9232\n",
+ "\n",
+ "Epoch 02432: val_acc did not improve from 0.94225\n",
+ "Epoch 2433/100000\n",
+ " - 18s - loss: 0.3175 - acc: 0.9369 - val_loss: 0.3477 - val_acc: 0.9154\n",
+ "\n",
+ "Epoch 02433: val_acc did not improve from 0.94225\n",
+ "Epoch 2434/100000\n",
+ " - 18s - loss: 0.3196 - acc: 0.9361 - val_loss: 0.3883 - val_acc: 0.9255\n",
+ "\n",
+ "Epoch 02434: val_acc did not improve from 0.94225\n",
+ "Epoch 2435/100000\n",
+ " - 19s - loss: 0.3192 - acc: 0.9370 - val_loss: 0.3047 - val_acc: 0.9383\n",
+ "\n",
+ "Epoch 02435: val_acc did not improve from 0.94225\n",
+ "Epoch 2436/100000\n",
+ " - 18s - loss: 0.3188 - acc: 0.9367 - val_loss: 0.3071 - val_acc: 0.9395\n",
+ "\n",
+ "Epoch 02436: val_acc did not improve from 0.94225\n",
+ "Epoch 2437/100000\n",
+ " - 19s - loss: 0.3176 - acc: 0.9374 - val_loss: 0.3289 - val_acc: 0.9244\n",
+ "\n",
+ "Epoch 02437: val_acc did not improve from 0.94225\n",
+ "Epoch 2438/100000\n",
+ " - 19s - loss: 0.3193 - acc: 0.9367 - val_loss: 0.3056 - val_acc: 0.9398\n",
+ "\n",
+ "Epoch 02438: val_acc did not improve from 0.94225\n",
+ "Epoch 2439/100000\n",
+ " - 19s - loss: 0.3214 - acc: 0.9364 - val_loss: 0.3042 - val_acc: 0.9395\n",
+ "\n",
+ "Epoch 02439: val_acc did not improve from 0.94225\n",
+ "Epoch 2440/100000\n",
+ " - 19s - loss: 0.3187 - acc: 0.9366 - val_loss: 0.3043 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 02440: val_acc did not improve from 0.94225\n",
+ "Epoch 2441/100000\n",
+ " - 19s - loss: 0.3181 - acc: 0.9368 - val_loss: 0.3128 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 02441: val_acc did not improve from 0.94225\n",
+ "Epoch 2442/100000\n",
+ " - 19s - loss: 0.3218 - acc: 0.9355 - val_loss: 0.3077 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 02442: val_acc did not improve from 0.94225\n",
+ "Epoch 2443/100000\n",
+ " - 19s - loss: 0.3180 - acc: 0.9376 - val_loss: 0.3283 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 02450: val_acc did not improve from 0.94225\n",
+ "Epoch 2451/100000\n",
+ " - 19s - loss: 0.3184 - acc: 0.9372 - val_loss: 0.3024 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 02451: val_acc did not improve from 0.94225\n",
+ "Epoch 2452/100000\n",
+ " - 19s - loss: 0.3215 - acc: 0.9368 - val_loss: 0.3043 - val_acc: 0.9392\n",
+ "\n",
+ "Epoch 02452: val_acc did not improve from 0.94225\n",
+ "Epoch 2453/100000\n",
+ " - 18s - loss: 0.3176 - acc: 0.9372 - val_loss: 0.3059 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 02453: val_acc did not improve from 0.94225\n",
+ "Epoch 2454/100000\n",
+ " - 19s - loss: 0.3205 - acc: 0.9363 - val_loss: 0.3231 - val_acc: 0.9250\n",
+ "\n",
+ "Epoch 02454: val_acc did not improve from 0.94225\n",
+ "Epoch 2455/100000\n",
+ " - 18s - loss: 0.3179 - acc: 0.9373 - val_loss: 0.3436 - val_acc: 0.9173\n",
+ "\n",
+ "Epoch 02455: val_acc did not improve from 0.94225\n",
+ "Epoch 2456/100000\n",
+ " - 19s - loss: 0.3193 - acc: 0.9363 - val_loss: 0.3148 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 02456: val_acc did not improve from 0.94225\n",
+ "Epoch 2457/100000\n",
+ " - 18s - loss: 0.3187 - acc: 0.9369 - val_loss: 0.4035 - val_acc: 0.8955\n",
+ "\n",
+ "Epoch 02457: val_acc did not improve from 0.94225\n",
+ "Epoch 2458/100000\n",
+ " - 18s - loss: 0.3191 - acc: 0.9360 - val_loss: 0.3115 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02458: val_acc did not improve from 0.94225\n",
+ "Epoch 2459/100000\n",
+ " - 19s - loss: 0.3176 - acc: 0.9376 - val_loss: 0.3182 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 02459: val_acc did not improve from 0.94225\n",
+ "Epoch 2460/100000\n",
+ " - 19s - loss: 0.3194 - acc: 0.9360 - val_loss: 0.3146 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 02460: val_acc did not improve from 0.94225\n",
+ "Epoch 2461/100000\n",
+ " - 19s - loss: 0.3196 - acc: 0.9366 - val_loss: 0.3061 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 02461: val_acc did not improve from 0.94225\n",
+ "Epoch 2462/100000\n",
+ " - 19s - loss: 0.3198 - acc: 0.9363 - val_loss: 0.3128 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02462: val_acc did not improve from 0.94225\n",
+ "Epoch 2463/100000\n",
+ " - 19s - loss: 0.3173 - acc: 0.9372 - val_loss: 0.3250 - val_acc: 0.9272\n",
+ "\n",
+ "Epoch 02463: val_acc did not improve from 0.94225\n",
+ "Epoch 2464/100000\n",
+ " - 18s - loss: 0.3182 - acc: 0.9367 - val_loss: 0.3645 - val_acc: 0.9084\n",
+ "\n",
+ "Epoch 02464: val_acc did not improve from 0.94225\n",
+ "Epoch 2465/100000\n",
+ " - 19s - loss: 0.3159 - acc: 0.9376 - val_loss: 0.4317 - val_acc: 0.8795\n",
+ "\n",
+ "Epoch 02465: val_acc did not improve from 0.94225\n",
+ "Epoch 2466/100000\n",
+ " - 19s - loss: 0.3197 - acc: 0.9361 - val_loss: 0.3043 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 02466: val_acc did not improve from 0.94225\n",
+ "Epoch 2467/100000\n",
+ " - 18s - loss: 0.3171 - acc: 0.9376 - val_loss: 0.3035 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 02467: val_acc did not improve from 0.94225\n",
+ "Epoch 2468/100000\n",
+ " - 19s - loss: 0.3195 - acc: 0.9362 - val_loss: 0.3121 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 02468: val_acc did not improve from 0.94225\n",
+ "Epoch 2469/100000\n",
+ " - 19s - loss: 0.3179 - acc: 0.9373 - val_loss: 0.3193 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 02469: val_acc did not improve from 0.94225\n",
+ "Epoch 2470/100000\n",
+ " - 19s - loss: 0.3196 - acc: 0.9370 - val_loss: 0.3103 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 02470: val_acc did not improve from 0.94225\n",
+ "Epoch 2471/100000\n",
+ " - 19s - loss: 0.3191 - acc: 0.9361 - val_loss: 0.3274 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 02471: val_acc did not improve from 0.94225\n",
+ "Epoch 2472/100000\n",
+ " - 19s - loss: 0.3205 - acc: 0.9358 - val_loss: 0.3385 - val_acc: 0.9209\n",
+ "\n",
+ "Epoch 02472: val_acc did not improve from 0.94225\n",
+ "Epoch 2473/100000\n",
+ " - 18s - loss: 0.3213 - acc: 0.9366 - val_loss: 0.3099 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 02473: val_acc did not improve from 0.94225\n",
+ "Epoch 2474/100000\n",
+ " - 19s - loss: 0.3199 - acc: 0.9362 - val_loss: 0.3182 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 02474: val_acc did not improve from 0.94225\n",
+ "Epoch 2475/100000\n",
+ " - 19s - loss: 0.3200 - acc: 0.9373 - val_loss: 0.3374 - val_acc: 0.9257\n",
+ "\n",
+ "Epoch 02475: val_acc did not improve from 0.94225\n",
+ "Epoch 2476/100000\n",
+ " - 18s - loss: 0.3177 - acc: 0.9371 - val_loss: 0.3270 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 02476: val_acc did not improve from 0.94225\n",
+ "Epoch 2477/100000\n",
+ " - 19s - loss: 0.3177 - acc: 0.9367 - val_loss: 0.3843 - val_acc: 0.8978\n",
+ "\n",
+ "Epoch 02477: val_acc did not improve from 0.94225\n",
+ "Epoch 2478/100000\n",
+ " - 18s - loss: 0.3201 - acc: 0.9362 - val_loss: 0.3399 - val_acc: 0.9242\n",
+ "\n",
+ "Epoch 02478: val_acc did not improve from 0.94225\n",
+ "Epoch 2479/100000\n",
+ " - 19s - loss: 0.3190 - acc: 0.9363 - val_loss: 0.3342 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 02479: val_acc did not improve from 0.94225\n",
+ "Epoch 2480/100000\n",
+ " - 18s - loss: 0.3194 - acc: 0.9367 - val_loss: 0.3335 - val_acc: 0.9231\n",
+ "\n",
+ "Epoch 02480: val_acc did not improve from 0.94225\n",
+ "Epoch 2481/100000\n",
+ " - 19s - loss: 0.3179 - acc: 0.9373 - val_loss: 0.3182 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 02481: val_acc did not improve from 0.94225\n",
+ "Epoch 2482/100000\n",
+ " - 19s - loss: 0.3190 - acc: 0.9369 - val_loss: 0.3161 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02482: val_acc did not improve from 0.94225\n",
+ "Epoch 2483/100000\n",
+ " - 19s - loss: 0.3188 - acc: 0.9377 - val_loss: 0.3051 - val_acc: 0.9393\n",
+ "\n",
+ "Epoch 02483: val_acc did not improve from 0.94225\n",
+ "Epoch 2484/100000\n",
+ " - 19s - loss: 0.3180 - acc: 0.9378 - val_loss: 0.4424 - val_acc: 0.8918\n",
+ "\n",
+ "Epoch 02484: val_acc did not improve from 0.94225\n",
+ "Epoch 2485/100000\n",
+ " - 19s - loss: 0.3190 - acc: 0.9364 - val_loss: 0.3101 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 02485: val_acc did not improve from 0.94225\n",
+ "Epoch 2486/100000\n",
+ " - 19s - loss: 0.3201 - acc: 0.9355 - val_loss: 0.3231 - val_acc: 0.9279\n",
+ "\n",
+ "Epoch 02486: val_acc did not improve from 0.94225\n",
+ "Epoch 2487/100000\n",
+ " - 19s - loss: 0.3183 - acc: 0.9365 - val_loss: 0.3167 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 02487: val_acc did not improve from 0.94225\n",
+ "Epoch 2488/100000\n",
+ " - 19s - loss: 0.3175 - acc: 0.9372 - val_loss: 0.3213 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 02488: val_acc did not improve from 0.94225\n",
+ "Epoch 2489/100000\n",
+ " - 19s - loss: 0.3182 - acc: 0.9364 - val_loss: 0.3194 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 02489: val_acc did not improve from 0.94225\n",
+ "Epoch 2490/100000\n",
+ " - 19s - loss: 0.3189 - acc: 0.9366 - val_loss: 0.3181 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 02490: val_acc did not improve from 0.94225\n",
+ "Epoch 2491/100000\n",
+ " - 18s - loss: 0.3194 - acc: 0.9365 - val_loss: 0.3814 - val_acc: 0.8985\n",
+ "\n",
+ "Epoch 02491: val_acc did not improve from 0.94225\n",
+ "Epoch 2492/100000\n",
+ " - 19s - loss: 0.3226 - acc: 0.9359 - val_loss: 0.3085 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 02492: val_acc did not improve from 0.94225\n",
+ "Epoch 2493/100000\n",
+ " - 18s - loss: 0.3196 - acc: 0.9365 - val_loss: 0.3090 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 02493: val_acc did not improve from 0.94225\n",
+ "Epoch 2494/100000\n",
+ " - 19s - loss: 0.3193 - acc: 0.9369 - val_loss: 0.3331 - val_acc: 0.9209\n",
+ "\n",
+ "Epoch 02494: val_acc did not improve from 0.94225\n",
+ "Epoch 2495/100000\n",
+ " - 18s - loss: 0.3178 - acc: 0.9368 - val_loss: 0.3256 - val_acc: 0.9243\n",
+ "\n",
+ "Epoch 02495: val_acc did not improve from 0.94225\n",
+ "Epoch 2496/100000\n",
+ " - 19s - loss: 0.3161 - acc: 0.9371 - val_loss: 0.3214 - val_acc: 0.9286\n",
+ "\n",
+ "Epoch 02496: val_acc did not improve from 0.94225\n",
+ "Epoch 2497/100000\n",
+ " - 19s - loss: 0.3173 - acc: 0.9378 - val_loss: 0.3406 - val_acc: 0.9238\n",
+ "\n",
+ "Epoch 02497: val_acc did not improve from 0.94225\n",
+ "Epoch 2498/100000\n",
+ " - 19s - loss: 0.3183 - acc: 0.9369 - val_loss: 0.3650 - val_acc: 0.9019\n",
+ "\n",
+ "Epoch 02498: val_acc did not improve from 0.94225\n",
+ "Epoch 2499/100000\n",
+ " - 18s - loss: 0.3168 - acc: 0.9371 - val_loss: 0.3015 - val_acc: 0.9396\n",
+ "\n",
+ "Epoch 02499: val_acc did not improve from 0.94225\n",
+ "Epoch 2500/100000\n",
+ " - 19s - loss: 0.3189 - acc: 0.9367 - val_loss: 0.4135 - val_acc: 0.8846\n",
+ "\n",
+ "Epoch 02500: val_acc did not improve from 0.94225\n",
+ "Epoch 2501/100000\n",
+ " - 18s - loss: 0.3196 - acc: 0.9364 - val_loss: 0.3237 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 02501: val_acc did not improve from 0.94225\n",
+ "Epoch 2502/100000\n",
+ " - 19s - loss: 0.3202 - acc: 0.9360 - val_loss: 0.3122 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 02502: val_acc did not improve from 0.94225\n",
+ "Epoch 2503/100000\n",
+ " - 18s - loss: 0.3184 - acc: 0.9364 - val_loss: 0.3195 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 02503: val_acc did not improve from 0.94225\n",
+ "Epoch 2504/100000\n",
+ " - 19s - loss: 0.3182 - acc: 0.9367 - val_loss: 0.3176 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02504: val_acc did not improve from 0.94225\n",
+ "Epoch 2505/100000\n",
+ " - 18s - loss: 0.3213 - acc: 0.9364 - val_loss: 0.3554 - val_acc: 0.9137\n",
+ "\n",
+ "Epoch 02505: val_acc did not improve from 0.94225\n",
+ "Epoch 2506/100000\n",
+ " - 18s - loss: 0.3197 - acc: 0.9363 - val_loss: 0.3732 - val_acc: 0.8995\n",
+ "\n",
+ "Epoch 02506: val_acc did not improve from 0.94225\n",
+ "Epoch 2507/100000\n",
+ " - 18s - loss: 0.3192 - acc: 0.9366 - val_loss: 0.3578 - val_acc: 0.9034\n",
+ "\n",
+ "Epoch 02507: val_acc did not improve from 0.94225\n",
+ "Epoch 2508/100000\n",
+ " - 18s - loss: 0.3177 - acc: 0.9370 - val_loss: 0.3114 - val_acc: 0.9323\n",
+ "\n",
+ "Epoch 02508: val_acc did not improve from 0.94225\n",
+ "Epoch 2509/100000\n",
+ " - 19s - loss: 0.3219 - acc: 0.9358 - val_loss: 0.3391 - val_acc: 0.9202\n",
+ "\n",
+ "Epoch 02509: val_acc did not improve from 0.94225\n",
+ "Epoch 2510/100000\n",
+ " - 18s - loss: 0.3176 - acc: 0.9373 - val_loss: 0.3204 - val_acc: 0.9280\n",
+ "\n",
+ "Epoch 02510: val_acc did not improve from 0.94225\n",
+ "Epoch 2511/100000\n",
+ " - 19s - loss: 0.3175 - acc: 0.9369 - val_loss: 0.3183 - val_acc: 0.9280\n",
+ "\n",
+ "Epoch 02511: val_acc did not improve from 0.94225\n",
+ "Epoch 2512/100000\n",
+ " - 18s - loss: 0.3191 - acc: 0.9365 - val_loss: 0.3042 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02512: val_acc did not improve from 0.94225\n",
+ "Epoch 2513/100000\n",
+ " - 19s - loss: 0.3193 - acc: 0.9363 - val_loss: 0.3259 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 02513: val_acc did not improve from 0.94225\n",
+ "Epoch 2514/100000\n",
+ " - 18s - loss: 0.3186 - acc: 0.9366 - val_loss: 0.3555 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 02514: val_acc did not improve from 0.94225\n",
+ "Epoch 2515/100000\n",
+ " - 19s - loss: 0.3166 - acc: 0.9374 - val_loss: 0.3891 - val_acc: 0.8972\n",
+ "\n",
+ "Epoch 02515: val_acc did not improve from 0.94225\n",
+ "Epoch 2516/100000\n",
+ " - 19s - loss: 0.3209 - acc: 0.9362 - val_loss: 0.3199 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 02516: val_acc did not improve from 0.94225\n",
+ "Epoch 2517/100000\n",
+ " - 19s - loss: 0.3201 - acc: 0.9366 - val_loss: 0.3776 - val_acc: 0.9176\n",
+ "\n",
+ "Epoch 02517: val_acc did not improve from 0.94225\n",
+ "Epoch 2518/100000\n",
+ " - 19s - loss: 0.3172 - acc: 0.9372 - val_loss: 0.3152 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02518: val_acc did not improve from 0.94225\n",
+ "Epoch 2519/100000\n",
+ " - 18s - loss: 0.3169 - acc: 0.9375 - val_loss: 0.3149 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 02519: val_acc did not improve from 0.94225\n",
+ "Epoch 2520/100000\n",
+ " - 19s - loss: 0.3182 - acc: 0.9369 - val_loss: 0.3119 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 02520: val_acc did not improve from 0.94225\n",
+ "Epoch 2521/100000\n",
+ " - 19s - loss: 0.3189 - acc: 0.9366 - val_loss: 0.3394 - val_acc: 0.9187\n",
+ "\n",
+ "Epoch 02521: val_acc did not improve from 0.94225\n",
+ "Epoch 2522/100000\n",
+ " - 19s - loss: 0.3215 - acc: 0.9351 - val_loss: 0.3518 - val_acc: 0.9119\n",
+ "\n",
+ "Epoch 02522: val_acc did not improve from 0.94225\n",
+ "Epoch 2523/100000\n",
+ " - 19s - loss: 0.3206 - acc: 0.9362 - val_loss: 0.3932 - val_acc: 0.8890\n",
+ "\n",
+ "Epoch 02523: val_acc did not improve from 0.94225\n",
+ "Epoch 2524/100000\n",
+ " - 18s - loss: 0.3184 - acc: 0.9367 - val_loss: 0.3784 - val_acc: 0.8944\n",
+ "\n",
+ "Epoch 02524: val_acc did not improve from 0.94225\n",
+ "Epoch 2525/100000\n",
+ " - 19s - loss: 0.3189 - acc: 0.9367 - val_loss: 0.3058 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 02525: val_acc did not improve from 0.94225\n",
+ "Epoch 2526/100000\n",
+ " - 18s - loss: 0.3210 - acc: 0.9360 - val_loss: 0.3156 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 02526: val_acc did not improve from 0.94225\n",
+ "Epoch 2527/100000\n",
+ " - 19s - loss: 0.3173 - acc: 0.9369 - val_loss: 0.3069 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02527: val_acc did not improve from 0.94225\n",
+ "Epoch 2528/100000\n",
+ " - 19s - loss: 0.3178 - acc: 0.9369 - val_loss: 0.3450 - val_acc: 0.9140\n",
+ "\n",
+ "Epoch 02528: val_acc did not improve from 0.94225\n",
+ "Epoch 2529/100000\n",
+ " - 19s - loss: 0.3171 - acc: 0.9371 - val_loss: 0.3430 - val_acc: 0.9154\n",
+ "\n",
+ "Epoch 02529: val_acc did not improve from 0.94225\n",
+ "Epoch 2530/100000\n",
+ " - 18s - loss: 0.3191 - acc: 0.9364 - val_loss: 0.3154 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 02530: val_acc did not improve from 0.94225\n",
+ "Epoch 2531/100000\n",
+ " - 19s - loss: 0.3204 - acc: 0.9368 - val_loss: 0.3671 - val_acc: 0.9059\n",
+ "\n",
+ "Epoch 02531: val_acc did not improve from 0.94225\n",
+ "Epoch 2532/100000\n",
+ " - 18s - loss: 0.3197 - acc: 0.9362 - val_loss: 0.3365 - val_acc: 0.9186\n",
+ "\n",
+ "Epoch 02532: val_acc did not improve from 0.94225\n",
+ "Epoch 2533/100000\n",
+ " - 19s - loss: 0.3220 - acc: 0.9359 - val_loss: 0.3199 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 02533: val_acc did not improve from 0.94225\n",
+ "Epoch 2534/100000\n",
+ " - 18s - loss: 0.3180 - acc: 0.9367 - val_loss: 0.4454 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 02534: val_acc did not improve from 0.94225\n",
+ "Epoch 2535/100000\n",
+ " - 19s - loss: 0.3197 - acc: 0.9366 - val_loss: 0.3290 - val_acc: 0.9254\n",
+ "\n",
+ "Epoch 02535: val_acc did not improve from 0.94225\n",
+ "Epoch 2536/100000\n",
+ " - 19s - loss: 0.3204 - acc: 0.9364 - val_loss: 0.3182 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 02536: val_acc did not improve from 0.94225\n",
+ "Epoch 2537/100000\n",
+ " - 18s - loss: 0.3194 - acc: 0.9367 - val_loss: 0.3100 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02537: val_acc did not improve from 0.94225\n",
+ "Epoch 2538/100000\n",
+ " - 19s - loss: 0.3177 - acc: 0.9369 - val_loss: 0.3170 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 02538: val_acc did not improve from 0.94225\n",
+ "Epoch 2539/100000\n",
+ " - 18s - loss: 0.3212 - acc: 0.9361 - val_loss: 0.3176 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 02539: val_acc did not improve from 0.94225\n",
+ "Epoch 2540/100000\n",
+ " - 19s - loss: 0.3172 - acc: 0.9371 - val_loss: 0.3248 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 02540: val_acc did not improve from 0.94225\n",
+ "Epoch 2541/100000\n",
+ " - 18s - loss: 0.3196 - acc: 0.9362 - val_loss: 0.3491 - val_acc: 0.9134\n",
+ "\n",
+ "Epoch 02541: val_acc did not improve from 0.94225\n",
+ "Epoch 2542/100000\n",
+ " - 19s - loss: 0.3179 - acc: 0.9366 - val_loss: 0.3156 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 02542: val_acc did not improve from 0.94225\n",
+ "Epoch 2543/100000\n",
+ " - 19s - loss: 0.3209 - acc: 0.9359 - val_loss: 0.3072 - val_acc: 0.9377\n",
+ "\n",
+ "Epoch 02543: val_acc did not improve from 0.94225\n",
+ "Epoch 2544/100000\n",
+ " - 18s - loss: 0.3196 - acc: 0.9372 - val_loss: 0.3204 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 02544: val_acc did not improve from 0.94225\n",
+ "Epoch 2545/100000\n",
+ " - 19s - loss: 0.3189 - acc: 0.9372 - val_loss: 0.3655 - val_acc: 0.9094\n",
+ "\n",
+ "Epoch 02545: val_acc did not improve from 0.94225\n",
+ "Epoch 2546/100000\n",
+ " - 18s - loss: 0.3166 - acc: 0.9370 - val_loss: 0.3452 - val_acc: 0.9208\n",
+ "\n",
+ "Epoch 02546: val_acc did not improve from 0.94225\n",
+ "Epoch 2547/100000\n",
+ " - 19s - loss: 0.3183 - acc: 0.9368 - val_loss: 0.3289 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 02547: val_acc did not improve from 0.94225\n",
+ "Epoch 2548/100000\n",
+ " - 18s - loss: 0.3196 - acc: 0.9361 - val_loss: 0.3794 - val_acc: 0.9026\n",
+ "\n",
+ "Epoch 02548: val_acc did not improve from 0.94225\n",
+ "Epoch 2549/100000\n",
+ " - 19s - loss: 0.3189 - acc: 0.9367 - val_loss: 0.3261 - val_acc: 0.9262\n",
+ "\n",
+ "Epoch 02549: val_acc did not improve from 0.94225\n",
+ "Epoch 2550/100000\n",
+ " - 18s - loss: 0.3185 - acc: 0.9363 - val_loss: 0.3037 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02550: val_acc did not improve from 0.94225\n",
+ "Epoch 2551/100000\n",
+ " - 19s - loss: 0.3196 - acc: 0.9367 - val_loss: 0.3156 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 02551: val_acc did not improve from 0.94225\n",
+ "Epoch 2552/100000\n",
+ " - 19s - loss: 0.3171 - acc: 0.9372 - val_loss: 0.3165 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 02552: val_acc did not improve from 0.94225\n",
+ "Epoch 2553/100000\n",
+ " - 19s - loss: 0.3215 - acc: 0.9362 - val_loss: 0.3076 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 02553: val_acc did not improve from 0.94225\n",
+ "Epoch 2554/100000\n",
+ " - 19s - loss: 0.3209 - acc: 0.9361 - val_loss: 0.3035 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02554: val_acc did not improve from 0.94225\n",
+ "Epoch 2555/100000\n",
+ " - 19s - loss: 0.3207 - acc: 0.9368 - val_loss: 0.3473 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 02555: val_acc did not improve from 0.94225\n",
+ "Epoch 2556/100000\n",
+ " - 19s - loss: 0.3190 - acc: 0.9367 - val_loss: 0.3064 - val_acc: 0.9399\n",
+ "\n",
+ "Epoch 02556: val_acc did not improve from 0.94225\n",
+ "Epoch 2557/100000\n",
+ " - 19s - loss: 0.3168 - acc: 0.9375 - val_loss: 0.3202 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 02557: val_acc did not improve from 0.94225\n",
+ "Epoch 2558/100000\n",
+ " - 19s - loss: 0.3183 - acc: 0.9366 - val_loss: 0.3630 - val_acc: 0.9061\n",
+ "\n",
+ "Epoch 02558: val_acc did not improve from 0.94225\n",
+ "Epoch 2559/100000\n",
+ " - 18s - loss: 0.3177 - acc: 0.9366 - val_loss: 0.3669 - val_acc: 0.8989\n",
+ "\n",
+ "Epoch 02559: val_acc did not improve from 0.94225\n",
+ "Epoch 2560/100000\n",
+ " - 19s - loss: 0.3197 - acc: 0.9366 - val_loss: 0.3121 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 02560: val_acc did not improve from 0.94225\n",
+ "Epoch 2561/100000\n",
+ " - 18s - loss: 0.3185 - acc: 0.9368 - val_loss: 0.3179 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 02561: val_acc did not improve from 0.94225\n",
+ "Epoch 2562/100000\n",
+ " - 19s - loss: 0.3210 - acc: 0.9360 - val_loss: 0.4296 - val_acc: 0.8792\n",
+ "\n",
+ "Epoch 02562: val_acc did not improve from 0.94225\n",
+ "Epoch 2563/100000\n",
+ " - 18s - loss: 0.3189 - acc: 0.9368 - val_loss: 0.3085 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 02563: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 02563: ReduceLROnPlateau reducing learning rate to 0.0005403600225690752.\n",
+ "Epoch 2564/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9368 - val_loss: 0.3077 - val_acc: 0.9390\n",
+ "\n",
+ "Epoch 02564: val_acc did not improve from 0.94225\n",
+ "Epoch 2565/100000\n",
+ " - 19s - loss: 0.3121 - acc: 0.9376 - val_loss: 0.3070 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02565: val_acc did not improve from 0.94225\n",
+ "Epoch 2566/100000\n",
+ " - 18s - loss: 0.3140 - acc: 0.9365 - val_loss: 0.3828 - val_acc: 0.8967\n",
+ "\n",
+ "Epoch 02566: val_acc did not improve from 0.94225\n",
+ "Epoch 2567/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9370 - val_loss: 0.3054 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 02567: val_acc did not improve from 0.94225\n",
+ "Epoch 2568/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9374 - val_loss: 0.3100 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 02568: val_acc did not improve from 0.94225\n",
+ "Epoch 2569/100000\n",
+ " - 19s - loss: 0.3123 - acc: 0.9371 - val_loss: 0.2991 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02569: val_acc did not improve from 0.94225\n",
+ "Epoch 2570/100000\n",
+ " - 19s - loss: 0.3111 - acc: 0.9373 - val_loss: 0.3394 - val_acc: 0.9198\n",
+ "\n",
+ "Epoch 02570: val_acc did not improve from 0.94225\n",
+ "Epoch 2571/100000\n",
+ " - 19s - loss: 0.3141 - acc: 0.9364 - val_loss: 0.3048 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 02571: val_acc did not improve from 0.94225\n",
+ "Epoch 2572/100000\n",
+ " - 18s - loss: 0.3109 - acc: 0.9376 - val_loss: 0.5494 - val_acc: 0.8317\n",
+ "\n",
+ "Epoch 02572: val_acc did not improve from 0.94225\n",
+ "Epoch 2573/100000\n",
+ " - 19s - loss: 0.3132 - acc: 0.9365 - val_loss: 0.3078 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 02573: val_acc did not improve from 0.94225\n",
+ "Epoch 2574/100000\n",
+ " - 19s - loss: 0.3165 - acc: 0.9360 - val_loss: 0.2965 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 02574: val_acc did not improve from 0.94225\n",
+ "Epoch 2575/100000\n",
+ " - 18s - loss: 0.3119 - acc: 0.9374 - val_loss: 0.3079 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 02575: val_acc did not improve from 0.94225\n",
+ "Epoch 2576/100000\n",
+ " - 19s - loss: 0.3110 - acc: 0.9376 - val_loss: 0.3965 - val_acc: 0.8843\n",
+ "\n",
+ "Epoch 02576: val_acc did not improve from 0.94225\n",
+ "Epoch 2577/100000\n",
+ " - 19s - loss: 0.3140 - acc: 0.9368 - val_loss: 0.3188 - val_acc: 0.9256\n",
+ "\n",
+ "Epoch 02577: val_acc did not improve from 0.94225\n",
+ "Epoch 2578/100000\n",
+ " - 19s - loss: 0.3134 - acc: 0.9368 - val_loss: 0.3076 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 02578: val_acc did not improve from 0.94225\n",
+ "Epoch 2579/100000\n",
+ " - 19s - loss: 0.3133 - acc: 0.9365 - val_loss: 0.3092 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02579: val_acc did not improve from 0.94225\n",
+ "Epoch 2580/100000\n",
+ " - 19s - loss: 0.3134 - acc: 0.9371 - val_loss: 0.3713 - val_acc: 0.8988\n",
+ "\n",
+ "Epoch 02580: val_acc did not improve from 0.94225\n",
+ "Epoch 2581/100000\n",
+ " - 19s - loss: 0.3121 - acc: 0.9370 - val_loss: 0.3088 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 02581: val_acc did not improve from 0.94225\n",
+ "Epoch 2582/100000\n",
+ " - 19s - loss: 0.3156 - acc: 0.9365 - val_loss: 0.3302 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 02582: val_acc did not improve from 0.94225\n",
+ "Epoch 2583/100000\n",
+ " - 19s - loss: 0.3133 - acc: 0.9378 - val_loss: 0.3162 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 02583: val_acc did not improve from 0.94225\n",
+ "Epoch 2584/100000\n",
+ " - 19s - loss: 0.3135 - acc: 0.9373 - val_loss: 0.3061 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 02584: val_acc did not improve from 0.94225\n",
+ "Epoch 2585/100000\n",
+ " - 19s - loss: 0.3131 - acc: 0.9372 - val_loss: 0.3279 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 02585: val_acc did not improve from 0.94225\n",
+ "Epoch 2586/100000\n",
+ " - 19s - loss: 0.3137 - acc: 0.9363 - val_loss: 0.3726 - val_acc: 0.9076\n",
+ "\n",
+ "Epoch 02586: val_acc did not improve from 0.94225\n",
+ "Epoch 2587/100000\n",
+ " - 19s - loss: 0.3115 - acc: 0.9371 - val_loss: 0.3067 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 02587: val_acc did not improve from 0.94225\n",
+ "Epoch 2588/100000\n",
+ " - 19s - loss: 0.3136 - acc: 0.9365 - val_loss: 0.3073 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02588: val_acc did not improve from 0.94225\n",
+ "Epoch 2589/100000\n",
+ " - 18s - loss: 0.3128 - acc: 0.9370 - val_loss: 0.3091 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 02589: val_acc did not improve from 0.94225\n",
+ "Epoch 2590/100000\n",
+ " - 19s - loss: 0.3132 - acc: 0.9368 - val_loss: 0.5209 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 02590: val_acc did not improve from 0.94225\n",
+ "Epoch 2591/100000\n",
+ " - 19s - loss: 0.3120 - acc: 0.9366 - val_loss: 0.6249 - val_acc: 0.7502\n",
+ "\n",
+ "Epoch 02591: val_acc did not improve from 0.94225\n",
+ "Epoch 2592/100000\n",
+ " - 18s - loss: 0.3107 - acc: 0.9376 - val_loss: 0.3094 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 02592: val_acc did not improve from 0.94225\n",
+ "Epoch 2593/100000\n",
+ " - 18s - loss: 0.3150 - acc: 0.9362 - val_loss: 0.3023 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02593: val_acc did not improve from 0.94225\n",
+ "Epoch 2594/100000\n",
+ " - 18s - loss: 0.3139 - acc: 0.9366 - val_loss: 0.3176 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 02594: val_acc did not improve from 0.94225\n",
+ "Epoch 2595/100000\n",
+ " - 19s - loss: 0.3120 - acc: 0.9364 - val_loss: 0.3192 - val_acc: 0.9260\n",
+ "\n",
+ "Epoch 02595: val_acc did not improve from 0.94225\n",
+ "Epoch 2596/100000\n",
+ " - 19s - loss: 0.3132 - acc: 0.9368 - val_loss: 0.2980 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 02596: val_acc did not improve from 0.94225\n",
+ "Epoch 2597/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9370 - val_loss: 0.2959 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 02597: val_acc did not improve from 0.94225\n",
+ "Epoch 2598/100000\n",
+ " - 19s - loss: 0.3108 - acc: 0.9373 - val_loss: 0.3049 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 02598: val_acc did not improve from 0.94225\n",
+ "Epoch 2599/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9372 - val_loss: 0.2989 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 02599: val_acc did not improve from 0.94225\n",
+ "Epoch 2600/100000\n",
+ " - 18s - loss: 0.3112 - acc: 0.9376 - val_loss: 0.2944 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 02600: val_acc did not improve from 0.94225\n",
+ "Epoch 2601/100000\n",
+ " - 19s - loss: 0.3116 - acc: 0.9372 - val_loss: 0.3588 - val_acc: 0.9061\n",
+ "\n",
+ "Epoch 02601: val_acc did not improve from 0.94225\n",
+ "Epoch 2602/100000\n",
+ " - 19s - loss: 0.3130 - acc: 0.9370 - val_loss: 0.2978 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 02602: val_acc did not improve from 0.94225\n",
+ "Epoch 2603/100000\n",
+ " - 18s - loss: 0.3159 - acc: 0.9373 - val_loss: 0.3515 - val_acc: 0.9122\n",
+ "\n",
+ "Epoch 02603: val_acc did not improve from 0.94225\n",
+ "Epoch 2604/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9372 - val_loss: 0.3017 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02604: val_acc did not improve from 0.94225\n",
+ "Epoch 2605/100000\n",
+ " - 18s - loss: 0.3120 - acc: 0.9372 - val_loss: 0.3148 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 02605: val_acc did not improve from 0.94225\n",
+ "Epoch 2606/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9374 - val_loss: 0.3068 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 02606: val_acc did not improve from 0.94225\n",
+ "Epoch 2607/100000\n",
+ " - 19s - loss: 0.3121 - acc: 0.9372 - val_loss: 0.2954 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 02607: val_acc did not improve from 0.94225\n",
+ "Epoch 2608/100000\n",
+ " - 18s - loss: 0.3105 - acc: 0.9374 - val_loss: 0.3119 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 02608: val_acc did not improve from 0.94225\n",
+ "Epoch 2609/100000\n",
+ " - 19s - loss: 0.3158 - acc: 0.9353 - val_loss: 0.3030 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 02609: val_acc did not improve from 0.94225\n",
+ "Epoch 2610/100000\n",
+ " - 18s - loss: 0.3121 - acc: 0.9368 - val_loss: 0.3086 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02610: val_acc did not improve from 0.94225\n",
+ "Epoch 2611/100000\n",
+ " - 18s - loss: 0.3125 - acc: 0.9369 - val_loss: 0.3218 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02611: val_acc did not improve from 0.94225\n",
+ "Epoch 2612/100000\n",
+ " - 19s - loss: 0.3131 - acc: 0.9369 - val_loss: 0.3096 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 02612: val_acc did not improve from 0.94225\n",
+ "Epoch 2613/100000\n",
+ " - 18s - loss: 0.3118 - acc: 0.9372 - val_loss: 0.2978 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 02613: val_acc did not improve from 0.94225\n",
+ "Epoch 2614/100000\n",
+ " - 19s - loss: 0.3108 - acc: 0.9369 - val_loss: 0.3082 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 02614: val_acc did not improve from 0.94225\n",
+ "Epoch 2615/100000\n",
+ " - 18s - loss: 0.3126 - acc: 0.9369 - val_loss: 0.2999 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 02615: val_acc did not improve from 0.94225\n",
+ "Epoch 2616/100000\n",
+ " - 19s - loss: 0.3115 - acc: 0.9371 - val_loss: 0.3155 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 02616: val_acc did not improve from 0.94225\n",
+ "Epoch 2617/100000\n",
+ " - 18s - loss: 0.3120 - acc: 0.9371 - val_loss: 0.4312 - val_acc: 0.8804\n",
+ "\n",
+ "Epoch 02617: val_acc did not improve from 0.94225\n",
+ "Epoch 2618/100000\n",
+ " - 19s - loss: 0.3120 - acc: 0.9372 - val_loss: 0.3158 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 02618: val_acc did not improve from 0.94225\n",
+ "Epoch 2619/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9365 - val_loss: 0.3003 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 02619: val_acc did not improve from 0.94225\n",
+ "Epoch 2620/100000\n",
+ " - 19s - loss: 0.3121 - acc: 0.9371 - val_loss: 0.3935 - val_acc: 0.9166\n",
+ "\n",
+ "Epoch 02620: val_acc did not improve from 0.94225\n",
+ "Epoch 2621/100000\n",
+ " - 18s - loss: 0.3142 - acc: 0.9368 - val_loss: 0.3503 - val_acc: 0.9100\n",
+ "\n",
+ "Epoch 02621: val_acc did not improve from 0.94225\n",
+ "Epoch 2622/100000\n",
+ " - 19s - loss: 0.3137 - acc: 0.9369 - val_loss: 0.3128 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 02622: val_acc did not improve from 0.94225\n",
+ "Epoch 2623/100000\n",
+ " - 19s - loss: 0.3098 - acc: 0.9375 - val_loss: 0.3708 - val_acc: 0.9012\n",
+ "\n",
+ "Epoch 02623: val_acc did not improve from 0.94225\n",
+ "Epoch 2624/100000\n",
+ " - 19s - loss: 0.3141 - acc: 0.9364 - val_loss: 0.3036 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02624: val_acc did not improve from 0.94225\n",
+ "Epoch 2625/100000\n",
+ " - 18s - loss: 0.3127 - acc: 0.9367 - val_loss: 0.3130 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 02625: val_acc did not improve from 0.94225\n",
+ "Epoch 2626/100000\n",
+ " - 19s - loss: 0.3141 - acc: 0.9367 - val_loss: 0.3643 - val_acc: 0.9112\n",
+ "\n",
+ "Epoch 02626: val_acc did not improve from 0.94225\n",
+ "Epoch 2627/100000\n",
+ " - 19s - loss: 0.3121 - acc: 0.9370 - val_loss: 0.3113 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 02627: val_acc did not improve from 0.94225\n",
+ "Epoch 2628/100000\n",
+ " - 19s - loss: 0.3121 - acc: 0.9364 - val_loss: 0.4512 - val_acc: 0.8644\n",
+ "\n",
+ "Epoch 02628: val_acc did not improve from 0.94225\n",
+ "Epoch 2629/100000\n",
+ " - 18s - loss: 0.3162 - acc: 0.9366 - val_loss: 0.3039 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 02629: val_acc did not improve from 0.94225\n",
+ "Epoch 2630/100000\n",
+ " - 19s - loss: 0.3122 - acc: 0.9373 - val_loss: 0.3103 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 02630: val_acc did not improve from 0.94225\n",
+ "Epoch 2631/100000\n",
+ " - 18s - loss: 0.3133 - acc: 0.9367 - val_loss: 0.3025 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 02631: val_acc did not improve from 0.94225\n",
+ "Epoch 2632/100000\n",
+ " - 19s - loss: 0.3113 - acc: 0.9368 - val_loss: 0.3606 - val_acc: 0.9054\n",
+ "\n",
+ "Epoch 02632: val_acc did not improve from 0.94225\n",
+ "Epoch 2633/100000\n",
+ " - 18s - loss: 0.3122 - acc: 0.9368 - val_loss: 0.3255 - val_acc: 0.9247\n",
+ "\n",
+ "Epoch 02633: val_acc did not improve from 0.94225\n",
+ "Epoch 2634/100000\n",
+ " - 19s - loss: 0.3135 - acc: 0.9365 - val_loss: 0.3147 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 02634: val_acc did not improve from 0.94225\n",
+ "Epoch 2635/100000\n",
+ " - 18s - loss: 0.3130 - acc: 0.9368 - val_loss: 0.3008 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 02635: val_acc did not improve from 0.94225\n",
+ "Epoch 2636/100000\n",
+ " - 19s - loss: 0.3129 - acc: 0.9374 - val_loss: 0.3185 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 02636: val_acc did not improve from 0.94225\n",
+ "Epoch 2637/100000\n",
+ " - 19s - loss: 0.3120 - acc: 0.9372 - val_loss: 0.3793 - val_acc: 0.9158\n",
+ "\n",
+ "Epoch 02637: val_acc did not improve from 0.94225\n",
+ "Epoch 2638/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9375 - val_loss: 0.3137 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 02638: val_acc did not improve from 0.94225\n",
+ "Epoch 2639/100000\n",
+ " - 20s - loss: 0.3122 - acc: 0.9370 - val_loss: 0.3182 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 02639: val_acc did not improve from 0.94225\n",
+ "Epoch 2640/100000\n",
+ " - 19s - loss: 0.3124 - acc: 0.9374 - val_loss: 0.3005 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 02640: val_acc did not improve from 0.94225\n",
+ "Epoch 2641/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9362 - val_loss: 0.3311 - val_acc: 0.9202\n",
+ "\n",
+ "Epoch 02641: val_acc did not improve from 0.94225\n",
+ "Epoch 2642/100000\n",
+ " - 19s - loss: 0.3111 - acc: 0.9379 - val_loss: 0.3676 - val_acc: 0.9032\n",
+ "\n",
+ "Epoch 02642: val_acc did not improve from 0.94225\n",
+ "Epoch 2643/100000\n",
+ " - 19s - loss: 0.3137 - acc: 0.9372 - val_loss: 0.7344 - val_acc: 0.6719\n",
+ "\n",
+ "Epoch 02643: val_acc did not improve from 0.94225\n",
+ "Epoch 2644/100000\n",
+ " - 19s - loss: 0.3141 - acc: 0.9370 - val_loss: 0.3049 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 02644: val_acc did not improve from 0.94225\n",
+ "Epoch 2645/100000\n",
+ " - 18s - loss: 0.3141 - acc: 0.9368 - val_loss: 0.3411 - val_acc: 0.9197\n",
+ "\n",
+ "Epoch 02645: val_acc did not improve from 0.94225\n",
+ "Epoch 2646/100000\n",
+ " - 19s - loss: 0.3138 - acc: 0.9371 - val_loss: 0.3001 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 02646: val_acc did not improve from 0.94225\n",
+ "Epoch 2647/100000\n",
+ " - 18s - loss: 0.3143 - acc: 0.9366 - val_loss: 0.3471 - val_acc: 0.9083\n",
+ "\n",
+ "Epoch 02647: val_acc did not improve from 0.94225\n",
+ "Epoch 2648/100000\n",
+ " - 19s - loss: 0.3142 - acc: 0.9372 - val_loss: 0.3309 - val_acc: 0.9200\n",
+ "\n",
+ "Epoch 02648: val_acc did not improve from 0.94225\n",
+ "Epoch 2649/100000\n",
+ " - 19s - loss: 0.3145 - acc: 0.9364 - val_loss: 0.3716 - val_acc: 0.8990\n",
+ "\n",
+ "Epoch 02649: val_acc did not improve from 0.94225\n",
+ "Epoch 2650/100000\n",
+ " - 18s - loss: 0.3123 - acc: 0.9371 - val_loss: 0.2940 - val_acc: 0.9398\n",
+ "\n",
+ "Epoch 02650: val_acc did not improve from 0.94225\n",
+ "Epoch 2651/100000\n",
+ " - 19s - loss: 0.3115 - acc: 0.9373 - val_loss: 0.3113 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 02651: val_acc did not improve from 0.94225\n",
+ "Epoch 2652/100000\n",
+ " - 18s - loss: 0.3140 - acc: 0.9367 - val_loss: 0.3311 - val_acc: 0.9203\n",
+ "\n",
+ "Epoch 02652: val_acc did not improve from 0.94225\n",
+ "Epoch 2653/100000\n",
+ " - 19s - loss: 0.3143 - acc: 0.9365 - val_loss: 0.3049 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02653: val_acc did not improve from 0.94225\n",
+ "Epoch 2654/100000\n",
+ " - 19s - loss: 0.3147 - acc: 0.9370 - val_loss: 0.3075 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 02654: val_acc did not improve from 0.94225\n",
+ "Epoch 2655/100000\n",
+ " - 18s - loss: 0.3122 - acc: 0.9377 - val_loss: 0.3035 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 02655: val_acc did not improve from 0.94225\n",
+ "Epoch 2656/100000\n",
+ " - 19s - loss: 0.3131 - acc: 0.9370 - val_loss: 0.3209 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 02656: val_acc did not improve from 0.94225\n",
+ "Epoch 2657/100000\n",
+ " - 19s - loss: 0.3113 - acc: 0.9373 - val_loss: 0.3175 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 02657: val_acc did not improve from 0.94225\n",
+ "Epoch 2658/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9367 - val_loss: 0.3135 - val_acc: 0.9248\n",
+ "\n",
+ "Epoch 02658: val_acc did not improve from 0.94225\n",
+ "Epoch 2659/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9369 - val_loss: 0.3038 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02659: val_acc did not improve from 0.94225\n",
+ "Epoch 2660/100000\n",
+ " - 19s - loss: 0.3110 - acc: 0.9373 - val_loss: 0.3099 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 02660: val_acc did not improve from 0.94225\n",
+ "Epoch 2661/100000\n",
+ " - 19s - loss: 0.3146 - acc: 0.9365 - val_loss: 0.3639 - val_acc: 0.9112\n",
+ "\n",
+ "Epoch 02661: val_acc did not improve from 0.94225\n",
+ "Epoch 2662/100000\n",
+ " - 19s - loss: 0.3133 - acc: 0.9371 - val_loss: 0.3746 - val_acc: 0.8950\n",
+ "\n",
+ "Epoch 02662: val_acc did not improve from 0.94225\n",
+ "Epoch 2663/100000\n",
+ " - 18s - loss: 0.3132 - acc: 0.9365 - val_loss: 0.3088 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 02663: val_acc did not improve from 0.94225\n",
+ "Epoch 2664/100000\n",
+ " - 18s - loss: 0.3127 - acc: 0.9367 - val_loss: 0.3138 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 02664: val_acc did not improve from 0.94225\n",
+ "Epoch 2665/100000\n",
+ " - 18s - loss: 0.3160 - acc: 0.9357 - val_loss: 0.3482 - val_acc: 0.9118\n",
+ "\n",
+ "Epoch 02665: val_acc did not improve from 0.94225\n",
+ "Epoch 2666/100000\n",
+ " - 18s - loss: 0.3130 - acc: 0.9370 - val_loss: 0.3096 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 02666: val_acc did not improve from 0.94225\n",
+ "Epoch 2667/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9370 - val_loss: 0.2990 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 02667: val_acc did not improve from 0.94225\n",
+ "Epoch 2668/100000\n",
+ " - 19s - loss: 0.3101 - acc: 0.9377 - val_loss: 0.3125 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 02668: val_acc did not improve from 0.94225\n",
+ "Epoch 2669/100000\n",
+ " - 19s - loss: 0.3144 - acc: 0.9373 - val_loss: 0.3016 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 02669: val_acc did not improve from 0.94225\n",
+ "Epoch 2670/100000\n",
+ " - 18s - loss: 0.3123 - acc: 0.9369 - val_loss: 0.2944 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 02670: val_acc did not improve from 0.94225\n",
+ "Epoch 2671/100000\n",
+ " - 19s - loss: 0.3124 - acc: 0.9373 - val_loss: 0.3081 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 02671: val_acc did not improve from 0.94225\n",
+ "Epoch 2672/100000\n",
+ " - 19s - loss: 0.3141 - acc: 0.9366 - val_loss: 0.3307 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 02672: val_acc did not improve from 0.94225\n",
+ "Epoch 2673/100000\n",
+ " - 18s - loss: 0.3127 - acc: 0.9365 - val_loss: 0.3490 - val_acc: 0.9096\n",
+ "\n",
+ "Epoch 02673: val_acc did not improve from 0.94225\n",
+ "Epoch 2674/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9373 - val_loss: 0.3054 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 02674: val_acc did not improve from 0.94225\n",
+ "Epoch 2675/100000\n",
+ " - 18s - loss: 0.3116 - acc: 0.9374 - val_loss: 0.3452 - val_acc: 0.9199\n",
+ "\n",
+ "Epoch 02675: val_acc did not improve from 0.94225\n",
+ "Epoch 2676/100000\n",
+ " - 19s - loss: 0.3150 - acc: 0.9362 - val_loss: 0.3278 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 02676: val_acc did not improve from 0.94225\n",
+ "Epoch 2677/100000\n",
+ " - 18s - loss: 0.3121 - acc: 0.9374 - val_loss: 0.3131 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 02677: val_acc did not improve from 0.94225\n",
+ "Epoch 2678/100000\n",
+ " - 19s - loss: 0.3154 - acc: 0.9355 - val_loss: 0.3480 - val_acc: 0.9079\n",
+ "\n",
+ "Epoch 02678: val_acc did not improve from 0.94225\n",
+ "Epoch 2679/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9367 - val_loss: 0.3817 - val_acc: 0.8891\n",
+ "\n",
+ "Epoch 02679: val_acc did not improve from 0.94225\n",
+ "Epoch 2680/100000\n",
+ " - 18s - loss: 0.3127 - acc: 0.9369 - val_loss: 0.3378 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 02680: val_acc did not improve from 0.94225\n",
+ "Epoch 2681/100000\n",
+ " - 18s - loss: 0.3127 - acc: 0.9365 - val_loss: 0.3057 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 02681: val_acc did not improve from 0.94225\n",
+ "Epoch 2682/100000\n",
+ " - 18s - loss: 0.3143 - acc: 0.9360 - val_loss: 0.3170 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 02682: val_acc did not improve from 0.94225\n",
+ "Epoch 2683/100000\n",
+ " - 18s - loss: 0.3161 - acc: 0.9363 - val_loss: 0.2972 - val_acc: 0.9402\n",
+ "\n",
+ "Epoch 02683: val_acc did not improve from 0.94225\n",
+ "Epoch 2684/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9374 - val_loss: 0.3190 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 02684: val_acc did not improve from 0.94225\n",
+ "Epoch 2685/100000\n",
+ " - 18s - loss: 0.3135 - acc: 0.9370 - val_loss: 0.3055 - val_acc: 0.9409\n",
+ "\n",
+ "Epoch 02685: val_acc did not improve from 0.94225\n",
+ "Epoch 2686/100000\n",
+ " - 19s - loss: 0.3142 - acc: 0.9377 - val_loss: 0.3093 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 02686: val_acc did not improve from 0.94225\n",
+ "Epoch 2687/100000\n",
+ " - 18s - loss: 0.3141 - acc: 0.9369 - val_loss: 0.3203 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 02687: val_acc did not improve from 0.94225\n",
+ "Epoch 2688/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9364 - val_loss: 0.3156 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 02688: val_acc did not improve from 0.94225\n",
+ "Epoch 2689/100000\n",
+ " - 18s - loss: 0.3125 - acc: 0.9372 - val_loss: 0.3189 - val_acc: 0.9266\n",
+ "\n",
+ "Epoch 02689: val_acc did not improve from 0.94225\n",
+ "Epoch 2690/100000\n",
+ " - 19s - loss: 0.3124 - acc: 0.9372 - val_loss: 0.2932 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 02690: val_acc did not improve from 0.94225\n",
+ "Epoch 2691/100000\n",
+ " - 19s - loss: 0.3129 - acc: 0.9367 - val_loss: 0.3119 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 02691: val_acc did not improve from 0.94225\n",
+ "Epoch 2692/100000\n",
+ " - 18s - loss: 0.3148 - acc: 0.9366 - val_loss: 0.3048 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02692: val_acc did not improve from 0.94225\n",
+ "Epoch 2693/100000\n",
+ " - 19s - loss: 0.3117 - acc: 0.9373 - val_loss: 0.3069 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 02693: val_acc did not improve from 0.94225\n",
+ "Epoch 2694/100000\n",
+ " - 18s - loss: 0.3115 - acc: 0.9375 - val_loss: 0.3023 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02694: val_acc did not improve from 0.94225\n",
+ "Epoch 2695/100000\n",
+ " - 18s - loss: 0.3120 - acc: 0.9373 - val_loss: 0.3479 - val_acc: 0.9083\n",
+ "\n",
+ "Epoch 02695: val_acc did not improve from 0.94225\n",
+ "Epoch 2696/100000\n",
+ " - 19s - loss: 0.3158 - acc: 0.9364 - val_loss: 0.3216 - val_acc: 0.9239\n",
+ "\n",
+ "Epoch 02696: val_acc did not improve from 0.94225\n",
+ "Epoch 2697/100000\n",
+ " - 18s - loss: 0.3125 - acc: 0.9369 - val_loss: 0.3159 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 02697: val_acc did not improve from 0.94225\n",
+ "Epoch 2698/100000\n",
+ " - 19s - loss: 0.3117 - acc: 0.9376 - val_loss: 0.3066 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 02698: val_acc did not improve from 0.94225\n",
+ "Epoch 2699/100000\n",
+ " - 19s - loss: 0.3129 - acc: 0.9367 - val_loss: 0.2983 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 02699: val_acc did not improve from 0.94225\n",
+ "Epoch 2700/100000\n",
+ " - 18s - loss: 0.3118 - acc: 0.9372 - val_loss: 0.3095 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 02700: val_acc did not improve from 0.94225\n",
+ "Epoch 2701/100000\n",
+ " - 19s - loss: 0.3114 - acc: 0.9372 - val_loss: 0.3352 - val_acc: 0.9201\n",
+ "\n",
+ "Epoch 02701: val_acc did not improve from 0.94225\n",
+ "Epoch 2702/100000\n",
+ " - 19s - loss: 0.3135 - acc: 0.9366 - val_loss: 0.3016 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 02702: val_acc did not improve from 0.94225\n",
+ "Epoch 2703/100000\n",
+ " - 19s - loss: 0.3130 - acc: 0.9369 - val_loss: 0.3087 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 02703: val_acc did not improve from 0.94225\n",
+ "Epoch 2704/100000\n",
+ " - 19s - loss: 0.3134 - acc: 0.9368 - val_loss: 0.3540 - val_acc: 0.9023\n",
+ "\n",
+ "Epoch 02704: val_acc did not improve from 0.94225\n",
+ "Epoch 2705/100000\n",
+ " - 19s - loss: 0.3130 - acc: 0.9367 - val_loss: 0.3097 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 02705: val_acc did not improve from 0.94225\n",
+ "Epoch 2706/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9371 - val_loss: 0.3196 - val_acc: 0.9262\n",
+ "\n",
+ "Epoch 02706: val_acc did not improve from 0.94225\n",
+ "Epoch 2707/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9375 - val_loss: 0.3009 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 02707: val_acc did not improve from 0.94225\n",
+ "Epoch 2708/100000\n",
+ " - 18s - loss: 0.3111 - acc: 0.9375 - val_loss: 0.2966 - val_acc: 0.9406\n",
+ "\n",
+ "Epoch 02708: val_acc did not improve from 0.94225\n",
+ "Epoch 2709/100000\n",
+ " - 19s - loss: 0.3114 - acc: 0.9373 - val_loss: 0.3487 - val_acc: 0.9159\n",
+ "\n",
+ "Epoch 02709: val_acc did not improve from 0.94225\n",
+ "Epoch 2710/100000\n",
+ " - 18s - loss: 0.3149 - acc: 0.9361 - val_loss: 0.3101 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02710: val_acc did not improve from 0.94225\n",
+ "Epoch 2711/100000\n",
+ " - 19s - loss: 0.3138 - acc: 0.9370 - val_loss: 0.3126 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 02711: val_acc did not improve from 0.94225\n",
+ "Epoch 2712/100000\n",
+ " - 18s - loss: 0.3115 - acc: 0.9372 - val_loss: 0.3130 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02712: val_acc did not improve from 0.94225\n",
+ "Epoch 2713/100000\n",
+ " - 18s - loss: 0.3128 - acc: 0.9367 - val_loss: 0.3274 - val_acc: 0.9228\n",
+ "\n",
+ "Epoch 02713: val_acc did not improve from 0.94225\n",
+ "Epoch 2714/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9373 - val_loss: 0.3132 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 02714: val_acc did not improve from 0.94225\n",
+ "Epoch 2715/100000\n",
+ " - 19s - loss: 0.3175 - acc: 0.9369 - val_loss: 0.3029 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 02715: val_acc did not improve from 0.94225\n",
+ "Epoch 2716/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9375 - val_loss: 0.2987 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 02716: val_acc did not improve from 0.94225\n",
+ "Epoch 2717/100000\n",
+ " - 18s - loss: 0.3134 - acc: 0.9369 - val_loss: 0.3246 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 02717: val_acc did not improve from 0.94225\n",
+ "Epoch 2718/100000\n",
+ " - 19s - loss: 0.3141 - acc: 0.9370 - val_loss: 0.3520 - val_acc: 0.9012\n",
+ "\n",
+ "Epoch 02718: val_acc did not improve from 0.94225\n",
+ "Epoch 2719/100000\n",
+ " - 19s - loss: 0.3112 - acc: 0.9374 - val_loss: 0.2955 - val_acc: 0.9410\n",
+ "\n",
+ "Epoch 02719: val_acc did not improve from 0.94225\n",
+ "Epoch 2720/100000\n",
+ " - 18s - loss: 0.3142 - acc: 0.9366 - val_loss: 0.3084 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02720: val_acc did not improve from 0.94225\n",
+ "Epoch 2721/100000\n",
+ " - 18s - loss: 0.3109 - acc: 0.9373 - val_loss: 0.2971 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 02721: val_acc did not improve from 0.94225\n",
+ "Epoch 2722/100000\n",
+ " - 19s - loss: 0.3145 - acc: 0.9366 - val_loss: 0.3159 - val_acc: 0.9256\n",
+ "\n",
+ "Epoch 02722: val_acc did not improve from 0.94225\n",
+ "Epoch 2723/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9366 - val_loss: 0.3291 - val_acc: 0.9223\n",
+ "\n",
+ "Epoch 02723: val_acc did not improve from 0.94225\n",
+ "Epoch 2724/100000\n",
+ " - 19s - loss: 0.3142 - acc: 0.9374 - val_loss: 0.3722 - val_acc: 0.8916\n",
+ "\n",
+ "Epoch 02724: val_acc did not improve from 0.94225\n",
+ "Epoch 2725/100000\n",
+ " - 18s - loss: 0.3135 - acc: 0.9368 - val_loss: 0.3330 - val_acc: 0.9251\n",
+ "\n",
+ "Epoch 02725: val_acc did not improve from 0.94225\n",
+ "Epoch 2726/100000\n",
+ " - 19s - loss: 0.3120 - acc: 0.9376 - val_loss: 0.3212 - val_acc: 0.9274\n",
+ "\n",
+ "Epoch 02726: val_acc did not improve from 0.94225\n",
+ "Epoch 2727/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9370 - val_loss: 0.3103 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 02727: val_acc did not improve from 0.94225\n",
+ "Epoch 2728/100000\n",
+ " - 19s - loss: 0.3126 - acc: 0.9370 - val_loss: 0.3468 - val_acc: 0.9089\n",
+ "\n",
+ "Epoch 02728: val_acc did not improve from 0.94225\n",
+ "Epoch 2729/100000\n",
+ " - 18s - loss: 0.3100 - acc: 0.9375 - val_loss: 0.3210 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 02729: val_acc did not improve from 0.94225\n",
+ "Epoch 2730/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9370 - val_loss: 0.3311 - val_acc: 0.9192\n",
+ "\n",
+ "Epoch 02730: val_acc did not improve from 0.94225\n",
+ "Epoch 2731/100000\n",
+ " - 19s - loss: 0.3143 - acc: 0.9360 - val_loss: 0.3299 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 02731: val_acc did not improve from 0.94225\n",
+ "Epoch 2732/100000\n",
+ " - 19s - loss: 0.3117 - acc: 0.9375 - val_loss: 0.3606 - val_acc: 0.9019\n",
+ "\n",
+ "Epoch 02732: val_acc did not improve from 0.94225\n",
+ "Epoch 2733/100000\n",
+ " - 19s - loss: 0.3111 - acc: 0.9373 - val_loss: 0.3030 - val_acc: 0.9407\n",
+ "\n",
+ "Epoch 02733: val_acc did not improve from 0.94225\n",
+ "Epoch 2734/100000\n",
+ " - 19s - loss: 0.3186 - acc: 0.9367 - val_loss: 0.3074 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 02734: val_acc did not improve from 0.94225\n",
+ "Epoch 2735/100000\n",
+ " - 18s - loss: 0.3114 - acc: 0.9375 - val_loss: 0.3233 - val_acc: 0.9237\n",
+ "\n",
+ "Epoch 02735: val_acc did not improve from 0.94225\n",
+ "Epoch 2736/100000\n",
+ " - 19s - loss: 0.3100 - acc: 0.9377 - val_loss: 0.3067 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 02736: val_acc did not improve from 0.94225\n",
+ "Epoch 2737/100000\n",
+ " - 18s - loss: 0.3108 - acc: 0.9376 - val_loss: 0.3074 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 02737: val_acc did not improve from 0.94225\n",
+ "Epoch 2738/100000\n",
+ " - 19s - loss: 0.3115 - acc: 0.9374 - val_loss: 0.3018 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02738: val_acc did not improve from 0.94225\n",
+ "Epoch 2739/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9366 - val_loss: 0.3676 - val_acc: 0.9115\n",
+ "\n",
+ "Epoch 02739: val_acc did not improve from 0.94225\n",
+ "Epoch 2740/100000\n",
+ " - 19s - loss: 0.3154 - acc: 0.9361 - val_loss: 0.3080 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 02740: val_acc did not improve from 0.94225\n",
+ "Epoch 2741/100000\n",
+ " - 19s - loss: 0.3101 - acc: 0.9371 - val_loss: 0.3192 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 02741: val_acc did not improve from 0.94225\n",
+ "Epoch 2742/100000\n",
+ " - 19s - loss: 0.3126 - acc: 0.9375 - val_loss: 0.3039 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 02742: val_acc did not improve from 0.94225\n",
+ "Epoch 2743/100000\n",
+ " - 18s - loss: 0.3133 - acc: 0.9376 - val_loss: 0.3223 - val_acc: 0.9266\n",
+ "\n",
+ "Epoch 02743: val_acc did not improve from 0.94225\n",
+ "Epoch 2744/100000\n",
+ " - 19s - loss: 0.3132 - acc: 0.9372 - val_loss: 0.3175 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 02744: val_acc did not improve from 0.94225\n",
+ "Epoch 2745/100000\n",
+ " - 19s - loss: 0.3143 - acc: 0.9365 - val_loss: 0.2947 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 02745: val_acc did not improve from 0.94225\n",
+ "Epoch 2746/100000\n",
+ " - 18s - loss: 0.3120 - acc: 0.9373 - val_loss: 0.3162 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 02746: val_acc did not improve from 0.94225\n",
+ "Epoch 2747/100000\n",
+ " - 18s - loss: 0.3135 - acc: 0.9369 - val_loss: 0.3559 - val_acc: 0.9045\n",
+ "\n",
+ "Epoch 02747: val_acc did not improve from 0.94225\n",
+ "Epoch 2748/100000\n",
+ " - 19s - loss: 0.3124 - acc: 0.9372 - val_loss: 0.3374 - val_acc: 0.9304\n",
+ "\n",
+ "Epoch 02748: val_acc did not improve from 0.94225\n",
+ "Epoch 2749/100000\n",
+ " - 19s - loss: 0.3117 - acc: 0.9374 - val_loss: 0.3082 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 02749: val_acc did not improve from 0.94225\n",
+ "Epoch 2750/100000\n",
+ " - 18s - loss: 0.3128 - acc: 0.9370 - val_loss: 0.3491 - val_acc: 0.9120\n",
+ "\n",
+ "Epoch 02750: val_acc did not improve from 0.94225\n",
+ "Epoch 2751/100000\n",
+ " - 18s - loss: 0.3124 - acc: 0.9369 - val_loss: 0.2974 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 02751: val_acc did not improve from 0.94225\n",
+ "Epoch 2752/100000\n",
+ " - 19s - loss: 0.3142 - acc: 0.9366 - val_loss: 0.3915 - val_acc: 0.8857\n",
+ "\n",
+ "Epoch 02752: val_acc did not improve from 0.94225\n",
+ "Epoch 2753/100000\n",
+ " - 19s - loss: 0.3126 - acc: 0.9370 - val_loss: 0.3198 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 02753: val_acc did not improve from 0.94225\n",
+ "Epoch 2754/100000\n",
+ " - 18s - loss: 0.3136 - acc: 0.9369 - val_loss: 0.3051 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 02754: val_acc did not improve from 0.94225\n",
+ "Epoch 2755/100000\n",
+ " - 19s - loss: 0.3104 - acc: 0.9379 - val_loss: 0.3706 - val_acc: 0.9018\n",
+ "\n",
+ "Epoch 02755: val_acc did not improve from 0.94225\n",
+ "Epoch 2756/100000\n",
+ " - 18s - loss: 0.3122 - acc: 0.9370 - val_loss: 0.3073 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 02756: val_acc did not improve from 0.94225\n",
+ "Epoch 2757/100000\n",
+ " - 19s - loss: 0.3135 - acc: 0.9373 - val_loss: 0.3080 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 02757: val_acc did not improve from 0.94225\n",
+ "Epoch 2758/100000\n",
+ " - 19s - loss: 0.3156 - acc: 0.9361 - val_loss: 0.3138 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 02758: val_acc did not improve from 0.94225\n",
+ "Epoch 2759/100000\n",
+ " - 19s - loss: 0.3121 - acc: 0.9371 - val_loss: 0.3083 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 02759: val_acc did not improve from 0.94225\n",
+ "Epoch 2760/100000\n",
+ " - 19s - loss: 0.3148 - acc: 0.9370 - val_loss: 0.3048 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 02760: val_acc did not improve from 0.94225\n",
+ "Epoch 2761/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9372 - val_loss: 0.3277 - val_acc: 0.9223\n",
+ "\n",
+ "Epoch 02761: val_acc did not improve from 0.94225\n",
+ "Epoch 2762/100000\n",
+ " - 19s - loss: 0.3136 - acc: 0.9367 - val_loss: 0.3034 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 02762: val_acc did not improve from 0.94225\n",
+ "Epoch 2763/100000\n",
+ " - 19s - loss: 0.3118 - acc: 0.9369 - val_loss: 0.3643 - val_acc: 0.9089\n",
+ "\n",
+ "Epoch 02763: val_acc did not improve from 0.94225\n",
+ "Epoch 2764/100000\n",
+ " - 19s - loss: 0.3147 - acc: 0.9367 - val_loss: 0.3204 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 02764: val_acc did not improve from 0.94225\n",
+ "Epoch 2765/100000\n",
+ " - 19s - loss: 0.3118 - acc: 0.9373 - val_loss: 0.3002 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 02765: val_acc did not improve from 0.94225\n",
+ "Epoch 2766/100000\n",
+ " - 19s - loss: 0.3115 - acc: 0.9376 - val_loss: 0.4164 - val_acc: 0.8802\n",
+ "\n",
+ "Epoch 02766: val_acc did not improve from 0.94225\n",
+ "Epoch 2767/100000\n",
+ " - 19s - loss: 0.3134 - acc: 0.9369 - val_loss: 0.6117 - val_acc: 0.7844\n",
+ "\n",
+ "Epoch 02767: val_acc did not improve from 0.94225\n",
+ "Epoch 2768/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9376 - val_loss: 0.3364 - val_acc: 0.9204\n",
+ "\n",
+ "Epoch 02768: val_acc did not improve from 0.94225\n",
+ "Epoch 2769/100000\n",
+ " - 19s - loss: 0.3132 - acc: 0.9374 - val_loss: 0.2932 - val_acc: 0.9409\n",
+ "\n",
+ "Epoch 02769: val_acc did not improve from 0.94225\n",
+ "Epoch 2770/100000\n",
+ " - 19s - loss: 0.3136 - acc: 0.9366 - val_loss: 0.3611 - val_acc: 0.9098\n",
+ "\n",
+ "Epoch 02770: val_acc did not improve from 0.94225\n",
+ "Epoch 2771/100000\n",
+ " - 19s - loss: 0.3120 - acc: 0.9367 - val_loss: 0.3611 - val_acc: 0.9289\n",
+ "\n",
+ "Epoch 02771: val_acc did not improve from 0.94225\n",
+ "Epoch 2772/100000\n",
+ " - 19s - loss: 0.3126 - acc: 0.9367 - val_loss: 0.3179 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 02772: val_acc did not improve from 0.94225\n",
+ "Epoch 2773/100000\n",
+ " - 18s - loss: 0.3097 - acc: 0.9378 - val_loss: 0.3161 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 02773: val_acc did not improve from 0.94225\n",
+ "Epoch 2774/100000\n",
+ " - 19s - loss: 0.3132 - acc: 0.9367 - val_loss: 0.3253 - val_acc: 0.9248\n",
+ "\n",
+ "Epoch 02774: val_acc did not improve from 0.94225\n",
+ "Epoch 2775/100000\n",
+ " - 19s - loss: 0.3151 - acc: 0.9362 - val_loss: 0.3015 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 02775: val_acc did not improve from 0.94225\n",
+ "Epoch 2776/100000\n",
+ " - 19s - loss: 0.3110 - acc: 0.9375 - val_loss: 0.3552 - val_acc: 0.9126\n",
+ "\n",
+ "Epoch 02776: val_acc did not improve from 0.94225\n",
+ "Epoch 2777/100000\n",
+ " - 19s - loss: 0.3122 - acc: 0.9373 - val_loss: 0.3383 - val_acc: 0.9210\n",
+ "\n",
+ "Epoch 02777: val_acc did not improve from 0.94225\n",
+ "Epoch 2778/100000\n",
+ " - 19s - loss: 0.3154 - acc: 0.9363 - val_loss: 0.3102 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02778: val_acc did not improve from 0.94225\n",
+ "Epoch 2779/100000\n",
+ " - 19s - loss: 0.3114 - acc: 0.9375 - val_loss: 0.2964 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 02779: val_acc did not improve from 0.94225\n",
+ "Epoch 2780/100000\n",
+ " - 19s - loss: 0.3153 - acc: 0.9370 - val_loss: 0.5286 - val_acc: 0.8322\n",
+ "\n",
+ "Epoch 02780: val_acc did not improve from 0.94225\n",
+ "Epoch 2781/100000\n",
+ " - 19s - loss: 0.3129 - acc: 0.9373 - val_loss: 0.3296 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 02781: val_acc did not improve from 0.94225\n",
+ "Epoch 2782/100000\n",
+ " - 19s - loss: 0.3136 - acc: 0.9367 - val_loss: 0.3391 - val_acc: 0.9211\n",
+ "\n",
+ "Epoch 02782: val_acc did not improve from 0.94225\n",
+ "Epoch 2783/100000\n",
+ " - 19s - loss: 0.3124 - acc: 0.9368 - val_loss: 0.3073 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02783: val_acc did not improve from 0.94225\n",
+ "Epoch 2784/100000\n",
+ " - 18s - loss: 0.3137 - acc: 0.9370 - val_loss: 0.3107 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 02784: val_acc did not improve from 0.94225\n",
+ "Epoch 2785/100000\n",
+ " - 19s - loss: 0.3150 - acc: 0.9370 - val_loss: 0.3174 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 02785: val_acc did not improve from 0.94225\n",
+ "Epoch 2786/100000\n",
+ " - 18s - loss: 0.3125 - acc: 0.9373 - val_loss: 0.3578 - val_acc: 0.9090\n",
+ "\n",
+ "Epoch 02786: val_acc did not improve from 0.94225\n",
+ "Epoch 2787/100000\n",
+ " - 19s - loss: 0.3126 - acc: 0.9368 - val_loss: 0.3105 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02787: val_acc did not improve from 0.94225\n",
+ "Epoch 2788/100000\n",
+ " - 18s - loss: 0.3130 - acc: 0.9367 - val_loss: 0.3224 - val_acc: 0.9256\n",
+ "\n",
+ "Epoch 02788: val_acc did not improve from 0.94225\n",
+ "Epoch 2789/100000\n",
+ " - 19s - loss: 0.3108 - acc: 0.9373 - val_loss: 0.3091 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 02789: val_acc did not improve from 0.94225\n",
+ "Epoch 2790/100000\n",
+ " - 19s - loss: 0.3150 - acc: 0.9362 - val_loss: 0.3340 - val_acc: 0.9225\n",
+ "\n",
+ "Epoch 02790: val_acc did not improve from 0.94225\n",
+ "Epoch 2791/100000\n",
+ " - 19s - loss: 0.3135 - acc: 0.9368 - val_loss: 0.2963 - val_acc: 0.9399\n",
+ "\n",
+ "Epoch 02791: val_acc did not improve from 0.94225\n",
+ "Epoch 2792/100000\n",
+ " - 19s - loss: 0.3108 - acc: 0.9379 - val_loss: 0.3790 - val_acc: 0.9011\n",
+ "\n",
+ "Epoch 02792: val_acc did not improve from 0.94225\n",
+ "Epoch 2793/100000\n",
+ " - 19s - loss: 0.3117 - acc: 0.9372 - val_loss: 0.2950 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 02793: val_acc did not improve from 0.94225\n",
+ "Epoch 2794/100000\n",
+ " - 19s - loss: 0.3145 - acc: 0.9360 - val_loss: 0.3244 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 02794: val_acc did not improve from 0.94225\n",
+ "Epoch 2795/100000\n",
+ " - 19s - loss: 0.3136 - acc: 0.9371 - val_loss: 0.3247 - val_acc: 0.9285\n",
+ "\n",
+ "Epoch 02795: val_acc did not improve from 0.94225\n",
+ "Epoch 2796/100000\n",
+ " - 19s - loss: 0.3143 - acc: 0.9367 - val_loss: 0.2972 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 02796: val_acc did not improve from 0.94225\n",
+ "Epoch 2797/100000\n",
+ " - 19s - loss: 0.3113 - acc: 0.9377 - val_loss: 0.3032 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02797: val_acc did not improve from 0.94225\n",
+ "Epoch 2798/100000\n",
+ " - 18s - loss: 0.3119 - acc: 0.9370 - val_loss: 0.3214 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 02798: val_acc did not improve from 0.94225\n",
+ "Epoch 2799/100000\n",
+ " - 18s - loss: 0.3130 - acc: 0.9367 - val_loss: 0.3097 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 02799: val_acc did not improve from 0.94225\n",
+ "Epoch 2800/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9370 - val_loss: 0.4605 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02800: val_acc did not improve from 0.94225\n",
+ "Epoch 2801/100000\n",
+ " - 18s - loss: 0.3123 - acc: 0.9377 - val_loss: 0.3075 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 02801: val_acc did not improve from 0.94225\n",
+ "Epoch 2802/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9372 - val_loss: 0.3261 - val_acc: 0.9231\n",
+ "\n",
+ "Epoch 02802: val_acc did not improve from 0.94225\n",
+ "Epoch 2803/100000\n",
+ " - 18s - loss: 0.3134 - acc: 0.9369 - val_loss: 0.3102 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 02803: val_acc did not improve from 0.94225\n",
+ "Epoch 2804/100000\n",
+ " - 19s - loss: 0.3116 - acc: 0.9368 - val_loss: 0.2997 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 02804: val_acc did not improve from 0.94225\n",
+ "Epoch 2805/100000\n",
+ " - 19s - loss: 0.3156 - acc: 0.9356 - val_loss: 0.3062 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 02805: val_acc did not improve from 0.94225\n",
+ "Epoch 2806/100000\n",
+ " - 18s - loss: 0.3122 - acc: 0.9374 - val_loss: 0.3786 - val_acc: 0.8955\n",
+ "\n",
+ "Epoch 02806: val_acc did not improve from 0.94225\n",
+ "Epoch 2807/100000\n",
+ " - 19s - loss: 0.3155 - acc: 0.9367 - val_loss: 0.3332 - val_acc: 0.9204\n",
+ "\n",
+ "Epoch 02807: val_acc did not improve from 0.94225\n",
+ "Epoch 2808/100000\n",
+ " - 20s - loss: 0.3144 - acc: 0.9371 - val_loss: 0.3085 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 02808: val_acc did not improve from 0.94225\n",
+ "Epoch 2809/100000\n",
+ " - 19s - loss: 0.3149 - acc: 0.9369 - val_loss: 0.3033 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 02809: val_acc did not improve from 0.94225\n",
+ "Epoch 2810/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9375 - val_loss: 0.3368 - val_acc: 0.9137\n",
+ "\n",
+ "Epoch 02810: val_acc did not improve from 0.94225\n",
+ "Epoch 2811/100000\n",
+ " - 18s - loss: 0.3108 - acc: 0.9377 - val_loss: 0.3336 - val_acc: 0.9145\n",
+ "\n",
+ "Epoch 02811: val_acc did not improve from 0.94225\n",
+ "Epoch 2812/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9365 - val_loss: 0.3543 - val_acc: 0.9056\n",
+ "\n",
+ "Epoch 02812: val_acc did not improve from 0.94225\n",
+ "Epoch 2813/100000\n",
+ " - 18s - loss: 0.3119 - acc: 0.9377 - val_loss: 0.3112 - val_acc: 0.9270\n",
+ "\n",
+ "Epoch 02813: val_acc did not improve from 0.94225\n",
+ "Epoch 2814/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9373 - val_loss: 0.3110 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 02814: val_acc did not improve from 0.94225\n",
+ "Epoch 2815/100000\n",
+ " - 19s - loss: 0.3120 - acc: 0.9375 - val_loss: 0.3335 - val_acc: 0.9194\n",
+ "\n",
+ "Epoch 02815: val_acc did not improve from 0.94225\n",
+ "Epoch 2816/100000\n",
+ " - 19s - loss: 0.3108 - acc: 0.9378 - val_loss: 0.2977 - val_acc: 0.9383\n",
+ "\n",
+ "Epoch 02816: val_acc did not improve from 0.94225\n",
+ "Epoch 2817/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9368 - val_loss: 0.3001 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02817: val_acc did not improve from 0.94225\n",
+ "Epoch 2818/100000\n",
+ " - 19s - loss: 0.3146 - acc: 0.9366 - val_loss: 0.3084 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 02818: val_acc did not improve from 0.94225\n",
+ "Epoch 2819/100000\n",
+ " - 19s - loss: 0.3135 - acc: 0.9371 - val_loss: 0.3125 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 02819: val_acc did not improve from 0.94225\n",
+ "Epoch 2820/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9372 - val_loss: 0.3064 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 02820: val_acc did not improve from 0.94225\n",
+ "Epoch 2821/100000\n",
+ " - 19s - loss: 0.3133 - acc: 0.9366 - val_loss: 0.3101 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 02821: val_acc did not improve from 0.94225\n",
+ "Epoch 2822/100000\n",
+ " - 19s - loss: 0.3129 - acc: 0.9371 - val_loss: 0.3382 - val_acc: 0.9122\n",
+ "\n",
+ "Epoch 02822: val_acc did not improve from 0.94225\n",
+ "Epoch 2823/100000\n",
+ " - 19s - loss: 0.3132 - acc: 0.9369 - val_loss: 0.4093 - val_acc: 0.8929\n",
+ "\n",
+ "Epoch 02823: val_acc did not improve from 0.94225\n",
+ "Epoch 2824/100000\n",
+ " - 18s - loss: 0.3121 - acc: 0.9373 - val_loss: 0.3273 - val_acc: 0.9239\n",
+ "\n",
+ "Epoch 02824: val_acc did not improve from 0.94225\n",
+ "Epoch 2825/100000\n",
+ " - 19s - loss: 0.3131 - acc: 0.9362 - val_loss: 0.3321 - val_acc: 0.9243\n",
+ "\n",
+ "Epoch 02825: val_acc did not improve from 0.94225\n",
+ "Epoch 2826/100000\n",
+ " - 19s - loss: 0.3147 - acc: 0.9367 - val_loss: 0.2936 - val_acc: 0.9402\n",
+ "\n",
+ "Epoch 02826: val_acc did not improve from 0.94225\n",
+ "Epoch 2827/100000\n",
+ " - 18s - loss: 0.3128 - acc: 0.9369 - val_loss: 0.3520 - val_acc: 0.9089\n",
+ "\n",
+ "Epoch 02827: val_acc did not improve from 0.94225\n",
+ "Epoch 2828/100000\n",
+ " - 19s - loss: 0.3113 - acc: 0.9370 - val_loss: 0.3042 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02828: val_acc did not improve from 0.94225\n",
+ "Epoch 2829/100000\n",
+ " - 19s - loss: 0.3149 - acc: 0.9365 - val_loss: 0.3041 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 02829: val_acc did not improve from 0.94225\n",
+ "Epoch 2830/100000\n",
+ " - 18s - loss: 0.3122 - acc: 0.9372 - val_loss: 0.2916 - val_acc: 0.9414\n",
+ "\n",
+ "Epoch 02830: val_acc did not improve from 0.94225\n",
+ "Epoch 2831/100000\n",
+ " - 19s - loss: 0.3130 - acc: 0.9372 - val_loss: 0.3211 - val_acc: 0.9231\n",
+ "\n",
+ "Epoch 02831: val_acc did not improve from 0.94225\n",
+ "Epoch 2832/100000\n",
+ " - 19s - loss: 0.3122 - acc: 0.9371 - val_loss: 0.3010 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 02832: val_acc did not improve from 0.94225\n",
+ "Epoch 2833/100000\n",
+ " - 19s - loss: 0.3130 - acc: 0.9368 - val_loss: 0.3204 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 02833: val_acc did not improve from 0.94225\n",
+ "Epoch 2834/100000\n",
+ " - 18s - loss: 0.3121 - acc: 0.9369 - val_loss: 0.3064 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 02834: val_acc did not improve from 0.94225\n",
+ "Epoch 2835/100000\n",
+ " - 19s - loss: 0.3153 - acc: 0.9358 - val_loss: 0.3347 - val_acc: 0.9227\n",
+ "\n",
+ "Epoch 02835: val_acc did not improve from 0.94225\n",
+ "Epoch 2836/100000\n",
+ " - 18s - loss: 0.3123 - acc: 0.9372 - val_loss: 0.3473 - val_acc: 0.9115\n",
+ "\n",
+ "Epoch 02836: val_acc did not improve from 0.94225\n",
+ "Epoch 2837/100000\n",
+ " - 19s - loss: 0.3128 - acc: 0.9368 - val_loss: 0.2993 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02837: val_acc did not improve from 0.94225\n",
+ "Epoch 2838/100000\n",
+ " - 18s - loss: 0.3121 - acc: 0.9367 - val_loss: 0.3023 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 02838: val_acc did not improve from 0.94225\n",
+ "Epoch 2839/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9366 - val_loss: 0.3072 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 02839: val_acc did not improve from 0.94225\n",
+ "Epoch 2840/100000\n",
+ " - 18s - loss: 0.3131 - acc: 0.9370 - val_loss: 0.3097 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 02840: val_acc did not improve from 0.94225\n",
+ "Epoch 2841/100000\n",
+ " - 19s - loss: 0.3134 - acc: 0.9366 - val_loss: 0.3110 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 02841: val_acc did not improve from 0.94225\n",
+ "Epoch 2842/100000\n",
+ " - 19s - loss: 0.3131 - acc: 0.9364 - val_loss: 0.3994 - val_acc: 0.8881\n",
+ "\n",
+ "Epoch 02842: val_acc did not improve from 0.94225\n",
+ "Epoch 2843/100000\n",
+ " - 19s - loss: 0.3116 - acc: 0.9373 - val_loss: 0.3065 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 02843: val_acc did not improve from 0.94225\n",
+ "Epoch 2844/100000\n",
+ " - 19s - loss: 0.3135 - acc: 0.9365 - val_loss: 0.3907 - val_acc: 0.8980\n",
+ "\n",
+ "Epoch 02844: val_acc did not improve from 0.94225\n",
+ "Epoch 2845/100000\n",
+ " - 18s - loss: 0.3127 - acc: 0.9374 - val_loss: 0.3015 - val_acc: 0.9396\n",
+ "\n",
+ "Epoch 02845: val_acc did not improve from 0.94225\n",
+ "Epoch 2846/100000\n",
+ " - 18s - loss: 0.3157 - acc: 0.9364 - val_loss: 0.3120 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 02846: val_acc did not improve from 0.94225\n",
+ "Epoch 2847/100000\n",
+ " - 19s - loss: 0.3143 - acc: 0.9367 - val_loss: 0.3006 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 02847: val_acc did not improve from 0.94225\n",
+ "Epoch 2848/100000\n",
+ " - 18s - loss: 0.3125 - acc: 0.9372 - val_loss: 0.3038 - val_acc: 0.9390\n",
+ "\n",
+ "Epoch 02848: val_acc did not improve from 0.94225\n",
+ "Epoch 2849/100000\n",
+ " - 18s - loss: 0.3138 - acc: 0.9369 - val_loss: 0.3080 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 02849: val_acc did not improve from 0.94225\n",
+ "Epoch 2850/100000\n",
+ " - 19s - loss: 0.3124 - acc: 0.9371 - val_loss: 0.4164 - val_acc: 0.8890\n",
+ "\n",
+ "Epoch 02850: val_acc did not improve from 0.94225\n",
+ "Epoch 2851/100000\n",
+ " - 18s - loss: 0.3161 - acc: 0.9357 - val_loss: 0.3023 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 02851: val_acc did not improve from 0.94225\n",
+ "Epoch 2852/100000\n",
+ " - 19s - loss: 0.3149 - acc: 0.9364 - val_loss: 0.3037 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 02852: val_acc did not improve from 0.94225\n",
+ "Epoch 2853/100000\n",
+ " - 18s - loss: 0.3121 - acc: 0.9375 - val_loss: 0.3645 - val_acc: 0.9028\n",
+ "\n",
+ "Epoch 02853: val_acc did not improve from 0.94225\n",
+ "Epoch 2854/100000\n",
+ " - 19s - loss: 0.3130 - acc: 0.9367 - val_loss: 0.3121 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 02854: val_acc did not improve from 0.94225\n",
+ "Epoch 2855/100000\n",
+ " - 18s - loss: 0.3134 - acc: 0.9372 - val_loss: 0.3006 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 02855: val_acc did not improve from 0.94225\n",
+ "Epoch 2856/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9372 - val_loss: 0.3768 - val_acc: 0.9169\n",
+ "\n",
+ "Epoch 02856: val_acc did not improve from 0.94225\n",
+ "Epoch 2857/100000\n",
+ " - 19s - loss: 0.3134 - acc: 0.9367 - val_loss: 0.3153 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 02857: val_acc did not improve from 0.94225\n",
+ "Epoch 2858/100000\n",
+ " - 18s - loss: 0.3130 - acc: 0.9370 - val_loss: 0.4579 - val_acc: 0.8811\n",
+ "\n",
+ "Epoch 02858: val_acc did not improve from 0.94225\n",
+ "Epoch 2859/100000\n",
+ " - 19s - loss: 0.3116 - acc: 0.9377 - val_loss: 0.3072 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 02859: val_acc did not improve from 0.94225\n",
+ "Epoch 2860/100000\n",
+ " - 19s - loss: 0.3148 - acc: 0.9361 - val_loss: 0.3073 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02860: val_acc did not improve from 0.94225\n",
+ "Epoch 2861/100000\n",
+ " - 19s - loss: 0.3147 - acc: 0.9365 - val_loss: 0.3461 - val_acc: 0.9225\n",
+ "\n",
+ "Epoch 02861: val_acc did not improve from 0.94225\n",
+ "Epoch 2862/100000\n",
+ " - 19s - loss: 0.3151 - acc: 0.9364 - val_loss: 0.3352 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02862: val_acc did not improve from 0.94225\n",
+ "Epoch 2863/100000\n",
+ " - 19s - loss: 0.3134 - acc: 0.9365 - val_loss: 0.6935 - val_acc: 0.7499\n",
+ "\n",
+ "Epoch 02863: val_acc did not improve from 0.94225\n",
+ "Epoch 2864/100000\n",
+ " - 19s - loss: 0.3131 - acc: 0.9374 - val_loss: 0.3021 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 02864: val_acc did not improve from 0.94225\n",
+ "Epoch 2865/100000\n",
+ " - 18s - loss: 0.3135 - acc: 0.9366 - val_loss: 0.3292 - val_acc: 0.9390\n",
+ "\n",
+ "Epoch 02865: val_acc did not improve from 0.94225\n",
+ "Epoch 2866/100000\n",
+ " - 19s - loss: 0.3137 - acc: 0.9362 - val_loss: 0.3847 - val_acc: 0.9198\n",
+ "\n",
+ "Epoch 02866: val_acc did not improve from 0.94225\n",
+ "Epoch 2867/100000\n",
+ " - 19s - loss: 0.3135 - acc: 0.9371 - val_loss: 0.3084 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02867: val_acc did not improve from 0.94225\n",
+ "Epoch 2868/100000\n",
+ " - 19s - loss: 0.3153 - acc: 0.9373 - val_loss: 0.3014 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 02868: val_acc did not improve from 0.94225\n",
+ "Epoch 2869/100000\n",
+ " - 18s - loss: 0.3141 - acc: 0.9366 - val_loss: 0.3253 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 02869: val_acc did not improve from 0.94225\n",
+ "Epoch 2870/100000\n",
+ " - 19s - loss: 0.3117 - acc: 0.9374 - val_loss: 0.4106 - val_acc: 0.8753\n",
+ "\n",
+ "Epoch 02870: val_acc did not improve from 0.94225\n",
+ "Epoch 2871/100000\n",
+ " - 18s - loss: 0.3137 - acc: 0.9369 - val_loss: 0.3037 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 02871: val_acc did not improve from 0.94225\n",
+ "Epoch 2872/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9369 - val_loss: 0.3165 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 02872: val_acc did not improve from 0.94225\n",
+ "Epoch 2873/100000\n",
+ " - 19s - loss: 0.3118 - acc: 0.9372 - val_loss: 0.3171 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 02873: val_acc did not improve from 0.94225\n",
+ "Epoch 2874/100000\n",
+ " - 18s - loss: 0.3113 - acc: 0.9371 - val_loss: 0.3059 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 02874: val_acc did not improve from 0.94225\n",
+ "Epoch 2875/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9366 - val_loss: 0.3017 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 02875: val_acc did not improve from 0.94225\n",
+ "Epoch 2876/100000\n",
+ " - 19s - loss: 0.3113 - acc: 0.9375 - val_loss: 0.3260 - val_acc: 0.9281\n",
+ "\n",
+ "Epoch 02876: val_acc did not improve from 0.94225\n",
+ "Epoch 2877/100000\n",
+ " - 18s - loss: 0.3143 - acc: 0.9365 - val_loss: 0.3033 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 02877: val_acc did not improve from 0.94225\n",
+ "Epoch 2878/100000\n",
+ " - 19s - loss: 0.3121 - acc: 0.9376 - val_loss: 0.3161 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 02878: val_acc did not improve from 0.94225\n",
+ "Epoch 2879/100000\n",
+ " - 18s - loss: 0.3151 - acc: 0.9360 - val_loss: 0.3143 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 02879: val_acc did not improve from 0.94225\n",
+ "Epoch 2880/100000\n",
+ " - 19s - loss: 0.3131 - acc: 0.9370 - val_loss: 0.3285 - val_acc: 0.9208\n",
+ "\n",
+ "Epoch 02880: val_acc did not improve from 0.94225\n",
+ "Epoch 2881/100000\n",
+ " - 19s - loss: 0.3108 - acc: 0.9378 - val_loss: 0.3252 - val_acc: 0.9239\n",
+ "\n",
+ "Epoch 02881: val_acc did not improve from 0.94225\n",
+ "Epoch 2882/100000\n",
+ " - 18s - loss: 0.3122 - acc: 0.9367 - val_loss: 0.3303 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 02882: val_acc did not improve from 0.94225\n",
+ "Epoch 2883/100000\n",
+ " - 19s - loss: 0.3140 - acc: 0.9368 - val_loss: 0.4265 - val_acc: 0.8835\n",
+ "\n",
+ "Epoch 02883: val_acc did not improve from 0.94225\n",
+ "Epoch 2884/100000\n",
+ " - 18s - loss: 0.3112 - acc: 0.9377 - val_loss: 0.8281 - val_acc: 0.6407\n",
+ "\n",
+ "Epoch 02884: val_acc did not improve from 0.94225\n",
+ "Epoch 2885/100000\n",
+ " - 19s - loss: 0.3140 - acc: 0.9370 - val_loss: 0.3159 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 02885: val_acc did not improve from 0.94225\n",
+ "Epoch 2886/100000\n",
+ " - 18s - loss: 0.3127 - acc: 0.9370 - val_loss: 0.4271 - val_acc: 0.8706\n",
+ "\n",
+ "Epoch 02886: val_acc did not improve from 0.94225\n",
+ "Epoch 2887/100000\n",
+ " - 19s - loss: 0.3135 - acc: 0.9364 - val_loss: 0.3160 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 02887: val_acc did not improve from 0.94225\n",
+ "Epoch 2888/100000\n",
+ " - 19s - loss: 0.3137 - acc: 0.9363 - val_loss: 0.3293 - val_acc: 0.9208\n",
+ "\n",
+ "Epoch 02888: val_acc did not improve from 0.94225\n",
+ "Epoch 2889/100000\n",
+ " - 18s - loss: 0.3120 - acc: 0.9371 - val_loss: 0.3144 - val_acc: 0.9286\n",
+ "\n",
+ "Epoch 02889: val_acc did not improve from 0.94225\n",
+ "Epoch 2890/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9372 - val_loss: 0.3241 - val_acc: 0.9219\n",
+ "\n",
+ "Epoch 02890: val_acc did not improve from 0.94225\n",
+ "Epoch 2891/100000\n",
+ " - 18s - loss: 0.3156 - acc: 0.9358 - val_loss: 0.2988 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 02891: val_acc did not improve from 0.94225\n",
+ "Epoch 2892/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9370 - val_loss: 0.3057 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 02892: val_acc did not improve from 0.94225\n",
+ "Epoch 2893/100000\n",
+ " - 18s - loss: 0.3127 - acc: 0.9366 - val_loss: 0.3199 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 02893: val_acc did not improve from 0.94225\n",
+ "Epoch 2894/100000\n",
+ " - 19s - loss: 0.3132 - acc: 0.9370 - val_loss: 0.3424 - val_acc: 0.9216\n",
+ "\n",
+ "Epoch 02894: val_acc did not improve from 0.94225\n",
+ "Epoch 2895/100000\n",
+ " - 18s - loss: 0.3125 - acc: 0.9372 - val_loss: 0.3280 - val_acc: 0.9246\n",
+ "\n",
+ "Epoch 02895: val_acc did not improve from 0.94225\n",
+ "Epoch 2896/100000\n",
+ " - 19s - loss: 0.3123 - acc: 0.9371 - val_loss: 0.3998 - val_acc: 0.8942\n",
+ "\n",
+ "Epoch 02896: val_acc did not improve from 0.94225\n",
+ "Epoch 2897/100000\n",
+ " - 18s - loss: 0.3138 - acc: 0.9372 - val_loss: 0.3077 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02897: val_acc did not improve from 0.94225\n",
+ "Epoch 2898/100000\n",
+ " - 19s - loss: 0.3111 - acc: 0.9377 - val_loss: 0.3309 - val_acc: 0.9258\n",
+ "\n",
+ "Epoch 02898: val_acc did not improve from 0.94225\n",
+ "Epoch 2899/100000\n",
+ " - 19s - loss: 0.3118 - acc: 0.9372 - val_loss: 0.3060 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02899: val_acc did not improve from 0.94225\n",
+ "Epoch 2900/100000\n",
+ " - 18s - loss: 0.3139 - acc: 0.9368 - val_loss: 0.3003 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 02900: val_acc did not improve from 0.94225\n",
+ "Epoch 2901/100000\n",
+ " - 18s - loss: 0.3110 - acc: 0.9372 - val_loss: 0.3054 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 02901: val_acc did not improve from 0.94225\n",
+ "Epoch 2902/100000\n",
+ " - 19s - loss: 0.3131 - acc: 0.9370 - val_loss: 0.3478 - val_acc: 0.9119\n",
+ "\n",
+ "Epoch 02902: val_acc did not improve from 0.94225\n",
+ "Epoch 2903/100000\n",
+ " - 18s - loss: 0.3130 - acc: 0.9368 - val_loss: 0.3032 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 02903: val_acc did not improve from 0.94225\n",
+ "Epoch 2904/100000\n",
+ " - 19s - loss: 0.3117 - acc: 0.9370 - val_loss: 0.3152 - val_acc: 0.9271\n",
+ "\n",
+ "Epoch 02904: val_acc did not improve from 0.94225\n",
+ "Epoch 2905/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9377 - val_loss: 0.3137 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02905: val_acc did not improve from 0.94225\n",
+ "Epoch 2906/100000\n",
+ " - 18s - loss: 0.3180 - acc: 0.9360 - val_loss: 0.3161 - val_acc: 0.9305\n",
+ "\n",
+ "Epoch 02906: val_acc did not improve from 0.94225\n",
+ "Epoch 2907/100000\n",
+ " - 19s - loss: 0.3152 - acc: 0.9366 - val_loss: 0.3007 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 02907: val_acc did not improve from 0.94225\n",
+ "Epoch 2908/100000\n",
+ " - 18s - loss: 0.3129 - acc: 0.9366 - val_loss: 0.3099 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 02908: val_acc did not improve from 0.94225\n",
+ "Epoch 2909/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9363 - val_loss: 0.3114 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 02909: val_acc did not improve from 0.94225\n",
+ "Epoch 2910/100000\n",
+ " - 19s - loss: 0.3130 - acc: 0.9363 - val_loss: 0.3232 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 02910: val_acc did not improve from 0.94225\n",
+ "Epoch 2911/100000\n",
+ " - 18s - loss: 0.3140 - acc: 0.9371 - val_loss: 0.3096 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 02911: val_acc did not improve from 0.94225\n",
+ "Epoch 2912/100000\n",
+ " - 19s - loss: 0.3161 - acc: 0.9359 - val_loss: 0.3148 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 02912: val_acc did not improve from 0.94225\n",
+ "Epoch 2913/100000\n",
+ " - 19s - loss: 0.3136 - acc: 0.9372 - val_loss: 0.3352 - val_acc: 0.9215\n",
+ "\n",
+ "Epoch 02913: val_acc did not improve from 0.94225\n",
+ "Epoch 2914/100000\n",
+ " - 19s - loss: 0.3137 - acc: 0.9371 - val_loss: 0.3907 - val_acc: 0.9185\n",
+ "\n",
+ "Epoch 02914: val_acc did not improve from 0.94225\n",
+ "Epoch 2915/100000\n",
+ " - 19s - loss: 0.3116 - acc: 0.9375 - val_loss: 0.3166 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 02915: val_acc did not improve from 0.94225\n",
+ "Epoch 2916/100000\n",
+ " - 19s - loss: 0.3114 - acc: 0.9374 - val_loss: 0.3225 - val_acc: 0.9258\n",
+ "\n",
+ "Epoch 02916: val_acc did not improve from 0.94225\n",
+ "Epoch 2917/100000\n",
+ " - 19s - loss: 0.3123 - acc: 0.9371 - val_loss: 0.3106 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02917: val_acc did not improve from 0.94225\n",
+ "Epoch 2918/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9370 - val_loss: 0.3104 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 02918: val_acc did not improve from 0.94225\n",
+ "Epoch 2919/100000\n",
+ " - 18s - loss: 0.3133 - acc: 0.9368 - val_loss: 0.3168 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 02919: val_acc did not improve from 0.94225\n",
+ "Epoch 2920/100000\n",
+ " - 19s - loss: 0.3147 - acc: 0.9364 - val_loss: 0.3254 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 02920: val_acc did not improve from 0.94225\n",
+ "Epoch 2921/100000\n",
+ " - 19s - loss: 0.3168 - acc: 0.9360 - val_loss: 0.3195 - val_acc: 0.9244\n",
+ "\n",
+ "Epoch 02921: val_acc did not improve from 0.94225\n",
+ "Epoch 2922/100000\n",
+ " - 19s - loss: 0.3137 - acc: 0.9370 - val_loss: 0.3001 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02922: val_acc did not improve from 0.94225\n",
+ "Epoch 2923/100000\n",
+ " - 19s - loss: 0.3133 - acc: 0.9369 - val_loss: 0.3131 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 02923: val_acc did not improve from 0.94225\n",
+ "Epoch 2924/100000\n",
+ " - 19s - loss: 0.3142 - acc: 0.9373 - val_loss: 0.3146 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02924: val_acc did not improve from 0.94225\n",
+ "Epoch 2925/100000\n",
+ " - 19s - loss: 0.3147 - acc: 0.9363 - val_loss: 0.3907 - val_acc: 0.8911\n",
+ "\n",
+ "Epoch 02925: val_acc did not improve from 0.94225\n",
+ "Epoch 2926/100000\n",
+ " - 19s - loss: 0.3132 - acc: 0.9368 - val_loss: 0.3157 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 02926: val_acc did not improve from 0.94225\n",
+ "Epoch 2927/100000\n",
+ " - 19s - loss: 0.3146 - acc: 0.9363 - val_loss: 0.4041 - val_acc: 0.8941\n",
+ "\n",
+ "Epoch 02927: val_acc did not improve from 0.94225\n",
+ "Epoch 2928/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9371 - val_loss: 0.3108 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 02928: val_acc did not improve from 0.94225\n",
+ "Epoch 2929/100000\n",
+ " - 19s - loss: 0.3118 - acc: 0.9372 - val_loss: 0.4674 - val_acc: 0.8730\n",
+ "\n",
+ "Epoch 02929: val_acc did not improve from 0.94225\n",
+ "Epoch 2930/100000\n",
+ " - 19s - loss: 0.3142 - acc: 0.9359 - val_loss: 0.3118 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 02930: val_acc did not improve from 0.94225\n",
+ "Epoch 2931/100000\n",
+ " - 18s - loss: 0.3142 - acc: 0.9363 - val_loss: 0.2941 - val_acc: 0.9430\n",
+ "\n",
+ "Epoch 02931: val_acc improved from 0.94225 to 0.94304, saving model to ./ModelSnapshots/CNN-2931.h5\n",
+ "Epoch 2932/100000\n",
+ " - 19s - loss: 0.3134 - acc: 0.9366 - val_loss: 0.3210 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 02932: val_acc did not improve from 0.94304\n",
+ "Epoch 2933/100000\n",
+ " - 19s - loss: 0.3116 - acc: 0.9375 - val_loss: 0.3085 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 02933: val_acc did not improve from 0.94304\n",
+ "Epoch 2934/100000\n",
+ " - 18s - loss: 0.3141 - acc: 0.9362 - val_loss: 0.3120 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 02934: val_acc did not improve from 0.94304\n",
+ "Epoch 2935/100000\n",
+ " - 19s - loss: 0.3115 - acc: 0.9378 - val_loss: 0.3214 - val_acc: 0.9285\n",
+ "\n",
+ "Epoch 02935: val_acc did not improve from 0.94304\n",
+ "Epoch 2936/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9367 - val_loss: 0.3097 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 02936: val_acc did not improve from 0.94304\n",
+ "Epoch 2937/100000\n",
+ " - 19s - loss: 0.3161 - acc: 0.9357 - val_loss: 0.3381 - val_acc: 0.9159\n",
+ "\n",
+ "Epoch 02937: val_acc did not improve from 0.94304\n",
+ "Epoch 2938/100000\n",
+ " - 18s - loss: 0.3125 - acc: 0.9367 - val_loss: 0.3073 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 02938: val_acc did not improve from 0.94304\n",
+ "Epoch 2939/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9367 - val_loss: 0.2982 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02939: val_acc did not improve from 0.94304\n",
+ "Epoch 2940/100000\n",
+ " - 18s - loss: 0.3131 - acc: 0.9368 - val_loss: 0.4198 - val_acc: 0.8701\n",
+ "\n",
+ "Epoch 02940: val_acc did not improve from 0.94304\n",
+ "Epoch 2941/100000\n",
+ " - 19s - loss: 0.3172 - acc: 0.9360 - val_loss: 0.3651 - val_acc: 0.9016\n",
+ "\n",
+ "Epoch 02941: val_acc did not improve from 0.94304\n",
+ "Epoch 2942/100000\n",
+ " - 18s - loss: 0.3136 - acc: 0.9361 - val_loss: 0.3114 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 02942: val_acc did not improve from 0.94304\n",
+ "Epoch 2943/100000\n",
+ " - 19s - loss: 0.3108 - acc: 0.9374 - val_loss: 0.3058 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 02943: val_acc did not improve from 0.94304\n",
+ "Epoch 2944/100000\n",
+ " - 19s - loss: 0.3133 - acc: 0.9372 - val_loss: 0.3065 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 02944: val_acc did not improve from 0.94304\n",
+ "Epoch 2945/100000\n",
+ " - 18s - loss: 0.3134 - acc: 0.9371 - val_loss: 0.3043 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02945: val_acc did not improve from 0.94304\n",
+ "Epoch 2946/100000\n",
+ " - 19s - loss: 0.3138 - acc: 0.9371 - val_loss: 0.3824 - val_acc: 0.8934\n",
+ "\n",
+ "Epoch 02946: val_acc did not improve from 0.94304\n",
+ "Epoch 2947/100000\n",
+ " - 18s - loss: 0.3126 - acc: 0.9372 - val_loss: 0.3157 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 02947: val_acc did not improve from 0.94304\n",
+ "Epoch 2948/100000\n",
+ " - 18s - loss: 0.3129 - acc: 0.9371 - val_loss: 0.3048 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 02948: val_acc did not improve from 0.94304\n",
+ "Epoch 2949/100000\n",
+ " - 19s - loss: 0.3116 - acc: 0.9368 - val_loss: 0.3442 - val_acc: 0.9177\n",
+ "\n",
+ "Epoch 02949: val_acc did not improve from 0.94304\n",
+ "Epoch 2950/100000\n",
+ " - 19s - loss: 0.3115 - acc: 0.9376 - val_loss: 0.3514 - val_acc: 0.9119\n",
+ "\n",
+ "Epoch 02950: val_acc did not improve from 0.94304\n",
+ "Epoch 2951/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9373 - val_loss: 0.3128 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 02951: val_acc did not improve from 0.94304\n",
+ "Epoch 2952/100000\n",
+ " - 19s - loss: 0.3129 - acc: 0.9370 - val_loss: 0.2992 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02952: val_acc did not improve from 0.94304\n",
+ "Epoch 2953/100000\n",
+ " - 19s - loss: 0.3150 - acc: 0.9360 - val_loss: 0.3061 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 02953: val_acc did not improve from 0.94304\n",
+ "Epoch 2954/100000\n",
+ " - 18s - loss: 0.3142 - acc: 0.9368 - val_loss: 0.3075 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02954: val_acc did not improve from 0.94304\n",
+ "Epoch 2955/100000\n",
+ " - 19s - loss: 0.3117 - acc: 0.9374 - val_loss: 0.3287 - val_acc: 0.9247\n",
+ "\n",
+ "Epoch 02955: val_acc did not improve from 0.94304\n",
+ "Epoch 2956/100000\n",
+ " - 19s - loss: 0.3111 - acc: 0.9373 - val_loss: 0.3157 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 02956: val_acc did not improve from 0.94304\n",
+ "Epoch 2957/100000\n",
+ " - 19s - loss: 0.3136 - acc: 0.9371 - val_loss: 0.3593 - val_acc: 0.9078\n",
+ "\n",
+ "Epoch 02957: val_acc did not improve from 0.94304\n",
+ "Epoch 2958/100000\n",
+ " - 18s - loss: 0.3108 - acc: 0.9375 - val_loss: 0.3423 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02958: val_acc did not improve from 0.94304\n",
+ "Epoch 2959/100000\n",
+ " - 19s - loss: 0.3138 - acc: 0.9374 - val_loss: 0.3018 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 02959: val_acc did not improve from 0.94304\n",
+ "Epoch 2960/100000\n",
+ " - 19s - loss: 0.3124 - acc: 0.9369 - val_loss: 0.3666 - val_acc: 0.9145\n",
+ "\n",
+ "Epoch 02960: val_acc did not improve from 0.94304\n",
+ "Epoch 2961/100000\n",
+ " - 19s - loss: 0.3129 - acc: 0.9369 - val_loss: 0.3044 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 02961: val_acc did not improve from 0.94304\n",
+ "Epoch 2962/100000\n",
+ " - 19s - loss: 0.3122 - acc: 0.9369 - val_loss: 0.3599 - val_acc: 0.9058\n",
+ "\n",
+ "Epoch 02962: val_acc did not improve from 0.94304\n",
+ "Epoch 2963/100000\n",
+ " - 19s - loss: 0.3126 - acc: 0.9366 - val_loss: 0.3026 - val_acc: 0.9400\n",
+ "\n",
+ "Epoch 02963: val_acc did not improve from 0.94304\n",
+ "Epoch 2964/100000\n",
+ " - 19s - loss: 0.3155 - acc: 0.9361 - val_loss: 0.3283 - val_acc: 0.9199\n",
+ "\n",
+ "Epoch 02964: val_acc did not improve from 0.94304\n",
+ "Epoch 2965/100000\n",
+ " - 18s - loss: 0.3112 - acc: 0.9377 - val_loss: 0.3065 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02965: val_acc did not improve from 0.94304\n",
+ "Epoch 2966/100000\n",
+ " - 18s - loss: 0.3152 - acc: 0.9370 - val_loss: 0.3454 - val_acc: 0.9164\n",
+ "\n",
+ "Epoch 02966: val_acc did not improve from 0.94304\n",
+ "Epoch 2967/100000\n",
+ " - 19s - loss: 0.3128 - acc: 0.9374 - val_loss: 0.3013 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 02967: val_acc did not improve from 0.94304\n",
+ "Epoch 2968/100000\n",
+ " - 18s - loss: 0.3126 - acc: 0.9375 - val_loss: 0.3361 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 02968: val_acc did not improve from 0.94304\n",
+ "Epoch 2969/100000\n",
+ " - 18s - loss: 0.3110 - acc: 0.9372 - val_loss: 0.3153 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 02969: val_acc did not improve from 0.94304\n",
+ "Epoch 2970/100000\n",
+ " - 19s - loss: 0.3134 - acc: 0.9378 - val_loss: 0.3074 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 02970: val_acc did not improve from 0.94304\n",
+ "\n",
+ "Epoch 02970: ReduceLROnPlateau reducing learning rate to 0.0005133419937919825.\n",
+ "Epoch 2971/100000\n",
+ " - 18s - loss: 0.3113 - acc: 0.9367 - val_loss: 0.3777 - val_acc: 0.8963\n",
+ "\n",
+ "Epoch 02971: val_acc did not improve from 0.94304\n",
+ "Epoch 2972/100000\n",
+ " - 19s - loss: 0.3085 - acc: 0.9370 - val_loss: 0.2987 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 02972: val_acc did not improve from 0.94304\n",
+ "Epoch 2973/100000\n",
+ " - 18s - loss: 0.3064 - acc: 0.9372 - val_loss: 0.3204 - val_acc: 0.9200\n",
+ "\n",
+ "Epoch 02973: val_acc did not improve from 0.94304\n",
+ "Epoch 2974/100000\n",
+ " - 18s - loss: 0.3037 - acc: 0.9383 - val_loss: 0.2977 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 02974: val_acc did not improve from 0.94304\n",
+ "Epoch 2975/100000\n",
+ " - 19s - loss: 0.3049 - acc: 0.9378 - val_loss: 0.3012 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02975: val_acc did not improve from 0.94304\n",
+ "Epoch 2976/100000\n",
+ " - 18s - loss: 0.3056 - acc: 0.9374 - val_loss: 0.3031 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 02976: val_acc did not improve from 0.94304\n",
+ "Epoch 2977/100000\n",
+ " - 19s - loss: 0.3065 - acc: 0.9373 - val_loss: 0.2935 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 02977: val_acc did not improve from 0.94304\n",
+ "Epoch 2978/100000\n",
+ " - 18s - loss: 0.3061 - acc: 0.9373 - val_loss: 0.3027 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 02978: val_acc did not improve from 0.94304\n",
+ "Epoch 2979/100000\n",
+ " - 18s - loss: 0.3059 - acc: 0.9370 - val_loss: 0.3450 - val_acc: 0.9113\n",
+ "\n",
+ "Epoch 02979: val_acc did not improve from 0.94304\n",
+ "Epoch 2980/100000\n",
+ " - 19s - loss: 0.3073 - acc: 0.9371 - val_loss: 0.3051 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 02980: val_acc did not improve from 0.94304\n",
+ "Epoch 2981/100000\n",
+ " - 19s - loss: 0.3057 - acc: 0.9373 - val_loss: 0.2952 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 02981: val_acc did not improve from 0.94304\n",
+ "Epoch 2982/100000\n",
+ " - 19s - loss: 0.3049 - acc: 0.9377 - val_loss: 0.2973 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 02982: val_acc did not improve from 0.94304\n",
+ "Epoch 2983/100000\n",
+ " - 18s - loss: 0.3050 - acc: 0.9380 - val_loss: 0.2944 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 02983: val_acc did not improve from 0.94304\n",
+ "Epoch 2984/100000\n",
+ " - 19s - loss: 0.3058 - acc: 0.9374 - val_loss: 0.3177 - val_acc: 0.9289\n",
+ "\n",
+ "Epoch 02984: val_acc did not improve from 0.94304\n",
+ "Epoch 2985/100000\n",
+ " - 19s - loss: 0.3069 - acc: 0.9378 - val_loss: 0.3013 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02985: val_acc did not improve from 0.94304\n",
+ "Epoch 2986/100000\n",
+ " - 19s - loss: 0.3069 - acc: 0.9377 - val_loss: 0.3304 - val_acc: 0.9155\n",
+ "\n",
+ "Epoch 02986: val_acc did not improve from 0.94304\n",
+ "Epoch 2987/100000\n",
+ " - 18s - loss: 0.3078 - acc: 0.9372 - val_loss: 0.3098 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 02987: val_acc did not improve from 0.94304\n",
+ "Epoch 2988/100000\n",
+ " - 19s - loss: 0.3073 - acc: 0.9374 - val_loss: 0.3190 - val_acc: 0.9261\n",
+ "\n",
+ "Epoch 02988: val_acc did not improve from 0.94304\n",
+ "Epoch 2989/100000\n",
+ " - 19s - loss: 0.3071 - acc: 0.9369 - val_loss: 0.2924 - val_acc: 0.9402\n",
+ "\n",
+ "Epoch 02989: val_acc did not improve from 0.94304\n",
+ "Epoch 2990/100000\n",
+ " - 19s - loss: 0.3059 - acc: 0.9375 - val_loss: 0.2919 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 02990: val_acc did not improve from 0.94304\n",
+ "Epoch 2991/100000\n",
+ " - 19s - loss: 0.3061 - acc: 0.9370 - val_loss: 0.2994 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 02991: val_acc did not improve from 0.94304\n",
+ "Epoch 2992/100000\n",
+ " - 19s - loss: 0.3024 - acc: 0.9380 - val_loss: 0.2994 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02992: val_acc did not improve from 0.94304\n",
+ "Epoch 2993/100000\n",
+ " - 19s - loss: 0.3048 - acc: 0.9375 - val_loss: 0.3206 - val_acc: 0.9239\n",
+ "\n",
+ "Epoch 02993: val_acc did not improve from 0.94304\n",
+ "Epoch 2994/100000\n",
+ " - 18s - loss: 0.3070 - acc: 0.9371 - val_loss: 0.3311 - val_acc: 0.9157\n",
+ "\n",
+ "Epoch 02994: val_acc did not improve from 0.94304\n",
+ "Epoch 2995/100000\n",
+ " - 19s - loss: 0.3031 - acc: 0.9387 - val_loss: 0.3056 - val_acc: 0.9285\n",
+ "\n",
+ "Epoch 02995: val_acc did not improve from 0.94304\n",
+ "Epoch 2996/100000\n",
+ " - 18s - loss: 0.3071 - acc: 0.9371 - val_loss: 0.2975 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02996: val_acc did not improve from 0.94304\n",
+ "Epoch 2997/100000\n",
+ " - 19s - loss: 0.3092 - acc: 0.9370 - val_loss: 0.3102 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 02997: val_acc did not improve from 0.94304\n",
+ "Epoch 2998/100000\n",
+ " - 18s - loss: 0.3052 - acc: 0.9377 - val_loss: 0.3259 - val_acc: 0.9198\n",
+ "\n",
+ "Epoch 02998: val_acc did not improve from 0.94304\n",
+ "Epoch 2999/100000\n",
+ " - 19s - loss: 0.3077 - acc: 0.9369 - val_loss: 0.3729 - val_acc: 0.8995\n",
+ "\n",
+ "Epoch 02999: val_acc did not improve from 0.94304\n",
+ "Epoch 3000/100000\n",
+ " - 18s - loss: 0.3085 - acc: 0.9363 - val_loss: 0.3352 - val_acc: 0.9266\n",
+ "\n",
+ "Epoch 03000: val_acc did not improve from 0.94304\n",
+ "Epoch 3001/100000\n",
+ " - 19s - loss: 0.3077 - acc: 0.9369 - val_loss: 0.2941 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 03001: val_acc did not improve from 0.94304\n",
+ "Epoch 3002/100000\n",
+ " - 19s - loss: 0.3066 - acc: 0.9369 - val_loss: 0.3770 - val_acc: 0.8919\n",
+ "\n",
+ "Epoch 03002: val_acc did not improve from 0.94304\n",
+ "Epoch 3003/100000\n",
+ " - 18s - loss: 0.3057 - acc: 0.9375 - val_loss: 0.3034 - val_acc: 0.9305\n",
+ "\n",
+ "Epoch 03003: val_acc did not improve from 0.94304\n",
+ "Epoch 3004/100000\n",
+ " - 19s - loss: 0.3080 - acc: 0.9371 - val_loss: 0.3244 - val_acc: 0.9227\n",
+ "\n",
+ "Epoch 03004: val_acc did not improve from 0.94304\n",
+ "Epoch 3005/100000\n",
+ " - 19s - loss: 0.3039 - acc: 0.9379 - val_loss: 0.3105 - val_acc: 0.9275\n",
+ "\n",
+ "Epoch 03005: val_acc did not improve from 0.94304\n",
+ "Epoch 3006/100000\n",
+ " - 19s - loss: 0.3070 - acc: 0.9371 - val_loss: 0.3640 - val_acc: 0.9075\n",
+ "\n",
+ "Epoch 03006: val_acc did not improve from 0.94304\n",
+ "Epoch 3007/100000\n",
+ " - 19s - loss: 0.3078 - acc: 0.9371 - val_loss: 0.3673 - val_acc: 0.9123\n",
+ "\n",
+ "Epoch 03007: val_acc did not improve from 0.94304\n",
+ "Epoch 3008/100000\n",
+ " - 19s - loss: 0.3095 - acc: 0.9370 - val_loss: 0.2987 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 03008: val_acc did not improve from 0.94304\n",
+ "Epoch 3009/100000\n",
+ " - 19s - loss: 0.3059 - acc: 0.9374 - val_loss: 0.2992 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 03009: val_acc did not improve from 0.94304\n",
+ "Epoch 3010/100000\n",
+ " - 19s - loss: 0.3102 - acc: 0.9372 - val_loss: 0.3152 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 03010: val_acc did not improve from 0.94304\n",
+ "Epoch 3011/100000\n",
+ " - 19s - loss: 0.3062 - acc: 0.9378 - val_loss: 0.3093 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 03011: val_acc did not improve from 0.94304\n",
+ "Epoch 3012/100000\n",
+ " - 18s - loss: 0.3075 - acc: 0.9370 - val_loss: 0.3008 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 03012: val_acc did not improve from 0.94304\n",
+ "Epoch 3013/100000\n",
+ " - 19s - loss: 0.3056 - acc: 0.9376 - val_loss: 0.3104 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 03013: val_acc did not improve from 0.94304\n",
+ "Epoch 3014/100000\n",
+ " - 18s - loss: 0.3068 - acc: 0.9376 - val_loss: 0.3239 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 03014: val_acc did not improve from 0.94304\n",
+ "Epoch 3015/100000\n",
+ " - 19s - loss: 0.3088 - acc: 0.9369 - val_loss: 1.3347 - val_acc: 0.3410\n",
+ "\n",
+ "Epoch 03015: val_acc did not improve from 0.94304\n",
+ "Epoch 3016/100000\n",
+ " - 19s - loss: 0.3054 - acc: 0.9375 - val_loss: 0.4498 - val_acc: 0.8558\n",
+ "\n",
+ "Epoch 03016: val_acc did not improve from 0.94304\n",
+ "Epoch 3017/100000\n",
+ " - 19s - loss: 0.3081 - acc: 0.9364 - val_loss: 0.3004 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 03017: val_acc did not improve from 0.94304\n",
+ "Epoch 3018/100000\n",
+ " - 19s - loss: 0.3074 - acc: 0.9375 - val_loss: 0.3651 - val_acc: 0.9028\n",
+ "\n",
+ "Epoch 03018: val_acc did not improve from 0.94304\n",
+ "Epoch 3019/100000\n",
+ " - 18s - loss: 0.3064 - acc: 0.9379 - val_loss: 0.3062 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 03019: val_acc did not improve from 0.94304\n",
+ "Epoch 3020/100000\n",
+ " - 19s - loss: 0.3095 - acc: 0.9363 - val_loss: 0.3107 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 03020: val_acc did not improve from 0.94304\n",
+ "Epoch 3021/100000\n",
+ " - 18s - loss: 0.3061 - acc: 0.9375 - val_loss: 0.3072 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 03021: val_acc did not improve from 0.94304\n",
+ "Epoch 3022/100000\n",
+ " - 19s - loss: 0.3081 - acc: 0.9368 - val_loss: 0.3269 - val_acc: 0.9200\n",
+ "\n",
+ "Epoch 03022: val_acc did not improve from 0.94304\n",
+ "Epoch 3023/100000\n",
+ " - 18s - loss: 0.3061 - acc: 0.9381 - val_loss: 0.2915 - val_acc: 0.9410\n",
+ "\n",
+ "Epoch 03023: val_acc did not improve from 0.94304\n",
+ "Epoch 3024/100000\n",
+ " - 18s - loss: 0.3058 - acc: 0.9376 - val_loss: 0.3054 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 03024: val_acc did not improve from 0.94304\n",
+ "Epoch 3025/100000\n",
+ " - 19s - loss: 0.3063 - acc: 0.9369 - val_loss: 0.3510 - val_acc: 0.9051\n",
+ "\n",
+ "Epoch 03025: val_acc did not improve from 0.94304\n",
+ "Epoch 3026/100000\n",
+ " - 18s - loss: 0.3069 - acc: 0.9369 - val_loss: 0.3174 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 03026: val_acc did not improve from 0.94304\n",
+ "Epoch 3027/100000\n",
+ " - 19s - loss: 0.3063 - acc: 0.9372 - val_loss: 0.3686 - val_acc: 0.8980\n",
+ "\n",
+ "Epoch 03027: val_acc did not improve from 0.94304\n",
+ "Epoch 3028/100000\n",
+ " - 18s - loss: 0.3064 - acc: 0.9371 - val_loss: 0.3006 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 03028: val_acc did not improve from 0.94304\n",
+ "Epoch 3029/100000\n",
+ " - 19s - loss: 0.3046 - acc: 0.9382 - val_loss: 0.3270 - val_acc: 0.9240\n",
+ "\n",
+ "Epoch 03029: val_acc did not improve from 0.94304\n",
+ "Epoch 3030/100000\n",
+ " - 18s - loss: 0.3085 - acc: 0.9367 - val_loss: 0.3243 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 03030: val_acc did not improve from 0.94304\n",
+ "Epoch 3031/100000\n",
+ " - 19s - loss: 0.3057 - acc: 0.9378 - val_loss: 0.3836 - val_acc: 0.8936\n",
+ "\n",
+ "Epoch 03031: val_acc did not improve from 0.94304\n",
+ "Epoch 3032/100000\n",
+ " - 19s - loss: 0.3057 - acc: 0.9377 - val_loss: 0.8802 - val_acc: 0.5064\n",
+ "\n",
+ "Epoch 03032: val_acc did not improve from 0.94304\n",
+ "Epoch 3033/100000\n",
+ " - 18s - loss: 0.3067 - acc: 0.9371 - val_loss: 0.2938 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 03033: val_acc did not improve from 0.94304\n",
+ "Epoch 3034/100000\n",
+ " - 19s - loss: 0.3067 - acc: 0.9371 - val_loss: 0.2986 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 03034: val_acc did not improve from 0.94304\n",
+ "Epoch 3035/100000\n",
+ " - 18s - loss: 0.3055 - acc: 0.9373 - val_loss: 0.3246 - val_acc: 0.9285\n",
+ "\n",
+ "Epoch 03035: val_acc did not improve from 0.94304\n",
+ "Epoch 3036/100000\n",
+ " - 19s - loss: 0.3052 - acc: 0.9377 - val_loss: 0.3329 - val_acc: 0.9187\n",
+ "\n",
+ "Epoch 03036: val_acc did not improve from 0.94304\n",
+ "Epoch 3037/100000\n",
+ " - 18s - loss: 0.3057 - acc: 0.9373 - val_loss: 0.2892 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 03037: val_acc did not improve from 0.94304\n",
+ "Epoch 3038/100000\n",
+ " - 19s - loss: 0.3050 - acc: 0.9377 - val_loss: 0.2884 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 03038: val_acc did not improve from 0.94304\n",
+ "Epoch 3039/100000\n",
+ " - 18s - loss: 0.3076 - acc: 0.9370 - val_loss: 0.3069 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 03039: val_acc did not improve from 0.94304\n",
+ "Epoch 3040/100000\n",
+ " - 18s - loss: 0.3066 - acc: 0.9372 - val_loss: 0.3303 - val_acc: 0.9228\n",
+ "\n",
+ "Epoch 03040: val_acc did not improve from 0.94304\n",
+ "Epoch 3041/100000\n",
+ " - 19s - loss: 0.3060 - acc: 0.9373 - val_loss: 0.2938 - val_acc: 0.9393\n",
+ "\n",
+ "Epoch 03041: val_acc did not improve from 0.94304\n",
+ "Epoch 3042/100000\n",
+ " - 18s - loss: 0.3054 - acc: 0.9375 - val_loss: 0.3025 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 03042: val_acc did not improve from 0.94304\n",
+ "Epoch 3043/100000\n",
+ " - 19s - loss: 0.3075 - acc: 0.9371 - val_loss: 0.3080 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 03043: val_acc did not improve from 0.94304\n",
+ "Epoch 3044/100000\n",
+ " - 18s - loss: 0.3065 - acc: 0.9374 - val_loss: 0.2991 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 03044: val_acc did not improve from 0.94304\n",
+ "Epoch 3045/100000\n",
+ " - 19s - loss: 0.3087 - acc: 0.9372 - val_loss: 0.3193 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 03045: val_acc did not improve from 0.94304\n",
+ "Epoch 3046/100000\n",
+ " - 18s - loss: 0.3096 - acc: 0.9367 - val_loss: 0.3144 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 03046: val_acc did not improve from 0.94304\n",
+ "Epoch 3047/100000\n",
+ " - 19s - loss: 0.3083 - acc: 0.9372 - val_loss: 0.4617 - val_acc: 0.8495\n",
+ "\n",
+ "Epoch 03047: val_acc did not improve from 0.94304\n",
+ "Epoch 3048/100000\n",
+ " - 19s - loss: 0.3065 - acc: 0.9380 - val_loss: 0.3201 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 03048: val_acc did not improve from 0.94304\n",
+ "Epoch 3049/100000\n",
+ " - 18s - loss: 0.3076 - acc: 0.9368 - val_loss: 0.2988 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 03049: val_acc did not improve from 0.94304\n",
+ "Epoch 3050/100000\n",
+ " - 19s - loss: 0.3043 - acc: 0.9380 - val_loss: 0.3050 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 03050: val_acc did not improve from 0.94304\n",
+ "Epoch 3051/100000\n",
+ " - 19s - loss: 0.3079 - acc: 0.9368 - val_loss: 0.3312 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 03051: val_acc did not improve from 0.94304\n",
+ "Epoch 3052/100000\n",
+ " - 19s - loss: 0.3086 - acc: 0.9370 - val_loss: 0.3069 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 03052: val_acc did not improve from 0.94304\n",
+ "Epoch 3053/100000\n",
+ " - 18s - loss: 0.3113 - acc: 0.9363 - val_loss: 0.3058 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 03053: val_acc did not improve from 0.94304\n",
+ "Epoch 3054/100000\n",
+ " - 19s - loss: 0.3047 - acc: 0.9379 - val_loss: 0.2929 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 03054: val_acc did not improve from 0.94304\n",
+ "Epoch 3055/100000\n",
+ " - 18s - loss: 0.3065 - acc: 0.9373 - val_loss: 0.3973 - val_acc: 0.9227\n",
+ "\n",
+ "Epoch 03055: val_acc did not improve from 0.94304\n",
+ "Epoch 3056/100000\n",
+ " - 19s - loss: 0.3067 - acc: 0.9371 - val_loss: 0.3329 - val_acc: 0.9187\n",
+ "\n",
+ "Epoch 03056: val_acc did not improve from 0.94304\n",
+ "Epoch 3057/100000\n",
+ " - 19s - loss: 0.3044 - acc: 0.9380 - val_loss: 0.3077 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 03063: val_acc did not improve from 0.94304\n",
+ "Epoch 3064/100000\n",
+ " - 19s - loss: 0.3072 - acc: 0.9376 - val_loss: 0.3255 - val_acc: 0.9198\n",
+ "\n",
+ "Epoch 03064: val_acc did not improve from 0.94304\n",
+ "Epoch 3065/100000\n",
+ " - 19s - loss: 0.3072 - acc: 0.9371 - val_loss: 0.2982 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 03065: val_acc did not improve from 0.94304\n",
+ "Epoch 3066/100000\n",
+ " - 19s - loss: 0.3075 - acc: 0.9367 - val_loss: 0.3134 - val_acc: 0.9232\n",
+ "\n",
+ "Epoch 03066: val_acc did not improve from 0.94304\n",
+ "Epoch 3067/100000\n",
+ " - 18s - loss: 0.3041 - acc: 0.9374 - val_loss: 0.3434 - val_acc: 0.9165\n",
+ "\n",
+ "Epoch 03067: val_acc did not improve from 0.94304\n",
+ "Epoch 3068/100000\n",
+ " - 19s - loss: 0.3061 - acc: 0.9373 - val_loss: 0.3057 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 03068: val_acc did not improve from 0.94304\n",
+ "Epoch 3069/100000\n",
+ " - 18s - loss: 0.3058 - acc: 0.9371 - val_loss: 0.2997 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 03069: val_acc did not improve from 0.94304\n",
+ "Epoch 3070/100000\n",
+ " - 19s - loss: 0.3059 - acc: 0.9374 - val_loss: 0.3157 - val_acc: 0.9257\n",
+ "\n",
+ "Epoch 03070: val_acc did not improve from 0.94304\n",
+ "Epoch 3071/100000\n",
+ " - 18s - loss: 0.3052 - acc: 0.9375 - val_loss: 0.3393 - val_acc: 0.9217\n",
+ "\n",
+ "Epoch 03071: val_acc did not improve from 0.94304\n",
+ "Epoch 3072/100000\n",
+ " - 18s - loss: 0.3093 - acc: 0.9363 - val_loss: 0.3119 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 03072: val_acc did not improve from 0.94304\n",
+ "Epoch 3073/100000\n",
+ " - 19s - loss: 0.3074 - acc: 0.9375 - val_loss: 0.3112 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 03073: val_acc did not improve from 0.94304\n",
+ "Epoch 3074/100000\n",
+ " - 18s - loss: 0.3074 - acc: 0.9371 - val_loss: 0.2980 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 03074: val_acc did not improve from 0.94304\n",
+ "Epoch 3075/100000\n",
+ " - 19s - loss: 0.3068 - acc: 0.9380 - val_loss: 0.4343 - val_acc: 0.8745\n",
+ "\n",
+ "Epoch 03075: val_acc did not improve from 0.94304\n",
+ "Epoch 3076/100000\n",
+ " - 18s - loss: 0.3077 - acc: 0.9369 - val_loss: 0.3055 - val_acc: 0.9323\n",
+ "\n",
+ "Epoch 03076: val_acc did not improve from 0.94304\n",
+ "Epoch 3077/100000\n",
+ " - 19s - loss: 0.3071 - acc: 0.9377 - val_loss: 0.3081 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 03077: val_acc did not improve from 0.94304\n",
+ "Epoch 3078/100000\n",
+ " - 19s - loss: 0.3089 - acc: 0.9374 - val_loss: 0.3407 - val_acc: 0.9102\n",
+ "\n",
+ "Epoch 03078: val_acc did not improve from 0.94304\n",
+ "Epoch 3079/100000\n",
+ " - 18s - loss: 0.3052 - acc: 0.9379 - val_loss: 0.3000 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 03079: val_acc did not improve from 0.94304\n",
+ "Epoch 3080/100000\n",
+ " - 18s - loss: 0.3062 - acc: 0.9373 - val_loss: 0.3151 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 03080: val_acc did not improve from 0.94304\n",
+ "Epoch 3081/100000\n",
+ " - 19s - loss: 0.3049 - acc: 0.9372 - val_loss: 0.2984 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 03081: val_acc did not improve from 0.94304\n",
+ "Epoch 3082/100000\n",
+ " - 18s - loss: 0.3062 - acc: 0.9376 - val_loss: 0.2963 - val_acc: 0.9393\n",
+ "\n",
+ "Epoch 03082: val_acc did not improve from 0.94304\n",
+ "Epoch 3083/100000\n",
+ " - 18s - loss: 0.3088 - acc: 0.9367 - val_loss: 0.3228 - val_acc: 0.9247\n",
+ "\n",
+ "Epoch 03083: val_acc did not improve from 0.94304\n",
+ "Epoch 3084/100000\n",
+ " - 19s - loss: 0.3047 - acc: 0.9378 - val_loss: 0.3067 - val_acc: 0.9271\n",
+ "\n",
+ "Epoch 03084: val_acc did not improve from 0.94304\n",
+ "Epoch 3085/100000\n",
+ " - 19s - loss: 0.3058 - acc: 0.9373 - val_loss: 0.3029 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 03085: val_acc did not improve from 0.94304\n",
+ "Epoch 3086/100000\n",
+ " - 19s - loss: 0.3056 - acc: 0.9371 - val_loss: 0.3204 - val_acc: 0.9290\n",
+ "\n",
+ "Epoch 03086: val_acc did not improve from 0.94304\n",
+ "Epoch 3087/100000\n",
+ " - 18s - loss: 0.3068 - acc: 0.9377 - val_loss: 0.3117 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 03087: val_acc did not improve from 0.94304\n",
+ "Epoch 3088/100000\n",
+ " - 19s - loss: 0.3050 - acc: 0.9378 - val_loss: 0.3289 - val_acc: 0.9155\n",
+ "\n",
+ "Epoch 03088: val_acc did not improve from 0.94304\n",
+ "Epoch 3089/100000\n",
+ " - 18s - loss: 0.3059 - acc: 0.9371 - val_loss: 0.7666 - val_acc: 0.6464\n",
+ "\n",
+ "Epoch 03089: val_acc did not improve from 0.94304\n",
+ "Epoch 3090/100000\n",
+ " - 19s - loss: 0.3060 - acc: 0.9374 - val_loss: 0.3049 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 03090: val_acc did not improve from 0.94304\n",
+ "Epoch 3091/100000\n",
+ " - 18s - loss: 0.3083 - acc: 0.9364 - val_loss: 0.2953 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 03091: val_acc did not improve from 0.94304\n",
+ "Epoch 3092/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9369 - val_loss: 0.2957 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 03092: val_acc did not improve from 0.94304\n",
+ "Epoch 3093/100000\n",
+ " - 18s - loss: 0.3091 - acc: 0.9371 - val_loss: 0.2991 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 03093: val_acc did not improve from 0.94304\n",
+ "Epoch 3094/100000\n",
+ " - 19s - loss: 0.3057 - acc: 0.9377 - val_loss: 0.2948 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 03094: val_acc did not improve from 0.94304\n",
+ "Epoch 3095/100000\n",
+ " - 19s - loss: 0.3073 - acc: 0.9374 - val_loss: 0.3284 - val_acc: 0.9251\n",
+ "\n",
+ "Epoch 03095: val_acc did not improve from 0.94304\n",
+ "Epoch 3096/100000\n",
+ " - 19s - loss: 0.3073 - acc: 0.9373 - val_loss: 0.3160 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 03096: val_acc did not improve from 0.94304\n",
+ "Epoch 3097/100000\n",
+ " - 19s - loss: 0.3045 - acc: 0.9378 - val_loss: 0.4257 - val_acc: 0.8796\n",
+ "\n",
+ "Epoch 03097: val_acc did not improve from 0.94304\n",
+ "Epoch 3098/100000\n",
+ " - 19s - loss: 0.3081 - acc: 0.9371 - val_loss: 0.3316 - val_acc: 0.9187\n",
+ "\n",
+ "Epoch 03098: val_acc did not improve from 0.94304\n",
+ "Epoch 3099/100000\n",
+ " - 18s - loss: 0.3067 - acc: 0.9374 - val_loss: 0.3505 - val_acc: 0.9097\n",
+ "\n",
+ "Epoch 03099: val_acc did not improve from 0.94304\n",
+ "Epoch 3100/100000\n",
+ " - 19s - loss: 0.3081 - acc: 0.9368 - val_loss: 0.3152 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 03100: val_acc did not improve from 0.94304\n",
+ "Epoch 3101/100000\n",
+ " - 18s - loss: 0.3080 - acc: 0.9369 - val_loss: 0.3300 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 03101: val_acc did not improve from 0.94304\n",
+ "Epoch 3102/100000\n",
+ " - 19s - loss: 0.3056 - acc: 0.9378 - val_loss: 0.2967 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 03102: val_acc did not improve from 0.94304\n",
+ "Epoch 3103/100000\n",
+ " - 18s - loss: 0.3045 - acc: 0.9378 - val_loss: 0.3180 - val_acc: 0.9256\n",
+ "\n",
+ "Epoch 03103: val_acc did not improve from 0.94304\n",
+ "Epoch 3104/100000\n",
+ " - 18s - loss: 0.3060 - acc: 0.9373 - val_loss: 0.3063 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 03104: val_acc did not improve from 0.94304\n",
+ "Epoch 3105/100000\n",
+ " - 18s - loss: 0.3069 - acc: 0.9375 - val_loss: 0.2912 - val_acc: 0.9386\n",
+ "\n",
+ "Epoch 03105: val_acc did not improve from 0.94304\n",
+ "Epoch 3106/100000\n",
+ " - 18s - loss: 0.3085 - acc: 0.9370 - val_loss: 0.4813 - val_acc: 0.8491\n",
+ "\n",
+ "Epoch 03106: val_acc did not improve from 0.94304\n",
+ "Epoch 3107/100000\n",
+ " - 19s - loss: 0.3068 - acc: 0.9377 - val_loss: 0.2939 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 03107: val_acc did not improve from 0.94304\n",
+ "Epoch 3108/100000\n",
+ " - 18s - loss: 0.3056 - acc: 0.9374 - val_loss: 0.2990 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 03108: val_acc did not improve from 0.94304\n",
+ "Epoch 3109/100000\n",
+ " - 19s - loss: 0.3064 - acc: 0.9373 - val_loss: 0.2968 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 03109: val_acc did not improve from 0.94304\n",
+ "Epoch 3110/100000\n",
+ " - 18s - loss: 0.3060 - acc: 0.9376 - val_loss: 0.3127 - val_acc: 0.9226\n",
+ "\n",
+ "Epoch 03110: val_acc did not improve from 0.94304\n",
+ "Epoch 3111/100000\n",
+ " - 19s - loss: 0.3056 - acc: 0.9375 - val_loss: 0.3570 - val_acc: 0.9255\n",
+ "\n",
+ "Epoch 03111: val_acc did not improve from 0.94304\n",
+ "Epoch 3112/100000\n",
+ " - 18s - loss: 0.3037 - acc: 0.9382 - val_loss: 0.3017 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 03112: val_acc did not improve from 0.94304\n",
+ "Epoch 3113/100000\n",
+ " - 19s - loss: 0.3087 - acc: 0.9370 - val_loss: 0.3007 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 03113: val_acc did not improve from 0.94304\n",
+ "Epoch 3114/100000\n",
+ " - 18s - loss: 0.3070 - acc: 0.9378 - val_loss: 0.2997 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 03114: val_acc did not improve from 0.94304\n",
+ "Epoch 3115/100000\n",
+ " - 19s - loss: 0.3056 - acc: 0.9375 - val_loss: 0.3026 - val_acc: 0.9282\n",
+ "\n",
+ "Epoch 03115: val_acc did not improve from 0.94304\n",
+ "Epoch 3116/100000\n",
+ " - 18s - loss: 0.3077 - acc: 0.9367 - val_loss: 0.2950 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 03116: val_acc did not improve from 0.94304\n",
+ "Epoch 3117/100000\n",
+ " - 18s - loss: 0.3062 - acc: 0.9375 - val_loss: 0.2959 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 03117: val_acc did not improve from 0.94304\n",
+ "Epoch 3118/100000\n",
+ " - 18s - loss: 0.3072 - acc: 0.9366 - val_loss: 0.3058 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 03118: val_acc did not improve from 0.94304\n",
+ "Epoch 3119/100000\n",
+ " - 18s - loss: 0.3080 - acc: 0.9367 - val_loss: 0.4545 - val_acc: 0.8612\n",
+ "\n",
+ "Epoch 03119: val_acc did not improve from 0.94304\n",
+ "Epoch 3120/100000\n",
+ " - 19s - loss: 0.3050 - acc: 0.9372 - val_loss: 0.3711 - val_acc: 0.8987\n",
+ "\n",
+ "Epoch 03120: val_acc did not improve from 0.94304\n",
+ "Epoch 3121/100000\n",
+ " - 18s - loss: 0.3059 - acc: 0.9373 - val_loss: 0.2965 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 03121: val_acc did not improve from 0.94304\n",
+ "Epoch 3122/100000\n",
+ " - 19s - loss: 0.3084 - acc: 0.9365 - val_loss: 0.3181 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 03122: val_acc did not improve from 0.94304\n",
+ "Epoch 3123/100000\n",
+ " - 18s - loss: 0.3084 - acc: 0.9366 - val_loss: 0.3016 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 03123: val_acc did not improve from 0.94304\n",
+ "Epoch 3124/100000\n",
+ " - 18s - loss: 0.3071 - acc: 0.9370 - val_loss: 0.3064 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 03124: val_acc did not improve from 0.94304\n",
+ "Epoch 3125/100000\n",
+ " - 18s - loss: 0.3048 - acc: 0.9376 - val_loss: 0.4543 - val_acc: 0.8567\n",
+ "\n",
+ "Epoch 03125: val_acc did not improve from 0.94304\n",
+ "Epoch 3126/100000\n",
+ " - 19s - loss: 0.3079 - acc: 0.9378 - val_loss: 0.3155 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 03126: val_acc did not improve from 0.94304\n",
+ "Epoch 3127/100000\n",
+ " - 18s - loss: 0.3077 - acc: 0.9378 - val_loss: 0.2989 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 03127: val_acc did not improve from 0.94304\n",
+ "Epoch 3128/100000\n",
+ " - 19s - loss: 0.3077 - acc: 0.9371 - val_loss: 0.3066 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 03128: val_acc did not improve from 0.94304\n",
+ "Epoch 3129/100000\n",
+ " - 18s - loss: 0.3078 - acc: 0.9371 - val_loss: 0.3697 - val_acc: 0.8963\n",
+ "\n",
+ "Epoch 03129: val_acc did not improve from 0.94304\n",
+ "Epoch 3130/100000\n",
+ " - 18s - loss: 0.3069 - acc: 0.9369 - val_loss: 0.3060 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 03130: val_acc did not improve from 0.94304\n",
+ "Epoch 3131/100000\n",
+ " - 18s - loss: 0.3058 - acc: 0.9378 - val_loss: 0.3096 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 03131: val_acc did not improve from 0.94304\n",
+ "Epoch 3132/100000\n",
+ " - 19s - loss: 0.3081 - acc: 0.9371 - val_loss: 0.4269 - val_acc: 0.8692\n",
+ "\n",
+ "Epoch 03132: val_acc did not improve from 0.94304\n",
+ "Epoch 3133/100000\n",
+ " - 18s - loss: 0.3066 - acc: 0.9370 - val_loss: 0.2918 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 03133: val_acc did not improve from 0.94304\n",
+ "Epoch 3134/100000\n",
+ " - 19s - loss: 0.3078 - acc: 0.9370 - val_loss: 0.4157 - val_acc: 0.8707\n",
+ "\n",
+ "Epoch 03134: val_acc did not improve from 0.94304\n",
+ "Epoch 3135/100000\n",
+ " - 19s - loss: 0.3111 - acc: 0.9363 - val_loss: 0.3089 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 03135: val_acc did not improve from 0.94304\n",
+ "Epoch 3136/100000\n",
+ " - 19s - loss: 0.3059 - acc: 0.9374 - val_loss: 0.3001 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 03136: val_acc did not improve from 0.94304\n",
+ "Epoch 3137/100000\n",
+ " - 19s - loss: 0.3077 - acc: 0.9373 - val_loss: 0.2996 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 03137: val_acc did not improve from 0.94304\n",
+ "Epoch 3138/100000\n",
+ " - 18s - loss: 0.3072 - acc: 0.9366 - val_loss: 0.2920 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 03138: val_acc did not improve from 0.94304\n",
+ "Epoch 3139/100000\n",
+ " - 19s - loss: 0.3078 - acc: 0.9371 - val_loss: 0.3048 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 03139: val_acc did not improve from 0.94304\n",
+ "Epoch 3140/100000\n",
+ " - 18s - loss: 0.3037 - acc: 0.9382 - val_loss: 0.3267 - val_acc: 0.9173\n",
+ "\n",
+ "Epoch 03140: val_acc did not improve from 0.94304\n",
+ "Epoch 3141/100000\n",
+ " - 19s - loss: 0.3050 - acc: 0.9379 - val_loss: 0.3101 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 03141: val_acc did not improve from 0.94304\n",
+ "Epoch 3142/100000\n",
+ " - 18s - loss: 0.3087 - acc: 0.9370 - val_loss: 0.2946 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 03142: val_acc did not improve from 0.94304\n",
+ "Epoch 3143/100000\n",
+ " - 18s - loss: 0.3066 - acc: 0.9382 - val_loss: 0.8759 - val_acc: 0.5239\n",
+ "\n",
+ "Epoch 03143: val_acc did not improve from 0.94304\n",
+ "Epoch 3144/100000\n",
+ " - 18s - loss: 0.3051 - acc: 0.9381 - val_loss: 0.2953 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 03144: val_acc did not improve from 0.94304\n",
+ "Epoch 3145/100000\n",
+ " - 18s - loss: 0.3057 - acc: 0.9374 - val_loss: 0.3194 - val_acc: 0.9267\n",
+ "\n",
+ "Epoch 03145: val_acc did not improve from 0.94304\n",
+ "Epoch 3146/100000\n",
+ " - 18s - loss: 0.3058 - acc: 0.9371 - val_loss: 0.3219 - val_acc: 0.9226\n",
+ "\n",
+ "Epoch 03146: val_acc did not improve from 0.94304\n",
+ "Epoch 3147/100000\n",
+ " - 18s - loss: 0.3073 - acc: 0.9366 - val_loss: 0.2973 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 03147: val_acc did not improve from 0.94304\n",
+ "Epoch 3148/100000\n",
+ " - 19s - loss: 0.3059 - acc: 0.9375 - val_loss: 0.3064 - val_acc: 0.9272\n",
+ "\n",
+ "Epoch 03148: val_acc did not improve from 0.94304\n",
+ "Epoch 3149/100000\n",
+ " - 19s - loss: 0.3058 - acc: 0.9382 - val_loss: 0.3211 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 03149: val_acc did not improve from 0.94304\n",
+ "Epoch 3150/100000\n",
+ " - 18s - loss: 0.3055 - acc: 0.9377 - val_loss: 0.3082 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 03150: val_acc did not improve from 0.94304\n",
+ "Epoch 3151/100000\n",
+ " - 19s - loss: 0.3061 - acc: 0.9371 - val_loss: 0.3066 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 03151: val_acc did not improve from 0.94304\n",
+ "Epoch 3152/100000\n",
+ " - 18s - loss: 0.3087 - acc: 0.9365 - val_loss: 0.3044 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 03152: val_acc did not improve from 0.94304\n",
+ "Epoch 3153/100000\n",
+ " - 19s - loss: 0.3069 - acc: 0.9379 - val_loss: 0.3525 - val_acc: 0.9100\n",
+ "\n",
+ "Epoch 03153: val_acc did not improve from 0.94304\n",
+ "Epoch 3154/100000\n",
+ " - 19s - loss: 0.3067 - acc: 0.9372 - val_loss: 0.3370 - val_acc: 0.9159\n",
+ "\n",
+ "Epoch 03154: val_acc did not improve from 0.94304\n",
+ "Epoch 3155/100000\n",
+ " - 18s - loss: 0.3061 - acc: 0.9370 - val_loss: 0.2943 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 03155: val_acc did not improve from 0.94304\n",
+ "Epoch 3156/100000\n",
+ " - 18s - loss: 0.3087 - acc: 0.9367 - val_loss: 0.3152 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 03156: val_acc did not improve from 0.94304\n",
+ "Epoch 3157/100000\n",
+ " - 19s - loss: 0.3063 - acc: 0.9374 - val_loss: 0.4472 - val_acc: 0.8742\n",
+ "\n",
+ "Epoch 03157: val_acc did not improve from 0.94304\n",
+ "Epoch 3158/100000\n",
+ " - 18s - loss: 0.3062 - acc: 0.9373 - val_loss: 0.2993 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 03158: val_acc did not improve from 0.94304\n",
+ "Epoch 3159/100000\n",
+ " - 19s - loss: 0.3080 - acc: 0.9367 - val_loss: 0.3168 - val_acc: 0.9266\n",
+ "\n",
+ "Epoch 03159: val_acc did not improve from 0.94304\n",
+ "Epoch 3160/100000\n",
+ " - 18s - loss: 0.3072 - acc: 0.9369 - val_loss: 0.3015 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 03160: val_acc did not improve from 0.94304\n",
+ "Epoch 3161/100000\n",
+ " - 19s - loss: 0.3059 - acc: 0.9378 - val_loss: 0.3216 - val_acc: 0.9251\n",
+ "\n",
+ "Epoch 03161: val_acc did not improve from 0.94304\n",
+ "Epoch 3162/100000\n",
+ " - 19s - loss: 0.3071 - acc: 0.9368 - val_loss: 0.3350 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 03162: val_acc did not improve from 0.94304\n",
+ "Epoch 3163/100000\n",
+ " - 19s - loss: 0.3111 - acc: 0.9364 - val_loss: 0.3439 - val_acc: 0.9148\n",
+ "\n",
+ "Epoch 03163: val_acc did not improve from 0.94304\n",
+ "Epoch 3164/100000\n",
+ " - 19s - loss: 0.3048 - acc: 0.9387 - val_loss: 0.3586 - val_acc: 0.8973\n",
+ "\n",
+ "Epoch 03164: val_acc did not improve from 0.94304\n",
+ "Epoch 3165/100000\n",
+ " - 19s - loss: 0.3113 - acc: 0.9364 - val_loss: 0.3691 - val_acc: 0.8984\n",
+ "\n",
+ "Epoch 03165: val_acc did not improve from 0.94304\n",
+ "Epoch 3166/100000\n",
+ " - 19s - loss: 0.3076 - acc: 0.9376 - val_loss: 0.3802 - val_acc: 0.8953\n",
+ "\n",
+ "Epoch 03166: val_acc did not improve from 0.94304\n",
+ "Epoch 3167/100000\n",
+ " - 19s - loss: 0.3057 - acc: 0.9377 - val_loss: 0.3180 - val_acc: 0.9266\n",
+ "\n",
+ "Epoch 03167: val_acc did not improve from 0.94304\n",
+ "Epoch 3168/100000\n",
+ " - 18s - loss: 0.3055 - acc: 0.9374 - val_loss: 0.3024 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 03168: val_acc did not improve from 0.94304\n",
+ "Epoch 3169/100000\n",
+ " - 19s - loss: 0.3065 - acc: 0.9373 - val_loss: 0.2976 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 03169: val_acc did not improve from 0.94304\n",
+ "Epoch 3170/100000\n",
+ " - 19s - loss: 0.3057 - acc: 0.9373 - val_loss: 0.5487 - val_acc: 0.8129\n",
+ "\n",
+ "Epoch 03170: val_acc did not improve from 0.94304\n",
+ "Epoch 3171/100000\n",
+ " - 19s - loss: 0.3075 - acc: 0.9376 - val_loss: 0.3715 - val_acc: 0.9001\n",
+ "\n",
+ "Epoch 03171: val_acc did not improve from 0.94304\n",
+ "Epoch 3172/100000\n",
+ " - 18s - loss: 0.3046 - acc: 0.9384 - val_loss: 0.3271 - val_acc: 0.9189\n",
+ "\n",
+ "Epoch 03172: val_acc did not improve from 0.94304\n",
+ "Epoch 3173/100000\n",
+ " - 19s - loss: 0.3070 - acc: 0.9371 - val_loss: 0.3109 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 03173: val_acc did not improve from 0.94304\n",
+ "Epoch 3174/100000\n",
+ " - 18s - loss: 0.3081 - acc: 0.9366 - val_loss: 0.2895 - val_acc: 0.9395\n",
+ "\n",
+ "Epoch 03174: val_acc did not improve from 0.94304\n",
+ "Epoch 3175/100000\n",
+ " - 19s - loss: 0.3065 - acc: 0.9369 - val_loss: 0.2914 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 03175: val_acc did not improve from 0.94304\n",
+ "Epoch 3176/100000\n",
+ " - 18s - loss: 0.3041 - acc: 0.9382 - val_loss: 0.3353 - val_acc: 0.9207\n",
+ "\n",
+ "Epoch 03176: val_acc did not improve from 0.94304\n",
+ "Epoch 3177/100000\n",
+ " - 19s - loss: 0.3062 - acc: 0.9377 - val_loss: 0.3048 - val_acc: 0.9394\n",
+ "\n",
+ "Epoch 03177: val_acc did not improve from 0.94304\n",
+ "Epoch 3178/100000\n",
+ " - 18s - loss: 0.3069 - acc: 0.9372 - val_loss: 0.3574 - val_acc: 0.9108\n",
+ "\n",
+ "Epoch 03178: val_acc did not improve from 0.94304\n",
+ "\n",
+ "Epoch 03178: ReduceLROnPlateau reducing learning rate to 0.0004876748775132.\n",
+ "Epoch 3179/100000\n",
+ " - 19s - loss: 0.3044 - acc: 0.9365 - val_loss: 0.3126 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 03179: val_acc did not improve from 0.94304\n",
+ "Epoch 3180/100000\n",
+ " - 18s - loss: 0.3016 - acc: 0.9375 - val_loss: 0.2866 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 03180: val_acc did not improve from 0.94304\n",
+ "Epoch 3181/100000\n",
+ " - 19s - loss: 0.3009 - acc: 0.9380 - val_loss: 0.3235 - val_acc: 0.9150\n",
+ "\n",
+ "Epoch 03181: val_acc did not improve from 0.94304\n",
+ "Epoch 3182/100000\n",
+ " - 19s - loss: 0.3026 - acc: 0.9369 - val_loss: 0.2864 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 03182: val_acc did not improve from 0.94304\n",
+ "Epoch 3183/100000\n",
+ " - 19s - loss: 0.3015 - acc: 0.9375 - val_loss: 0.3091 - val_acc: 0.9281\n",
+ "\n",
+ "Epoch 03183: val_acc did not improve from 0.94304\n",
+ "Epoch 3184/100000\n",
+ " - 18s - loss: 0.2995 - acc: 0.9382 - val_loss: 0.2989 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 03184: val_acc did not improve from 0.94304\n",
+ "Epoch 3185/100000\n",
+ " - 19s - loss: 0.3010 - acc: 0.9378 - val_loss: 0.2989 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 03185: val_acc did not improve from 0.94304\n",
+ "Epoch 3186/100000\n",
+ " - 19s - loss: 0.3016 - acc: 0.9372 - val_loss: 0.3112 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 03186: val_acc did not improve from 0.94304\n",
+ "Epoch 3187/100000\n",
+ " - 18s - loss: 0.3011 - acc: 0.9375 - val_loss: 0.3042 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 03187: val_acc did not improve from 0.94304\n",
+ "Epoch 3188/100000\n",
+ " - 18s - loss: 0.3028 - acc: 0.9368 - val_loss: 0.3545 - val_acc: 0.9214\n",
+ "\n",
+ "Epoch 03188: val_acc did not improve from 0.94304\n",
+ "Epoch 3189/100000\n",
+ " - 18s - loss: 0.3024 - acc: 0.9376 - val_loss: 0.2885 - val_acc: 0.9403\n",
+ "\n",
+ "Epoch 03189: val_acc did not improve from 0.94304\n",
+ "Epoch 3190/100000\n",
+ " - 18s - loss: 0.2998 - acc: 0.9378 - val_loss: 0.3087 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 03190: val_acc did not improve from 0.94304\n",
+ "Epoch 3191/100000\n",
+ " - 19s - loss: 0.3004 - acc: 0.9385 - val_loss: 0.6119 - val_acc: 0.7614\n",
+ "\n",
+ "Epoch 03191: val_acc did not improve from 0.94304\n",
+ "Epoch 3192/100000\n",
+ " - 18s - loss: 0.3000 - acc: 0.9382 - val_loss: 0.3348 - val_acc: 0.9104\n",
+ "\n",
+ "Epoch 03192: val_acc did not improve from 0.94304\n",
+ "Epoch 3193/100000\n",
+ " - 19s - loss: 0.3029 - acc: 0.9369 - val_loss: 0.2857 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 03193: val_acc did not improve from 0.94304\n",
+ "Epoch 3194/100000\n",
+ " - 18s - loss: 0.2995 - acc: 0.9383 - val_loss: 0.3050 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 03194: val_acc did not improve from 0.94304\n",
+ "Epoch 3195/100000\n",
+ " - 19s - loss: 0.3001 - acc: 0.9378 - val_loss: 0.3074 - val_acc: 0.9253\n",
+ "\n",
+ "Epoch 03195: val_acc did not improve from 0.94304\n",
+ "Epoch 3196/100000\n",
+ " - 19s - loss: 0.2999 - acc: 0.9385 - val_loss: 0.2931 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 03196: val_acc did not improve from 0.94304\n",
+ "Epoch 3197/100000\n",
+ " - 18s - loss: 0.3038 - acc: 0.9368 - val_loss: 0.3030 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 03197: val_acc did not improve from 0.94304\n",
+ "Epoch 3198/100000\n",
+ " - 19s - loss: 0.3007 - acc: 0.9377 - val_loss: 0.3000 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 03198: val_acc did not improve from 0.94304\n",
+ "Epoch 3199/100000\n",
+ " - 19s - loss: 0.3071 - acc: 0.9371 - val_loss: 0.3467 - val_acc: 0.9136\n",
+ "\n",
+ "Epoch 03199: val_acc did not improve from 0.94304\n",
+ "Epoch 3200/100000\n",
+ " - 18s - loss: 0.3027 - acc: 0.9379 - val_loss: 0.4056 - val_acc: 0.8857\n",
+ "\n",
+ "Epoch 03200: val_acc did not improve from 0.94304\n",
+ "Epoch 3201/100000\n",
+ " - 18s - loss: 0.3031 - acc: 0.9371 - val_loss: 0.2881 - val_acc: 0.9383\n",
+ "\n",
+ "Epoch 03201: val_acc did not improve from 0.94304\n",
+ "Epoch 3202/100000\n",
+ " - 18s - loss: 0.3026 - acc: 0.9376 - val_loss: 0.2900 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 03202: val_acc did not improve from 0.94304\n",
+ "Epoch 3203/100000\n",
+ " - 18s - loss: 0.3038 - acc: 0.9374 - val_loss: 0.3219 - val_acc: 0.9183\n",
+ "\n",
+ "Epoch 03203: val_acc did not improve from 0.94304\n",
+ "Epoch 3204/100000\n",
+ " - 19s - loss: 0.3014 - acc: 0.9374 - val_loss: 0.4039 - val_acc: 0.8728\n",
+ "\n",
+ "Epoch 03204: val_acc did not improve from 0.94304\n",
+ "Epoch 3205/100000\n",
+ " - 18s - loss: 0.3001 - acc: 0.9376 - val_loss: 0.3022 - val_acc: 0.9269\n",
+ "\n",
+ "Epoch 03205: val_acc did not improve from 0.94304\n",
+ "Epoch 3206/100000\n",
+ " - 19s - loss: 0.2998 - acc: 0.9378 - val_loss: 0.3115 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 03206: val_acc did not improve from 0.94304\n",
+ "Epoch 3207/100000\n",
+ " - 19s - loss: 0.3015 - acc: 0.9374 - val_loss: 0.2904 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 03207: val_acc did not improve from 0.94304\n",
+ "Epoch 3208/100000\n",
+ " - 19s - loss: 0.2991 - acc: 0.9383 - val_loss: 0.2872 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 03208: val_acc did not improve from 0.94304\n",
+ "Epoch 3209/100000\n",
+ " - 19s - loss: 0.2998 - acc: 0.9377 - val_loss: 0.3062 - val_acc: 0.9251\n",
+ "\n",
+ "Epoch 03209: val_acc did not improve from 0.94304\n",
+ "Epoch 3210/100000\n",
+ " - 18s - loss: 0.3017 - acc: 0.9371 - val_loss: 0.3043 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 03210: val_acc did not improve from 0.94304\n",
+ "Epoch 3211/100000\n",
+ " - 19s - loss: 0.3034 - acc: 0.9367 - val_loss: 0.2863 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 03211: val_acc did not improve from 0.94304\n",
+ "Epoch 3212/100000\n",
+ " - 18s - loss: 0.3013 - acc: 0.9379 - val_loss: 0.2973 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 03212: val_acc did not improve from 0.94304\n",
+ "Epoch 3213/100000\n",
+ " - 18s - loss: 0.3009 - acc: 0.9378 - val_loss: 0.2939 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 03213: val_acc did not improve from 0.94304\n",
+ "Epoch 3214/100000\n",
+ " - 18s - loss: 0.2992 - acc: 0.9384 - val_loss: 0.3207 - val_acc: 0.9211\n",
+ "\n",
+ "Epoch 03214: val_acc did not improve from 0.94304\n",
+ "Epoch 3215/100000\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Build and train the CNN classifier.\n",
+ "# NOTE(review): relies on earlier cells for tf, datetime, LoggingTensorBoard,\n",
+ "# ModelCheckpoint, ReduceLROnPlateau, num_classes and the train/test arrays --\n",
+ "# confirm they are defined before running this cell on a fresh kernel.\n",
+ "# NOTE(review): the return value of get_default_graph() is discarded.\n",
+ "tf.get_default_graph()\n",
+ "########## HYPER PARAMETERS\n",
+ "\n",
+ "batch_size = 2000\n",
+ "epochs = 100000\n",
+ "optimizer = tf.keras.optimizers.Adam(lr=0.001)\n",
+ "\n",
+ "# L1/L2 regularization strengths shared by every Conv2D/Dense layer below.\n",
+ "l1v = 0.005\n",
+ "l2v = 0.015\n",
+ "#optimizer = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.1)\n",
+ "#init=tf.global_variables_initializer()\n",
+ "\n",
+ "########## HYPER PARAMETERS\n",
+ "########## MODEL ARCHITECTURE\n",
+ "# Two Conv2D blocks (128/64 and 64/32 filters) with batch norm, max pooling\n",
+ "# and dropout, followed by two dense layers and a softmax output over\n",
+ "# num_classes classes. Input is a single-channel 27x15 image.\n",
+ "model = tf.keras.models.Sequential()\n",
+ "model.add(tf.keras.layers.Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same', input_shape=(27,15,1), \n",
+ " kernel_regularizer=tf.keras.regularizers.l1_l2(l1v,l2v)))\n",
+ "model.add(tf.keras.layers.BatchNormalization(axis=-1))\n",
+ "model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same', \n",
+ " kernel_regularizer=tf.keras.regularizers.l1_l2(l1v,l2v)))\n",
+ "model.add(tf.keras.layers.BatchNormalization(axis=-1))\n",
+ "model.add(tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=None, padding='same', data_format='channels_last'))\n",
+ "model.add(tf.keras.layers.Dropout(0.45))\n",
+ "\n",
+ "model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same',\n",
+ " kernel_regularizer=tf.keras.regularizers.l1_l2(l1v,l2v)))\n",
+ "model.add(tf.keras.layers.BatchNormalization(axis=-1))\n",
+ "model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation='relu', padding='same',\n",
+ " kernel_regularizer=tf.keras.regularizers.l1_l2(l1v,l2v)))\n",
+ "model.add(tf.keras.layers.BatchNormalization(axis=-1))\n",
+ "model.add(tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=None, padding='same', data_format='channels_last'))\n",
+ "model.add(tf.keras.layers.Dropout(0.45))\n",
+ "\n",
+ "model.add(tf.keras.layers.Flatten())\n",
+ "model.add(tf.keras.layers.Dense(140, activation='relu',\n",
+ " kernel_regularizer=tf.keras.regularizers.l1_l2(l1v,l2v), use_bias=True))\n",
+ "model.add(tf.keras.layers.Dropout(0.5))\n",
+ "model.add(tf.keras.layers.Dense(70, activation='relu',\n",
+ " kernel_regularizer=tf.keras.regularizers.l1_l2(l1v,l2v), use_bias=True))\n",
+ "model.add(tf.keras.layers.Dropout(0.5))\n",
+ "model.add(tf.keras.layers.Dense(num_classes, activation='softmax'))\n",
+ "########## MODEL ARCHITECTURE\n",
+ "####TENSORBOARD\n",
+ "# Collect a human-readable layer-name/output-shape summary so it can be\n",
+ "# logged alongside the TensorBoard run.\n",
+ "config = \"\"\n",
+ "for layer in model.layers:\n",
+ " config += str(layer.output).split('\\\"')[1].split(\"/\")[0] + str(layer.output_shape) + \"\\n\\n\"\n",
+ "#### END TENSORBOARD\n",
+ "config += \"batchsize: \" + str(batch_size) + \"\\n\\n\" + \"epochs: \" + str(epochs) + \"\\n\\n\"\n",
+ "\n",
+ "# Print summary\n",
+ "current_name = \"CNN\"\n",
+ "readable_timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S')\n",
+ "tensorflowfolder = \"/srv/share/tensorboardfiles/\" + current_name + readable_timestamp\n",
+ "print(current_name + readable_timestamp)\n",
+ "\n",
+ "model.summary()\n",
+ "# TensorBoard logger that also records the settings string assembled above.\n",
+ "logger = LoggingTensorBoard(settings_str_to_log = config, log_dir=tensorflowfolder, histogram_freq=0,\n",
+ " write_graph=True, write_images=True, update_freq = 'epoch')\n",
+ "# Checkpoint: save a snapshot only when val_acc improves (one .h5 per epoch number).\n",
+ "storer = ModelCheckpoint(\"./ModelSnapshots/\" + current_name + readable_timestamp + '-{epoch:03d}.h5',\n",
+ " monitor='val_acc', verbose=1,\n",
+ " save_best_only=True, save_weights_only=False,\n",
+ " mode='auto', period=1)\n",
+ "# NOTE(review): LR reduction monitors val_loss while checkpointing monitors\n",
+ "# val_acc -- intentional? Factor 0.95 every 140 stagnant epochs, floor 1e-5.\n",
+ "learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss', \n",
+ " patience=140, \n",
+ " verbose=1, \n",
+ " factor=0.95, \n",
+ " min_lr=0.00001)\n",
+ "# compile model for training\n",
+ "model.compile(loss='categorical_crossentropy',\n",
+ " optimizer=optimizer,\n",
+ " metrics=['accuracy'])\n",
+ "\n",
+ "# epochs=100000 with no EarlyStopping callback: training runs until stopped\n",
+ "# manually (the captured log above ends mid-run around epoch 3215).\n",
+ "history = model.fit(x_train, y_train_one_hot,\n",
+ " batch_size=batch_size,\n",
+ " epochs=epochs,\n",
+ " verbose=2,\n",
+ " validation_data=(x_test, y_test_one_hot),\n",
+ " callbacks=[logger, storer, learning_rate_reduction])\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Persist the final (last-epoch) model; per-improvement snapshots are written\n",
+ "# separately by the ModelCheckpoint callback during training.\n",
+ "model.save(\"./ModelSnapshots/\" + current_name + \"_DONE.h5\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Plot the accuracy and loss curves recorded in history by model.fit above.\n",
+ "# NOTE(review): the 'test' legend entry actually shows the validation split\n",
+ "# passed via validation_data.\n",
+ "# summarize history for accuracy\n",
+ "plt.plot(history.history['acc'])\n",
+ "plt.plot(history.history['val_acc'])\n",
+ "plt.title('model accuracy')\n",
+ "plt.ylabel('accuracy')\n",
+ "plt.xlabel('epoch')\n",
+ "plt.legend(['train', 'test'], loc='upper left')\n",
+ "plt.show()\n",
+ "# summarize history for loss\n",
+ "plt.plot(history.history['loss'])\n",
+ "plt.plot(history.history['val_loss'])\n",
+ "plt.title('model loss')\n",
+ "plt.ylabel('loss')\n",
+ "plt.xlabel('epoch')\n",
+ "plt.legend(['train', 'test'], loc='upper left')\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_08_CNN-Report.ipynb b/python/Step_08_CNN-Report.ipynb
new file mode 100644
index 0000000..c17b718
--- /dev/null
+++ b/python/Step_08_CNN-Report.ipynb
@@ -0,0 +1,430 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Using TensorFlow backend.\n",
+ "/usr/local/lib/python3.6/dist-packages/requests/__init__.py:91: RequestsDependencyWarning: urllib3 (1.25.2) or chardet (3.0.4) doesn't match a supported version!\n",
+ " RequestsDependencyWarning)\n"
+ ]
+ }
+ ],
+ "source": [
+ "import keras\n",
+ "from keras.models import load_model\n",
+ "from keras import utils\n",
+ "\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "import pandas as pd\n",
+ "import math\n",
+ "\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "# Importing matplotlib to plot images.\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "%matplotlib inline\n",
+ "\n",
+ "# Importing SK-learn to calculate precision and recall\n",
+ "import sklearn\n",
+ "from sklearn import metrics\n",
+ "from sklearn.model_selection import train_test_split, cross_val_score, LeaveOneGroupOut\n",
+ "from sklearn.utils import shuffle \n",
+ "\n",
+ "# Used for graph export\n",
+ "from tensorflow.python.framework import graph_util\n",
+ "from tensorflow.python.framework import graph_io\n",
+ "from keras import backend as K\n",
+ "from keras import regularizers\n",
+ "\n",
+ "import pickle as pkl\n",
+ "import h5py\n",
+ "\n",
+ "from pathlib import Path\n",
+ "import os.path\n",
+ "import sys\n",
+ "import datetime\n",
+ "import time\n",
+ "\n",
+ "target_names = [\"Knuckle\", \"Finger\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[ 1 2 9 6 4 14 17 16 12 3 10 18 5] [13 8 11 15 7]\n",
+ "13 : 5\n",
+ "0.7222222222222222 : 0.2777777777777778\n",
+ "503886\n"
+ ]
+ }
+ ],
+ "source": [
+ "# the data, split between train and test sets\n",
+ "df = pd.read_pickle(\"DataStudyCollection/df_blobs_area.pkl\")\n",
+ "\n",
+ "lst = df.userID.unique()\n",
+ "np.random.seed(42)\n",
+ "np.random.shuffle(lst)\n",
+ "test_ids = lst[-5:]\n",
+ "train_ids = lst[:-5]\n",
+ "print(train_ids, test_ids)\n",
+ "print(len(train_ids), \":\", len(test_ids))\n",
+ "print(len(train_ids) / len(lst), \":\", len(test_ids)/ len(lst))\n",
+ "\n",
+ "df = df[df.userID.isin(train_ids) | df.userID.isin(test_ids) & (df.Version == \"Normal\")]\n",
+ "print(len(df))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x = np.vstack(df.Blobs)\n",
+ "x = x.reshape(-1, 27, 15, 1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# convert class vectors to binary class matrices (one-hot notation)\n",
+ "num_classes = 2\n",
+ "y = utils.to_categorical(df.InputMethod, num_classes)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "Text(0.5, 1.0, 'Label for image 1 is: [1. 0.]')"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAALEAAAEICAYAAAAQmxXMAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAD1tJREFUeJzt3X2wXHV9x/H35yaBQIiFAGYgPIRmUp2UGeIMgm3BQhEE+hD8oymM0GhpY1XG2vpQdNoGqVXGKVUZLaNAIEWBQShD1BQIGRjaKRUCA5oUNBASSBoSIASChIck3/5xflc2l717d++evXu/8HnN3Nlz9jx9d+9nf3vO2bP7U0RgltlAvwsw65ZDbOk5xJaeQ2zpOcSWnkNs6fUsxJLulvTndS+rytWSnpd0X3dVgqQjJL0kaUK36xov6npMkq6R9JqkdTWV1sm2f6M8hl0j5WjEEEtaJ+kD9ZXXtROAU4HDIuK4blcWEU9GxH4Rsav70npH0smS7pL0wkihqvkxfS0iZjbUMV/Sf0t6WdLdna5M0l9LelrSi5IWS9q72XwR8YuI2A/4z5HWmXF34khgXUT8stMFJU3sQT1j5ZfAYuBzfa5jK/AN4JJOF5T0QeBC4BSq/+OvA1/qtqBRh1jSAZJ+JOmZ8tb+I0mHDZltlqT7yqvuVknTGpZ/X3lFb5P0sKST2tjm+cCVwG+Vt5ovlfv/QtJjkrZKWirp0IZlQtInJa0B1jRZ58wyz8QyfrekL5faXpL0Q0kHSvp+eRz3S5rZsPw3JT1Vpj0g6cSGaftIWlKen0ckfV7Shobph0q6uTyHT0j61HCPPSLui4hrgbVtPE9DH9NHJK2VtL1s58MjraNFHXdGxI3A/41i8QXAVRGxOiKeB/4R+MhoaxnUTUs8AFxN9Yo6AtgBfGvIPH8K/BlwCLATuAxA0gzgx8CXgWnAZ4GbJR3caoMRcRXwl8C95e1ykaTfA74KzC/bWQ/cMGTRs4DjgTltPrazgfOAGcAs4N7yWKcBjwCLGua9H5hbpl0H/EDS5DJtETCTqsU5FTh3cCFJA8APgYfLdk4BPl1aq9pImkL1vJ8REVOB3wYeKtOOKI3IEXVus4XfpHq8gx4Gpks6sJuVjjrEEfFcRNwcES9HxHbgn4DfHTLbtRGxqrz1/z0wvxxsnAssi4hlEbE7IpYDK4EzR1HKh4HFEfFgRLwKfIGqpZ7ZMM9XI2JrROxoc51XR8TjEfEC8B/A46UF2gn8AHjP4IwR8b3yXOyMiEuBvYF3lcnzga9ExPMRsYHyIi7eCxwcERdHxGsRsRa4guoFVLfdwNGS9omITRGxutT+ZETsHxFP9mCbzewHvNAwPjg8tZuVdrM7sa+k70haL+lF4B5g/yFHxE81DK8HJgEHUbXef1xagW2StlEdsB0yilIOLesGICJeAp6jat2a1dGOzQ3DO5qM7zc4IumzZVfhhfI4fo3qMQ7W1rjtxuEjgUOHPAdfBKZ3WGtLpQH5E6p3sE2Sfizp3XVuowMvAe9oGB8c3t7NSrvZnfgMVYtzfES8A3h/uV8N8xzeMHwE8DrwLNU/89rSCgz+TYmIjg8WqPbNjhwcKW+fBwIbG+bpyaV6Zf/381Qt7gERsT9V6zL4HGwCGo8TGp+Pp4AnhjwHUyNiNO9GLUXE7RFxKlUj8ShVi98Pq4FjGsaPATZHxHPdrLTdEE+SNLnhbyLVW8AOYFs5YFvUZLlzJc2RtC9wMXBTOe3zPeAPJX1Q0oSyzpOaHBi243rgo5LmltM1XwF+EhHrRrGuTk2l2td/Bpgo6R/Ys6W5EfhCOQieAVzQMO0+YLukvy0HgBMkHS3pvc02JGmg7GtPqkY1WdJeIxUoabqkeeXF/SpVa7h7NA+2rG9CqWMiMFDqmNTm4v8GnF8ysT/wd8A1o61lULshXkYV2MG/i6hOs+xD1bL+D3Bbk+WuLUU+DUwG
PgUQEU8B86jePp+hapU+10E9vxIRd1Ltb99M1fLNojf7lc3cTvW4f0G1S/MKe+4yXAxsAJ4A7gRuogoS5cX8B1QHhU9QPY9XUu2ONPN+qud+GW8cSN/RRo0DwN9QvWNtpTpu+Tjs8aFIJwd255VtXw6cWIZ/1bKX9Z3YbMGIuA34GnAX8CTVc7aoYdnVozlzIl8UP3YkfRw4OyKGHgCPS5KuAM6hesufNcbbnk115mcv4BMRcc2w8zrEvSPpEKrTa/cCs6lOK34rIr7R18LeYjJ/gpXBXsB3gKOAbVTnr/+1rxW9BbkltvQyXjthtocx353YS3vHZKYMP4M0/DRAE0a6urD1O0vsHNcXq70lbOf5ZyOi5SUEdaolxJJOB74JTACubPWhxWSmcPyE04Zf16TWJQ3sP9wZqGJX65Du2rqt9fK7HfJu3Rk3rR95rvp0vTtRPmb+NnAG1QU250hq90Ibs67VsU98HPBYRKyNiNeojsDn1bBes7bUEeIZ7Pkp1Qb2vPgGSQslrZS08vXqAyuz2ozJ2YmI+G5EHBsRx06i6bdRzEatjhBvZM+rsw5jzyvIzHqqjhDfD8yWdFS5qupsYGkN6zVrS9en2CJip6QLqK7omkD1LYvVrRca/krAgb1H2N2Y2uIcM8D2Eb4/6lNobzm1nCeOiGVUlwiajTl/7GzpOcSWnkNs6TnElp5DbOk5xJbemF9PrIEBBvbZZ/gZZrT+7ZDHzz2o5fRJ21tfj3z4ZS+1nL775ZdbTrfxxy2xpecQW3oOsaXnEFt6DrGl5xBbeg6xpTfufsYqRvhdiZ9/9PKu1v/7N/xRy+m7143Vj6ZbXdwSW3oOsaXnEFt6DrGl5xBbeg6xpecQW3rj7jzxwPMvtpx+9GWfaDn95UNa92717h0jdo1sybgltvQcYkvPIbb0HGJLzyG29BxiS88htvTG/Dxx7N7N7leG77dDr7zScvkjb9jQ3fZ3tF6/5VNXP3brgO3ALmBnRBxbx3rN2lFnS3xyRDxb4/rM2uJ9YkuvrhAHcIekByQtrGmdZm2pa3fihIjYKOmdwHJJj0bEPYMTS7AXAkxm35o2aVappSWOiI3ldgtwC1VXuY3T3Rmj9UwdHZRPkTR1cBg4DVjV7XrN2lXH7sR04BZJg+u7LiJua7lEi77kdj23tfXWRppubzt1dMa4FjimhlrMRsWn2Cw9h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSazvEkhZL2iJpVcN90yQtl7Sm3B7QmzLNhtdJS3wNcPqQ+y4EVkTEbGBFGTcbU22HuHTpNbTDjHnAkjK8BDirprrM2tZtnx3TI2JTGX6aqhOaN3E/dtZLtR3YRURQ9SzabJr7sbOe6TbEmyUdAlBut3Rfkllnug3xUmBBGV4A3Nrl+sw61skptuuBe4F3Sdog6XzgEuBUSWuAD5RxszHV9oFdRJwzzKRTaqrFbFT8iZ2l5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl120/dhdJ2ijpofJ3Zm/KNBtet/3YAXw9IuaWv2X1lGXWvm77sTPruzr2iS+Q9NOyu9G0W1xJCyWtlLTydV6tYZNmb+g2xJcDs4C5wCbg0mYzuR8766WuQhwRmyNiV0TsBq4AjqunLLP2dRXiwY4Yiw8Bq4ab16xX2u4CrPRjdxJwkKQNwCLgJElzqbrDXQd8rAc1mrXUbT92V9VYi9mo+BM7S88htvQcYkvPIbb0HGJLzyG29BxiS88htvQcYkvPIbb0HGJLzyG29BxiS88h
tvQcYkvPIbb0HGJLzyG29BxiS88htvQcYkvPIbb0HGJLzyG29BxiS88htvQcYkvPIbb0HGJLzyG29BxiS6+TfuwOl3SXpP+VtFrSX5X7p0laLmlNuW3a+YxZr3TSEu8EPhMRc4D3AZ+UNAe4EFgREbOBFWXcbMx00o/dpoh4sAxvBx4BZgDzgCVltiXAWXUXadZK290dNJI0E3gP8BNgekRsKpOeBqY3mX8hsBBgMvuOZpNmw+r4wE7SfsDNwKcj4sXGaRERVJ3QMOR+92NnPdNRiCVNogrw9yPi38vdmwe7Aiu3W+ot0ay1Ts5OiKq3pEci4l8aJi0FFpThBcCt9ZVnNrJO9ol/BzgP+Jmkh8p9XwQuAW6UdD6wHphfb4lmrXXSj91/ARpm8in1lGPWOX9iZ+k5xJaeQ2zpOcSWnkNs6TnElp5DbOk5xJaeQ2zpOcSWnkNs6TnElp5DbOk5xJaeQ2zpOcSWnkNs6TnElp5DbOk5xJaeQ2zpOcSWnkNs6TnElp5DbOk5xJaeQ2zpOcSWnkNs6TnElp5DbOk5xJZeWyFu0RHjRZI2Snqo/J3Z23LN3qzdX4of7IjxQUlTgQckLS/Tvh4R/9yb8sxG1laISz91m8rwdkmDHTGa9d1o+rGbyRsdMQJcIOmnkhYP16+zpIWSVkpa+TqvjrpYs2Y67cduaEeMlwOzgLlULfWlzZZzZ4zWS530Y/emjhgjYnNE7IqI3cAVwHG9KdNseO2enWjaEeNgT6LFh4BV9ZZnNrJ2z04M1xHjOZLmUvXnvA74WO0Vmo2g3bMTw3XEuKzecsw650/sLD2H2NJziC09h9jSc4gtPYfY0lNEjO0GpWeA9UPuPgh4dkwL6Yzr68yREXHwWG1szEPctAhpZUQc2+86huP6xjfvTlh6DrGlN15C/N1+FzAC1zeOjYt9YrNujJeW2GzUHGJLr68hlnS6pJ9LekzShf2spRlJ6yT9rPwcwcp+1wNQvsu4RdKqhvumSVouaU25bfpdx7eqvoVY0gTg28AZwByqC+zn9KueFk6OiLnj6DzsNcDpQ+67EFgREbOBFWX8baOfLfFxwGMRsTYiXgNuAOb1sZ4UIuIeYOuQu+cBS8rwEuCsMS2qz/oZ4hnAUw3jGxh/v2URwB2SHpC0sN/FtDC9/DYIwNPA9H4WM9ba/Y7d29UJEbFR0juB5ZIeLS3huBURIeltdd60ny3xRuDwhvHDyn3jRkRsLLdbgFsYvz9JsHnwm+fldkuf6xlT/Qzx/cBsSUdJ2gs4G1jax3r2IGlK+d05JE0BTmP8/iTBUmBBGV4A3NrHWsZc33YnImKnpAuA24EJwOKIWN2vepqYDtxS/eQGE4HrIuK2/pYEkq4HTgIOkrQBWARcAtwo6Xyqy1zn96/CseePnS09f2Jn6TnElp5DbOk5xJaeQ2zpOcSWnkNs6f0/kZhtd/D2o3sAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "i = 1\n",
+ "plt.imshow(x[i].reshape(27, 15)) # capacitive image is 27 x 15 pixels\n",
+ "plt.title(\"Label for image %i is: %s\" % (i, y[i]))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# If GPU is not available: \n",
+ "# GPU_USE = '/cpu:0'\n",
+ "#config = tf.ConfigProto(device_count = {\"GPU\": 1})\n",
+ "\n",
+ "\n",
+ "# If GPU is available: \n",
+ "config = tf.ConfigProto()\n",
+ "config.log_device_placement = True\n",
+ "config.allow_soft_placement = True\n",
+ "config.gpu_options.allow_growth=True\n",
+ "config.gpu_options.allocator_type = 'BFC'\n",
+ "\n",
+ "# Limit the maximum memory used\n",
+ "config.gpu_options.per_process_gpu_memory_fraction = 0.4\n",
+ "\n",
+ "# set session config\n",
+ "tf.keras.backend.set_session(tf.Session(config=config))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "loadpath = \"./ModelSnapshots/CNN-33767.h5\"\n",
+ "model = load_model(loadpath)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 1min 28s, sys: 9.52 s, total: 1min 37s\n",
+ "Wall time: 1min\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "lst = []\n",
+ "batch = 100\n",
+ "for i in range(0, len(x), batch):\n",
+ " _x = x[i: i+batch]\n",
+ " lst.extend(model.predict(_x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"InputMethodPred\"] = lst\n",
+ "df.InputMethodPred = df.InputMethodPred.apply(lambda x: np.argmax(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_train = df[df.userID.isin(train_ids)]\n",
+ "df_test = df[df.userID.isin(test_ids) & (df.Version == \"Normal\")]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[124207 12765]\n",
+ " [ 6596 322276]]\n",
+ "[[0.90680577 0.09319423]\n",
+ " [0.02005644 0.97994356]]\n",
+ "Accuray: 0.958\n",
+ "Recall: 0.943\n",
+ "Precision: 0.957\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " Knuckle 0.95 0.91 0.93 136972\n",
+ " Finger 0.96 0.98 0.97 328872\n",
+ "\n",
+ " micro avg 0.96 0.96 0.96 465844\n",
+ " macro avg 0.96 0.94 0.95 465844\n",
+ "weighted avg 0.96 0.96 0.96 465844\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(sklearn.metrics.confusion_matrix(df_train.InputMethod.values, df_train.InputMethodPred.values, labels=[0, 1]))\n",
+ "cm = sklearn.metrics.confusion_matrix(df_train.InputMethod.values, df_train.InputMethodPred.values, labels=[0, 1])\n",
+ "cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n",
+ "print(cm)\n",
+ "print(\"Accuray: %.3f\" % sklearn.metrics.accuracy_score(df_train.InputMethod.values, df_train.InputMethodPred.values))\n",
+ "print(\"Recall: %.3f\" % metrics.recall_score(df_train.InputMethod.values, df_train.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"Precision: %.3f\" % metrics.average_precision_score(df_train.InputMethod.values, df_train.InputMethodPred.values, average=\"macro\"))\n",
+ "#print(\"F1-Score: %.3f\" % metrics.f1_score(df_train.InputMethod.values, df_train.InputMethodPred.values, average=\"macro\"))\n",
+ "print(sklearn.metrics.classification_report(df_train.InputMethod.values, df_train.InputMethodPred.values, target_names=target_names))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[ 8384 1037]\n",
+ " [ 1028 27593]]\n",
+ "[[0.88992676 0.11007324]\n",
+ " [0.03591768 0.96408232]]\n",
+ "Accuray: 0.946\n",
+ "Recall: 0.927\n",
+ "Precision: 0.956\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " Knuckle 0.89 0.89 0.89 9421\n",
+ " Finger 0.96 0.96 0.96 28621\n",
+ "\n",
+ " micro avg 0.95 0.95 0.95 38042\n",
+ " macro avg 0.93 0.93 0.93 38042\n",
+ "weighted avg 0.95 0.95 0.95 38042\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(sklearn.metrics.confusion_matrix(df_test.InputMethod.values, df_test.InputMethodPred.values, labels=[0, 1]))\n",
+ "cm = sklearn.metrics.confusion_matrix(df_test.InputMethod.values, df_test.InputMethodPred.values, labels=[0, 1])\n",
+ "cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n",
+ "print(cm)\n",
+ "print(\"Accuray: %.3f\" % sklearn.metrics.accuracy_score(df_test.InputMethod.values, df_test.InputMethodPred.values))\n",
+ "print(\"Recall: %.3f\" % metrics.recall_score(df_test.InputMethod.values, df_test.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"Precision: %.3f\" % metrics.average_precision_score(df_test.InputMethod.values, df_test.InputMethodPred.values, average=\"macro\"))\n",
+ "#print(\"F1-Score: %.3f\" % metrics.f1_score(df_test.InputMethod.values, df_test.InputMethodPred.values, average=\"macro\"))\n",
+ "print(sklearn.metrics.classification_report(df_test.InputMethod.values, df_test.InputMethodPred.values, target_names=target_names))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Export"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "output nodes names are: ['output_node0']\n"
+ ]
+ }
+ ],
+ "source": [
+ "output_node_prefix = \"output_node\"\n",
+ "num_output = 1\n",
+ "pred = [None]*num_output\n",
+ "pred_node_names = [None]*num_output\n",
+ "for i in range(num_output):\n",
+ " pred_node_names[i] = output_node_prefix+str(i)\n",
+ " pred[i] = tf.identity(model.outputs[i], name=pred_node_names[i])\n",
+ "print('output nodes names are: ', pred_node_names)\n",
+ "output_node_prefix = pred_node_names[0]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[]"
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "model.inputs"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "sess = K.get_session()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "output_path = \"./Models/\"\n",
+ "output_file = \"CNN.pb\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "INFO:tensorflow:Froze 30 variables.\n",
+ "INFO:tensorflow:Converted 30 variables to const ops.\n",
+ "Saved the freezed graph at: ./Models/CNN.pb\n"
+ ]
+ }
+ ],
+ "source": [
+ "from tensorflow.python.framework import graph_util\n",
+ "from tensorflow.python.framework import graph_io\n",
+ "constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), pred_node_names)\n",
+ "\n",
+ "graph_io.write_graph(constant_graph, output_path, output_file, as_text=False)\n",
+ "\n",
+ "print('Saved the freezed graph at: ', (output_path + output_file))"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_09_LSTM_ReadData.ipynb b/python/Step_09_LSTM_ReadData.ipynb
new file mode 100644
index 0000000..e5be1fa
--- /dev/null
+++ b/python/Step_09_LSTM_ReadData.ipynb
@@ -0,0 +1,152 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Filtering the data for the LSTM: removes all rows where the revert button was used because the participant performed a wrong gesture\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "\n",
+ "from scipy.odr import *\n",
+ "from scipy.stats import *\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import os\n",
+ "import time\n",
+ "import matplotlib.pyplot as plt\n",
+ "import ast\n",
+ "from multiprocessing import Pool, cpu_count\n",
+ "\n",
+ "import scipy\n",
+ "\n",
+ "from IPython import display\n",
+ "from matplotlib.patches import Rectangle\n",
+ "\n",
+ "from sklearn.metrics import mean_squared_error\n",
+ "import json\n",
+ "\n",
+ "import scipy.stats as st\n",
+ "from sklearn.metrics import r2_score\n",
+ "\n",
+ "\n",
+ "from matplotlib import cm\n",
+ "from mpl_toolkits.mplot3d import axes3d\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "import copy\n",
+ "\n",
+ "from sklearn.model_selection import LeaveOneOut, LeavePOut\n",
+ "\n",
+ "from multiprocessing import Pool\n",
+ "import cv2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dfAll = pd.read_pickle(\"DataStudyCollection/AllData.pkl\")\n",
+ "dfAll.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_actual = dfAll[(dfAll.Actual_Data == True) & (dfAll.Is_Pause == False)]\n",
+ "df_actual.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(\"all: %s, actual data: %s\" % (len(dfAll), len(df_actual)))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%%time\n",
+ "# filter out all gestures where the revert button was pressed during the study and the gesture was repeated\n",
+ "def is_max(df):\n",
+ " df_temp = df.copy(deep=True)\n",
+ " max_version = df_temp.RepetitionID.max()\n",
+ " df_temp[\"IsMax\"] = np.where(df_temp.RepetitionID == max_version, True, False)\n",
+ " df_temp[\"MaxRepetition\"] = [max_version] * len(df_temp)\n",
+ " return df_temp\n",
+ "\n",
+ "df_filtered = df_actual.copy(deep=True)\n",
+ "df_grp = df_filtered.groupby([df_filtered.userID, df_filtered.TaskID, df_filtered.VersionID])\n",
+ "pool = Pool(cpu_count() - 1)\n",
+ "result_lst = pool.map(is_max, [grp for name, grp in df_grp])\n",
+ "df_filtered = pd.concat(result_lst)\n",
+ "df_filtered = df_filtered[df_filtered.IsMax == True]\n",
+ "pool.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_filtered.to_pickle(\"DataStudyCollection/df_lstm.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(\"actual: %s, filtered data: %s\" % (len(df_actual), len(df_filtered)))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_10_LSTM_Preprocessing.ipynb b/python/Step_10_LSTM_Preprocessing.ipynb
new file mode 100644
index 0000000..46a4064
--- /dev/null
+++ b/python/Step_10_LSTM_Preprocessing.ipynb
@@ -0,0 +1,1286 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Preprocessing for LSTM: Blob Detection and Cutting"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "\n",
+ "from scipy.odr import *\n",
+ "from scipy.stats import *\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import os\n",
+ "import time\n",
+ "import matplotlib.pyplot as plt\n",
+ "import ast\n",
+ "from multiprocessing import Pool, cpu_count\n",
+ "\n",
+ "import scipy\n",
+ "\n",
+ "from IPython import display\n",
+ "from matplotlib.patches import Rectangle\n",
+ "\n",
+ "from sklearn.metrics import mean_squared_error\n",
+ "import json\n",
+ "\n",
+ "import scipy.stats as st\n",
+ "from sklearn.metrics import r2_score\n",
+ "\n",
+ "\n",
+ "from matplotlib import cm\n",
+ "from mpl_toolkits.mplot3d import axes3d\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "import copy\n",
+ "\n",
+ "from sklearn.model_selection import LeaveOneOut, LeavePOut\n",
+ "\n",
+ "from multiprocessing import Pool\n",
+ "import cv2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_filtered = pd.read_pickle(\"DataStudyCollection/df_lstm.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " Timestamp | \n",
+ " Current_Task | \n",
+ " Task_amount | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " RepetitionID | \n",
+ " Actual_Data | \n",
+ " Is_Pause | \n",
+ " Image | \n",
+ " IsMax | \n",
+ " MaxRepetition | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 291980 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... | \n",
+ " True | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 291981 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... | \n",
+ " True | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 291982 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... | \n",
+ " True | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 291983 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... | \n",
+ " True | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 291984 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... | \n",
+ " True | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID Timestamp Current_Task Task_amount TaskID VersionID \\\n",
+ "291980 1 1,54515E+12 33 680 0 2 \n",
+ "291981 1 1,54515E+12 33 680 0 2 \n",
+ "291982 1 1,54515E+12 33 680 0 2 \n",
+ "291983 1 1,54515E+12 33 680 0 2 \n",
+ "291984 1 1,54515E+12 33 680 0 2 \n",
+ "\n",
+ " RepetitionID Actual_Data Is_Pause \\\n",
+ "291980 0 True False \n",
+ "291981 0 True False \n",
+ "291982 0 True False \n",
+ "291983 0 True False \n",
+ "291984 0 True False \n",
+ "\n",
+ " Image IsMax \\\n",
+ "291980 [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... True \n",
+ "291981 [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... True \n",
+ "291982 [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... True \n",
+ "291983 [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... True \n",
+ "291984 [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... True \n",
+ "\n",
+ " MaxRepetition \n",
+ "291980 0 \n",
+ "291981 0 \n",
+ "291982 0 \n",
+ "291983 0 \n",
+ "291984 0 "
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_filtered.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_filtered.Image = df_filtered.Image.apply(lambda x: x.reshape(27, 15))\n",
+ "df_filtered.Image = df_filtered.Image.apply(lambda x: x.clip(min=0, max=255))\n",
+ "df_filtered.Image = df_filtered.Image.apply(lambda x: x.astype(np.uint8))\n",
+ "df_filtered[\"ImageSum\"] = df_filtered.Image.apply(lambda x: np.sum(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# LSTM preprocessing: new blob detection (only detects whether any blobs are present)\n",
+ "def detect_blobs(image):\n",
+ " #image = image.reshape(27, 15)\n",
+ " large = np.ones((29,17), dtype=np.uint8)\n",
+ " large[1:28,1:16] = image\n",
+ " temp, thresh = cv2.threshold(cv2.bitwise_not(large), 200, 255, cv2.THRESH_BINARY)\n",
+ " contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n",
+ " contours = [a for a in contours if cv2.contourArea(a) > 8 and cv2.contourArea(a) < 255]\n",
+ " lstBlob = []\n",
+ " lstMin = []\n",
+ " lstMax = []\n",
+ " count = 0\n",
+ " return len(contours) > 0"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 3.42 s, sys: 1.14 s, total: 4.57 s\n",
+ "Wall time: 4.94 s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "pool = Pool(cpu_count() - 1)\n",
+ "temp_blobs = pool.map(detect_blobs, df_filtered.Image)\n",
+ "pool.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_filtered[\"ContainsBlobs\"] = temp_blobs"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Label the input method: 0 = knuckle (TaskID < 17), 1 = finger (TaskID >= 17)\n",
+ "def f(row):\n",
+ " if row['TaskID'] < 17:\n",
+ " #val = \"Knuckle\"\n",
+ " val = 0\n",
+ " elif row['TaskID'] >= 17:\n",
+ " #val = \"Finger\"\n",
+ " val = 1\n",
+ " return val\n",
+ "df_filtered['InputMethod'] = df_filtered.apply(f, axis=1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_filtered.index = range(len(df_filtered))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "1\n",
+ "2\n",
+ "3\n",
+ "4\n",
+ "5\n",
+ "6\n",
+ "7\n",
+ "8\n",
+ "9\n",
+ "10\n",
+ "11\n",
+ "12\n",
+ "13\n",
+ "14\n",
+ "15\n",
+ "16\n",
+ "17\n",
+ "18\n",
+ "CPU times: user 4min 7s, sys: 424 ms, total: 4min 8s\n",
+ "Wall time: 4min 8s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "# trim image sequences down to only between first and last detected blob\n",
+ "UserIDs = []\n",
+ "TaskIDs = []\n",
+ "VersionIDs = []\n",
+ "Blobs = []\n",
+ "for userID in df_filtered.userID.unique():\n",
+ " print(userID)\n",
+ " for TaskID in df_filtered[df_filtered.userID == userID].TaskID.unique():\n",
+ " for VersionID in df_filtered[(df_filtered.userID == userID) & (df_filtered.TaskID == TaskID)].VersionID.unique():\n",
+ " first_blob = -1\n",
+ " last_blob = -1\n",
+ " for index, row in df_filtered[(df_filtered.userID == userID) & (df_filtered.TaskID == TaskID) & (df_filtered.VersionID == VersionID)].iterrows():\n",
+ " if row.ContainsBlobs:\n",
+ " last_blob = index\n",
+ " if first_blob == -1:\n",
+ " first_blob = index\n",
+ " if first_blob >= 0 and last_blob >= 0:\n",
+ " UserIDs.append(userID)\n",
+ " TaskIDs.append(TaskID)\n",
+ " VersionIDs.append(VersionID)\n",
+ " Blobs.append(df_filtered[(df_filtered.userID == userID) & (df_filtered.TaskID == TaskID) & (df_filtered.VersionID == VersionID) & (df_filtered.index >= first_blob) & (df_filtered.index <= last_blob)].Image.tolist())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "UserIDs = np.array(UserIDs, dtype=np.int64)\n",
+ "TaskIDs = np.array(TaskIDs, dtype=np.int64)\n",
+ "VersionIDs = np.array(VersionIDs, dtype=np.int64)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " Blobs | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 3 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 5 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 6 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 239,... | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 7 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 8 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0... | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID TaskID VersionID \\\n",
+ "0 1 0 3 \n",
+ "1 1 0 5 \n",
+ "2 1 0 6 \n",
+ "3 1 0 7 \n",
+ "4 1 0 8 \n",
+ "\n",
+ " Blobs \n",
+ "0 [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... \n",
+ "1 [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... \n",
+ "2 [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 239,... \n",
+ "3 [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... \n",
+ "4 [[[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0... "
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_all = pd.DataFrame()\n",
+ "df_lstm_all[\"userID\"] = UserIDs\n",
+ "df_lstm_all[\"TaskID\"] = TaskIDs\n",
+ "df_lstm_all[\"VersionID\"] = VersionIDs\n",
+ "df_lstm_all[\"Blobs\"] = Blobs\n",
+ "df_lstm_all.Blobs = df_lstm_all.Blobs.map(np.array)\n",
+ "df_lstm_all.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 33,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " Blobs | \n",
+ " BlobCount | \n",
+ " GestureOnly | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 3 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... | \n",
+ " 38 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 5 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... | \n",
+ " 57 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 6 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 239,... | \n",
+ " 41 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 7 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... | \n",
+ " 20 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 8 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0... | \n",
+ " 41 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID TaskID VersionID \\\n",
+ "0 1 0 3 \n",
+ "1 1 0 5 \n",
+ "2 1 0 6 \n",
+ "3 1 0 7 \n",
+ "4 1 0 8 \n",
+ "\n",
+ " Blobs BlobCount GestureOnly \n",
+ "0 [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... 38 0 \n",
+ "1 [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... 57 0 \n",
+ "2 [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 239,... 41 0 \n",
+ "3 [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... 20 0 \n",
+ "4 [[[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0... 41 0 "
+ ]
+ },
+ "execution_count": 33,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_all.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_lstm_all[\"Length\"] = df_lstm_all.Blobs.apply(lambda x: x.shape[0])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 43,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 43,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYAAAAD8CAYAAAB+UHOxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAEP1JREFUeJzt3XuMXOV5x/HvE5ubSBpDiFaWbXVNsRQ5oiVoRYgSRVtQwEBVU4lEjlBxIkuWWqImElVrGqnkhgSVCE2iXOQGK06EApQkMgpU1AVGUf8AAuFiDCJsEkd45WAlBidLFNqlT/+Yd+l0s+ud2Z3dubzfj7TyOe9558zz7Fn7t+fMmXFkJpKk+ryp1wVIknrDAJCkShkAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVanWvCziRs846K0dHRxf9+FdffZXTTz+9ewX1yLD0AfbSj4alD7CXGY8//vgvM/PtC83r6wAYHR3lscceW/TjG40G4+Pj3SuoR4alD7CXfjQsfYC9zIiIn7czz0tAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVygCQpEoZAJJUqb5+J/BKGd11b1vzDt10xTJXIkkrxzMASaqUASBJlTIAJKlSBoAkVcoAkKRKGQCSVCkDQJIqZQBIUqUMAEmqlAEgSZUyACSpUgaAJFXKAJCkShkAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVygCQpEoZAJJUKQNAkirVdgBExKqIeCIivl/WN0bEIxExERF3RsTJZfyUsj5Rto+27OP6Mv58RFza7WYkSe3r5Azg48BzLes3A7dm5jnAy8COMr4DeLmM31rmERGbgW3AO4EtwFciYtXSypckLVZbARAR64ErgK+X9QAuAu4uU/YCV5blrWWdsv3iMn8rcEdmvpaZPwMmgAu60YQkqXOr25z3z8DfAW8p628DXsnM6bJ+GFhXltcBLwJk5nREHC/z1wEPt+yz9TFviIidwE6AkZERGo1Gu738nqmpqbYef9250wvOAZZUy1K028cgsJf+Myx9gL10asEAiIg/A45m5uMRMb6s1QCZuRvYDTA2Npbj44t/ykajQTuP/8iue9va36GrF1/LUrTbxyCwl/4zLH2AvXSqnTOA9wJ/HhGXA6cCfwB8AVgTEavLWcB6YLLMnwQ2AIcjYjXwVuBXLeMzWh8jSVphC74GkJnXZ+b6zByl+SLug5l5NfAQcFWZth3YV5bvKeuU7Q9mZpbxbeUuoY3AJuDRrnUiSepIu68BzOXvgTsi4nPAE8BtZfw24FsRMQEcoxkaZObBiLgLeBaYBq7NzNeX8PySpCXoKAAyswE0yvJPmeMunsz8HfDBeR5/I3Bjp0VKkrrPdwJLUqUMAEmqlAEgSZUyACSpUgaAJFXKAJCkShkAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVygCQpEoZAJJUKQNAkiplAEhSpQwASaqUASBJlTIAJKlSBoAkVcoAkKRKGQCSVCkDQJIqtbrXBSynA5PH+ciue3tdhiT1Jc8AJKlSBoAkVcoAkKRKGQCSVCkDQJIqZQBIUqUMAEmqlAEgSZUyACSpUgaAJFXKAJCkSi0YABFxakQ8GhFPRcTBiPh0Gd8YEY9ExERE3BkRJ5fxU8r6RNk+2rKv68v48xFx6XI1JUlaWDtnAK8BF2XmnwDnAVsi4kLgZuDWzDwHeBnYUebvAF4u47eWeUTEZmAb8E5gC/CViFjVzWYkSe1bMACyaaqsnlS+ErgIuLuM7wWuLMtbyzpl+8UREWX8jsx8LTN/BkwAF3SlC0lSx9r6OOjym/rjwDnAl4GfAK9k5nSZchhYV5bXAS8CZOZ0RBwH3lbGH27ZbetjWp9rJ7ATYGRkhEaj0VlHLUZOg+vOnV54YpuWUstSTE1N9ey5u81e+s+w9AH2
0qm2AiAzXwfOi4g1wPeAdyxXQZm5G9gNMDY2luPj44ve15du38ctB7r3Xx4cunrxtSxFo9FgKd+HfmIv/WdY+gB76VRHdwFl5ivAQ8B7gDURMfOv63pgsixPAhsAyva3Ar9qHZ/jMZKkFdbOXUBvL7/5ExGnAR8AnqMZBFeVaduBfWX5nrJO2f5gZmYZ31buEtoIbAIe7VYjkqTOtHN9ZC2wt7wO8Cbgrsz8fkQ8C9wREZ8DngBuK/NvA74VERPAMZp3/pCZByPiLuBZYBq4tlxakiT1wIIBkJlPA++aY/ynzHEXT2b+DvjgPPu6Ebix8zIlSd3mO4ElqVIGgCRVygCQpEoZAJJUKQNAkiplAEhSpQwASaqUASBJlTIAJKlSBoAkVcoAkKRKGQCSVCkDQJIqZQBIUqUMAEmqlAEgSZUyACSpUgaAJFXKAJCkShkAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVygCQpEoZAJJUKQNAkiplAEhSpQwASaqUASBJlTIAJKlSBoAkVcoAkKRKLRgAEbEhIh6KiGcj4mBEfLyMnxkR+yPihfLnGWU8IuKLETEREU9HxPkt+9pe5r8QEduXry1J0kLaOQOYBq7LzM3AhcC1EbEZ2AU8kJmbgAfKOsBlwKbytRP4KjQDA7gBeDdwAXDDTGhIklbeggGQmUcy80dl+TfAc8A6YCuwt0zbC1xZlrcC38ymh4E1EbEWuBTYn5nHMvNlYD+wpavdSJLa1tFrABExCrwLeAQYycwjZdMvgJGyvA54seVhh8vYfOOSpB5Y3e7EiHgz8B3gE5n564h4Y1tmZkRkNwqKiJ00Lx0xMjJCo9FY9L5GToPrzp3uRlkAS6plKaampnr23N1mL/1nWPoAe+lUWwEQESfR/Mf/9sz8bhl+KSLWZuaRconnaBmfBDa0PHx9GZsExmeNN2Y/V2buBnYDjI2N5fj4+OwpbfvS7fu45UDbGbegQ1cvvpalaDQaLOX70E/spf8MSx9gL51q5y6gAG4DnsvMz7dsugeYuZNnO7CvZfyacjfQhcDxcqnofuCSiDijvPh7SRmTJPVAO78evxf4S+BARDxZxv4BuAm4KyJ2AD8HPlS23QdcDkwAvwU+CpCZxyLis8APy7zPZOaxrnQhSerYggGQmf8JxDybL55jfgLXzrOvPcCeTgqUJC0P3wksSZUyACSpUgaAJFXKAJCkShkAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVygCQpEoZAJJUKQNAkiplAEhSpbr3/yVWYHTXvW3NO3TTFctciSQtnWcAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVygCQpEoZAJJUKQNAkiplAEhSpQwASaqUASBJlTIAJKlSBoAkVcoAkKRKGQCSVCkDQJIqZQBIUqUMAEmq1IIBEBF7IuJoRDzTMnZmROyPiBfKn2eU8YiIL0bEREQ8HRHntzxme5n/QkRsX552JEntaucM4BvAlllju4AHMnMT8EBZB7gM2FS+dgJfhWZgADcA7wYuAG6YCQ1JUm8sGACZ+QPg2KzhrcDesrwXuLJl/JvZ9DCwJiLWApcC+zPzWGa+DOzn90NFkrSCFvsawEhmHinLvwBGyvI64MWWeYfL2HzjkqQeWb3UHWRmRkR2oxiAiNhJ8/IRIyMjNBqNRe9r5DS47tzpLlXWvqXUPJepqamu77NX7KX/DEsfYC+dWmwAvBQRazPzSLnEc7SMTwIbWuatL2OTwPis8cZcO87M3cBugLGxsRwfH59rWlu+dPs+bjmw5Izr2KGrx7u6v0ajwVK+D/3EXvrPsPQB9tKpxV4CugeYuZNnO7CvZfyacjfQhcDxcqnofuCSiDijvPh7SRmTJPXIgr8eR8S3af72flZEHKZ5N89NwF0RsQP4OfChMv0+4HJgAvgt8FGAzDwWEZ8FfljmfSYzZ7+wLElaQQsGQGZ+eJ5NF88xN4Fr
59nPHmBPR9VJkpaN7wSWpEoZAJJUKQNAkiplAEhSpQwASaqUASBJlTIAJKlSBoAkVcoAkKRKGQCSVCkDQJIqZQBIUqUMAEmqlAEgSZUyACSpUgaAJFXKAJCkShkAklQpA0CSKrXg/wmszo3uureteYduumKZK5Gk+XkGIEmVMgAkqVIGgCRVygCQpEoZAJJUKQNAkiplAEhSpQwASaqUbwTroXbfMPaNLacvcyWSauQZgCRVygCQpEoZAJJUKQNAkirli8AD4MDkcT7SxgvGfrqopE54BiBJlTIAJKlSK34JKCK2AF8AVgFfz8ybVrqGYdXu+wrAy0WSVvgMICJWAV8GLgM2Ax+OiM0rWYMkqWmlzwAuACYy86cAEXEHsBV4doXrqJ7/baWklQ6AdcCLLeuHgXevcA3qQCeXldrhx1pI/aPvbgONiJ3AzrI6FRHPL2F3ZwG/XHpVvfU3Q9IHwJ/ePDy9MDzHZVj6AHuZ8YftTFrpAJgENrSsry9jb8jM3cDubjxZRDyWmWPd2FcvDUsfYC/9aFj6AHvp1ErfBvpDYFNEbIyIk4FtwD0rXIMkiRU+A8jM6Yj4GHA/zdtA92TmwZWsQZLUtOKvAWTmfcB9K/R0XbmU1AeGpQ+wl340LH2AvXQkMnO5n0OS1If8KAhJqtRQBkBEbImI5yNiIiJ29bqeTkXEoYg4EBFPRsRjZezMiNgfES+UP8/odZ1ziYg9EXE0Ip5pGZuz9mj6YjlOT0fE+b2r/P+bp49PRcRkOS5PRsTlLduuL308HxGX9qbquUXEhoh4KCKejYiDEfHxMj5Qx+UEfQzccYmIUyPi0Yh4qvTy6TK+MSIeKTXfWW6WISJOKesTZftoVwrJzKH6ovni8k+As4GTgaeAzb2uq8MeDgFnzRr7J2BXWd4F3NzrOuep/f3A+cAzC9UOXA78GxDAhcAjva5/gT4+BfztHHM3l5+zU4CN5edvVa97aKlvLXB+WX4L8ONS80AdlxP0MXDHpXxv31yWTwIeKd/ru4BtZfxrwF+V5b8GvlaWtwF3dqOOYTwDeOPjJjLzv4CZj5sYdFuBvWV5L3BlD2uZV2b+ADg2a3i+2rcC38ymh4E1EbF2ZSo9sXn6mM9W4I7MfC0zfwZM0Pw57AuZeSQzf1SWfwM8R/Nd+QN1XE7Qx3z69riU7+1UWT2pfCVwEXB3GZ99TGaO1d3AxRERS61jGANgro+bONEPST9K4N8j4vHyzmiAkcw8UpZ/AYz0prRFma/2QTxWHyuXRfa0XIYbmD7KpYN30fyNc2CPy6w+YACPS0SsiogngaPAfppnKK9k5nSZ0lrvG72U7ceBty21hmEMgGHwvsw8n+anpl4bEe9v3ZjN88CBvH1rkGsHvgr8EXAecAS4pbfldCYi3gx8B/hEZv66ddsgHZc5+hjI45KZr2fmeTQ/EeEC4B0rXcMwBsCCHzfR7zJzsvx5FPgezR+Ol2ZOw8ufR3tXYcfmq32gjlVmvlT+0v4P8C/83+WEvu8jIk6i+Y/m7Zn53TI8cMdlrj4G+bgAZOYrwEPAe2hebpt5f1ZrvW/0Ura/FfjVUp97GANgoD9uIiJOj4i3zCwDlwDP0Oxhe5m2HdjXmwoXZb7a7wGuKXedXAgcb7kk0XdmXQf/C5rHBZp9bCt3amwENgGPrnR98ynXim8DnsvMz7dsGqjjMl8fg3hcIuLtEbGmLJ8GfIDmaxoPAVeVabOPycyxugp4sJy1LU2vXw1fji+adzH8mOY1tU/2up4Oaz+b5p0LTwEHZ+qneb3vAeAF4D+AM3td6zz1f5vmafh/07yGuWO+2mneCfHlcpwOAGO9rn+BPr5V6ny6/IVc2zL/k6WP54HLel3/rF7eR/PyztPAk+Xr8kE7LifoY+COC/DHwBOl5meAfyzjZ9MMqQngX4FTyvipZX2ibD+7G3X4TmBJqtQwXgKSJLXBAJCkShkAklQpA0CSKmUASFKlDABJqpQBIEmVMgAk
qVL/C2+FhSKKT6n/AAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "df_lstm_all.Length.hist(range=(0,300), bins=30)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 46,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.02870949403069926"
+ ]
+ },
+ "execution_count": 46,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(df_lstm_all[df_lstm_all.Length > 50]) / len(df_lstm_all)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 52,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "count 10554.0\n",
+ "mean 15.9\n",
+ "std 13.6\n",
+ "min 1.0\n",
+ "25% 8.0\n",
+ "50% 13.0\n",
+ "75% 19.0\n",
+ "max 301.0\n",
+ "Name: Length, dtype: float64"
+ ]
+ },
+ "execution_count": 52,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_all.Length.describe().round(1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# per-sequence frame count, vectorized over the Blobs Series; same\n",
+ "# values as the former iterrows loop, and consistent with how the\n",
+ "# 'Length' column is computed earlier in this notebook\n",
+ "df_lstm_all[\"BlobCount\"] = df_lstm_all.Blobs.apply(lambda b: b.shape[0])\n",
+ "# add a column for pure gesture recognition without finger/knuckle\n",
+ "df_lstm_all[\"GestureOnly\"] = df_lstm_all.TaskID % 17"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "count 10554.000000\n",
+ "mean 15.906576\n",
+ "std 13.605214\n",
+ "min 1.000000\n",
+ "25% 8.000000\n",
+ "50% 13.000000\n",
+ "75% 19.000000\n",
+ "max 301.000000\n",
+ "Name: BlobCount, dtype: float64"
+ ]
+ },
+ "execution_count": 15,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_all.BlobCount.describe()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " mean | \n",
+ " std | \n",
+ "
\n",
+ " \n",
+ " GestureOnly | \n",
+ " | \n",
+ " | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 11.421429 | \n",
+ " 8.940925 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 13.618683 | \n",
+ " 13.864708 | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 8.852596 | \n",
+ " 6.315931 | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 8.672913 | \n",
+ " 5.580500 | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 9.828767 | \n",
+ " 6.793559 | \n",
+ "
\n",
+ " \n",
+ " 5 | \n",
+ " 9.211221 | \n",
+ " 6.861675 | \n",
+ "
\n",
+ " \n",
+ " 6 | \n",
+ " 14.622496 | \n",
+ " 8.338379 | \n",
+ "
\n",
+ " \n",
+ " 7 | \n",
+ " 13.684524 | \n",
+ " 13.263753 | \n",
+ "
\n",
+ " \n",
+ " 8 | \n",
+ " 20.397129 | \n",
+ " 12.916920 | \n",
+ "
\n",
+ " \n",
+ " 9 | \n",
+ " 14.468599 | \n",
+ " 10.042060 | \n",
+ "
\n",
+ " \n",
+ " 10 | \n",
+ " 14.921440 | \n",
+ " 8.909217 | \n",
+ "
\n",
+ " \n",
+ " 11 | \n",
+ " 13.695578 | \n",
+ " 7.661549 | \n",
+ "
\n",
+ " \n",
+ " 12 | \n",
+ " 17.070853 | \n",
+ " 11.755087 | \n",
+ "
\n",
+ " \n",
+ " 13 | \n",
+ " 15.712219 | \n",
+ " 10.545010 | \n",
+ "
\n",
+ " \n",
+ " 14 | \n",
+ " 16.468354 | \n",
+ " 9.826818 | \n",
+ "
\n",
+ " \n",
+ " 15 | \n",
+ " 19.840836 | \n",
+ " 11.239255 | \n",
+ "
\n",
+ " \n",
+ " 16 | \n",
+ " 42.931624 | \n",
+ " 21.024635 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " mean std\n",
+ "GestureOnly \n",
+ "0 11.421429 8.940925\n",
+ "1 13.618683 13.864708\n",
+ "2 8.852596 6.315931\n",
+ "3 8.672913 5.580500\n",
+ "4 9.828767 6.793559\n",
+ "5 9.211221 6.861675\n",
+ "6 14.622496 8.338379\n",
+ "7 13.684524 13.263753\n",
+ "8 20.397129 12.916920\n",
+ "9 14.468599 10.042060\n",
+ "10 14.921440 8.909217\n",
+ "11 13.695578 7.661549\n",
+ "12 17.070853 11.755087\n",
+ "13 15.712219 10.545010\n",
+ "14 16.468354 9.826818\n",
+ "15 19.840836 11.239255\n",
+ "16 42.931624 21.024635"
+ ]
+ },
+ "execution_count": 16,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_all.groupby(df_lstm_all.GestureOnly)[\"BlobCount\"].agg([\"mean\", \"std\"])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "before: 10554\n",
+ "after: 9193\n",
+ "ratio: 12.895584612469206\n"
+ ]
+ }
+ ],
+ "source": [
+ "# filter on gesture lengths: keep sequences with 5..100 frames\n",
+ "print(\"before: %s\" % len(df_lstm_all))\n",
+ "# .copy() gives an independent frame so later column assignments\n",
+ "# (e.g. df_lstm[\"BlobCount\"] = ...) don't emit SettingWithCopyWarning\n",
+ "df_lstm = df_lstm_all[(df_lstm_all.BlobCount <= 100) & (df_lstm_all.BlobCount >= 5)].copy()\n",
+ "print(\"after: %s\" % len(df_lstm))\n",
+ "print(\"ratio: %s\" % ((len(df_lstm_all) - len(df_lstm)) / len(df_lstm_all) * 100))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "count 9193.000000\n",
+ "mean 17.678995\n",
+ "std 12.059369\n",
+ "min 5.000000\n",
+ "25% 10.000000\n",
+ "50% 15.000000\n",
+ "75% 20.000000\n",
+ "max 97.000000\n",
+ "Name: BlobCount, dtype: float64"
+ ]
+ },
+ "execution_count": 18,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm.BlobCount.describe()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# frame count per sequence; a comprehension over the Blobs Series\n",
+ "# yields the same list (same order) as the former iterrows loop\n",
+ "lengths = [blob_seq.shape[0] for blob_seq in df_lstm.Blobs]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \n",
+ "A value is trying to be set on a copy of a slice from a DataFrame.\n",
+ "Try using .loc[row_indexer,col_indexer] = value instead\n",
+ "\n",
+ "See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n",
+ " \"\"\"Entry point for launching an IPython kernel.\n"
+ ]
+ }
+ ],
+ "source": [
+ "df_lstm[\"BlobCount\"] = lengths"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "count 9193.000000\n",
+ "mean 17.678995\n",
+ "std 12.059369\n",
+ "min 5.000000\n",
+ "25% 10.000000\n",
+ "50% 15.000000\n",
+ "75% 20.000000\n",
+ "max 97.000000\n",
+ "Name: BlobCount, dtype: float64"
+ ]
+ },
+ "execution_count": 22,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm.BlobCount.describe()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def lerp(a, b, c=0.5):\n",
+ " return c * b + (1.0 - c) * a\n",
+ "\n",
+ "#Svens new Blob detection\n",
+ "def detect_blobs_return_old(image, task):\n",
+ " #image = e.Image\n",
+ " large = np.ones((29,17), dtype=np.uint8)\n",
+ " large[1:28,1:16] = np.copy(image)\n",
+ " temp, thresh = cv2.threshold(cv2.bitwise_not(large), 205, 255, cv2.THRESH_BINARY)\n",
+ " contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n",
+ " contours = [a for a in contours if cv2.contourArea(a) > 8 and cv2.contourArea(a) < 255]\n",
+ " lstBlob = []\n",
+ " lstCenter = []\n",
+ " lstMin = []\n",
+ " lstMax = []\n",
+ " count = 0\n",
+ " contours.sort(key=lambda a: cv2.contourArea(a))\n",
+ " if len(contours) > 0:\n",
+ " # if two finger or knuckle\n",
+ " cont_count = 2 if task in [1, 6, 7, 18, 23, 24] and len(contours) > 1 else 1\n",
+ " for i in range(1, cont_count + 1):\n",
+ " max_contour = contours[-1 * i]\n",
+ " xmax, ymax = np.max(max_contour.reshape(len(max_contour),2), axis=0)\n",
+ " xmin, ymin = np.min(max_contour.reshape(len(max_contour),2), axis=0)\n",
+ " M = cv2.moments(max_contour)\n",
+ " cX = int(M[\"m10\"] / M[\"m00\"]) - 1\n",
+ " cY = int(M[\"m01\"] / M[\"m00\"]) - 1\n",
+ " #croped_im = np.zeros((27,15))\n",
+ " blob = large[max(ymin - 1, 0):min(ymax + 1, large.shape[0]),max(xmin - 1, 0):min(xmax + 1, large.shape[1])]\n",
+ " #croped_im[0:blob.shape[0],0:blob.shape[1]] = blob\n",
+ " #return (1, [croped_im])\n",
+ " lstBlob.append(blob)\n",
+ " lstCenter.append((cY, cX))\n",
+ " lstMin.append(xmax-xmin)\n",
+ " lstMax.append(ymax-ymin)\n",
+ " count = count + 1\n",
+ " return (count, lstBlob, lstCenter)\n",
+ " else:\n",
+ " return (0, [np.zeros((29, 19))], 0, 0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# descides whether or not a normalization is neccessary\n",
+ "# and cuts or adds zeros\n",
+ "def normalize_blobs(blobs, new_len=50):\n",
+ " new_count = new_len - blobs.shape[0]\n",
+ " if new_count == 0:\n",
+ " return blobs\n",
+ " elif new_count > 0:\n",
+ " temp = np.array([np.zeros((27, 15))] * new_count)\n",
+ " return np.append(blobs, temp, axis=0)\n",
+ " else:\n",
+ " return blobs[0:new_len]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 3.24 s, sys: 556 ms, total: 3.8 s\n",
+ "Wall time: 3.8 s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "# normalize all image sequences to a fixed length of 50 frames\n",
+ "df_lstm_norm = df_lstm.copy(deep=True)\n",
+ "# vectorized over the Blobs Series; same result (and order) as the\n",
+ "# former two iterrows loops\n",
+ "df_lstm_norm.Blobs = df_lstm.Blobs.apply(lambda b: normalize_blobs(b, 50))\n",
+ "# recompute the frame count so it reflects the normalized sequences\n",
+ "df_lstm_norm[\"BlobCount\"] = df_lstm_norm.Blobs.apply(lambda b: b.shape[0])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "count 9193.0\n",
+ "mean 50.0\n",
+ "std 0.0\n",
+ "min 50.0\n",
+ "25% 50.0\n",
+ "50% 50.0\n",
+ "75% 50.0\n",
+ "max 50.0\n",
+ "Name: BlobCount, dtype: float64"
+ ]
+ },
+ "execution_count": 26,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_norm.BlobCount.describe()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_lstm_norm.to_pickle(\"DataStudyCollection/df_lstm_norm50.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 28,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " Blobs | \n",
+ " BlobCount | \n",
+ " GestureOnly | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 3 | \n",
+ " [[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0... | \n",
+ " 50 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 5 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... | \n",
+ " 50 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 6 | \n",
+ " [[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0... | \n",
+ " 50 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 7 | \n",
+ " [[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0... | \n",
+ " 50 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 8 | \n",
+ " [[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0... | \n",
+ " 50 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID TaskID VersionID \\\n",
+ "0 1 0 3 \n",
+ "1 1 0 5 \n",
+ "2 1 0 6 \n",
+ "3 1 0 7 \n",
+ "4 1 0 8 \n",
+ "\n",
+ " Blobs BlobCount GestureOnly \n",
+ "0 [[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0... 50 0 \n",
+ "1 [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... 50 0 \n",
+ "2 [[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0... 50 0 \n",
+ "3 [[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0... 50 0 \n",
+ "4 [[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0... 50 0 "
+ ]
+ },
+ "execution_count": 28,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_norm.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "finished\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"finished\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_11_LSTM.ipynb b/python/Step_11_LSTM.ipynb
new file mode 100644
index 0000000..60efca3
--- /dev/null
+++ b/python/Step_11_LSTM.ipynb
@@ -0,0 +1,2550 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Using TensorFlow backend.\n"
+ ]
+ }
+ ],
+ "source": [
+ "## USE for Multi GPU Systems\n",
+ "#import os\n",
+ "#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+ "\n",
+ "from keras.models import Sequential, load_model\n",
+ "from keras.layers import *\n",
+ "from keras import optimizers\n",
+ "from keras import utils\n",
+ "from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau\n",
+ "import keras\n",
+ "\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "import pandas as pd\n",
+ "import math\n",
+ "\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "# Importing matplotlib to plot images.\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "%matplotlib inline\n",
+ "\n",
+ "# Importing SK-learn to calculate precision and recall\n",
+ "import sklearn\n",
+ "from sklearn import metrics\n",
+ "from sklearn.model_selection import train_test_split, cross_val_score, LeaveOneGroupOut\n",
+ "from sklearn.utils import shuffle \n",
+ "\n",
+ "# Used for graph export\n",
+ "from tensorflow.python.framework import graph_util\n",
+ "from tensorflow.python.framework import graph_io\n",
+ "from keras import backend as K\n",
+ "\n",
+ "import pickle as pkl\n",
+ "import h5py\n",
+ "\n",
+ "from pathlib import Path\n",
+ "import os.path\n",
+ "import sys\n",
+ "import datetime\n",
+ "import time\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class LoggingTensorBoard(TensorBoard): \n",
+ "\n",
+ " def __init__(self, log_dir, settings_str_to_log, **kwargs):\n",
+ " super(LoggingTensorBoard, self).__init__(log_dir, **kwargs)\n",
+ "\n",
+ " self.settings_str = settings_str_to_log\n",
+ "\n",
+ " def on_train_begin(self, logs=None):\n",
+ " TensorBoard.on_train_begin(self, logs=logs)\n",
+ "\n",
+ " tensor = tf.convert_to_tensor(self.settings_str)\n",
+ " summary = tf.summary.text (\"Run_Settings\", tensor)\n",
+ "\n",
+ " with tf.Session() as sess:\n",
+ " s = sess.run(summary)\n",
+ " self.writer.add_summary(s)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[ 1 2 9 6 4 14 17 16 12 3 10 18 5] [13 8 11 15 7]\n"
+ ]
+ }
+ ],
+ "source": [
+ "dfAll = pd.read_pickle(\"DataStudyCollection/df_lstm_norm50.pkl\")\n",
+ "\n",
+ "lst = dfAll.userID.unique()\n",
+ "np.random.seed(42)\n",
+ "np.random.shuffle(lst)\n",
+ "test_ids = lst[-5:]\n",
+ "train_ids = lst[:-5]\n",
+ "print(train_ids, test_ids)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,\n",
+ " 18])"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dfAll.userID.unique()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dfAll.TaskID = dfAll.TaskID % 17"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_train = dfAll[dfAll.userID.isin(train_ids)][['Blobs', 'TaskID']]\n",
+ "df_test = dfAll[dfAll.userID.isin(test_ids)][['Blobs', 'TaskID']]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x_train = np.concatenate(df_train.Blobs.values).reshape(-1,50,27,15,1)\n",
+ "x_test = np.concatenate(df_test.Blobs.values).reshape(-1,50,27,15,1)\n",
+ "\n",
+ "y_train = df_train.TaskID.values\n",
+ "y_test = df_test.TaskID.values\n",
+ "\n",
+ "x_train = x_train / 255.0\n",
+ "x_test = x_test / 255.0\n",
+ "\n",
+ "# convert class vectors to binary class matrices (one-hot notation)\n",
+ "num_classes = len(dfAll.TaskID.unique())\n",
+ "y_train_one_hot = utils.to_categorical(df_train.TaskID, num_classes)\n",
+ "y_test_one_hot = utils.to_categorical(df_test.TaskID, num_classes)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# If GPU is not available: \n",
+ "# GPU_USE = '/cpu:0'\n",
+ "#config = tf.ConfigProto(device_count = {\"GPU\": 1})\n",
+ "\n",
+ "\n",
+ "# If GPU is available: \n",
+ "config = tf.ConfigProto()\n",
+ "config.log_device_placement = True\n",
+ "config.allow_soft_placement = True\n",
+ "config.gpu_options.allow_growth=True\n",
+ "config.gpu_options.allocator_type = 'BFC'\n",
+ "\n",
+ "# Limit the maximum memory used\n",
+ "config.gpu_options.per_process_gpu_memory_fraction = 0.3\n",
+ "\n",
+ "# set session config\n",
+ "tf.keras.backend.set_session(tf.Session(config=config))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "_________________________________________________________________\n",
+ "Layer (type) Output Shape Param # \n",
+ "=================================================================\n",
+ "time_distributed_10 (TimeDis (None, 50, 27, 15, 64) 640 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_11 (TimeDis (None, 50, 27, 15, 32) 18464 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_12 (TimeDis (None, 50, 14, 8, 32) 0 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_13 (TimeDis (None, 50, 14, 8, 32) 0 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_14 (TimeDis (None, 50, 14, 8, 32) 9248 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_15 (TimeDis (None, 50, 14, 8, 16) 4624 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_16 (TimeDis (None, 50, 7, 4, 16) 0 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_17 (TimeDis (None, 50, 7, 4, 16) 0 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_18 (TimeDis (None, 50, 448) 0 \n",
+ "_________________________________________________________________\n",
+ "cu_dnnlstm_3 (CuDNNLSTM) (None, 50, 80) 169600 \n",
+ "_________________________________________________________________\n",
+ "dropout_7 (Dropout) (None, 50, 80) 0 \n",
+ "_________________________________________________________________\n",
+ "cu_dnnlstm_4 (CuDNNLSTM) (None, 50) 26400 \n",
+ "_________________________________________________________________\n",
+ "dropout_8 (Dropout) (None, 50) 0 \n",
+ "_________________________________________________________________\n",
+ "dense_2 (Dense) (None, 17) 867 \n",
+ "=================================================================\n",
+ "Total params: 229,843\n",
+ "Trainable params: 229,843\n",
+ "Non-trainable params: 0\n",
+ "_________________________________________________________________\n",
+ "LSTM-v1-\n",
+ "Train on 6624 samples, validate on 2569 samples\n",
+ "Epoch 1/3000\n",
+ " - 25s - loss: 49.2683 - acc: 0.0694 - val_loss: 41.9912 - val_acc: 0.0689\n",
+ "\n",
+ "Epoch 00001: val_acc improved from -inf to 0.06890, saving model to ./ModelSnapshots/LSTM-v1-001.h5\n",
+ "Epoch 2/3000\n",
+ " - 27s - loss: 35.8620 - acc: 0.0836 - val_loss: 30.0710 - val_acc: 0.1117\n",
+ "\n",
+ "Epoch 00002: val_acc improved from 0.06890 to 0.11172, saving model to ./ModelSnapshots/LSTM-v1-002.h5\n",
+ "Epoch 3/3000\n",
+ " - 31s - loss: 25.3320 - acc: 0.1129 - val_loss: 20.9569 - val_acc: 0.1179\n",
+ "\n",
+ "Epoch 00003: val_acc improved from 0.11172 to 0.11794, saving model to ./ModelSnapshots/LSTM-v1-003.h5\n",
+ "Epoch 4/3000\n",
+ " - 31s - loss: 17.4001 - acc: 0.1131 - val_loss: 14.1625 - val_acc: 0.1125\n",
+ "\n",
+ "Epoch 00004: val_acc did not improve from 0.11794\n",
+ "Epoch 5/3000\n",
+ " - 32s - loss: 11.5850 - acc: 0.1144 - val_loss: 9.3017 - val_acc: 0.1238\n",
+ "\n",
+ "Epoch 00005: val_acc improved from 0.11794 to 0.12378, saving model to ./ModelSnapshots/LSTM-v1-005.h5\n",
+ "Epoch 6/3000\n",
+ " - 32s - loss: 7.5852 - acc: 0.1152 - val_loss: 6.1837 - val_acc: 0.1269\n",
+ "\n",
+ "Epoch 00006: val_acc improved from 0.12378 to 0.12690, saving model to ./ModelSnapshots/LSTM-v1-006.h5\n",
+ "Epoch 7/3000\n",
+ " - 32s - loss: 5.1798 - acc: 0.1193 - val_loss: 4.4709 - val_acc: 0.1390\n",
+ "\n",
+ "Epoch 00007: val_acc improved from 0.12690 to 0.13896, saving model to ./ModelSnapshots/LSTM-v1-007.h5\n",
+ "Epoch 8/3000\n",
+ " - 33s - loss: 4.0865 - acc: 0.1162 - val_loss: 3.8336 - val_acc: 0.1296\n",
+ "\n",
+ "Epoch 00008: val_acc did not improve from 0.13896\n",
+ "Epoch 9/3000\n",
+ " - 33s - loss: 3.6080 - acc: 0.1200 - val_loss: 3.5308 - val_acc: 0.1059\n",
+ "\n",
+ "Epoch 00009: val_acc did not improve from 0.13896\n",
+ "Epoch 10/3000\n",
+ " - 34s - loss: 3.2663 - acc: 0.1255 - val_loss: 3.2663 - val_acc: 0.1070\n",
+ "\n",
+ "Epoch 00010: val_acc did not improve from 0.13896\n",
+ "Epoch 11/3000\n",
+ " - 32s - loss: 3.0279 - acc: 0.1294 - val_loss: 3.0289 - val_acc: 0.1191\n",
+ "\n",
+ "Epoch 00011: val_acc did not improve from 0.13896\n",
+ "Epoch 12/3000\n",
+ " - 32s - loss: 2.8994 - acc: 0.1335 - val_loss: 2.8769 - val_acc: 0.1549\n",
+ "\n",
+ "Epoch 00012: val_acc improved from 0.13896 to 0.15492, saving model to ./ModelSnapshots/LSTM-v1-012.h5\n",
+ "Epoch 13/3000\n",
+ " - 33s - loss: 2.8275 - acc: 0.1332 - val_loss: 2.8374 - val_acc: 0.1584\n",
+ "\n",
+ "Epoch 00013: val_acc improved from 0.15492 to 0.15843, saving model to ./ModelSnapshots/LSTM-v1-013.h5\n",
+ "Epoch 14/3000\n",
+ " - 33s - loss: 2.7877 - acc: 0.1437 - val_loss: 2.8330 - val_acc: 0.1323\n",
+ "\n",
+ "Epoch 00014: val_acc did not improve from 0.15843\n",
+ "Epoch 15/3000\n",
+ " - 34s - loss: 2.7728 - acc: 0.1386 - val_loss: 2.7907 - val_acc: 0.1541\n",
+ "\n",
+ "Epoch 00015: val_acc did not improve from 0.15843\n",
+ "Epoch 16/3000\n",
+ " - 32s - loss: 2.7565 - acc: 0.1409 - val_loss: 2.8496 - val_acc: 0.1164\n",
+ "\n",
+ "Epoch 00016: val_acc did not improve from 0.15843\n",
+ "Epoch 17/3000\n",
+ " - 32s - loss: 2.7440 - acc: 0.1449 - val_loss: 2.7934 - val_acc: 0.1436\n",
+ "\n",
+ "Epoch 00017: val_acc did not improve from 0.15843\n",
+ "Epoch 18/3000\n",
+ " - 33s - loss: 2.7210 - acc: 0.1458 - val_loss: 2.7578 - val_acc: 0.1374\n",
+ "\n",
+ "Epoch 00018: val_acc did not improve from 0.15843\n",
+ "Epoch 19/3000\n",
+ " - 33s - loss: 2.7060 - acc: 0.1487 - val_loss: 2.7522 - val_acc: 0.1296\n",
+ "\n",
+ "Epoch 00019: val_acc did not improve from 0.15843\n",
+ "Epoch 20/3000\n",
+ " - 32s - loss: 2.7049 - acc: 0.1507 - val_loss: 2.7762 - val_acc: 0.1265\n",
+ "\n",
+ "Epoch 00020: val_acc did not improve from 0.15843\n",
+ "Epoch 21/3000\n",
+ " - 32s - loss: 2.6780 - acc: 0.1514 - val_loss: 2.8229 - val_acc: 0.1222\n",
+ "\n",
+ "Epoch 00021: val_acc did not improve from 0.15843\n",
+ "Epoch 22/3000\n",
+ " - 31s - loss: 2.6713 - acc: 0.1508 - val_loss: 2.7640 - val_acc: 0.1277\n",
+ "\n",
+ "Epoch 00022: val_acc did not improve from 0.15843\n",
+ "Epoch 23/3000\n",
+ " - 32s - loss: 2.6633 - acc: 0.1534 - val_loss: 2.7439 - val_acc: 0.1386\n",
+ "\n",
+ "Epoch 00023: val_acc did not improve from 0.15843\n",
+ "Epoch 24/3000\n",
+ " - 32s - loss: 2.6539 - acc: 0.1511 - val_loss: 2.7243 - val_acc: 0.1339\n",
+ "\n",
+ "Epoch 00024: val_acc did not improve from 0.15843\n",
+ "Epoch 25/3000\n",
+ " - 33s - loss: 2.6404 - acc: 0.1552 - val_loss: 2.7407 - val_acc: 0.1335\n",
+ "\n",
+ "Epoch 00025: val_acc did not improve from 0.15843\n",
+ "Epoch 26/3000\n",
+ " - 29s - loss: 2.6376 - acc: 0.1559 - val_loss: 2.7438 - val_acc: 0.1362\n",
+ "\n",
+ "Epoch 00026: val_acc did not improve from 0.15843\n",
+ "Epoch 27/3000\n",
+ " - 32s - loss: 2.6272 - acc: 0.1538 - val_loss: 2.7144 - val_acc: 0.1444\n",
+ "\n",
+ "Epoch 00027: val_acc did not improve from 0.15843\n",
+ "Epoch 28/3000\n",
+ " - 33s - loss: 2.6152 - acc: 0.1677 - val_loss: 2.7139 - val_acc: 0.1495\n",
+ "\n",
+ "Epoch 00028: val_acc did not improve from 0.15843\n",
+ "Epoch 29/3000\n",
+ " - 31s - loss: 2.6027 - acc: 0.1694 - val_loss: 2.7126 - val_acc: 0.1475\n",
+ "\n",
+ "Epoch 00029: val_acc did not improve from 0.15843\n",
+ "Epoch 30/3000\n",
+ " - 31s - loss: 2.6002 - acc: 0.1639 - val_loss: 2.7428 - val_acc: 0.1347\n",
+ "\n",
+ "Epoch 00030: val_acc did not improve from 0.15843\n",
+ "Epoch 31/3000\n",
+ " - 32s - loss: 2.5983 - acc: 0.1683 - val_loss: 2.6875 - val_acc: 0.1565\n",
+ "\n",
+ "Epoch 00031: val_acc did not improve from 0.15843\n",
+ "Epoch 32/3000\n",
+ " - 31s - loss: 2.5796 - acc: 0.1781 - val_loss: 2.6927 - val_acc: 0.1627\n",
+ "\n",
+ "Epoch 00032: val_acc improved from 0.15843 to 0.16271, saving model to ./ModelSnapshots/LSTM-v1-032.h5\n",
+ "Epoch 33/3000\n",
+ " - 32s - loss: 2.5626 - acc: 0.1792 - val_loss: 2.6635 - val_acc: 0.1682\n",
+ "\n",
+ "Epoch 00033: val_acc improved from 0.16271 to 0.16816, saving model to ./ModelSnapshots/LSTM-v1-033.h5\n",
+ "Epoch 34/3000\n",
+ " - 31s - loss: 2.5736 - acc: 0.1762 - val_loss: 2.6519 - val_acc: 0.1724\n",
+ "\n",
+ "Epoch 00034: val_acc improved from 0.16816 to 0.17244, saving model to ./ModelSnapshots/LSTM-v1-034.h5\n",
+ "Epoch 35/3000\n",
+ " - 31s - loss: 2.5582 - acc: 0.1789 - val_loss: 2.6432 - val_acc: 0.1791\n",
+ "\n",
+ "Epoch 00035: val_acc improved from 0.17244 to 0.17906, saving model to ./ModelSnapshots/LSTM-v1-035.h5\n",
+ "Epoch 36/3000\n",
+ " - 31s - loss: 2.5423 - acc: 0.1784 - val_loss: 2.6365 - val_acc: 0.1771\n",
+ "\n",
+ "Epoch 00036: val_acc did not improve from 0.17906\n",
+ "Epoch 37/3000\n",
+ " - 33s - loss: 2.5256 - acc: 0.1877 - val_loss: 2.6288 - val_acc: 0.1837\n",
+ "\n",
+ "Epoch 00037: val_acc improved from 0.17906 to 0.18373, saving model to ./ModelSnapshots/LSTM-v1-037.h5\n",
+ "Epoch 38/3000\n",
+ " - 31s - loss: 2.5277 - acc: 0.1864 - val_loss: 2.6043 - val_acc: 0.1907\n",
+ "\n",
+ "Epoch 00038: val_acc improved from 0.18373 to 0.19074, saving model to ./ModelSnapshots/LSTM-v1-038.h5\n",
+ "Epoch 39/3000\n",
+ " - 30s - loss: 2.5188 - acc: 0.1928 - val_loss: 2.5917 - val_acc: 0.1958\n",
+ "\n",
+ "Epoch 00039: val_acc improved from 0.19074 to 0.19580, saving model to ./ModelSnapshots/LSTM-v1-039.h5\n",
+ "Epoch 40/3000\n",
+ " - 30s - loss: 2.5115 - acc: 0.1996 - val_loss: 2.5736 - val_acc: 0.2009\n",
+ "\n",
+ "Epoch 00040: val_acc improved from 0.19580 to 0.20086, saving model to ./ModelSnapshots/LSTM-v1-040.h5\n",
+ "Epoch 41/3000\n",
+ " - 33s - loss: 2.4997 - acc: 0.1981 - val_loss: 2.5785 - val_acc: 0.2079\n",
+ "\n",
+ "Epoch 00041: val_acc improved from 0.20086 to 0.20786, saving model to ./ModelSnapshots/LSTM-v1-041.h5\n",
+ "Epoch 42/3000\n",
+ " - 31s - loss: 2.4816 - acc: 0.2097 - val_loss: 2.5575 - val_acc: 0.2141\n",
+ "\n",
+ "Epoch 00042: val_acc improved from 0.20786 to 0.21409, saving model to ./ModelSnapshots/LSTM-v1-042.h5\n",
+ "Epoch 43/3000\n",
+ " - 31s - loss: 2.4817 - acc: 0.2008 - val_loss: 2.5963 - val_acc: 0.1911\n",
+ "\n",
+ "Epoch 00043: val_acc did not improve from 0.21409\n",
+ "Epoch 44/3000\n",
+ " - 31s - loss: 2.4594 - acc: 0.2091 - val_loss: 2.6390 - val_acc: 0.1526\n",
+ "\n",
+ "Epoch 00044: val_acc did not improve from 0.21409\n",
+ "Epoch 45/3000\n",
+ " - 32s - loss: 2.4536 - acc: 0.2120 - val_loss: 2.5017 - val_acc: 0.2382\n",
+ "\n",
+ "Epoch 00045: val_acc improved from 0.21409 to 0.23822, saving model to ./ModelSnapshots/LSTM-v1-045.h5\n",
+ "Epoch 46/3000\n",
+ " - 31s - loss: 2.4471 - acc: 0.2126 - val_loss: 2.6241 - val_acc: 0.1654\n",
+ "\n",
+ "Epoch 00046: val_acc did not improve from 0.23822\n",
+ "Epoch 47/3000\n",
+ " - 32s - loss: 2.4455 - acc: 0.2165 - val_loss: 2.5086 - val_acc: 0.2351\n",
+ "\n",
+ "Epoch 00047: val_acc did not improve from 0.23822\n",
+ "Epoch 48/3000\n",
+ " - 31s - loss: 2.4194 - acc: 0.2236 - val_loss: 2.5044 - val_acc: 0.2413\n",
+ "\n",
+ "Epoch 00048: val_acc improved from 0.23822 to 0.24134, saving model to ./ModelSnapshots/LSTM-v1-048.h5\n",
+ "Epoch 49/3000\n",
+ " - 33s - loss: 2.4320 - acc: 0.2210 - val_loss: 2.5420 - val_acc: 0.1938\n",
+ "\n",
+ "Epoch 00049: val_acc did not improve from 0.24134\n",
+ "Epoch 50/3000\n",
+ " - 30s - loss: 2.4093 - acc: 0.2382 - val_loss: 2.5476 - val_acc: 0.2145\n",
+ "\n",
+ "Epoch 00050: val_acc did not improve from 0.24134\n",
+ "Epoch 51/3000\n",
+ " - 31s - loss: 2.4086 - acc: 0.2403 - val_loss: 2.4596 - val_acc: 0.2853\n",
+ "\n",
+ "Epoch 00051: val_acc improved from 0.24134 to 0.28533, saving model to ./ModelSnapshots/LSTM-v1-051.h5\n",
+ "Epoch 52/3000\n",
+ " - 32s - loss: 2.3899 - acc: 0.2452 - val_loss: 2.4842 - val_acc: 0.2589\n",
+ "\n",
+ "Epoch 00052: val_acc did not improve from 0.28533\n",
+ "Epoch 53/3000\n",
+ " - 32s - loss: 2.3794 - acc: 0.2509 - val_loss: 2.4321 - val_acc: 0.2896\n",
+ "\n",
+ "Epoch 00053: val_acc improved from 0.28533 to 0.28961, saving model to ./ModelSnapshots/LSTM-v1-053.h5\n",
+ "Epoch 54/3000\n",
+ " - 30s - loss: 2.3714 - acc: 0.2548 - val_loss: 2.4887 - val_acc: 0.2740\n",
+ "\n",
+ "Epoch 00054: val_acc did not improve from 0.28961\n",
+ "Epoch 55/3000\n",
+ " - 31s - loss: 2.3547 - acc: 0.2649 - val_loss: 2.3934 - val_acc: 0.3091\n",
+ "\n",
+ "Epoch 00055: val_acc improved from 0.28961 to 0.30907, saving model to ./ModelSnapshots/LSTM-v1-055.h5\n",
+ "Epoch 56/3000\n",
+ " - 30s - loss: 2.3493 - acc: 0.2672 - val_loss: 2.4147 - val_acc: 0.2958\n",
+ "\n",
+ "Epoch 00056: val_acc did not improve from 0.30907\n",
+ "Epoch 57/3000\n",
+ " - 33s - loss: 2.3285 - acc: 0.2704 - val_loss: 2.4170 - val_acc: 0.2919\n",
+ "\n",
+ "Epoch 00057: val_acc did not improve from 0.30907\n",
+ "Epoch 58/3000\n",
+ " - 32s - loss: 2.3374 - acc: 0.2640 - val_loss: 2.3739 - val_acc: 0.3149\n",
+ "\n",
+ "Epoch 00058: val_acc improved from 0.30907 to 0.31491, saving model to ./ModelSnapshots/LSTM-v1-058.h5\n",
+ "Epoch 59/3000\n",
+ " - 32s - loss: 2.3138 - acc: 0.2742 - val_loss: 2.3773 - val_acc: 0.2919\n",
+ "\n",
+ "Epoch 00059: val_acc did not improve from 0.31491\n",
+ "Epoch 60/3000\n",
+ " - 32s - loss: 2.3025 - acc: 0.2838 - val_loss: 2.3299 - val_acc: 0.3130\n",
+ "\n",
+ "Epoch 00060: val_acc did not improve from 0.31491\n",
+ "Epoch 61/3000\n",
+ " - 32s - loss: 2.2968 - acc: 0.2849 - val_loss: 2.3850 - val_acc: 0.2744\n",
+ "\n",
+ "Epoch 00061: val_acc did not improve from 0.31491\n",
+ "Epoch 62/3000\n",
+ " - 30s - loss: 2.2868 - acc: 0.2841 - val_loss: 2.2854 - val_acc: 0.3106\n",
+ "\n",
+ "Epoch 00062: val_acc did not improve from 0.31491\n",
+ "Epoch 63/3000\n",
+ " - 32s - loss: 2.2707 - acc: 0.2951 - val_loss: 2.3034 - val_acc: 0.3266\n",
+ "\n",
+ "Epoch 00063: val_acc improved from 0.31491 to 0.32659, saving model to ./ModelSnapshots/LSTM-v1-063.h5\n",
+ "Epoch 64/3000\n",
+ " - 31s - loss: 2.2708 - acc: 0.2915 - val_loss: 2.2739 - val_acc: 0.3106\n",
+ "\n",
+ "Epoch 00064: val_acc did not improve from 0.32659\n",
+ "Epoch 65/3000\n",
+ " - 31s - loss: 2.2486 - acc: 0.3016 - val_loss: 2.2597 - val_acc: 0.3258\n",
+ "\n",
+ "Epoch 00065: val_acc did not improve from 0.32659\n",
+ "Epoch 66/3000\n",
+ " - 32s - loss: 2.2553 - acc: 0.2968 - val_loss: 2.3376 - val_acc: 0.2970\n",
+ "\n",
+ "Epoch 00066: val_acc did not improve from 0.32659\n",
+ "Epoch 67/3000\n",
+ " - 31s - loss: 2.2348 - acc: 0.3053 - val_loss: 2.3033 - val_acc: 0.3036\n",
+ "\n",
+ "Epoch 00067: val_acc did not improve from 0.32659\n",
+ "Epoch 68/3000\n",
+ " - 32s - loss: 2.2185 - acc: 0.3098 - val_loss: 2.2912 - val_acc: 0.3079\n",
+ "\n",
+ "Epoch 00068: val_acc did not improve from 0.32659\n",
+ "Epoch 69/3000\n",
+ " - 31s - loss: 2.2371 - acc: 0.3043 - val_loss: 2.2778 - val_acc: 0.3098\n",
+ "\n",
+ "Epoch 00069: val_acc did not improve from 0.32659\n",
+ "Epoch 70/3000\n",
+ " - 32s - loss: 2.2254 - acc: 0.3021 - val_loss: 2.2988 - val_acc: 0.3063\n",
+ "\n",
+ "Epoch 00070: val_acc did not improve from 0.32659\n",
+ "Epoch 71/3000\n",
+ " - 30s - loss: 2.2071 - acc: 0.3151 - val_loss: 2.2761 - val_acc: 0.3106\n",
+ "\n",
+ "Epoch 00071: val_acc did not improve from 0.32659\n",
+ "Epoch 72/3000\n",
+ " - 31s - loss: 2.2169 - acc: 0.3133 - val_loss: 2.2711 - val_acc: 0.3192\n",
+ "\n",
+ "Epoch 00072: val_acc did not improve from 0.32659\n",
+ "Epoch 73/3000\n",
+ " - 31s - loss: 2.2030 - acc: 0.3190 - val_loss: 2.2297 - val_acc: 0.3297\n",
+ "\n",
+ "Epoch 00073: val_acc improved from 0.32659 to 0.32970, saving model to ./ModelSnapshots/LSTM-v1-073.h5\n",
+ "Epoch 74/3000\n",
+ " - 31s - loss: 2.1944 - acc: 0.3182 - val_loss: 2.2618 - val_acc: 0.3180\n",
+ "\n",
+ "Epoch 00074: val_acc did not improve from 0.32970\n",
+ "Epoch 75/3000\n",
+ " - 31s - loss: 2.1754 - acc: 0.3243 - val_loss: 2.2761 - val_acc: 0.3017\n",
+ "\n",
+ "Epoch 00078: val_acc did not improve from 0.34916\n",
+ "Epoch 79/3000\n",
+ " - 31s - loss: 2.1787 - acc: 0.3123 - val_loss: 2.2157 - val_acc: 0.3320\n",
+ "\n",
+ "Epoch 00079: val_acc did not improve from 0.34916\n",
+ "Epoch 80/3000\n",
+ " - 31s - loss: 2.1571 - acc: 0.3318 - val_loss: 2.2067 - val_acc: 0.3348\n",
+ "\n",
+ "Epoch 00080: val_acc did not improve from 0.34916\n",
+ "Epoch 81/3000\n",
+ " - 30s - loss: 2.1638 - acc: 0.3287 - val_loss: 2.1681 - val_acc: 0.3340\n",
+ "\n",
+ "Epoch 00081: val_acc did not improve from 0.34916\n",
+ "Epoch 82/3000\n",
+ " - 32s - loss: 2.1578 - acc: 0.3309 - val_loss: 2.2740 - val_acc: 0.2993\n",
+ "\n",
+ "Epoch 00082: val_acc did not improve from 0.34916\n",
+ "Epoch 83/3000\n",
+ " - 30s - loss: 2.1539 - acc: 0.3296 - val_loss: 2.1840 - val_acc: 0.3414\n",
+ "\n",
+ "Epoch 00083: val_acc did not improve from 0.34916\n",
+ "Epoch 84/3000\n",
+ " - 31s - loss: 2.1431 - acc: 0.3389 - val_loss: 2.2319 - val_acc: 0.3274\n",
+ "\n",
+ "Epoch 00084: val_acc did not improve from 0.34916\n",
+ "Epoch 85/3000\n",
+ " - 31s - loss: 2.1423 - acc: 0.3315 - val_loss: 2.1704 - val_acc: 0.3340\n",
+ "\n",
+ "Epoch 00085: val_acc did not improve from 0.34916\n",
+ "Epoch 86/3000\n",
+ " - 32s - loss: 2.1380 - acc: 0.3392 - val_loss: 2.1629 - val_acc: 0.3511\n",
+ "\n",
+ "Epoch 00086: val_acc improved from 0.34916 to 0.35111, saving model to ./ModelSnapshots/LSTM-v1-086.h5\n",
+ "Epoch 87/3000\n",
+ " - 31s - loss: 2.1325 - acc: 0.3471 - val_loss: 2.1480 - val_acc: 0.3499\n",
+ "\n",
+ "Epoch 00087: val_acc did not improve from 0.35111\n",
+ "Epoch 88/3000\n",
+ " - 33s - loss: 2.1316 - acc: 0.3465 - val_loss: 2.2243 - val_acc: 0.3297\n",
+ "\n",
+ "Epoch 00088: val_acc did not improve from 0.35111\n",
+ "Epoch 89/3000\n",
+ " - 32s - loss: 2.1370 - acc: 0.3357 - val_loss: 2.1913 - val_acc: 0.3379\n",
+ "\n",
+ "Epoch 00089: val_acc did not improve from 0.35111\n",
+ "Epoch 90/3000\n",
+ " - 30s - loss: 2.1212 - acc: 0.3444 - val_loss: 2.1825 - val_acc: 0.3293\n",
+ "\n",
+ "Epoch 00090: val_acc did not improve from 0.35111\n",
+ "Epoch 91/3000\n",
+ " - 31s - loss: 2.1139 - acc: 0.3501 - val_loss: 2.1661 - val_acc: 0.3437\n",
+ "\n",
+ "Epoch 00091: val_acc did not improve from 0.35111\n",
+ "Epoch 92/3000\n",
+ " - 31s - loss: 2.1255 - acc: 0.3392 - val_loss: 2.1815 - val_acc: 0.3581\n",
+ "\n",
+ "Epoch 00092: val_acc improved from 0.35111 to 0.35812, saving model to ./ModelSnapshots/LSTM-v1-092.h5\n",
+ "Epoch 93/3000\n",
+ " - 32s - loss: 2.1239 - acc: 0.3484 - val_loss: 2.1803 - val_acc: 0.3507\n",
+ "\n",
+ "Epoch 00093: val_acc did not improve from 0.35812\n",
+ "Epoch 94/3000\n",
+ " - 32s - loss: 2.1060 - acc: 0.3588 - val_loss: 2.1368 - val_acc: 0.3612\n",
+ "\n",
+ "Epoch 00094: val_acc improved from 0.35812 to 0.36123, saving model to ./ModelSnapshots/LSTM-v1-094.h5\n",
+ "Epoch 95/3000\n",
+ " - 30s - loss: 2.1124 - acc: 0.3510 - val_loss: 2.2286 - val_acc: 0.3324\n",
+ "\n",
+ "Epoch 00095: val_acc did not improve from 0.36123\n",
+ "Epoch 96/3000\n",
+ " - 33s - loss: 2.0886 - acc: 0.3552 - val_loss: 2.1486 - val_acc: 0.3453\n",
+ "\n",
+ "Epoch 00096: val_acc did not improve from 0.36123\n",
+ "Epoch 97/3000\n",
+ " - 31s - loss: 2.1004 - acc: 0.3564 - val_loss: 2.1067 - val_acc: 0.3678\n",
+ "\n",
+ "Epoch 00097: val_acc improved from 0.36123 to 0.36785, saving model to ./ModelSnapshots/LSTM-v1-097.h5\n",
+ "Epoch 98/3000\n",
+ " - 32s - loss: 2.0886 - acc: 0.3573 - val_loss: 2.1157 - val_acc: 0.3725\n",
+ "\n",
+ "Epoch 00098: val_acc improved from 0.36785 to 0.37252, saving model to ./ModelSnapshots/LSTM-v1-098.h5\n",
+ "Epoch 99/3000\n",
+ " - 31s - loss: 2.0741 - acc: 0.3667 - val_loss: 2.1037 - val_acc: 0.3550\n",
+ "\n",
+ "Epoch 00099: val_acc did not improve from 0.37252\n",
+ "Epoch 100/3000\n",
+ " - 32s - loss: 2.0715 - acc: 0.3656 - val_loss: 2.1305 - val_acc: 0.3566\n",
+ "\n",
+ "Epoch 00100: val_acc did not improve from 0.37252\n",
+ "Epoch 101/3000\n",
+ " - 32s - loss: 2.0641 - acc: 0.3638 - val_loss: 2.1131 - val_acc: 0.3690\n",
+ "\n",
+ "Epoch 00101: val_acc did not improve from 0.37252\n",
+ "Epoch 102/3000\n",
+ " - 31s - loss: 2.0814 - acc: 0.3629 - val_loss: 2.1053 - val_acc: 0.3717\n",
+ "\n",
+ "Epoch 00102: val_acc did not improve from 0.37252\n",
+ "Epoch 103/3000\n",
+ " - 31s - loss: 2.0689 - acc: 0.3675 - val_loss: 2.1272 - val_acc: 0.3636\n",
+ "\n",
+ "Epoch 00103: val_acc did not improve from 0.37252\n",
+ "Epoch 104/3000\n",
+ " - 32s - loss: 2.0602 - acc: 0.3759 - val_loss: 2.0840 - val_acc: 0.3776\n",
+ "\n",
+ "Epoch 00104: val_acc improved from 0.37252 to 0.37758, saving model to ./ModelSnapshots/LSTM-v1-104.h5\n",
+ "Epoch 105/3000\n",
+ " - 31s - loss: 2.0519 - acc: 0.3715 - val_loss: 2.1287 - val_acc: 0.3605\n",
+ "\n",
+ "Epoch 00105: val_acc did not improve from 0.37758\n",
+ "Epoch 106/3000\n",
+ " - 32s - loss: 2.0573 - acc: 0.3759 - val_loss: 2.1473 - val_acc: 0.3706\n",
+ "\n",
+ "Epoch 00106: val_acc did not improve from 0.37758\n",
+ "Epoch 107/3000\n",
+ " - 32s - loss: 2.0489 - acc: 0.3735 - val_loss: 2.1317 - val_acc: 0.3745\n",
+ "\n",
+ "Epoch 00107: val_acc did not improve from 0.37758\n",
+ "Epoch 108/3000\n",
+ " - 30s - loss: 2.0403 - acc: 0.3819 - val_loss: 2.1252 - val_acc: 0.3931\n",
+ "\n",
+ "Epoch 00108: val_acc improved from 0.37758 to 0.39315, saving model to ./ModelSnapshots/LSTM-v1-108.h5\n",
+ "Epoch 109/3000\n",
+ " - 31s - loss: 2.0376 - acc: 0.3853 - val_loss: 2.1251 - val_acc: 0.3881\n",
+ "\n",
+ "Epoch 00109: val_acc did not improve from 0.39315\n",
+ "Epoch 110/3000\n",
+ " - 31s - loss: 2.0264 - acc: 0.3895 - val_loss: 2.0557 - val_acc: 0.3978\n",
+ "\n",
+ "Epoch 00110: val_acc improved from 0.39315 to 0.39782, saving model to ./ModelSnapshots/LSTM-v1-110.h5\n",
+ "Epoch 111/3000\n",
+ " - 31s - loss: 2.0173 - acc: 0.3859 - val_loss: 2.0634 - val_acc: 0.4188\n",
+ "\n",
+ "Epoch 00111: val_acc improved from 0.39782 to 0.41884, saving model to ./ModelSnapshots/LSTM-v1-111.h5\n",
+ "Epoch 112/3000\n",
+ " - 32s - loss: 2.0159 - acc: 0.3992 - val_loss: 2.1026 - val_acc: 0.4025\n",
+ "\n",
+ "Epoch 00112: val_acc did not improve from 0.41884\n",
+ "Epoch 113/3000\n",
+ " - 31s - loss: 2.0291 - acc: 0.3848 - val_loss: 2.0706 - val_acc: 0.4177\n",
+ "\n",
+ "Epoch 00113: val_acc did not improve from 0.41884\n",
+ "Epoch 114/3000\n",
+ " - 31s - loss: 2.0226 - acc: 0.3966 - val_loss: 2.0505 - val_acc: 0.4200\n",
+ "\n",
+ "Epoch 00114: val_acc improved from 0.41884 to 0.42001, saving model to ./ModelSnapshots/LSTM-v1-114.h5\n",
+ "Epoch 115/3000\n",
+ " - 32s - loss: 2.0020 - acc: 0.3954 - val_loss: 2.0881 - val_acc: 0.4079\n",
+ "\n",
+ "Epoch 00115: val_acc did not improve from 0.42001\n",
+ "Epoch 116/3000\n",
+ " - 30s - loss: 1.9986 - acc: 0.4007 - val_loss: 2.0770 - val_acc: 0.4177\n",
+ "\n",
+ "Epoch 00116: val_acc did not improve from 0.42001\n",
+ "Epoch 117/3000\n",
+ " - 32s - loss: 2.0018 - acc: 0.3958 - val_loss: 2.0692 - val_acc: 0.4111\n",
+ "\n",
+ "Epoch 00117: val_acc did not improve from 0.42001\n",
+ "Epoch 118/3000\n",
+ " - 33s - loss: 2.0095 - acc: 0.3973 - val_loss: 2.0942 - val_acc: 0.4103\n",
+ "\n",
+ "Epoch 00118: val_acc did not improve from 0.42001\n",
+ "Epoch 119/3000\n",
+ " - 31s - loss: 1.9914 - acc: 0.4087 - val_loss: 2.0766 - val_acc: 0.4138\n",
+ "\n",
+ "Epoch 00119: val_acc did not improve from 0.42001\n",
+ "Epoch 120/3000\n",
+ " - 31s - loss: 1.9960 - acc: 0.4066 - val_loss: 2.0496 - val_acc: 0.4290\n",
+ "\n",
+ "Epoch 00120: val_acc improved from 0.42001 to 0.42896, saving model to ./ModelSnapshots/LSTM-v1-120.h5\n",
+ "Epoch 121/3000\n",
+ " - 30s - loss: 1.9872 - acc: 0.4062 - val_loss: 2.0904 - val_acc: 0.3986\n",
+ "\n",
+ "Epoch 00121: val_acc did not improve from 0.42896\n",
+ "Epoch 122/3000\n",
+ " - 32s - loss: 1.9837 - acc: 0.4046 - val_loss: 2.0859 - val_acc: 0.4068\n",
+ "\n",
+ "Epoch 00122: val_acc did not improve from 0.42896\n",
+ "Epoch 123/3000\n",
+ " - 31s - loss: 1.9728 - acc: 0.4132 - val_loss: 2.0615 - val_acc: 0.4243\n",
+ "\n",
+ "Epoch 00123: val_acc did not improve from 0.42896\n",
+ "Epoch 124/3000\n",
+ " - 33s - loss: 1.9660 - acc: 0.4161 - val_loss: 2.0382 - val_acc: 0.4414\n",
+ "\n",
+ "Epoch 00124: val_acc improved from 0.42896 to 0.44142, saving model to ./ModelSnapshots/LSTM-v1-124.h5\n",
+ "Epoch 125/3000\n",
+ " - 29s - loss: 1.9553 - acc: 0.4141 - val_loss: 2.0318 - val_acc: 0.4395\n",
+ "\n",
+ "Epoch 00125: val_acc did not improve from 0.44142\n",
+ "Epoch 126/3000\n",
+ " - 31s - loss: 1.9489 - acc: 0.4239 - val_loss: 2.0009 - val_acc: 0.4519\n",
+ "\n",
+ "Epoch 00126: val_acc improved from 0.44142 to 0.45193, saving model to ./ModelSnapshots/LSTM-v1-126.h5\n",
+ "Epoch 127/3000\n",
+ " - 32s - loss: 1.9371 - acc: 0.4274 - val_loss: 2.0688 - val_acc: 0.4165\n",
+ "\n",
+ "Epoch 00127: val_acc did not improve from 0.45193\n",
+ "Epoch 128/3000\n",
+ " - 30s - loss: 1.9494 - acc: 0.4210 - val_loss: 2.0101 - val_acc: 0.4430\n",
+ "\n",
+ "Epoch 00128: val_acc did not improve from 0.45193\n",
+ "Epoch 129/3000\n",
+ " - 32s - loss: 1.9251 - acc: 0.4309 - val_loss: 1.9964 - val_acc: 0.4329\n",
+ "\n",
+ "Epoch 00129: val_acc did not improve from 0.45193\n",
+ "Epoch 130/3000\n",
+ " - 31s - loss: 1.9454 - acc: 0.4266 - val_loss: 2.0455 - val_acc: 0.4270\n",
+ "\n",
+ "Epoch 00130: val_acc did not improve from 0.45193\n",
+ "Epoch 131/3000\n",
+ " - 32s - loss: 1.9343 - acc: 0.4209 - val_loss: 2.0057 - val_acc: 0.4652\n",
+ "\n",
+ "Epoch 00131: val_acc improved from 0.45193 to 0.46516, saving model to ./ModelSnapshots/LSTM-v1-131.h5\n",
+ "Epoch 132/3000\n",
+ " - 31s - loss: 1.9280 - acc: 0.4309 - val_loss: 2.0194 - val_acc: 0.4562\n",
+ "\n",
+ "Epoch 00132: val_acc did not improve from 0.46516\n",
+ "Epoch 133/3000\n",
+ " - 32s - loss: 1.8961 - acc: 0.4429 - val_loss: 1.9824 - val_acc: 0.4601\n",
+ "\n",
+ "Epoch 00133: val_acc did not improve from 0.46516\n",
+ "Epoch 134/3000\n",
+ " - 31s - loss: 1.9103 - acc: 0.4377 - val_loss: 2.0832 - val_acc: 0.4336\n",
+ "\n",
+ "Epoch 00134: val_acc did not improve from 0.46516\n",
+ "Epoch 135/3000\n",
+ " - 31s - loss: 1.9186 - acc: 0.4408 - val_loss: 1.9925 - val_acc: 0.4675\n",
+ "\n",
+ "Epoch 00135: val_acc improved from 0.46516 to 0.46750, saving model to ./ModelSnapshots/LSTM-v1-135.h5\n",
+ "Epoch 136/3000\n",
+ " - 33s - loss: 1.8893 - acc: 0.4435 - val_loss: 2.0137 - val_acc: 0.4539\n",
+ "\n",
+ "Epoch 00136: val_acc did not improve from 0.46750\n",
+ "Epoch 137/3000\n",
+ " - 33s - loss: 1.8867 - acc: 0.4512 - val_loss: 1.9967 - val_acc: 0.4624\n",
+ "\n",
+ "Epoch 00137: val_acc did not improve from 0.46750\n",
+ "Epoch 138/3000\n",
+ " - 31s - loss: 1.8654 - acc: 0.4515 - val_loss: 1.9488 - val_acc: 0.4936\n",
+ "\n",
+ "Epoch 00138: val_acc improved from 0.46750 to 0.49358, saving model to ./ModelSnapshots/LSTM-v1-138.h5\n",
+ "Epoch 139/3000\n",
+ " - 30s - loss: 1.8678 - acc: 0.4567 - val_loss: 1.9670 - val_acc: 0.4710\n",
+ "\n",
+ "Epoch 00139: val_acc did not improve from 0.49358\n",
+ "Epoch 140/3000\n",
+ " - 30s - loss: 1.8754 - acc: 0.4543 - val_loss: 1.9655 - val_acc: 0.4909\n",
+ "\n",
+ "Epoch 00140: val_acc did not improve from 0.49358\n",
+ "Epoch 141/3000\n",
+ " - 31s - loss: 1.8421 - acc: 0.4589 - val_loss: 1.9581 - val_acc: 0.4889\n",
+ "\n",
+ "Epoch 00141: val_acc did not improve from 0.49358\n",
+ "Epoch 142/3000\n",
+ " - 32s - loss: 1.8475 - acc: 0.4614 - val_loss: 1.9425 - val_acc: 0.4944\n",
+ "\n",
+ "Epoch 00142: val_acc improved from 0.49358 to 0.49436, saving model to ./ModelSnapshots/LSTM-v1-142.h5\n",
+ "Epoch 143/3000\n",
+ " - 31s - loss: 1.8568 - acc: 0.4626 - val_loss: 2.0092 - val_acc: 0.4597\n",
+ "\n",
+ "Epoch 00143: val_acc did not improve from 0.49436\n",
+ "Epoch 144/3000\n",
+ " - 31s - loss: 1.8311 - acc: 0.4716 - val_loss: 1.9390 - val_acc: 0.4858\n",
+ "\n",
+ "Epoch 00144: val_acc did not improve from 0.49436\n",
+ "Epoch 145/3000\n",
+ " - 31s - loss: 1.8298 - acc: 0.4694 - val_loss: 1.9286 - val_acc: 0.4792\n",
+ "\n",
+ "Epoch 00145: val_acc did not improve from 0.49436\n",
+ "Epoch 146/3000\n",
+ " - 31s - loss: 1.8223 - acc: 0.4698 - val_loss: 1.9748 - val_acc: 0.4523\n",
+ "\n",
+ "Epoch 00146: val_acc did not improve from 0.49436\n",
+ "Epoch 147/3000\n",
+ " - 31s - loss: 1.8183 - acc: 0.4781 - val_loss: 1.9187 - val_acc: 0.5119\n",
+ "\n",
+ "Epoch 00147: val_acc improved from 0.49436 to 0.51187, saving model to ./ModelSnapshots/LSTM-v1-147.h5\n",
+ "Epoch 148/3000\n",
+ " - 31s - loss: 1.8252 - acc: 0.4710 - val_loss: 1.9349 - val_acc: 0.4815\n",
+ "\n",
+ "Epoch 00148: val_acc did not improve from 0.51187\n",
+ "Epoch 149/3000\n",
+ " - 31s - loss: 1.7967 - acc: 0.4855 - val_loss: 1.9059 - val_acc: 0.5056\n",
+ "\n",
+ "Epoch 00149: val_acc did not improve from 0.51187\n",
+ "Epoch 150/3000\n",
+ " - 32s - loss: 1.7899 - acc: 0.4887 - val_loss: 1.9118 - val_acc: 0.4971\n",
+ "\n",
+ "Epoch 00150: val_acc did not improve from 0.51187\n",
+ "Epoch 151/3000\n",
+ " - 30s - loss: 1.7822 - acc: 0.4956 - val_loss: 1.8954 - val_acc: 0.4877\n",
+ "\n",
+ "Epoch 00151: val_acc did not improve from 0.51187\n",
+ "Epoch 152/3000\n",
+ " - 32s - loss: 1.7850 - acc: 0.4961 - val_loss: 1.8768 - val_acc: 0.5002\n",
+ "\n",
+ "Epoch 00152: val_acc did not improve from 0.51187\n",
+ "Epoch 153/3000\n",
+ " - 32s - loss: 1.7858 - acc: 0.4949 - val_loss: 1.8764 - val_acc: 0.5165\n",
+ "\n",
+ "Epoch 00153: val_acc improved from 0.51187 to 0.51654, saving model to ./ModelSnapshots/LSTM-v1-153.h5\n",
+ "Epoch 154/3000\n",
+ " - 31s - loss: 1.7986 - acc: 0.4902 - val_loss: 1.9126 - val_acc: 0.4504\n",
+ "\n",
+ "Epoch 00154: val_acc did not improve from 0.51654\n",
+ "Epoch 155/3000\n",
+ " - 31s - loss: 1.7665 - acc: 0.5023 - val_loss: 1.8434 - val_acc: 0.5364\n",
+ "\n",
+ "Epoch 00155: val_acc improved from 0.51654 to 0.53640, saving model to ./ModelSnapshots/LSTM-v1-155.h5\n",
+ "Epoch 156/3000\n",
+ " - 32s - loss: 1.7740 - acc: 0.4988 - val_loss: 1.8442 - val_acc: 0.5360\n",
+ "\n",
+ "Epoch 00156: val_acc did not improve from 0.53640\n",
+ "Epoch 157/3000\n",
+ " - 30s - loss: 1.7498 - acc: 0.5103 - val_loss: 1.8750 - val_acc: 0.5243\n",
+ "\n",
+ "Epoch 00157: val_acc did not improve from 0.53640\n",
+ "Epoch 158/3000\n",
+ " - 31s - loss: 1.7603 - acc: 0.5050 - val_loss: 1.9048 - val_acc: 0.4897\n",
+ "\n",
+ "Epoch 00158: val_acc did not improve from 0.53640\n",
+ "Epoch 159/3000\n",
+ " - 31s - loss: 1.7320 - acc: 0.5198 - val_loss: 1.7988 - val_acc: 0.5586\n",
+ "\n",
+ "Epoch 00159: val_acc improved from 0.53640 to 0.55858, saving model to ./ModelSnapshots/LSTM-v1-159.h5\n",
+ "Epoch 160/3000\n",
+ " - 31s - loss: 1.7254 - acc: 0.5246 - val_loss: 1.8168 - val_acc: 0.5469\n",
+ "\n",
+ "Epoch 00160: val_acc did not improve from 0.55858\n",
+ "Epoch 161/3000\n",
+ " - 31s - loss: 1.7381 - acc: 0.5205 - val_loss: 1.8779 - val_acc: 0.5154\n",
+ "\n",
+ "Epoch 00161: val_acc did not improve from 0.55858\n",
+ "Epoch 162/3000\n",
+ " - 31s - loss: 1.7354 - acc: 0.5210 - val_loss: 1.7865 - val_acc: 0.5656\n",
+ "\n",
+ "Epoch 00162: val_acc improved from 0.55858 to 0.56559, saving model to ./ModelSnapshots/LSTM-v1-162.h5\n",
+ "Epoch 163/3000\n",
+ " - 32s - loss: 1.7142 - acc: 0.5282 - val_loss: 1.8238 - val_acc: 0.5566\n",
+ "\n",
+ "Epoch 00163: val_acc did not improve from 0.56559\n",
+ "Epoch 164/3000\n",
+ " - 32s - loss: 1.7238 - acc: 0.5282 - val_loss: 1.8009 - val_acc: 0.5415\n",
+ "\n",
+ "Epoch 00164: val_acc did not improve from 0.56559\n",
+ "Epoch 165/3000\n",
+ " - 29s - loss: 1.7092 - acc: 0.5334 - val_loss: 1.7934 - val_acc: 0.5699\n",
+ "\n",
+ "Epoch 00165: val_acc improved from 0.56559 to 0.56987, saving model to ./ModelSnapshots/LSTM-v1-165.h5\n",
+ "Epoch 166/3000\n",
+ " - 32s - loss: 1.7107 - acc: 0.5374 - val_loss: 1.8209 - val_acc: 0.5481\n",
+ "\n",
+ "Epoch 00166: val_acc did not improve from 0.56987\n",
+ "Epoch 167/3000\n",
+ " - 30s - loss: 1.6829 - acc: 0.5492 - val_loss: 1.7870 - val_acc: 0.5477\n",
+ "\n",
+ "Epoch 00167: val_acc did not improve from 0.56987\n",
+ "Epoch 168/3000\n",
+ " - 31s - loss: 1.6791 - acc: 0.5402 - val_loss: 1.7036 - val_acc: 0.5847\n",
+ "\n",
+ "Epoch 00168: val_acc improved from 0.56987 to 0.58466, saving model to ./ModelSnapshots/LSTM-v1-168.h5\n",
+ "Epoch 169/3000\n",
+ " - 32s - loss: 1.6686 - acc: 0.5515 - val_loss: 1.7252 - val_acc: 0.5777\n",
+ "\n",
+ "Epoch 00169: val_acc did not improve from 0.58466\n",
+ "Epoch 170/3000\n",
+ " - 30s - loss: 1.6653 - acc: 0.5537 - val_loss: 1.7381 - val_acc: 0.5839\n",
+ "\n",
+ "Epoch 00170: val_acc did not improve from 0.58466\n",
+ "Epoch 171/3000\n",
+ " - 32s - loss: 1.6565 - acc: 0.5634 - val_loss: 1.7307 - val_acc: 0.5780\n",
+ "\n",
+ "Epoch 00171: val_acc did not improve from 0.58466\n",
+ "Epoch 172/3000\n",
+ " - 30s - loss: 1.6546 - acc: 0.5556 - val_loss: 1.6916 - val_acc: 0.6057\n",
+ "\n",
+ "Epoch 00172: val_acc improved from 0.58466 to 0.60568, saving model to ./ModelSnapshots/LSTM-v1-172.h5\n",
+ "Epoch 173/3000\n",
+ " - 31s - loss: 1.6538 - acc: 0.5550 - val_loss: 1.7286 - val_acc: 0.5718\n",
+ "\n",
+ "Epoch 00173: val_acc did not improve from 0.60568\n",
+ "Epoch 174/3000\n",
+ " - 31s - loss: 1.6238 - acc: 0.5690 - val_loss: 1.7000 - val_acc: 0.6030\n",
+ "\n",
+ "Epoch 00174: val_acc did not improve from 0.60568\n",
+ "Epoch 175/3000\n",
+ " - 30s - loss: 1.6366 - acc: 0.5630 - val_loss: 1.7058 - val_acc: 0.5987\n",
+ "\n",
+ "Epoch 00175: val_acc did not improve from 0.60568\n",
+ "Epoch 176/3000\n",
+ " - 31s - loss: 1.6284 - acc: 0.5693 - val_loss: 1.6839 - val_acc: 0.6002\n",
+ "\n",
+ "Epoch 00176: val_acc did not improve from 0.60568\n",
+ "Epoch 177/3000\n",
+ " - 31s - loss: 1.6234 - acc: 0.5655 - val_loss: 1.7064 - val_acc: 0.5870\n",
+ "\n",
+ "Epoch 00177: val_acc did not improve from 0.60568\n",
+ "Epoch 178/3000\n",
+ " - 31s - loss: 1.6118 - acc: 0.5768 - val_loss: 1.6805 - val_acc: 0.6088\n",
+ "\n",
+ "Epoch 00178: val_acc improved from 0.60568 to 0.60880, saving model to ./ModelSnapshots/LSTM-v1-178.h5\n",
+ "Epoch 179/3000\n",
+ " - 31s - loss: 1.5972 - acc: 0.5726 - val_loss: 1.6665 - val_acc: 0.5998\n",
+ "\n",
+ "Epoch 00179: val_acc did not improve from 0.60880\n",
+ "Epoch 180/3000\n",
+ " - 32s - loss: 1.6037 - acc: 0.5764 - val_loss: 1.6573 - val_acc: 0.6065\n",
+ "\n",
+ "Epoch 00180: val_acc did not improve from 0.60880\n",
+ "Epoch 181/3000\n",
+ " - 29s - loss: 1.6060 - acc: 0.5814 - val_loss: 1.6807 - val_acc: 0.6010\n",
+ "\n",
+ "Epoch 00181: val_acc did not improve from 0.60880\n",
+ "Epoch 182/3000\n",
+ " - 31s - loss: 1.5979 - acc: 0.5782 - val_loss: 1.6651 - val_acc: 0.6061\n",
+ "\n",
+ "Epoch 00182: val_acc did not improve from 0.60880\n",
+ "Epoch 183/3000\n",
+ " - 32s - loss: 1.5730 - acc: 0.5877 - val_loss: 1.7102 - val_acc: 0.5905\n",
+ "\n",
+ "Epoch 00183: val_acc did not improve from 0.60880\n",
+ "Epoch 184/3000\n",
+ " - 31s - loss: 1.5758 - acc: 0.5882 - val_loss: 1.6400 - val_acc: 0.6345\n",
+ "\n",
+ "Epoch 00184: val_acc improved from 0.60880 to 0.63449, saving model to ./ModelSnapshots/LSTM-v1-184.h5\n",
+ "Epoch 185/3000\n",
+ " - 30s - loss: 1.5683 - acc: 0.5978 - val_loss: 1.7124 - val_acc: 0.5858\n",
+ "\n",
+ "Epoch 00185: val_acc did not improve from 0.63449\n",
+ "Epoch 186/3000\n",
+ " - 32s - loss: 1.5617 - acc: 0.5957 - val_loss: 1.5891 - val_acc: 0.6322\n",
+ "\n",
+ "Epoch 00186: val_acc did not improve from 0.63449\n",
+ "Epoch 187/3000\n",
+ " - 31s - loss: 1.5567 - acc: 0.5992 - val_loss: 1.5721 - val_acc: 0.6454\n",
+ "\n",
+ "Epoch 00187: val_acc improved from 0.63449 to 0.64539, saving model to ./ModelSnapshots/LSTM-v1-187.h5\n",
+ "Epoch 188/3000\n",
+ " - 30s - loss: 1.5432 - acc: 0.5954 - val_loss: 1.6278 - val_acc: 0.6271\n",
+ "\n",
+ "Epoch 00188: val_acc did not improve from 0.64539\n",
+ "Epoch 189/3000\n",
+ " - 31s - loss: 1.5315 - acc: 0.6088 - val_loss: 1.5630 - val_acc: 0.6477\n",
+ "\n",
+ "Epoch 00189: val_acc improved from 0.64539 to 0.64772, saving model to ./ModelSnapshots/LSTM-v1-189.h5\n",
+ "Epoch 190/3000\n",
+ " - 30s - loss: 1.5608 - acc: 0.6040 - val_loss: 1.6215 - val_acc: 0.6279\n",
+ "\n",
+ "Epoch 00190: val_acc did not improve from 0.64772\n",
+ "Epoch 191/3000\n",
+ " - 32s - loss: 1.5158 - acc: 0.6188 - val_loss: 1.6497 - val_acc: 0.6290\n",
+ "\n",
+ "Epoch 00191: val_acc did not improve from 0.64772\n",
+ "Epoch 192/3000\n",
+ " - 29s - loss: 1.5071 - acc: 0.6093 - val_loss: 1.5323 - val_acc: 0.6532\n",
+ "\n",
+ "Epoch 00192: val_acc improved from 0.64772 to 0.65317, saving model to ./ModelSnapshots/LSTM-v1-192.h5\n",
+ "Epoch 193/3000\n",
+ " - 32s - loss: 1.5063 - acc: 0.6128 - val_loss: 1.5500 - val_acc: 0.6407\n",
+ "\n",
+ "Epoch 00193: val_acc did not improve from 0.65317\n",
+ "Epoch 194/3000\n",
+ " - 29s - loss: 1.4953 - acc: 0.6147 - val_loss: 1.5682 - val_acc: 0.6462\n",
+ "\n",
+ "Epoch 00194: val_acc did not improve from 0.65317\n",
+ "Epoch 195/3000\n",
+ " - 32s - loss: 1.4770 - acc: 0.6262 - val_loss: 1.5359 - val_acc: 0.6567\n",
+ "\n",
+ "Epoch 00195: val_acc improved from 0.65317 to 0.65668, saving model to ./ModelSnapshots/LSTM-v1-195.h5\n",
+ "Epoch 196/3000\n",
+ " - 30s - loss: 1.4822 - acc: 0.6330 - val_loss: 1.5659 - val_acc: 0.6613\n",
+ "\n",
+ "Epoch 00196: val_acc improved from 0.65668 to 0.66135, saving model to ./ModelSnapshots/LSTM-v1-196.h5\n",
+ "Epoch 197/3000\n",
+ " - 32s - loss: 1.4783 - acc: 0.6256 - val_loss: 1.5052 - val_acc: 0.6610\n",
+ "\n",
+ "Epoch 00197: val_acc did not improve from 0.66135\n",
+ "Epoch 198/3000\n",
+ " - 30s - loss: 1.4966 - acc: 0.6256 - val_loss: 1.5852 - val_acc: 0.6220\n",
+ "\n",
+ "Epoch 00198: val_acc did not improve from 0.66135\n",
+ "Epoch 199/3000\n",
+ " - 33s - loss: 1.4907 - acc: 0.6270 - val_loss: 1.5417 - val_acc: 0.6641\n",
+ "\n",
+ "Epoch 00199: val_acc improved from 0.66135 to 0.66407, saving model to ./ModelSnapshots/LSTM-v1-199.h5\n",
+ "Epoch 200/3000\n",
+ " - 31s - loss: 1.4683 - acc: 0.6324 - val_loss: 1.5307 - val_acc: 0.6528\n",
+ "\n",
+ "Epoch 00200: val_acc did not improve from 0.66407\n",
+ "Epoch 201/3000\n",
+ " - 30s - loss: 1.4606 - acc: 0.6365 - val_loss: 1.5474 - val_acc: 0.6458\n",
+ "\n",
+ "Epoch 00201: val_acc did not improve from 0.66407\n",
+ "Epoch 202/3000\n",
+ " - 32s - loss: 1.4426 - acc: 0.6439 - val_loss: 1.4965 - val_acc: 0.6676\n",
+ "\n",
+ "Epoch 00202: val_acc improved from 0.66407 to 0.66757, saving model to ./ModelSnapshots/LSTM-v1-202.h5\n",
+ "Epoch 203/3000\n",
+ " - 32s - loss: 1.4257 - acc: 0.6476 - val_loss: 1.5003 - val_acc: 0.6769\n",
+ "\n",
+ "Epoch 00203: val_acc improved from 0.66757 to 0.67692, saving model to ./ModelSnapshots/LSTM-v1-203.h5\n",
+ "Epoch 204/3000\n",
+ " - 29s - loss: 1.4217 - acc: 0.6487 - val_loss: 1.5430 - val_acc: 0.6606\n",
+ "\n",
+ "Epoch 00204: val_acc did not improve from 0.67692\n",
+ "Epoch 205/3000\n",
+ " - 31s - loss: 1.4226 - acc: 0.6493 - val_loss: 1.4830 - val_acc: 0.6789\n",
+ "\n",
+ "Epoch 00205: val_acc improved from 0.67692 to 0.67886, saving model to ./ModelSnapshots/LSTM-v1-205.h5\n",
+ "Epoch 206/3000\n",
+ " - 32s - loss: 1.3975 - acc: 0.6550 - val_loss: 1.4585 - val_acc: 0.6816\n",
+ "\n",
+ "Epoch 00206: val_acc improved from 0.67886 to 0.68159, saving model to ./ModelSnapshots/LSTM-v1-206.h5\n",
+ "Epoch 207/3000\n",
+ " - 30s - loss: 1.3852 - acc: 0.6707 - val_loss: 1.4623 - val_acc: 0.6804\n",
+ "\n",
+ "Epoch 00207: val_acc did not improve from 0.68159\n",
+ "Epoch 208/3000\n",
+ " - 32s - loss: 1.3694 - acc: 0.6736 - val_loss: 1.4777 - val_acc: 0.6828\n",
+ "\n",
+ "Epoch 00208: val_acc improved from 0.68159 to 0.68276, saving model to ./ModelSnapshots/LSTM-v1-208.h5\n",
+ "Epoch 209/3000\n",
+ " - 29s - loss: 1.3926 - acc: 0.6633 - val_loss: 1.4856 - val_acc: 0.6804\n",
+ "\n",
+ "Epoch 00209: val_acc did not improve from 0.68276\n",
+ "Epoch 210/3000\n",
+ " - 31s - loss: 1.3847 - acc: 0.6653 - val_loss: 1.4768 - val_acc: 0.6695\n",
+ "\n",
+ "Epoch 00210: val_acc did not improve from 0.68276\n",
+ "Epoch 211/3000\n",
+ " - 31s - loss: 1.3691 - acc: 0.6732 - val_loss: 1.4424 - val_acc: 0.6940\n",
+ "\n",
+ "Epoch 00211: val_acc improved from 0.68276 to 0.69404, saving model to ./ModelSnapshots/LSTM-v1-211.h5\n",
+ "Epoch 212/3000\n",
+ " - 30s - loss: 1.3587 - acc: 0.6763 - val_loss: 1.4104 - val_acc: 0.6979\n",
+ "\n",
+ "Epoch 00212: val_acc improved from 0.69404 to 0.69794, saving model to ./ModelSnapshots/LSTM-v1-212.h5\n",
+ "Epoch 213/3000\n",
+ " - 32s - loss: 1.3547 - acc: 0.6778 - val_loss: 1.4327 - val_acc: 0.6960\n",
+ "\n",
+ "Epoch 00213: val_acc did not improve from 0.69794\n",
+ "Epoch 214/3000\n",
+ " - 29s - loss: 1.4233 - acc: 0.6624 - val_loss: 1.4354 - val_acc: 0.6925\n",
+ "\n",
+ "Epoch 00214: val_acc did not improve from 0.69794\n",
+ "Epoch 215/3000\n",
+ " - 31s - loss: 1.3359 - acc: 0.6828 - val_loss: 1.4443 - val_acc: 0.6972\n",
+ "\n",
+ "Epoch 00215: val_acc did not improve from 0.69794\n",
+ "Epoch 216/3000\n",
+ " - 31s - loss: 1.3300 - acc: 0.6866 - val_loss: 1.4265 - val_acc: 0.7057\n",
+ "\n",
+ "Epoch 00216: val_acc improved from 0.69794 to 0.70572, saving model to ./ModelSnapshots/LSTM-v1-216.h5\n",
+ "Epoch 217/3000\n",
+ " - 31s - loss: 1.3149 - acc: 0.6937 - val_loss: 1.4729 - val_acc: 0.6909\n",
+ "\n",
+ "Epoch 00217: val_acc did not improve from 0.70572\n",
+ "Epoch 218/3000\n",
+ " - 30s - loss: 1.3437 - acc: 0.6863 - val_loss: 1.4187 - val_acc: 0.7116\n",
+ "\n",
+ "Epoch 00218: val_acc improved from 0.70572 to 0.71156, saving model to ./ModelSnapshots/LSTM-v1-218.h5\n",
+ "Epoch 219/3000\n",
+ " - 32s - loss: 1.2771 - acc: 0.7052 - val_loss: 1.4062 - val_acc: 0.7042\n",
+ "\n",
+ "Epoch 00219: val_acc did not improve from 0.71156\n",
+ "Epoch 220/3000\n",
+ " - 29s - loss: 1.3012 - acc: 0.6947 - val_loss: 1.3739 - val_acc: 0.7197\n",
+ "\n",
+ "Epoch 00220: val_acc improved from 0.71156 to 0.71974, saving model to ./ModelSnapshots/LSTM-v1-220.h5\n",
+ "Epoch 221/3000\n",
+ " - 30s - loss: 1.2894 - acc: 0.7040 - val_loss: 1.3745 - val_acc: 0.7151\n",
+ "\n",
+ "Epoch 00221: val_acc did not improve from 0.71974\n",
+ "Epoch 222/3000\n",
+ " - 31s - loss: 1.2632 - acc: 0.7091 - val_loss: 1.3651 - val_acc: 0.7197\n",
+ "\n",
+ "Epoch 00222: val_acc did not improve from 0.71974\n",
+ "Epoch 223/3000\n",
+ " - 31s - loss: 1.2625 - acc: 0.7120 - val_loss: 1.4143 - val_acc: 0.6979\n",
+ "\n",
+ "Epoch 00223: val_acc did not improve from 0.71974\n",
+ "Epoch 224/3000\n",
+ " - 32s - loss: 1.2671 - acc: 0.7098 - val_loss: 1.3643 - val_acc: 0.7221\n",
+ "\n",
+ "Epoch 00224: val_acc improved from 0.71974 to 0.72207, saving model to ./ModelSnapshots/LSTM-v1-224.h5\n",
+ "Epoch 225/3000\n",
+ " - 30s - loss: 1.2578 - acc: 0.7124 - val_loss: 1.3309 - val_acc: 0.7205\n",
+ "\n",
+ "Epoch 00225: val_acc did not improve from 0.72207\n",
+ "Epoch 226/3000\n",
+ " - 31s - loss: 1.2505 - acc: 0.7168 - val_loss: 1.3527 - val_acc: 0.7197\n",
+ "\n",
+ "Epoch 00226: val_acc did not improve from 0.72207\n",
+ "Epoch 227/3000\n",
+ " - 29s - loss: 1.2482 - acc: 0.7171 - val_loss: 1.3478 - val_acc: 0.7186\n",
+ "\n",
+ "Epoch 00227: val_acc did not improve from 0.72207\n",
+ "Epoch 228/3000\n",
+ " - 31s - loss: 1.2348 - acc: 0.7212 - val_loss: 1.3636 - val_acc: 0.7225\n",
+ "\n",
+ "Epoch 00228: val_acc improved from 0.72207 to 0.72246, saving model to ./ModelSnapshots/LSTM-v1-228.h5\n",
+ "Epoch 229/3000\n",
+ " - 32s - loss: 1.2146 - acc: 0.7337 - val_loss: 1.3154 - val_acc: 0.7384\n",
+ "\n",
+ "Epoch 00229: val_acc improved from 0.72246 to 0.73842, saving model to ./ModelSnapshots/LSTM-v1-229.h5\n",
+ "Epoch 230/3000\n",
+ " - 31s - loss: 1.2042 - acc: 0.7366 - val_loss: 1.4406 - val_acc: 0.7011\n",
+ "\n",
+ "Epoch 00230: val_acc did not improve from 0.73842\n",
+ "Epoch 231/3000\n",
+ " - 29s - loss: 1.2119 - acc: 0.7314 - val_loss: 1.3201 - val_acc: 0.7349\n",
+ "\n",
+ "Epoch 00231: val_acc did not improve from 0.73842\n",
+ "Epoch 232/3000\n",
+ " - 31s - loss: 1.2014 - acc: 0.7348 - val_loss: 1.3087 - val_acc: 0.7326\n",
+ "\n",
+ "Epoch 00232: val_acc did not improve from 0.73842\n",
+ "Epoch 233/3000\n",
+ " - 31s - loss: 1.2043 - acc: 0.7331 - val_loss: 1.3322 - val_acc: 0.7275\n",
+ "\n",
+ "Epoch 00233: val_acc did not improve from 0.73842\n",
+ "Epoch 234/3000\n",
+ " - 31s - loss: 1.1833 - acc: 0.7428 - val_loss: 1.3529 - val_acc: 0.7267\n",
+ "\n",
+ "Epoch 00234: val_acc did not improve from 0.73842\n",
+ "Epoch 235/3000\n",
+ " - 30s - loss: 1.1983 - acc: 0.7411 - val_loss: 1.2910 - val_acc: 0.7384\n",
+ "\n",
+ "Epoch 00235: val_acc did not improve from 0.73842\n",
+ "Epoch 236/3000\n",
+ " - 31s - loss: 1.1755 - acc: 0.7406 - val_loss: 1.3134 - val_acc: 0.7361\n",
+ "\n",
+ "Epoch 00236: val_acc did not improve from 0.73842\n",
+ "Epoch 237/3000\n",
+ " - 29s - loss: 1.1770 - acc: 0.7415 - val_loss: 1.2957 - val_acc: 0.7361\n",
+ "\n",
+ "Epoch 00237: val_acc did not improve from 0.73842\n",
+ "Epoch 238/3000\n",
+ " - 31s - loss: 1.1732 - acc: 0.7494 - val_loss: 1.3361 - val_acc: 0.7267\n",
+ "\n",
+ "Epoch 00238: val_acc did not improve from 0.73842\n",
+ "Epoch 239/3000\n",
+ " - 31s - loss: 1.1891 - acc: 0.7414 - val_loss: 1.3118 - val_acc: 0.7396\n",
+ "\n",
+ "Epoch 00239: val_acc improved from 0.73842 to 0.73959, saving model to ./ModelSnapshots/LSTM-v1-239.h5\n",
+ "Epoch 240/3000\n",
+ " - 30s - loss: 1.1584 - acc: 0.7524 - val_loss: 1.2570 - val_acc: 0.7454\n",
+ "\n",
+ "Epoch 00240: val_acc improved from 0.73959 to 0.74543, saving model to ./ModelSnapshots/LSTM-v1-240.h5\n",
+ "Epoch 241/3000\n",
+ " - 30s - loss: 1.1461 - acc: 0.7583 - val_loss: 1.2675 - val_acc: 0.7388\n",
+ "\n",
+ "Epoch 00241: val_acc did not improve from 0.74543\n",
+ "Epoch 242/3000\n",
+ " - 30s - loss: 1.1188 - acc: 0.7609 - val_loss: 1.2694 - val_acc: 0.7326\n",
+ "\n",
+ "Epoch 00242: val_acc did not improve from 0.74543\n",
+ "Epoch 243/3000\n",
+ " - 32s - loss: 1.1292 - acc: 0.7595 - val_loss: 1.2226 - val_acc: 0.7520\n",
+ "\n",
+ "Epoch 00243: val_acc improved from 0.74543 to 0.75204, saving model to ./ModelSnapshots/LSTM-v1-243.h5\n",
+ "Epoch 244/3000\n",
+ " - 29s - loss: 1.1100 - acc: 0.7674 - val_loss: 1.2890 - val_acc: 0.7373\n",
+ "\n",
+ "Epoch 00244: val_acc did not improve from 0.75204\n",
+ "Epoch 245/3000\n",
+ " - 30s - loss: 1.1264 - acc: 0.7654 - val_loss: 1.2975 - val_acc: 0.7380\n",
+ "\n",
+ "Epoch 00245: val_acc did not improve from 0.75204\n",
+ "Epoch 246/3000\n",
+ " - 31s - loss: 1.1119 - acc: 0.7690 - val_loss: 1.2496 - val_acc: 0.7462\n",
+ "\n",
+ "Epoch 00246: val_acc did not improve from 0.75204\n",
+ "Epoch 247/3000\n",
+ " - 30s - loss: 1.1066 - acc: 0.7669 - val_loss: 1.2268 - val_acc: 0.7513\n",
+ "\n",
+ "Epoch 00247: val_acc did not improve from 0.75204\n",
+ "Epoch 248/3000\n",
+ " - 31s - loss: 1.0889 - acc: 0.7743 - val_loss: 1.2417 - val_acc: 0.7466\n",
+ "\n",
+ "Epoch 00248: val_acc did not improve from 0.75204\n",
+ "Epoch 249/3000\n",
+ " - 31s - loss: 1.0890 - acc: 0.7722 - val_loss: 1.3563 - val_acc: 0.7236\n",
+ "\n",
+ "Epoch 00249: val_acc did not improve from 0.75204\n",
+ "Epoch 250/3000\n",
+ " - 31s - loss: 1.0936 - acc: 0.7722 - val_loss: 1.2323 - val_acc: 0.7571\n",
+ "\n",
+ "Epoch 00250: val_acc improved from 0.75204 to 0.75710, saving model to ./ModelSnapshots/LSTM-v1-250.h5\n",
+ "Epoch 251/3000\n",
+ " - 31s - loss: 1.0894 - acc: 0.7731 - val_loss: 1.2385 - val_acc: 0.7489\n",
+ "\n",
+ "Epoch 00251: val_acc did not improve from 0.75710\n",
+ "Epoch 252/3000\n",
+ " - 30s - loss: 1.0899 - acc: 0.7749 - val_loss: 1.2425 - val_acc: 0.7493\n",
+ "\n",
+ "Epoch 00252: val_acc did not improve from 0.75710\n",
+ "Epoch 253/3000\n",
+ " - 31s - loss: 1.0503 - acc: 0.7814 - val_loss: 1.2751 - val_acc: 0.7489\n",
+ "\n",
+ "Epoch 00253: val_acc did not improve from 0.75710\n",
+ "Epoch 254/3000\n",
+ " - 29s - loss: 1.0586 - acc: 0.7797 - val_loss: 1.2895 - val_acc: 0.7345\n",
+ "\n",
+ "Epoch 00254: val_acc did not improve from 0.75710\n",
+ "Epoch 255/3000\n",
+ " - 31s - loss: 1.0501 - acc: 0.7886 - val_loss: 1.2303 - val_acc: 0.7552\n",
+ "\n",
+ "Epoch 00255: val_acc did not improve from 0.75710\n",
+ "Epoch 256/3000\n",
+ " - 30s - loss: 1.0533 - acc: 0.7838 - val_loss: 1.2029 - val_acc: 0.7602\n",
+ "\n",
+ "Epoch 00256: val_acc improved from 0.75710 to 0.76022, saving model to ./ModelSnapshots/LSTM-v1-256.h5\n",
+ "Epoch 257/3000\n",
+ " - 31s - loss: 1.0425 - acc: 0.7856 - val_loss: 1.2786 - val_acc: 0.7439\n",
+ "\n",
+ "Epoch 00257: val_acc did not improve from 0.76022\n",
+ "Epoch 258/3000\n",
+ " - 32s - loss: 1.0390 - acc: 0.7906 - val_loss: 1.2131 - val_acc: 0.7614\n",
+ "\n",
+ "Epoch 00258: val_acc improved from 0.76022 to 0.76139, saving model to ./ModelSnapshots/LSTM-v1-258.h5\n",
+ "Epoch 259/3000\n",
+ " - 30s - loss: 1.0428 - acc: 0.7888 - val_loss: 1.2970 - val_acc: 0.7291\n",
+ "\n",
+ "Epoch 00259: val_acc did not improve from 0.76139\n",
+ "Epoch 260/3000\n",
+ " - 30s - loss: 1.0305 - acc: 0.7942 - val_loss: 1.2377 - val_acc: 0.7520\n",
+ "\n",
+ "Epoch 00260: val_acc did not improve from 0.76139\n",
+ "Epoch 261/3000\n",
+ " - 30s - loss: 1.0519 - acc: 0.7852 - val_loss: 1.2362 - val_acc: 0.7559\n",
+ "\n",
+ "Epoch 00261: val_acc did not improve from 0.76139\n",
+ "Epoch 262/3000\n",
+ " - 32s - loss: 1.0285 - acc: 0.7980 - val_loss: 1.2101 - val_acc: 0.7594\n",
+ "\n",
+ "Epoch 00262: val_acc did not improve from 0.76139\n",
+ "Epoch 263/3000\n",
+ " - 30s - loss: 1.0345 - acc: 0.7953 - val_loss: 1.1472 - val_acc: 0.7731\n",
+ "\n",
+ "Epoch 00263: val_acc improved from 0.76139 to 0.77306, saving model to ./ModelSnapshots/LSTM-v1-263.h5\n",
+ "Epoch 264/3000\n",
+ " - 31s - loss: 1.0020 - acc: 0.8001 - val_loss: 1.1879 - val_acc: 0.7692\n",
+ "\n",
+ "Epoch 00264: val_acc did not improve from 0.77306\n",
+ "Epoch 265/3000\n",
+ " - 31s - loss: 1.0146 - acc: 0.7980 - val_loss: 1.2670 - val_acc: 0.7415\n",
+ "\n",
+ "Epoch 00265: val_acc did not improve from 0.77306\n",
+ "Epoch 266/3000\n",
+ " - 31s - loss: 0.9991 - acc: 0.7998 - val_loss: 1.1559 - val_acc: 0.7645\n",
+ "\n",
+ "Epoch 00266: val_acc did not improve from 0.77306\n",
+ "Epoch 267/3000\n",
+ " - 30s - loss: 0.9823 - acc: 0.8048 - val_loss: 1.3164 - val_acc: 0.7353\n",
+ "\n",
+ "Epoch 00267: val_acc did not improve from 0.77306\n",
+ "Epoch 268/3000\n",
+ " - 32s - loss: 1.0059 - acc: 0.8021 - val_loss: 1.2022 - val_acc: 0.7610\n",
+ "\n",
+ "Epoch 00268: val_acc did not improve from 0.77306\n",
+ "Epoch 269/3000\n",
+ " - 30s - loss: 1.0039 - acc: 0.8062 - val_loss: 1.2348 - val_acc: 0.7594\n",
+ "\n",
+ "Epoch 00269: val_acc did not improve from 0.77306\n",
+ "Epoch 270/3000\n",
+ " - 33s - loss: 0.9958 - acc: 0.8006 - val_loss: 1.1789 - val_acc: 0.7633\n",
+ "\n",
+ "Epoch 00270: val_acc did not improve from 0.77306\n",
+ "Epoch 271/3000\n",
+ " - 31s - loss: 0.9794 - acc: 0.8069 - val_loss: 1.2661 - val_acc: 0.7482\n",
+ "\n",
+ "Epoch 00271: val_acc did not improve from 0.77306\n",
+ "Epoch 272/3000\n",
+ " - 29s - loss: 0.9759 - acc: 0.8050 - val_loss: 1.1369 - val_acc: 0.7770\n",
+ "\n",
+ "Epoch 00272: val_acc improved from 0.77306 to 0.77696, saving model to ./ModelSnapshots/LSTM-v1-272.h5\n",
+ "Epoch 273/3000\n",
+ " - 31s - loss: 0.9737 - acc: 0.8072 - val_loss: 1.2008 - val_acc: 0.7583\n",
+ "\n",
+ "Epoch 00273: val_acc did not improve from 0.77696\n",
+ "Epoch 274/3000\n",
+ " - 32s - loss: 0.9466 - acc: 0.8170 - val_loss: 1.2508 - val_acc: 0.7544\n",
+ "\n",
+ "Epoch 00274: val_acc did not improve from 0.77696\n",
+ "Epoch 275/3000\n",
+ " - 31s - loss: 0.9635 - acc: 0.8123 - val_loss: 1.2048 - val_acc: 0.7645\n",
+ "\n",
+ "Epoch 00275: val_acc did not improve from 0.77696\n",
+ "Epoch 276/3000\n",
+ " - 31s - loss: 0.9897 - acc: 0.8039 - val_loss: 1.1925 - val_acc: 0.7614\n",
+ "\n",
+ "Epoch 00276: val_acc did not improve from 0.77696\n",
+ "Epoch 277/3000\n",
+ " - 31s - loss: 0.9691 - acc: 0.8110 - val_loss: 1.2056 - val_acc: 0.7614\n",
+ "\n",
+ "Epoch 00277: val_acc did not improve from 0.77696\n",
+ "Epoch 278/3000\n",
+ " - 31s - loss: 0.9639 - acc: 0.8149 - val_loss: 1.1680 - val_acc: 0.7773\n",
+ "\n",
+ "Epoch 00278: val_acc improved from 0.77696 to 0.77735, saving model to ./ModelSnapshots/LSTM-v1-278.h5\n",
+ "Epoch 279/3000\n",
+ " - 29s - loss: 0.9557 - acc: 0.8155 - val_loss: 1.1965 - val_acc: 0.7622\n",
+ "\n",
+ "Epoch 00279: val_acc did not improve from 0.77735\n",
+ "Epoch 280/3000\n",
+ " - 30s - loss: 0.9714 - acc: 0.8111 - val_loss: 1.1554 - val_acc: 0.7793\n",
+ "\n",
+ "Epoch 00280: val_acc improved from 0.77735 to 0.77929, saving model to ./ModelSnapshots/LSTM-v1-280.h5\n",
+ "Epoch 281/3000\n",
+ " - 32s - loss: 0.9599 - acc: 0.8137 - val_loss: 1.1530 - val_acc: 0.7773\n",
+ "\n",
+ "Epoch 00281: val_acc did not improve from 0.77929\n",
+ "Epoch 282/3000\n",
+ " - 31s - loss: 0.9504 - acc: 0.8149 - val_loss: 1.1114 - val_acc: 0.7801\n",
+ "\n",
+ "Epoch 00282: val_acc improved from 0.77929 to 0.78007, saving model to ./ModelSnapshots/LSTM-v1-282.h5\n",
+ "Epoch 283/3000\n",
+ " - 30s - loss: 0.9316 - acc: 0.8238 - val_loss: 1.1905 - val_acc: 0.7711\n",
+ "\n",
+ "Epoch 00283: val_acc did not improve from 0.78007\n",
+ "Epoch 284/3000\n",
+ " - 30s - loss: 0.9260 - acc: 0.8217 - val_loss: 1.1318 - val_acc: 0.7754\n",
+ "\n",
+ "Epoch 00284: val_acc did not improve from 0.78007\n",
+ "Epoch 285/3000\n",
+ " - 31s - loss: 0.9372 - acc: 0.8140 - val_loss: 1.1064 - val_acc: 0.7793\n",
+ "\n",
+ "Epoch 00285: val_acc did not improve from 0.78007\n",
+ "Epoch 286/3000\n",
+ " - 31s - loss: 0.9319 - acc: 0.8220 - val_loss: 1.1778 - val_acc: 0.7805\n",
+ "\n",
+ "Epoch 00286: val_acc improved from 0.78007 to 0.78046, saving model to ./ModelSnapshots/LSTM-v1-286.h5\n",
+ "Epoch 287/3000\n",
+ " - 32s - loss: 0.9362 - acc: 0.8223 - val_loss: 1.2934 - val_acc: 0.7427\n",
+ "\n",
+ "Epoch 00287: val_acc did not improve from 0.78046\n",
+ "Epoch 288/3000\n",
+ " - 30s - loss: 0.9249 - acc: 0.8193 - val_loss: 1.1700 - val_acc: 0.7742\n",
+ "\n",
+ "Epoch 00288: val_acc did not improve from 0.78046\n",
+ "Epoch 289/3000\n",
+ " - 31s - loss: 0.9194 - acc: 0.8271 - val_loss: 1.1633 - val_acc: 0.7808\n",
+ "\n",
+ "Epoch 00289: val_acc improved from 0.78046 to 0.78085, saving model to ./ModelSnapshots/LSTM-v1-289.h5\n",
+ "Epoch 290/3000\n",
+ " - 31s - loss: 0.9366 - acc: 0.8202 - val_loss: 1.1169 - val_acc: 0.7824\n",
+ "\n",
+ "Epoch 00290: val_acc improved from 0.78085 to 0.78241, saving model to ./ModelSnapshots/LSTM-v1-290.h5\n",
+ "Epoch 291/3000\n",
+ " - 32s - loss: 0.9164 - acc: 0.8274 - val_loss: 1.1341 - val_acc: 0.7820\n",
+ "\n",
+ "Epoch 00291: val_acc did not improve from 0.78241\n",
+ "Epoch 292/3000\n",
+ " - 29s - loss: 0.8966 - acc: 0.8357 - val_loss: 1.1341 - val_acc: 0.7785\n",
+ "\n",
+ "Epoch 00292: val_acc did not improve from 0.78241\n",
+ "Epoch 293/3000\n",
+ " - 32s - loss: 0.9236 - acc: 0.8179 - val_loss: 1.1191 - val_acc: 0.7805\n",
+ "\n",
+ "Epoch 00293: val_acc did not improve from 0.78241\n",
+ "Epoch 294/3000\n",
+ " - 29s - loss: 0.9055 - acc: 0.8303 - val_loss: 1.1488 - val_acc: 0.7645\n",
+ "\n",
+ "Epoch 00294: val_acc did not improve from 0.78241\n",
+ "Epoch 295/3000\n",
+ " - 31s - loss: 0.9143 - acc: 0.8197 - val_loss: 1.1451 - val_acc: 0.7801\n",
+ "\n",
+ "Epoch 00295: val_acc did not improve from 0.78241\n",
+ "Epoch 296/3000\n",
+ " - 30s - loss: 0.9034 - acc: 0.8320 - val_loss: 1.1841 - val_acc: 0.7641\n",
+ "\n",
+ "Epoch 00296: val_acc did not improve from 0.78241\n",
+ "Epoch 297/3000\n",
+ " - 30s - loss: 0.8955 - acc: 0.8294 - val_loss: 1.1107 - val_acc: 0.7844\n",
+ "\n",
+ "Epoch 00297: val_acc improved from 0.78241 to 0.78435, saving model to ./ModelSnapshots/LSTM-v1-297.h5\n",
+ "Epoch 298/3000\n",
+ " - 31s - loss: 0.8964 - acc: 0.8287 - val_loss: 1.1443 - val_acc: 0.7816\n",
+ "\n",
+ "Epoch 00298: val_acc did not improve from 0.78435\n",
+ "Epoch 299/3000\n",
+ " - 30s - loss: 0.9105 - acc: 0.8261 - val_loss: 1.1129 - val_acc: 0.7824\n",
+ "\n",
+ "Epoch 00299: val_acc did not improve from 0.78435\n",
+ "Epoch 300/3000\n",
+ " - 30s - loss: 0.8894 - acc: 0.8308 - val_loss: 1.0606 - val_acc: 0.7960\n",
+ "\n",
+ "Epoch 00300: val_acc improved from 0.78435 to 0.79603, saving model to ./ModelSnapshots/LSTM-v1-300.h5\n",
+ "Epoch 301/3000\n",
+ " - 31s - loss: 0.8664 - acc: 0.8401 - val_loss: 1.1634 - val_acc: 0.7773\n",
+ "\n",
+ "Epoch 00301: val_acc did not improve from 0.79603\n",
+ "Epoch 302/3000\n",
+ " - 30s - loss: 0.8953 - acc: 0.8302 - val_loss: 1.1336 - val_acc: 0.7847\n",
+ "\n",
+ "Epoch 00302: val_acc did not improve from 0.79603\n",
+ "Epoch 303/3000\n",
+ " - 31s - loss: 0.8806 - acc: 0.8383 - val_loss: 1.0879 - val_acc: 0.7902\n",
+ "\n",
+ "Epoch 00303: val_acc did not improve from 0.79603\n",
+ "Epoch 304/3000\n",
+ " - 31s - loss: 0.8800 - acc: 0.8356 - val_loss: 1.0758 - val_acc: 0.7953\n",
+ "\n",
+ "Epoch 00304: val_acc did not improve from 0.79603\n",
+ "Epoch 305/3000\n",
+ " - 29s - loss: 0.8654 - acc: 0.8362 - val_loss: 1.0556 - val_acc: 0.8046\n",
+ "\n",
+ "Epoch 00305: val_acc improved from 0.79603 to 0.80459, saving model to ./ModelSnapshots/LSTM-v1-305.h5\n",
+ "Epoch 306/3000\n",
+ " - 32s - loss: 0.8789 - acc: 0.8357 - val_loss: 1.1561 - val_acc: 0.7789\n",
+ "\n",
+ "Epoch 00306: val_acc did not improve from 0.80459\n",
+ "Epoch 307/3000\n",
+ " - 32s - loss: 0.8707 - acc: 0.8353 - val_loss: 1.1298 - val_acc: 0.7871\n",
+ "\n",
+ "Epoch 00307: val_acc did not improve from 0.80459\n",
+ "Epoch 308/3000\n",
+ " - 29s - loss: 0.8679 - acc: 0.8380 - val_loss: 1.0785 - val_acc: 0.7937\n",
+ "\n",
+ "Epoch 00308: val_acc did not improve from 0.80459\n",
+ "Epoch 309/3000\n",
+ " - 30s - loss: 0.8757 - acc: 0.8388 - val_loss: 1.1374 - val_acc: 0.7789\n",
+ "\n",
+ "Epoch 00309: val_acc did not improve from 0.80459\n",
+ "Epoch 310/3000\n",
+ " - 30s - loss: 0.8687 - acc: 0.8397 - val_loss: 1.1523 - val_acc: 0.7766\n",
+ "\n",
+ "Epoch 00310: val_acc did not improve from 0.80459\n",
+ "Epoch 311/3000\n",
+ " - 32s - loss: 0.8563 - acc: 0.8409 - val_loss: 1.0828 - val_acc: 0.7875\n",
+ "\n",
+ "Epoch 00311: val_acc did not improve from 0.80459\n",
+ "Epoch 312/3000\n",
+ " - 31s - loss: 0.8589 - acc: 0.8441 - val_loss: 1.0684 - val_acc: 0.7875\n",
+ "\n",
+ "Epoch 00312: val_acc did not improve from 0.80459\n",
+ "Epoch 313/3000\n",
+ " - 29s - loss: 0.8382 - acc: 0.8487 - val_loss: 1.0717 - val_acc: 0.7921\n",
+ "\n",
+ "Epoch 00313: val_acc did not improve from 0.80459\n",
+ "Epoch 314/3000\n",
+ " - 30s - loss: 0.8542 - acc: 0.8373 - val_loss: 1.0827 - val_acc: 0.7941\n",
+ "\n",
+ "Epoch 00314: val_acc did not improve from 0.80459\n",
+ "Epoch 315/3000\n",
+ " - 31s - loss: 0.8552 - acc: 0.8448 - val_loss: 1.0516 - val_acc: 0.7921\n",
+ "\n",
+ "Epoch 00315: val_acc did not improve from 0.80459\n",
+ "Epoch 316/3000\n",
+ " - 31s - loss: 0.8527 - acc: 0.8419 - val_loss: 1.1131 - val_acc: 0.7836\n",
+ "\n",
+ "Epoch 00316: val_acc did not improve from 0.80459\n",
+ "Epoch 317/3000\n",
+ " - 28s - loss: 0.8614 - acc: 0.8403 - val_loss: 1.0943 - val_acc: 0.8019\n",
+ "\n",
+ "Epoch 00317: val_acc did not improve from 0.80459\n",
+ "Epoch 318/3000\n",
+ " - 31s - loss: 0.8316 - acc: 0.8489 - val_loss: 1.0385 - val_acc: 0.7988\n",
+ "\n",
+ "Epoch 00318: val_acc did not improve from 0.80459\n",
+ "Epoch 319/3000\n",
+ " - 31s - loss: 0.8518 - acc: 0.8481 - val_loss: 1.1031 - val_acc: 0.7960\n",
+ "\n",
+ "Epoch 00319: val_acc did not improve from 0.80459\n",
+ "Epoch 320/3000\n",
+ " - 30s - loss: 0.8534 - acc: 0.8406 - val_loss: 1.0793 - val_acc: 0.8007\n",
+ "\n",
+ "Epoch 00320: val_acc did not improve from 0.80459\n",
+ "Epoch 321/3000\n",
+ " - 29s - loss: 0.8349 - acc: 0.8518 - val_loss: 1.0607 - val_acc: 0.7984\n",
+ "\n",
+ "Epoch 00321: val_acc did not improve from 0.80459\n",
+ "Epoch 322/3000\n",
+ " - 32s - loss: 0.8109 - acc: 0.8564 - val_loss: 1.0437 - val_acc: 0.8100\n",
+ "\n",
+ "Epoch 00322: val_acc improved from 0.80459 to 0.81004, saving model to ./ModelSnapshots/LSTM-v1-322.h5\n",
+ "Epoch 323/3000\n",
+ " - 28s - loss: 0.8229 - acc: 0.8505 - val_loss: 1.0624 - val_acc: 0.7991\n",
+ "\n",
+ "Epoch 00323: val_acc did not improve from 0.81004\n",
+ "Epoch 324/3000\n",
+ " - 31s - loss: 0.8265 - acc: 0.8501 - val_loss: 1.0149 - val_acc: 0.8077\n",
+ "\n",
+ "Epoch 00324: val_acc did not improve from 0.81004\n",
+ "Epoch 325/3000\n",
+ " - 29s - loss: 0.8172 - acc: 0.8554 - val_loss: 1.0836 - val_acc: 0.7988\n",
+ "\n",
+ "Epoch 00325: val_acc did not improve from 0.81004\n",
+ "Epoch 326/3000\n",
+ " - 31s - loss: 0.8124 - acc: 0.8527 - val_loss: 1.0351 - val_acc: 0.8058\n",
+ "\n",
+ "Epoch 00326: val_acc did not improve from 0.81004\n",
+ "Epoch 327/3000\n",
+ " - 31s - loss: 0.8286 - acc: 0.8477 - val_loss: 1.0974 - val_acc: 0.7929\n",
+ "\n",
+ "Epoch 00327: val_acc did not improve from 0.81004\n",
+ "Epoch 328/3000\n",
+ " - 29s - loss: 0.8182 - acc: 0.8477 - val_loss: 1.0974 - val_acc: 0.7953\n",
+ "\n",
+ "Epoch 00328: val_acc did not improve from 0.81004\n",
+ "Epoch 329/3000\n",
+ " - 32s - loss: 0.8181 - acc: 0.8527 - val_loss: 1.0273 - val_acc: 0.8046\n",
+ "\n",
+ "Epoch 00329: val_acc did not improve from 0.81004\n",
+ "Epoch 330/3000\n",
+ " - 29s - loss: 0.8217 - acc: 0.8533 - val_loss: 1.0229 - val_acc: 0.8089\n",
+ "\n",
+ "Epoch 00330: val_acc did not improve from 0.81004\n",
+ "Epoch 331/3000\n",
+ " - 32s - loss: 0.8182 - acc: 0.8545 - val_loss: 1.0118 - val_acc: 0.8015\n",
+ "\n",
+ "Epoch 00331: val_acc did not improve from 0.81004\n",
+ "Epoch 332/3000\n",
+ " - 30s - loss: 0.8302 - acc: 0.8498 - val_loss: 1.0600 - val_acc: 0.7917\n",
+ "\n",
+ "Epoch 00332: val_acc did not improve from 0.81004\n",
+ "Epoch 333/3000\n",
+ " - 30s - loss: 0.8036 - acc: 0.8584 - val_loss: 1.0229 - val_acc: 0.7984\n",
+ "\n",
+ "Epoch 00333: val_acc did not improve from 0.81004\n",
+ "Epoch 334/3000\n",
+ " - 32s - loss: 0.8212 - acc: 0.8522 - val_loss: 1.0190 - val_acc: 0.8089\n",
+ "\n",
+ "Epoch 00334: val_acc did not improve from 0.81004\n",
+ "Epoch 335/3000\n",
+ " - 29s - loss: 0.8111 - acc: 0.8560 - val_loss: 1.0210 - val_acc: 0.8093\n",
+ "\n",
+ "Epoch 00335: val_acc did not improve from 0.81004\n",
+ "Epoch 336/3000\n",
+ " - 30s - loss: 0.7902 - acc: 0.8591 - val_loss: 1.0020 - val_acc: 0.8034\n",
+ "\n",
+ "Epoch 00336: val_acc did not improve from 0.81004\n",
+ "Epoch 337/3000\n",
+ " - 30s - loss: 0.8256 - acc: 0.8501 - val_loss: 1.0480 - val_acc: 0.7976\n",
+ "\n",
+ "Epoch 00337: val_acc did not improve from 0.81004\n",
+ "Epoch 338/3000\n",
+ " - 30s - loss: 0.8021 - acc: 0.8647 - val_loss: 1.0069 - val_acc: 0.8089\n",
+ "\n",
+ "Epoch 00338: val_acc did not improve from 0.81004\n",
+ "Epoch 339/3000\n",
+ " - 33s - loss: 0.8013 - acc: 0.8599 - val_loss: 1.0146 - val_acc: 0.8089\n",
+ "\n",
+ "Epoch 00339: val_acc did not improve from 0.81004\n",
+ "Epoch 340/3000\n",
+ " - 29s - loss: 0.7923 - acc: 0.8528 - val_loss: 1.0288 - val_acc: 0.8054\n",
+ "\n",
+ "Epoch 00340: val_acc did not improve from 0.81004\n",
+ "Epoch 341/3000\n",
+ " - 32s - loss: 0.7774 - acc: 0.8607 - val_loss: 1.0056 - val_acc: 0.8069\n",
+ "\n",
+ "Epoch 00341: val_acc did not improve from 0.81004\n",
+ "Epoch 342/3000\n",
+ " - 29s - loss: 0.7752 - acc: 0.8616 - val_loss: 1.0470 - val_acc: 0.7964\n",
+ "\n",
+ "Epoch 00342: val_acc did not improve from 0.81004\n",
+ "Epoch 343/3000\n",
+ " - 30s - loss: 0.7852 - acc: 0.8551 - val_loss: 1.0359 - val_acc: 0.8015\n",
+ "\n",
+ "Epoch 00343: val_acc did not improve from 0.81004\n",
+ "Epoch 344/3000\n",
+ " - 32s - loss: 0.7997 - acc: 0.8570 - val_loss: 1.0239 - val_acc: 0.8116\n",
+ "\n",
+ "Epoch 00344: val_acc improved from 0.81004 to 0.81160, saving model to ./ModelSnapshots/LSTM-v1-344.h5\n",
+ "Epoch 345/3000\n",
+ " - 31s - loss: 0.7952 - acc: 0.8593 - val_loss: 1.0793 - val_acc: 0.7953\n",
+ "\n",
+ "Epoch 00345: val_acc did not improve from 0.81160\n",
+ "Epoch 346/3000\n",
+ " - 31s - loss: 0.8099 - acc: 0.8563 - val_loss: 1.0191 - val_acc: 0.8046\n",
+ "\n",
+ "Epoch 00346: val_acc did not improve from 0.81160\n",
+ "Epoch 347/3000\n",
+ " - 30s - loss: 0.7724 - acc: 0.8664 - val_loss: 1.0545 - val_acc: 0.7980\n",
+ "\n",
+ "Epoch 00347: val_acc did not improve from 0.81160\n",
+ "Epoch 348/3000\n",
+ " - 30s - loss: 0.8051 - acc: 0.8579 - val_loss: 1.0120 - val_acc: 0.8108\n",
+ "\n",
+ "Epoch 00348: val_acc did not improve from 0.81160\n",
+ "Epoch 349/3000\n",
+ " - 31s - loss: 0.7717 - acc: 0.8671 - val_loss: 1.0290 - val_acc: 0.8030\n",
+ "\n",
+ "Epoch 00349: val_acc did not improve from 0.81160\n",
+ "Epoch 350/3000\n",
+ " - 30s - loss: 0.8217 - acc: 0.8486 - val_loss: 1.0772 - val_acc: 0.8026\n",
+ "\n",
+ "Epoch 00350: val_acc did not improve from 0.81160\n",
+ "Epoch 351/3000\n",
+ " - 30s - loss: 0.8070 - acc: 0.8601 - val_loss: 1.0177 - val_acc: 0.8097\n",
+ "\n",
+ "Epoch 00351: val_acc did not improve from 0.81160\n",
+ "Epoch 352/3000\n",
+ " - 30s - loss: 0.7847 - acc: 0.8610 - val_loss: 1.0620 - val_acc: 0.8085\n",
+ "\n",
+ "Epoch 00352: val_acc did not improve from 0.81160\n",
+ "Epoch 353/3000\n",
+ " - 31s - loss: 0.7748 - acc: 0.8671 - val_loss: 1.0256 - val_acc: 0.8089\n",
+ "\n",
+ "Epoch 00353: val_acc did not improve from 0.81160\n",
+ "Epoch 354/3000\n",
+ " - 29s - loss: 0.7647 - acc: 0.8671 - val_loss: 1.0351 - val_acc: 0.8073\n",
+ "\n",
+ "Epoch 00354: val_acc did not improve from 0.81160\n",
+ "Epoch 355/3000\n",
+ " - 32s - loss: 0.7753 - acc: 0.8668 - val_loss: 1.0517 - val_acc: 0.8097\n",
+ "\n",
+ "Epoch 00355: val_acc did not improve from 0.81160\n",
+ "Epoch 356/3000\n",
+ " - 30s - loss: 0.7730 - acc: 0.8697 - val_loss: 1.0437 - val_acc: 0.8104\n",
+ "\n",
+ "Epoch 00356: val_acc did not improve from 0.81160\n",
+ "Epoch 357/3000\n",
+ " - 31s - loss: 0.7611 - acc: 0.8681 - val_loss: 1.0030 - val_acc: 0.8085\n",
+ "\n",
+ "Epoch 00357: val_acc did not improve from 0.81160\n",
+ "Epoch 358/3000\n",
+ " - 30s - loss: 0.7800 - acc: 0.8644 - val_loss: 1.0532 - val_acc: 0.8046\n",
+ "\n",
+ "Epoch 00358: val_acc did not improve from 0.81160\n",
+ "Epoch 359/3000\n",
+ " - 32s - loss: 0.7624 - acc: 0.8690 - val_loss: 1.0162 - val_acc: 0.8038\n",
+ "\n",
+ "Epoch 00359: val_acc did not improve from 0.81160\n",
+ "Epoch 360/3000\n",
+ " - 30s - loss: 0.7607 - acc: 0.8659 - val_loss: 1.0487 - val_acc: 0.7945\n",
+ "\n",
+ "Epoch 00360: val_acc did not improve from 0.81160\n",
+ "Epoch 361/3000\n",
+ " - 30s - loss: 0.7536 - acc: 0.8693 - val_loss: 1.0520 - val_acc: 0.8011\n",
+ "\n",
+ "Epoch 00361: val_acc did not improve from 0.81160\n",
+ "Epoch 362/3000\n",
+ " - 30s - loss: 0.7701 - acc: 0.8675 - val_loss: 1.0541 - val_acc: 0.8054\n",
+ "\n",
+ "Epoch 00362: val_acc did not improve from 0.81160\n",
+ "Epoch 363/3000\n",
+ " - 31s - loss: 0.7684 - acc: 0.8702 - val_loss: 0.9667 - val_acc: 0.8206\n",
+ "\n",
+ "Epoch 00363: val_acc improved from 0.81160 to 0.82055, saving model to ./ModelSnapshots/LSTM-v1-363.h5\n",
+ "Epoch 364/3000\n",
+ " - 32s - loss: 0.7409 - acc: 0.8687 - val_loss: 0.9735 - val_acc: 0.8194\n",
+ "\n",
+ "Epoch 00364: val_acc did not improve from 0.82055\n",
+ "Epoch 365/3000\n",
+ " - 31s - loss: 0.7482 - acc: 0.8688 - val_loss: 0.9960 - val_acc: 0.8132\n",
+ "\n",
+ "Epoch 00365: val_acc did not improve from 0.82055\n",
+ "Epoch 366/3000\n",
+ " - 29s - loss: 0.7497 - acc: 0.8748 - val_loss: 0.9743 - val_acc: 0.8159\n",
+ "\n",
+ "Epoch 00366: val_acc did not improve from 0.82055\n",
+ "Epoch 367/3000\n",
+ " - 30s - loss: 0.7463 - acc: 0.8721 - val_loss: 1.0472 - val_acc: 0.8038\n",
+ "\n",
+ "Epoch 00367: val_acc did not improve from 0.82055\n",
+ "Epoch 368/3000\n",
+ " - 30s - loss: 0.7387 - acc: 0.8730 - val_loss: 0.9919 - val_acc: 0.8170\n",
+ "\n",
+ "Epoch 00368: val_acc did not improve from 0.82055\n",
+ "Epoch 369/3000\n",
+ " - 30s - loss: 0.7461 - acc: 0.8717 - val_loss: 0.9794 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00369: val_acc improved from 0.82055 to 0.82756, saving model to ./ModelSnapshots/LSTM-v1-369.h5\n",
+ "Epoch 370/3000\n",
+ " - 29s - loss: 0.7625 - acc: 0.8664 - val_loss: 1.0434 - val_acc: 0.8089\n",
+ "\n",
+ "Epoch 00370: val_acc did not improve from 0.82756\n",
+ "Epoch 371/3000\n",
+ " - 31s - loss: 0.7543 - acc: 0.8724 - val_loss: 1.0034 - val_acc: 0.8128\n",
+ "\n",
+ "Epoch 00371: val_acc did not improve from 0.82756\n",
+ "Epoch 372/3000\n",
+ " - 31s - loss: 0.7449 - acc: 0.8711 - val_loss: 0.9866 - val_acc: 0.8132\n",
+ "\n",
+ "Epoch 00372: val_acc did not improve from 0.82756\n",
+ "Epoch 373/3000\n",
+ " - 30s - loss: 0.7404 - acc: 0.8785 - val_loss: 1.0178 - val_acc: 0.8073\n",
+ "\n",
+ "Epoch 00373: val_acc did not improve from 0.82756\n",
+ "Epoch 374/3000\n",
+ " - 30s - loss: 0.7380 - acc: 0.8727 - val_loss: 1.0444 - val_acc: 0.8081\n",
+ "\n",
+ "Epoch 00374: val_acc did not improve from 0.82756\n",
+ "Epoch 375/3000\n",
+ " - 31s - loss: 0.7159 - acc: 0.8785 - val_loss: 1.0397 - val_acc: 0.8093\n",
+ "\n",
+ "Epoch 00375: val_acc did not improve from 0.82756\n",
+ "Epoch 376/3000\n",
+ " - 30s - loss: 0.7268 - acc: 0.8783 - val_loss: 1.0407 - val_acc: 0.8155\n",
+ "\n",
+ "Epoch 00376: val_acc did not improve from 0.82756\n",
+ "Epoch 377/3000\n",
+ " - 31s - loss: 0.7648 - acc: 0.8702 - val_loss: 1.0383 - val_acc: 0.8143\n",
+ "\n",
+ "Epoch 00377: val_acc did not improve from 0.82756\n",
+ "Epoch 378/3000\n",
+ " - 30s - loss: 0.7418 - acc: 0.8744 - val_loss: 0.9557 - val_acc: 0.8252\n",
+ "\n",
+ "Epoch 00378: val_acc did not improve from 0.82756\n",
+ "Epoch 379/3000\n",
+ " - 31s - loss: 0.7631 - acc: 0.8691 - val_loss: 1.0920 - val_acc: 0.7925\n",
+ "\n",
+ "Epoch 00379: val_acc did not improve from 0.82756\n",
+ "Epoch 380/3000\n",
+ " - 31s - loss: 0.7403 - acc: 0.8738 - val_loss: 0.9497 - val_acc: 0.8174\n",
+ "\n",
+ "Epoch 00380: val_acc did not improve from 0.82756\n",
+ "Epoch 381/3000\n",
+ " - 29s - loss: 0.7280 - acc: 0.8756 - val_loss: 1.0507 - val_acc: 0.8069\n",
+ "\n",
+ "Epoch 00381: val_acc did not improve from 0.82756\n",
+ "Epoch 382/3000\n",
+ " - 31s - loss: 0.7467 - acc: 0.8714 - val_loss: 1.0647 - val_acc: 0.8042\n",
+ "\n",
+ "Epoch 00382: val_acc did not improve from 0.82756\n",
+ "Epoch 383/3000\n",
+ " - 30s - loss: 0.7147 - acc: 0.8798 - val_loss: 1.0431 - val_acc: 0.8120\n",
+ "\n",
+ "Epoch 00383: val_acc did not improve from 0.82756\n",
+ "Epoch 384/3000\n",
+ " - 31s - loss: 0.7253 - acc: 0.8767 - val_loss: 1.0173 - val_acc: 0.8147\n",
+ "\n",
+ "Epoch 00384: val_acc did not improve from 0.82756\n",
+ "Epoch 385/3000\n",
+ " - 32s - loss: 0.7267 - acc: 0.8791 - val_loss: 1.0566 - val_acc: 0.8097\n",
+ "\n",
+ "Epoch 00385: val_acc did not improve from 0.82756\n",
+ "Epoch 386/3000\n",
+ " - 29s - loss: 0.7260 - acc: 0.8747 - val_loss: 1.0022 - val_acc: 0.8163\n",
+ "\n",
+ "Epoch 00386: val_acc did not improve from 0.82756\n",
+ "Epoch 387/3000\n",
+ " - 32s - loss: 0.7341 - acc: 0.8739 - val_loss: 1.0223 - val_acc: 0.8104\n",
+ "\n",
+ "Epoch 00387: val_acc did not improve from 0.82756\n",
+ "Epoch 388/3000\n",
+ " - 31s - loss: 0.7092 - acc: 0.8809 - val_loss: 1.0109 - val_acc: 0.8178\n",
+ "\n",
+ "Epoch 00388: val_acc did not improve from 0.82756\n",
+ "Epoch 389/3000\n",
+ " - 29s - loss: 0.7368 - acc: 0.8789 - val_loss: 1.0589 - val_acc: 0.8042\n",
+ "\n",
+ "Epoch 00389: val_acc did not improve from 0.82756\n",
+ "Epoch 390/3000\n",
+ " - 31s - loss: 0.7278 - acc: 0.8755 - val_loss: 0.9617 - val_acc: 0.8209\n",
+ "\n",
+ "Epoch 00390: val_acc did not improve from 0.82756\n",
+ "Epoch 391/3000\n",
+ " - 31s - loss: 0.7115 - acc: 0.8832 - val_loss: 0.9896 - val_acc: 0.8151\n",
+ "\n",
+ "Epoch 00391: val_acc did not improve from 0.82756\n",
+ "Epoch 392/3000\n",
+ " - 29s - loss: 0.7167 - acc: 0.8806 - val_loss: 0.9741 - val_acc: 0.8229\n",
+ "\n",
+ "Epoch 00392: val_acc did not improve from 0.82756\n",
+ "Epoch 393/3000\n",
+ " - 32s - loss: 0.7167 - acc: 0.8807 - val_loss: 1.0856 - val_acc: 0.7914\n",
+ "\n",
+ "Epoch 00393: val_acc did not improve from 0.82756\n",
+ "Epoch 394/3000\n",
+ " - 29s - loss: 0.7462 - acc: 0.8747 - val_loss: 0.9327 - val_acc: 0.8307\n",
+ "\n",
+ "Epoch 00394: val_acc improved from 0.82756 to 0.83067, saving model to ./ModelSnapshots/LSTM-v1-394.h5\n",
+ "Epoch 395/3000\n",
+ " - 29s - loss: 0.7202 - acc: 0.8762 - val_loss: 0.9674 - val_acc: 0.8318\n",
+ "\n",
+ "Epoch 00395: val_acc improved from 0.83067 to 0.83184, saving model to ./ModelSnapshots/LSTM-v1-395.h5\n",
+ "Epoch 396/3000\n",
+ " - 32s - loss: 0.7261 - acc: 0.8800 - val_loss: 1.0525 - val_acc: 0.8100\n",
+ "\n",
+ "Epoch 00396: val_acc did not improve from 0.83184\n",
+ "Epoch 397/3000\n",
+ " - 29s - loss: 0.7172 - acc: 0.8807 - val_loss: 0.9525 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00397: val_acc did not improve from 0.83184\n",
+ "Epoch 398/3000\n",
+ " - 31s - loss: 0.7170 - acc: 0.8815 - val_loss: 1.0284 - val_acc: 0.8151\n",
+ "\n",
+ "Epoch 00398: val_acc did not improve from 0.83184\n",
+ "Epoch 399/3000\n",
+ " - 31s - loss: 0.7246 - acc: 0.8792 - val_loss: 0.9941 - val_acc: 0.8252\n",
+ "\n",
+ "Epoch 00399: val_acc did not improve from 0.83184\n",
+ "Epoch 400/3000\n",
+ " - 29s - loss: 0.7085 - acc: 0.8818 - val_loss: 1.0028 - val_acc: 0.8147\n",
+ "\n",
+ "Epoch 00400: val_acc did not improve from 0.83184\n",
+ "Epoch 401/3000\n",
+ " - 31s - loss: 0.7097 - acc: 0.8800 - val_loss: 1.0143 - val_acc: 0.8167\n",
+ "\n",
+ "Epoch 00401: val_acc did not improve from 0.83184\n",
+ "Epoch 402/3000\n",
+ " - 31s - loss: 0.7154 - acc: 0.8815 - val_loss: 0.9683 - val_acc: 0.8287\n",
+ "\n",
+ "Epoch 00402: val_acc did not improve from 0.83184\n",
+ "Epoch 403/3000\n",
+ " - 31s - loss: 0.6962 - acc: 0.8866 - val_loss: 1.0118 - val_acc: 0.8128\n",
+ "\n",
+ "Epoch 00403: val_acc did not improve from 0.83184\n",
+ "Epoch 404/3000\n",
+ " - 31s - loss: 0.7014 - acc: 0.8847 - val_loss: 0.9564 - val_acc: 0.8260\n",
+ "\n",
+ "Epoch 00404: val_acc did not improve from 0.83184\n",
+ "Epoch 405/3000\n",
+ " - 32s - loss: 0.6864 - acc: 0.8872 - val_loss: 1.0074 - val_acc: 0.8135\n",
+ "\n",
+ "Epoch 00405: val_acc did not improve from 0.83184\n",
+ "Epoch 406/3000\n",
+ " - 30s - loss: 0.7071 - acc: 0.8801 - val_loss: 1.0440 - val_acc: 0.8120\n",
+ "\n",
+ "Epoch 00406: val_acc did not improve from 0.83184\n",
+ "Epoch 407/3000\n",
+ " - 31s - loss: 0.7017 - acc: 0.8812 - val_loss: 0.9209 - val_acc: 0.8322\n",
+ "\n",
+ "Epoch 00407: val_acc improved from 0.83184 to 0.83223, saving model to ./ModelSnapshots/LSTM-v1-407.h5\n",
+ "Epoch 408/3000\n",
+ " - 30s - loss: 0.6888 - acc: 0.8818 - val_loss: 0.9703 - val_acc: 0.8244\n",
+ "\n",
+ "Epoch 00408: val_acc did not improve from 0.83223\n",
+ "Epoch 409/3000\n",
+ " - 29s - loss: 0.7102 - acc: 0.8807 - val_loss: 0.9795 - val_acc: 0.8093\n",
+ "\n",
+ "Epoch 00409: val_acc did not improve from 0.83223\n",
+ "Epoch 410/3000\n",
+ " - 31s - loss: 0.7016 - acc: 0.8832 - val_loss: 1.0575 - val_acc: 0.8085\n",
+ "\n",
+ "Epoch 00410: val_acc did not improve from 0.83223\n",
+ "Epoch 411/3000\n",
+ " - 32s - loss: 0.7020 - acc: 0.8842 - val_loss: 1.0143 - val_acc: 0.8128\n",
+ "\n",
+ "Epoch 00411: val_acc did not improve from 0.83223\n",
+ "Epoch 412/3000\n",
+ " - 29s - loss: 0.6913 - acc: 0.8883 - val_loss: 0.9655 - val_acc: 0.8283\n",
+ "\n",
+ "Epoch 00412: val_acc did not improve from 0.83223\n",
+ "Epoch 413/3000\n",
+ " - 30s - loss: 0.6830 - acc: 0.8863 - val_loss: 0.9658 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00413: val_acc did not improve from 0.83223\n",
+ "Epoch 414/3000\n",
+ " - 33s - loss: 0.6980 - acc: 0.8844 - val_loss: 0.9808 - val_acc: 0.8170\n",
+ "\n",
+ "Epoch 00414: val_acc did not improve from 0.83223\n",
+ "Epoch 415/3000\n",
+ " - 30s - loss: 0.7306 - acc: 0.8819 - val_loss: 0.9565 - val_acc: 0.8225\n",
+ "\n",
+ "Epoch 00415: val_acc did not improve from 0.83223\n",
+ "Epoch 416/3000\n",
+ " - 30s - loss: 0.6786 - acc: 0.8869 - val_loss: 0.9240 - val_acc: 0.8377\n",
+ "\n",
+ "Epoch 00416: val_acc improved from 0.83223 to 0.83768, saving model to ./ModelSnapshots/LSTM-v1-416.h5\n",
+ "Epoch 417/3000\n",
+ " - 31s - loss: 0.6800 - acc: 0.8865 - val_loss: 1.0447 - val_acc: 0.8135\n",
+ "\n",
+ "Epoch 00417: val_acc did not improve from 0.83768\n",
+ "Epoch 418/3000\n",
+ " - 30s - loss: 0.6849 - acc: 0.8884 - val_loss: 1.0136 - val_acc: 0.8112\n",
+ "\n",
+ "Epoch 00418: val_acc did not improve from 0.83768\n",
+ "Epoch 419/3000\n",
+ " - 32s - loss: 0.7044 - acc: 0.8833 - val_loss: 0.9230 - val_acc: 0.8342\n",
+ "\n",
+ "Epoch 00419: val_acc did not improve from 0.83768\n",
+ "Epoch 420/3000\n",
+ " - 29s - loss: 0.6763 - acc: 0.8936 - val_loss: 0.9175 - val_acc: 0.8307\n",
+ "\n",
+ "Epoch 00420: val_acc did not improve from 0.83768\n",
+ "Epoch 421/3000\n",
+ " - 31s - loss: 0.6856 - acc: 0.8863 - val_loss: 0.9694 - val_acc: 0.8198\n",
+ "\n",
+ "Epoch 00421: val_acc did not improve from 0.83768\n",
+ "Epoch 422/3000\n",
+ " - 30s - loss: 0.6960 - acc: 0.8862 - val_loss: 0.9924 - val_acc: 0.8221\n",
+ "\n",
+ "Epoch 00422: val_acc did not improve from 0.83768\n",
+ "Epoch 423/3000\n",
+ " - 31s - loss: 0.6809 - acc: 0.8872 - val_loss: 0.9568 - val_acc: 0.8256\n",
+ "\n",
+ "Epoch 00423: val_acc did not improve from 0.83768\n",
+ "Epoch 424/3000\n",
+ " - 32s - loss: 0.6743 - acc: 0.8869 - val_loss: 0.9685 - val_acc: 0.8272\n",
+ "\n",
+ "Epoch 00424: val_acc did not improve from 0.83768\n",
+ "Epoch 425/3000\n",
+ " - 30s - loss: 0.6761 - acc: 0.8909 - val_loss: 0.9737 - val_acc: 0.8170\n",
+ "\n",
+ "Epoch 00425: val_acc did not improve from 0.83768\n",
+ "Epoch 426/3000\n",
+ " - 29s - loss: 0.6811 - acc: 0.8877 - val_loss: 0.9116 - val_acc: 0.8357\n",
+ "\n",
+ "Epoch 00426: val_acc did not improve from 0.83768\n",
+ "Epoch 427/3000\n",
+ " - 32s - loss: 0.6778 - acc: 0.8905 - val_loss: 0.9641 - val_acc: 0.8190\n",
+ "\n",
+ "Epoch 00427: val_acc did not improve from 0.83768\n",
+ "Epoch 428/3000\n",
+ " - 30s - loss: 0.6847 - acc: 0.8896 - val_loss: 0.9862 - val_acc: 0.8241\n",
+ "\n",
+ "Epoch 00428: val_acc did not improve from 0.83768\n",
+ "Epoch 429/3000\n",
+ " - 31s - loss: 0.6805 - acc: 0.8893 - val_loss: 0.9692 - val_acc: 0.8139\n",
+ "\n",
+ "Epoch 00429: val_acc did not improve from 0.83768\n",
+ "Epoch 430/3000\n",
+ " - 30s - loss: 0.6785 - acc: 0.8881 - val_loss: 0.9187 - val_acc: 0.8307\n",
+ "\n",
+ "Epoch 00430: val_acc did not improve from 0.83768\n",
+ "Epoch 431/3000\n",
+ " - 31s - loss: 0.6840 - acc: 0.8827 - val_loss: 0.9195 - val_acc: 0.8268\n",
+ "\n",
+ "Epoch 00431: val_acc did not improve from 0.83768\n",
+ "Epoch 432/3000\n",
+ " - 30s - loss: 0.6737 - acc: 0.8883 - val_loss: 1.0007 - val_acc: 0.8252\n",
+ "\n",
+ "Epoch 00432: val_acc did not improve from 0.83768\n",
+ "Epoch 433/3000\n",
+ " - 29s - loss: 0.6745 - acc: 0.8890 - val_loss: 0.9841 - val_acc: 0.8268\n",
+ "\n",
+ "Epoch 00433: val_acc did not improve from 0.83768\n",
+ "Epoch 434/3000\n",
+ " - 30s - loss: 0.6760 - acc: 0.8869 - val_loss: 0.9422 - val_acc: 0.8326\n",
+ "\n",
+ "Epoch 00434: val_acc did not improve from 0.83768\n",
+ "Epoch 435/3000\n",
+ " - 32s - loss: 0.6775 - acc: 0.8913 - val_loss: 1.0096 - val_acc: 0.8186\n",
+ "\n",
+ "Epoch 00435: val_acc did not improve from 0.83768\n",
+ "Epoch 436/3000\n",
+ " - 31s - loss: 0.6689 - acc: 0.8919 - val_loss: 0.9567 - val_acc: 0.8264\n",
+ "\n",
+ "Epoch 00436: val_acc did not improve from 0.83768\n",
+ "Epoch 437/3000\n",
+ " - 30s - loss: 0.6949 - acc: 0.8889 - val_loss: 1.0712 - val_acc: 0.8081\n",
+ "\n",
+ "Epoch 00437: val_acc did not improve from 0.83768\n",
+ "Epoch 438/3000\n",
+ " - 30s - loss: 0.6819 - acc: 0.8871 - val_loss: 0.9538 - val_acc: 0.8233\n",
+ "\n",
+ "Epoch 00438: val_acc did not improve from 0.83768\n",
+ "Epoch 439/3000\n",
+ " - 31s - loss: 0.6687 - acc: 0.8912 - val_loss: 1.0322 - val_acc: 0.8139\n",
+ "\n",
+ "Epoch 00439: val_acc did not improve from 0.83768\n",
+ "Epoch 440/3000\n",
+ " - 30s - loss: 0.6751 - acc: 0.8889 - val_loss: 0.9630 - val_acc: 0.8256\n",
+ "\n",
+ "Epoch 00440: val_acc did not improve from 0.83768\n",
+ "Epoch 441/3000\n",
+ " - 31s - loss: 0.6622 - acc: 0.8915 - val_loss: 0.9559 - val_acc: 0.8241\n",
+ "\n",
+ "Epoch 00441: val_acc did not improve from 0.83768\n",
+ "Epoch 442/3000\n",
+ " - 32s - loss: 0.6632 - acc: 0.8934 - val_loss: 0.9613 - val_acc: 0.8206\n",
+ "\n",
+ "Epoch 00442: val_acc did not improve from 0.83768\n",
+ "Epoch 443/3000\n",
+ " - 30s - loss: 0.6698 - acc: 0.8934 - val_loss: 0.9520 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00443: val_acc did not improve from 0.83768\n",
+ "Epoch 444/3000\n",
+ " - 32s - loss: 0.6589 - acc: 0.8978 - val_loss: 0.9992 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00444: val_acc did not improve from 0.83768\n",
+ "Epoch 445/3000\n",
+ " - 29s - loss: 0.6619 - acc: 0.8930 - val_loss: 0.9466 - val_acc: 0.8291\n",
+ "\n",
+ "Epoch 00445: val_acc did not improve from 0.83768\n",
+ "Epoch 446/3000\n",
+ " - 31s - loss: 0.6575 - acc: 0.8949 - val_loss: 0.9641 - val_acc: 0.8248\n",
+ "\n",
+ "Epoch 00446: val_acc did not improve from 0.83768\n",
+ "Epoch 447/3000\n",
+ " - 32s - loss: 0.6643 - acc: 0.8937 - val_loss: 0.9351 - val_acc: 0.8295\n",
+ "\n",
+ "Epoch 00447: val_acc did not improve from 0.83768\n",
+ "Epoch 448/3000\n",
+ " - 31s - loss: 0.6409 - acc: 0.8967 - val_loss: 1.0199 - val_acc: 0.8167\n",
+ "\n",
+ "Epoch 00448: val_acc did not improve from 0.83768\n",
+ "Epoch 449/3000\n",
+ " - 30s - loss: 0.6707 - acc: 0.8913 - val_loss: 0.9216 - val_acc: 0.8342\n",
+ "\n",
+ "Epoch 00449: val_acc did not improve from 0.83768\n",
+ "Epoch 450/3000\n",
+ " - 30s - loss: 0.6656 - acc: 0.8889 - val_loss: 1.0240 - val_acc: 0.8209\n",
+ "\n",
+ "Epoch 00450: val_acc did not improve from 0.83768\n",
+ "Epoch 451/3000\n",
+ " - 30s - loss: 0.6704 - acc: 0.8890 - val_loss: 0.9845 - val_acc: 0.8322\n",
+ "\n",
+ "Epoch 00451: val_acc did not improve from 0.83768\n",
+ "Epoch 452/3000\n",
+ " - 31s - loss: 0.6570 - acc: 0.8942 - val_loss: 0.9081 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00452: val_acc improved from 0.83768 to 0.84274, saving model to ./ModelSnapshots/LSTM-v1-452.h5\n",
+ "Epoch 453/3000\n",
+ " - 32s - loss: 0.6687 - acc: 0.8904 - val_loss: 0.9301 - val_acc: 0.8268\n",
+ "\n",
+ "Epoch 00453: val_acc did not improve from 0.84274\n",
+ "Epoch 454/3000\n",
+ " - 29s - loss: 0.6622 - acc: 0.8909 - val_loss: 0.9276 - val_acc: 0.8338\n",
+ "\n",
+ "Epoch 00454: val_acc did not improve from 0.84274\n",
+ "Epoch 455/3000\n",
+ " - 33s - loss: 0.6512 - acc: 0.8945 - val_loss: 0.8959 - val_acc: 0.8377\n",
+ "\n",
+ "Epoch 00455: val_acc did not improve from 0.84274\n",
+ "Epoch 456/3000\n",
+ " - 30s - loss: 0.6434 - acc: 0.8975 - val_loss: 1.0070 - val_acc: 0.8202\n",
+ "\n",
+ "Epoch 00456: val_acc did not improve from 0.84274\n",
+ "Epoch 457/3000\n",
+ " - 31s - loss: 0.6649 - acc: 0.8913 - val_loss: 0.8930 - val_acc: 0.8346\n",
+ "\n",
+ "Epoch 00457: val_acc did not improve from 0.84274\n",
+ "Epoch 458/3000\n",
+ " - 31s - loss: 0.6446 - acc: 0.8963 - val_loss: 0.9378 - val_acc: 0.8346\n",
+ "\n",
+ "Epoch 00458: val_acc did not improve from 0.84274\n",
+ "Epoch 459/3000\n",
+ " - 32s - loss: 0.6460 - acc: 0.8933 - val_loss: 0.9946 - val_acc: 0.8163\n",
+ "\n",
+ "Epoch 00459: val_acc did not improve from 0.84274\n",
+ "Epoch 460/3000\n",
+ " - 30s - loss: 0.6373 - acc: 0.9002 - val_loss: 0.9416 - val_acc: 0.8342\n",
+ "\n",
+ "Epoch 00460: val_acc did not improve from 0.84274\n",
+ "Epoch 461/3000\n",
+ " - 32s - loss: 0.6478 - acc: 0.8995 - val_loss: 0.9468 - val_acc: 0.8299\n",
+ "\n",
+ "Epoch 00461: val_acc did not improve from 0.84274\n",
+ "Epoch 462/3000\n",
+ " - 30s - loss: 0.6483 - acc: 0.8946 - val_loss: 0.9606 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00462: val_acc did not improve from 0.84274\n",
+ "Epoch 463/3000\n",
+ " - 31s - loss: 0.6565 - acc: 0.8949 - val_loss: 0.9546 - val_acc: 0.8272\n",
+ "\n",
+ "Epoch 00463: val_acc did not improve from 0.84274\n",
+ "Epoch 464/3000\n",
+ " - 30s - loss: 0.6421 - acc: 0.8978 - val_loss: 0.9096 - val_acc: 0.8385\n",
+ "\n",
+ "Epoch 00464: val_acc did not improve from 0.84274\n",
+ "Epoch 465/3000\n",
+ " - 30s - loss: 0.6411 - acc: 0.9008 - val_loss: 0.9590 - val_acc: 0.8307\n",
+ "\n",
+ "Epoch 00465: val_acc did not improve from 0.84274\n",
+ "Epoch 466/3000\n",
+ " - 32s - loss: 0.6386 - acc: 0.8975 - val_loss: 0.9847 - val_acc: 0.8209\n",
+ "\n",
+ "Epoch 00466: val_acc did not improve from 0.84274\n",
+ "Epoch 467/3000\n",
+ " - 29s - loss: 0.6587 - acc: 0.8966 - val_loss: 0.9514 - val_acc: 0.8299\n",
+ "\n",
+ "Epoch 00467: val_acc did not improve from 0.84274\n",
+ "Epoch 468/3000\n",
+ " - 32s - loss: 0.6471 - acc: 0.8955 - val_loss: 0.9761 - val_acc: 0.8268\n",
+ "\n",
+ "Epoch 00468: val_acc did not improve from 0.84274\n",
+ "Epoch 469/3000\n",
+ " - 29s - loss: 0.6345 - acc: 0.8989 - val_loss: 0.9755 - val_acc: 0.8272\n",
+ "\n",
+ "Epoch 00469: val_acc did not improve from 0.84274\n",
+ "Epoch 470/3000\n",
+ " - 33s - loss: 0.6434 - acc: 0.8961 - val_loss: 0.9514 - val_acc: 0.8357\n",
+ "\n",
+ "Epoch 00470: val_acc did not improve from 0.84274\n",
+ "Epoch 471/3000\n",
+ " - 29s - loss: 0.6647 - acc: 0.8910 - val_loss: 0.9720 - val_acc: 0.8229\n",
+ "\n",
+ "Epoch 00471: val_acc did not improve from 0.84274\n",
+ "Epoch 472/3000\n",
+ " - 29s - loss: 0.6248 - acc: 0.9028 - val_loss: 1.0007 - val_acc: 0.8104\n",
+ "\n",
+ "Epoch 00472: val_acc did not improve from 0.84274\n",
+ "Epoch 473/3000\n",
+ " - 31s - loss: 0.6511 - acc: 0.8948 - val_loss: 0.9711 - val_acc: 0.8252\n",
+ "\n",
+ "Epoch 00473: val_acc did not improve from 0.84274\n",
+ "Epoch 474/3000\n",
+ " - 31s - loss: 0.6358 - acc: 0.8996 - val_loss: 0.9535 - val_acc: 0.8307\n",
+ "\n",
+ "Epoch 00474: val_acc did not improve from 0.84274\n",
+ "Epoch 475/3000\n",
+ " - 30s - loss: 0.6237 - acc: 0.9023 - val_loss: 0.9315 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00475: val_acc did not improve from 0.84274\n",
+ "Epoch 476/3000\n",
+ " - 31s - loss: 0.6363 - acc: 0.8990 - val_loss: 0.9828 - val_acc: 0.8260\n",
+ "\n",
+ "Epoch 00476: val_acc did not improve from 0.84274\n",
+ "Epoch 477/3000\n",
+ " - 29s - loss: 0.6449 - acc: 0.8960 - val_loss: 0.9743 - val_acc: 0.8264\n",
+ "\n",
+ "Epoch 00477: val_acc did not improve from 0.84274\n",
+ "Epoch 478/3000\n",
+ " - 31s - loss: 0.6603 - acc: 0.8909 - val_loss: 1.0280 - val_acc: 0.8147\n",
+ "\n",
+ "Epoch 00478: val_acc did not improve from 0.84274\n",
+ "Epoch 479/3000\n",
+ " - 31s - loss: 0.6296 - acc: 0.8976 - val_loss: 0.9203 - val_acc: 0.8346\n",
+ "\n",
+ "Epoch 00479: val_acc did not improve from 0.84274\n",
+ "Epoch 480/3000\n",
+ " - 32s - loss: 0.6218 - acc: 0.9029 - val_loss: 0.8990 - val_acc: 0.8295\n",
+ "\n",
+ "Epoch 00480: val_acc did not improve from 0.84274\n",
+ "Epoch 481/3000\n",
+ " - 30s - loss: 0.6194 - acc: 0.9029 - val_loss: 0.8981 - val_acc: 0.8322\n",
+ "\n",
+ "Epoch 00481: val_acc did not improve from 0.84274\n",
+ "Epoch 482/3000\n",
+ " - 31s - loss: 0.6214 - acc: 0.9066 - val_loss: 0.9426 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00482: val_acc did not improve from 0.84274\n",
+ "Epoch 483/3000\n",
+ " - 31s - loss: 0.6336 - acc: 0.8964 - val_loss: 0.9820 - val_acc: 0.8287\n",
+ "\n",
+ "Epoch 00483: val_acc did not improve from 0.84274\n",
+ "Epoch 484/3000\n",
+ " - 29s - loss: 0.6330 - acc: 0.8976 - val_loss: 0.9949 - val_acc: 0.8221\n",
+ "\n",
+ "Epoch 00484: val_acc did not improve from 0.84274\n",
+ "Epoch 485/3000\n",
+ " - 31s - loss: 0.6278 - acc: 0.8976 - val_loss: 0.9057 - val_acc: 0.8396\n",
+ "\n",
+ "Epoch 00485: val_acc did not improve from 0.84274\n",
+ "Epoch 486/3000\n",
+ " - 31s - loss: 0.6178 - acc: 0.9031 - val_loss: 0.9053 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00486: val_acc did not improve from 0.84274\n",
+ "Epoch 487/3000\n",
+ " - 29s - loss: 0.6206 - acc: 0.9038 - val_loss: 0.9601 - val_acc: 0.8283\n",
+ "\n",
+ "Epoch 00487: val_acc did not improve from 0.84274\n",
+ "\n",
+ "Epoch 00487: ReduceLROnPlateau reducing learning rate to 9.499999760009814e-05.\n",
+ "Epoch 488/3000\n",
+ " - 31s - loss: 0.6209 - acc: 0.9022 - val_loss: 0.9219 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00488: val_acc did not improve from 0.84274\n",
+ "Epoch 489/3000\n",
+ " - 29s - loss: 0.6177 - acc: 0.9014 - val_loss: 0.9217 - val_acc: 0.8338\n",
+ "\n",
+ "Epoch 00489: val_acc did not improve from 0.84274\n",
+ "Epoch 490/3000\n",
+ " - 31s - loss: 0.6256 - acc: 0.9041 - val_loss: 0.9317 - val_acc: 0.8381\n",
+ "\n",
+ "Epoch 00490: val_acc did not improve from 0.84274\n",
+ "Epoch 491/3000\n",
+ " - 32s - loss: 0.6288 - acc: 0.9007 - val_loss: 0.9040 - val_acc: 0.8396\n",
+ "\n",
+ "Epoch 00491: val_acc did not improve from 0.84274\n",
+ "Epoch 492/3000\n",
+ " - 29s - loss: 0.6171 - acc: 0.9023 - val_loss: 0.9252 - val_acc: 0.8334\n",
+ "\n",
+ "Epoch 00492: val_acc did not improve from 0.84274\n",
+ "Epoch 493/3000\n",
+ " - 32s - loss: 0.6164 - acc: 0.9026 - val_loss: 0.9921 - val_acc: 0.8209\n",
+ "\n",
+ "Epoch 00493: val_acc did not improve from 0.84274\n",
+ "Epoch 494/3000\n",
+ " - 31s - loss: 0.6113 - acc: 0.9040 - val_loss: 0.9450 - val_acc: 0.8287\n",
+ "\n",
+ "Epoch 00494: val_acc did not improve from 0.84274\n",
+ "Epoch 495/3000\n",
+ " - 30s - loss: 0.6169 - acc: 0.9014 - val_loss: 0.9621 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00495: val_acc did not improve from 0.84274\n",
+ "Epoch 496/3000\n",
+ " - 29s - loss: 0.6022 - acc: 0.9062 - val_loss: 0.9252 - val_acc: 0.8342\n",
+ "\n",
+ "Epoch 00496: val_acc did not improve from 0.84274\n",
+ "Epoch 497/3000\n",
+ " - 32s - loss: 0.6095 - acc: 0.9049 - val_loss: 0.9989 - val_acc: 0.8198\n",
+ "\n",
+ "Epoch 00497: val_acc did not improve from 0.84274\n",
+ "Epoch 498/3000\n",
+ " - 29s - loss: 0.6256 - acc: 0.8986 - val_loss: 0.9319 - val_acc: 0.8365\n",
+ "\n",
+ "Epoch 00498: val_acc did not improve from 0.84274\n",
+ "Epoch 499/3000\n",
+ " - 30s - loss: 0.6068 - acc: 0.9064 - val_loss: 0.9506 - val_acc: 0.8357\n",
+ "\n",
+ "Epoch 00499: val_acc did not improve from 0.84274\n",
+ "Epoch 500/3000\n",
+ " - 33s - loss: 0.6110 - acc: 0.9035 - val_loss: 0.9020 - val_acc: 0.8322\n",
+ "\n",
+ "Epoch 00500: val_acc did not improve from 0.84274\n",
+ "Epoch 501/3000\n",
+ " - 29s - loss: 0.5981 - acc: 0.9067 - val_loss: 0.9281 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00501: val_acc did not improve from 0.84274\n",
+ "Epoch 502/3000\n",
+ " - 30s - loss: 0.6126 - acc: 0.9044 - val_loss: 0.9046 - val_acc: 0.8408\n",
+ "\n",
+ "Epoch 00502: val_acc did not improve from 0.84274\n",
+ "Epoch 503/3000\n",
+ " - 29s - loss: 0.6225 - acc: 0.8992 - val_loss: 0.8745 - val_acc: 0.8404\n",
+ "\n",
+ "Epoch 00503: val_acc did not improve from 0.84274\n",
+ "Epoch 504/3000\n",
+ " - 30s - loss: 0.6211 - acc: 0.9044 - val_loss: 0.9955 - val_acc: 0.8225\n",
+ "\n",
+ "Epoch 00504: val_acc did not improve from 0.84274\n",
+ "Epoch 505/3000\n",
+ " - 32s - loss: 0.6212 - acc: 0.8993 - val_loss: 0.8950 - val_acc: 0.8408\n",
+ "\n",
+ "Epoch 00505: val_acc did not improve from 0.84274\n",
+ "Epoch 506/3000\n",
+ " - 31s - loss: 0.6094 - acc: 0.9053 - val_loss: 0.9933 - val_acc: 0.8233\n",
+ "\n",
+ "Epoch 00506: val_acc did not improve from 0.84274\n",
+ "Epoch 507/3000\n",
+ " - 29s - loss: 0.6067 - acc: 0.9037 - val_loss: 0.9005 - val_acc: 0.8400\n",
+ "\n",
+ "Epoch 00507: val_acc did not improve from 0.84274\n",
+ "Epoch 508/3000\n",
+ " - 32s - loss: 0.6113 - acc: 0.9035 - val_loss: 0.9273 - val_acc: 0.8412\n",
+ "\n",
+ "Epoch 00508: val_acc did not improve from 0.84274\n",
+ "Epoch 509/3000\n",
+ " - 28s - loss: 0.6074 - acc: 0.9062 - val_loss: 0.8480 - val_acc: 0.8420\n",
+ "\n",
+ "Epoch 00509: val_acc did not improve from 0.84274\n",
+ "Epoch 510/3000\n",
+ " - 32s - loss: 0.6066 - acc: 0.9041 - val_loss: 0.9606 - val_acc: 0.8303\n",
+ "\n",
+ "Epoch 00510: val_acc did not improve from 0.84274\n",
+ "Epoch 511/3000\n",
+ " - 29s - loss: 0.6203 - acc: 0.9017 - val_loss: 0.9328 - val_acc: 0.8338\n",
+ "\n",
+ "Epoch 00511: val_acc did not improve from 0.84274\n",
+ "Epoch 512/3000\n",
+ " - 30s - loss: 0.6036 - acc: 0.9050 - val_loss: 0.8756 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00512: val_acc improved from 0.84274 to 0.84663, saving model to ./ModelSnapshots/LSTM-v1-512.h5\n",
+ "Epoch 513/3000\n",
+ " - 31s - loss: 0.5944 - acc: 0.9053 - val_loss: 0.8823 - val_acc: 0.8420\n",
+ "\n",
+ "Epoch 00513: val_acc did not improve from 0.84663\n",
+ "Epoch 514/3000\n",
+ " - 30s - loss: 0.5969 - acc: 0.9105 - val_loss: 0.9693 - val_acc: 0.8299\n",
+ "\n",
+ "Epoch 00514: val_acc did not improve from 0.84663\n",
+ "Epoch 515/3000\n",
+ " - 31s - loss: 0.6015 - acc: 0.9032 - val_loss: 0.9040 - val_acc: 0.8388\n",
+ "\n",
+ "Epoch 00515: val_acc did not improve from 0.84663\n",
+ "Epoch 516/3000\n",
+ " - 30s - loss: 0.6017 - acc: 0.9022 - val_loss: 0.9142 - val_acc: 0.8373\n",
+ "\n",
+ "Epoch 00516: val_acc did not improve from 0.84663\n",
+ "Epoch 517/3000\n",
+ " - 30s - loss: 0.6046 - acc: 0.9014 - val_loss: 1.0028 - val_acc: 0.8217\n",
+ "\n",
+ "Epoch 00517: val_acc did not improve from 0.84663\n",
+ "Epoch 518/3000\n",
+ " - 30s - loss: 0.6052 - acc: 0.9028 - val_loss: 0.9557 - val_acc: 0.8248\n",
+ "\n",
+ "Epoch 00518: val_acc did not improve from 0.84663\n",
+ "Epoch 519/3000\n",
+ " - 31s - loss: 0.6058 - acc: 0.9049 - val_loss: 0.9401 - val_acc: 0.8315\n",
+ "\n",
+ "Epoch 00519: val_acc did not improve from 0.84663\n",
+ "Epoch 520/3000\n",
+ " - 31s - loss: 0.5908 - acc: 0.9061 - val_loss: 0.9356 - val_acc: 0.8361\n",
+ "\n",
+ "Epoch 00520: val_acc did not improve from 0.84663\n",
+ "Epoch 521/3000\n",
+ " - 30s - loss: 0.5869 - acc: 0.9056 - val_loss: 0.9188 - val_acc: 0.8385\n",
+ "\n",
+ "Epoch 00521: val_acc did not improve from 0.84663\n",
+ "Epoch 522/3000\n",
+ " - 30s - loss: 0.5908 - acc: 0.9081 - val_loss: 0.9738 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00522: val_acc did not improve from 0.84663\n",
+ "Epoch 523/3000\n",
+ " - 30s - loss: 0.5922 - acc: 0.9099 - val_loss: 0.8900 - val_acc: 0.8412\n",
+ "\n",
+ "Epoch 00523: val_acc did not improve from 0.84663\n",
+ "Epoch 524/3000\n",
+ " - 30s - loss: 0.5825 - acc: 0.9102 - val_loss: 0.8922 - val_acc: 0.8388\n",
+ "\n",
+ "Epoch 00524: val_acc did not improve from 0.84663\n",
+ "Epoch 525/3000\n",
+ " - 32s - loss: 0.5922 - acc: 0.9090 - val_loss: 1.0021 - val_acc: 0.8209\n",
+ "\n",
+ "Epoch 00525: val_acc did not improve from 0.84663\n",
+ "Epoch 526/3000\n",
+ " - 29s - loss: 0.6023 - acc: 0.9052 - val_loss: 0.9643 - val_acc: 0.8346\n",
+ "\n",
+ "Epoch 00526: val_acc did not improve from 0.84663\n",
+ "Epoch 527/3000\n",
+ " - 30s - loss: 0.6061 - acc: 0.9035 - val_loss: 0.8903 - val_acc: 0.8353\n",
+ "\n",
+ "Epoch 00527: val_acc did not improve from 0.84663\n",
+ "Epoch 528/3000\n",
+ " - 30s - loss: 0.6028 - acc: 0.9075 - val_loss: 0.9078 - val_acc: 0.8396\n",
+ "\n",
+ "Epoch 00528: val_acc did not improve from 0.84663\n",
+ "Epoch 529/3000\n",
+ " - 30s - loss: 0.5908 - acc: 0.9085 - val_loss: 0.8997 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00529: val_acc did not improve from 0.84663\n",
+ "Epoch 530/3000\n",
+ " - 30s - loss: 0.5908 - acc: 0.9069 - val_loss: 0.8971 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00530: val_acc did not improve from 0.84663\n",
+ "Epoch 531/3000\n",
+ " - 32s - loss: 0.6067 - acc: 0.8996 - val_loss: 0.9471 - val_acc: 0.8252\n",
+ "\n",
+ "Epoch 00531: val_acc did not improve from 0.84663\n",
+ "Epoch 532/3000\n",
+ " - 29s - loss: 0.5918 - acc: 0.9058 - val_loss: 0.9280 - val_acc: 0.8346\n",
+ "\n",
+ "Epoch 00532: val_acc did not improve from 0.84663\n",
+ "Epoch 533/3000\n",
+ " - 30s - loss: 0.5971 - acc: 0.9050 - val_loss: 0.8608 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00533: val_acc did not improve from 0.84663\n",
+ "Epoch 534/3000\n",
+ " - 30s - loss: 0.5908 - acc: 0.9085 - val_loss: 0.8661 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00534: val_acc improved from 0.84663 to 0.85091, saving model to ./ModelSnapshots/LSTM-v1-534.h5\n",
+ "Epoch 535/3000\n",
+ " - 31s - loss: 0.5981 - acc: 0.9052 - val_loss: 1.1163 - val_acc: 0.8124\n",
+ "\n",
+ "Epoch 00535: val_acc did not improve from 0.85091\n",
+ "Epoch 536/3000\n",
+ " - 30s - loss: 0.6046 - acc: 0.9004 - val_loss: 0.9089 - val_acc: 0.8377\n",
+ "\n",
+ "Epoch 00536: val_acc did not improve from 0.85091\n",
+ "Epoch 537/3000\n",
+ " - 30s - loss: 0.5962 - acc: 0.9084 - val_loss: 0.9151 - val_acc: 0.8381\n",
+ "\n",
+ "Epoch 00537: val_acc did not improve from 0.85091\n",
+ "Epoch 538/3000\n",
+ " - 30s - loss: 0.5858 - acc: 0.9075 - val_loss: 0.9157 - val_acc: 0.8303\n",
+ "\n",
+ "Epoch 00538: val_acc did not improve from 0.85091\n",
+ "Epoch 539/3000\n",
+ " - 32s - loss: 0.5867 - acc: 0.9079 - val_loss: 0.9277 - val_acc: 0.8346\n",
+ "\n",
+ "Epoch 00539: val_acc did not improve from 0.85091\n",
+ "\n",
+ "Epoch 00539: ReduceLROnPlateau reducing learning rate to 9.02499959920533e-05.\n",
+ "Epoch 540/3000\n",
+ " - 31s - loss: 0.5884 - acc: 0.9085 - val_loss: 0.9444 - val_acc: 0.8264\n",
+ "\n",
+ "Epoch 00540: val_acc did not improve from 0.85091\n",
+ "Epoch 541/3000\n",
+ " - 31s - loss: 0.5837 - acc: 0.9081 - val_loss: 0.8909 - val_acc: 0.8439\n",
+ "\n",
+ "Epoch 00541: val_acc did not improve from 0.85091\n",
+ "Epoch 542/3000\n",
+ " - 30s - loss: 0.5881 - acc: 0.9079 - val_loss: 0.9310 - val_acc: 0.8318\n",
+ "\n",
+ "Epoch 00542: val_acc did not improve from 0.85091\n",
+ "Epoch 543/3000\n",
+ " - 30s - loss: 0.5840 - acc: 0.9084 - val_loss: 0.9961 - val_acc: 0.8248\n",
+ "\n",
+ "Epoch 00543: val_acc did not improve from 0.85091\n",
+ "Epoch 544/3000\n",
+ " - 30s - loss: 0.5883 - acc: 0.9061 - val_loss: 1.0121 - val_acc: 0.8182\n",
+ "\n",
+ "Epoch 00544: val_acc did not improve from 0.85091\n",
+ "Epoch 545/3000\n",
+ " - 31s - loss: 0.5747 - acc: 0.9106 - val_loss: 0.9573 - val_acc: 0.8279\n",
+ "\n",
+ "Epoch 00545: val_acc did not improve from 0.85091\n",
+ "Epoch 546/3000\n",
+ " - 32s - loss: 0.5794 - acc: 0.9088 - val_loss: 0.9523 - val_acc: 0.8350\n",
+ "\n",
+ "Epoch 00546: val_acc did not improve from 0.85091\n",
+ "Epoch 547/3000\n",
+ " - 30s - loss: 0.5902 - acc: 0.9097 - val_loss: 0.9324 - val_acc: 0.8287\n",
+ "\n",
+ "Epoch 00547: val_acc did not improve from 0.85091\n",
+ "Epoch 548/3000\n",
+ " - 30s - loss: 0.5840 - acc: 0.9075 - val_loss: 0.8749 - val_acc: 0.8439\n",
+ "\n",
+ "Epoch 00548: val_acc did not improve from 0.85091\n",
+ "Epoch 549/3000\n"
+ ]
+ }
+ ],
+ "source": [
+ "batch_size = 50\n",
+ "epochs = 3000\n",
+ "timesteps = 50\n",
+ "data_dim = (27,15)\n",
+ "l1v = 0.005\n",
+ "l2v = 0.015\n",
+ "\n",
+ "\n",
+ "tf.get_default_graph()\n",
+ "model = Sequential()\n",
+ "\n",
+ "model.add(TimeDistributed(Conv2D(64, kernel_size=(3,3), activation='relu', \n",
+ " padding='same', kernel_regularizer=regularizers.l1_l2(l1v,l2v)),\n",
+ " input_shape=(timesteps ,27, 15, 1)))\n",
+ "model.add(TimeDistributed(Conv2D(32, kernel_size=(3, 3), activation='relu',\n",
+ " padding='same', kernel_regularizer=regularizers.l1_l2(l1v,l2v))))\n",
+ "model.add(TimeDistributed(MaxPooling2D(pool_size=(2,2), strides=None,\n",
+ " padding='same', data_format='channels_last')))\n",
+ "model.add(TimeDistributed(Dropout(0.50)))\n",
+ "\n",
+ "model.add(TimeDistributed(Conv2D(32, kernel_size=(3, 3), activation='relu',\n",
+ " padding='same', kernel_regularizer=regularizers.l1_l2(l1v,l2v))))\n",
+ "model.add(TimeDistributed(Conv2D(16, kernel_size=(3, 3), activation='relu',\n",
+ " padding='same', kernel_regularizer=regularizers.l1_l2(l1v,l2v))))\n",
+ "model.add(TimeDistributed(MaxPooling2D(pool_size=(2,2), strides=None, padding='same', data_format='channels_last')))\n",
+ "model.add(TimeDistributed(Dropout(0.50)))\n",
+ "\n",
+ "model.add(TimeDistributed(Flatten()))\n",
+ "\n",
+ "model.add(keras.layers.CuDNNLSTM(80, return_sequences=True, input_shape=(timesteps, data_dim), kernel_regularizer=regularizers.l1_l2(l1v,l2v)))\n",
+ "model.add(Dropout(0.5))\n",
+ "\n",
+ "model.add(keras.layers.CuDNNLSTM(50, return_sequences=False, input_shape=(timesteps, data_dim), kernel_regularizer=regularizers.l1_l2(l1v,l2v)))\n",
+ "model.add(Dropout(0.5))\n",
+ "\n",
+ "model.add(Dense(num_classes, activation='softmax'))\n",
+ "\n",
+ "#optimizer = optimizers.Adagrad()\n",
+ "optimizer = optimizers.Adam(lr = 0.0001, decay=1e-6)\n",
+ "#optimizer = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.1)\n",
+ "model.compile(loss='categorical_crossentropy',\n",
+ " optimizer=optimizer,\n",
+ " metrics=['accuracy'])\n",
+ " \n",
+ "#Broadcast progress to the tensorboard.\n",
+ "\n",
+ "config = \"\"\n",
+ "for layer in model.layers:\n",
+ " config += str(layer.output).split('\\\"')[1].split(\"/\")[0] + str(layer.output_shape) + \"\\n\\n\"\n",
+ "config += \"batchsize: \" + str(batch_size) + \"\\n\\n\" + \"epochs: \" + str(epochs) + \"\\n\\n\" \n",
+ "config += \"l1: \" + str(l1v) + \"\\n\\n\" + \"l2: \" + str(l2v) + \"\\n\\n\"\n",
+ "\n",
+ "model.summary()\n",
+ "current_name = \"LSTM-v1\"\n",
+ "readable_timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S')\n",
+ "tensorflowfolder = \"/srv/share/tensorboardfiles/\" + current_name + readable_timestamp\n",
+ "print(current_name + readable_timestamp)\n",
+ "logger = LoggingTensorBoard(settings_str_to_log = config, log_dir=tensorflowfolder, histogram_freq=0,\n",
+ " write_graph=True, write_images=True, update_freq = 'epoch')\n",
+ "\n",
+ "storer = ModelCheckpoint(\"./ModelSnapshots/\" + current_name + readable_timestamp + '-{epoch:03d}.h5',\n",
+ " monitor='val_acc', verbose=1,\n",
+ " save_best_only=True, save_weights_only=False,\n",
+ " mode='auto', period=1)\n",
+ "\n",
+ "learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss', \n",
+ " patience=30, \n",
+ " verbose=1, \n",
+ " factor=0.95, \n",
+ " min_lr=0.00001)\n",
+ "\n",
+ "history = model.fit(x_train, y_train_one_hot,\n",
+ " batch_size=batch_size,\n",
+ " epochs=epochs,\n",
+ " verbose=2,\n",
+ " validation_data=(x_test, y_test_one_hot),\n",
+ " callbacks=[storer,logger, learning_rate_reduction])\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model.save(\"./ModelSnapshots/\" + current_name + \"_DONE.h5\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_12_LSTM-WarmStart.ipynb b/python/Step_12_LSTM-WarmStart.ipynb
new file mode 100644
index 0000000..64305e5
--- /dev/null
+++ b/python/Step_12_LSTM-WarmStart.ipynb
@@ -0,0 +1,10107 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Using TensorFlow backend.\n"
+ ]
+ }
+ ],
+ "source": [
+ "## USE for Multi GPU Systems\n",
+ "#import os\n",
+ "#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+ "\n",
+ "from keras.models import Sequential, load_model\n",
+ "from keras.layers import *\n",
+ "from keras import optimizers\n",
+ "from keras import utils\n",
+ "from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau\n",
+ "import keras\n",
+ "\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "import pandas as pd\n",
+ "import math\n",
+ "\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "# Importing matplotlib to plot images.\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "%matplotlib inline\n",
+ "\n",
+ "# Importing SK-learn to calculate precision and recall\n",
+ "import sklearn\n",
+ "from sklearn import metrics\n",
+ "from sklearn.model_selection import train_test_split, cross_val_score, LeaveOneGroupOut\n",
+ "from sklearn.utils import shuffle \n",
+ "\n",
+ "# Used for graph export\n",
+ "from keras import backend as K"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
class LoggingTensorBoard(TensorBoard):
    """TensorBoard callback that additionally records a free-form settings
    string as a text summary when training begins.

    Parameters
    ----------
    log_dir : str
        Directory for the TensorBoard event files (passed to the base class).
    settings_str_to_log : str
        Run-configuration text to write once as a "Run_Settings" text summary.
    """

    def __init__(self, log_dir, settings_str_to_log, **kwargs):
        super(LoggingTensorBoard, self).__init__(log_dir, **kwargs)
        self.settings_str = settings_str_to_log

    def on_train_begin(self, logs=None):
        # Let the base class initialize its summary writer first.
        TensorBoard.on_train_begin(self, logs=logs)

        # Render the settings string as a TF text summary and add it to the
        # writer set up by the base class (TF1 graph-mode API).
        settings_tensor = tf.convert_to_tensor(self.settings_str)
        text_summary = tf.summary.text("Run_Settings", settings_tensor)

        with tf.Session() as session:
            rendered = session.run(text_summary)
            self.writer.add_summary(rendered)
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[ 1 2 9 6 4 14 17 16 12 3 10 18 5] [13 8 11 15 7]\n"
+ ]
+ }
+ ],
+ "source": [
# Load the preprocessed (normalized, 50-step) LSTM dataset and split its
# users into train / test groups.
dfAll = pd.read_pickle("DataStudyCollection/df_lstm_norm50.pkl")

lst = dfAll.userID.unique()
np.random.seed(42)  # fixed seed -> deterministic, reproducible user split
np.random.shuffle(lst)
# Hold out the last 5 shuffled users for testing; train on the rest.
train_ids, test_ids = lst[:-5], lst[-5:]
print(train_ids, test_ids)
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,\n",
+ " 18])"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
# Sanity check: list every user ID present in the dataset.
dfAll["userID"].unique()
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
# Map TaskID into the range 0..16 via modulo — NOTE(review): presumably
# folds a repeated task numbering onto 17 base classes; confirm upstream.
dfAll["TaskID"] = dfAll["TaskID"] % 17
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
# Per-split views keeping only the model input (Blobs) and label (TaskID).
model_cols = ['Blobs', 'TaskID']
df_train = dfAll[dfAll.userID.isin(train_ids)][model_cols]
df_test = dfAll[dfAll.userID.isin(test_ids)][model_cols]
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
# Stack the per-sample blob sequences into 5-D tensors:
# (samples, 50 time steps, 27 x 15 image, 1 channel).
x_train = np.concatenate(df_train.Blobs.values).reshape(-1, 50, 27, 15, 1)
x_test = np.concatenate(df_test.Blobs.values).reshape(-1, 50, 27, 15, 1)

y_train = df_train.TaskID.values
y_test = df_test.TaskID.values

# Scale raw 0-255 intensities to [0, 1].
x_train = x_train / 255.0
x_test = x_test / 255.0

# One-hot encode the labels.  Class count is derived from the FULL dataset
# so train and test share the same encoding width.
# Fixed: reuse y_train / y_test here instead of recomputing df_*.TaskID —
# identical values, and keeps the label variables consistent.
num_classes = len(dfAll.TaskID.unique())
y_train_one_hot = utils.to_categorical(y_train, num_classes)
y_test_one_hot = utils.to_categorical(y_test, num_classes)
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
# If GPU is not available:
# GPU_USE = '/cpu:0'
#config = tf.ConfigProto(device_count = {"GPU": 1})


# If GPU is available:
config = tf.ConfigProto()
config.log_device_placement = True   # log which device each op lands on
config.allow_soft_placement = True   # fall back to CPU if an op has no GPU kernel
config.gpu_options.allow_growth = True
config.gpu_options.allocator_type = 'BFC'

# Limit the maximum memory used
config.gpu_options.per_process_gpu_memory_fraction = 0.3

# set session config
# Fixed: this notebook builds its model with standalone `keras` (see the
# imports cell), not `tf.keras`.  `tf.keras.backend.set_session` registers
# the session only for tf.keras, so the ConfigProto above (memory fraction,
# growth) would be silently ignored by the session Keras actually trains
# with.  Register it through keras' own backend instead.
K.set_session(tf.Session(config=config))
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "_________________________________________________________________\n",
+ "Layer (type) Output Shape Param # \n",
+ "=================================================================\n",
+ "time_distributed_10 (TimeDis (None, 50, 27, 15, 64) 640 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_11 (TimeDis (None, 50, 27, 15, 32) 18464 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_12 (TimeDis (None, 50, 14, 8, 32) 0 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_13 (TimeDis (None, 50, 14, 8, 32) 0 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_14 (TimeDis (None, 50, 14, 8, 32) 9248 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_15 (TimeDis (None, 50, 14, 8, 16) 4624 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_16 (TimeDis (None, 50, 7, 4, 16) 0 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_17 (TimeDis (None, 50, 7, 4, 16) 0 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_18 (TimeDis (None, 50, 448) 0 \n",
+ "_________________________________________________________________\n",
+ "lstm_3 (LSTM) (None, 50, 80) 169280 \n",
+ "_________________________________________________________________\n",
+ "dropout_7 (Dropout) (None, 50, 80) 0 \n",
+ "_________________________________________________________________\n",
+ "lstm_4 (LSTM) (None, 50) 26200 \n",
+ "_________________________________________________________________\n",
+ "dropout_8 (Dropout) (None, 50) 0 \n",
+ "_________________________________________________________________\n",
+ "dense_2 (Dense) (None, 17) 867 \n",
+ "=================================================================\n",
+ "Total params: 229,323\n",
+ "Trainable params: 229,323\n",
+ "Non-trainable params: 0\n",
+ "_________________________________________________________________\n",
+ "LSTM-v2\n",
+ "Train on 6624 samples, validate on 2569 samples\n",
+ "Epoch 1/3000\n",
+ " - 38s - loss: 0.6056 - acc: 0.8918 - val_loss: 1.0498 - val_acc: 0.8198\n",
+ "\n",
+ "Epoch 00001: val_acc improved from -inf to 0.81977, saving model to ./ModelSnapshots/LSTM-v2-001.h5\n",
+ "Epoch 2/3000\n",
+ " - 38s - loss: 0.5269 - acc: 0.9111 - val_loss: 0.9867 - val_acc: 0.8342\n",
+ "\n",
+ "Epoch 00002: val_acc improved from 0.81977 to 0.83418, saving model to ./ModelSnapshots/LSTM-v2-002.h5\n",
+ "Epoch 3/3000\n",
+ " - 40s - loss: 0.4995 - acc: 0.9215 - val_loss: 1.0149 - val_acc: 0.8315\n",
+ "\n",
+ "Epoch 00003: val_acc did not improve from 0.83418\n",
+ "Epoch 4/3000\n",
+ " - 39s - loss: 0.4941 - acc: 0.9198 - val_loss: 0.9717 - val_acc: 0.8326\n",
+ "\n",
+ "Epoch 00004: val_acc did not improve from 0.83418\n",
+ "Epoch 5/3000\n",
+ " - 39s - loss: 0.4916 - acc: 0.9238 - val_loss: 0.9620 - val_acc: 0.8392\n",
+ "\n",
+ "Epoch 00005: val_acc improved from 0.83418 to 0.83924, saving model to ./ModelSnapshots/LSTM-v2-005.h5\n",
+ "Epoch 6/3000\n",
+ " - 40s - loss: 0.4997 - acc: 0.9241 - val_loss: 1.1411 - val_acc: 0.8108\n",
+ "\n",
+ "Epoch 00006: val_acc did not improve from 0.83924\n",
+ "Epoch 7/3000\n",
+ " - 40s - loss: 0.5363 - acc: 0.9159 - val_loss: 0.9515 - val_acc: 0.8279\n",
+ "\n",
+ "Epoch 00007: val_acc did not improve from 0.83924\n",
+ "Epoch 8/3000\n",
+ " - 39s - loss: 0.4610 - acc: 0.9289 - val_loss: 0.9546 - val_acc: 0.8381\n",
+ "\n",
+ "Epoch 00008: val_acc did not improve from 0.83924\n",
+ "Epoch 9/3000\n",
+ " - 39s - loss: 0.4673 - acc: 0.9283 - val_loss: 0.9096 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00009: val_acc improved from 0.83924 to 0.84585, saving model to ./ModelSnapshots/LSTM-v2-009.h5\n",
+ "Epoch 10/3000\n",
+ " - 39s - loss: 0.4555 - acc: 0.9295 - val_loss: 1.0060 - val_acc: 0.8330\n",
+ "\n",
+ "Epoch 00010: val_acc did not improve from 0.84585\n",
+ "Epoch 11/3000\n",
+ " - 39s - loss: 0.4624 - acc: 0.9296 - val_loss: 0.9722 - val_acc: 0.8318\n",
+ "\n",
+ "Epoch 00011: val_acc did not improve from 0.84585\n",
+ "Epoch 12/3000\n",
+ " - 39s - loss: 0.4971 - acc: 0.9209 - val_loss: 1.1189 - val_acc: 0.8062\n",
+ "\n",
+ "Epoch 00012: val_acc did not improve from 0.84585\n",
+ "Epoch 13/3000\n",
+ " - 39s - loss: 0.5239 - acc: 0.9126 - val_loss: 0.9452 - val_acc: 0.8365\n",
+ "\n",
+ "Epoch 00013: val_acc did not improve from 0.84585\n",
+ "Epoch 14/3000\n",
+ " - 40s - loss: 0.4821 - acc: 0.9224 - val_loss: 0.9675 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00014: val_acc did not improve from 0.84585\n",
+ "Epoch 15/3000\n",
+ " - 39s - loss: 0.4525 - acc: 0.9281 - val_loss: 0.9729 - val_acc: 0.8404\n",
+ "\n",
+ "Epoch 00015: val_acc did not improve from 0.84585\n",
+ "Epoch 16/3000\n",
+ " - 40s - loss: 0.4651 - acc: 0.9295 - val_loss: 0.9648 - val_acc: 0.8392\n",
+ "\n",
+ "Epoch 00016: val_acc did not improve from 0.84585\n",
+ "Epoch 17/3000\n",
+ " - 39s - loss: 0.4269 - acc: 0.9366 - val_loss: 0.8995 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00017: val_acc improved from 0.84585 to 0.84663, saving model to ./ModelSnapshots/LSTM-v2-017.h5\n",
+ "Epoch 18/3000\n",
+ " - 39s - loss: 0.4641 - acc: 0.9268 - val_loss: 0.9171 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00018: val_acc did not improve from 0.84663\n",
+ "Epoch 19/3000\n",
+ " - 39s - loss: 0.4250 - acc: 0.9398 - val_loss: 0.9650 - val_acc: 0.8392\n",
+ "\n",
+ "Epoch 00019: val_acc did not improve from 0.84663\n",
+ "Epoch 20/3000\n",
+ " - 39s - loss: 0.4639 - acc: 0.9330 - val_loss: 0.9631 - val_acc: 0.8404\n",
+ "\n",
+ "Epoch 00020: val_acc did not improve from 0.84663\n",
+ "Epoch 21/3000\n",
+ " - 39s - loss: 0.4601 - acc: 0.9315 - val_loss: 0.9159 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00021: val_acc did not improve from 0.84663\n",
+ "Epoch 22/3000\n",
+ " - 39s - loss: 0.4361 - acc: 0.9370 - val_loss: 0.8874 - val_acc: 0.8470\n",
+ "\n",
+ "Epoch 00022: val_acc improved from 0.84663 to 0.84702, saving model to ./ModelSnapshots/LSTM-v2-022.h5\n",
+ "Epoch 23/3000\n",
+ " - 39s - loss: 0.4560 - acc: 0.9304 - val_loss: 0.8797 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00023: val_acc improved from 0.84702 to 0.84936, saving model to ./ModelSnapshots/LSTM-v2-023.h5\n",
+ "Epoch 24/3000\n",
+ " - 39s - loss: 0.4309 - acc: 0.9355 - val_loss: 0.8776 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00024: val_acc improved from 0.84936 to 0.85286, saving model to ./ModelSnapshots/LSTM-v2-024.h5\n",
+ "Epoch 25/3000\n",
+ " - 40s - loss: 0.4402 - acc: 0.9345 - val_loss: 0.9515 - val_acc: 0.8451\n",
+ "\n",
+ "Epoch 00025: val_acc did not improve from 0.85286\n",
+ "Epoch 26/3000\n",
+ " - 39s - loss: 0.4727 - acc: 0.9309 - val_loss: 0.9700 - val_acc: 0.8256\n",
+ "\n",
+ "Epoch 00026: val_acc did not improve from 0.85286\n",
+ "Epoch 27/3000\n",
+ " - 39s - loss: 0.4610 - acc: 0.9286 - val_loss: 0.9460 - val_acc: 0.8342\n",
+ "\n",
+ "Epoch 00027: val_acc did not improve from 0.85286\n",
+ "Epoch 28/3000\n",
+ " - 39s - loss: 0.4874 - acc: 0.9265 - val_loss: 0.9083 - val_acc: 0.8392\n",
+ "\n",
+ "Epoch 00028: val_acc did not improve from 0.85286\n",
+ "Epoch 29/3000\n",
+ " - 38s - loss: 0.4450 - acc: 0.9321 - val_loss: 0.8898 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00029: val_acc did not improve from 0.85286\n",
+ "Epoch 30/3000\n",
+ " - 40s - loss: 0.4875 - acc: 0.9229 - val_loss: 0.9824 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00030: val_acc did not improve from 0.85286\n",
+ "Epoch 31/3000\n",
+ " - 39s - loss: 0.4338 - acc: 0.9386 - val_loss: 0.9317 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00031: val_acc did not improve from 0.85286\n",
+ "Epoch 32/3000\n",
+ " - 39s - loss: 0.4561 - acc: 0.9327 - val_loss: 0.9210 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00032: val_acc did not improve from 0.85286\n",
+ "Epoch 33/3000\n",
+ " - 39s - loss: 0.4344 - acc: 0.9319 - val_loss: 0.9535 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00033: val_acc did not improve from 0.85286\n",
+ "Epoch 34/3000\n",
+ " - 37s - loss: 0.4195 - acc: 0.9413 - val_loss: 0.8583 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00034: val_acc improved from 0.85286 to 0.85520, saving model to ./ModelSnapshots/LSTM-v2-034.h5\n",
+ "Epoch 35/3000\n",
+ " - 38s - loss: 0.4471 - acc: 0.9343 - val_loss: 0.9999 - val_acc: 0.8373\n",
+ "\n",
+ "Epoch 00035: val_acc did not improve from 0.85520\n",
+ "Epoch 36/3000\n",
+ " - 40s - loss: 0.4331 - acc: 0.9351 - val_loss: 0.9634 - val_acc: 0.8361\n",
+ "\n",
+ "Epoch 00036: val_acc did not improve from 0.85520\n",
+ "Epoch 37/3000\n",
+ " - 39s - loss: 0.4321 - acc: 0.9373 - val_loss: 0.9405 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00037: val_acc did not improve from 0.85520\n",
+ "Epoch 38/3000\n",
+ " - 44s - loss: 0.4193 - acc: 0.9402 - val_loss: 0.9312 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00038: val_acc did not improve from 0.85520\n",
+ "Epoch 39/3000\n",
+ " - 40s - loss: 0.4308 - acc: 0.9396 - val_loss: 0.9360 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00039: val_acc did not improve from 0.85520\n",
+ "Epoch 40/3000\n",
+ " - 42s - loss: 0.4237 - acc: 0.9373 - val_loss: 0.8747 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 00040: val_acc improved from 0.85520 to 0.86026, saving model to ./ModelSnapshots/LSTM-v2-040.h5\n",
+ "Epoch 41/3000\n",
+ " - 41s - loss: 0.4495 - acc: 0.9327 - val_loss: 0.9276 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00041: val_acc did not improve from 0.86026\n",
+ "Epoch 42/3000\n",
+ " - 39s - loss: 0.4586 - acc: 0.9358 - val_loss: 0.9724 - val_acc: 0.8408\n",
+ "\n",
+ "Epoch 00042: val_acc did not improve from 0.86026\n",
+ "Epoch 43/3000\n",
+ " - 40s - loss: 0.4676 - acc: 0.9309 - val_loss: 0.9681 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00043: val_acc did not improve from 0.86026\n",
+ "Epoch 44/3000\n",
+ " - 39s - loss: 0.4382 - acc: 0.9364 - val_loss: 0.8846 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00044: val_acc did not improve from 0.86026\n",
+ "Epoch 45/3000\n",
+ " - 39s - loss: 0.4467 - acc: 0.9346 - val_loss: 0.9072 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00045: val_acc did not improve from 0.86026\n",
+ "Epoch 46/3000\n",
+ " - 39s - loss: 0.4176 - acc: 0.9410 - val_loss: 0.9573 - val_acc: 0.8451\n",
+ "\n",
+ "Epoch 00046: val_acc did not improve from 0.86026\n",
+ "Epoch 47/3000\n",
+ " - 39s - loss: 0.4432 - acc: 0.9327 - val_loss: 0.9924 - val_acc: 0.8350\n",
+ "\n",
+ "Epoch 00047: val_acc did not improve from 0.86026\n",
+ "Epoch 48/3000\n",
+ " - 39s - loss: 0.4237 - acc: 0.9387 - val_loss: 1.0595 - val_acc: 0.8264\n",
+ "\n",
+ "Epoch 00048: val_acc did not improve from 0.86026\n",
+ "Epoch 49/3000\n",
+ " - 39s - loss: 0.4519 - acc: 0.9345 - val_loss: 0.9091 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00049: val_acc did not improve from 0.86026\n",
+ "Epoch 50/3000\n",
+ " - 40s - loss: 0.4514 - acc: 0.9331 - val_loss: 0.8948 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00050: val_acc did not improve from 0.86026\n",
+ "Epoch 51/3000\n",
+ " - 44s - loss: 0.4385 - acc: 0.9380 - val_loss: 0.9468 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00051: val_acc did not improve from 0.86026\n",
+ "Epoch 52/3000\n",
+ " - 39s - loss: 0.4371 - acc: 0.9373 - val_loss: 0.9737 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00052: val_acc did not improve from 0.86026\n",
+ "Epoch 53/3000\n",
+ " - 40s - loss: 0.4320 - acc: 0.9411 - val_loss: 0.9623 - val_acc: 0.8412\n",
+ "\n",
+ "Epoch 00053: val_acc did not improve from 0.86026\n",
+ "Epoch 54/3000\n",
+ " - 40s - loss: 0.4183 - acc: 0.9438 - val_loss: 0.8416 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00054: val_acc did not improve from 0.86026\n",
+ "Epoch 55/3000\n",
+ " - 39s - loss: 0.4246 - acc: 0.9422 - val_loss: 0.9440 - val_acc: 0.8385\n",
+ "\n",
+ "Epoch 00055: val_acc did not improve from 0.86026\n",
+ "Epoch 56/3000\n",
+ " - 40s - loss: 0.4368 - acc: 0.9387 - val_loss: 0.9290 - val_acc: 0.8420\n",
+ "\n",
+ "Epoch 00056: val_acc did not improve from 0.86026\n",
+ "Epoch 57/3000\n",
+ " - 39s - loss: 0.4627 - acc: 0.9333 - val_loss: 0.8787 - val_acc: 0.8474\n",
+ "\n",
+ "Epoch 00057: val_acc did not improve from 0.86026\n",
+ "Epoch 58/3000\n",
+ " - 40s - loss: 0.4149 - acc: 0.9428 - val_loss: 0.9141 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00058: val_acc did not improve from 0.86026\n",
+ "Epoch 59/3000\n",
+ " - 39s - loss: 0.4551 - acc: 0.9372 - val_loss: 0.8431 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00059: val_acc did not improve from 0.86026\n",
+ "Epoch 60/3000\n",
+ " - 39s - loss: 0.4260 - acc: 0.9399 - val_loss: 0.8984 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00060: val_acc did not improve from 0.86026\n",
+ "Epoch 61/3000\n",
+ " - 40s - loss: 0.4569 - acc: 0.9348 - val_loss: 0.8772 - val_acc: 0.8474\n",
+ "\n",
+ "Epoch 00061: val_acc did not improve from 0.86026\n",
+ "Epoch 62/3000\n",
+ " - 39s - loss: 0.4428 - acc: 0.9405 - val_loss: 0.9370 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00062: val_acc did not improve from 0.86026\n",
+ "Epoch 63/3000\n",
+ " - 39s - loss: 0.4368 - acc: 0.9363 - val_loss: 0.9072 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00063: val_acc did not improve from 0.86026\n",
+ "Epoch 64/3000\n",
+ " - 40s - loss: 0.4608 - acc: 0.9307 - val_loss: 0.8726 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00064: val_acc did not improve from 0.86026\n",
+ "Epoch 65/3000\n",
+ " - 39s - loss: 0.4588 - acc: 0.9364 - val_loss: 0.9302 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00065: val_acc did not improve from 0.86026\n",
+ "Epoch 66/3000\n",
+ " - 40s - loss: 0.4514 - acc: 0.9369 - val_loss: 0.9376 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00066: val_acc did not improve from 0.86026\n",
+ "Epoch 67/3000\n",
+ " - 39s - loss: 0.4393 - acc: 0.9395 - val_loss: 0.9064 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00067: val_acc did not improve from 0.86026\n",
+ "Epoch 68/3000\n",
+ " - 40s - loss: 0.4319 - acc: 0.9389 - val_loss: 0.8993 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00068: val_acc did not improve from 0.86026\n",
+ "Epoch 69/3000\n",
+ " - 39s - loss: 0.4387 - acc: 0.9360 - val_loss: 0.8797 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00069: val_acc did not improve from 0.86026\n",
+ "Epoch 70/3000\n",
+ " - 39s - loss: 0.4307 - acc: 0.9392 - val_loss: 0.9072 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00070: val_acc did not improve from 0.86026\n",
+ "Epoch 71/3000\n",
+ " - 38s - loss: 0.4298 - acc: 0.9386 - val_loss: 0.9000 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00071: val_acc did not improve from 0.86026\n",
+ "Epoch 72/3000\n",
+ " - 39s - loss: 0.4512 - acc: 0.9358 - val_loss: 0.9628 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00072: val_acc did not improve from 0.86026\n",
+ "Epoch 73/3000\n",
+ " - 39s - loss: 0.4398 - acc: 0.9380 - val_loss: 0.9015 - val_acc: 0.8404\n",
+ "\n",
+ "Epoch 00073: val_acc did not improve from 0.86026\n",
+ "Epoch 74/3000\n",
+ " - 39s - loss: 0.4250 - acc: 0.9405 - val_loss: 0.9228 - val_acc: 0.8416\n",
+ "\n",
+ "Epoch 00074: val_acc did not improve from 0.86026\n",
+ "Epoch 75/3000\n",
+ " - 39s - loss: 0.4376 - acc: 0.9389 - val_loss: 1.0756 - val_acc: 0.8264\n",
+ "\n",
+ "Epoch 00075: val_acc did not improve from 0.86026\n",
+ "Epoch 76/3000\n",
+ " - 40s - loss: 0.4561 - acc: 0.9345 - val_loss: 0.9589 - val_acc: 0.8346\n",
+ "\n",
+ "Epoch 00076: val_acc did not improve from 0.86026\n",
+ "Epoch 77/3000\n",
+ " - 39s - loss: 0.4814 - acc: 0.9318 - val_loss: 0.9344 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00077: val_acc did not improve from 0.86026\n",
+ "Epoch 78/3000\n",
+ " - 39s - loss: 0.4547 - acc: 0.9336 - val_loss: 0.9314 - val_acc: 0.8404\n",
+ "\n",
+ "Epoch 00078: val_acc did not improve from 0.86026\n",
+ "Epoch 79/3000\n",
+ " - 40s - loss: 0.4177 - acc: 0.9438 - val_loss: 0.9750 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00079: val_acc did not improve from 0.86026\n",
+ "Epoch 80/3000\n",
+ " - 42s - loss: 0.4243 - acc: 0.9408 - val_loss: 0.9280 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00080: val_acc did not improve from 0.86026\n",
+ "Epoch 81/3000\n",
+ " - 41s - loss: 0.4167 - acc: 0.9432 - val_loss: 0.9659 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00081: val_acc did not improve from 0.86026\n",
+ "Epoch 82/3000\n",
+ " - 39s - loss: 0.4214 - acc: 0.9413 - val_loss: 0.9068 - val_acc: 0.8470\n",
+ "\n",
+ "Epoch 00082: val_acc did not improve from 0.86026\n",
+ "Epoch 83/3000\n",
+ " - 39s - loss: 0.4220 - acc: 0.9389 - val_loss: 0.9267 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00083: val_acc did not improve from 0.86026\n",
+ "Epoch 84/3000\n",
+ " - 39s - loss: 0.4349 - acc: 0.9399 - val_loss: 0.8673 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00084: val_acc did not improve from 0.86026\n",
+ "\n",
+ "Epoch 00084: ReduceLROnPlateau reducing learning rate to 9.499999760009814e-05.\n",
+ "Epoch 85/3000\n",
+ " - 39s - loss: 0.4043 - acc: 0.9423 - val_loss: 0.9716 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00085: val_acc did not improve from 0.86026\n",
+ "Epoch 86/3000\n",
+ " - 39s - loss: 0.4033 - acc: 0.9470 - val_loss: 0.8990 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00086: val_acc did not improve from 0.86026\n",
+ "Epoch 87/3000\n",
+ " - 39s - loss: 0.4128 - acc: 0.9420 - val_loss: 0.9176 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00087: val_acc did not improve from 0.86026\n",
+ "Epoch 88/3000\n",
+ " - 39s - loss: 0.4322 - acc: 0.9413 - val_loss: 0.9477 - val_acc: 0.8455\n",
+ "\n",
+ "Epoch 00088: val_acc did not improve from 0.86026\n",
+ "Epoch 89/3000\n",
+ " - 39s - loss: 0.4287 - acc: 0.9398 - val_loss: 0.9319 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00089: val_acc did not improve from 0.86026\n",
+ "Epoch 90/3000\n",
+ " - 39s - loss: 0.4087 - acc: 0.9429 - val_loss: 0.9390 - val_acc: 0.8381\n",
+ "\n",
+ "Epoch 00090: val_acc did not improve from 0.86026\n",
+ "Epoch 91/3000\n",
+ " - 39s - loss: 0.4065 - acc: 0.9457 - val_loss: 0.9405 - val_acc: 0.8385\n",
+ "\n",
+ "Epoch 00091: val_acc did not improve from 0.86026\n",
+ "Epoch 92/3000\n",
+ " - 39s - loss: 0.4121 - acc: 0.9438 - val_loss: 0.9280 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00092: val_acc did not improve from 0.86026\n",
+ "Epoch 93/3000\n",
+ " - 39s - loss: 0.4262 - acc: 0.9389 - val_loss: 0.9276 - val_acc: 0.8342\n",
+ "\n",
+ "Epoch 00093: val_acc did not improve from 0.86026\n",
+ "Epoch 94/3000\n",
+ " - 39s - loss: 0.4168 - acc: 0.9432 - val_loss: 0.8850 - val_acc: 0.8392\n",
+ "\n",
+ "Epoch 00094: val_acc did not improve from 0.86026\n",
+ "Epoch 95/3000\n",
+ " - 39s - loss: 0.4275 - acc: 0.9401 - val_loss: 0.9073 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00095: val_acc did not improve from 0.86026\n",
+ "Epoch 96/3000\n",
+ " - 39s - loss: 0.4154 - acc: 0.9410 - val_loss: 0.9839 - val_acc: 0.8299\n",
+ "\n",
+ "Epoch 00096: val_acc did not improve from 0.86026\n",
+ "Epoch 97/3000\n",
+ " - 40s - loss: 0.4134 - acc: 0.9446 - val_loss: 1.0332 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00097: val_acc did not improve from 0.86026\n",
+ "Epoch 98/3000\n",
+ " - 39s - loss: 0.4648 - acc: 0.9306 - val_loss: 1.0208 - val_acc: 0.8365\n",
+ "\n",
+ "Epoch 00098: val_acc did not improve from 0.86026\n",
+ "Epoch 99/3000\n",
+ " - 39s - loss: 0.4515 - acc: 0.9369 - val_loss: 0.9568 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00099: val_acc did not improve from 0.86026\n",
+ "Epoch 100/3000\n",
+ " - 39s - loss: 0.4186 - acc: 0.9411 - val_loss: 0.9582 - val_acc: 0.8365\n",
+ "\n",
+ "Epoch 00100: val_acc did not improve from 0.86026\n",
+ "Epoch 101/3000\n",
+ " - 39s - loss: 0.4065 - acc: 0.9441 - val_loss: 0.9101 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00101: val_acc did not improve from 0.86026\n",
+ "Epoch 102/3000\n",
+ " - 39s - loss: 0.4159 - acc: 0.9429 - val_loss: 0.9969 - val_acc: 0.8388\n",
+ "\n",
+ "Epoch 00102: val_acc did not improve from 0.86026\n",
+ "Epoch 103/3000\n",
+ " - 39s - loss: 0.4549 - acc: 0.9334 - val_loss: 1.0005 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00103: val_acc did not improve from 0.86026\n",
+ "Epoch 104/3000\n",
+ " - 39s - loss: 0.4101 - acc: 0.9449 - val_loss: 0.7630 - val_acc: 0.8665\n",
+ "\n",
+ "Epoch 00104: val_acc improved from 0.86026 to 0.86649, saving model to ./ModelSnapshots/LSTM-v2-104.h5\n",
+ "Epoch 105/3000\n",
+ " - 39s - loss: 0.4220 - acc: 0.9405 - val_loss: 0.8933 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00105: val_acc did not improve from 0.86649\n",
+ "Epoch 106/3000\n",
+ " - 38s - loss: 0.4094 - acc: 0.9417 - val_loss: 0.8874 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00106: val_acc did not improve from 0.86649\n",
+ "Epoch 107/3000\n",
+ " - 38s - loss: 0.4191 - acc: 0.9401 - val_loss: 0.8584 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00107: val_acc did not improve from 0.86649\n",
+ "Epoch 108/3000\n",
+ " - 39s - loss: 0.4098 - acc: 0.9446 - val_loss: 0.9456 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00108: val_acc did not improve from 0.86649\n",
+ "Epoch 109/3000\n",
+ " - 39s - loss: 0.4520 - acc: 0.9348 - val_loss: 0.9103 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00109: val_acc did not improve from 0.86649\n",
+ "Epoch 110/3000\n",
+ " - 39s - loss: 0.4052 - acc: 0.9447 - val_loss: 0.8925 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00110: val_acc did not improve from 0.86649\n",
+ "Epoch 111/3000\n",
+ " - 39s - loss: 0.3867 - acc: 0.9467 - val_loss: 0.9218 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00111: val_acc did not improve from 0.86649\n",
+ "Epoch 112/3000\n",
+ " - 39s - loss: 0.4004 - acc: 0.9466 - val_loss: 0.9572 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00112: val_acc did not improve from 0.86649\n",
+ "Epoch 113/3000\n",
+ " - 40s - loss: 0.4357 - acc: 0.9370 - val_loss: 0.9873 - val_acc: 0.8420\n",
+ "\n",
+ "Epoch 00113: val_acc did not improve from 0.86649\n",
+ "Epoch 114/3000\n",
+ " - 39s - loss: 0.4327 - acc: 0.9399 - val_loss: 0.9470 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00114: val_acc did not improve from 0.86649\n",
+ "Epoch 115/3000\n",
+ " - 39s - loss: 0.4282 - acc: 0.9454 - val_loss: 0.8582 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00115: val_acc did not improve from 0.86649\n",
+ "Epoch 116/3000\n",
+ " - 39s - loss: 0.4082 - acc: 0.9417 - val_loss: 1.0239 - val_acc: 0.8357\n",
+ "\n",
+ "Epoch 00116: val_acc did not improve from 0.86649\n",
+ "Epoch 117/3000\n",
+ " - 39s - loss: 0.4483 - acc: 0.9348 - val_loss: 0.8706 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00117: val_acc did not improve from 0.86649\n",
+ "Epoch 118/3000\n",
+ " - 40s - loss: 0.4208 - acc: 0.9408 - val_loss: 0.9734 - val_acc: 0.8388\n",
+ "\n",
+ "Epoch 00118: val_acc did not improve from 0.86649\n",
+ "Epoch 119/3000\n",
+ " - 38s - loss: 0.4357 - acc: 0.9364 - val_loss: 0.9709 - val_acc: 0.8392\n",
+ "\n",
+ "Epoch 00119: val_acc did not improve from 0.86649\n",
+ "Epoch 120/3000\n",
+ " - 39s - loss: 0.4519 - acc: 0.9331 - val_loss: 0.9216 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00120: val_acc did not improve from 0.86649\n",
+ "Epoch 121/3000\n",
+ " - 39s - loss: 0.4216 - acc: 0.9413 - val_loss: 0.9375 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00121: val_acc did not improve from 0.86649\n",
+ "Epoch 122/3000\n",
+ " - 39s - loss: 0.4187 - acc: 0.9413 - val_loss: 0.8751 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00122: val_acc did not improve from 0.86649\n",
+ "Epoch 123/3000\n",
+ " - 39s - loss: 0.4169 - acc: 0.9469 - val_loss: 0.8852 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00123: val_acc did not improve from 0.86649\n",
+ "Epoch 124/3000\n",
+ " - 39s - loss: 0.4069 - acc: 0.9461 - val_loss: 0.9012 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00124: val_acc did not improve from 0.86649\n",
+ "Epoch 125/3000\n",
+ " - 39s - loss: 0.4158 - acc: 0.9405 - val_loss: 0.8521 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00125: val_acc did not improve from 0.86649\n",
+ "Epoch 126/3000\n",
+ " - 39s - loss: 0.4191 - acc: 0.9414 - val_loss: 0.9001 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00126: val_acc did not improve from 0.86649\n",
+ "Epoch 127/3000\n",
+ " - 39s - loss: 0.3975 - acc: 0.9476 - val_loss: 0.9316 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00127: val_acc did not improve from 0.86649\n",
+ "Epoch 128/3000\n",
+ " - 39s - loss: 0.4252 - acc: 0.9402 - val_loss: 0.8905 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00128: val_acc did not improve from 0.86649\n",
+ "Epoch 129/3000\n",
+ " - 39s - loss: 0.4259 - acc: 0.9392 - val_loss: 0.8737 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00129: val_acc did not improve from 0.86649\n",
+ "Epoch 130/3000\n",
+ " - 39s - loss: 0.4075 - acc: 0.9441 - val_loss: 0.9467 - val_acc: 0.8381\n",
+ "\n",
+ "Epoch 00130: val_acc did not improve from 0.86649\n",
+ "Epoch 131/3000\n",
+ " - 39s - loss: 0.3948 - acc: 0.9472 - val_loss: 0.8441 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00131: val_acc did not improve from 0.86649\n",
+ "Epoch 132/3000\n",
+ " - 39s - loss: 0.4289 - acc: 0.9410 - val_loss: 0.9384 - val_acc: 0.8396\n",
+ "\n",
+ "Epoch 00132: val_acc did not improve from 0.86649\n",
+ "Epoch 133/3000\n",
+ " - 39s - loss: 0.4148 - acc: 0.9431 - val_loss: 0.9680 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00133: val_acc did not improve from 0.86649\n",
+ "Epoch 134/3000\n",
+ " - 39s - loss: 0.4544 - acc: 0.9387 - val_loss: 0.9413 - val_acc: 0.8455\n",
+ "\n",
+ "Epoch 00134: val_acc did not improve from 0.86649\n",
+ "\n",
+ "Epoch 00134: ReduceLROnPlateau reducing learning rate to 9.02499959920533e-05.\n",
+ "Epoch 135/3000\n",
+ " - 39s - loss: 0.4370 - acc: 0.9402 - val_loss: 0.9007 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00135: val_acc did not improve from 0.86649\n",
+ "Epoch 136/3000\n",
+ " - 38s - loss: 0.3923 - acc: 0.9472 - val_loss: 0.8570 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00136: val_acc did not improve from 0.86649\n",
+ "Epoch 137/3000\n",
+ " - 39s - loss: 0.3934 - acc: 0.9450 - val_loss: 0.9029 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00137: val_acc did not improve from 0.86649\n",
+ "Epoch 138/3000\n",
+ " - 39s - loss: 0.3801 - acc: 0.9508 - val_loss: 0.9045 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00138: val_acc did not improve from 0.86649\n",
+ "Epoch 139/3000\n",
+ " - 40s - loss: 0.4030 - acc: 0.9454 - val_loss: 0.9157 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00139: val_acc did not improve from 0.86649\n",
+ "Epoch 140/3000\n",
+ " - 39s - loss: 0.3966 - acc: 0.9485 - val_loss: 0.9957 - val_acc: 0.8342\n",
+ "\n",
+ "Epoch 00140: val_acc did not improve from 0.86649\n",
+ "Epoch 141/3000\n",
+ " - 39s - loss: 0.4135 - acc: 0.9443 - val_loss: 0.9842 - val_acc: 0.8346\n",
+ "\n",
+ "Epoch 00141: val_acc did not improve from 0.86649\n",
+ "Epoch 142/3000\n",
+ " - 39s - loss: 0.3960 - acc: 0.9490 - val_loss: 0.9163 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00142: val_acc did not improve from 0.86649\n",
+ "Epoch 143/3000\n",
+ " - 39s - loss: 0.4101 - acc: 0.9410 - val_loss: 0.8855 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 00143: val_acc did not improve from 0.86649\n",
+ "Epoch 144/3000\n",
+ " - 39s - loss: 0.3965 - acc: 0.9509 - val_loss: 1.0028 - val_acc: 0.8303\n",
+ "\n",
+ "Epoch 00144: val_acc did not improve from 0.86649\n",
+ "Epoch 145/3000\n",
+ " - 40s - loss: 0.4101 - acc: 0.9413 - val_loss: 0.9372 - val_acc: 0.8400\n",
+ "\n",
+ "Epoch 00145: val_acc did not improve from 0.86649\n",
+ "Epoch 146/3000\n",
+ " - 39s - loss: 0.4004 - acc: 0.9444 - val_loss: 0.9043 - val_acc: 0.8474\n",
+ "\n",
+ "Epoch 00146: val_acc did not improve from 0.86649\n",
+ "Epoch 147/3000\n",
+ " - 39s - loss: 0.4090 - acc: 0.9440 - val_loss: 0.8569 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00147: val_acc did not improve from 0.86649\n",
+ "Epoch 148/3000\n",
+ " - 39s - loss: 0.4181 - acc: 0.9431 - val_loss: 0.9812 - val_acc: 0.8334\n",
+ "\n",
+ "Epoch 00148: val_acc did not improve from 0.86649\n",
+ "Epoch 149/3000\n",
+ " - 39s - loss: 0.4085 - acc: 0.9450 - val_loss: 0.8226 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00149: val_acc did not improve from 0.86649\n",
+ "Epoch 150/3000\n",
+ " - 39s - loss: 0.4201 - acc: 0.9411 - val_loss: 0.9429 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00150: val_acc did not improve from 0.86649\n",
+ "Epoch 151/3000\n",
+ " - 39s - loss: 0.4101 - acc: 0.9428 - val_loss: 0.8741 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00151: val_acc did not improve from 0.86649\n",
+ "Epoch 152/3000\n",
+ " - 39s - loss: 0.4096 - acc: 0.9438 - val_loss: 0.8906 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00152: val_acc did not improve from 0.86649\n",
+ "Epoch 153/3000\n",
+ " - 39s - loss: 0.4016 - acc: 0.9444 - val_loss: 0.9297 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00153: val_acc did not improve from 0.86649\n",
+ "Epoch 154/3000\n",
+ " - 40s - loss: 0.4003 - acc: 0.9473 - val_loss: 1.0061 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00154: val_acc did not improve from 0.86649\n",
+ "Epoch 155/3000\n",
+ " - 39s - loss: 0.4044 - acc: 0.9454 - val_loss: 1.0106 - val_acc: 0.8412\n",
+ "\n",
+ "Epoch 00155: val_acc did not improve from 0.86649\n",
+ "Epoch 156/3000\n",
+ " - 39s - loss: 0.4122 - acc: 0.9414 - val_loss: 0.9389 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00156: val_acc did not improve from 0.86649\n",
+ "Epoch 157/3000\n",
+ " - 39s - loss: 0.3976 - acc: 0.9446 - val_loss: 0.9240 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00157: val_acc did not improve from 0.86649\n",
+ "Epoch 158/3000\n",
+ " - 39s - loss: 0.3838 - acc: 0.9523 - val_loss: 0.9041 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00158: val_acc did not improve from 0.86649\n",
+ "Epoch 159/3000\n",
+ " - 39s - loss: 0.4119 - acc: 0.9441 - val_loss: 0.9310 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00159: val_acc did not improve from 0.86649\n",
+ "Epoch 160/3000\n",
+ " - 39s - loss: 0.3936 - acc: 0.9464 - val_loss: 0.9601 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00160: val_acc did not improve from 0.86649\n",
+ "Epoch 161/3000\n",
+ " - 39s - loss: 0.4161 - acc: 0.9435 - val_loss: 0.9179 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00161: val_acc did not improve from 0.86649\n",
+ "Epoch 162/3000\n",
+ " - 39s - loss: 0.4000 - acc: 0.9458 - val_loss: 0.9565 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00162: val_acc did not improve from 0.86649\n",
+ "Epoch 163/3000\n",
+ " - 39s - loss: 0.4164 - acc: 0.9419 - val_loss: 1.0898 - val_acc: 0.8272\n",
+ "\n",
+ "Epoch 00163: val_acc did not improve from 0.86649\n",
+ "Epoch 164/3000\n",
+ " - 39s - loss: 0.4255 - acc: 0.9398 - val_loss: 0.9603 - val_acc: 0.8381\n",
+ "\n",
+ "Epoch 00164: val_acc did not improve from 0.86649\n",
+ "\n",
+ "Epoch 00164: ReduceLROnPlateau reducing learning rate to 8.573749619245064e-05.\n",
+ "Epoch 165/3000\n",
+ " - 39s - loss: 0.4097 - acc: 0.9446 - val_loss: 0.9117 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00165: val_acc did not improve from 0.86649\n",
+ "Epoch 166/3000\n",
+ " - 39s - loss: 0.3816 - acc: 0.9491 - val_loss: 0.9233 - val_acc: 0.8455\n",
+ "\n",
+ "Epoch 00166: val_acc did not improve from 0.86649\n",
+ "Epoch 167/3000\n",
+ " - 39s - loss: 0.4109 - acc: 0.9426 - val_loss: 0.9021 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00167: val_acc did not improve from 0.86649\n",
+ "Epoch 168/3000\n",
+ " - 40s - loss: 0.4019 - acc: 0.9446 - val_loss: 0.9406 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00168: val_acc did not improve from 0.86649\n",
+ "Epoch 169/3000\n",
+ " - 40s - loss: 0.3951 - acc: 0.9464 - val_loss: 0.9533 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00169: val_acc did not improve from 0.86649\n",
+ "Epoch 170/3000\n",
+ " - 39s - loss: 0.3935 - acc: 0.9485 - val_loss: 0.9551 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00170: val_acc did not improve from 0.86649\n",
+ "Epoch 171/3000\n",
+ " - 39s - loss: 0.3931 - acc: 0.9496 - val_loss: 0.9058 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00171: val_acc did not improve from 0.86649\n",
+ "Epoch 172/3000\n",
+ " - 40s - loss: 0.3885 - acc: 0.9467 - val_loss: 0.9725 - val_acc: 0.8400\n",
+ "\n",
+ "Epoch 00172: val_acc did not improve from 0.86649\n",
+ "Epoch 173/3000\n",
+ " - 39s - loss: 0.3981 - acc: 0.9481 - val_loss: 0.9014 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00173: val_acc did not improve from 0.86649\n",
+ "Epoch 174/3000\n",
+ " - 40s - loss: 0.4189 - acc: 0.9460 - val_loss: 0.9159 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00174: val_acc did not improve from 0.86649\n",
+ "Epoch 175/3000\n",
+ " - 39s - loss: 0.4159 - acc: 0.9380 - val_loss: 0.9044 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00175: val_acc did not improve from 0.86649\n",
+ "Epoch 176/3000\n",
+ " - 39s - loss: 0.3944 - acc: 0.9482 - val_loss: 0.9412 - val_acc: 0.8385\n",
+ "\n",
+ "Epoch 00176: val_acc did not improve from 0.86649\n",
+ "Epoch 177/3000\n",
+ " - 39s - loss: 0.3890 - acc: 0.9502 - val_loss: 0.9233 - val_acc: 0.8451\n",
+ "\n",
+ "Epoch 00177: val_acc did not improve from 0.86649\n",
+ "Epoch 178/3000\n",
+ " - 39s - loss: 0.3900 - acc: 0.9515 - val_loss: 0.8865 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00178: val_acc did not improve from 0.86649\n",
+ "Epoch 179/3000\n",
+ " - 40s - loss: 0.4063 - acc: 0.9455 - val_loss: 0.9673 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00179: val_acc did not improve from 0.86649\n",
+ "Epoch 180/3000\n",
+ " - 39s - loss: 0.3787 - acc: 0.9499 - val_loss: 0.9494 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00180: val_acc did not improve from 0.86649\n",
+ "Epoch 181/3000\n",
+ " - 40s - loss: 0.4018 - acc: 0.9461 - val_loss: 0.9874 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00181: val_acc did not improve from 0.86649\n",
+ "Epoch 182/3000\n",
+ " - 39s - loss: 0.3996 - acc: 0.9493 - val_loss: 0.8876 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00182: val_acc did not improve from 0.86649\n",
+ "Epoch 183/3000\n",
+ " - 40s - loss: 0.4109 - acc: 0.9461 - val_loss: 0.8838 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00183: val_acc did not improve from 0.86649\n",
+ "Epoch 184/3000\n",
+ " - 39s - loss: 0.3980 - acc: 0.9457 - val_loss: 0.8478 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 00184: val_acc did not improve from 0.86649\n",
+ "Epoch 185/3000\n",
+ " - 40s - loss: 0.3885 - acc: 0.9467 - val_loss: 0.9210 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00185: val_acc did not improve from 0.86649\n",
+ "Epoch 186/3000\n",
+ " - 40s - loss: 0.3894 - acc: 0.9499 - val_loss: 0.9790 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00186: val_acc did not improve from 0.86649\n",
+ "Epoch 187/3000\n",
+ " - 39s - loss: 0.3871 - acc: 0.9494 - val_loss: 0.9039 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00187: val_acc did not improve from 0.86649\n",
+ "Epoch 188/3000\n",
+ " - 40s - loss: 0.3955 - acc: 0.9470 - val_loss: 0.9084 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00188: val_acc did not improve from 0.86649\n",
+ "Epoch 189/3000\n",
+ " - 40s - loss: 0.4062 - acc: 0.9446 - val_loss: 0.9570 - val_acc: 0.8318\n",
+ "\n",
+ "Epoch 00189: val_acc did not improve from 0.86649\n",
+ "Epoch 190/3000\n",
+ " - 39s - loss: 0.3884 - acc: 0.9469 - val_loss: 0.9881 - val_acc: 0.8400\n",
+ "\n",
+ "Epoch 00190: val_acc did not improve from 0.86649\n",
+ "Epoch 191/3000\n",
+ " - 39s - loss: 0.4025 - acc: 0.9472 - val_loss: 0.9180 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00191: val_acc did not improve from 0.86649\n",
+ "Epoch 192/3000\n",
+ " - 39s - loss: 0.3804 - acc: 0.9502 - val_loss: 0.9212 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00192: val_acc did not improve from 0.86649\n",
+ "Epoch 193/3000\n",
+ " - 39s - loss: 0.4011 - acc: 0.9416 - val_loss: 1.0128 - val_acc: 0.8385\n",
+ "\n",
+ "Epoch 00193: val_acc did not improve from 0.86649\n",
+ "Epoch 194/3000\n",
+ " - 40s - loss: 0.4137 - acc: 0.9426 - val_loss: 0.9164 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00194: val_acc did not improve from 0.86649\n",
+ "\n",
+ "Epoch 00194: ReduceLROnPlateau reducing learning rate to 8.145062311086804e-05.\n",
+ "Epoch 195/3000\n",
+ " - 39s - loss: 0.3887 - acc: 0.9482 - val_loss: 0.9217 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00195: val_acc did not improve from 0.86649\n",
+ "Epoch 196/3000\n",
+ " - 40s - loss: 0.3835 - acc: 0.9490 - val_loss: 0.9337 - val_acc: 0.8412\n",
+ "\n",
+ "Epoch 00196: val_acc did not improve from 0.86649\n",
+ "Epoch 197/3000\n",
+ " - 39s - loss: 0.3901 - acc: 0.9478 - val_loss: 0.9818 - val_acc: 0.8353\n",
+ "\n",
+ "Epoch 00197: val_acc did not improve from 0.86649\n",
+ "Epoch 198/3000\n",
+ " - 39s - loss: 0.4046 - acc: 0.9461 - val_loss: 1.0099 - val_acc: 0.8326\n",
+ "\n",
+ "Epoch 00198: val_acc did not improve from 0.86649\n",
+ "Epoch 199/3000\n",
+ " - 39s - loss: 0.4022 - acc: 0.9463 - val_loss: 0.8810 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00199: val_acc did not improve from 0.86649\n",
+ "Epoch 200/3000\n",
+ " - 39s - loss: 0.3884 - acc: 0.9505 - val_loss: 0.8836 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00200: val_acc did not improve from 0.86649\n",
+ "Epoch 201/3000\n",
+ " - 39s - loss: 0.4034 - acc: 0.9461 - val_loss: 0.9049 - val_acc: 0.8400\n",
+ "\n",
+ "Epoch 00201: val_acc did not improve from 0.86649\n",
+ "Epoch 202/3000\n",
+ " - 39s - loss: 0.3845 - acc: 0.9475 - val_loss: 0.9071 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00202: val_acc did not improve from 0.86649\n",
+ "Epoch 203/3000\n",
+ " - 40s - loss: 0.3795 - acc: 0.9503 - val_loss: 0.9272 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00203: val_acc did not improve from 0.86649\n",
+ "Epoch 204/3000\n",
+ " - 39s - loss: 0.3677 - acc: 0.9521 - val_loss: 0.8768 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00204: val_acc did not improve from 0.86649\n",
+ "Epoch 205/3000\n",
+ " - 40s - loss: 0.3786 - acc: 0.9527 - val_loss: 0.9458 - val_acc: 0.8365\n",
+ "\n",
+ "Epoch 00205: val_acc did not improve from 0.86649\n",
+ "Epoch 206/3000\n",
+ " - 39s - loss: 0.4036 - acc: 0.9481 - val_loss: 0.8992 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00206: val_acc did not improve from 0.86649\n",
+ "Epoch 207/3000\n",
+ " - 39s - loss: 0.3865 - acc: 0.9467 - val_loss: 0.8995 - val_acc: 0.8439\n",
+ "\n",
+ "Epoch 00207: val_acc did not improve from 0.86649\n",
+ "Epoch 208/3000\n",
+ " - 39s - loss: 0.3734 - acc: 0.9532 - val_loss: 0.9297 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00208: val_acc did not improve from 0.86649\n",
+ "Epoch 209/3000\n",
+ " - 40s - loss: 0.3811 - acc: 0.9517 - val_loss: 0.8610 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00209: val_acc did not improve from 0.86649\n",
+ "Epoch 210/3000\n",
+ " - 39s - loss: 0.3746 - acc: 0.9517 - val_loss: 0.9199 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00210: val_acc did not improve from 0.86649\n",
+ "Epoch 211/3000\n",
+ " - 39s - loss: 0.3745 - acc: 0.9505 - val_loss: 0.9545 - val_acc: 0.8361\n",
+ "\n",
+ "Epoch 00211: val_acc did not improve from 0.86649\n",
+ "Epoch 212/3000\n",
+ " - 40s - loss: 0.3858 - acc: 0.9478 - val_loss: 0.8884 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00212: val_acc did not improve from 0.86649\n",
+ "Epoch 213/3000\n",
+ " - 39s - loss: 0.3898 - acc: 0.9491 - val_loss: 0.9675 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00213: val_acc did not improve from 0.86649\n",
+ "Epoch 214/3000\n",
+ " - 39s - loss: 0.4043 - acc: 0.9485 - val_loss: 0.8941 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00214: val_acc did not improve from 0.86649\n",
+ "Epoch 215/3000\n",
+ " - 39s - loss: 0.3720 - acc: 0.9538 - val_loss: 0.9408 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00215: val_acc did not improve from 0.86649\n",
+ "Epoch 216/3000\n",
+ " - 39s - loss: 0.3596 - acc: 0.9541 - val_loss: 0.9415 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00216: val_acc did not improve from 0.86649\n",
+ "Epoch 217/3000\n",
+ " - 39s - loss: 0.3797 - acc: 0.9508 - val_loss: 0.9347 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00217: val_acc did not improve from 0.86649\n",
+ "Epoch 218/3000\n",
+ " - 39s - loss: 0.3925 - acc: 0.9473 - val_loss: 0.8918 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00218: val_acc did not improve from 0.86649\n",
+ "Epoch 219/3000\n",
+ " - 39s - loss: 0.3767 - acc: 0.9488 - val_loss: 0.8936 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00219: val_acc did not improve from 0.86649\n",
+ "Epoch 220/3000\n",
+ " - 40s - loss: 0.3981 - acc: 0.9444 - val_loss: 0.9101 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00220: val_acc did not improve from 0.86649\n",
+ "Epoch 221/3000\n",
+ " - 39s - loss: 0.3935 - acc: 0.9484 - val_loss: 0.9340 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00221: val_acc did not improve from 0.86649\n",
+ "Epoch 222/3000\n",
+ " - 39s - loss: 0.3827 - acc: 0.9454 - val_loss: 0.9111 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00224: val_acc did not improve from 0.86649\n",
+ "\n",
+ "Epoch 00224: ReduceLROnPlateau reducing learning rate to 7.737808919046074e-05.\n",
+ "Epoch 225/3000\n",
+ " - 39s - loss: 0.4087 - acc: 0.9426 - val_loss: 0.9580 - val_acc: 0.8396\n",
+ "\n",
+ "Epoch 00225: val_acc did not improve from 0.86649\n",
+ "Epoch 226/3000\n",
+ " - 40s - loss: 0.3773 - acc: 0.9485 - val_loss: 0.9078 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00226: val_acc did not improve from 0.86649\n",
+ "Epoch 227/3000\n",
+ " - 40s - loss: 0.3660 - acc: 0.9530 - val_loss: 0.8983 - val_acc: 0.8451\n",
+ "\n",
+ "Epoch 00227: val_acc did not improve from 0.86649\n",
+ "Epoch 228/3000\n",
+ " - 39s - loss: 0.3804 - acc: 0.9490 - val_loss: 0.8204 - val_acc: 0.8618\n",
+ "\n",
+ "Epoch 00228: val_acc did not improve from 0.86649\n",
+ "Epoch 229/3000\n",
+ " - 39s - loss: 0.3861 - acc: 0.9479 - val_loss: 0.8979 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00229: val_acc did not improve from 0.86649\n",
+ "Epoch 230/3000\n",
+ " - 39s - loss: 0.4036 - acc: 0.9449 - val_loss: 0.8535 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00230: val_acc did not improve from 0.86649\n",
+ "Epoch 231/3000\n",
+ " - 39s - loss: 0.3948 - acc: 0.9464 - val_loss: 0.9545 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00231: val_acc did not improve from 0.86649\n",
+ "Epoch 232/3000\n",
+ " - 39s - loss: 0.3763 - acc: 0.9517 - val_loss: 1.0186 - val_acc: 0.8338\n",
+ "\n",
+ "Epoch 00232: val_acc did not improve from 0.86649\n",
+ "Epoch 233/3000\n",
+ " - 39s - loss: 0.3607 - acc: 0.9521 - val_loss: 0.9043 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00233: val_acc did not improve from 0.86649\n",
+ "Epoch 234/3000\n",
+ " - 40s - loss: 0.3710 - acc: 0.9505 - val_loss: 0.9583 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00234: val_acc did not improve from 0.86649\n",
+ "Epoch 235/3000\n",
+ " - 39s - loss: 0.3781 - acc: 0.9488 - val_loss: 0.8799 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00235: val_acc did not improve from 0.86649\n",
+ "Epoch 236/3000\n",
+ " - 40s - loss: 0.3754 - acc: 0.9534 - val_loss: 0.9165 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00236: val_acc did not improve from 0.86649\n",
+ "Epoch 237/3000\n",
+ " - 40s - loss: 0.3724 - acc: 0.9540 - val_loss: 0.9543 - val_acc: 0.8388\n",
+ "\n",
+ "Epoch 00237: val_acc did not improve from 0.86649\n",
+ "Epoch 238/3000\n",
+ " - 40s - loss: 0.3855 - acc: 0.9484 - val_loss: 0.9242 - val_acc: 0.8474\n",
+ "\n",
+ "Epoch 00238: val_acc did not improve from 0.86649\n",
+ "Epoch 239/3000\n",
+ " - 39s - loss: 0.3637 - acc: 0.9529 - val_loss: 0.9195 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00239: val_acc did not improve from 0.86649\n",
+ "Epoch 240/3000\n",
+ " - 39s - loss: 0.3883 - acc: 0.9500 - val_loss: 0.9836 - val_acc: 0.8357\n",
+ "\n",
+ "Epoch 00240: val_acc did not improve from 0.86649\n",
+ "Epoch 241/3000\n",
+ " - 40s - loss: 0.3729 - acc: 0.9505 - val_loss: 0.8994 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00241: val_acc did not improve from 0.86649\n",
+ "Epoch 242/3000\n",
+ " - 39s - loss: 0.3858 - acc: 0.9478 - val_loss: 0.8928 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00242: val_acc did not improve from 0.86649\n",
+ "Epoch 243/3000\n",
+ " - 40s - loss: 0.3655 - acc: 0.9547 - val_loss: 0.8937 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00243: val_acc did not improve from 0.86649\n",
+ "Epoch 244/3000\n",
+ " - 39s - loss: 0.3747 - acc: 0.9490 - val_loss: 0.9015 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00244: val_acc did not improve from 0.86649\n",
+ "Epoch 245/3000\n",
+ " - 39s - loss: 0.3713 - acc: 0.9499 - val_loss: 0.9938 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00245: val_acc did not improve from 0.86649\n",
+ "Epoch 246/3000\n",
+ " - 39s - loss: 0.3796 - acc: 0.9500 - val_loss: 0.9559 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00246: val_acc did not improve from 0.86649\n",
+ "Epoch 247/3000\n",
+ " - 40s - loss: 0.3666 - acc: 0.9523 - val_loss: 0.9361 - val_acc: 0.8385\n",
+ "\n",
+ "Epoch 00247: val_acc did not improve from 0.86649\n",
+ "Epoch 248/3000\n",
+ " - 40s - loss: 0.3775 - acc: 0.9515 - val_loss: 0.8607 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00248: val_acc did not improve from 0.86649\n",
+ "Epoch 249/3000\n",
+ " - 39s - loss: 0.3624 - acc: 0.9541 - val_loss: 0.9073 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00249: val_acc did not improve from 0.86649\n",
+ "Epoch 250/3000\n",
+ " - 39s - loss: 0.3687 - acc: 0.9524 - val_loss: 0.9094 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00250: val_acc did not improve from 0.86649\n",
+ "Epoch 251/3000\n",
+ " - 39s - loss: 0.3681 - acc: 0.9534 - val_loss: 0.9190 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00251: val_acc did not improve from 0.86649\n",
+ "Epoch 252/3000\n",
+ " - 39s - loss: 0.3672 - acc: 0.9508 - val_loss: 0.8954 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00252: val_acc did not improve from 0.86649\n",
+ "Epoch 253/3000\n",
+ " - 40s - loss: 0.3870 - acc: 0.9473 - val_loss: 0.8745 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00253: val_acc did not improve from 0.86649\n",
+ "Epoch 254/3000\n",
+ " - 40s - loss: 0.3811 - acc: 0.9481 - val_loss: 0.9155 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00254: val_acc did not improve from 0.86649\n",
+ "\n",
+ "Epoch 00254: ReduceLROnPlateau reducing learning rate to 7.350918749580159e-05.\n",
+ "Epoch 255/3000\n",
+ " - 40s - loss: 0.3656 - acc: 0.9534 - val_loss: 0.8551 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00255: val_acc did not improve from 0.86649\n",
+ "Epoch 256/3000\n",
+ " - 39s - loss: 0.3653 - acc: 0.9514 - val_loss: 0.9610 - val_acc: 0.8338\n",
+ "\n",
+ "Epoch 00256: val_acc did not improve from 0.86649\n",
+ "Epoch 257/3000\n",
+ " - 39s - loss: 0.3980 - acc: 0.9463 - val_loss: 1.0120 - val_acc: 0.8295\n",
+ "\n",
+ "Epoch 00257: val_acc did not improve from 0.86649\n",
+ "Epoch 258/3000\n",
+ " - 40s - loss: 0.3869 - acc: 0.9481 - val_loss: 0.9067 - val_acc: 0.8439\n",
+ "\n",
+ "Epoch 00258: val_acc did not improve from 0.86649\n",
+ "Epoch 259/3000\n",
+ " - 39s - loss: 0.4147 - acc: 0.9423 - val_loss: 0.9569 - val_acc: 0.8416\n",
+ "\n",
+ "Epoch 00259: val_acc did not improve from 0.86649\n",
+ "Epoch 260/3000\n",
+ " - 39s - loss: 0.3765 - acc: 0.9493 - val_loss: 0.9149 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00260: val_acc did not improve from 0.86649\n",
+ "Epoch 261/3000\n",
+ " - 39s - loss: 0.3723 - acc: 0.9494 - val_loss: 0.9884 - val_acc: 0.8350\n",
+ "\n",
+ "Epoch 00261: val_acc did not improve from 0.86649\n",
+ "Epoch 262/3000\n",
+ " - 39s - loss: 0.3825 - acc: 0.9463 - val_loss: 0.9140 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00262: val_acc did not improve from 0.86649\n",
+ "Epoch 263/3000\n",
+ " - 40s - loss: 0.3643 - acc: 0.9535 - val_loss: 0.9832 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00263: val_acc did not improve from 0.86649\n",
+ "Epoch 264/3000\n",
+ " - 39s - loss: 0.3596 - acc: 0.9526 - val_loss: 0.9553 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00264: val_acc did not improve from 0.86649\n",
+ "Epoch 265/3000\n",
+ " - 39s - loss: 0.3603 - acc: 0.9532 - val_loss: 0.9267 - val_acc: 0.8420\n",
+ "\n",
+ "Epoch 00265: val_acc did not improve from 0.86649\n",
+ "Epoch 266/3000\n",
+ " - 39s - loss: 0.3523 - acc: 0.9583 - val_loss: 0.9068 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00266: val_acc did not improve from 0.86649\n",
+ "Epoch 267/3000\n",
+ " - 39s - loss: 0.3496 - acc: 0.9553 - val_loss: 0.8808 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00267: val_acc did not improve from 0.86649\n",
+ "Epoch 268/3000\n",
+ " - 39s - loss: 0.3839 - acc: 0.9479 - val_loss: 0.8474 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00268: val_acc did not improve from 0.86649\n",
+ "Epoch 269/3000\n",
+ " - 39s - loss: 0.3651 - acc: 0.9552 - val_loss: 0.9066 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00269: val_acc did not improve from 0.86649\n",
+ "Epoch 270/3000\n",
+ " - 39s - loss: 0.3781 - acc: 0.9494 - val_loss: 0.9599 - val_acc: 0.8439\n",
+ "\n",
+ "Epoch 00270: val_acc did not improve from 0.86649\n",
+ "Epoch 271/3000\n",
+ " - 40s - loss: 0.3577 - acc: 0.9529 - val_loss: 0.9236 - val_acc: 0.8474\n",
+ "\n",
+ "Epoch 00271: val_acc did not improve from 0.86649\n",
+ "Epoch 272/3000\n",
+ " - 40s - loss: 0.3792 - acc: 0.9473 - val_loss: 0.9265 - val_acc: 0.8439\n",
+ "\n",
+ "Epoch 00272: val_acc did not improve from 0.86649\n",
+ "Epoch 273/3000\n",
+ " - 40s - loss: 0.3674 - acc: 0.9523 - val_loss: 0.9896 - val_acc: 0.8373\n",
+ "\n",
+ "Epoch 00273: val_acc did not improve from 0.86649\n",
+ "Epoch 274/3000\n",
+ " - 39s - loss: 0.3655 - acc: 0.9526 - val_loss: 0.8524 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00274: val_acc did not improve from 0.86649\n",
+ "Epoch 275/3000\n",
+ " - 39s - loss: 0.3658 - acc: 0.9502 - val_loss: 0.9661 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00275: val_acc did not improve from 0.86649\n",
+ "Epoch 276/3000\n",
+ " - 39s - loss: 0.3713 - acc: 0.9500 - val_loss: 0.8588 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00276: val_acc did not improve from 0.86649\n",
+ "Epoch 277/3000\n",
+ " - 39s - loss: 0.3685 - acc: 0.9540 - val_loss: 0.9974 - val_acc: 0.8408\n",
+ "\n",
+ "Epoch 00277: val_acc did not improve from 0.86649\n",
+ "Epoch 278/3000\n",
+ " - 39s - loss: 0.3696 - acc: 0.9526 - val_loss: 0.8494 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00278: val_acc did not improve from 0.86649\n",
+ "Epoch 279/3000\n",
+ " - 39s - loss: 0.3585 - acc: 0.9538 - val_loss: 0.9636 - val_acc: 0.8392\n",
+ "\n",
+ "Epoch 00279: val_acc did not improve from 0.86649\n",
+ "Epoch 280/3000\n",
+ " - 39s - loss: 0.3744 - acc: 0.9490 - val_loss: 0.8726 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 00280: val_acc did not improve from 0.86649\n",
+ "Epoch 281/3000\n",
+ " - 40s - loss: 0.3828 - acc: 0.9469 - val_loss: 0.9184 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00281: val_acc did not improve from 0.86649\n",
+ "Epoch 282/3000\n",
+ " - 39s - loss: 0.3731 - acc: 0.9505 - val_loss: 0.8927 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00282: val_acc did not improve from 0.86649\n",
+ "Epoch 283/3000\n",
+ " - 39s - loss: 0.3597 - acc: 0.9524 - val_loss: 0.8174 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 00283: val_acc did not improve from 0.86649\n",
+ "Epoch 284/3000\n",
+ " - 39s - loss: 0.3644 - acc: 0.9515 - val_loss: 0.9366 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00284: val_acc did not improve from 0.86649\n",
+ "\n",
+ "Epoch 00284: ReduceLROnPlateau reducing learning rate to 6.983372950344346e-05.\n",
+ "Epoch 285/3000\n",
+ " - 38s - loss: 0.3537 - acc: 0.9524 - val_loss: 0.9509 - val_acc: 0.8474\n",
+ "\n",
+ "Epoch 00285: val_acc did not improve from 0.86649\n",
+ "Epoch 286/3000\n",
+ " - 39s - loss: 0.3539 - acc: 0.9556 - val_loss: 0.8748 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 00286: val_acc did not improve from 0.86649\n",
+ "Epoch 287/3000\n",
+ " - 40s - loss: 0.3665 - acc: 0.9512 - val_loss: 0.9176 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00287: val_acc did not improve from 0.86649\n",
+ "Epoch 288/3000\n",
+ " - 39s - loss: 0.3592 - acc: 0.9523 - val_loss: 0.8962 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00288: val_acc did not improve from 0.86649\n",
+ "Epoch 289/3000\n",
+ " - 39s - loss: 0.3485 - acc: 0.9558 - val_loss: 0.8369 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 00289: val_acc did not improve from 0.86649\n",
+ "Epoch 290/3000\n",
+ " - 39s - loss: 0.3433 - acc: 0.9537 - val_loss: 0.8916 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 00290: val_acc did not improve from 0.86649\n",
+ "Epoch 291/3000\n",
+ " - 39s - loss: 0.3764 - acc: 0.9499 - val_loss: 0.9457 - val_acc: 0.8439\n",
+ "\n",
+ "Epoch 00291: val_acc did not improve from 0.86649\n",
+ "Epoch 292/3000\n",
+ " - 39s - loss: 0.3546 - acc: 0.9543 - val_loss: 0.9729 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00292: val_acc did not improve from 0.86649\n",
+ "Epoch 293/3000\n",
+ " - 40s - loss: 0.3677 - acc: 0.9505 - val_loss: 0.9273 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00293: val_acc did not improve from 0.86649\n",
+ "Epoch 294/3000\n",
+ " - 39s - loss: 0.3894 - acc: 0.9470 - val_loss: 0.9159 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00294: val_acc did not improve from 0.86649\n",
+ "Epoch 295/3000\n",
+ " - 40s - loss: 0.3607 - acc: 0.9532 - val_loss: 0.9351 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00295: val_acc did not improve from 0.86649\n",
+ "Epoch 296/3000\n",
+ " - 39s - loss: 0.3785 - acc: 0.9485 - val_loss: 0.9046 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00296: val_acc did not improve from 0.86649\n",
+ "Epoch 297/3000\n",
+ " - 39s - loss: 0.3575 - acc: 0.9547 - val_loss: 0.9030 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00297: val_acc did not improve from 0.86649\n",
+ "Epoch 298/3000\n",
+ " - 40s - loss: 0.3679 - acc: 0.9521 - val_loss: 0.8527 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00298: val_acc did not improve from 0.86649\n",
+ "Epoch 299/3000\n",
+ " - 40s - loss: 0.3658 - acc: 0.9527 - val_loss: 0.8763 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00299: val_acc did not improve from 0.86649\n",
+ "Epoch 300/3000\n",
+ " - 39s - loss: 0.3501 - acc: 0.9561 - val_loss: 0.8783 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00300: val_acc did not improve from 0.86649\n",
+ "Epoch 301/3000\n",
+ " - 40s - loss: 0.3594 - acc: 0.9538 - val_loss: 0.9492 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00301: val_acc did not improve from 0.86649\n",
+ "Epoch 302/3000\n",
+ " - 40s - loss: 0.3566 - acc: 0.9541 - val_loss: 0.9000 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00302: val_acc did not improve from 0.86649\n",
+ "Epoch 303/3000\n",
+ " - 39s - loss: 0.3498 - acc: 0.9567 - val_loss: 0.9092 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00303: val_acc did not improve from 0.86649\n",
+ "Epoch 304/3000\n",
+ " - 39s - loss: 0.3646 - acc: 0.9534 - val_loss: 0.9032 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00304: val_acc did not improve from 0.86649\n",
+ "Epoch 305/3000\n",
+ " - 39s - loss: 0.3381 - acc: 0.9588 - val_loss: 0.8957 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00305: val_acc did not improve from 0.86649\n",
+ "Epoch 306/3000\n",
+ " - 39s - loss: 0.3567 - acc: 0.9541 - val_loss: 0.8587 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 00306: val_acc did not improve from 0.86649\n",
+ "Epoch 307/3000\n",
+ " - 39s - loss: 0.3696 - acc: 0.9493 - val_loss: 0.9091 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00307: val_acc did not improve from 0.86649\n",
+ "Epoch 308/3000\n",
+ " - 39s - loss: 0.3529 - acc: 0.9530 - val_loss: 0.9425 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00308: val_acc did not improve from 0.86649\n",
+ "Epoch 309/3000\n",
+ " - 39s - loss: 0.3585 - acc: 0.9540 - val_loss: 0.9658 - val_acc: 0.8307\n",
+ "\n",
+ "Epoch 00309: val_acc did not improve from 0.86649\n",
+ "Epoch 310/3000\n",
+ " - 39s - loss: 0.3701 - acc: 0.9482 - val_loss: 0.9474 - val_acc: 0.8404\n",
+ "\n",
+ "Epoch 00310: val_acc did not improve from 0.86649\n",
+ "Epoch 311/3000\n",
+ " - 39s - loss: 0.3728 - acc: 0.9485 - val_loss: 0.8468 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00311: val_acc did not improve from 0.86649\n",
+ "Epoch 312/3000\n",
+ " - 40s - loss: 0.3428 - acc: 0.9547 - val_loss: 0.8516 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00312: val_acc did not improve from 0.86649\n",
+ "Epoch 313/3000\n",
+ " - 39s - loss: 0.3676 - acc: 0.9511 - val_loss: 0.9169 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00313: val_acc did not improve from 0.86649\n",
+ "Epoch 314/3000\n",
+ " - 39s - loss: 0.3761 - acc: 0.9464 - val_loss: 0.8943 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00314: val_acc did not improve from 0.86649\n",
+ "\n",
+ "Epoch 00314: ReduceLROnPlateau reducing learning rate to 6.634204510191921e-05.\n",
+ "Epoch 315/3000\n",
+ " - 39s - loss: 0.3402 - acc: 0.9567 - val_loss: 0.9325 - val_acc: 0.8377\n",
+ "\n",
+ "Epoch 00315: val_acc did not improve from 0.86649\n",
+ "Epoch 316/3000\n",
+ " - 39s - loss: 0.3531 - acc: 0.9526 - val_loss: 0.8943 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00316: val_acc did not improve from 0.86649\n",
+ "Epoch 317/3000\n",
+ " - 39s - loss: 0.3568 - acc: 0.9538 - val_loss: 0.9239 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00317: val_acc did not improve from 0.86649\n",
+ "Epoch 318/3000\n",
+ " - 39s - loss: 0.3511 - acc: 0.9546 - val_loss: 0.8757 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 00318: val_acc did not improve from 0.86649\n",
+ "Epoch 319/3000\n",
+ " - 39s - loss: 0.3493 - acc: 0.9565 - val_loss: 0.8912 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00319: val_acc did not improve from 0.86649\n",
+ "Epoch 320/3000\n",
+ " - 40s - loss: 0.3661 - acc: 0.9515 - val_loss: 0.9028 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00320: val_acc did not improve from 0.86649\n",
+ "Epoch 321/3000\n",
+ " - 39s - loss: 0.3615 - acc: 0.9559 - val_loss: 0.9068 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00321: val_acc did not improve from 0.86649\n",
+ "Epoch 322/3000\n",
+ " - 39s - loss: 0.3571 - acc: 0.9530 - val_loss: 0.9152 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00322: val_acc did not improve from 0.86649\n",
+ "Epoch 323/3000\n",
+ " - 39s - loss: 0.3619 - acc: 0.9535 - val_loss: 0.8739 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00323: val_acc did not improve from 0.86649\n",
+ "Epoch 324/3000\n",
+ " - 39s - loss: 0.3484 - acc: 0.9538 - val_loss: 0.9118 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00324: val_acc did not improve from 0.86649\n",
+ "Epoch 325/3000\n",
+ " - 39s - loss: 0.3583 - acc: 0.9556 - val_loss: 0.9312 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00325: val_acc did not improve from 0.86649\n",
+ "Epoch 326/3000\n",
+ " - 39s - loss: 0.3683 - acc: 0.9506 - val_loss: 0.9007 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00326: val_acc did not improve from 0.86649\n",
+ "Epoch 327/3000\n",
+ " - 39s - loss: 0.3640 - acc: 0.9529 - val_loss: 0.9076 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00327: val_acc did not improve from 0.86649\n",
+ "Epoch 328/3000\n",
+ " - 39s - loss: 0.3379 - acc: 0.9583 - val_loss: 0.8766 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00328: val_acc did not improve from 0.86649\n",
+ "Epoch 329/3000\n",
+ " - 39s - loss: 0.3538 - acc: 0.9537 - val_loss: 0.9582 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00329: val_acc did not improve from 0.86649\n",
+ "Epoch 330/3000\n",
+ " - 39s - loss: 0.3485 - acc: 0.9538 - val_loss: 0.8958 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00330: val_acc did not improve from 0.86649\n",
+ "Epoch 331/3000\n",
+ " - 40s - loss: 0.3540 - acc: 0.9534 - val_loss: 0.8465 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00331: val_acc did not improve from 0.86649\n",
+ "Epoch 332/3000\n",
+ " - 39s - loss: 0.3498 - acc: 0.9574 - val_loss: 0.9472 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00332: val_acc did not improve from 0.86649\n",
+ "Epoch 333/3000\n",
+ " - 39s - loss: 0.3407 - acc: 0.9570 - val_loss: 0.9152 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00333: val_acc did not improve from 0.86649\n",
+ "Epoch 334/3000\n",
+ " - 39s - loss: 0.3713 - acc: 0.9541 - val_loss: 0.8634 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00334: val_acc did not improve from 0.86649\n",
+ "Epoch 335/3000\n",
+ " - 39s - loss: 0.3541 - acc: 0.9537 - val_loss: 0.9005 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00335: val_acc did not improve from 0.86649\n",
+ "Epoch 336/3000\n",
+ " - 39s - loss: 0.3709 - acc: 0.9506 - val_loss: 0.9184 - val_acc: 0.8392\n",
+ "\n",
+ "Epoch 00336: val_acc did not improve from 0.86649\n",
+ "Epoch 337/3000\n",
+ " - 39s - loss: 0.3675 - acc: 0.9500 - val_loss: 0.8765 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00337: val_acc did not improve from 0.86649\n",
+ "Epoch 338/3000\n",
+ " - 39s - loss: 0.3534 - acc: 0.9562 - val_loss: 0.9150 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00338: val_acc did not improve from 0.86649\n",
+ "Epoch 339/3000\n",
+ " - 39s - loss: 0.3440 - acc: 0.9561 - val_loss: 0.9922 - val_acc: 0.8400\n",
+ "\n",
+ "Epoch 00339: val_acc did not improve from 0.86649\n",
+ "Epoch 340/3000\n",
+ " - 39s - loss: 0.3579 - acc: 0.9535 - val_loss: 0.9198 - val_acc: 0.8451\n",
+ "\n",
+ "Epoch 00340: val_acc did not improve from 0.86649\n",
+ "Epoch 341/3000\n",
+ " - 39s - loss: 0.3605 - acc: 0.9511 - val_loss: 0.8777 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00341: val_acc did not improve from 0.86649\n",
+ "Epoch 342/3000\n",
+ " - 39s - loss: 0.3638 - acc: 0.9511 - val_loss: 0.9121 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00342: val_acc did not improve from 0.86649\n",
+ "Epoch 343/3000\n",
+ " - 39s - loss: 0.3619 - acc: 0.9514 - val_loss: 0.9426 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00343: val_acc did not improve from 0.86649\n",
+ "Epoch 344/3000\n",
+ " - 39s - loss: 0.3542 - acc: 0.9553 - val_loss: 0.9400 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00344: val_acc did not improve from 0.86649\n",
+ "\n",
+ "Epoch 00344: ReduceLROnPlateau reducing learning rate to 6.302494111878331e-05.\n",
+ "Epoch 345/3000\n",
+ " - 40s - loss: 0.3759 - acc: 0.9470 - val_loss: 0.8714 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00345: val_acc did not improve from 0.86649\n",
+ "Epoch 346/3000\n",
+ " - 39s - loss: 0.3568 - acc: 0.9523 - val_loss: 0.9523 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00346: val_acc did not improve from 0.86649\n",
+ "Epoch 347/3000\n",
+ " - 39s - loss: 0.3402 - acc: 0.9568 - val_loss: 0.9460 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00347: val_acc did not improve from 0.86649\n",
+ "Epoch 348/3000\n",
+ " - 39s - loss: 0.3569 - acc: 0.9527 - val_loss: 0.8606 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00348: val_acc did not improve from 0.86649\n",
+ "Epoch 349/3000\n",
+ " - 39s - loss: 0.3515 - acc: 0.9555 - val_loss: 0.9580 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00349: val_acc did not improve from 0.86649\n",
+ "Epoch 350/3000\n",
+ " - 39s - loss: 0.3514 - acc: 0.9541 - val_loss: 0.9491 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00350: val_acc did not improve from 0.86649\n",
+ "Epoch 351/3000\n",
+ " - 39s - loss: 0.3516 - acc: 0.9535 - val_loss: 0.9365 - val_acc: 0.8381\n",
+ "\n",
+ "Epoch 00351: val_acc did not improve from 0.86649\n",
+ "Epoch 352/3000\n",
+ " - 39s - loss: 0.3413 - acc: 0.9561 - val_loss: 0.9349 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00352: val_acc did not improve from 0.86649\n",
+ "Epoch 353/3000\n",
+ " - 39s - loss: 0.3529 - acc: 0.9534 - val_loss: 0.9106 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00353: val_acc did not improve from 0.86649\n",
+ "Epoch 354/3000\n",
+ " - 40s - loss: 0.3388 - acc: 0.9576 - val_loss: 0.8650 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00354: val_acc did not improve from 0.86649\n",
+ "Epoch 355/3000\n",
+ " - 39s - loss: 0.3457 - acc: 0.9576 - val_loss: 0.9217 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00355: val_acc did not improve from 0.86649\n",
+ "Epoch 356/3000\n",
+ " - 39s - loss: 0.3485 - acc: 0.9546 - val_loss: 0.9274 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00356: val_acc did not improve from 0.86649\n",
+ "Epoch 357/3000\n",
+ " - 39s - loss: 0.3410 - acc: 0.9561 - val_loss: 0.9489 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00357: val_acc did not improve from 0.86649\n",
+ "Epoch 358/3000\n",
+ " - 39s - loss: 0.3405 - acc: 0.9576 - val_loss: 0.9048 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00358: val_acc did not improve from 0.86649\n",
+ "Epoch 359/3000\n",
+ " - 39s - loss: 0.3482 - acc: 0.9550 - val_loss: 0.9928 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00359: val_acc did not improve from 0.86649\n",
+ "Epoch 360/3000\n",
+ " - 39s - loss: 0.3555 - acc: 0.9561 - val_loss: 0.9409 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00360: val_acc did not improve from 0.86649\n",
+ "Epoch 361/3000\n",
+ " - 39s - loss: 0.3373 - acc: 0.9562 - val_loss: 0.8830 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00361: val_acc did not improve from 0.86649\n",
+ "Epoch 362/3000\n",
+ " - 39s - loss: 0.3407 - acc: 0.9527 - val_loss: 0.9251 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00362: val_acc did not improve from 0.86649\n",
+ "Epoch 363/3000\n",
+ " - 39s - loss: 0.3546 - acc: 0.9565 - val_loss: 0.8666 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00363: val_acc did not improve from 0.86649\n",
+ "Epoch 364/3000\n",
+ " - 39s - loss: 0.3530 - acc: 0.9526 - val_loss: 0.9651 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00364: val_acc did not improve from 0.86649\n",
+ "Epoch 365/3000\n",
+ " - 39s - loss: 0.3449 - acc: 0.9586 - val_loss: 0.9295 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00365: val_acc did not improve from 0.86649\n",
+ "Epoch 366/3000\n",
+ " - 39s - loss: 0.3288 - acc: 0.9567 - val_loss: 1.0094 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00366: val_acc did not improve from 0.86649\n",
+ "Epoch 367/3000\n",
+ " - 39s - loss: 0.3459 - acc: 0.9553 - val_loss: 0.9412 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00367: val_acc did not improve from 0.86649\n",
+ "Epoch 368/3000\n",
+ " - 39s - loss: 0.3469 - acc: 0.9565 - val_loss: 0.8496 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 00368: val_acc did not improve from 0.86649\n",
+ "Epoch 369/3000\n",
+ " - 39s - loss: 0.3490 - acc: 0.9527 - val_loss: 1.0578 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00369: val_acc did not improve from 0.86649\n",
+ "Epoch 370/3000\n",
+ " - 40s - loss: 0.3739 - acc: 0.9506 - val_loss: 0.9138 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00370: val_acc did not improve from 0.86649\n",
+ "Epoch 371/3000\n",
+ " - 39s - loss: 0.3419 - acc: 0.9556 - val_loss: 0.9400 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00371: val_acc did not improve from 0.86649\n",
+ "Epoch 372/3000\n",
+ " - 39s - loss: 0.3464 - acc: 0.9555 - val_loss: 0.9172 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00372: val_acc did not improve from 0.86649\n",
+ "Epoch 373/3000\n",
+ " - 39s - loss: 0.3294 - acc: 0.9595 - val_loss: 0.8920 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00373: val_acc did not improve from 0.86649\n",
+ "Epoch 374/3000\n",
+ " - 39s - loss: 0.3560 - acc: 0.9523 - val_loss: 0.8554 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00374: val_acc did not improve from 0.86649\n",
+ "\n",
+ "Epoch 00374: ReduceLROnPlateau reducing learning rate to 5.9873694408452134e-05.\n",
+ "Epoch 375/3000\n",
+ " - 40s - loss: 0.3404 - acc: 0.9543 - val_loss: 0.9453 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00375: val_acc did not improve from 0.86649\n",
+ "Epoch 376/3000\n",
+ " - 39s - loss: 0.3477 - acc: 0.9555 - val_loss: 0.9316 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00376: val_acc did not improve from 0.86649\n",
+ "Epoch 377/3000\n",
+ " - 39s - loss: 0.3447 - acc: 0.9543 - val_loss: 0.8490 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 00377: val_acc did not improve from 0.86649\n",
+ "Epoch 378/3000\n",
+ " - 40s - loss: 0.3414 - acc: 0.9564 - val_loss: 0.9153 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00378: val_acc did not improve from 0.86649\n",
+ "Epoch 379/3000\n",
+ " - 39s - loss: 0.3439 - acc: 0.9565 - val_loss: 0.8819 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00379: val_acc did not improve from 0.86649\n",
+ "Epoch 380/3000\n",
+ " - 39s - loss: 0.3306 - acc: 0.9571 - val_loss: 0.8860 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00380: val_acc did not improve from 0.86649\n",
+ "Epoch 381/3000\n",
+ " - 39s - loss: 0.3328 - acc: 0.9592 - val_loss: 0.9399 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00381: val_acc did not improve from 0.86649\n",
+ "Epoch 382/3000\n",
+ " - 39s - loss: 0.3626 - acc: 0.9530 - val_loss: 0.8948 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00382: val_acc did not improve from 0.86649\n",
+ "Epoch 383/3000\n",
+ " - 39s - loss: 0.3342 - acc: 0.9562 - val_loss: 0.9457 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00383: val_acc did not improve from 0.86649\n",
+ "Epoch 384/3000\n",
+ " - 39s - loss: 0.3262 - acc: 0.9594 - val_loss: 0.8822 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00384: val_acc did not improve from 0.86649\n",
+ "Epoch 385/3000\n",
+ " - 39s - loss: 0.3446 - acc: 0.9561 - val_loss: 0.8581 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00385: val_acc did not improve from 0.86649\n",
+ "Epoch 386/3000\n",
+ " - 40s - loss: 0.3536 - acc: 0.9561 - val_loss: 0.9542 - val_acc: 0.8404\n",
+ "\n",
+ "Epoch 00386: val_acc did not improve from 0.86649\n",
+ "Epoch 387/3000\n",
+ " - 39s - loss: 0.3523 - acc: 0.9553 - val_loss: 0.9583 - val_acc: 0.8334\n",
+ "\n",
+ "Epoch 00387: val_acc did not improve from 0.86649\n",
+ "Epoch 388/3000\n",
+ " - 40s - loss: 0.3573 - acc: 0.9558 - val_loss: 0.8497 - val_acc: 0.8614\n",
+ "\n",
+ "Epoch 00388: val_acc did not improve from 0.86649\n",
+ "Epoch 389/3000\n",
+ " - 39s - loss: 0.3448 - acc: 0.9564 - val_loss: 0.9241 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00390: val_acc did not improve from 0.86649\n",
+ "Epoch 391/3000\n",
+ " - 39s - loss: 0.3355 - acc: 0.9601 - val_loss: 0.9487 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00391: val_acc did not improve from 0.86649\n",
+ "Epoch 392/3000\n",
+ " - 39s - loss: 0.3368 - acc: 0.9591 - val_loss: 0.9421 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00392: val_acc did not improve from 0.86649\n",
+ "Epoch 393/3000\n",
+ " - 40s - loss: 0.3445 - acc: 0.9546 - val_loss: 0.9602 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00393: val_acc did not improve from 0.86649\n",
+ "Epoch 394/3000\n",
+ " - 39s - loss: 0.3369 - acc: 0.9582 - val_loss: 1.0126 - val_acc: 0.8404\n",
+ "\n",
+ "Epoch 00394: val_acc did not improve from 0.86649\n",
+ "Epoch 395/3000\n",
+ " - 39s - loss: 0.3393 - acc: 0.9553 - val_loss: 0.9342 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00395: val_acc did not improve from 0.86649\n",
+ "Epoch 396/3000\n",
+ " - 39s - loss: 0.3227 - acc: 0.9615 - val_loss: 0.8635 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00396: val_acc did not improve from 0.86649\n",
+ "Epoch 397/3000\n",
+ " - 39s - loss: 0.3304 - acc: 0.9580 - val_loss: 0.9210 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00397: val_acc did not improve from 0.86649\n",
+ "Epoch 398/3000\n",
+ " - 39s - loss: 0.3367 - acc: 0.9574 - val_loss: 0.8472 - val_acc: 0.8677\n",
+ "\n",
+ "Epoch 00398: val_acc improved from 0.86649 to 0.86765, saving model to ./ModelSnapshots/LSTM-v2-398.h5\n",
+ "Epoch 399/3000\n",
+ " - 39s - loss: 0.3272 - acc: 0.9583 - val_loss: 0.8903 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00399: val_acc did not improve from 0.86765\n",
+ "Epoch 400/3000\n",
+ " - 39s - loss: 0.3386 - acc: 0.9577 - val_loss: 0.8627 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00400: val_acc did not improve from 0.86765\n",
+ "Epoch 401/3000\n",
+ " - 40s - loss: 0.3461 - acc: 0.9568 - val_loss: 0.9331 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00401: val_acc did not improve from 0.86765\n",
+ "Epoch 402/3000\n",
+ " - 39s - loss: 0.3497 - acc: 0.9555 - val_loss: 0.8815 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00402: val_acc did not improve from 0.86765\n",
+ "Epoch 403/3000\n",
+ " - 40s - loss: 0.3369 - acc: 0.9562 - val_loss: 0.9459 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00403: val_acc did not improve from 0.86765\n",
+ "Epoch 404/3000\n",
+ " - 39s - loss: 0.3439 - acc: 0.9532 - val_loss: 0.8664 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 00404: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00404: ReduceLROnPlateau reducing learning rate to 5.68800103792455e-05.\n",
+ "Epoch 405/3000\n",
+ " - 39s - loss: 0.3288 - acc: 0.9592 - val_loss: 0.8931 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00405: val_acc did not improve from 0.86765\n",
+ "Epoch 406/3000\n",
+ " - 39s - loss: 0.3252 - acc: 0.9591 - val_loss: 0.9370 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00406: val_acc did not improve from 0.86765\n",
+ "Epoch 407/3000\n",
+ " - 39s - loss: 0.3420 - acc: 0.9562 - val_loss: 0.8960 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00407: val_acc did not improve from 0.86765\n",
+ "Epoch 408/3000\n",
+ " - 39s - loss: 0.3438 - acc: 0.9552 - val_loss: 0.9718 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00408: val_acc did not improve from 0.86765\n",
+ "Epoch 409/3000\n",
+ " - 39s - loss: 0.3356 - acc: 0.9574 - val_loss: 0.9235 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00409: val_acc did not improve from 0.86765\n",
+ "Epoch 410/3000\n",
+ " - 39s - loss: 0.3196 - acc: 0.9603 - val_loss: 0.8746 - val_acc: 0.8614\n",
+ "\n",
+ "Epoch 00410: val_acc did not improve from 0.86765\n",
+ "Epoch 411/3000\n",
+ " - 39s - loss: 0.3455 - acc: 0.9568 - val_loss: 0.8772 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00411: val_acc did not improve from 0.86765\n",
+ "Epoch 412/3000\n",
+ " - 39s - loss: 0.3620 - acc: 0.9526 - val_loss: 0.8925 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00412: val_acc did not improve from 0.86765\n",
+ "Epoch 413/3000\n",
+ " - 39s - loss: 0.3418 - acc: 0.9570 - val_loss: 0.8578 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 00413: val_acc did not improve from 0.86765\n",
+ "Epoch 414/3000\n",
+ " - 39s - loss: 0.3230 - acc: 0.9583 - val_loss: 0.9399 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00414: val_acc did not improve from 0.86765\n",
+ "Epoch 415/3000\n",
+ " - 39s - loss: 0.3225 - acc: 0.9577 - val_loss: 0.9693 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00415: val_acc did not improve from 0.86765\n",
+ "Epoch 416/3000\n",
+ " - 39s - loss: 0.3343 - acc: 0.9574 - val_loss: 0.8807 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00416: val_acc did not improve from 0.86765\n",
+ "Epoch 417/3000\n",
+ " - 39s - loss: 0.3379 - acc: 0.9561 - val_loss: 0.8396 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 00417: val_acc did not improve from 0.86765\n",
+ "Epoch 418/3000\n",
+ " - 39s - loss: 0.3500 - acc: 0.9515 - val_loss: 0.8685 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00418: val_acc did not improve from 0.86765\n",
+ "Epoch 419/3000\n",
+ " - 39s - loss: 0.3236 - acc: 0.9594 - val_loss: 0.8988 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00419: val_acc did not improve from 0.86765\n",
+ "Epoch 420/3000\n",
+ " - 39s - loss: 0.3380 - acc: 0.9535 - val_loss: 0.8662 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 00420: val_acc did not improve from 0.86765\n",
+ "Epoch 421/3000\n",
+ " - 39s - loss: 0.3282 - acc: 0.9588 - val_loss: 0.9285 - val_acc: 0.8470\n",
+ "\n",
+ "Epoch 00421: val_acc did not improve from 0.86765\n",
+ "Epoch 422/3000\n",
+ " - 39s - loss: 0.3402 - acc: 0.9573 - val_loss: 0.9211 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00422: val_acc did not improve from 0.86765\n",
+ "Epoch 423/3000\n",
+ " - 39s - loss: 0.3400 - acc: 0.9570 - val_loss: 0.8997 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00423: val_acc did not improve from 0.86765\n",
+ "Epoch 424/3000\n",
+ " - 39s - loss: 0.3168 - acc: 0.9633 - val_loss: 0.8881 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00424: val_acc did not improve from 0.86765\n",
+ "Epoch 425/3000\n",
+ " - 39s - loss: 0.3453 - acc: 0.9556 - val_loss: 0.9445 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00425: val_acc did not improve from 0.86765\n",
+ "Epoch 426/3000\n",
+ " - 39s - loss: 0.3390 - acc: 0.9570 - val_loss: 0.9125 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00426: val_acc did not improve from 0.86765\n",
+ "Epoch 427/3000\n",
+ " - 39s - loss: 0.3397 - acc: 0.9552 - val_loss: 0.8986 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00427: val_acc did not improve from 0.86765\n",
+ "Epoch 428/3000\n",
+ " - 39s - loss: 0.3488 - acc: 0.9553 - val_loss: 0.8960 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00428: val_acc did not improve from 0.86765\n",
+ "Epoch 429/3000\n",
+ " - 39s - loss: 0.3300 - acc: 0.9600 - val_loss: 0.8449 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 00429: val_acc did not improve from 0.86765\n",
+ "Epoch 430/3000\n",
+ " - 39s - loss: 0.3296 - acc: 0.9588 - val_loss: 0.9478 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00430: val_acc did not improve from 0.86765\n",
+ "Epoch 431/3000\n",
+ " - 39s - loss: 0.3305 - acc: 0.9589 - val_loss: 0.8713 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00431: val_acc did not improve from 0.86765\n",
+ "Epoch 432/3000\n",
+ " - 39s - loss: 0.3278 - acc: 0.9580 - val_loss: 0.9387 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00432: val_acc did not improve from 0.86765\n",
+ "Epoch 433/3000\n",
+ " - 39s - loss: 0.3489 - acc: 0.9515 - val_loss: 0.9361 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00433: val_acc did not improve from 0.86765\n",
+ "Epoch 434/3000\n",
+ " - 39s - loss: 0.3327 - acc: 0.9562 - val_loss: 0.9641 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00434: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00434: ReduceLROnPlateau reducing learning rate to 5.4036009169067255e-05.\n",
+ "Epoch 435/3000\n",
+ " - 39s - loss: 0.3291 - acc: 0.9597 - val_loss: 0.9595 - val_acc: 0.8381\n",
+ "\n",
+ "Epoch 00435: val_acc did not improve from 0.86765\n",
+ "Epoch 436/3000\n",
+ " - 39s - loss: 0.3637 - acc: 0.9553 - val_loss: 0.9519 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00436: val_acc did not improve from 0.86765\n",
+ "Epoch 437/3000\n",
+ " - 39s - loss: 0.3492 - acc: 0.9565 - val_loss: 1.0486 - val_acc: 0.8326\n",
+ "\n",
+ "Epoch 00437: val_acc did not improve from 0.86765\n",
+ "Epoch 438/3000\n",
+ " - 39s - loss: 0.3500 - acc: 0.9518 - val_loss: 0.9460 - val_acc: 0.8451\n",
+ "\n",
+ "Epoch 00438: val_acc did not improve from 0.86765\n",
+ "Epoch 439/3000\n",
+ " - 39s - loss: 0.3331 - acc: 0.9579 - val_loss: 0.9051 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00439: val_acc did not improve from 0.86765\n",
+ "Epoch 440/3000\n",
+ " - 39s - loss: 0.3301 - acc: 0.9580 - val_loss: 0.8741 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00440: val_acc did not improve from 0.86765\n",
+ "Epoch 441/3000\n",
+ " - 39s - loss: 0.3285 - acc: 0.9606 - val_loss: 0.9382 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00441: val_acc did not improve from 0.86765\n",
+ "Epoch 442/3000\n",
+ " - 39s - loss: 0.3185 - acc: 0.9626 - val_loss: 0.9443 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00442: val_acc did not improve from 0.86765\n",
+ "Epoch 443/3000\n",
+ " - 39s - loss: 0.3345 - acc: 0.9580 - val_loss: 0.9091 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00443: val_acc did not improve from 0.86765\n",
+ "Epoch 444/3000\n",
+ " - 40s - loss: 0.3237 - acc: 0.9589 - val_loss: 0.8787 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00444: val_acc did not improve from 0.86765\n",
+ "Epoch 445/3000\n",
+ " - 39s - loss: 0.3170 - acc: 0.9595 - val_loss: 0.9144 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00445: val_acc did not improve from 0.86765\n",
+ "Epoch 446/3000\n",
+ " - 39s - loss: 0.3250 - acc: 0.9580 - val_loss: 0.9949 - val_acc: 0.8404\n",
+ "\n",
+ "Epoch 00446: val_acc did not improve from 0.86765\n",
+ "Epoch 447/3000\n",
+ " - 40s - loss: 0.3482 - acc: 0.9532 - val_loss: 0.9225 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00447: val_acc did not improve from 0.86765\n",
+ "Epoch 448/3000\n",
+ " - 39s - loss: 0.3508 - acc: 0.9574 - val_loss: 0.9944 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00448: val_acc did not improve from 0.86765\n",
+ "Epoch 449/3000\n",
+ " - 39s - loss: 0.3301 - acc: 0.9565 - val_loss: 0.8755 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00449: val_acc did not improve from 0.86765\n",
+ "Epoch 450/3000\n",
+ " - 39s - loss: 0.3210 - acc: 0.9615 - val_loss: 0.9460 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00450: val_acc did not improve from 0.86765\n",
+ "Epoch 451/3000\n",
+ " - 39s - loss: 0.3255 - acc: 0.9562 - val_loss: 0.9479 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00451: val_acc did not improve from 0.86765\n",
+ "Epoch 452/3000\n",
+ " - 40s - loss: 0.3187 - acc: 0.9589 - val_loss: 0.9648 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00452: val_acc did not improve from 0.86765\n",
+ "Epoch 453/3000\n",
+ " - 40s - loss: 0.3272 - acc: 0.9603 - val_loss: 0.9094 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 00453: val_acc did not improve from 0.86765\n",
+ "Epoch 454/3000\n",
+ " - 39s - loss: 0.3186 - acc: 0.9583 - val_loss: 0.9130 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00454: val_acc did not improve from 0.86765\n",
+ "Epoch 455/3000\n",
+ " - 39s - loss: 0.3492 - acc: 0.9540 - val_loss: 0.9120 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00455: val_acc did not improve from 0.86765\n",
+ "Epoch 456/3000\n",
+ " - 39s - loss: 0.3335 - acc: 0.9586 - val_loss: 0.9360 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00456: val_acc did not improve from 0.86765\n",
+ "Epoch 457/3000\n",
+ " - 39s - loss: 0.3273 - acc: 0.9568 - val_loss: 0.9653 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00457: val_acc did not improve from 0.86765\n",
+ "Epoch 458/3000\n",
+ " - 40s - loss: 0.3327 - acc: 0.9576 - val_loss: 0.9670 - val_acc: 0.8439\n",
+ "\n",
+ "Epoch 00458: val_acc did not improve from 0.86765\n",
+ "Epoch 459/3000\n",
+ " - 39s - loss: 0.3384 - acc: 0.9565 - val_loss: 0.9111 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00459: val_acc did not improve from 0.86765\n",
+ "Epoch 460/3000\n",
+ " - 39s - loss: 0.3256 - acc: 0.9582 - val_loss: 0.9494 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00460: val_acc did not improve from 0.86765\n",
+ "Epoch 461/3000\n",
+ " - 39s - loss: 0.3388 - acc: 0.9567 - val_loss: 0.8919 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00461: val_acc did not improve from 0.86765\n",
+ "Epoch 462/3000\n",
+ " - 39s - loss: 0.3470 - acc: 0.9543 - val_loss: 0.8662 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00462: val_acc did not improve from 0.86765\n",
+ "Epoch 463/3000\n",
+ " - 40s - loss: 0.3261 - acc: 0.9583 - val_loss: 0.8317 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00463: val_acc did not improve from 0.86765\n",
+ "Epoch 464/3000\n",
+ " - 39s - loss: 0.3419 - acc: 0.9561 - val_loss: 1.0010 - val_acc: 0.8416\n",
+ "\n",
+ "Epoch 00464: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00464: ReduceLROnPlateau reducing learning rate to 5.133420836500591e-05.\n",
+ "Epoch 465/3000\n",
+ " - 39s - loss: 0.3346 - acc: 0.9576 - val_loss: 0.8434 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00465: val_acc did not improve from 0.86765\n",
+ "Epoch 466/3000\n",
+ " - 39s - loss: 0.3281 - acc: 0.9611 - val_loss: 0.8669 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00466: val_acc did not improve from 0.86765\n",
+ "Epoch 467/3000\n",
+ " - 39s - loss: 0.3298 - acc: 0.9591 - val_loss: 0.9269 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00467: val_acc did not improve from 0.86765\n",
+ "Epoch 468/3000\n",
+ " - 39s - loss: 0.3375 - acc: 0.9564 - val_loss: 0.9649 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00468: val_acc did not improve from 0.86765\n",
+ "Epoch 469/3000\n",
+ " - 40s - loss: 0.3302 - acc: 0.9604 - val_loss: 0.9569 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00469: val_acc did not improve from 0.86765\n",
+ "Epoch 470/3000\n",
+ " - 39s - loss: 0.3267 - acc: 0.9589 - val_loss: 0.9243 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00470: val_acc did not improve from 0.86765\n",
+ "Epoch 471/3000\n",
+ " - 39s - loss: 0.3168 - acc: 0.9603 - val_loss: 0.9311 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00471: val_acc did not improve from 0.86765\n",
+ "Epoch 472/3000\n",
+ " - 39s - loss: 0.3068 - acc: 0.9645 - val_loss: 0.8702 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00472: val_acc did not improve from 0.86765\n",
+ "Epoch 473/3000\n",
+ " - 39s - loss: 0.3163 - acc: 0.9595 - val_loss: 0.9633 - val_acc: 0.8474\n",
+ "\n",
+ "Epoch 00473: val_acc did not improve from 0.86765\n",
+ "Epoch 474/3000\n",
+ " - 39s - loss: 0.3149 - acc: 0.9624 - val_loss: 0.9056 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00474: val_acc did not improve from 0.86765\n",
+ "Epoch 475/3000\n",
+ " - 39s - loss: 0.3333 - acc: 0.9568 - val_loss: 0.8720 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00475: val_acc did not improve from 0.86765\n",
+ "Epoch 476/3000\n",
+ " - 39s - loss: 0.3150 - acc: 0.9623 - val_loss: 0.9025 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00476: val_acc did not improve from 0.86765\n",
+ "Epoch 477/3000\n",
+ " - 39s - loss: 0.3332 - acc: 0.9570 - val_loss: 0.9184 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00477: val_acc did not improve from 0.86765\n",
+ "Epoch 478/3000\n",
+ " - 39s - loss: 0.3236 - acc: 0.9585 - val_loss: 0.8487 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 00478: val_acc did not improve from 0.86765\n",
+ "Epoch 479/3000\n",
+ " - 39s - loss: 0.3201 - acc: 0.9592 - val_loss: 0.8885 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00479: val_acc did not improve from 0.86765\n",
+ "Epoch 480/3000\n",
+ " - 39s - loss: 0.3304 - acc: 0.9562 - val_loss: 0.9052 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00480: val_acc did not improve from 0.86765\n",
+ "Epoch 481/3000\n",
+ " - 39s - loss: 0.3175 - acc: 0.9612 - val_loss: 0.8701 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00481: val_acc did not improve from 0.86765\n",
+ "Epoch 482/3000\n",
+ " - 40s - loss: 0.3281 - acc: 0.9573 - val_loss: 0.9248 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00482: val_acc did not improve from 0.86765\n",
+ "Epoch 483/3000\n",
+ " - 40s - loss: 0.3284 - acc: 0.9576 - val_loss: 0.9771 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00483: val_acc did not improve from 0.86765\n",
+ "Epoch 484/3000\n",
+ " - 39s - loss: 0.3366 - acc: 0.9570 - val_loss: 0.9807 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00484: val_acc did not improve from 0.86765\n",
+ "Epoch 485/3000\n",
+ " - 39s - loss: 0.3184 - acc: 0.9614 - val_loss: 0.9132 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00485: val_acc did not improve from 0.86765\n",
+ "Epoch 486/3000\n",
+ " - 39s - loss: 0.3115 - acc: 0.9617 - val_loss: 0.9153 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00486: val_acc did not improve from 0.86765\n",
+ "Epoch 487/3000\n",
+ " - 39s - loss: 0.3381 - acc: 0.9532 - val_loss: 0.9092 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00487: val_acc did not improve from 0.86765\n",
+ "Epoch 488/3000\n",
+ " - 39s - loss: 0.3372 - acc: 0.9574 - val_loss: 0.9252 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00488: val_acc did not improve from 0.86765\n",
+ "Epoch 489/3000\n",
+ " - 39s - loss: 0.3209 - acc: 0.9597 - val_loss: 0.9447 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00489: val_acc did not improve from 0.86765\n",
+ "Epoch 490/3000\n",
+ " - 39s - loss: 0.3298 - acc: 0.9591 - val_loss: 0.8678 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00490: val_acc did not improve from 0.86765\n",
+ "Epoch 491/3000\n",
+ " - 39s - loss: 0.3257 - acc: 0.9594 - val_loss: 0.8909 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00491: val_acc did not improve from 0.86765\n",
+ "Epoch 492/3000\n",
+ " - 39s - loss: 0.3173 - acc: 0.9597 - val_loss: 0.9157 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00492: val_acc did not improve from 0.86765\n",
+ "Epoch 493/3000\n",
+ " - 39s - loss: 0.3303 - acc: 0.9611 - val_loss: 0.9396 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00493: val_acc did not improve from 0.86765\n",
+ "Epoch 494/3000\n",
+ " - 39s - loss: 0.3222 - acc: 0.9609 - val_loss: 0.9693 - val_acc: 0.8470\n",
+ "\n",
+ "Epoch 00494: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00494: ReduceLROnPlateau reducing learning rate to 4.876749881077558e-05.\n",
+ "Epoch 495/3000\n",
+ " - 40s - loss: 0.3281 - acc: 0.9583 - val_loss: 0.9320 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00495: val_acc did not improve from 0.86765\n",
+ "Epoch 496/3000\n",
+ " - 39s - loss: 0.3316 - acc: 0.9574 - val_loss: 0.9316 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00496: val_acc did not improve from 0.86765\n",
+ "Epoch 497/3000\n",
+ " - 39s - loss: 0.3135 - acc: 0.9614 - val_loss: 0.8739 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00497: val_acc did not improve from 0.86765\n",
+ "Epoch 498/3000\n",
+ " - 39s - loss: 0.3212 - acc: 0.9586 - val_loss: 0.8895 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00498: val_acc did not improve from 0.86765\n",
+ "Epoch 499/3000\n",
+ " - 39s - loss: 0.3216 - acc: 0.9595 - val_loss: 0.8799 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00499: val_acc did not improve from 0.86765\n",
+ "Epoch 500/3000\n",
+ " - 39s - loss: 0.3230 - acc: 0.9583 - val_loss: 0.8783 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00500: val_acc did not improve from 0.86765\n",
+ "Epoch 501/3000\n",
+ " - 39s - loss: 0.3177 - acc: 0.9606 - val_loss: 0.9315 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00501: val_acc did not improve from 0.86765\n",
+ "Epoch 502/3000\n",
+ " - 39s - loss: 0.3369 - acc: 0.9550 - val_loss: 0.9903 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00502: val_acc did not improve from 0.86765\n",
+ "Epoch 503/3000\n",
+ " - 39s - loss: 0.3324 - acc: 0.9603 - val_loss: 0.8952 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 00503: val_acc did not improve from 0.86765\n",
+ "Epoch 504/3000\n",
+ " - 40s - loss: 0.3390 - acc: 0.9556 - val_loss: 0.9544 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00504: val_acc did not improve from 0.86765\n",
+ "Epoch 505/3000\n",
+ " - 40s - loss: 0.3353 - acc: 0.9564 - val_loss: 0.9124 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00505: val_acc did not improve from 0.86765\n",
+ "Epoch 506/3000\n",
+ " - 39s - loss: 0.3210 - acc: 0.9594 - val_loss: 0.8594 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 00506: val_acc did not improve from 0.86765\n",
+ "Epoch 507/3000\n",
+ " - 39s - loss: 0.3219 - acc: 0.9573 - val_loss: 0.8915 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00507: val_acc did not improve from 0.86765\n",
+ "Epoch 508/3000\n",
+ " - 39s - loss: 0.3247 - acc: 0.9588 - val_loss: 0.8953 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00508: val_acc did not improve from 0.86765\n",
+ "Epoch 509/3000\n",
+ " - 39s - loss: 0.3191 - acc: 0.9607 - val_loss: 0.8783 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00509: val_acc did not improve from 0.86765\n",
+ "Epoch 510/3000\n",
+ " - 39s - loss: 0.3234 - acc: 0.9574 - val_loss: 0.9082 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00510: val_acc did not improve from 0.86765\n",
+ "Epoch 511/3000\n",
+ " - 39s - loss: 0.3298 - acc: 0.9562 - val_loss: 0.8984 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00511: val_acc did not improve from 0.86765\n",
+ "Epoch 512/3000\n",
+ " - 39s - loss: 0.3041 - acc: 0.9641 - val_loss: 0.8977 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00512: val_acc did not improve from 0.86765\n",
+ "Epoch 513/3000\n",
+ " - 39s - loss: 0.3270 - acc: 0.9555 - val_loss: 0.8402 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 00513: val_acc did not improve from 0.86765\n",
+ "Epoch 514/3000\n",
+ " - 40s - loss: 0.3115 - acc: 0.9606 - val_loss: 0.8794 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 00514: val_acc did not improve from 0.86765\n",
+ "Epoch 515/3000\n",
+ " - 39s - loss: 0.3252 - acc: 0.9562 - val_loss: 0.9451 - val_acc: 0.8455\n",
+ "\n",
+ "Epoch 00515: val_acc did not improve from 0.86765\n",
+ "Epoch 516/3000\n",
+ " - 39s - loss: 0.3219 - acc: 0.9597 - val_loss: 0.8295 - val_acc: 0.8657\n",
+ "\n",
+ "Epoch 00516: val_acc did not improve from 0.86765\n",
+ "Epoch 517/3000\n",
+ " - 39s - loss: 0.3225 - acc: 0.9577 - val_loss: 0.8308 - val_acc: 0.8638\n",
+ "\n",
+ "Epoch 00517: val_acc did not improve from 0.86765\n",
+ "Epoch 518/3000\n",
+ " - 39s - loss: 0.3120 - acc: 0.9614 - val_loss: 0.8791 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00518: val_acc did not improve from 0.86765\n",
+ "Epoch 519/3000\n",
+ " - 39s - loss: 0.3207 - acc: 0.9588 - val_loss: 0.8841 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00519: val_acc did not improve from 0.86765\n",
+ "Epoch 520/3000\n",
+ " - 39s - loss: 0.3166 - acc: 0.9603 - val_loss: 0.8913 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00520: val_acc did not improve from 0.86765\n",
+ "Epoch 521/3000\n",
+ " - 39s - loss: 0.3218 - acc: 0.9604 - val_loss: 0.8800 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00521: val_acc did not improve from 0.86765\n",
+ "Epoch 522/3000\n",
+ " - 39s - loss: 0.3278 - acc: 0.9586 - val_loss: 0.8885 - val_acc: 0.8455\n",
+ "\n",
+ "Epoch 00522: val_acc did not improve from 0.86765\n",
+ "Epoch 523/3000\n",
+ " - 40s - loss: 0.3203 - acc: 0.9577 - val_loss: 0.8893 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00523: val_acc did not improve from 0.86765\n",
+ "Epoch 524/3000\n",
+ " - 39s - loss: 0.3078 - acc: 0.9635 - val_loss: 0.8748 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00524: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00524: ReduceLROnPlateau reducing learning rate to 4.63291238702368e-05.\n",
+ "Epoch 525/3000\n",
+ " - 40s - loss: 0.3199 - acc: 0.9588 - val_loss: 0.9706 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00525: val_acc did not improve from 0.86765\n",
+ "Epoch 526/3000\n",
+ " - 40s - loss: 0.3191 - acc: 0.9583 - val_loss: 0.8217 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00526: val_acc did not improve from 0.86765\n",
+ "Epoch 527/3000\n",
+ " - 39s - loss: 0.3051 - acc: 0.9626 - val_loss: 0.9206 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00527: val_acc did not improve from 0.86765\n",
+ "Epoch 528/3000\n",
+ " - 39s - loss: 0.3100 - acc: 0.9618 - val_loss: 0.9187 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00528: val_acc did not improve from 0.86765\n",
+ "Epoch 529/3000\n",
+ " - 39s - loss: 0.3182 - acc: 0.9600 - val_loss: 0.8847 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 00529: val_acc did not improve from 0.86765\n",
+ "Epoch 530/3000\n",
+ " - 40s - loss: 0.3149 - acc: 0.9598 - val_loss: 0.8858 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00530: val_acc did not improve from 0.86765\n",
+ "Epoch 531/3000\n",
+ " - 39s - loss: 0.3266 - acc: 0.9588 - val_loss: 0.8866 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00531: val_acc did not improve from 0.86765\n",
+ "Epoch 532/3000\n",
+ " - 39s - loss: 0.3166 - acc: 0.9598 - val_loss: 0.8903 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 00532: val_acc did not improve from 0.86765\n",
+ "Epoch 533/3000\n",
+ " - 39s - loss: 0.3091 - acc: 0.9632 - val_loss: 0.8742 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00533: val_acc did not improve from 0.86765\n",
+ "Epoch 534/3000\n",
+ " - 39s - loss: 0.3330 - acc: 0.9564 - val_loss: 0.9164 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00534: val_acc did not improve from 0.86765\n",
+ "Epoch 535/3000\n",
+ " - 40s - loss: 0.3164 - acc: 0.9606 - val_loss: 0.9253 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00535: val_acc did not improve from 0.86765\n",
+ "Epoch 536/3000\n",
+ " - 39s - loss: 0.3114 - acc: 0.9585 - val_loss: 0.8726 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00536: val_acc did not improve from 0.86765\n",
+ "Epoch 537/3000\n",
+ " - 39s - loss: 0.3251 - acc: 0.9600 - val_loss: 0.9935 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00537: val_acc did not improve from 0.86765\n",
+ "Epoch 538/3000\n",
+ " - 39s - loss: 0.3159 - acc: 0.9623 - val_loss: 0.9470 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00538: val_acc did not improve from 0.86765\n",
+ "Epoch 539/3000\n",
+ " - 39s - loss: 0.3156 - acc: 0.9591 - val_loss: 0.8687 - val_acc: 0.8630\n",
+ "\n",
+ "Epoch 00539: val_acc did not improve from 0.86765\n",
+ "Epoch 540/3000\n",
+ " - 39s - loss: 0.3105 - acc: 0.9612 - val_loss: 0.9334 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00540: val_acc did not improve from 0.86765\n",
+ "Epoch 541/3000\n",
+ " - 40s - loss: 0.3124 - acc: 0.9598 - val_loss: 0.9087 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00541: val_acc did not improve from 0.86765\n",
+ "Epoch 542/3000\n",
+ " - 39s - loss: 0.3080 - acc: 0.9615 - val_loss: 0.8623 - val_acc: 0.8622\n",
+ "\n",
+ "Epoch 00542: val_acc did not improve from 0.86765\n",
+ "Epoch 543/3000\n",
+ " - 39s - loss: 0.3236 - acc: 0.9583 - val_loss: 0.8804 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00543: val_acc did not improve from 0.86765\n",
+ "Epoch 544/3000\n",
+ " - 40s - loss: 0.3359 - acc: 0.9550 - val_loss: 0.8948 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00544: val_acc did not improve from 0.86765\n",
+ "Epoch 545/3000\n",
+ " - 39s - loss: 0.3287 - acc: 0.9595 - val_loss: 0.8948 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00545: val_acc did not improve from 0.86765\n",
+ "Epoch 546/3000\n",
+ " - 39s - loss: 0.3135 - acc: 0.9629 - val_loss: 0.9767 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00546: val_acc did not improve from 0.86765\n",
+ "Epoch 547/3000\n",
+ " - 39s - loss: 0.3047 - acc: 0.9607 - val_loss: 0.9010 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00547: val_acc did not improve from 0.86765\n",
+ "Epoch 548/3000\n",
+ " - 39s - loss: 0.3219 - acc: 0.9603 - val_loss: 0.9056 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00548: val_acc did not improve from 0.86765\n",
+ "Epoch 549/3000\n",
+ " - 40s - loss: 0.3148 - acc: 0.9621 - val_loss: 0.8763 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00549: val_acc did not improve from 0.86765\n",
+ "Epoch 550/3000\n",
+ " - 39s - loss: 0.3018 - acc: 0.9630 - val_loss: 0.9044 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00550: val_acc did not improve from 0.86765\n",
+ "Epoch 551/3000\n",
+ " - 39s - loss: 0.3134 - acc: 0.9621 - val_loss: 0.9063 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00551: val_acc did not improve from 0.86765\n",
+ "Epoch 552/3000\n",
+ " - 39s - loss: 0.3164 - acc: 0.9630 - val_loss: 0.8837 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00552: val_acc did not improve from 0.86765\n",
+ "Epoch 553/3000\n",
+ " - 39s - loss: 0.3270 - acc: 0.9586 - val_loss: 0.8592 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 00553: val_acc did not improve from 0.86765\n",
+ "Epoch 554/3000\n",
+ " - 39s - loss: 0.3205 - acc: 0.9588 - val_loss: 0.9231 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00554: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00554: ReduceLROnPlateau reducing learning rate to 4.4012669059156905e-05.\n",
+ "Epoch 555/3000\n",
+ " - 39s - loss: 0.3312 - acc: 0.9577 - val_loss: 0.8746 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00555: val_acc did not improve from 0.86765\n",
+ "Epoch 556/3000\n",
+ " - 39s - loss: 0.3002 - acc: 0.9606 - val_loss: 0.8975 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00556: val_acc did not improve from 0.86765\n",
+ "Epoch 557/3000\n",
+ " - 39s - loss: 0.3142 - acc: 0.9614 - val_loss: 0.8877 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00557: val_acc did not improve from 0.86765\n",
+ "Epoch 558/3000\n",
+ " - 39s - loss: 0.3120 - acc: 0.9601 - val_loss: 0.9035 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00558: val_acc did not improve from 0.86765\n",
+ "Epoch 559/3000\n",
+ " - 39s - loss: 0.3106 - acc: 0.9635 - val_loss: 0.8927 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00559: val_acc did not improve from 0.86765\n",
+ "Epoch 560/3000\n",
+ " - 39s - loss: 0.3132 - acc: 0.9592 - val_loss: 0.8772 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00560: val_acc did not improve from 0.86765\n",
+ "Epoch 561/3000\n",
+ " - 39s - loss: 0.3115 - acc: 0.9624 - val_loss: 0.9050 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00561: val_acc did not improve from 0.86765\n",
+ "Epoch 562/3000\n",
+ " - 39s - loss: 0.3087 - acc: 0.9621 - val_loss: 0.8773 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00564: val_acc did not improve from 0.86765\n",
+ "Epoch 565/3000\n",
+ " - 39s - loss: 0.3089 - acc: 0.9632 - val_loss: 0.9398 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00565: val_acc did not improve from 0.86765\n",
+ "Epoch 566/3000\n",
+ " - 40s - loss: 0.3065 - acc: 0.9612 - val_loss: 0.9517 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00566: val_acc did not improve from 0.86765\n",
+ "Epoch 567/3000\n",
+ " - 40s - loss: 0.3049 - acc: 0.9630 - val_loss: 0.9277 - val_acc: 0.8470\n",
+ "\n",
+ "Epoch 00567: val_acc did not improve from 0.86765\n",
+ "Epoch 568/3000\n",
+ " - 40s - loss: 0.3161 - acc: 0.9567 - val_loss: 0.8848 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00568: val_acc did not improve from 0.86765\n",
+ "Epoch 569/3000\n",
+ " - 39s - loss: 0.3028 - acc: 0.9624 - val_loss: 0.9450 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00569: val_acc did not improve from 0.86765\n",
+ "Epoch 570/3000\n",
+ " - 39s - loss: 0.3059 - acc: 0.9601 - val_loss: 0.9400 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00570: val_acc did not improve from 0.86765\n",
+ "Epoch 571/3000\n",
+ " - 39s - loss: 0.3174 - acc: 0.9626 - val_loss: 0.9213 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00571: val_acc did not improve from 0.86765\n",
+ "Epoch 572/3000\n",
+ " - 39s - loss: 0.3332 - acc: 0.9565 - val_loss: 0.9168 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00572: val_acc did not improve from 0.86765\n",
+ "Epoch 573/3000\n",
+ " - 39s - loss: 0.3111 - acc: 0.9617 - val_loss: 0.9227 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00573: val_acc did not improve from 0.86765\n",
+ "Epoch 574/3000\n",
+ " - 39s - loss: 0.3135 - acc: 0.9582 - val_loss: 0.9085 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00574: val_acc did not improve from 0.86765\n",
+ "Epoch 575/3000\n",
+ " - 39s - loss: 0.3082 - acc: 0.9589 - val_loss: 0.9385 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00575: val_acc did not improve from 0.86765\n",
+ "Epoch 576/3000\n",
+ " - 39s - loss: 0.3147 - acc: 0.9614 - val_loss: 0.8738 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 00576: val_acc did not improve from 0.86765\n",
+ "Epoch 577/3000\n",
+ " - 39s - loss: 0.3130 - acc: 0.9606 - val_loss: 0.9327 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00577: val_acc did not improve from 0.86765\n",
+ "Epoch 578/3000\n",
+ " - 39s - loss: 0.3073 - acc: 0.9598 - val_loss: 0.9454 - val_acc: 0.8439\n",
+ "\n",
+ "Epoch 00578: val_acc did not improve from 0.86765\n",
+ "Epoch 579/3000\n",
+ " - 39s - loss: 0.3095 - acc: 0.9617 - val_loss: 0.9060 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00579: val_acc did not improve from 0.86765\n",
+ "Epoch 580/3000\n",
+ " - 39s - loss: 0.3184 - acc: 0.9598 - val_loss: 0.8865 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00580: val_acc did not improve from 0.86765\n",
+ "Epoch 581/3000\n",
+ " - 39s - loss: 0.3052 - acc: 0.9624 - val_loss: 0.8943 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00581: val_acc did not improve from 0.86765\n",
+ "Epoch 582/3000\n",
+ " - 39s - loss: 0.3107 - acc: 0.9618 - val_loss: 0.9659 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00582: val_acc did not improve from 0.86765\n",
+ "Epoch 583/3000\n",
+ " - 39s - loss: 0.3174 - acc: 0.9611 - val_loss: 0.9223 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00583: val_acc did not improve from 0.86765\n",
+ "Epoch 584/3000\n",
+ " - 39s - loss: 0.2990 - acc: 0.9624 - val_loss: 0.9150 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00584: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00584: ReduceLROnPlateau reducing learning rate to 4.181203439657111e-05.\n",
+ "Epoch 585/3000\n",
+ " - 39s - loss: 0.3184 - acc: 0.9577 - val_loss: 0.9063 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00585: val_acc did not improve from 0.86765\n",
+ "Epoch 586/3000\n",
+ " - 39s - loss: 0.3116 - acc: 0.9614 - val_loss: 0.8674 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 00586: val_acc did not improve from 0.86765\n",
+ "Epoch 587/3000\n",
+ " - 39s - loss: 0.3115 - acc: 0.9629 - val_loss: 0.8713 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00587: val_acc did not improve from 0.86765\n",
+ "Epoch 588/3000\n",
+ " - 39s - loss: 0.3092 - acc: 0.9627 - val_loss: 0.9005 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00588: val_acc did not improve from 0.86765\n",
+ "Epoch 589/3000\n",
+ " - 40s - loss: 0.3278 - acc: 0.9556 - val_loss: 0.8666 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00589: val_acc did not improve from 0.86765\n",
+ "Epoch 590/3000\n",
+ " - 39s - loss: 0.3093 - acc: 0.9611 - val_loss: 0.8830 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00590: val_acc did not improve from 0.86765\n",
+ "Epoch 591/3000\n",
+ " - 39s - loss: 0.3041 - acc: 0.9595 - val_loss: 0.8750 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00591: val_acc did not improve from 0.86765\n",
+ "Epoch 592/3000\n",
+ " - 39s - loss: 0.3028 - acc: 0.9633 - val_loss: 0.8818 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 00592: val_acc did not improve from 0.86765\n",
+ "Epoch 593/3000\n",
+ " - 39s - loss: 0.3133 - acc: 0.9576 - val_loss: 0.9473 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00593: val_acc did not improve from 0.86765\n",
+ "Epoch 594/3000\n",
+ " - 40s - loss: 0.3150 - acc: 0.9597 - val_loss: 0.9213 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00594: val_acc did not improve from 0.86765\n",
+ "Epoch 595/3000\n",
+ " - 39s - loss: 0.3025 - acc: 0.9630 - val_loss: 0.9128 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00595: val_acc did not improve from 0.86765\n",
+ "Epoch 596/3000\n",
+ " - 39s - loss: 0.2899 - acc: 0.9645 - val_loss: 0.9197 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00596: val_acc did not improve from 0.86765\n",
+ "Epoch 597/3000\n",
+ " - 39s - loss: 0.2943 - acc: 0.9633 - val_loss: 0.9544 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00597: val_acc did not improve from 0.86765\n",
+ "Epoch 598/3000\n",
+ " - 38s - loss: 0.3093 - acc: 0.9623 - val_loss: 0.9325 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00598: val_acc did not improve from 0.86765\n",
+ "Epoch 599/3000\n",
+ " - 39s - loss: 0.3032 - acc: 0.9626 - val_loss: 0.9353 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00599: val_acc did not improve from 0.86765\n",
+ "Epoch 600/3000\n",
+ " - 39s - loss: 0.2977 - acc: 0.9621 - val_loss: 0.9069 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00600: val_acc did not improve from 0.86765\n",
+ "Epoch 601/3000\n",
+ " - 39s - loss: 0.3101 - acc: 0.9614 - val_loss: 0.9313 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00601: val_acc did not improve from 0.86765\n",
+ "Epoch 602/3000\n",
+ " - 39s - loss: 0.3042 - acc: 0.9639 - val_loss: 0.9277 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00602: val_acc did not improve from 0.86765\n",
+ "Epoch 603/3000\n",
+ " - 39s - loss: 0.3144 - acc: 0.9589 - val_loss: 0.8583 - val_acc: 0.8626\n",
+ "\n",
+ "Epoch 00603: val_acc did not improve from 0.86765\n",
+ "Epoch 604/3000\n",
+ " - 40s - loss: 0.3224 - acc: 0.9591 - val_loss: 0.8898 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00604: val_acc did not improve from 0.86765\n",
+ "Epoch 605/3000\n",
+ " - 39s - loss: 0.3099 - acc: 0.9611 - val_loss: 0.8822 - val_acc: 0.8641\n",
+ "\n",
+ "Epoch 00605: val_acc did not improve from 0.86765\n",
+ "Epoch 606/3000\n",
+ " - 40s - loss: 0.3005 - acc: 0.9627 - val_loss: 0.9255 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00606: val_acc did not improve from 0.86765\n",
+ "Epoch 607/3000\n",
+ " - 39s - loss: 0.3039 - acc: 0.9620 - val_loss: 0.8470 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 00607: val_acc did not improve from 0.86765\n",
+ "Epoch 608/3000\n",
+ " - 39s - loss: 0.3012 - acc: 0.9611 - val_loss: 0.8784 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00608: val_acc did not improve from 0.86765\n",
+ "Epoch 609/3000\n",
+ " - 39s - loss: 0.2958 - acc: 0.9641 - val_loss: 0.9520 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00609: val_acc did not improve from 0.86765\n",
+ "Epoch 610/3000\n",
+ " - 39s - loss: 0.3149 - acc: 0.9615 - val_loss: 0.8923 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00610: val_acc did not improve from 0.86765\n",
+ "Epoch 611/3000\n",
+ " - 39s - loss: 0.3170 - acc: 0.9623 - val_loss: 0.9307 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00611: val_acc did not improve from 0.86765\n",
+ "Epoch 612/3000\n",
+ " - 39s - loss: 0.3073 - acc: 0.9653 - val_loss: 0.8659 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 00612: val_acc did not improve from 0.86765\n",
+ "Epoch 613/3000\n",
+ " - 40s - loss: 0.2899 - acc: 0.9662 - val_loss: 0.9281 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00613: val_acc did not improve from 0.86765\n",
+ "Epoch 614/3000\n",
+ " - 39s - loss: 0.3131 - acc: 0.9591 - val_loss: 0.8495 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 00614: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00614: ReduceLROnPlateau reducing learning rate to 3.9721430948702614e-05.\n",
+ "Epoch 615/3000\n",
+ " - 39s - loss: 0.2953 - acc: 0.9629 - val_loss: 0.9233 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00615: val_acc did not improve from 0.86765\n",
+ "Epoch 616/3000\n",
+ " - 39s - loss: 0.3051 - acc: 0.9621 - val_loss: 0.9077 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00616: val_acc did not improve from 0.86765\n",
+ "Epoch 617/3000\n",
+ " - 40s - loss: 0.2863 - acc: 0.9662 - val_loss: 0.8860 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00617: val_acc did not improve from 0.86765\n",
+ "Epoch 618/3000\n",
+ " - 39s - loss: 0.3088 - acc: 0.9597 - val_loss: 0.8963 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00618: val_acc did not improve from 0.86765\n",
+ "Epoch 619/3000\n",
+ " - 39s - loss: 0.3013 - acc: 0.9650 - val_loss: 0.9202 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00619: val_acc did not improve from 0.86765\n",
+ "Epoch 620/3000\n",
+ " - 39s - loss: 0.3097 - acc: 0.9604 - val_loss: 0.9244 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00620: val_acc did not improve from 0.86765\n",
+ "Epoch 621/3000\n",
+ " - 39s - loss: 0.3048 - acc: 0.9626 - val_loss: 0.9033 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00621: val_acc did not improve from 0.86765\n",
+ "Epoch 622/3000\n",
+ " - 39s - loss: 0.2948 - acc: 0.9648 - val_loss: 0.8670 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00622: val_acc did not improve from 0.86765\n",
+ "Epoch 623/3000\n",
+ " - 39s - loss: 0.2986 - acc: 0.9647 - val_loss: 0.8519 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00623: val_acc did not improve from 0.86765\n",
+ "Epoch 624/3000\n",
+ " - 39s - loss: 0.3045 - acc: 0.9632 - val_loss: 0.9180 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00624: val_acc did not improve from 0.86765\n",
+ "Epoch 625/3000\n",
+ " - 39s - loss: 0.3078 - acc: 0.9595 - val_loss: 0.9361 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00625: val_acc did not improve from 0.86765\n",
+ "Epoch 626/3000\n",
+ " - 39s - loss: 0.3033 - acc: 0.9620 - val_loss: 0.8913 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00626: val_acc did not improve from 0.86765\n",
+ "Epoch 627/3000\n",
+ " - 39s - loss: 0.2974 - acc: 0.9641 - val_loss: 0.8778 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 00627: val_acc did not improve from 0.86765\n",
+ "Epoch 628/3000\n",
+ " - 39s - loss: 0.3035 - acc: 0.9609 - val_loss: 0.8390 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 00628: val_acc did not improve from 0.86765\n",
+ "Epoch 629/3000\n",
+ " - 40s - loss: 0.2961 - acc: 0.9639 - val_loss: 0.9099 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00629: val_acc did not improve from 0.86765\n",
+ "Epoch 630/3000\n",
+ " - 39s - loss: 0.3082 - acc: 0.9594 - val_loss: 0.9248 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00630: val_acc did not improve from 0.86765\n",
+ "Epoch 631/3000\n",
+ " - 40s - loss: 0.2951 - acc: 0.9651 - val_loss: 0.8578 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00631: val_acc did not improve from 0.86765\n",
+ "Epoch 632/3000\n",
+ " - 39s - loss: 0.2976 - acc: 0.9635 - val_loss: 0.9248 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00632: val_acc did not improve from 0.86765\n",
+ "Epoch 633/3000\n",
+ " - 39s - loss: 0.2952 - acc: 0.9662 - val_loss: 0.9488 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00633: val_acc did not improve from 0.86765\n",
+ "Epoch 634/3000\n",
+ " - 39s - loss: 0.2984 - acc: 0.9651 - val_loss: 0.8497 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 00634: val_acc did not improve from 0.86765\n",
+ "Epoch 635/3000\n",
+ " - 39s - loss: 0.2962 - acc: 0.9627 - val_loss: 0.9404 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00635: val_acc did not improve from 0.86765\n",
+ "Epoch 636/3000\n",
+ " - 39s - loss: 0.3032 - acc: 0.9617 - val_loss: 0.8722 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00636: val_acc did not improve from 0.86765\n",
+ "Epoch 637/3000\n",
+ " - 39s - loss: 0.3138 - acc: 0.9592 - val_loss: 0.8681 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00637: val_acc did not improve from 0.86765\n",
+ "Epoch 638/3000\n",
+ " - 39s - loss: 0.2964 - acc: 0.9638 - val_loss: 0.9499 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00638: val_acc did not improve from 0.86765\n",
+ "Epoch 639/3000\n",
+ " - 39s - loss: 0.3060 - acc: 0.9618 - val_loss: 0.9046 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00639: val_acc did not improve from 0.86765\n",
+ "Epoch 640/3000\n",
+ " - 39s - loss: 0.2985 - acc: 0.9601 - val_loss: 0.9265 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00640: val_acc did not improve from 0.86765\n",
+ "Epoch 641/3000\n",
+ " - 39s - loss: 0.3079 - acc: 0.9600 - val_loss: 0.8866 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00641: val_acc did not improve from 0.86765\n",
+ "Epoch 642/3000\n",
+ " - 39s - loss: 0.3059 - acc: 0.9606 - val_loss: 0.8764 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 00642: val_acc did not improve from 0.86765\n",
+ "Epoch 643/3000\n",
+ " - 39s - loss: 0.2926 - acc: 0.9636 - val_loss: 0.9022 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00643: val_acc did not improve from 0.86765\n",
+ "Epoch 644/3000\n",
+ " - 39s - loss: 0.2894 - acc: 0.9656 - val_loss: 0.8993 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 00644: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00644: ReduceLROnPlateau reducing learning rate to 3.773536009248346e-05.\n",
+ "Epoch 645/3000\n",
+ " - 39s - loss: 0.2983 - acc: 0.9624 - val_loss: 0.8809 - val_acc: 0.8618\n",
+ "\n",
+ "Epoch 00645: val_acc did not improve from 0.86765\n",
+ "Epoch 646/3000\n",
+ " - 40s - loss: 0.2960 - acc: 0.9615 - val_loss: 0.8956 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00646: val_acc did not improve from 0.86765\n",
+ "Epoch 647/3000\n",
+ " - 39s - loss: 0.3005 - acc: 0.9621 - val_loss: 0.9148 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00647: val_acc did not improve from 0.86765\n",
+ "Epoch 648/3000\n",
+ " - 39s - loss: 0.3006 - acc: 0.9632 - val_loss: 0.9311 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00648: val_acc did not improve from 0.86765\n",
+ "Epoch 649/3000\n",
+ " - 39s - loss: 0.3029 - acc: 0.9617 - val_loss: 0.9337 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00649: val_acc did not improve from 0.86765\n",
+ "Epoch 650/3000\n",
+ " - 39s - loss: 0.3052 - acc: 0.9623 - val_loss: 0.8779 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00650: val_acc did not improve from 0.86765\n",
+ "Epoch 651/3000\n",
+ " - 39s - loss: 0.2960 - acc: 0.9621 - val_loss: 0.9227 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00651: val_acc did not improve from 0.86765\n",
+ "Epoch 652/3000\n",
+ " - 39s - loss: 0.3001 - acc: 0.9630 - val_loss: 0.9128 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00652: val_acc did not improve from 0.86765\n",
+ "Epoch 653/3000\n",
+ " - 40s - loss: 0.2973 - acc: 0.9632 - val_loss: 0.9267 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00653: val_acc did not improve from 0.86765\n",
+ "Epoch 654/3000\n",
+ " - 39s - loss: 0.2871 - acc: 0.9660 - val_loss: 0.9043 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 00654: val_acc did not improve from 0.86765\n",
+ "Epoch 655/3000\n",
+ " - 39s - loss: 0.3059 - acc: 0.9630 - val_loss: 0.9146 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00655: val_acc did not improve from 0.86765\n",
+ "Epoch 656/3000\n",
+ " - 39s - loss: 0.2993 - acc: 0.9600 - val_loss: 0.9579 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00656: val_acc did not improve from 0.86765\n",
+ "Epoch 657/3000\n",
+ " - 40s - loss: 0.3047 - acc: 0.9623 - val_loss: 0.8910 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00657: val_acc did not improve from 0.86765\n",
+ "Epoch 658/3000\n",
+ " - 39s - loss: 0.3010 - acc: 0.9624 - val_loss: 0.9186 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00658: val_acc did not improve from 0.86765\n",
+ "Epoch 659/3000\n",
+ " - 39s - loss: 0.3168 - acc: 0.9617 - val_loss: 0.8789 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 00659: val_acc did not improve from 0.86765\n",
+ "Epoch 660/3000\n",
+ " - 39s - loss: 0.2925 - acc: 0.9642 - val_loss: 0.9028 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00660: val_acc did not improve from 0.86765\n",
+ "Epoch 661/3000\n",
+ " - 39s - loss: 0.2944 - acc: 0.9653 - val_loss: 0.9155 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00661: val_acc did not improve from 0.86765\n",
+ "Epoch 662/3000\n",
+ " - 39s - loss: 0.2973 - acc: 0.9639 - val_loss: 0.8542 - val_acc: 0.8645\n",
+ "\n",
+ "Epoch 00662: val_acc did not improve from 0.86765\n",
+ "Epoch 663/3000\n",
+ " - 39s - loss: 0.3077 - acc: 0.9603 - val_loss: 0.8811 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 00663: val_acc did not improve from 0.86765\n",
+ "Epoch 664/3000\n",
+ " - 39s - loss: 0.2877 - acc: 0.9650 - val_loss: 0.8856 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00664: val_acc did not improve from 0.86765\n",
+ "Epoch 665/3000\n",
+ " - 39s - loss: 0.3138 - acc: 0.9604 - val_loss: 0.8760 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00665: val_acc did not improve from 0.86765\n",
+ "Epoch 666/3000\n",
+ " - 39s - loss: 0.2929 - acc: 0.9629 - val_loss: 0.8754 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00666: val_acc did not improve from 0.86765\n",
+ "Epoch 667/3000\n",
+ " - 39s - loss: 0.3044 - acc: 0.9629 - val_loss: 0.8867 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00667: val_acc did not improve from 0.86765\n",
+ "Epoch 668/3000\n",
+ " - 39s - loss: 0.2866 - acc: 0.9645 - val_loss: 0.8662 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 00668: val_acc did not improve from 0.86765\n",
+ "Epoch 669/3000\n",
+ " - 39s - loss: 0.3043 - acc: 0.9601 - val_loss: 0.9173 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00669: val_acc did not improve from 0.86765\n",
+ "Epoch 670/3000\n",
+ " - 39s - loss: 0.2931 - acc: 0.9630 - val_loss: 0.9022 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 00670: val_acc did not improve from 0.86765\n",
+ "Epoch 671/3000\n",
+ " - 39s - loss: 0.2879 - acc: 0.9641 - val_loss: 0.9246 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00671: val_acc did not improve from 0.86765\n",
+ "Epoch 672/3000\n",
+ " - 39s - loss: 0.2970 - acc: 0.9650 - val_loss: 0.9134 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00672: val_acc did not improve from 0.86765\n",
+ "Epoch 673/3000\n",
+ " - 39s - loss: 0.3130 - acc: 0.9598 - val_loss: 0.9538 - val_acc: 0.8416\n",
+ "\n",
+ "Epoch 00673: val_acc did not improve from 0.86765\n",
+ "Epoch 674/3000\n",
+ " - 39s - loss: 0.2877 - acc: 0.9656 - val_loss: 0.9165 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00674: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00674: ReduceLROnPlateau reducing learning rate to 3.584859277907526e-05.\n",
+ "Epoch 675/3000\n",
+ " - 39s - loss: 0.3036 - acc: 0.9598 - val_loss: 0.9019 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00675: val_acc did not improve from 0.86765\n",
+ "Epoch 676/3000\n",
+ " - 39s - loss: 0.2976 - acc: 0.9627 - val_loss: 0.9121 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00676: val_acc did not improve from 0.86765\n",
+ "Epoch 677/3000\n",
+ " - 39s - loss: 0.2968 - acc: 0.9601 - val_loss: 0.9235 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00677: val_acc did not improve from 0.86765\n",
+ "Epoch 678/3000\n",
+ " - 40s - loss: 0.2980 - acc: 0.9641 - val_loss: 0.9238 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00678: val_acc did not improve from 0.86765\n",
+ "Epoch 679/3000\n",
+ " - 39s - loss: 0.3045 - acc: 0.9641 - val_loss: 0.9372 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00679: val_acc did not improve from 0.86765\n",
+ "Epoch 680/3000\n",
+ " - 39s - loss: 0.2936 - acc: 0.9653 - val_loss: 0.9538 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00680: val_acc did not improve from 0.86765\n",
+ "Epoch 681/3000\n",
+ " - 40s - loss: 0.2835 - acc: 0.9674 - val_loss: 0.8736 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 00681: val_acc did not improve from 0.86765\n",
+ "Epoch 682/3000\n",
+ " - 39s - loss: 0.2869 - acc: 0.9647 - val_loss: 0.9875 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00682: val_acc did not improve from 0.86765\n",
+ "Epoch 683/3000\n",
+ " - 39s - loss: 0.3113 - acc: 0.9603 - val_loss: 0.8752 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00683: val_acc did not improve from 0.86765\n",
+ "Epoch 684/3000\n",
+ " - 39s - loss: 0.2964 - acc: 0.9657 - val_loss: 0.8664 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00684: val_acc did not improve from 0.86765\n",
+ "Epoch 685/3000\n",
+ " - 40s - loss: 0.2932 - acc: 0.9638 - val_loss: 0.8569 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00685: val_acc did not improve from 0.86765\n",
+ "Epoch 686/3000\n",
+ " - 39s - loss: 0.3064 - acc: 0.9621 - val_loss: 0.9977 - val_acc: 0.8455\n",
+ "\n",
+ "Epoch 00686: val_acc did not improve from 0.86765\n",
+ "Epoch 687/3000\n",
+ " - 39s - loss: 0.3080 - acc: 0.9612 - val_loss: 0.9792 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00687: val_acc did not improve from 0.86765\n",
+ "Epoch 688/3000\n",
+ " - 39s - loss: 0.2962 - acc: 0.9662 - val_loss: 0.9532 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00688: val_acc did not improve from 0.86765\n",
+ "Epoch 689/3000\n",
+ " - 39s - loss: 0.2952 - acc: 0.9656 - val_loss: 0.9066 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00689: val_acc did not improve from 0.86765\n",
+ "Epoch 690/3000\n",
+ " - 39s - loss: 0.2913 - acc: 0.9633 - val_loss: 0.8976 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00690: val_acc did not improve from 0.86765\n",
+ "Epoch 691/3000\n",
+ " - 39s - loss: 0.2921 - acc: 0.9626 - val_loss: 0.9137 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00691: val_acc did not improve from 0.86765\n",
+ "Epoch 692/3000\n",
+ " - 39s - loss: 0.2933 - acc: 0.9644 - val_loss: 0.8747 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00692: val_acc did not improve from 0.86765\n",
+ "Epoch 693/3000\n",
+ " - 39s - loss: 0.2911 - acc: 0.9657 - val_loss: 0.9050 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00693: val_acc did not improve from 0.86765\n",
+ "Epoch 694/3000\n",
+ " - 39s - loss: 0.2948 - acc: 0.9650 - val_loss: 0.8901 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00694: val_acc did not improve from 0.86765\n",
+ "Epoch 695/3000\n",
+ " - 39s - loss: 0.2893 - acc: 0.9642 - val_loss: 0.9297 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00695: val_acc did not improve from 0.86765\n",
+ "Epoch 696/3000\n",
+ " - 39s - loss: 0.2954 - acc: 0.9630 - val_loss: 0.9187 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00696: val_acc did not improve from 0.86765\n",
+ "Epoch 697/3000\n",
+ " - 39s - loss: 0.2788 - acc: 0.9677 - val_loss: 0.9193 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00697: val_acc did not improve from 0.86765\n",
+ "Epoch 698/3000\n",
+ " - 40s - loss: 0.2841 - acc: 0.9654 - val_loss: 0.9162 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00698: val_acc did not improve from 0.86765\n",
+ "Epoch 699/3000\n",
+ " - 40s - loss: 0.2931 - acc: 0.9635 - val_loss: 0.9340 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00699: val_acc did not improve from 0.86765\n",
+ "Epoch 700/3000\n",
+ " - 39s - loss: 0.2893 - acc: 0.9651 - val_loss: 0.8903 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00700: val_acc did not improve from 0.86765\n",
+ "Epoch 701/3000\n",
+ " - 40s - loss: 0.3039 - acc: 0.9606 - val_loss: 0.9014 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00701: val_acc did not improve from 0.86765\n",
+ "Epoch 702/3000\n",
+ " - 39s - loss: 0.2943 - acc: 0.9648 - val_loss: 0.9103 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00702: val_acc did not improve from 0.86765\n",
+ "Epoch 703/3000\n",
+ " - 39s - loss: 0.2827 - acc: 0.9663 - val_loss: 0.9195 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00703: val_acc did not improve from 0.86765\n",
+ "Epoch 704/3000\n",
+ " - 39s - loss: 0.2919 - acc: 0.9632 - val_loss: 0.9178 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00704: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00704: ReduceLROnPlateau reducing learning rate to 3.4056162621709515e-05.\n",
+ "Epoch 705/3000\n",
+ " - 39s - loss: 0.2836 - acc: 0.9678 - val_loss: 0.9168 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00705: val_acc did not improve from 0.86765\n",
+ "Epoch 706/3000\n",
+ " - 39s - loss: 0.2895 - acc: 0.9648 - val_loss: 0.9295 - val_acc: 0.8455\n",
+ "\n",
+ "Epoch 00706: val_acc did not improve from 0.86765\n",
+ "Epoch 707/3000\n",
+ " - 40s - loss: 0.2835 - acc: 0.9654 - val_loss: 0.9059 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00707: val_acc did not improve from 0.86765\n",
+ "Epoch 708/3000\n",
+ " - 40s - loss: 0.2936 - acc: 0.9641 - val_loss: 0.8736 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00708: val_acc did not improve from 0.86765\n",
+ "Epoch 709/3000\n",
+ " - 39s - loss: 0.2918 - acc: 0.9656 - val_loss: 0.9068 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00709: val_acc did not improve from 0.86765\n",
+ "Epoch 710/3000\n",
+ " - 39s - loss: 0.2994 - acc: 0.9609 - val_loss: 0.9029 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00710: val_acc did not improve from 0.86765\n",
+ "Epoch 711/3000\n",
+ " - 39s - loss: 0.2889 - acc: 0.9642 - val_loss: 0.8765 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 00711: val_acc did not improve from 0.86765\n",
+ "Epoch 712/3000\n",
+ " - 39s - loss: 0.2862 - acc: 0.9663 - val_loss: 0.8791 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 00712: val_acc did not improve from 0.86765\n",
+ "Epoch 713/3000\n",
+ " - 39s - loss: 0.2796 - acc: 0.9677 - val_loss: 0.9159 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00713: val_acc did not improve from 0.86765\n",
+ "Epoch 714/3000\n",
+ " - 39s - loss: 0.2880 - acc: 0.9633 - val_loss: 0.8778 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 00714: val_acc did not improve from 0.86765\n",
+ "Epoch 715/3000\n",
+ " - 39s - loss: 0.3003 - acc: 0.9618 - val_loss: 0.9154 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00715: val_acc did not improve from 0.86765\n",
+ "Epoch 716/3000\n",
+ " - 39s - loss: 0.2912 - acc: 0.9620 - val_loss: 0.9004 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00716: val_acc did not improve from 0.86765\n",
+ "Epoch 717/3000\n",
+ " - 39s - loss: 0.2935 - acc: 0.9645 - val_loss: 0.9077 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00717: val_acc did not improve from 0.86765\n",
+ "Epoch 718/3000\n",
+ " - 39s - loss: 0.2992 - acc: 0.9620 - val_loss: 0.9111 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 00718: val_acc did not improve from 0.86765\n",
+ "Epoch 719/3000\n",
+ " - 39s - loss: 0.2827 - acc: 0.9681 - val_loss: 0.8965 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00719: val_acc did not improve from 0.86765\n",
+ "Epoch 720/3000\n",
+ " - 39s - loss: 0.2885 - acc: 0.9629 - val_loss: 0.8949 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 00720: val_acc did not improve from 0.86765\n",
+ "Epoch 721/3000\n",
+ " - 39s - loss: 0.2937 - acc: 0.9650 - val_loss: 0.8743 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 00721: val_acc did not improve from 0.86765\n",
+ "Epoch 722/3000\n",
+ " - 39s - loss: 0.2873 - acc: 0.9647 - val_loss: 0.9313 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00722: val_acc did not improve from 0.86765\n",
+ "Epoch 723/3000\n",
+ " - 39s - loss: 0.3143 - acc: 0.9579 - val_loss: 0.8754 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00723: val_acc did not improve from 0.86765\n",
+ "Epoch 724/3000\n",
+ " - 39s - loss: 0.2988 - acc: 0.9609 - val_loss: 0.8939 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00724: val_acc did not improve from 0.86765\n",
+ "Epoch 725/3000\n",
+ " - 39s - loss: 0.2950 - acc: 0.9623 - val_loss: 0.8939 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00725: val_acc did not improve from 0.86765\n",
+ "Epoch 726/3000\n",
+ " - 39s - loss: 0.2831 - acc: 0.9680 - val_loss: 0.9072 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 00726: val_acc did not improve from 0.86765\n",
+ "Epoch 727/3000\n",
+ " - 39s - loss: 0.2914 - acc: 0.9660 - val_loss: 0.9478 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00727: val_acc did not improve from 0.86765\n",
+ "Epoch 728/3000\n",
+ " - 39s - loss: 0.2890 - acc: 0.9651 - val_loss: 0.8874 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00728: val_acc did not improve from 0.86765\n",
+ "Epoch 729/3000\n",
+ " - 39s - loss: 0.2919 - acc: 0.9644 - val_loss: 0.9314 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00729: val_acc did not improve from 0.86765\n",
+ "Epoch 730/3000\n",
+ " - 39s - loss: 0.2971 - acc: 0.9609 - val_loss: 0.8941 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00730: val_acc did not improve from 0.86765\n",
+ "Epoch 731/3000\n",
+ " - 39s - loss: 0.2879 - acc: 0.9644 - val_loss: 0.9337 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00731: val_acc did not improve from 0.86765\n",
+ "Epoch 732/3000\n",
+ " - 39s - loss: 0.2856 - acc: 0.9647 - val_loss: 0.9427 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00732: val_acc did not improve from 0.86765\n",
+ "Epoch 733/3000\n",
+ " - 39s - loss: 0.2878 - acc: 0.9639 - val_loss: 0.8807 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00733: val_acc did not improve from 0.86765\n",
+ "Epoch 734/3000\n",
+ " - 39s - loss: 0.2843 - acc: 0.9659 - val_loss: 0.9139 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00734: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00734: ReduceLROnPlateau reducing learning rate to 3.2353355527448004e-05.\n",
+ "Epoch 735/3000\n",
+ " - 39s - loss: 0.2950 - acc: 0.9642 - val_loss: 0.9057 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00735: val_acc did not improve from 0.86765\n",
+ "Epoch 736/3000\n",
+ " - 39s - loss: 0.2941 - acc: 0.9650 - val_loss: 0.9524 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00736: val_acc did not improve from 0.86765\n",
+ "Epoch 737/3000\n",
+ " - 39s - loss: 0.2927 - acc: 0.9642 - val_loss: 0.8909 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00737: val_acc did not improve from 0.86765\n",
+ "Epoch 738/3000\n",
+ " - 39s - loss: 0.2851 - acc: 0.9650 - val_loss: 0.9237 - val_acc: 0.8474\n",
+ "\n",
+ "Epoch 00738: val_acc did not improve from 0.86765\n",
+ "Epoch 739/3000\n",
+ " - 39s - loss: 0.3011 - acc: 0.9635 - val_loss: 0.9081 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00739: val_acc did not improve from 0.86765\n",
+ "Epoch 740/3000\n",
+ " - 39s - loss: 0.3006 - acc: 0.9612 - val_loss: 0.8786 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00740: val_acc did not improve from 0.86765\n",
+ "Epoch 741/3000\n",
+ " - 39s - loss: 0.2911 - acc: 0.9650 - val_loss: 0.9304 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00741: val_acc did not improve from 0.86765\n",
+ "Epoch 742/3000\n",
+ " - 40s - loss: 0.2882 - acc: 0.9657 - val_loss: 0.9545 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00742: val_acc did not improve from 0.86765\n",
+ "Epoch 743/3000\n",
+ " - 39s - loss: 0.2863 - acc: 0.9632 - val_loss: 0.9024 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00743: val_acc did not improve from 0.86765\n",
+ "Epoch 744/3000\n",
+ " - 39s - loss: 0.2837 - acc: 0.9647 - val_loss: 0.9288 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00744: val_acc did not improve from 0.86765\n",
+ "Epoch 745/3000\n",
+ " - 39s - loss: 0.2848 - acc: 0.9666 - val_loss: 0.9740 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00745: val_acc did not improve from 0.86765\n",
+ "Epoch 746/3000\n",
+ " - 39s - loss: 0.2994 - acc: 0.9647 - val_loss: 0.9326 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00746: val_acc did not improve from 0.86765\n",
+ "Epoch 747/3000\n",
+ " - 39s - loss: 0.2904 - acc: 0.9644 - val_loss: 0.9005 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 00747: val_acc did not improve from 0.86765\n",
+ "Epoch 748/3000\n",
+ " - 39s - loss: 0.2829 - acc: 0.9650 - val_loss: 0.9139 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00748: val_acc did not improve from 0.86765\n",
+ "Epoch 749/3000\n",
+ " - 39s - loss: 0.2870 - acc: 0.9635 - val_loss: 0.9250 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00749: val_acc did not improve from 0.86765\n",
+ "Epoch 750/3000\n",
+ " - 39s - loss: 0.2867 - acc: 0.9639 - val_loss: 0.8846 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 00750: val_acc did not improve from 0.86765\n",
+ "Epoch 751/3000\n",
+ " - 39s - loss: 0.2881 - acc: 0.9642 - val_loss: 0.9549 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00751: val_acc did not improve from 0.86765\n",
+ "Epoch 752/3000\n",
+ " - 40s - loss: 0.3015 - acc: 0.9627 - val_loss: 0.9473 - val_acc: 0.8455\n",
+ "\n",
+ "Epoch 00752: val_acc did not improve from 0.86765\n",
+ "Epoch 753/3000\n",
+ " - 39s - loss: 0.2887 - acc: 0.9639 - val_loss: 0.9583 - val_acc: 0.8455\n",
+ "\n",
+ "Epoch 00753: val_acc did not improve from 0.86765\n",
+ "Epoch 754/3000\n",
+ " - 39s - loss: 0.2826 - acc: 0.9654 - val_loss: 0.9187 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00754: val_acc did not improve from 0.86765\n",
+ "Epoch 755/3000\n",
+ " - 39s - loss: 0.2901 - acc: 0.9633 - val_loss: 0.9473 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00755: val_acc did not improve from 0.86765\n",
+ "Epoch 756/3000\n",
+ " - 39s - loss: 0.2856 - acc: 0.9657 - val_loss: 0.9160 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 00756: val_acc did not improve from 0.86765\n",
+ "Epoch 757/3000\n",
+ " - 39s - loss: 0.2852 - acc: 0.9648 - val_loss: 0.8796 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00757: val_acc did not improve from 0.86765\n",
+ "Epoch 758/3000\n",
+ " - 39s - loss: 0.2768 - acc: 0.9672 - val_loss: 0.9374 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00758: val_acc did not improve from 0.86765\n",
+ "Epoch 759/3000\n",
+ " - 39s - loss: 0.2989 - acc: 0.9626 - val_loss: 0.9388 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00759: val_acc did not improve from 0.86765\n",
+ "Epoch 760/3000\n",
+ " - 39s - loss: 0.2904 - acc: 0.9630 - val_loss: 0.9740 - val_acc: 0.8474\n",
+ "\n",
+ "Epoch 00760: val_acc did not improve from 0.86765\n",
+ "Epoch 761/3000\n",
+ " - 40s - loss: 0.2885 - acc: 0.9642 - val_loss: 0.9901 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00761: val_acc did not improve from 0.86765\n",
+ "Epoch 762/3000\n",
+ " - 39s - loss: 0.2759 - acc: 0.9662 - val_loss: 0.9343 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00762: val_acc did not improve from 0.86765\n",
+ "Epoch 763/3000\n",
+ " - 39s - loss: 0.2832 - acc: 0.9642 - val_loss: 0.9455 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00763: val_acc did not improve from 0.86765\n",
+ "Epoch 764/3000\n",
+ " - 39s - loss: 0.2840 - acc: 0.9635 - val_loss: 0.9037 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00764: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00764: ReduceLROnPlateau reducing learning rate to 3.0735688960703554e-05.\n",
+ "Epoch 765/3000\n",
+ " - 39s - loss: 0.2901 - acc: 0.9629 - val_loss: 0.9094 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00765: val_acc did not improve from 0.86765\n",
+ "Epoch 766/3000\n",
+ " - 39s - loss: 0.2887 - acc: 0.9629 - val_loss: 0.9508 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00766: val_acc did not improve from 0.86765\n",
+ "Epoch 767/3000\n",
+ " - 39s - loss: 0.2766 - acc: 0.9669 - val_loss: 0.9100 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00767: val_acc did not improve from 0.86765\n",
+ "Epoch 768/3000\n",
+ " - 39s - loss: 0.2883 - acc: 0.9639 - val_loss: 0.8800 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 00768: val_acc did not improve from 0.86765\n",
+ "Epoch 769/3000\n",
+ " - 40s - loss: 0.2919 - acc: 0.9633 - val_loss: 0.8850 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00769: val_acc did not improve from 0.86765\n",
+ "Epoch 770/3000\n",
+ " - 39s - loss: 0.2937 - acc: 0.9633 - val_loss: 0.9037 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00770: val_acc did not improve from 0.86765\n",
+ "Epoch 771/3000\n",
+ " - 39s - loss: 0.2788 - acc: 0.9659 - val_loss: 0.9109 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00771: val_acc did not improve from 0.86765\n",
+ "Epoch 772/3000\n",
+ " - 39s - loss: 0.2797 - acc: 0.9666 - val_loss: 0.9358 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00772: val_acc did not improve from 0.86765\n",
+ "Epoch 773/3000\n",
+ " - 39s - loss: 0.2813 - acc: 0.9662 - val_loss: 0.9673 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00773: val_acc did not improve from 0.86765\n",
+ "Epoch 774/3000\n",
+ " - 39s - loss: 0.2857 - acc: 0.9645 - val_loss: 0.9170 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00774: val_acc did not improve from 0.86765\n",
+ "Epoch 775/3000\n",
+ " - 39s - loss: 0.2743 - acc: 0.9645 - val_loss: 0.9429 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00775: val_acc did not improve from 0.86765\n",
+ "Epoch 776/3000\n",
+ " - 39s - loss: 0.2823 - acc: 0.9632 - val_loss: 0.9266 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00776: val_acc did not improve from 0.86765\n",
+ "Epoch 777/3000\n",
+ " - 39s - loss: 0.3024 - acc: 0.9600 - val_loss: 0.9550 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00777: val_acc did not improve from 0.86765\n",
+ "Epoch 778/3000\n",
+ " - 39s - loss: 0.2889 - acc: 0.9632 - val_loss: 0.9114 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00778: val_acc did not improve from 0.86765\n",
+ "Epoch 779/3000\n",
+ " - 39s - loss: 0.2823 - acc: 0.9648 - val_loss: 0.8880 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 00779: val_acc did not improve from 0.86765\n",
+ "Epoch 780/3000\n",
+ " - 39s - loss: 0.2770 - acc: 0.9681 - val_loss: 0.9010 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00780: val_acc did not improve from 0.86765\n",
+ "Epoch 781/3000\n",
+ " - 39s - loss: 0.2822 - acc: 0.9651 - val_loss: 0.9331 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00781: val_acc did not improve from 0.86765\n",
+ "Epoch 782/3000\n",
+ " - 39s - loss: 0.2738 - acc: 0.9688 - val_loss: 0.9178 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 00782: val_acc did not improve from 0.86765\n",
+ "Epoch 783/3000\n",
+ " - 40s - loss: 0.2764 - acc: 0.9684 - val_loss: 0.8969 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00783: val_acc did not improve from 0.86765\n",
+ "Epoch 784/3000\n",
+ " - 39s - loss: 0.2783 - acc: 0.9669 - val_loss: 0.9362 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00784: val_acc did not improve from 0.86765\n",
+ "Epoch 785/3000\n",
+ " - 39s - loss: 0.2815 - acc: 0.9660 - val_loss: 0.9326 - val_acc: 0.8474\n",
+ "\n",
+ "Epoch 00785: val_acc did not improve from 0.86765\n",
+ "Epoch 786/3000\n",
+ " - 39s - loss: 0.2779 - acc: 0.9656 - val_loss: 0.9801 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00786: val_acc did not improve from 0.86765\n",
+ "Epoch 787/3000\n",
+ " - 39s - loss: 0.2955 - acc: 0.9638 - val_loss: 0.9417 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00787: val_acc did not improve from 0.86765\n",
+ "Epoch 788/3000\n",
+ " - 39s - loss: 0.2904 - acc: 0.9635 - val_loss: 0.9325 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00788: val_acc did not improve from 0.86765\n",
+ "Epoch 789/3000\n",
+ " - 40s - loss: 0.2880 - acc: 0.9651 - val_loss: 0.9173 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00789: val_acc did not improve from 0.86765\n",
+ "Epoch 790/3000\n",
+ " - 39s - loss: 0.2861 - acc: 0.9642 - val_loss: 0.8816 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00790: val_acc did not improve from 0.86765\n",
+ "Epoch 791/3000\n",
+ " - 40s - loss: 0.2830 - acc: 0.9644 - val_loss: 0.8885 - val_acc: 0.8638\n",
+ "\n",
+ "Epoch 00791: val_acc did not improve from 0.86765\n",
+ "Epoch 792/3000\n",
+ " - 39s - loss: 0.2820 - acc: 0.9660 - val_loss: 0.9431 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00792: val_acc did not improve from 0.86765\n",
+ "Epoch 793/3000\n",
+ " - 39s - loss: 0.2788 - acc: 0.9680 - val_loss: 0.9015 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00793: val_acc did not improve from 0.86765\n",
+ "Epoch 794/3000\n",
+ " - 39s - loss: 0.2761 - acc: 0.9668 - val_loss: 0.9139 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00794: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00794: ReduceLROnPlateau reducing learning rate to 2.9198905031080356e-05.\n",
+ "Epoch 795/3000\n",
+ " - 40s - loss: 0.2831 - acc: 0.9654 - val_loss: 0.9262 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00795: val_acc did not improve from 0.86765\n",
+ "Epoch 796/3000\n",
+ " - 39s - loss: 0.2777 - acc: 0.9659 - val_loss: 0.9238 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00796: val_acc did not improve from 0.86765\n",
+ "Epoch 797/3000\n",
+ " - 39s - loss: 0.2825 - acc: 0.9683 - val_loss: 0.9148 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00797: val_acc did not improve from 0.86765\n",
+ "Epoch 798/3000\n",
+ " - 39s - loss: 0.2807 - acc: 0.9662 - val_loss: 0.9590 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00798: val_acc did not improve from 0.86765\n",
+ "Epoch 799/3000\n",
+ " - 40s - loss: 0.2864 - acc: 0.9662 - val_loss: 0.9451 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00799: val_acc did not improve from 0.86765\n",
+ "Epoch 800/3000\n",
+ " - 39s - loss: 0.2845 - acc: 0.9648 - val_loss: 0.9250 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00800: val_acc did not improve from 0.86765\n",
+ "Epoch 801/3000\n",
+ " - 39s - loss: 0.2791 - acc: 0.9671 - val_loss: 0.9420 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00801: val_acc did not improve from 0.86765\n",
+ "Epoch 802/3000\n",
+ " - 39s - loss: 0.2731 - acc: 0.9680 - val_loss: 0.9543 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00802: val_acc did not improve from 0.86765\n",
+ "Epoch 803/3000\n",
+ " - 39s - loss: 0.2699 - acc: 0.9689 - val_loss: 0.8947 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 00803: val_acc did not improve from 0.86765\n",
+ "Epoch 804/3000\n",
+ " - 39s - loss: 0.2867 - acc: 0.9639 - val_loss: 0.9387 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00804: val_acc did not improve from 0.86765\n",
+ "Epoch 805/3000\n",
+ " - 40s - loss: 0.2954 - acc: 0.9621 - val_loss: 0.9670 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00805: val_acc did not improve from 0.86765\n",
+ "Epoch 806/3000\n",
+ " - 39s - loss: 0.2764 - acc: 0.9651 - val_loss: 0.9275 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00806: val_acc did not improve from 0.86765\n",
+ "Epoch 807/3000\n",
+ " - 39s - loss: 0.2878 - acc: 0.9629 - val_loss: 0.9517 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00807: val_acc did not improve from 0.86765\n",
+ "Epoch 808/3000\n",
+ " - 39s - loss: 0.2891 - acc: 0.9626 - val_loss: 0.8864 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00808: val_acc did not improve from 0.86765\n",
+ "Epoch 809/3000\n",
+ " - 39s - loss: 0.2790 - acc: 0.9653 - val_loss: 0.8962 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00809: val_acc did not improve from 0.86765\n",
+ "Epoch 810/3000\n",
+ " - 40s - loss: 0.2791 - acc: 0.9672 - val_loss: 0.9036 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 00810: val_acc did not improve from 0.86765\n",
+ "Epoch 811/3000\n",
+ " - 39s - loss: 0.2815 - acc: 0.9621 - val_loss: 0.8976 - val_acc: 0.8622\n",
+ "\n",
+ "Epoch 00811: val_acc did not improve from 0.86765\n",
+ "Epoch 812/3000\n",
+ " - 39s - loss: 0.2793 - acc: 0.9675 - val_loss: 0.8967 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00812: val_acc did not improve from 0.86765\n",
+ "Epoch 813/3000\n",
+ " - 39s - loss: 0.2730 - acc: 0.9700 - val_loss: 0.9122 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00813: val_acc did not improve from 0.86765\n",
+ "Epoch 814/3000\n",
+ " - 39s - loss: 0.2846 - acc: 0.9645 - val_loss: 0.9050 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 00814: val_acc did not improve from 0.86765\n",
+ "Epoch 815/3000\n",
+ " - 39s - loss: 0.2723 - acc: 0.9675 - val_loss: 0.9011 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00815: val_acc did not improve from 0.86765\n",
+ "Epoch 816/3000\n",
+ " - 39s - loss: 0.2765 - acc: 0.9662 - val_loss: 0.9365 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00816: val_acc did not improve from 0.86765\n",
+ "Epoch 817/3000\n",
+ " - 39s - loss: 0.2826 - acc: 0.9639 - val_loss: 0.9098 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 00817: val_acc did not improve from 0.86765\n",
+ "Epoch 818/3000\n",
+ " - 39s - loss: 0.2846 - acc: 0.9641 - val_loss: 0.9574 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00818: val_acc did not improve from 0.86765\n",
+ "Epoch 819/3000\n",
+ " - 40s - loss: 0.2812 - acc: 0.9659 - val_loss: 0.9651 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00819: val_acc did not improve from 0.86765\n",
+ "Epoch 820/3000\n",
+ " - 39s - loss: 0.2794 - acc: 0.9659 - val_loss: 0.9131 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00820: val_acc did not improve from 0.86765\n",
+ "Epoch 821/3000\n",
+ " - 39s - loss: 0.2878 - acc: 0.9639 - val_loss: 1.0026 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00821: val_acc did not improve from 0.86765\n",
+ "Epoch 822/3000\n",
+ " - 39s - loss: 0.2864 - acc: 0.9629 - val_loss: 0.9296 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00822: val_acc did not improve from 0.86765\n",
+ "Epoch 823/3000\n",
+ " - 39s - loss: 0.2793 - acc: 0.9657 - val_loss: 0.9175 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 00823: val_acc did not improve from 0.86765\n",
+ "Epoch 824/3000\n",
+ " - 39s - loss: 0.2714 - acc: 0.9689 - val_loss: 0.9200 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00824: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00824: ReduceLROnPlateau reducing learning rate to 2.7738960125134326e-05.\n",
+ "Epoch 825/3000\n",
+ " - 39s - loss: 0.2668 - acc: 0.9684 - val_loss: 0.9377 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00825: val_acc did not improve from 0.86765\n",
+ "Epoch 826/3000\n",
+ " - 39s - loss: 0.2777 - acc: 0.9659 - val_loss: 0.9424 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00826: val_acc did not improve from 0.86765\n",
+ "Epoch 827/3000\n",
+ " - 39s - loss: 0.2730 - acc: 0.9681 - val_loss: 0.9109 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00827: val_acc did not improve from 0.86765\n",
+ "Epoch 828/3000\n",
+ " - 39s - loss: 0.2793 - acc: 0.9672 - val_loss: 0.8981 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00828: val_acc did not improve from 0.86765\n",
+ "Epoch 829/3000\n",
+ " - 39s - loss: 0.2794 - acc: 0.9674 - val_loss: 0.8856 - val_acc: 0.8630\n",
+ "\n",
+ "Epoch 00829: val_acc did not improve from 0.86765\n",
+ "Epoch 830/3000\n",
+ " - 39s - loss: 0.2857 - acc: 0.9632 - val_loss: 0.8871 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00830: val_acc did not improve from 0.86765\n",
+ "Epoch 831/3000\n",
+ " - 39s - loss: 0.2796 - acc: 0.9641 - val_loss: 0.9087 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00831: val_acc did not improve from 0.86765\n",
+ "Epoch 832/3000\n",
+ " - 39s - loss: 0.2803 - acc: 0.9665 - val_loss: 0.9592 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00832: val_acc did not improve from 0.86765\n",
+ "Epoch 833/3000\n",
+ " - 39s - loss: 0.2778 - acc: 0.9672 - val_loss: 0.9842 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00833: val_acc did not improve from 0.86765\n",
+ "Epoch 834/3000\n",
+ " - 40s - loss: 0.2756 - acc: 0.9684 - val_loss: 0.9498 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00834: val_acc did not improve from 0.86765\n",
+ "Epoch 835/3000\n",
+ " - 39s - loss: 0.2723 - acc: 0.9659 - val_loss: 0.8817 - val_acc: 0.8661\n",
+ "\n",
+ "Epoch 00835: val_acc did not improve from 0.86765\n",
+ "Epoch 836/3000\n",
+ " - 40s - loss: 0.2778 - acc: 0.9665 - val_loss: 0.9431 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00836: val_acc did not improve from 0.86765\n",
+ "Epoch 837/3000\n",
+ " - 39s - loss: 0.2793 - acc: 0.9642 - val_loss: 0.9221 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00837: val_acc did not improve from 0.86765\n",
+ "Epoch 838/3000\n",
+ " - 39s - loss: 0.2845 - acc: 0.9672 - val_loss: 0.9088 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00838: val_acc did not improve from 0.86765\n",
+ "Epoch 839/3000\n",
+ " - 40s - loss: 0.2788 - acc: 0.9665 - val_loss: 0.9014 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00839: val_acc did not improve from 0.86765\n",
+ "Epoch 840/3000\n",
+ " - 39s - loss: 0.2772 - acc: 0.9654 - val_loss: 0.9364 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00840: val_acc did not improve from 0.86765\n",
+ "Epoch 841/3000\n",
+ " - 40s - loss: 0.2839 - acc: 0.9638 - val_loss: 0.9346 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00841: val_acc did not improve from 0.86765\n",
+ "Epoch 842/3000\n",
+ " - 39s - loss: 0.2756 - acc: 0.9668 - val_loss: 0.9324 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00842: val_acc did not improve from 0.86765\n",
+ "Epoch 843/3000\n",
+ " - 39s - loss: 0.2699 - acc: 0.9660 - val_loss: 0.9185 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00843: val_acc did not improve from 0.86765\n",
+ "Epoch 844/3000\n",
+ " - 39s - loss: 0.2659 - acc: 0.9692 - val_loss: 0.9411 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00844: val_acc did not improve from 0.86765\n",
+ "Epoch 845/3000\n",
+ " - 38s - loss: 0.2721 - acc: 0.9681 - val_loss: 0.9333 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00845: val_acc did not improve from 0.86765\n",
+ "Epoch 846/3000\n",
+ " - 39s - loss: 0.2703 - acc: 0.9695 - val_loss: 0.9727 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00846: val_acc did not improve from 0.86765\n",
+ "Epoch 847/3000\n",
+ " - 39s - loss: 0.2790 - acc: 0.9672 - val_loss: 0.9842 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00847: val_acc did not improve from 0.86765\n",
+ "Epoch 848/3000\n",
+ " - 39s - loss: 0.2789 - acc: 0.9666 - val_loss: 0.8609 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00848: val_acc did not improve from 0.86765\n",
+ "Epoch 849/3000\n",
+ " - 39s - loss: 0.2717 - acc: 0.9654 - val_loss: 0.9184 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00849: val_acc did not improve from 0.86765\n",
+ "Epoch 850/3000\n",
+ " - 40s - loss: 0.2760 - acc: 0.9689 - val_loss: 0.8725 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00850: val_acc did not improve from 0.86765\n",
+ "Epoch 851/3000\n",
+ " - 39s - loss: 0.2848 - acc: 0.9660 - val_loss: 0.9761 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00851: val_acc did not improve from 0.86765\n",
+ "Epoch 852/3000\n",
+ " - 39s - loss: 0.2893 - acc: 0.9639 - val_loss: 0.9037 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00852: val_acc did not improve from 0.86765\n",
+ "Epoch 853/3000\n",
+ " - 39s - loss: 0.2875 - acc: 0.9598 - val_loss: 0.9386 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00853: val_acc did not improve from 0.86765\n",
+ "Epoch 854/3000\n",
+ " - 39s - loss: 0.2745 - acc: 0.9665 - val_loss: 0.9106 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00854: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00854: ReduceLROnPlateau reducing learning rate to 2.6352012810093584e-05.\n",
+ "Epoch 855/3000\n",
+ " - 39s - loss: 0.2748 - acc: 0.9660 - val_loss: 0.8811 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00855: val_acc did not improve from 0.86765\n",
+ "Epoch 856/3000\n",
+ " - 39s - loss: 0.2754 - acc: 0.9684 - val_loss: 0.9337 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00856: val_acc did not improve from 0.86765\n",
+ "Epoch 857/3000\n",
+ " - 39s - loss: 0.2750 - acc: 0.9666 - val_loss: 0.9643 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00857: val_acc did not improve from 0.86765\n",
+ "Epoch 858/3000\n",
+ " - 39s - loss: 0.2956 - acc: 0.9629 - val_loss: 0.9885 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00858: val_acc did not improve from 0.86765\n",
+ "Epoch 859/3000\n",
+ " - 39s - loss: 0.2851 - acc: 0.9656 - val_loss: 0.9386 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00859: val_acc did not improve from 0.86765\n",
+ "Epoch 860/3000\n",
+ " - 39s - loss: 0.2880 - acc: 0.9630 - val_loss: 0.8919 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00860: val_acc did not improve from 0.86765\n",
+ "Epoch 861/3000\n",
+ " - 39s - loss: 0.2840 - acc: 0.9659 - val_loss: 0.9075 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00861: val_acc did not improve from 0.86765\n",
+ "Epoch 862/3000\n",
+ " - 39s - loss: 0.2773 - acc: 0.9675 - val_loss: 0.9273 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00862: val_acc did not improve from 0.86765\n",
+ "Epoch 863/3000\n",
+ " - 39s - loss: 0.2611 - acc: 0.9688 - val_loss: 0.9148 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 00863: val_acc did not improve from 0.86765\n",
+ "Epoch 864/3000\n",
+ " - 39s - loss: 0.2638 - acc: 0.9691 - val_loss: 0.9168 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 00864: val_acc did not improve from 0.86765\n",
+ "Epoch 865/3000\n",
+ " - 39s - loss: 0.2778 - acc: 0.9642 - val_loss: 0.9238 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00865: val_acc did not improve from 0.86765\n",
+ "Epoch 866/3000\n",
+ " - 39s - loss: 0.2923 - acc: 0.9630 - val_loss: 0.9227 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00866: val_acc did not improve from 0.86765\n",
+ "Epoch 867/3000\n",
+ " - 39s - loss: 0.2839 - acc: 0.9650 - val_loss: 0.9232 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 00867: val_acc did not improve from 0.86765\n",
+ "Epoch 868/3000\n",
+ " - 39s - loss: 0.2839 - acc: 0.9644 - val_loss: 0.8876 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00868: val_acc did not improve from 0.86765\n",
+ "Epoch 869/3000\n",
+ " - 39s - loss: 0.2805 - acc: 0.9660 - val_loss: 0.8901 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00869: val_acc did not improve from 0.86765\n",
+ "Epoch 870/3000\n",
+ " - 39s - loss: 0.2745 - acc: 0.9672 - val_loss: 0.9161 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00870: val_acc did not improve from 0.86765\n",
+ "Epoch 871/3000\n",
+ " - 39s - loss: 0.2767 - acc: 0.9656 - val_loss: 0.9435 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00871: val_acc did not improve from 0.86765\n",
+ "Epoch 872/3000\n",
+ " - 39s - loss: 0.2778 - acc: 0.9665 - val_loss: 0.9252 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00872: val_acc did not improve from 0.86765\n",
+ "Epoch 873/3000\n",
+ " - 38s - loss: 0.2818 - acc: 0.9642 - val_loss: 0.9597 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00873: val_acc did not improve from 0.86765\n",
+ "Epoch 874/3000\n",
+ " - 39s - loss: 0.2685 - acc: 0.9681 - val_loss: 0.9170 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00874: val_acc did not improve from 0.86765\n",
+ "Epoch 875/3000\n",
+ " - 40s - loss: 0.2723 - acc: 0.9666 - val_loss: 0.9440 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00875: val_acc did not improve from 0.86765\n",
+ "Epoch 876/3000\n",
+ " - 39s - loss: 0.2628 - acc: 0.9681 - val_loss: 0.9244 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00876: val_acc did not improve from 0.86765\n",
+ "Epoch 877/3000\n",
+ " - 39s - loss: 0.2675 - acc: 0.9688 - val_loss: 0.9393 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00877: val_acc did not improve from 0.86765\n",
+ "Epoch 878/3000\n",
+ " - 39s - loss: 0.2735 - acc: 0.9681 - val_loss: 0.9207 - val_acc: 0.8614\n",
+ "\n",
+ "Epoch 00878: val_acc did not improve from 0.86765\n",
+ "Epoch 879/3000\n",
+ " - 39s - loss: 0.2819 - acc: 0.9671 - val_loss: 0.9909 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00879: val_acc did not improve from 0.86765\n",
+ "Epoch 880/3000\n",
+ " - 39s - loss: 0.2822 - acc: 0.9663 - val_loss: 0.8859 - val_acc: 0.8622\n",
+ " - 39s - loss: 0.2437 - acc: 0.9728 - val_loss: 0.9414 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01453: val_acc did not improve from 0.86765\n",
+ "Epoch 1454/3000\n",
+ " - 39s - loss: 0.2456 - acc: 0.9689 - val_loss: 0.9396 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01454: val_acc did not improve from 0.86765\n",
+ "Epoch 1455/3000\n",
+ " - 39s - loss: 0.2501 - acc: 0.9704 - val_loss: 0.9704 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01455: val_acc did not improve from 0.86765\n",
+ "Epoch 1456/3000\n",
+ " - 39s - loss: 0.2410 - acc: 0.9730 - val_loss: 0.9438 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 01456: val_acc did not improve from 0.86765\n",
+ "Epoch 1457/3000\n",
+ " - 39s - loss: 0.2434 - acc: 0.9704 - val_loss: 0.9349 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01457: val_acc did not improve from 0.86765\n",
+ "Epoch 1458/3000\n",
+ " - 39s - loss: 0.2376 - acc: 0.9721 - val_loss: 0.9272 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 01458: val_acc did not improve from 0.86765\n",
+ "Epoch 1459/3000\n",
+ " - 39s - loss: 0.2536 - acc: 0.9677 - val_loss: 0.9463 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01459: val_acc did not improve from 0.86765\n",
+ "Epoch 1460/3000\n",
+ " - 39s - loss: 0.2437 - acc: 0.9716 - val_loss: 0.9497 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 01460: val_acc did not improve from 0.86765\n",
+ "Epoch 1461/3000\n",
+ " - 39s - loss: 0.2477 - acc: 0.9725 - val_loss: 0.9798 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 01461: val_acc did not improve from 0.86765\n",
+ "Epoch 1462/3000\n",
+ " - 39s - loss: 0.2415 - acc: 0.9713 - val_loss: 0.9064 - val_acc: 0.8622\n",
+ "\n",
+ "Epoch 01462: val_acc did not improve from 0.86765\n",
+ "Epoch 1463/3000\n",
+ " - 39s - loss: 0.2492 - acc: 0.9691 - val_loss: 0.9249 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 01463: val_acc did not improve from 0.86765\n",
+ "Epoch 1464/3000\n",
+ " - 39s - loss: 0.2496 - acc: 0.9680 - val_loss: 0.9249 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01464: val_acc did not improve from 0.86765\n",
+ "Epoch 1465/3000\n",
+ " - 39s - loss: 0.2397 - acc: 0.9719 - val_loss: 0.9813 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01465: val_acc did not improve from 0.86765\n",
+ "Epoch 1466/3000\n",
+ " - 39s - loss: 0.2389 - acc: 0.9728 - val_loss: 0.9445 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01466: val_acc did not improve from 0.86765\n",
+ "Epoch 1467/3000\n",
+ " - 39s - loss: 0.2439 - acc: 0.9706 - val_loss: 0.9341 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01467: val_acc did not improve from 0.86765\n",
+ "Epoch 1468/3000\n",
+ " - 39s - loss: 0.2416 - acc: 0.9703 - val_loss: 0.9718 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01468: val_acc did not improve from 0.86765\n",
+ "Epoch 1469/3000\n",
+ " - 39s - loss: 0.2418 - acc: 0.9707 - val_loss: 0.9249 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01469: val_acc did not improve from 0.86765\n",
+ "Epoch 1470/3000\n",
+ " - 39s - loss: 0.2429 - acc: 0.9713 - val_loss: 0.9052 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 01470: val_acc did not improve from 0.86765\n",
+ "Epoch 1471/3000\n",
+ " - 39s - loss: 0.2460 - acc: 0.9727 - val_loss: 0.9187 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01471: val_acc did not improve from 0.86765\n",
+ "Epoch 1472/3000\n",
+ " - 39s - loss: 0.2476 - acc: 0.9688 - val_loss: 0.9076 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 01472: val_acc did not improve from 0.86765\n",
+ "Epoch 1473/3000\n",
+ " - 39s - loss: 0.2420 - acc: 0.9722 - val_loss: 0.9097 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01473: val_acc did not improve from 0.86765\n",
+ "Epoch 1474/3000\n",
+ " - 39s - loss: 0.2489 - acc: 0.9707 - val_loss: 0.9347 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01474: val_acc did not improve from 0.86765\n",
+ "Epoch 1475/3000\n",
+ " - 39s - loss: 0.2462 - acc: 0.9706 - val_loss: 0.9509 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01475: val_acc did not improve from 0.86765\n",
+ "Epoch 1476/3000\n",
+ " - 39s - loss: 0.2499 - acc: 0.9728 - val_loss: 0.9493 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 01476: val_acc did not improve from 0.86765\n",
+ "Epoch 1477/3000\n",
+ " - 39s - loss: 0.2415 - acc: 0.9716 - val_loss: 0.9297 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01477: val_acc did not improve from 0.86765\n",
+ "Epoch 1478/3000\n",
+ " - 39s - loss: 0.2439 - acc: 0.9710 - val_loss: 0.9168 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01481: val_acc did not improve from 0.86765\n",
+ "Epoch 1482/3000\n",
+ " - 39s - loss: 0.2395 - acc: 0.9718 - val_loss: 0.9125 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01482: val_acc did not improve from 0.86765\n",
+ "Epoch 1483/3000\n",
+ " - 39s - loss: 0.2418 - acc: 0.9722 - val_loss: 0.9493 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01483: val_acc did not improve from 0.86765\n",
+ "Epoch 1484/3000\n",
+ " - 39s - loss: 0.2469 - acc: 0.9697 - val_loss: 0.9119 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 01484: val_acc did not improve from 0.86765\n",
+ "Epoch 1485/3000\n",
+ " - 39s - loss: 0.2441 - acc: 0.9697 - val_loss: 0.9422 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 01485: val_acc did not improve from 0.86765\n",
+ "Epoch 1486/3000\n",
+ " - 39s - loss: 0.2403 - acc: 0.9724 - val_loss: 0.8943 - val_acc: 0.8630\n",
+ "\n",
+ "Epoch 01486: val_acc did not improve from 0.86765\n",
+ "Epoch 1487/3000\n",
+ " - 39s - loss: 0.2391 - acc: 0.9733 - val_loss: 0.9159 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01487: val_acc did not improve from 0.86765\n",
+ "Epoch 1488/3000\n",
+ " - 39s - loss: 0.2531 - acc: 0.9701 - val_loss: 0.9343 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01488: val_acc did not improve from 0.86765\n",
+ "Epoch 1489/3000\n",
+ " - 39s - loss: 0.2523 - acc: 0.9681 - val_loss: 0.9137 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01489: val_acc did not improve from 0.86765\n",
+ "Epoch 1490/3000\n",
+ " - 39s - loss: 0.2489 - acc: 0.9692 - val_loss: 0.9188 - val_acc: 0.8614\n",
+ "\n",
+ "Epoch 01490: val_acc did not improve from 0.86765\n",
+ "Epoch 1491/3000\n",
+ " - 39s - loss: 0.2460 - acc: 0.9721 - val_loss: 0.9391 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 01491: val_acc did not improve from 0.86765\n",
+ "Epoch 1492/3000\n",
+ " - 39s - loss: 0.2413 - acc: 0.9731 - val_loss: 0.9119 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01492: val_acc did not improve from 0.86765\n",
+ "Epoch 1493/3000\n",
+ " - 39s - loss: 0.2432 - acc: 0.9728 - val_loss: 0.9226 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 01493: val_acc did not improve from 0.86765\n",
+ "Epoch 1494/3000\n",
+ " - 39s - loss: 0.2494 - acc: 0.9700 - val_loss: 0.9348 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01494: val_acc did not improve from 0.86765\n",
+ "Epoch 1495/3000\n",
+ " - 39s - loss: 0.2516 - acc: 0.9691 - val_loss: 0.9478 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 01495: val_acc did not improve from 0.86765\n",
+ "Epoch 1496/3000\n",
+ " - 39s - loss: 0.2404 - acc: 0.9730 - val_loss: 0.9172 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01496: val_acc did not improve from 0.86765\n",
+ "Epoch 1497/3000\n",
+ " - 39s - loss: 0.2487 - acc: 0.9700 - val_loss: 0.9282 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01497: val_acc did not improve from 0.86765\n",
+ "Epoch 1498/3000\n",
+ " - 39s - loss: 0.2415 - acc: 0.9703 - val_loss: 0.9244 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01498: val_acc did not improve from 0.86765\n",
+ "Epoch 1499/3000\n",
+ " - 39s - loss: 0.2360 - acc: 0.9731 - val_loss: 0.9419 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 01499: val_acc did not improve from 0.86765\n",
+ "Epoch 1500/3000\n",
+ " - 40s - loss: 0.2498 - acc: 0.9707 - val_loss: 0.9439 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01500: val_acc did not improve from 0.86765\n",
+ "Epoch 1501/3000\n",
+ " - 39s - loss: 0.2382 - acc: 0.9739 - val_loss: 0.9383 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 01501: val_acc did not improve from 0.86765\n",
+ "Epoch 1502/3000\n",
+ " - 39s - loss: 0.2510 - acc: 0.9691 - val_loss: 0.9453 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01502: val_acc did not improve from 0.86765\n",
+ "Epoch 1503/3000\n",
+ " - 39s - loss: 0.2407 - acc: 0.9715 - val_loss: 0.9334 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01503: val_acc did not improve from 0.86765\n",
+ "Epoch 1504/3000\n",
+ " - 39s - loss: 0.2410 - acc: 0.9719 - val_loss: 0.9173 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01504: val_acc did not improve from 0.86765\n",
+ "Epoch 1505/3000\n",
+ " - 39s - loss: 0.2400 - acc: 0.9727 - val_loss: 0.9308 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01505: val_acc did not improve from 0.86765\n",
+ "Epoch 1506/3000\n",
+ " - 39s - loss: 0.2380 - acc: 0.9716 - val_loss: 0.9309 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01506: val_acc did not improve from 0.86765\n",
+ "Epoch 1507/3000\n",
+ " - 39s - loss: 0.2403 - acc: 0.9727 - val_loss: 0.9427 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01507: val_acc did not improve from 0.86765\n",
+ "Epoch 1508/3000\n",
+ " - 39s - loss: 0.2587 - acc: 0.9684 - val_loss: 0.9172 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01508: val_acc did not improve from 0.86765\n",
+ "Epoch 1509/3000\n",
+ " - 39s - loss: 0.2459 - acc: 0.9706 - val_loss: 0.9064 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01509: val_acc did not improve from 0.86765\n",
+ "Epoch 1510/3000\n",
+ " - 39s - loss: 0.2416 - acc: 0.9718 - val_loss: 0.9717 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 01510: val_acc did not improve from 0.86765\n",
+ "Epoch 1511/3000\n",
+ " - 39s - loss: 0.2383 - acc: 0.9748 - val_loss: 0.9140 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 01511: val_acc did not improve from 0.86765\n",
+ "Epoch 1512/3000\n",
+ " - 40s - loss: 0.2466 - acc: 0.9703 - val_loss: 0.9460 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01512: val_acc did not improve from 0.86765\n",
+ "Epoch 1513/3000\n",
+ " - 39s - loss: 0.2477 - acc: 0.9706 - val_loss: 0.9360 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01513: val_acc did not improve from 0.86765\n",
+ "Epoch 1514/3000\n",
+ " - 39s - loss: 0.2462 - acc: 0.9703 - val_loss: 0.9291 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01514: val_acc did not improve from 0.86765\n",
+ "Epoch 1515/3000\n",
+ " - 39s - loss: 0.2393 - acc: 0.9725 - val_loss: 0.9156 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01515: val_acc did not improve from 0.86765\n",
+ "Epoch 1516/3000\n",
+ " - 39s - loss: 0.2503 - acc: 0.9700 - val_loss: 0.9251 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01516: val_acc did not improve from 0.86765\n",
+ "Epoch 1517/3000\n",
+ " - 39s - loss: 0.2373 - acc: 0.9706 - val_loss: 0.9421 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01517: val_acc did not improve from 0.86765\n",
+ "Epoch 1518/3000\n",
+ " - 39s - loss: 0.2344 - acc: 0.9736 - val_loss: 0.9315 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01518: val_acc did not improve from 0.86765\n",
+ "Epoch 1519/3000\n",
+ " - 39s - loss: 0.2454 - acc: 0.9692 - val_loss: 0.9511 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 01519: val_acc did not improve from 0.86765\n",
+ "Epoch 1520/3000\n",
+ " - 39s - loss: 0.2413 - acc: 0.9713 - val_loss: 0.9209 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01520: val_acc did not improve from 0.86765\n",
+ "Epoch 1521/3000\n",
+ " - 39s - loss: 0.2384 - acc: 0.9725 - val_loss: 0.9393 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01521: val_acc did not improve from 0.86765\n",
+ "Epoch 1522/3000\n",
+ " - 39s - loss: 0.2409 - acc: 0.9718 - val_loss: 0.9752 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 01522: val_acc did not improve from 0.86765\n",
+ "Epoch 1523/3000\n",
+ " - 39s - loss: 0.2481 - acc: 0.9712 - val_loss: 0.9664 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01523: val_acc did not improve from 0.86765\n",
+ "Epoch 1524/3000\n",
+ " - 39s - loss: 0.2389 - acc: 0.9718 - val_loss: 0.9558 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 01524: val_acc did not improve from 0.86765\n",
+ "Epoch 1525/3000\n",
+ " - 40s - loss: 0.2428 - acc: 0.9707 - val_loss: 0.9175 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01525: val_acc did not improve from 0.86765\n",
+ "Epoch 1526/3000\n",
+ " - 39s - loss: 0.2370 - acc: 0.9710 - val_loss: 0.9364 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01526: val_acc did not improve from 0.86765\n",
+ "Epoch 1527/3000\n",
+ " - 39s - loss: 0.2385 - acc: 0.9724 - val_loss: 0.9245 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01527: val_acc did not improve from 0.86765\n",
+ "Epoch 1528/3000\n",
+ " - 39s - loss: 0.2444 - acc: 0.9713 - val_loss: 0.9479 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01528: val_acc did not improve from 0.86765\n",
+ "Epoch 1529/3000\n",
+ " - 39s - loss: 0.2440 - acc: 0.9712 - val_loss: 0.9174 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01529: val_acc did not improve from 0.86765\n",
+ "Epoch 1530/3000\n",
+ " - 39s - loss: 0.2414 - acc: 0.9730 - val_loss: 0.9120 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 01530: val_acc did not improve from 0.86765\n",
+ "Epoch 1531/3000\n",
+ " - 39s - loss: 0.2462 - acc: 0.9700 - val_loss: 0.9599 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 01531: val_acc did not improve from 0.86765\n",
+ "Epoch 1532/3000\n",
+ " - 39s - loss: 0.2468 - acc: 0.9703 - val_loss: 0.9481 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01532: val_acc did not improve from 0.86765\n",
+ "Epoch 1533/3000\n",
+ " - 39s - loss: 0.2527 - acc: 0.9697 - val_loss: 0.9536 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01533: val_acc did not improve from 0.86765\n",
+ "Epoch 1534/3000\n",
+ " - 39s - loss: 0.2402 - acc: 0.9725 - val_loss: 0.9329 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01534: val_acc did not improve from 0.86765\n",
+ "Epoch 1535/3000\n",
+ " - 39s - loss: 0.2466 - acc: 0.9716 - val_loss: 0.9295 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01535: val_acc did not improve from 0.86765\n",
+ "Epoch 1536/3000\n",
+ " - 39s - loss: 0.2510 - acc: 0.9689 - val_loss: 0.9513 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01536: val_acc did not improve from 0.86765\n",
+ "Epoch 1537/3000\n",
+ " - 39s - loss: 0.2406 - acc: 0.9734 - val_loss: 0.9543 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01537: val_acc did not improve from 0.86765\n",
+ "Epoch 1538/3000\n",
+ " - 39s - loss: 0.2365 - acc: 0.9742 - val_loss: 0.9489 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01538: val_acc did not improve from 0.86765\n",
+ "Epoch 1539/3000\n",
+ " - 39s - loss: 0.2477 - acc: 0.9716 - val_loss: 0.9163 - val_acc: 0.8618\n",
+ "\n",
+ "Epoch 01539: val_acc did not improve from 0.86765\n",
+ "Epoch 1540/3000\n",
+ " - 39s - loss: 0.2436 - acc: 0.9710 - val_loss: 0.9541 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01540: val_acc did not improve from 0.86765\n",
+ "Epoch 1541/3000\n",
+ " - 39s - loss: 0.2495 - acc: 0.9704 - val_loss: 0.9125 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 01541: val_acc did not improve from 0.86765\n",
+ "Epoch 1542/3000\n",
+ " - 39s - loss: 0.2509 - acc: 0.9691 - val_loss: 0.9325 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01542: val_acc did not improve from 0.86765\n",
+ "Epoch 1543/3000\n",
+ " - 39s - loss: 0.2377 - acc: 0.9725 - val_loss: 0.9237 - val_acc: 0.8622\n",
+ "\n",
+ "Epoch 01543: val_acc did not improve from 0.86765\n",
+ "Epoch 1544/3000\n",
+ " - 39s - loss: 0.2403 - acc: 0.9700 - val_loss: 0.9509 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01544: val_acc did not improve from 0.86765\n",
+ "Epoch 1545/3000\n",
+ " - 40s - loss: 0.2449 - acc: 0.9697 - val_loss: 0.9444 - val_acc: 0.8618\n",
+ "\n",
+ "Epoch 01545: val_acc did not improve from 0.86765\n",
+ "Epoch 1546/3000\n",
+ " - 39s - loss: 0.2476 - acc: 0.9712 - val_loss: 0.9380 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01546: val_acc did not improve from 0.86765\n",
+ "Epoch 1547/3000\n",
+ " - 39s - loss: 0.2425 - acc: 0.9712 - val_loss: 0.9158 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01547: val_acc did not improve from 0.86765\n",
+ "Epoch 1548/3000\n",
+ " - 39s - loss: 0.2398 - acc: 0.9718 - val_loss: 0.9345 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01548: val_acc did not improve from 0.86765\n",
+ "Epoch 1549/3000\n",
+ " - 39s - loss: 0.2443 - acc: 0.9707 - val_loss: 0.9413 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 01549: val_acc did not improve from 0.86765\n",
+ "Epoch 1550/3000\n",
+ " - 39s - loss: 0.2405 - acc: 0.9718 - val_loss: 0.9338 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01550: val_acc did not improve from 0.86765\n",
+ "Epoch 1551/3000\n",
+ " - 39s - loss: 0.2521 - acc: 0.9701 - val_loss: 0.9322 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01551: val_acc did not improve from 0.86765\n",
+ "Epoch 1552/3000\n",
+ " - 40s - loss: 0.2453 - acc: 0.9728 - val_loss: 0.9305 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01552: val_acc did not improve from 0.86765\n",
+ "Epoch 1553/3000\n",
+ " - 39s - loss: 0.2423 - acc: 0.9719 - val_loss: 0.9418 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01553: val_acc did not improve from 0.86765\n",
+ "Epoch 1554/3000\n",
+ " - 39s - loss: 0.2467 - acc: 0.9710 - val_loss: 0.9277 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01554: val_acc did not improve from 0.86765\n",
+ "Epoch 1555/3000\n",
+ " - 39s - loss: 0.2439 - acc: 0.9706 - val_loss: 0.9063 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01555: val_acc did not improve from 0.86765\n",
+ "Epoch 1556/3000\n",
+ " - 39s - loss: 0.2427 - acc: 0.9721 - val_loss: 0.9451 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01556: val_acc did not improve from 0.86765\n",
+ "Epoch 1557/3000\n",
+ " - 39s - loss: 0.2500 - acc: 0.9707 - val_loss: 0.9079 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 01557: val_acc did not improve from 0.86765\n",
+ "Epoch 1558/3000\n",
+ " - 39s - loss: 0.2409 - acc: 0.9709 - val_loss: 0.9328 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01558: val_acc did not improve from 0.86765\n",
+ "Epoch 1559/3000\n",
+ " - 39s - loss: 0.2429 - acc: 0.9725 - val_loss: 0.9575 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01559: val_acc did not improve from 0.86765\n",
+ "Epoch 1560/3000\n",
+ " - 40s - loss: 0.2338 - acc: 0.9734 - val_loss: 0.9515 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01560: val_acc did not improve from 0.86765\n",
+ "Epoch 1561/3000\n",
+ " - 40s - loss: 0.2473 - acc: 0.9706 - val_loss: 0.9409 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01561: val_acc did not improve from 0.86765\n",
+ "Epoch 1562/3000\n",
+ " - 39s - loss: 0.2456 - acc: 0.9712 - val_loss: 0.9535 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01562: val_acc did not improve from 0.86765\n",
+ "Epoch 1563/3000\n",
+ " - 39s - loss: 0.2408 - acc: 0.9716 - val_loss: 0.9553 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01563: val_acc did not improve from 0.86765\n",
+ "Epoch 1564/3000\n",
+ " - 39s - loss: 0.2381 - acc: 0.9746 - val_loss: 0.9454 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01564: val_acc did not improve from 0.86765\n",
+ "Epoch 1565/3000\n",
+ " - 39s - loss: 0.2401 - acc: 0.9736 - val_loss: 0.9554 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01565: val_acc did not improve from 0.86765\n",
+ "Epoch 1566/3000\n",
+ " - 39s - loss: 0.2458 - acc: 0.9695 - val_loss: 0.9125 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01566: val_acc did not improve from 0.86765\n",
+ "Epoch 1567/3000\n",
+ " - 40s - loss: 0.2468 - acc: 0.9704 - val_loss: 0.9234 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01567: val_acc did not improve from 0.86765\n",
+ "Epoch 1568/3000\n",
+ " - 39s - loss: 0.2473 - acc: 0.9686 - val_loss: 0.9318 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01568: val_acc did not improve from 0.86765\n",
+ "Epoch 1569/3000\n",
+ " - 39s - loss: 0.2450 - acc: 0.9701 - val_loss: 0.9276 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01569: val_acc did not improve from 0.86765\n",
+ "Epoch 1570/3000\n",
+ " - 39s - loss: 0.2446 - acc: 0.9698 - val_loss: 0.9424 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01570: val_acc did not improve from 0.86765\n",
+ "Epoch 1571/3000\n",
+ " - 39s - loss: 0.2531 - acc: 0.9695 - val_loss: 0.9584 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01571: val_acc did not improve from 0.86765\n",
+ "Epoch 1572/3000\n",
+ " - 39s - loss: 0.2353 - acc: 0.9734 - val_loss: 0.9187 - val_acc: 0.8618\n",
+ "\n",
+ "Epoch 01572: val_acc did not improve from 0.86765\n",
+ "Epoch 1573/3000\n",
+ " - 39s - loss: 0.2381 - acc: 0.9721 - val_loss: 0.9378 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01573: val_acc did not improve from 0.86765\n",
+ "Epoch 1574/3000\n",
+ " - 39s - loss: 0.2368 - acc: 0.9734 - val_loss: 0.9305 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01574: val_acc did not improve from 0.86765\n",
+ "Epoch 1575/3000\n",
+ " - 39s - loss: 0.2427 - acc: 0.9706 - val_loss: 0.9283 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01575: val_acc did not improve from 0.86765\n",
+ "Epoch 1576/3000\n",
+ " - 39s - loss: 0.2452 - acc: 0.9713 - val_loss: 0.9388 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01576: val_acc did not improve from 0.86765\n",
+ "Epoch 1577/3000\n",
+ " - 39s - loss: 0.2353 - acc: 0.9731 - val_loss: 0.9383 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01577: val_acc did not improve from 0.86765\n",
+ "Epoch 1578/3000\n",
+ " - 39s - loss: 0.2318 - acc: 0.9737 - val_loss: 0.9308 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01578: val_acc did not improve from 0.86765\n",
+ "Epoch 1579/3000\n",
+ " - 39s - loss: 0.2462 - acc: 0.9737 - val_loss: 0.9294 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01579: val_acc did not improve from 0.86765\n",
+ "Epoch 1580/3000\n",
+ " - 39s - loss: 0.2419 - acc: 0.9722 - val_loss: 0.9507 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 01580: val_acc did not improve from 0.86765\n",
+ "Epoch 1581/3000\n",
+ " - 39s - loss: 0.2438 - acc: 0.9727 - val_loss: 0.9298 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01581: val_acc did not improve from 0.86765\n",
+ "Epoch 1582/3000\n",
+ " - 39s - loss: 0.2423 - acc: 0.9727 - val_loss: 0.9501 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01582: val_acc did not improve from 0.86765\n",
+ "Epoch 1583/3000\n",
+ " - 39s - loss: 0.2363 - acc: 0.9727 - val_loss: 0.9519 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01583: val_acc did not improve from 0.86765\n",
+ "Epoch 1584/3000\n",
+ " - 39s - loss: 0.2477 - acc: 0.9686 - val_loss: 0.9393 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01584: val_acc did not improve from 0.86765\n",
+ "Epoch 1585/3000\n",
+ " - 39s - loss: 0.2445 - acc: 0.9706 - val_loss: 0.9423 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 01585: val_acc did not improve from 0.86765\n",
+ "Epoch 1586/3000\n",
+ " - 39s - loss: 0.2391 - acc: 0.9722 - val_loss: 0.9277 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01586: val_acc did not improve from 0.86765\n",
+ "Epoch 1587/3000\n",
+ " - 39s - loss: 0.2368 - acc: 0.9736 - val_loss: 0.9419 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01587: val_acc did not improve from 0.86765\n",
+ "Epoch 1588/3000\n",
+ " - 39s - loss: 0.2463 - acc: 0.9703 - val_loss: 0.9391 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01588: val_acc did not improve from 0.86765\n",
+ "Epoch 1589/3000\n",
+ " - 39s - loss: 0.2509 - acc: 0.9715 - val_loss: 0.8978 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 01589: val_acc did not improve from 0.86765\n",
+ "Epoch 1590/3000\n",
+ " - 39s - loss: 0.2401 - acc: 0.9736 - val_loss: 0.9471 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01590: val_acc did not improve from 0.86765\n",
+ "Epoch 1591/3000\n",
+ " - 39s - loss: 0.2393 - acc: 0.9745 - val_loss: 0.9482 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01591: val_acc did not improve from 0.86765\n",
+ "Epoch 1592/3000\n",
+ " - 39s - loss: 0.2456 - acc: 0.9706 - val_loss: 0.9385 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01592: val_acc did not improve from 0.86765\n",
+ "Epoch 1593/3000\n",
+ " - 39s - loss: 0.2496 - acc: 0.9698 - val_loss: 0.9354 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01593: val_acc did not improve from 0.86765\n",
+ "Epoch 1594/3000\n",
+ " - 39s - loss: 0.2427 - acc: 0.9722 - val_loss: 0.9388 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01594: val_acc did not improve from 0.86765\n",
+ "Epoch 1595/3000\n",
+ " - 39s - loss: 0.2441 - acc: 0.9722 - val_loss: 0.9616 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01595: val_acc did not improve from 0.86765\n",
+ "Epoch 1596/3000\n",
+ " - 39s - loss: 0.2371 - acc: 0.9736 - val_loss: 0.9283 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 01596: val_acc did not improve from 0.86765\n",
+ "Epoch 1597/3000\n",
+ " - 39s - loss: 0.2392 - acc: 0.9718 - val_loss: 0.9350 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01597: val_acc did not improve from 0.86765\n",
+ "Epoch 1598/3000\n",
+ " - 39s - loss: 0.2397 - acc: 0.9730 - val_loss: 0.9188 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01598: val_acc did not improve from 0.86765\n",
+ "Epoch 1599/3000\n",
+ " - 39s - loss: 0.2416 - acc: 0.9728 - val_loss: 0.9390 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01599: val_acc did not improve from 0.86765\n",
+ "Epoch 1600/3000\n",
+ " - 39s - loss: 0.2415 - acc: 0.9719 - val_loss: 0.9558 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 01600: val_acc did not improve from 0.86765\n",
+ "Epoch 1601/3000\n",
+ " - 40s - loss: 0.2359 - acc: 0.9739 - val_loss: 0.9396 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01601: val_acc did not improve from 0.86765\n",
+ "Epoch 1602/3000\n",
+ " - 39s - loss: 0.2448 - acc: 0.9709 - val_loss: 0.9179 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01602: val_acc did not improve from 0.86765\n",
+ "Epoch 1603/3000\n",
+ " - 39s - loss: 0.2373 - acc: 0.9725 - val_loss: 0.9440 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01603: val_acc did not improve from 0.86765\n",
+ "Epoch 1604/3000\n",
+ " - 39s - loss: 0.2330 - acc: 0.9755 - val_loss: 0.9561 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01604: val_acc did not improve from 0.86765\n",
+ "Epoch 1605/3000\n",
+ " - 39s - loss: 0.2442 - acc: 0.9709 - val_loss: 0.9349 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01605: val_acc did not improve from 0.86765\n",
+ "Epoch 1606/3000\n",
+ " - 39s - loss: 0.2401 - acc: 0.9718 - val_loss: 0.9517 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01606: val_acc did not improve from 0.86765\n",
+ "Epoch 1607/3000\n",
+ " - 39s - loss: 0.2407 - acc: 0.9700 - val_loss: 0.9624 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 01607: val_acc did not improve from 0.86765\n",
+ "Epoch 1608/3000\n",
+ " - 39s - loss: 0.2463 - acc: 0.9722 - val_loss: 0.9382 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01608: val_acc did not improve from 0.86765\n",
+ "Epoch 1609/3000\n",
+ " - 40s - loss: 0.2430 - acc: 0.9721 - val_loss: 0.9283 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01609: val_acc did not improve from 0.86765\n",
+ "Epoch 1610/3000\n",
+ " - 39s - loss: 0.2407 - acc: 0.9716 - val_loss: 0.9335 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01610: val_acc did not improve from 0.86765\n",
+ "Epoch 1611/3000\n",
+ " - 39s - loss: 0.2451 - acc: 0.9707 - val_loss: 0.9679 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 01611: val_acc did not improve from 0.86765\n",
+ "Epoch 1612/3000\n",
+ " - 39s - loss: 0.2400 - acc: 0.9719 - val_loss: 0.9498 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01612: val_acc did not improve from 0.86765\n",
+ "Epoch 1613/3000\n",
+ " - 39s - loss: 0.2493 - acc: 0.9698 - val_loss: 0.9202 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01613: val_acc did not improve from 0.86765\n",
+ "Epoch 1614/3000\n",
+ " - 39s - loss: 0.2435 - acc: 0.9719 - val_loss: 0.9701 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 01614: val_acc did not improve from 0.86765\n",
+ "Epoch 1615/3000\n",
+ " - 39s - loss: 0.2496 - acc: 0.9704 - val_loss: 0.9406 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 01615: val_acc did not improve from 0.86765\n",
+ "Epoch 1616/3000\n",
+ " - 39s - loss: 0.2410 - acc: 0.9727 - val_loss: 0.9507 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 01616: val_acc did not improve from 0.86765\n",
+ "Epoch 1617/3000\n",
+ " - 39s - loss: 0.2406 - acc: 0.9725 - val_loss: 0.9349 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 01617: val_acc did not improve from 0.86765\n",
+ "Epoch 1618/3000\n",
+ " - 39s - loss: 0.2343 - acc: 0.9740 - val_loss: 0.9635 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01618: val_acc did not improve from 0.86765\n",
+ "Epoch 1619/3000\n",
+ " - 39s - loss: 0.2390 - acc: 0.9727 - val_loss: 0.9410 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01619: val_acc did not improve from 0.86765\n",
+ "Epoch 1620/3000\n",
+ " - 39s - loss: 0.2365 - acc: 0.9740 - val_loss: 0.9448 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01620: val_acc did not improve from 0.86765\n",
+ "Epoch 1621/3000\n",
+ " - 39s - loss: 0.2352 - acc: 0.9736 - val_loss: 0.9504 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01621: val_acc did not improve from 0.86765\n",
+ "Epoch 1622/3000\n",
+ " - 39s - loss: 0.2404 - acc: 0.9706 - val_loss: 0.9619 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01622: val_acc did not improve from 0.86765\n",
+ "Epoch 1623/3000\n",
+ " - 39s - loss: 0.2453 - acc: 0.9725 - val_loss: 0.9266 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01623: val_acc did not improve from 0.86765\n",
+ "Epoch 1624/3000\n",
+ " - 39s - loss: 0.2396 - acc: 0.9716 - val_loss: 0.9552 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01624: val_acc did not improve from 0.86765\n",
+ "Epoch 1625/3000\n",
+ " - 39s - loss: 0.2455 - acc: 0.9707 - val_loss: 0.9090 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01625: val_acc did not improve from 0.86765\n",
+ "Epoch 1626/3000\n",
+ " - 39s - loss: 0.2400 - acc: 0.9727 - val_loss: 0.9586 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01626: val_acc did not improve from 0.86765\n",
+ "Epoch 1627/3000\n",
+ " - 39s - loss: 0.2413 - acc: 0.9722 - val_loss: 0.9194 - val_acc: 0.8630\n",
+ "\n",
+ "Epoch 01627: val_acc did not improve from 0.86765\n",
+ "Epoch 1628/3000\n",
+ " - 39s - loss: 0.2453 - acc: 0.9701 - val_loss: 0.9331 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01628: val_acc did not improve from 0.86765\n",
+ "Epoch 1629/3000\n",
+ " - 39s - loss: 0.2332 - acc: 0.9725 - val_loss: 0.9728 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01629: val_acc did not improve from 0.86765\n",
+ "Epoch 1630/3000\n",
+ " - 39s - loss: 0.2458 - acc: 0.9718 - val_loss: 0.9460 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01630: val_acc did not improve from 0.86765\n",
+ "Epoch 1631/3000\n",
+ " - 39s - loss: 0.2544 - acc: 0.9669 - val_loss: 0.9337 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01631: val_acc did not improve from 0.86765\n",
+ "Epoch 1632/3000\n",
+ " - 39s - loss: 0.2498 - acc: 0.9698 - val_loss: 0.9509 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01632: val_acc did not improve from 0.86765\n",
+ "Epoch 1633/3000\n",
+ " - 39s - loss: 0.2383 - acc: 0.9719 - val_loss: 0.9533 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01633: val_acc did not improve from 0.86765\n",
+ "Epoch 1634/3000\n",
+ " - 39s - loss: 0.2489 - acc: 0.9704 - val_loss: 0.9220 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01634: val_acc did not improve from 0.86765\n",
+ "Epoch 1635/3000\n",
+ " - 39s - loss: 0.2393 - acc: 0.9716 - val_loss: 0.9476 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01635: val_acc did not improve from 0.86765\n",
+ "Epoch 1636/3000\n",
+ " - 39s - loss: 0.2425 - acc: 0.9716 - val_loss: 0.9419 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01636: val_acc did not improve from 0.86765\n",
+ "Epoch 1637/3000\n",
+ " - 39s - loss: 0.2475 - acc: 0.9680 - val_loss: 0.9260 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01637: val_acc did not improve from 0.86765\n",
+ "Epoch 1638/3000\n",
+ " - 39s - loss: 0.2448 - acc: 0.9707 - val_loss: 0.9294 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01638: val_acc did not improve from 0.86765\n",
+ "Epoch 1639/3000\n",
+ " - 39s - loss: 0.2376 - acc: 0.9710 - val_loss: 0.9369 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01639: val_acc did not improve from 0.86765\n",
+ "Epoch 1640/3000\n",
+ " - 39s - loss: 0.2451 - acc: 0.9688 - val_loss: 0.9558 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01640: val_acc did not improve from 0.86765\n",
+ "Epoch 1641/3000\n",
+ " - 39s - loss: 0.2404 - acc: 0.9709 - val_loss: 0.9431 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01641: val_acc did not improve from 0.86765\n",
+ "Epoch 1642/3000\n",
+ " - 40s - loss: 0.2439 - acc: 0.9683 - val_loss: 0.9447 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01642: val_acc did not improve from 0.86765\n",
+ "Epoch 1643/3000\n",
+ " - 39s - loss: 0.2433 - acc: 0.9707 - val_loss: 0.9525 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 01643: val_acc did not improve from 0.86765\n",
+ "Epoch 1644/3000\n",
+ " - 39s - loss: 0.2329 - acc: 0.9757 - val_loss: 0.9513 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01644: val_acc did not improve from 0.86765\n",
+ "Epoch 1645/3000\n",
+ " - 39s - loss: 0.2347 - acc: 0.9713 - val_loss: 0.9371 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 01645: val_acc did not improve from 0.86765\n",
+ "Epoch 1646/3000\n",
+ " - 39s - loss: 0.2341 - acc: 0.9757 - val_loss: 0.9519 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 01646: val_acc did not improve from 0.86765\n",
+ "Epoch 1647/3000\n",
+ " - 39s - loss: 0.2425 - acc: 0.9692 - val_loss: 0.9479 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01647: val_acc did not improve from 0.86765\n",
+ "Epoch 1648/3000\n",
+ " - 39s - loss: 0.2429 - acc: 0.9718 - val_loss: 0.9273 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 01648: val_acc did not improve from 0.86765\n",
+ "Epoch 1649/3000\n",
+ " - 39s - loss: 0.2422 - acc: 0.9710 - val_loss: 0.9667 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01649: val_acc did not improve from 0.86765\n",
+ "Epoch 1650/3000\n",
+ " - 39s - loss: 0.2579 - acc: 0.9684 - val_loss: 0.9098 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 01650: val_acc did not improve from 0.86765\n",
+ "Epoch 1651/3000\n",
+ " - 39s - loss: 0.2449 - acc: 0.9703 - val_loss: 0.9447 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01651: val_acc did not improve from 0.86765\n",
+ "Epoch 1652/3000\n",
+ " - 39s - loss: 0.2403 - acc: 0.9725 - val_loss: 0.9293 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01652: val_acc did not improve from 0.86765\n",
+ "Epoch 1653/3000\n",
+ " - 39s - loss: 0.2319 - acc: 0.9751 - val_loss: 0.9602 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01653: val_acc did not improve from 0.86765\n",
+ "Epoch 1654/3000\n",
+ " - 39s - loss: 0.2309 - acc: 0.9748 - val_loss: 0.9506 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01654: val_acc did not improve from 0.86765\n",
+ "Epoch 1655/3000\n",
+ " - 39s - loss: 0.2422 - acc: 0.9724 - val_loss: 0.9509 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01655: val_acc did not improve from 0.86765\n",
+ "Epoch 1656/3000\n",
+ " - 39s - loss: 0.2348 - acc: 0.9736 - val_loss: 0.9294 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01656: val_acc did not improve from 0.86765\n",
+ "Epoch 1657/3000\n",
+ " - 40s - loss: 0.2360 - acc: 0.9748 - val_loss: 0.9647 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01657: val_acc did not improve from 0.86765\n",
+ "Epoch 1658/3000\n",
+ " - 39s - loss: 0.2443 - acc: 0.9733 - val_loss: 0.9579 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01658: val_acc did not improve from 0.86765\n",
+ "Epoch 1659/3000\n",
+ " - 40s - loss: 0.2359 - acc: 0.9734 - val_loss: 0.9637 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 01659: val_acc did not improve from 0.86765\n",
+ "Epoch 1660/3000\n",
+ " - 39s - loss: 0.2457 - acc: 0.9719 - val_loss: 0.9487 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 01660: val_acc did not improve from 0.86765\n",
+ "Epoch 1661/3000\n",
+ " - 39s - loss: 0.2475 - acc: 0.9684 - val_loss: 0.9477 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01661: val_acc did not improve from 0.86765\n",
+ "Epoch 1662/3000\n",
+ " - 39s - loss: 0.2420 - acc: 0.9700 - val_loss: 0.9458 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01662: val_acc did not improve from 0.86765\n",
+ "Epoch 1663/3000\n",
+ " - 39s - loss: 0.2477 - acc: 0.9697 - val_loss: 0.9442 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01663: val_acc did not improve from 0.86765\n",
+ "Epoch 1664/3000\n",
+ " - 39s - loss: 0.2430 - acc: 0.9700 - val_loss: 0.9368 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 01664: val_acc did not improve from 0.86765\n",
+ "Epoch 1665/3000\n",
+ " - 39s - loss: 0.2441 - acc: 0.9712 - val_loss: 0.9506 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01665: val_acc did not improve from 0.86765\n",
+ "Epoch 1666/3000\n",
+ " - 39s - loss: 0.2415 - acc: 0.9716 - val_loss: 0.9450 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01666: val_acc did not improve from 0.86765\n",
+ "Epoch 1667/3000\n",
+ " - 39s - loss: 0.2457 - acc: 0.9700 - val_loss: 0.9005 - val_acc: 0.8618\n",
+ "\n",
+ "Epoch 01667: val_acc did not improve from 0.86765\n",
+ "Epoch 1668/3000\n",
+ " - 39s - loss: 0.2392 - acc: 0.9724 - val_loss: 0.9510 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01668: val_acc did not improve from 0.86765\n",
+ "Epoch 1669/3000\n",
+ " - 39s - loss: 0.2405 - acc: 0.9718 - val_loss: 0.9585 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 01669: val_acc did not improve from 0.86765\n",
+ "Epoch 1670/3000\n",
+ " - 39s - loss: 0.2377 - acc: 0.9724 - val_loss: 0.9441 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01670: val_acc did not improve from 0.86765\n",
+ "Epoch 1671/3000\n",
+ " - 40s - loss: 0.2461 - acc: 0.9713 - val_loss: 0.9328 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01671: val_acc did not improve from 0.86765\n",
+ "Epoch 1672/3000\n",
+ " - 39s - loss: 0.2339 - acc: 0.9743 - val_loss: 0.9569 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01672: val_acc did not improve from 0.86765\n",
+ "Epoch 1673/3000\n",
+ " - 39s - loss: 0.2422 - acc: 0.9730 - val_loss: 0.9678 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 01673: val_acc did not improve from 0.86765\n",
+ "Epoch 1674/3000\n",
+ " - 39s - loss: 0.2379 - acc: 0.9736 - val_loss: 0.9578 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01674: val_acc did not improve from 0.86765\n",
+ "Epoch 1675/3000\n",
+ " - 39s - loss: 0.2431 - acc: 0.9719 - val_loss: 0.9396 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01675: val_acc did not improve from 0.86765\n",
+ "Epoch 1676/3000\n",
+ " - 39s - loss: 0.2344 - acc: 0.9739 - val_loss: 0.9647 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01676: val_acc did not improve from 0.86765\n",
+ "Epoch 1677/3000\n",
+ " - 39s - loss: 0.2400 - acc: 0.9716 - val_loss: 0.9397 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01677: val_acc did not improve from 0.86765\n",
+ "Epoch 1678/3000\n",
+ " - 39s - loss: 0.2354 - acc: 0.9725 - val_loss: 0.9399 - val_acc: 0.8614\n",
+ "\n",
+ "Epoch 01678: val_acc did not improve from 0.86765\n",
+ "Epoch 1679/3000\n",
+ " - 40s - loss: 0.2425 - acc: 0.9730 - val_loss: 0.9595 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01679: val_acc did not improve from 0.86765\n",
+ "Epoch 1680/3000\n",
+ " - 39s - loss: 0.2448 - acc: 0.9701 - val_loss: 0.9767 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01680: val_acc did not improve from 0.86765\n",
+ "Epoch 1681/3000\n",
+ " - 39s - loss: 0.2462 - acc: 0.9716 - val_loss: 0.9558 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01681: val_acc did not improve from 0.86765\n",
+ "Epoch 1682/3000\n",
+ " - 39s - loss: 0.2443 - acc: 0.9710 - val_loss: 0.9277 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01682: val_acc did not improve from 0.86765\n",
+ "Epoch 1683/3000\n",
+ " - 39s - loss: 0.2431 - acc: 0.9712 - val_loss: 0.9203 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01683: val_acc did not improve from 0.86765\n",
+ "Epoch 1684/3000\n",
+ " - 39s - loss: 0.2371 - acc: 0.9736 - val_loss: 0.9380 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01684: val_acc did not improve from 0.86765\n",
+ "Epoch 1685/3000\n",
+ " - 39s - loss: 0.2406 - acc: 0.9725 - val_loss: 0.9291 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01685: val_acc did not improve from 0.86765\n",
+ "Epoch 1686/3000\n",
+ " - 40s - loss: 0.2362 - acc: 0.9725 - val_loss: 0.9439 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01686: val_acc did not improve from 0.86765\n",
+ "Epoch 1687/3000\n",
+ " - 39s - loss: 0.2435 - acc: 0.9686 - val_loss: 0.9464 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01687: val_acc did not improve from 0.86765\n",
+ "Epoch 1688/3000\n",
+ " - 39s - loss: 0.2474 - acc: 0.9710 - val_loss: 0.9844 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 01688: val_acc did not improve from 0.86765\n",
+ "Epoch 1689/3000\n",
+ " - 39s - loss: 0.2352 - acc: 0.9730 - val_loss: 0.9340 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01689: val_acc did not improve from 0.86765\n",
+ "Epoch 1690/3000\n",
+ " - 39s - loss: 0.2444 - acc: 0.9706 - val_loss: 0.9369 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01690: val_acc did not improve from 0.86765\n",
+ "Epoch 1691/3000\n",
+ " - 39s - loss: 0.2453 - acc: 0.9724 - val_loss: 0.9335 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01691: val_acc did not improve from 0.86765\n",
+ "Epoch 1692/3000\n",
+ " - 39s - loss: 0.2431 - acc: 0.9724 - val_loss: 0.9536 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01692: val_acc did not improve from 0.86765\n",
+ "Epoch 1693/3000\n",
+ " - 39s - loss: 0.2437 - acc: 0.9737 - val_loss: 0.9467 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01693: val_acc did not improve from 0.86765\n",
+ "Epoch 1694/3000\n",
+ " - 40s - loss: 0.2387 - acc: 0.9743 - val_loss: 0.9340 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01694: val_acc did not improve from 0.86765\n",
+ "Epoch 1695/3000\n",
+ " - 39s - loss: 0.2499 - acc: 0.9695 - val_loss: 0.9462 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01695: val_acc did not improve from 0.86765\n",
+ "Epoch 1696/3000\n",
+ " - 39s - loss: 0.2421 - acc: 0.9710 - val_loss: 0.9613 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01696: val_acc did not improve from 0.86765\n",
+ "Epoch 1697/3000\n",
+ " - 40s - loss: 0.2481 - acc: 0.9722 - val_loss: 0.9473 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01697: val_acc did not improve from 0.86765\n",
+ "Epoch 1698/3000\n",
+ " - 40s - loss: 0.2469 - acc: 0.9713 - val_loss: 0.9375 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01698: val_acc did not improve from 0.86765\n",
+ "Epoch 1699/3000\n",
+ " - 39s - loss: 0.2516 - acc: 0.9692 - val_loss: 0.9508 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01699: val_acc did not improve from 0.86765\n",
+ "Epoch 1700/3000\n",
+ " - 39s - loss: 0.2453 - acc: 0.9715 - val_loss: 0.9098 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 01700: val_acc did not improve from 0.86765\n",
+ "Epoch 1701/3000\n",
+ " - 39s - loss: 0.2466 - acc: 0.9713 - val_loss: 0.9457 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01701: val_acc did not improve from 0.86765\n",
+ "Epoch 1702/3000\n",
+ " - 39s - loss: 0.2518 - acc: 0.9712 - val_loss: 0.9541 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01702: val_acc did not improve from 0.86765\n",
+ "Epoch 1703/3000\n",
+ " - 39s - loss: 0.2430 - acc: 0.9716 - val_loss: 0.9383 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01703: val_acc did not improve from 0.86765\n",
+ "Epoch 1704/3000\n",
+ " - 39s - loss: 0.2395 - acc: 0.9721 - val_loss: 0.9496 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01704: val_acc did not improve from 0.86765\n",
+ "Epoch 1705/3000\n",
+ " - 39s - loss: 0.2404 - acc: 0.9721 - val_loss: 0.9436 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01705: val_acc did not improve from 0.86765\n",
+ "Epoch 1706/3000\n",
+ " - 39s - loss: 0.2380 - acc: 0.9701 - val_loss: 0.9312 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 01706: val_acc did not improve from 0.86765\n",
+ "Epoch 1707/3000\n",
+ " - 39s - loss: 0.2296 - acc: 0.9731 - val_loss: 0.9177 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01707: val_acc did not improve from 0.86765\n",
+ "Epoch 1708/3000\n",
+ " - 39s - loss: 0.2388 - acc: 0.9697 - val_loss: 0.9459 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 01708: val_acc did not improve from 0.86765\n",
+ "Epoch 1709/3000\n",
+ " - 39s - loss: 0.2434 - acc: 0.9724 - val_loss: 0.9242 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 01709: val_acc did not improve from 0.86765\n",
+ "Epoch 1710/3000\n",
+ " - 39s - loss: 0.2486 - acc: 0.9691 - val_loss: 0.9343 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01710: val_acc did not improve from 0.86765\n",
+ "Epoch 1711/3000\n",
+ " - 39s - loss: 0.2421 - acc: 0.9698 - val_loss: 0.9341 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01711: val_acc did not improve from 0.86765\n",
+ "Epoch 1712/3000\n",
+ " - 39s - loss: 0.2464 - acc: 0.9733 - val_loss: 0.9185 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 01712: val_acc did not improve from 0.86765\n",
+ "Epoch 1713/3000\n",
+ " - 39s - loss: 0.2354 - acc: 0.9749 - val_loss: 0.9516 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01713: val_acc did not improve from 0.86765\n",
+ "Epoch 1714/3000\n",
+ " - 39s - loss: 0.2401 - acc: 0.9725 - val_loss: 0.9506 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01714: val_acc did not improve from 0.86765\n",
+ "Epoch 1715/3000\n",
+ " - 39s - loss: 0.2400 - acc: 0.9706 - val_loss: 0.9420 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 01715: val_acc did not improve from 0.86765\n",
+ "Epoch 1716/3000\n",
+ " - 39s - loss: 0.2410 - acc: 0.9722 - val_loss: 0.9052 - val_acc: 0.8626\n",
+ "\n",
+ "Epoch 01716: val_acc did not improve from 0.86765\n",
+ "Epoch 1717/3000\n",
+ " - 39s - loss: 0.2403 - acc: 0.9710 - val_loss: 0.9490 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01717: val_acc did not improve from 0.86765\n",
+ "Epoch 1718/3000\n",
+ " - 39s - loss: 0.2377 - acc: 0.9716 - val_loss: 0.9385 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01718: val_acc did not improve from 0.86765\n",
+ "Epoch 1719/3000\n",
+ " - 39s - loss: 0.2404 - acc: 0.9737 - val_loss: 0.9540 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01719: val_acc did not improve from 0.86765\n",
+ "Epoch 1720/3000\n",
+ " - 39s - loss: 0.2437 - acc: 0.9692 - val_loss: 0.9487 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01720: val_acc did not improve from 0.86765\n",
+ "Epoch 1721/3000\n",
+ " - 40s - loss: 0.2416 - acc: 0.9700 - val_loss: 0.9389 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01721: val_acc did not improve from 0.86765\n",
+ "Epoch 1722/3000\n",
+ " - 39s - loss: 0.2386 - acc: 0.9713 - val_loss: 0.9300 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01722: val_acc did not improve from 0.86765\n",
+ "Epoch 1723/3000\n",
+ " - 39s - loss: 0.2559 - acc: 0.9683 - val_loss: 0.9424 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01723: val_acc did not improve from 0.86765\n",
+ "Epoch 1724/3000\n",
+ " - 39s - loss: 0.2483 - acc: 0.9695 - val_loss: 0.9352 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01724: val_acc did not improve from 0.86765\n",
+ "Epoch 1725/3000\n",
+ " - 39s - loss: 0.2453 - acc: 0.9728 - val_loss: 0.9717 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 01725: val_acc did not improve from 0.86765\n",
+ "Epoch 1726/3000\n",
+ " - 39s - loss: 0.2370 - acc: 0.9743 - val_loss: 0.9454 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01726: val_acc did not improve from 0.86765\n",
+ "Epoch 1727/3000\n",
+ " - 39s - loss: 0.2430 - acc: 0.9718 - val_loss: 0.9518 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 01727: val_acc did not improve from 0.86765\n",
+ "Epoch 1728/3000\n",
+ " - 39s - loss: 0.2425 - acc: 0.9718 - val_loss: 0.9127 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 01728: val_acc did not improve from 0.86765\n",
+ "Epoch 1729/3000\n",
+ " - 40s - loss: 0.2421 - acc: 0.9713 - val_loss: 0.9084 - val_acc: 0.8638\n",
+ "\n",
+ "Epoch 01729: val_acc did not improve from 0.86765\n",
+ "Epoch 1730/3000\n",
+ " - 39s - loss: 0.2365 - acc: 0.9730 - val_loss: 0.9388 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01730: val_acc did not improve from 0.86765\n",
+ "Epoch 1731/3000\n",
+ " - 39s - loss: 0.2448 - acc: 0.9709 - val_loss: 0.9196 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01731: val_acc did not improve from 0.86765\n",
+ "Epoch 1732/3000\n",
+ " - 39s - loss: 0.2353 - acc: 0.9725 - val_loss: 0.9516 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 01732: val_acc did not improve from 0.86765\n",
+ "Epoch 1733/3000\n",
+ " - 39s - loss: 0.2402 - acc: 0.9712 - val_loss: 0.9143 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01733: val_acc did not improve from 0.86765\n",
+ "Epoch 1734/3000\n",
+ " - 39s - loss: 0.2426 - acc: 0.9733 - val_loss: 0.9374 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01734: val_acc did not improve from 0.86765\n",
+ "Epoch 1735/3000\n",
+ " - 39s - loss: 0.2436 - acc: 0.9710 - val_loss: 0.9604 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01735: val_acc did not improve from 0.86765\n",
+ "Epoch 1736/3000\n",
+ " - 39s - loss: 0.2415 - acc: 0.9721 - val_loss: 0.9298 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01736: val_acc did not improve from 0.86765\n",
+ "Epoch 1737/3000\n",
+ " - 39s - loss: 0.2399 - acc: 0.9725 - val_loss: 0.9215 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01737: val_acc did not improve from 0.86765\n",
+ "Epoch 1738/3000\n",
+ " - 39s - loss: 0.2310 - acc: 0.9736 - val_loss: 0.9474 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01738: val_acc did not improve from 0.86765\n",
+ "Epoch 1739/3000\n",
+ " - 39s - loss: 0.2395 - acc: 0.9712 - val_loss: 0.9407 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01739: val_acc did not improve from 0.86765\n",
+ "Epoch 1740/3000\n",
+ " - 39s - loss: 0.2460 - acc: 0.9703 - val_loss: 0.9558 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01740: val_acc did not improve from 0.86765\n",
+ "Epoch 1741/3000\n",
+ " - 39s - loss: 0.2414 - acc: 0.9694 - val_loss: 0.9251 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01741: val_acc did not improve from 0.86765\n",
+ "Epoch 1742/3000\n",
+ " - 39s - loss: 0.2349 - acc: 0.9715 - val_loss: 0.9195 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01742: val_acc did not improve from 0.86765\n",
+ "Epoch 1743/3000\n",
+ " - 39s - loss: 0.2456 - acc: 0.9689 - val_loss: 0.9571 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01743: val_acc did not improve from 0.86765\n",
+ "Epoch 1744/3000\n",
+ " - 39s - loss: 0.2492 - acc: 0.9719 - val_loss: 0.9217 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 01744: val_acc did not improve from 0.86765\n",
+ "Epoch 1745/3000\n",
+ " - 39s - loss: 0.2412 - acc: 0.9683 - val_loss: 0.9430 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01745: val_acc did not improve from 0.86765\n",
+ "Epoch 1746/3000\n",
+ " - 39s - loss: 0.2401 - acc: 0.9710 - val_loss: 0.9504 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01746: val_acc did not improve from 0.86765\n",
+ "Epoch 1747/3000\n",
+ " - 39s - loss: 0.2436 - acc: 0.9704 - val_loss: 0.9241 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01747: val_acc did not improve from 0.86765\n",
+ "Epoch 1748/3000\n",
+ " - 39s - loss: 0.2376 - acc: 0.9722 - val_loss: 0.9375 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01748: val_acc did not improve from 0.86765\n",
+ "Epoch 1749/3000\n",
+ " - 39s - loss: 0.2347 - acc: 0.9751 - val_loss: 0.9259 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01749: val_acc did not improve from 0.86765\n",
+ "Epoch 1750/3000\n",
+ " - 40s - loss: 0.2415 - acc: 0.9743 - val_loss: 0.9516 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01750: val_acc did not improve from 0.86765\n",
+ "Epoch 1751/3000\n",
+ " - 40s - loss: 0.2434 - acc: 0.9724 - val_loss: 0.9135 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01751: val_acc did not improve from 0.86765\n",
+ "Epoch 1752/3000\n",
+ " - 39s - loss: 0.2428 - acc: 0.9703 - val_loss: 0.9301 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01752: val_acc did not improve from 0.86765\n",
+ "Epoch 1753/3000\n",
+ " - 39s - loss: 0.2417 - acc: 0.9727 - val_loss: 0.9306 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01753: val_acc did not improve from 0.86765\n",
+ "Epoch 1754/3000\n",
+ " - 39s - loss: 0.2461 - acc: 0.9712 - val_loss: 0.9300 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01754: val_acc did not improve from 0.86765\n",
+ "Epoch 1755/3000\n",
+ " - 39s - loss: 0.2412 - acc: 0.9727 - val_loss: 0.9236 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 01755: val_acc did not improve from 0.86765\n",
+ "Epoch 1756/3000\n",
+ " - 39s - loss: 0.2459 - acc: 0.9709 - val_loss: 0.9483 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01756: val_acc did not improve from 0.86765\n",
+ "Epoch 1757/3000\n",
+ " - 39s - loss: 0.2420 - acc: 0.9710 - val_loss: 0.9172 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 01757: val_acc did not improve from 0.86765\n",
+ "Epoch 1758/3000\n",
+ " - 39s - loss: 0.2355 - acc: 0.9763 - val_loss: 0.9278 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01758: val_acc did not improve from 0.86765\n",
+ "Epoch 1759/3000\n",
+ " - 39s - loss: 0.2438 - acc: 0.9692 - val_loss: 0.9344 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01759: val_acc did not improve from 0.86765\n",
+ "Epoch 1760/3000\n",
+ " - 39s - loss: 0.2326 - acc: 0.9737 - val_loss: 0.9386 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 01760: val_acc did not improve from 0.86765\n",
+ "Epoch 1761/3000\n",
+ " - 39s - loss: 0.2436 - acc: 0.9718 - val_loss: 0.9084 - val_acc: 0.8614\n",
+ "\n",
+ "Epoch 01761: val_acc did not improve from 0.86765\n",
+ "Epoch 1762/3000\n",
+ " - 39s - loss: 0.2398 - acc: 0.9722 - val_loss: 0.9349 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01762: val_acc did not improve from 0.86765\n",
+ "Epoch 1763/3000\n",
+ " - 39s - loss: 0.2419 - acc: 0.9722 - val_loss: 0.9150 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01763: val_acc did not improve from 0.86765\n",
+ "Epoch 1764/3000\n",
+ " - 39s - loss: 0.2461 - acc: 0.9697 - val_loss: 0.9402 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01764: val_acc did not improve from 0.86765\n",
+ "Epoch 1765/3000\n",
+ " - 39s - loss: 0.2457 - acc: 0.9681 - val_loss: 0.9335 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01765: val_acc did not improve from 0.86765\n",
+ "Epoch 1766/3000\n",
+ " - 39s - loss: 0.2419 - acc: 0.9713 - val_loss: 0.9420 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 01766: val_acc did not improve from 0.86765\n",
+ "Epoch 1767/3000\n",
+ " - 39s - loss: 0.2509 - acc: 0.9691 - val_loss: 0.9329 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01767: val_acc did not improve from 0.86765\n",
+ "Epoch 1768/3000\n",
+ " - 39s - loss: 0.2456 - acc: 0.9730 - val_loss: 0.9043 - val_acc: 0.8618\n",
+ "\n",
+ "Epoch 01768: val_acc did not improve from 0.86765\n",
+ "Epoch 1769/3000\n",
+ " - 39s - loss: 0.2478 - acc: 0.9694 - val_loss: 0.9261 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 01769: val_acc did not improve from 0.86765\n",
+ "Epoch 1770/3000\n",
+ " - 39s - loss: 0.2429 - acc: 0.9725 - val_loss: 0.9364 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 01770: val_acc did not improve from 0.86765\n",
+ "Epoch 1771/3000\n",
+ " - 39s - loss: 0.2381 - acc: 0.9733 - val_loss: 0.9368 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 01771: val_acc did not improve from 0.86765\n",
+ "Epoch 1772/3000\n",
+ " - 39s - loss: 0.2486 - acc: 0.9692 - val_loss: 0.9250 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01772: val_acc did not improve from 0.86765\n",
+ "Epoch 1773/3000\n",
+ " - 39s - loss: 0.2443 - acc: 0.9687 - val_loss: 0.9404 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01773: val_acc did not improve from 0.86765\n",
+ "Epoch 1774/3000\n",
+ " - 39s - loss: 0.2495 - acc: 0.9692 - val_loss: 0.9378 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01774: val_acc did not improve from 0.86765\n",
+ "Epoch 1775/3000\n",
+ " - 39s - loss: 0.2415 - acc: 0.9719 - val_loss: 0.9238 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 01775: val_acc did not improve from 0.86765\n",
+ "Epoch 1776/3000\n",
+ " - 39s - loss: 0.2394 - acc: 0.9716 - val_loss: 0.9160 - val_acc: 0.8614\n",
+ "\n",
+ "Epoch 01776: val_acc did not improve from 0.86765\n",
+ "Epoch 1777/3000\n",
+ " - 39s - loss: 0.2438 - acc: 0.9715 - val_loss: 0.9439 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01777: val_acc did not improve from 0.86765\n",
+ "Epoch 1778/3000\n",
+ " - 39s - loss: 0.2338 - acc: 0.9737 - val_loss: 0.9241 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 01778: val_acc did not improve from 0.86765\n",
+ "Epoch 1779/3000\n",
+ " - 39s - loss: 0.2466 - acc: 0.9706 - val_loss: 0.9761 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 01779: val_acc did not improve from 0.86765\n",
+ "Epoch 1780/3000\n",
+ " - 39s - loss: 0.2373 - acc: 0.9724 - val_loss: 0.9495 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01780: val_acc did not improve from 0.86765\n",
+ "Epoch 1781/3000\n",
+ " - 39s - loss: 0.2429 - acc: 0.9677 - val_loss: 0.9758 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01781: val_acc did not improve from 0.86765\n",
+ "Epoch 1782/3000\n",
+ " - 39s - loss: 0.2457 - acc: 0.9700 - val_loss: 0.9374 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01782: val_acc did not improve from 0.86765\n",
+ "Epoch 1783/3000\n",
+ " - 39s - loss: 0.2374 - acc: 0.9721 - val_loss: 0.9752 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01783: val_acc did not improve from 0.86765\n",
+ "Epoch 1784/3000\n",
+ " - 39s - loss: 0.2423 - acc: 0.9719 - val_loss: 0.9427 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 01784: val_acc did not improve from 0.86765\n",
+ "Epoch 1785/3000\n",
+ " - 39s - loss: 0.2375 - acc: 0.9700 - val_loss: 0.9716 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 01785: val_acc did not improve from 0.86765\n",
+ "Epoch 1786/3000\n",
+ " - 39s - loss: 0.2413 - acc: 0.9719 - val_loss: 0.9522 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01786: val_acc did not improve from 0.86765\n",
+ "Epoch 1787/3000\n",
+ " - 39s - loss: 0.2407 - acc: 0.9704 - val_loss: 0.9640 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 01787: val_acc did not improve from 0.86765\n",
+ "Epoch 1788/3000\n",
+ " - 39s - loss: 0.2317 - acc: 0.9748 - val_loss: 0.9631 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01788: val_acc did not improve from 0.86765\n",
+ "Epoch 1789/3000\n",
+ " - 39s - loss: 0.2437 - acc: 0.9710 - val_loss: 0.9231 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01789: val_acc did not improve from 0.86765\n",
+ "Epoch 1790/3000\n",
+ " - 39s - loss: 0.2380 - acc: 0.9733 - val_loss: 0.9308 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01790: val_acc did not improve from 0.86765\n",
+ "Epoch 1791/3000\n",
+ " - 39s - loss: 0.2389 - acc: 0.9721 - val_loss: 0.9357 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01791: val_acc did not improve from 0.86765\n",
+ "Epoch 1792/3000\n",
+ " - 39s - loss: 0.2424 - acc: 0.9737 - val_loss: 0.9492 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01792: val_acc did not improve from 0.86765\n",
+ "Epoch 1793/3000\n",
+ " - 39s - loss: 0.2438 - acc: 0.9713 - val_loss: 0.9325 - val_acc: 0.8622\n",
+ "\n",
+ "Epoch 01793: val_acc did not improve from 0.86765\n",
+ "Epoch 1794/3000\n",
+ " - 39s - loss: 0.2479 - acc: 0.9675 - val_loss: 0.9442 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 01794: val_acc did not improve from 0.86765\n",
+ "Epoch 1795/3000\n",
+ " - 39s - loss: 0.2486 - acc: 0.9712 - val_loss: 0.9469 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01795: val_acc did not improve from 0.86765\n",
+ "Epoch 1796/3000\n",
+ " - 39s - loss: 0.2459 - acc: 0.9704 - val_loss: 0.9402 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01796: val_acc did not improve from 0.86765\n",
+ "Epoch 1797/3000\n",
+ " - 39s - loss: 0.2418 - acc: 0.9700 - val_loss: 0.9580 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01797: val_acc did not improve from 0.86765\n",
+ "Epoch 1798/3000\n",
+ " - 39s - loss: 0.2442 - acc: 0.9700 - val_loss: 0.9435 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01798: val_acc did not improve from 0.86765\n",
+ "Epoch 1799/3000\n",
+ " - 39s - loss: 0.2406 - acc: 0.9724 - val_loss: 0.9391 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01799: val_acc did not improve from 0.86765\n",
+ "Epoch 1800/3000\n",
+ " - 39s - loss: 0.2368 - acc: 0.9736 - val_loss: 0.9343 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01800: val_acc did not improve from 0.86765\n",
+ "Epoch 1801/3000\n",
+ " - 39s - loss: 0.2366 - acc: 0.9734 - val_loss: 0.9168 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 01801: val_acc did not improve from 0.86765\n",
+ "Epoch 1802/3000\n",
+ " - 39s - loss: 0.2345 - acc: 0.9730 - val_loss: 0.9597 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01802: val_acc did not improve from 0.86765\n",
+ "Epoch 1803/3000\n",
+ " - 39s - loss: 0.2450 - acc: 0.9700 - val_loss: 0.9367 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01803: val_acc did not improve from 0.86765\n",
+ "Epoch 1804/3000\n",
+ " - 39s - loss: 0.2403 - acc: 0.9730 - val_loss: 0.9376 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01804: val_acc did not improve from 0.86765\n",
+ "Epoch 1805/3000\n",
+ " - 39s - loss: 0.2388 - acc: 0.9712 - val_loss: 0.9378 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01805: val_acc did not improve from 0.86765\n",
+ "Epoch 1806/3000\n",
+ " - 39s - loss: 0.2422 - acc: 0.9718 - val_loss: 0.9483 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01806: val_acc did not improve from 0.86765\n",
+ "Epoch 1807/3000\n",
+ " - 39s - loss: 0.2417 - acc: 0.9701 - val_loss: 0.9790 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 01807: val_acc did not improve from 0.86765\n",
+ "Epoch 1808/3000\n",
+ " - 39s - loss: 0.2371 - acc: 0.9734 - val_loss: 0.9583 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01808: val_acc did not improve from 0.86765\n",
+ "Epoch 1809/3000\n",
+ " - 39s - loss: 0.2282 - acc: 0.9749 - val_loss: 0.9693 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 01809: val_acc did not improve from 0.86765\n",
+ "Epoch 1810/3000\n",
+ " - 39s - loss: 0.2411 - acc: 0.9695 - val_loss: 0.9553 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01810: val_acc did not improve from 0.86765\n",
+ "Epoch 1811/3000\n",
+ " - 39s - loss: 0.2520 - acc: 0.9709 - val_loss: 0.9570 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01811: val_acc did not improve from 0.86765\n",
+ "Epoch 1812/3000\n",
+ " - 39s - loss: 0.2471 - acc: 0.9698 - val_loss: 0.9281 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 01812: val_acc did not improve from 0.86765\n",
+ "Epoch 1813/3000\n",
+ " - 39s - loss: 0.2417 - acc: 0.9721 - val_loss: 0.9480 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01813: val_acc did not improve from 0.86765\n",
+ "Epoch 1814/3000\n",
+ " - 39s - loss: 0.2373 - acc: 0.9746 - val_loss: 0.9497 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01814: val_acc did not improve from 0.86765\n",
+ "Epoch 1815/3000\n",
+ " - 39s - loss: 0.2368 - acc: 0.9736 - val_loss: 0.9438 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01815: val_acc did not improve from 0.86765\n",
+ "Epoch 1816/3000\n",
+ " - 39s - loss: 0.2418 - acc: 0.9712 - val_loss: 0.9399 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01816: val_acc did not improve from 0.86765\n",
+ "Epoch 1817/3000\n",
+ " - 39s - loss: 0.2405 - acc: 0.9710 - val_loss: 0.9412 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01817: val_acc did not improve from 0.86765\n",
+ "Epoch 1818/3000\n",
+ " - 39s - loss: 0.2468 - acc: 0.9700 - val_loss: 0.9614 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 01818: val_acc did not improve from 0.86765\n",
+ "Epoch 1819/3000\n",
+ " - 39s - loss: 0.2416 - acc: 0.9695 - val_loss: 0.9304 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 01819: val_acc did not improve from 0.86765\n",
+ "Epoch 1820/3000\n",
+ " - 39s - loss: 0.2365 - acc: 0.9727 - val_loss: 0.9495 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01820: val_acc did not improve from 0.86765\n",
+ "Epoch 1821/3000\n",
+ " - 39s - loss: 0.2381 - acc: 0.9721 - val_loss: 0.9580 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 01821: val_acc did not improve from 0.86765\n",
+ "Epoch 1822/3000\n",
+ " - 39s - loss: 0.2413 - acc: 0.9721 - val_loss: 0.9527 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01822: val_acc did not improve from 0.86765\n",
+ "Epoch 1823/3000\n",
+ " - 39s - loss: 0.2466 - acc: 0.9731 - val_loss: 0.9803 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 01823: val_acc did not improve from 0.86765\n",
+ "Epoch 1824/3000\n",
+ " - 39s - loss: 0.2419 - acc: 0.9688 - val_loss: 0.9607 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01824: val_acc did not improve from 0.86765\n",
+ "Epoch 1825/3000\n",
+ " - 39s - loss: 0.2390 - acc: 0.9734 - val_loss: 0.9452 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01825: val_acc did not improve from 0.86765\n",
+ "Epoch 1826/3000\n",
+ " - 40s - loss: 0.2399 - acc: 0.9710 - val_loss: 0.9513 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01826: val_acc did not improve from 0.86765\n",
+ "Epoch 1827/3000\n",
+ " - 39s - loss: 0.2378 - acc: 0.9713 - val_loss: 0.9697 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01827: val_acc did not improve from 0.86765\n",
+ "Epoch 1828/3000\n",
+ " - 39s - loss: 0.2388 - acc: 0.9713 - val_loss: 0.9881 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 01828: val_acc did not improve from 0.86765\n",
+ "Epoch 1829/3000\n",
+ " - 39s - loss: 0.2408 - acc: 0.9704 - val_loss: 0.9500 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 01829: val_acc did not improve from 0.86765\n",
+ "Epoch 1830/3000\n",
+ " - 39s - loss: 0.2329 - acc: 0.9728 - val_loss: 0.9716 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 01830: val_acc did not improve from 0.86765\n",
+ "Epoch 1831/3000\n",
+ " - 39s - loss: 0.2355 - acc: 0.9758 - val_loss: 0.9836 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 01831: val_acc did not improve from 0.86765\n",
+ "Epoch 1832/3000\n",
+ " - 39s - loss: 0.2432 - acc: 0.9707 - val_loss: 0.9901 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 01832: val_acc did not improve from 0.86765\n",
+ "Epoch 1833/3000\n",
+ " - 40s - loss: 0.2499 - acc: 0.9686 - val_loss: 0.9511 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01833: val_acc did not improve from 0.86765\n",
+ "Epoch 1834/3000\n",
+ " - 39s - loss: 0.2431 - acc: 0.9681 - val_loss: 0.9292 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01834: val_acc did not improve from 0.86765\n",
+ "Epoch 1835/3000\n",
+ " - 39s - loss: 0.2391 - acc: 0.9707 - val_loss: 0.9398 - val_acc: 0.8626\n",
+ "\n",
+ "Epoch 01835: val_acc did not improve from 0.86765\n",
+ "Epoch 1836/3000\n",
+ " - 39s - loss: 0.2392 - acc: 0.9698 - val_loss: 0.9527 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01836: val_acc did not improve from 0.86765\n",
+ "Epoch 1837/3000\n",
+ " - 39s - loss: 0.2405 - acc: 0.9707 - val_loss: 0.9451 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01837: val_acc did not improve from 0.86765\n",
+ "Epoch 1838/3000\n",
+ " - 39s - loss: 0.2516 - acc: 0.9695 - val_loss: 0.9610 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 01838: val_acc did not improve from 0.86765\n",
+ "Epoch 1839/3000\n",
+ " - 39s - loss: 0.2381 - acc: 0.9749 - val_loss: 0.9451 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01839: val_acc did not improve from 0.86765\n",
+ "Epoch 1840/3000\n",
+ " - 39s - loss: 0.2408 - acc: 0.9697 - val_loss: 0.9727 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 01840: val_acc did not improve from 0.86765\n",
+ "Epoch 1841/3000\n",
+ " - 39s - loss: 0.2402 - acc: 0.9712 - val_loss: 0.9786 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01841: val_acc did not improve from 0.86765\n",
+ "Epoch 1842/3000\n",
+ " - 39s - loss: 0.2398 - acc: 0.9721 - val_loss: 0.9552 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01842: val_acc did not improve from 0.86765\n",
+ "Epoch 1843/3000\n",
+ " - 39s - loss: 0.2389 - acc: 0.9713 - val_loss: 0.9592 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01843: val_acc did not improve from 0.86765\n",
+ "Epoch 1844/3000\n",
+ " - 39s - loss: 0.2357 - acc: 0.9743 - val_loss: 0.9337 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01844: val_acc did not improve from 0.86765\n",
+ "Epoch 1845/3000\n",
+ " - 39s - loss: 0.2399 - acc: 0.9704 - val_loss: 0.9657 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 01845: val_acc did not improve from 0.86765\n",
+ "Epoch 1846/3000\n",
+ " - 39s - loss: 0.2329 - acc: 0.9742 - val_loss: 0.9439 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01846: val_acc did not improve from 0.86765\n",
+ "Epoch 1847/3000\n",
+ " - 39s - loss: 0.2435 - acc: 0.9704 - val_loss: 0.9366 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 01847: val_acc did not improve from 0.86765\n",
+ "Epoch 1848/3000\n",
+ " - 39s - loss: 0.2470 - acc: 0.9694 - val_loss: 0.9374 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01848: val_acc did not improve from 0.86765\n",
+ "Epoch 1849/3000\n",
+ " - 39s - loss: 0.2396 - acc: 0.9718 - val_loss: 0.9438 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01849: val_acc did not improve from 0.86765\n",
+ "Epoch 1850/3000\n",
+ " - 39s - loss: 0.2414 - acc: 0.9719 - val_loss: 0.9080 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 01850: val_acc did not improve from 0.86765\n",
+ "Epoch 1851/3000\n",
+ " - 39s - loss: 0.2434 - acc: 0.9697 - val_loss: 0.9104 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 01851: val_acc did not improve from 0.86765\n",
+ "Epoch 1852/3000\n",
+ " - 39s - loss: 0.2470 - acc: 0.9713 - val_loss: 0.9503 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 01852: val_acc did not improve from 0.86765\n",
+ "Epoch 1853/3000\n",
+ " - 39s - loss: 0.2453 - acc: 0.9700 - val_loss: 0.9346 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01853: val_acc did not improve from 0.86765\n",
+ "Epoch 1854/3000\n",
+ " - 39s - loss: 0.2381 - acc: 0.9739 - val_loss: 0.9314 - val_acc: 0.8614\n",
+ "\n",
+ "Epoch 01854: val_acc did not improve from 0.86765\n",
+ "Epoch 1855/3000\n",
+ " - 39s - loss: 0.2400 - acc: 0.9724 - val_loss: 0.9511 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01855: val_acc did not improve from 0.86765\n",
+ "Epoch 1856/3000\n",
+ " - 39s - loss: 0.2430 - acc: 0.9703 - val_loss: 0.9523 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01856: val_acc did not improve from 0.86765\n",
+ "Epoch 1857/3000\n",
+ " - 39s - loss: 0.2361 - acc: 0.9721 - val_loss: 0.9428 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01857: val_acc did not improve from 0.86765\n",
+ "Epoch 1858/3000\n",
+ " - 39s - loss: 0.2398 - acc: 0.9715 - val_loss: 0.9580 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01858: val_acc did not improve from 0.86765\n",
+ "Epoch 1859/3000\n",
+ " - 39s - loss: 0.2347 - acc: 0.9722 - val_loss: 0.9519 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01859: val_acc did not improve from 0.86765\n",
+ "Epoch 1860/3000\n",
+ " - 39s - loss: 0.2367 - acc: 0.9724 - val_loss: 0.9812 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01860: val_acc did not improve from 0.86765\n",
+ "Epoch 1861/3000\n",
+ " - 39s - loss: 0.2304 - acc: 0.9746 - val_loss: 0.9847 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01861: val_acc did not improve from 0.86765\n",
+ "Epoch 1862/3000\n",
+ " - 39s - loss: 0.2331 - acc: 0.9755 - val_loss: 0.9559 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 01862: val_acc did not improve from 0.86765\n",
+ "Epoch 1863/3000\n",
+ " - 39s - loss: 0.2399 - acc: 0.9687 - val_loss: 0.9609 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01863: val_acc did not improve from 0.86765\n",
+ "Epoch 1864/3000\n",
+ " - 39s - loss: 0.2407 - acc: 0.9706 - val_loss: 0.9607 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 01864: val_acc did not improve from 0.86765\n",
+ "Epoch 1865/3000\n",
+ " - 39s - loss: 0.2422 - acc: 0.9727 - val_loss: 0.9649 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01865: val_acc did not improve from 0.86765\n",
+ "Epoch 1866/3000\n",
+ " - 39s - loss: 0.2418 - acc: 0.9719 - val_loss: 0.9655 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01866: val_acc did not improve from 0.86765\n",
+ "Epoch 1867/3000\n",
+ " - 39s - loss: 0.2550 - acc: 0.9694 - val_loss: 0.9776 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 01867: val_acc did not improve from 0.86765\n",
+ "Epoch 1868/3000\n",
+ " - 39s - loss: 0.2452 - acc: 0.9709 - val_loss: 0.9613 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 01868: val_acc did not improve from 0.86765\n",
+ "Epoch 1869/3000\n",
+ " - 39s - loss: 0.2403 - acc: 0.9728 - val_loss: 0.9542 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01869: val_acc did not improve from 0.86765\n",
+ "Epoch 1870/3000\n",
+ " - 39s - loss: 0.2402 - acc: 0.9721 - val_loss: 0.9660 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01870: val_acc did not improve from 0.86765\n",
+ "Epoch 1871/3000\n",
+ " - 39s - loss: 0.2373 - acc: 0.9727 - val_loss: 0.9606 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01871: val_acc did not improve from 0.86765\n",
+ "Epoch 1872/3000\n",
+ " - 39s - loss: 0.2404 - acc: 0.9718 - val_loss: 0.9379 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01872: val_acc did not improve from 0.86765\n",
+ "Epoch 1873/3000\n",
+ " - 39s - loss: 0.2452 - acc: 0.9716 - val_loss: 0.9386 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01873: val_acc did not improve from 0.86765\n",
+ "Epoch 1874/3000\n",
+ " - 39s - loss: 0.2387 - acc: 0.9721 - val_loss: 0.9413 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01874: val_acc did not improve from 0.86765\n",
+ "Epoch 1875/3000\n",
+ " - 39s - loss: 0.2489 - acc: 0.9695 - val_loss: 0.9421 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01875: val_acc did not improve from 0.86765\n",
+ "Epoch 1876/3000\n",
+ " - 39s - loss: 0.2346 - acc: 0.9718 - val_loss: 0.9266 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 01876: val_acc did not improve from 0.86765\n",
+ "Epoch 1877/3000\n",
+ " - 39s - loss: 0.2360 - acc: 0.9713 - val_loss: 0.9745 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01877: val_acc did not improve from 0.86765\n",
+ "Epoch 1878/3000\n",
+ " - 39s - loss: 0.2408 - acc: 0.9727 - val_loss: 0.9634 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01878: val_acc did not improve from 0.86765\n",
+ "Epoch 1879/3000\n",
+ " - 39s - loss: 0.2326 - acc: 0.9733 - val_loss: 0.9706 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01879: val_acc did not improve from 0.86765\n",
+ "Epoch 1880/3000\n",
+ " - 39s - loss: 0.2360 - acc: 0.9746 - val_loss: 0.9577 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01880: val_acc did not improve from 0.86765\n",
+ "Epoch 1881/3000\n",
+ " - 39s - loss: 0.2414 - acc: 0.9713 - val_loss: 0.9587 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01881: val_acc did not improve from 0.86765\n",
+ "Epoch 1882/3000\n",
+ " - 39s - loss: 0.2357 - acc: 0.9713 - val_loss: 0.9674 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 01882: val_acc did not improve from 0.86765\n",
+ "Epoch 1883/3000\n",
+ " - 39s - loss: 0.2440 - acc: 0.9727 - val_loss: 0.9707 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01883: val_acc did not improve from 0.86765\n",
+ "Epoch 1884/3000\n",
+ " - 39s - loss: 0.2407 - acc: 0.9709 - val_loss: 0.9582 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01884: val_acc did not improve from 0.86765\n",
+ "Epoch 1885/3000\n",
+ " - 39s - loss: 0.2495 - acc: 0.9689 - val_loss: 0.9689 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01885: val_acc did not improve from 0.86765\n",
+ "Epoch 1886/3000\n",
+ " - 39s - loss: 0.2439 - acc: 0.9713 - val_loss: 0.9941 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 01886: val_acc did not improve from 0.86765\n",
+ "Epoch 1887/3000\n",
+ " - 39s - loss: 0.2344 - acc: 0.9727 - val_loss: 0.9802 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01887: val_acc did not improve from 0.86765\n",
+ "Epoch 1888/3000\n",
+ " - 39s - loss: 0.2310 - acc: 0.9722 - val_loss: 0.9709 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 01888: val_acc did not improve from 0.86765\n",
+ "Epoch 1889/3000\n",
+ " - 39s - loss: 0.2405 - acc: 0.9721 - val_loss: 0.9663 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01889: val_acc did not improve from 0.86765\n",
+ "Epoch 1890/3000\n",
+ " - 39s - loss: 0.2370 - acc: 0.9728 - val_loss: 0.9883 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 01890: val_acc did not improve from 0.86765\n",
+ "Epoch 1891/3000\n",
+ " - 39s - loss: 0.2374 - acc: 0.9734 - val_loss: 0.9596 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01891: val_acc did not improve from 0.86765\n",
+ "Epoch 1892/3000\n",
+ " - 39s - loss: 0.2392 - acc: 0.9733 - val_loss: 0.9252 - val_acc: 0.8614\n",
+ "\n",
+ "Epoch 01892: val_acc did not improve from 0.86765\n",
+ "Epoch 1893/3000\n",
+ " - 39s - loss: 0.2387 - acc: 0.9739 - val_loss: 0.9429 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 01893: val_acc did not improve from 0.86765\n",
+ "Epoch 1894/3000\n",
+ " - 39s - loss: 0.2370 - acc: 0.9718 - val_loss: 0.9621 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01894: val_acc did not improve from 0.86765\n",
+ "Epoch 1895/3000\n",
+ " - 39s - loss: 0.2471 - acc: 0.9706 - val_loss: 0.9455 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01895: val_acc did not improve from 0.86765\n",
+ "Epoch 1896/3000\n",
+ " - 39s - loss: 0.2334 - acc: 0.9724 - val_loss: 0.9653 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01896: val_acc did not improve from 0.86765\n",
+ "Epoch 1897/3000\n",
+ " - 39s - loss: 0.2416 - acc: 0.9710 - val_loss: 0.9728 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01897: val_acc did not improve from 0.86765\n",
+ "Epoch 1898/3000\n",
+ " - 39s - loss: 0.2371 - acc: 0.9737 - val_loss: 0.9766 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01898: val_acc did not improve from 0.86765\n",
+ "Epoch 1899/3000\n",
+ " - 39s - loss: 0.2401 - acc: 0.9706 - val_loss: 0.9570 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01899: val_acc did not improve from 0.86765\n",
+ "Epoch 1900/3000\n",
+ " - 39s - loss: 0.2432 - acc: 0.9707 - val_loss: 0.9477 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01900: val_acc did not improve from 0.86765\n",
+ "Epoch 1901/3000\n",
+ " - 39s - loss: 0.2355 - acc: 0.9739 - val_loss: 0.9596 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01901: val_acc did not improve from 0.86765\n",
+ "Epoch 1902/3000\n",
+ " - 39s - loss: 0.2412 - acc: 0.9721 - val_loss: 0.9618 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 01902: val_acc did not improve from 0.86765\n",
+ "Epoch 1903/3000\n",
+ " - 39s - loss: 0.2387 - acc: 0.9719 - val_loss: 0.9374 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 01903: val_acc did not improve from 0.86765\n",
+ "Epoch 1904/3000\n",
+ " - 39s - loss: 0.2339 - acc: 0.9728 - val_loss: 0.9285 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01904: val_acc did not improve from 0.86765\n",
+ "Epoch 1905/3000\n",
+ " - 39s - loss: 0.2372 - acc: 0.9746 - val_loss: 0.9552 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01905: val_acc did not improve from 0.86765\n",
+ "Epoch 1906/3000\n",
+ " - 39s - loss: 0.2459 - acc: 0.9709 - val_loss: 0.9340 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 01906: val_acc did not improve from 0.86765\n",
+ "Epoch 1907/3000\n",
+ " - 39s - loss: 0.2344 - acc: 0.9733 - val_loss: 0.9364 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01907: val_acc did not improve from 0.86765\n",
+ "Epoch 1908/3000\n",
+ " - 39s - loss: 0.2379 - acc: 0.9725 - val_loss: 0.9703 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01908: val_acc did not improve from 0.86765\n",
+ "Epoch 1909/3000\n",
+ " - 39s - loss: 0.2433 - acc: 0.9713 - val_loss: 0.9424 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01909: val_acc did not improve from 0.86765\n",
+ "Epoch 1910/3000\n",
+ " - 39s - loss: 0.2371 - acc: 0.9742 - val_loss: 0.9427 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01910: val_acc did not improve from 0.86765\n",
+ "Epoch 1911/3000\n",
+ " - 39s - loss: 0.2467 - acc: 0.9706 - val_loss: 0.9509 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01911: val_acc did not improve from 0.86765\n",
+ "Epoch 1912/3000\n",
+ " - 39s - loss: 0.2344 - acc: 0.9724 - val_loss: 0.9493 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01912: val_acc did not improve from 0.86765\n",
+ "Epoch 1913/3000\n",
+ " - 39s - loss: 0.2317 - acc: 0.9734 - val_loss: 0.9739 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 01913: val_acc did not improve from 0.86765\n",
+ "Epoch 1914/3000\n",
+ " - 39s - loss: 0.2358 - acc: 0.9725 - val_loss: 0.9722 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01914: val_acc did not improve from 0.86765\n",
+ "Epoch 1915/3000\n",
+ " - 39s - loss: 0.2417 - acc: 0.9719 - val_loss: 0.9384 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01915: val_acc did not improve from 0.86765\n",
+ "Epoch 1916/3000\n",
+ " - 39s - loss: 0.2421 - acc: 0.9722 - val_loss: 0.9634 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 01916: val_acc did not improve from 0.86765\n",
+ "Epoch 1917/3000\n",
+ " - 39s - loss: 0.2352 - acc: 0.9722 - val_loss: 0.9507 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01917: val_acc did not improve from 0.86765\n",
+ "Epoch 1918/3000\n",
+ " - 40s - loss: 0.2385 - acc: 0.9709 - val_loss: 0.9990 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 01918: val_acc did not improve from 0.86765\n",
+ "Epoch 1919/3000\n",
+ " - 39s - loss: 0.2386 - acc: 0.9739 - val_loss: 0.9816 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 01919: val_acc did not improve from 0.86765\n",
+ "Epoch 1920/3000\n",
+ " - 39s - loss: 0.2395 - acc: 0.9722 - val_loss: 0.9367 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01920: val_acc did not improve from 0.86765\n",
+ "Epoch 1921/3000\n",
+ " - 39s - loss: 0.2367 - acc: 0.9730 - val_loss: 0.9630 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01921: val_acc did not improve from 0.86765\n",
+ "Epoch 1922/3000\n",
+ " - 39s - loss: 0.2380 - acc: 0.9716 - val_loss: 0.9766 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01922: val_acc did not improve from 0.86765\n",
+ "Epoch 1923/3000\n",
+ " - 39s - loss: 0.2269 - acc: 0.9752 - val_loss: 0.9504 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01923: val_acc did not improve from 0.86765\n",
+ "Epoch 1924/3000\n",
+ " - 39s - loss: 0.2434 - acc: 0.9704 - val_loss: 0.9474 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01924: val_acc did not improve from 0.86765\n",
+ "Epoch 1925/3000\n",
+ " - 39s - loss: 0.2433 - acc: 0.9716 - val_loss: 0.9505 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01925: val_acc did not improve from 0.86765\n",
+ "Epoch 1926/3000\n",
+ " - 39s - loss: 0.2325 - acc: 0.9721 - val_loss: 0.9665 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 01926: val_acc did not improve from 0.86765\n",
+ "Epoch 1927/3000\n",
+ " - 39s - loss: 0.2381 - acc: 0.9740 - val_loss: 0.9683 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 01927: val_acc did not improve from 0.86765\n",
+ "Epoch 1928/3000\n",
+ " - 39s - loss: 0.2500 - acc: 0.9698 - val_loss: 0.9197 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01928: val_acc did not improve from 0.86765\n",
+ "Epoch 1929/3000\n",
+ " - 39s - loss: 0.2470 - acc: 0.9701 - val_loss: 0.9270 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01929: val_acc did not improve from 0.86765\n",
+ "Epoch 1930/3000\n",
+ " - 40s - loss: 0.2369 - acc: 0.9709 - val_loss: 0.9403 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01930: val_acc did not improve from 0.86765\n",
+ "Epoch 1931/3000\n",
+ " - 39s - loss: 0.2388 - acc: 0.9742 - val_loss: 0.9646 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 01931: val_acc did not improve from 0.86765\n",
+ "Epoch 1932/3000\n",
+ " - 39s - loss: 0.2443 - acc: 0.9715 - val_loss: 0.9565 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 01932: val_acc did not improve from 0.86765\n",
+ "Epoch 1933/3000\n",
+ " - 39s - loss: 0.2288 - acc: 0.9758 - val_loss: 0.9446 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01933: val_acc did not improve from 0.86765\n",
+ "Epoch 1934/3000\n",
+ " - 39s - loss: 0.2394 - acc: 0.9715 - val_loss: 0.9450 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01934: val_acc did not improve from 0.86765\n",
+ "Epoch 1935/3000\n",
+ " - 39s - loss: 0.2435 - acc: 0.9719 - val_loss: 0.9380 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 01935: val_acc did not improve from 0.86765\n",
+ "Epoch 1936/3000\n",
+ " - 39s - loss: 0.2404 - acc: 0.9713 - val_loss: 0.9436 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01936: val_acc did not improve from 0.86765\n",
+ "Epoch 1937/3000\n",
+ " - 39s - loss: 0.2463 - acc: 0.9707 - val_loss: 0.9316 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01937: val_acc did not improve from 0.86765\n",
+ "Epoch 1938/3000\n",
+ " - 39s - loss: 0.2431 - acc: 0.9710 - val_loss: 0.9430 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01938: val_acc did not improve from 0.86765\n",
+ "Epoch 1939/3000\n",
+ " - 39s - loss: 0.2424 - acc: 0.9713 - val_loss: 0.9609 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01939: val_acc did not improve from 0.86765\n",
+ "Epoch 1940/3000\n",
+ " - 39s - loss: 0.2343 - acc: 0.9727 - val_loss: 0.9469 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01940: val_acc did not improve from 0.86765\n",
+ "Epoch 1941/3000\n",
+ " - 39s - loss: 0.2327 - acc: 0.9739 - val_loss: 0.9730 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 01941: val_acc did not improve from 0.86765\n",
+ "Epoch 1942/3000\n",
+ " - 39s - loss: 0.2485 - acc: 0.9701 - val_loss: 0.9613 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 01942: val_acc did not improve from 0.86765\n",
+ "Epoch 1943/3000\n",
+ " - 39s - loss: 0.2386 - acc: 0.9712 - val_loss: 0.9709 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 01943: val_acc did not improve from 0.86765\n",
+ "Epoch 1944/3000\n",
+ " - 39s - loss: 0.2444 - acc: 0.9719 - val_loss: 0.9265 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01944: val_acc did not improve from 0.86765\n",
+ "Epoch 1945/3000\n",
+ " - 39s - loss: 0.2355 - acc: 0.9736 - val_loss: 0.9613 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01945: val_acc did not improve from 0.86765\n",
+ "Epoch 1946/3000\n",
+ " - 39s - loss: 0.2347 - acc: 0.9731 - val_loss: 0.9583 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 01946: val_acc did not improve from 0.86765\n",
+ "Epoch 1947/3000\n",
+ " - 39s - loss: 0.2366 - acc: 0.9727 - val_loss: 0.9558 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01947: val_acc did not improve from 0.86765\n",
+ "Epoch 1948/3000\n",
+ " - 39s - loss: 0.2376 - acc: 0.9719 - val_loss: 0.9360 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01948: val_acc did not improve from 0.86765\n",
+ "Epoch 1949/3000\n",
+ " - 39s - loss: 0.2278 - acc: 0.9742 - val_loss: 0.9444 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01949: val_acc did not improve from 0.86765\n",
+ "Epoch 1950/3000\n",
+ " - 39s - loss: 0.2490 - acc: 0.9703 - val_loss: 0.9326 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 01950: val_acc did not improve from 0.86765\n",
+ "Epoch 1951/3000\n",
+ " - 39s - loss: 0.2354 - acc: 0.9736 - val_loss: 0.9240 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01951: val_acc did not improve from 0.86765\n",
+ "Epoch 1952/3000\n",
+ " - 39s - loss: 0.2372 - acc: 0.9724 - val_loss: 0.9178 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01952: val_acc did not improve from 0.86765\n",
+ "Epoch 1953/3000\n",
+ " - 39s - loss: 0.2367 - acc: 0.9715 - val_loss: 0.9453 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 01953: val_acc did not improve from 0.86765\n",
+ "Epoch 1954/3000\n",
+ " - 39s - loss: 0.2439 - acc: 0.9698 - val_loss: 0.9189 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01954: val_acc did not improve from 0.86765\n",
+ "Epoch 1955/3000\n",
+ " - 39s - loss: 0.2349 - acc: 0.9748 - val_loss: 0.9128 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01955: val_acc did not improve from 0.86765\n",
+ "Epoch 1956/3000\n",
+ " - 39s - loss: 0.2483 - acc: 0.9721 - val_loss: 0.9379 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01956: val_acc did not improve from 0.86765\n",
+ "Epoch 1957/3000\n",
+ " - 39s - loss: 0.2426 - acc: 0.9713 - val_loss: 0.9401 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01957: val_acc did not improve from 0.86765\n",
+ "Epoch 1958/3000\n",
+ " - 39s - loss: 0.2320 - acc: 0.9725 - val_loss: 0.9494 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01958: val_acc did not improve from 0.86765\n",
+ "Epoch 1959/3000\n",
+ " - 39s - loss: 0.2462 - acc: 0.9709 - val_loss: 0.9283 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01959: val_acc did not improve from 0.86765\n",
+ "Epoch 1960/3000\n",
+ " - 39s - loss: 0.2322 - acc: 0.9731 - val_loss: 0.9307 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01960: val_acc did not improve from 0.86765\n",
+ "Epoch 1961/3000\n",
+ " - 39s - loss: 0.2386 - acc: 0.9725 - val_loss: 0.9237 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 01961: val_acc did not improve from 0.86765\n",
+ "Epoch 1962/3000\n",
+ " - 39s - loss: 0.2421 - acc: 0.9718 - val_loss: 0.9575 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 01962: val_acc did not improve from 0.86765\n",
+ "Epoch 1963/3000\n",
+ " - 39s - loss: 0.2354 - acc: 0.9716 - val_loss: 0.9744 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 01963: val_acc did not improve from 0.86765\n",
+ "Epoch 1964/3000\n",
+ " - 39s - loss: 0.2351 - acc: 0.9707 - val_loss: 0.9565 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 01964: val_acc did not improve from 0.86765\n",
+ "Epoch 1965/3000\n",
+ " - 39s - loss: 0.2380 - acc: 0.9739 - val_loss: 0.9547 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01965: val_acc did not improve from 0.86765\n",
+ "Epoch 1966/3000\n",
+ " - 39s - loss: 0.2353 - acc: 0.9737 - val_loss: 0.9342 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01966: val_acc did not improve from 0.86765\n",
+ "Epoch 1967/3000\n",
+ " - 39s - loss: 0.2357 - acc: 0.9733 - val_loss: 0.9350 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01967: val_acc did not improve from 0.86765\n",
+ "Epoch 1968/3000\n",
+ " - 39s - loss: 0.2315 - acc: 0.9734 - val_loss: 0.9570 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01968: val_acc did not improve from 0.86765\n",
+ "Epoch 1969/3000\n",
+ " - 39s - loss: 0.2418 - acc: 0.9700 - val_loss: 0.9726 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 01969: val_acc did not improve from 0.86765\n",
+ "Epoch 1970/3000\n",
+ " - 39s - loss: 0.2409 - acc: 0.9716 - val_loss: 0.9846 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 01970: val_acc did not improve from 0.86765\n",
+ "Epoch 1971/3000\n",
+ " - 39s - loss: 0.2337 - acc: 0.9733 - val_loss: 0.9560 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 01971: val_acc did not improve from 0.86765\n",
+ "Epoch 1972/3000\n",
+ " - 39s - loss: 0.2431 - acc: 0.9716 - val_loss: 0.9473 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 01972: val_acc did not improve from 0.86765\n",
+ "Epoch 1973/3000\n",
+ " - 39s - loss: 0.2388 - acc: 0.9713 - val_loss: 0.9491 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01973: val_acc did not improve from 0.86765\n",
+ "Epoch 1974/3000\n",
+ " - 39s - loss: 0.2320 - acc: 0.9742 - val_loss: 0.9748 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 01974: val_acc did not improve from 0.86765\n",
+ "Epoch 1975/3000\n",
+ " - 39s - loss: 0.2362 - acc: 0.9712 - val_loss: 0.9847 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 01975: val_acc did not improve from 0.86765\n",
+ "Epoch 1976/3000\n",
+ " - 39s - loss: 0.2345 - acc: 0.9737 - val_loss: 0.9525 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01976: val_acc did not improve from 0.86765\n",
+ "Epoch 1977/3000\n",
+ " - 39s - loss: 0.2358 - acc: 0.9728 - val_loss: 0.9498 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01977: val_acc did not improve from 0.86765\n",
+ "Epoch 1978/3000\n",
+ " - 39s - loss: 0.2410 - acc: 0.9719 - val_loss: 0.9489 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01978: val_acc did not improve from 0.86765\n",
+ "Epoch 1979/3000\n",
+ " - 40s - loss: 0.2351 - acc: 0.9719 - val_loss: 0.9670 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 01979: val_acc did not improve from 0.86765\n",
+ "Epoch 1980/3000\n",
+ " - 39s - loss: 0.2305 - acc: 0.9755 - val_loss: 0.9234 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 01980: val_acc did not improve from 0.86765\n",
+ "Epoch 1981/3000\n",
+ " - 39s - loss: 0.2433 - acc: 0.9709 - val_loss: 0.9302 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 01981: val_acc did not improve from 0.86765\n",
+ "Epoch 1982/3000\n",
+ " - 39s - loss: 0.2398 - acc: 0.9710 - val_loss: 0.9491 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 01982: val_acc did not improve from 0.86765\n",
+ "Epoch 1983/3000\n",
+ " - 39s - loss: 0.2390 - acc: 0.9719 - val_loss: 0.9484 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 01983: val_acc did not improve from 0.86765\n",
+ "Epoch 1984/3000\n",
+ " - 39s - loss: 0.2371 - acc: 0.9710 - val_loss: 0.9230 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01984: val_acc did not improve from 0.86765\n",
+ "Epoch 1985/3000\n",
+ " - 39s - loss: 0.2370 - acc: 0.9718 - val_loss: 0.9209 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01985: val_acc did not improve from 0.86765\n",
+ "Epoch 1986/3000\n",
+ " - 39s - loss: 0.2372 - acc: 0.9731 - val_loss: 0.9489 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01986: val_acc did not improve from 0.86765\n",
+ "Epoch 1987/3000\n",
+ " - 39s - loss: 0.2398 - acc: 0.9746 - val_loss: 0.9605 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 01987: val_acc did not improve from 0.86765\n",
+ "Epoch 1988/3000\n",
+ " - 40s - loss: 0.2354 - acc: 0.9724 - val_loss: 0.9813 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 01988: val_acc did not improve from 0.86765\n",
+ "Epoch 1989/3000\n",
+ " - 39s - loss: 0.2438 - acc: 0.9718 - val_loss: 0.9515 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 01989: val_acc did not improve from 0.86765\n",
+ "Epoch 1990/3000\n",
+ " - 39s - loss: 0.2381 - acc: 0.9724 - val_loss: 0.9762 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 01990: val_acc did not improve from 0.86765\n",
+ "Epoch 1991/3000\n",
+ " - 39s - loss: 0.2378 - acc: 0.9716 - val_loss: 0.9334 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 01991: val_acc did not improve from 0.86765\n",
+ "Epoch 1992/3000\n",
+ " - 40s - loss: 0.2349 - acc: 0.9710 - val_loss: 0.9288 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 01992: val_acc did not improve from 0.86765\n",
+ "Epoch 1993/3000\n",
+ " - 39s - loss: 0.2394 - acc: 0.9724 - val_loss: 0.9279 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01993: val_acc did not improve from 0.86765\n",
+ "Epoch 1994/3000\n",
+ " - 39s - loss: 0.2526 - acc: 0.9697 - val_loss: 0.9624 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 01994: val_acc did not improve from 0.86765\n",
+ "Epoch 1995/3000\n",
+ " - 39s - loss: 0.2390 - acc: 0.9710 - val_loss: 0.9695 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 01995: val_acc did not improve from 0.86765\n",
+ "Epoch 1996/3000\n",
+ " - 39s - loss: 0.2423 - acc: 0.9725 - val_loss: 0.9476 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01996: val_acc did not improve from 0.86765\n",
+ "Epoch 1997/3000\n",
+ " - 39s - loss: 0.2412 - acc: 0.9707 - val_loss: 0.9169 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 01997: val_acc did not improve from 0.86765\n",
+ "Epoch 1998/3000\n",
+ " - 39s - loss: 0.2388 - acc: 0.9731 - val_loss: 0.9313 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 01998: val_acc did not improve from 0.86765\n",
+ "Epoch 1999/3000\n",
+ " - 39s - loss: 0.2416 - acc: 0.9718 - val_loss: 0.9367 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 01999: val_acc did not improve from 0.86765\n",
+ "Epoch 2000/3000\n",
+ " - 39s - loss: 0.2459 - acc: 0.9697 - val_loss: 0.9481 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02000: val_acc did not improve from 0.86765\n",
+ "Epoch 2001/3000\n",
+ " - 39s - loss: 0.2441 - acc: 0.9719 - val_loss: 0.9557 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02001: val_acc did not improve from 0.86765\n",
+ "Epoch 2002/3000\n",
+ " - 39s - loss: 0.2404 - acc: 0.9709 - val_loss: 0.9537 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02002: val_acc did not improve from 0.86765\n",
+ "Epoch 2003/3000\n",
+ " - 39s - loss: 0.2445 - acc: 0.9695 - val_loss: 0.9502 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02003: val_acc did not improve from 0.86765\n",
+ "Epoch 2004/3000\n",
+ " - 39s - loss: 0.2377 - acc: 0.9728 - val_loss: 0.9505 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02004: val_acc did not improve from 0.86765\n",
+ "Epoch 2005/3000\n",
+ " - 40s - loss: 0.2439 - acc: 0.9712 - val_loss: 0.9444 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02005: val_acc did not improve from 0.86765\n",
+ "Epoch 2006/3000\n",
+ " - 39s - loss: 0.2411 - acc: 0.9698 - val_loss: 0.9675 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02006: val_acc did not improve from 0.86765\n",
+ "Epoch 2007/3000\n",
+ " - 39s - loss: 0.2413 - acc: 0.9725 - val_loss: 0.9461 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02007: val_acc did not improve from 0.86765\n",
+ "Epoch 2008/3000\n",
+ " - 39s - loss: 0.2400 - acc: 0.9716 - val_loss: 0.9730 - val_acc: 0.8474\n",
+ "\n",
+ "Epoch 02008: val_acc did not improve from 0.86765\n",
+ "Epoch 2009/3000\n",
+ " - 39s - loss: 0.2425 - acc: 0.9691 - val_loss: 0.9421 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02009: val_acc did not improve from 0.86765\n",
+ "Epoch 2010/3000\n",
+ " - 39s - loss: 0.2375 - acc: 0.9740 - val_loss: 0.9678 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02010: val_acc did not improve from 0.86765\n",
+ "Epoch 2011/3000\n",
+ " - 39s - loss: 0.2391 - acc: 0.9704 - val_loss: 0.9357 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02011: val_acc did not improve from 0.86765\n",
+ "Epoch 2012/3000\n",
+ " - 39s - loss: 0.2442 - acc: 0.9715 - val_loss: 0.9617 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02012: val_acc did not improve from 0.86765\n",
+ "Epoch 2013/3000\n",
+ " - 39s - loss: 0.2321 - acc: 0.9739 - val_loss: 0.9584 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02013: val_acc did not improve from 0.86765\n",
+ "Epoch 2014/3000\n",
+ " - 39s - loss: 0.2488 - acc: 0.9706 - val_loss: 0.9299 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02014: val_acc did not improve from 0.86765\n",
+ "Epoch 2015/3000\n",
+ " - 39s - loss: 0.2391 - acc: 0.9694 - val_loss: 0.9469 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02015: val_acc did not improve from 0.86765\n",
+ "Epoch 2016/3000\n",
+ " - 39s - loss: 0.2331 - acc: 0.9734 - val_loss: 0.9361 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02016: val_acc did not improve from 0.86765\n",
+ "Epoch 2017/3000\n",
+ " - 39s - loss: 0.2400 - acc: 0.9709 - val_loss: 0.9471 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02017: val_acc did not improve from 0.86765\n",
+ "Epoch 2018/3000\n",
+ " - 39s - loss: 0.2352 - acc: 0.9737 - val_loss: 0.9547 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02018: val_acc did not improve from 0.86765\n",
+ "Epoch 2019/3000\n",
+ " - 39s - loss: 0.2366 - acc: 0.9724 - val_loss: 0.9492 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02019: val_acc did not improve from 0.86765\n",
+ "Epoch 2020/3000\n",
+ " - 39s - loss: 0.2387 - acc: 0.9713 - val_loss: 0.9556 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02020: val_acc did not improve from 0.86765\n",
+ "Epoch 2021/3000\n",
+ " - 39s - loss: 0.2384 - acc: 0.9701 - val_loss: 0.9613 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02021: val_acc did not improve from 0.86765\n",
+ "Epoch 2022/3000\n",
+ " - 39s - loss: 0.2335 - acc: 0.9704 - val_loss: 0.9613 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02022: val_acc did not improve from 0.86765\n",
+ "Epoch 2023/3000\n",
+ " - 39s - loss: 0.2354 - acc: 0.9718 - val_loss: 0.9449 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02023: val_acc did not improve from 0.86765\n",
+ "Epoch 2024/3000\n",
+ " - 39s - loss: 0.2371 - acc: 0.9742 - val_loss: 0.9633 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02024: val_acc did not improve from 0.86765\n",
+ "Epoch 2025/3000\n",
+ " - 39s - loss: 0.2383 - acc: 0.9719 - val_loss: 0.9627 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02025: val_acc did not improve from 0.86765\n",
+ "Epoch 2026/3000\n",
+ " - 39s - loss: 0.2476 - acc: 0.9712 - val_loss: 0.9534 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02026: val_acc did not improve from 0.86765\n",
+ "Epoch 2027/3000\n",
+ " - 39s - loss: 0.2458 - acc: 0.9700 - val_loss: 0.9551 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02027: val_acc did not improve from 0.86765\n",
+ "Epoch 2028/3000\n",
+ " - 39s - loss: 0.2380 - acc: 0.9716 - val_loss: 0.9125 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02028: val_acc did not improve from 0.86765\n",
+ "Epoch 2029/3000\n",
+ " - 39s - loss: 0.2400 - acc: 0.9715 - val_loss: 0.9414 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02029: val_acc did not improve from 0.86765\n",
+ "Epoch 2030/3000\n",
+ " - 39s - loss: 0.2453 - acc: 0.9718 - val_loss: 0.9240 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02030: val_acc did not improve from 0.86765\n",
+ "Epoch 2031/3000\n",
+ " - 39s - loss: 0.2412 - acc: 0.9706 - val_loss: 0.9580 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02031: val_acc did not improve from 0.86765\n",
+ "Epoch 2032/3000\n",
+ " - 39s - loss: 0.2414 - acc: 0.9692 - val_loss: 0.9617 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02032: val_acc did not improve from 0.86765\n",
+ "Epoch 2033/3000\n",
+ " - 39s - loss: 0.2322 - acc: 0.9724 - val_loss: 0.9299 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02033: val_acc did not improve from 0.86765\n",
+ "Epoch 2034/3000\n",
+ " - 39s - loss: 0.2407 - acc: 0.9722 - val_loss: 0.9504 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02034: val_acc did not improve from 0.86765\n",
+ "Epoch 2035/3000\n",
+ " - 39s - loss: 0.2412 - acc: 0.9704 - val_loss: 0.9584 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02035: val_acc did not improve from 0.86765\n",
+ "Epoch 2036/3000\n",
+ " - 39s - loss: 0.2291 - acc: 0.9752 - val_loss: 0.9353 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02036: val_acc did not improve from 0.86765\n",
+ "Epoch 2037/3000\n",
+ " - 39s - loss: 0.2465 - acc: 0.9713 - val_loss: 0.9433 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02037: val_acc did not improve from 0.86765\n",
+ "Epoch 2038/3000\n",
+ " - 39s - loss: 0.2313 - acc: 0.9755 - val_loss: 0.9622 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02038: val_acc did not improve from 0.86765\n",
+ "Epoch 2039/3000\n",
+ " - 39s - loss: 0.2351 - acc: 0.9742 - val_loss: 0.9695 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02039: val_acc did not improve from 0.86765\n",
+ "Epoch 2040/3000\n",
+ " - 39s - loss: 0.2315 - acc: 0.9748 - val_loss: 0.9526 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02040: val_acc did not improve from 0.86765\n",
+ "Epoch 2041/3000\n",
+ " - 39s - loss: 0.2416 - acc: 0.9721 - val_loss: 0.9791 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 02041: val_acc did not improve from 0.86765\n",
+ "Epoch 2042/3000\n",
+ " - 39s - loss: 0.2393 - acc: 0.9700 - val_loss: 0.9864 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 02042: val_acc did not improve from 0.86765\n",
+ "Epoch 2043/3000\n",
+ " - 39s - loss: 0.2399 - acc: 0.9706 - val_loss: 0.9681 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02043: val_acc did not improve from 0.86765\n",
+ "Epoch 2044/3000\n",
+ " - 39s - loss: 0.2462 - acc: 0.9707 - val_loss: 0.9471 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02044: val_acc did not improve from 0.86765\n",
+ "Epoch 2045/3000\n",
+ " - 39s - loss: 0.2297 - acc: 0.9731 - val_loss: 0.9520 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02045: val_acc did not improve from 0.86765\n",
+ "Epoch 2046/3000\n",
+ " - 39s - loss: 0.2403 - acc: 0.9728 - val_loss: 0.9474 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02046: val_acc did not improve from 0.86765\n",
+ "Epoch 2047/3000\n",
+ " - 39s - loss: 0.2361 - acc: 0.9730 - val_loss: 0.9686 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02047: val_acc did not improve from 0.86765\n",
+ "Epoch 2048/3000\n",
+ " - 39s - loss: 0.2373 - acc: 0.9740 - val_loss: 0.9266 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02048: val_acc did not improve from 0.86765\n",
+ "Epoch 2049/3000\n",
+ " - 39s - loss: 0.2426 - acc: 0.9719 - val_loss: 0.9389 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02049: val_acc did not improve from 0.86765\n",
+ "Epoch 2050/3000\n",
+ " - 39s - loss: 0.2379 - acc: 0.9725 - val_loss: 0.9371 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02050: val_acc did not improve from 0.86765\n",
+ "Epoch 2051/3000\n",
+ " - 39s - loss: 0.2403 - acc: 0.9701 - val_loss: 0.9588 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02051: val_acc did not improve from 0.86765\n",
+ "Epoch 2052/3000\n",
+ " - 39s - loss: 0.2376 - acc: 0.9722 - val_loss: 0.9510 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02052: val_acc did not improve from 0.86765\n",
+ "Epoch 2053/3000\n",
+ " - 39s - loss: 0.2430 - acc: 0.9695 - val_loss: 0.9629 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02053: val_acc did not improve from 0.86765\n",
+ "Epoch 2054/3000\n",
+ " - 39s - loss: 0.2442 - acc: 0.9691 - val_loss: 0.9616 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02054: val_acc did not improve from 0.86765\n",
+ "Epoch 2055/3000\n",
+ " - 39s - loss: 0.2328 - acc: 0.9730 - val_loss: 0.9661 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 02055: val_acc did not improve from 0.86765\n",
+ "Epoch 2056/3000\n",
+ " - 39s - loss: 0.2396 - acc: 0.9728 - val_loss: 0.9707 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02056: val_acc did not improve from 0.86765\n",
+ "Epoch 2057/3000\n",
+ " - 39s - loss: 0.2385 - acc: 0.9701 - val_loss: 0.9474 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02057: val_acc did not improve from 0.86765\n",
+ "Epoch 2058/3000\n",
+ " - 39s - loss: 0.2517 - acc: 0.9683 - val_loss: 0.9756 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02058: val_acc did not improve from 0.86765\n",
+ "Epoch 2059/3000\n",
+ " - 39s - loss: 0.2406 - acc: 0.9703 - val_loss: 0.9670 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02059: val_acc did not improve from 0.86765\n",
+ "Epoch 2060/3000\n",
+ " - 39s - loss: 0.2333 - acc: 0.9727 - val_loss: 0.9609 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02060: val_acc did not improve from 0.86765\n",
+ "Epoch 2061/3000\n",
+ " - 39s - loss: 0.2347 - acc: 0.9734 - val_loss: 0.9288 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02061: val_acc did not improve from 0.86765\n",
+ "Epoch 2062/3000\n",
+ " - 39s - loss: 0.2367 - acc: 0.9709 - val_loss: 0.9407 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02062: val_acc did not improve from 0.86765\n",
+ "Epoch 2063/3000\n",
+ " - 39s - loss: 0.2401 - acc: 0.9695 - val_loss: 0.9461 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02063: val_acc did not improve from 0.86765\n",
+ "Epoch 2064/3000\n",
+ " - 39s - loss: 0.2355 - acc: 0.9721 - val_loss: 0.9359 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02064: val_acc did not improve from 0.86765\n",
+ "Epoch 2065/3000\n",
+ " - 39s - loss: 0.2380 - acc: 0.9740 - val_loss: 0.9771 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02065: val_acc did not improve from 0.86765\n",
+ "Epoch 2066/3000\n",
+ " - 39s - loss: 0.2330 - acc: 0.9709 - val_loss: 0.9569 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02066: val_acc did not improve from 0.86765\n",
+ "Epoch 2067/3000\n",
+ " - 39s - loss: 0.2330 - acc: 0.9730 - val_loss: 0.9549 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02067: val_acc did not improve from 0.86765\n",
+ "Epoch 2068/3000\n",
+ " - 39s - loss: 0.2374 - acc: 0.9695 - val_loss: 0.9608 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02068: val_acc did not improve from 0.86765\n",
+ "Epoch 2069/3000\n",
+ " - 39s - loss: 0.2412 - acc: 0.9709 - val_loss: 0.9994 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02069: val_acc did not improve from 0.86765\n",
+ "Epoch 2070/3000\n",
+ " - 39s - loss: 0.2426 - acc: 0.9721 - val_loss: 0.9681 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02070: val_acc did not improve from 0.86765\n",
+ "Epoch 2071/3000\n",
+ " - 39s - loss: 0.2438 - acc: 0.9709 - val_loss: 0.9335 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02071: val_acc did not improve from 0.86765\n",
+ "Epoch 2072/3000\n",
+ " - 39s - loss: 0.2370 - acc: 0.9706 - val_loss: 0.9464 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02072: val_acc did not improve from 0.86765\n",
+ "Epoch 2073/3000\n",
+ " - 39s - loss: 0.2379 - acc: 0.9716 - val_loss: 0.9395 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 02073: val_acc did not improve from 0.86765\n",
+ "Epoch 2074/3000\n",
+ " - 39s - loss: 0.2335 - acc: 0.9725 - val_loss: 0.9276 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02074: val_acc did not improve from 0.86765\n",
+ "Epoch 2075/3000\n",
+ " - 39s - loss: 0.2359 - acc: 0.9707 - val_loss: 0.9434 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02075: val_acc did not improve from 0.86765\n",
+ "Epoch 2076/3000\n",
+ " - 39s - loss: 0.2351 - acc: 0.9725 - val_loss: 0.9271 - val_acc: 0.8614\n",
+ "\n",
+ "Epoch 02076: val_acc did not improve from 0.86765\n",
+ "Epoch 2077/3000\n",
+ " - 39s - loss: 0.2339 - acc: 0.9739 - val_loss: 0.9493 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02077: val_acc did not improve from 0.86765\n",
+ "Epoch 2078/3000\n",
+ " - 39s - loss: 0.2408 - acc: 0.9713 - val_loss: 0.9289 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02078: val_acc did not improve from 0.86765\n",
+ "Epoch 2079/3000\n",
+ " - 39s - loss: 0.2311 - acc: 0.9731 - val_loss: 0.9743 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02079: val_acc did not improve from 0.86765\n",
+ "Epoch 2080/3000\n",
+ " - 39s - loss: 0.2391 - acc: 0.9712 - val_loss: 0.9787 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02080: val_acc did not improve from 0.86765\n",
+ "Epoch 2081/3000\n",
+ " - 39s - loss: 0.2416 - acc: 0.9721 - val_loss: 0.9481 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02081: val_acc did not improve from 0.86765\n",
+ "Epoch 2082/3000\n",
+ " - 39s - loss: 0.2349 - acc: 0.9746 - val_loss: 0.9670 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02082: val_acc did not improve from 0.86765\n",
+ "Epoch 2083/3000\n",
+ " - 39s - loss: 0.2428 - acc: 0.9700 - val_loss: 0.9585 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02083: val_acc did not improve from 0.86765\n",
+ "Epoch 2084/3000\n",
+ " - 39s - loss: 0.2419 - acc: 0.9707 - val_loss: 0.9589 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02084: val_acc did not improve from 0.86765\n",
+ "Epoch 2085/3000\n",
+ " - 39s - loss: 0.2325 - acc: 0.9718 - val_loss: 0.9546 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02085: val_acc did not improve from 0.86765\n",
+ "Epoch 2086/3000\n",
+ " - 39s - loss: 0.2456 - acc: 0.9695 - val_loss: 0.9727 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02086: val_acc did not improve from 0.86765\n",
+ "Epoch 2087/3000\n",
+ " - 39s - loss: 0.2377 - acc: 0.9725 - val_loss: 0.9415 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02087: val_acc did not improve from 0.86765\n",
+ "Epoch 2088/3000\n",
+ " - 39s - loss: 0.2340 - acc: 0.9730 - val_loss: 0.9524 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02088: val_acc did not improve from 0.86765\n",
+ "Epoch 2089/3000\n",
+ " - 40s - loss: 0.2349 - acc: 0.9739 - val_loss: 0.9492 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02089: val_acc did not improve from 0.86765\n",
+ "Epoch 2090/3000\n",
+ " - 39s - loss: 0.2492 - acc: 0.9692 - val_loss: 0.9358 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 02090: val_acc did not improve from 0.86765\n",
+ "Epoch 2091/3000\n",
+ " - 39s - loss: 0.2351 - acc: 0.9716 - val_loss: 0.9357 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02091: val_acc did not improve from 0.86765\n",
+ "Epoch 2092/3000\n",
+ " - 39s - loss: 0.2311 - acc: 0.9728 - val_loss: 0.9582 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02092: val_acc did not improve from 0.86765\n",
+ "Epoch 2093/3000\n",
+ " - 40s - loss: 0.2418 - acc: 0.9727 - val_loss: 0.9491 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02093: val_acc did not improve from 0.86765\n",
+ "Epoch 2094/3000\n",
+ " - 39s - loss: 0.2311 - acc: 0.9751 - val_loss: 0.9501 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02094: val_acc did not improve from 0.86765\n",
+ "Epoch 2095/3000\n",
+ " - 39s - loss: 0.2330 - acc: 0.9709 - val_loss: 0.9506 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02095: val_acc did not improve from 0.86765\n",
+ "Epoch 2096/3000\n",
+ " - 39s - loss: 0.2446 - acc: 0.9706 - val_loss: 0.9476 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02096: val_acc did not improve from 0.86765\n",
+ "Epoch 2097/3000\n",
+ " - 39s - loss: 0.2413 - acc: 0.9698 - val_loss: 0.9480 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02097: val_acc did not improve from 0.86765\n",
+ "Epoch 2098/3000\n",
+ " - 39s - loss: 0.2435 - acc: 0.9706 - val_loss: 0.9637 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02098: val_acc did not improve from 0.86765\n",
+ "Epoch 2099/3000\n",
+ " - 38s - loss: 0.2336 - acc: 0.9710 - val_loss: 0.9526 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02099: val_acc did not improve from 0.86765\n",
+ "Epoch 2100/3000\n",
+ " - 39s - loss: 0.2388 - acc: 0.9710 - val_loss: 0.9775 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02100: val_acc did not improve from 0.86765\n",
+ "Epoch 2101/3000\n",
+ " - 39s - loss: 0.2267 - acc: 0.9755 - val_loss: 0.9836 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02101: val_acc did not improve from 0.86765\n",
+ "Epoch 2102/3000\n",
+ " - 39s - loss: 0.2341 - acc: 0.9737 - val_loss: 0.9518 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02102: val_acc did not improve from 0.86765\n",
+ "Epoch 2103/3000\n",
+ " - 39s - loss: 0.2455 - acc: 0.9709 - val_loss: 0.9928 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 02103: val_acc did not improve from 0.86765\n",
+ "Epoch 2104/3000\n",
+ " - 39s - loss: 0.2408 - acc: 0.9713 - val_loss: 0.9514 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02104: val_acc did not improve from 0.86765\n",
+ "Epoch 2105/3000\n",
+ " - 39s - loss: 0.2374 - acc: 0.9727 - val_loss: 0.9577 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02105: val_acc did not improve from 0.86765\n",
+ "Epoch 2106/3000\n",
+ " - 39s - loss: 0.2509 - acc: 0.9694 - val_loss: 0.9963 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02106: val_acc did not improve from 0.86765\n",
+ "Epoch 2107/3000\n",
+ " - 39s - loss: 0.2378 - acc: 0.9739 - val_loss: 0.9823 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02107: val_acc did not improve from 0.86765\n",
+ "Epoch 2108/3000\n",
+ " - 39s - loss: 0.2395 - acc: 0.9722 - val_loss: 0.9448 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02108: val_acc did not improve from 0.86765\n",
+ "Epoch 2109/3000\n",
+ " - 39s - loss: 0.2436 - acc: 0.9713 - val_loss: 0.9368 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02109: val_acc did not improve from 0.86765\n",
+ "Epoch 2110/3000\n",
+ " - 39s - loss: 0.2356 - acc: 0.9727 - val_loss: 0.9681 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02110: val_acc did not improve from 0.86765\n",
+ "Epoch 2111/3000\n",
+ " - 39s - loss: 0.2287 - acc: 0.9752 - val_loss: 0.9240 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02111: val_acc did not improve from 0.86765\n",
+ "Epoch 2112/3000\n",
+ " - 39s - loss: 0.2469 - acc: 0.9695 - val_loss: 0.9354 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02112: val_acc did not improve from 0.86765\n",
+ "Epoch 2113/3000\n",
+ " - 39s - loss: 0.2347 - acc: 0.9736 - val_loss: 0.9155 - val_acc: 0.8638\n",
+ "\n",
+ "Epoch 02113: val_acc did not improve from 0.86765\n",
+ "Epoch 2114/3000\n",
+ " - 39s - loss: 0.2462 - acc: 0.9680 - val_loss: 0.9165 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02114: val_acc did not improve from 0.86765\n",
+ "Epoch 2115/3000\n",
+ " - 39s - loss: 0.2421 - acc: 0.9722 - val_loss: 0.9211 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02115: val_acc did not improve from 0.86765\n",
+ "Epoch 2116/3000\n",
+ " - 39s - loss: 0.2292 - acc: 0.9728 - val_loss: 0.9327 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02116: val_acc did not improve from 0.86765\n",
+ "Epoch 2117/3000\n",
+ " - 39s - loss: 0.2303 - acc: 0.9725 - val_loss: 0.9244 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02117: val_acc did not improve from 0.86765\n",
+ "Epoch 2118/3000\n",
+ " - 39s - loss: 0.2364 - acc: 0.9727 - val_loss: 0.9234 - val_acc: 0.8634\n",
+ "\n",
+ "Epoch 02118: val_acc did not improve from 0.86765\n",
+ "Epoch 2119/3000\n",
+ " - 39s - loss: 0.2371 - acc: 0.9725 - val_loss: 0.9618 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02119: val_acc did not improve from 0.86765\n",
+ "Epoch 2120/3000\n",
+ " - 39s - loss: 0.2456 - acc: 0.9689 - val_loss: 0.9649 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02120: val_acc did not improve from 0.86765\n",
+ "Epoch 2121/3000\n",
+ " - 39s - loss: 0.2364 - acc: 0.9743 - val_loss: 0.9671 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02121: val_acc did not improve from 0.86765\n",
+ "Epoch 2122/3000\n",
+ " - 39s - loss: 0.2430 - acc: 0.9695 - val_loss: 0.9523 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02122: val_acc did not improve from 0.86765\n",
+ "Epoch 2123/3000\n",
+ " - 39s - loss: 0.2408 - acc: 0.9707 - val_loss: 0.9457 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02123: val_acc did not improve from 0.86765\n",
+ "Epoch 2124/3000\n",
+ " - 39s - loss: 0.2469 - acc: 0.9706 - val_loss: 0.9600 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02124: val_acc did not improve from 0.86765\n",
+ "Epoch 2125/3000\n",
+ " - 39s - loss: 0.2411 - acc: 0.9709 - val_loss: 0.9726 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02125: val_acc did not improve from 0.86765\n",
+ "Epoch 2126/3000\n",
+ " - 39s - loss: 0.2395 - acc: 0.9721 - val_loss: 0.9435 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02126: val_acc did not improve from 0.86765\n",
+ "Epoch 2127/3000\n",
+ " - 39s - loss: 0.2358 - acc: 0.9725 - val_loss: 0.9640 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02127: val_acc did not improve from 0.86765\n",
+ "Epoch 2128/3000\n",
+ " - 39s - loss: 0.2387 - acc: 0.9718 - val_loss: 0.9418 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02128: val_acc did not improve from 0.86765\n",
+ "Epoch 2129/3000\n",
+ " - 39s - loss: 0.2473 - acc: 0.9689 - val_loss: 0.9574 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02129: val_acc did not improve from 0.86765\n",
+ "Epoch 2130/3000\n",
+ " - 39s - loss: 0.2325 - acc: 0.9707 - val_loss: 0.9537 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02130: val_acc did not improve from 0.86765\n",
+ "Epoch 2131/3000\n",
+ " - 39s - loss: 0.2376 - acc: 0.9737 - val_loss: 0.9685 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02131: val_acc did not improve from 0.86765\n",
+ "Epoch 2132/3000\n",
+ " - 39s - loss: 0.2299 - acc: 0.9730 - val_loss: 0.9493 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02132: val_acc did not improve from 0.86765\n",
+ "Epoch 2133/3000\n",
+ " - 39s - loss: 0.2387 - acc: 0.9728 - val_loss: 0.9621 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02133: val_acc did not improve from 0.86765\n",
+ "Epoch 2134/3000\n",
+ " - 39s - loss: 0.2414 - acc: 0.9719 - val_loss: 0.9472 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02134: val_acc did not improve from 0.86765\n",
+ "Epoch 2135/3000\n",
+ " - 39s - loss: 0.2434 - acc: 0.9709 - val_loss: 0.9432 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02135: val_acc did not improve from 0.86765\n",
+ "Epoch 2136/3000\n",
+ " - 39s - loss: 0.2439 - acc: 0.9707 - val_loss: 0.9703 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02136: val_acc did not improve from 0.86765\n",
+ "Epoch 2137/3000\n",
+ " - 40s - loss: 0.2343 - acc: 0.9749 - val_loss: 0.9428 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02137: val_acc did not improve from 0.86765\n",
+ "Epoch 2138/3000\n",
+ " - 39s - loss: 0.2365 - acc: 0.9727 - val_loss: 0.9384 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02138: val_acc did not improve from 0.86765\n",
+ "Epoch 2139/3000\n",
+ " - 39s - loss: 0.2370 - acc: 0.9742 - val_loss: 0.9424 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 02139: val_acc did not improve from 0.86765\n",
+ "Epoch 2140/3000\n",
+ " - 39s - loss: 0.2327 - acc: 0.9731 - val_loss: 0.9431 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02140: val_acc did not improve from 0.86765\n",
+ "Epoch 2141/3000\n",
+ " - 39s - loss: 0.2374 - acc: 0.9716 - val_loss: 0.9011 - val_acc: 0.8653\n",
+ "\n",
+ "Epoch 02141: val_acc did not improve from 0.86765\n",
+ "Epoch 2142/3000\n",
+ " - 40s - loss: 0.2385 - acc: 0.9713 - val_loss: 0.9478 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02142: val_acc did not improve from 0.86765\n",
+ "Epoch 2143/3000\n",
+ " - 39s - loss: 0.2342 - acc: 0.9742 - val_loss: 0.9522 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02143: val_acc did not improve from 0.86765\n",
+ "Epoch 2144/3000\n",
+ " - 39s - loss: 0.2383 - acc: 0.9746 - val_loss: 0.9636 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02144: val_acc did not improve from 0.86765\n",
+ "Epoch 2145/3000\n",
+ " - 39s - loss: 0.2411 - acc: 0.9698 - val_loss: 0.9515 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 02145: val_acc did not improve from 0.86765\n",
+ "Epoch 2146/3000\n",
+ " - 39s - loss: 0.2485 - acc: 0.9719 - val_loss: 0.9671 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02146: val_acc did not improve from 0.86765\n",
+ "Epoch 2147/3000\n",
+ " - 39s - loss: 0.2319 - acc: 0.9728 - val_loss: 0.9527 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02147: val_acc did not improve from 0.86765\n",
+ "Epoch 2148/3000\n",
+ " - 39s - loss: 0.2393 - acc: 0.9719 - val_loss: 0.9169 - val_acc: 0.8634\n",
+ "\n",
+ "Epoch 02148: val_acc did not improve from 0.86765\n",
+ "Epoch 2149/3000\n",
+ " - 39s - loss: 0.2327 - acc: 0.9733 - val_loss: 0.9487 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02149: val_acc did not improve from 0.86765\n",
+ "Epoch 2150/3000\n",
+ " - 39s - loss: 0.2423 - acc: 0.9701 - val_loss: 0.9421 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02150: val_acc did not improve from 0.86765\n",
+ "Epoch 2151/3000\n",
+ " - 39s - loss: 0.2305 - acc: 0.9742 - val_loss: 0.9333 - val_acc: 0.8622\n",
+ "\n",
+ "Epoch 02151: val_acc did not improve from 0.86765\n",
+ "Epoch 2152/3000\n",
+ " - 39s - loss: 0.2429 - acc: 0.9719 - val_loss: 0.9272 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02152: val_acc did not improve from 0.86765\n",
+ "Epoch 2153/3000\n",
+ " - 39s - loss: 0.2290 - acc: 0.9737 - val_loss: 0.9488 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02153: val_acc did not improve from 0.86765\n",
+ "Epoch 2154/3000\n",
+ " - 39s - loss: 0.2352 - acc: 0.9731 - val_loss: 0.9561 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02154: val_acc did not improve from 0.86765\n",
+ "Epoch 2155/3000\n",
+ " - 39s - loss: 0.2467 - acc: 0.9709 - val_loss: 0.9504 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02155: val_acc did not improve from 0.86765\n",
+ "Epoch 2156/3000\n",
+ " - 39s - loss: 0.2377 - acc: 0.9731 - val_loss: 0.9694 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02156: val_acc did not improve from 0.86765\n",
+ "Epoch 2157/3000\n",
+ " - 39s - loss: 0.2365 - acc: 0.9724 - val_loss: 0.9621 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02157: val_acc did not improve from 0.86765\n",
+ "Epoch 2158/3000\n",
+ " - 39s - loss: 0.2381 - acc: 0.9718 - val_loss: 0.9670 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02158: val_acc did not improve from 0.86765\n",
+ "Epoch 2159/3000\n",
+ " - 39s - loss: 0.2409 - acc: 0.9703 - val_loss: 0.9648 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02159: val_acc did not improve from 0.86765\n",
+ "Epoch 2160/3000\n",
+ " - 39s - loss: 0.2353 - acc: 0.9721 - val_loss: 0.9618 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02160: val_acc did not improve from 0.86765\n",
+ "Epoch 2161/3000\n",
+ " - 39s - loss: 0.2441 - acc: 0.9707 - val_loss: 0.9449 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02161: val_acc did not improve from 0.86765\n",
+ "Epoch 2162/3000\n",
+ " - 39s - loss: 0.2358 - acc: 0.9707 - val_loss: 0.9527 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02162: val_acc did not improve from 0.86765\n",
+ "Epoch 2163/3000\n",
+ " - 39s - loss: 0.2358 - acc: 0.9707 - val_loss: 0.9394 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02163: val_acc did not improve from 0.86765\n",
+ "Epoch 2164/3000\n",
+ " - 39s - loss: 0.2400 - acc: 0.9743 - val_loss: 0.9387 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02164: val_acc did not improve from 0.86765\n",
+ "Epoch 2165/3000\n",
+ " - 39s - loss: 0.2372 - acc: 0.9712 - val_loss: 0.9530 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02165: val_acc did not improve from 0.86765\n",
+ "Epoch 2166/3000\n",
+ " - 39s - loss: 0.2463 - acc: 0.9700 - val_loss: 0.9595 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02166: val_acc did not improve from 0.86765\n",
+ "Epoch 2167/3000\n",
+ " - 39s - loss: 0.2366 - acc: 0.9736 - val_loss: 0.9505 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02167: val_acc did not improve from 0.86765\n",
+ "Epoch 2168/3000\n",
+ " - 39s - loss: 0.2294 - acc: 0.9734 - val_loss: 0.9591 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02168: val_acc did not improve from 0.86765\n",
+ "Epoch 2169/3000\n",
+ " - 39s - loss: 0.2546 - acc: 0.9663 - val_loss: 0.9435 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02169: val_acc did not improve from 0.86765\n",
+ "Epoch 2170/3000\n",
+ " - 39s - loss: 0.2328 - acc: 0.9722 - val_loss: 0.9725 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02170: val_acc did not improve from 0.86765\n",
+ "Epoch 2171/3000\n",
+ " - 38s - loss: 0.2448 - acc: 0.9701 - val_loss: 0.9529 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02171: val_acc did not improve from 0.86765\n",
+ "Epoch 2172/3000\n",
+ " - 38s - loss: 0.2358 - acc: 0.9725 - val_loss: 0.9978 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 02172: val_acc did not improve from 0.86765\n",
+ "Epoch 2173/3000\n",
+ " - 39s - loss: 0.2450 - acc: 0.9724 - val_loss: 0.9337 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02173: val_acc did not improve from 0.86765\n",
+ "Epoch 2174/3000\n",
+ " - 39s - loss: 0.2355 - acc: 0.9718 - val_loss: 0.9269 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02174: val_acc did not improve from 0.86765\n",
+ "Epoch 2175/3000\n",
+ " - 39s - loss: 0.2352 - acc: 0.9742 - val_loss: 0.9453 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02175: val_acc did not improve from 0.86765\n",
+ "Epoch 2176/3000\n",
+ " - 39s - loss: 0.2358 - acc: 0.9731 - val_loss: 0.9892 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 02176: val_acc did not improve from 0.86765\n",
+ "Epoch 2177/3000\n",
+ " - 39s - loss: 0.2430 - acc: 0.9703 - val_loss: 0.9376 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02177: val_acc did not improve from 0.86765\n",
+ "Epoch 2178/3000\n",
+ " - 39s - loss: 0.2502 - acc: 0.9686 - val_loss: 0.9702 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02178: val_acc did not improve from 0.86765\n",
+ "Epoch 2179/3000\n",
+ " - 39s - loss: 0.2435 - acc: 0.9700 - val_loss: 0.9554 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02179: val_acc did not improve from 0.86765\n",
+ "Epoch 2180/3000\n",
+ " - 39s - loss: 0.2311 - acc: 0.9731 - val_loss: 0.9555 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02180: val_acc did not improve from 0.86765\n",
+ "Epoch 2181/3000\n",
+ " - 39s - loss: 0.2392 - acc: 0.9709 - val_loss: 0.9399 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02181: val_acc did not improve from 0.86765\n",
+ "Epoch 2182/3000\n",
+ " - 39s - loss: 0.2355 - acc: 0.9734 - val_loss: 0.9342 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02182: val_acc did not improve from 0.86765\n",
+ "Epoch 2183/3000\n",
+ " - 39s - loss: 0.2356 - acc: 0.9718 - val_loss: 0.9682 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02183: val_acc did not improve from 0.86765\n",
+ "Epoch 2184/3000\n",
+ " - 39s - loss: 0.2416 - acc: 0.9727 - val_loss: 0.9766 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02184: val_acc did not improve from 0.86765\n",
+ "Epoch 2185/3000\n",
+ " - 39s - loss: 0.2454 - acc: 0.9698 - val_loss: 0.9491 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02185: val_acc did not improve from 0.86765\n",
+ "Epoch 2186/3000\n",
+ " - 39s - loss: 0.2301 - acc: 0.9740 - val_loss: 0.9486 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02186: val_acc did not improve from 0.86765\n",
+ "Epoch 2187/3000\n",
+ " - 39s - loss: 0.2370 - acc: 0.9709 - val_loss: 0.9520 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02187: val_acc did not improve from 0.86765\n",
+ "Epoch 2188/3000\n",
+ " - 39s - loss: 0.2449 - acc: 0.9706 - val_loss: 0.9421 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02188: val_acc did not improve from 0.86765\n",
+ "Epoch 2189/3000\n",
+ " - 39s - loss: 0.2304 - acc: 0.9748 - val_loss: 0.9585 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02189: val_acc did not improve from 0.86765\n",
+ "Epoch 2190/3000\n",
+ " - 39s - loss: 0.2320 - acc: 0.9733 - val_loss: 0.9616 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02190: val_acc did not improve from 0.86765\n",
+ "Epoch 2191/3000\n",
+ " - 39s - loss: 0.2381 - acc: 0.9716 - val_loss: 0.9054 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 02191: val_acc did not improve from 0.86765\n",
+ "Epoch 2192/3000\n",
+ " - 39s - loss: 0.2270 - acc: 0.9730 - val_loss: 0.9647 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02192: val_acc did not improve from 0.86765\n",
+ "Epoch 2193/3000\n",
+ " - 39s - loss: 0.2362 - acc: 0.9725 - val_loss: 0.9449 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02193: val_acc did not improve from 0.86765\n",
+ "Epoch 2194/3000\n",
+ " - 39s - loss: 0.2333 - acc: 0.9718 - val_loss: 0.9578 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02194: val_acc did not improve from 0.86765\n",
+ "Epoch 2195/3000\n",
+ " - 39s - loss: 0.2316 - acc: 0.9716 - val_loss: 0.9701 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02195: val_acc did not improve from 0.86765\n",
+ "Epoch 2196/3000\n",
+ " - 39s - loss: 0.2371 - acc: 0.9742 - val_loss: 0.9713 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02196: val_acc did not improve from 0.86765\n",
+ "Epoch 2197/3000\n",
+ " - 39s - loss: 0.2403 - acc: 0.9719 - val_loss: 0.9509 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02197: val_acc did not improve from 0.86765\n",
+ "Epoch 2198/3000\n",
+ " - 39s - loss: 0.2362 - acc: 0.9722 - val_loss: 0.9612 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02198: val_acc did not improve from 0.86765\n",
+ "Epoch 2199/3000\n",
+ " - 39s - loss: 0.2508 - acc: 0.9710 - val_loss: 0.9537 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02199: val_acc did not improve from 0.86765\n",
+ "Epoch 2200/3000\n",
+ " - 39s - loss: 0.2344 - acc: 0.9713 - val_loss: 0.9637 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02200: val_acc did not improve from 0.86765\n",
+ "Epoch 2201/3000\n",
+ " - 39s - loss: 0.2331 - acc: 0.9742 - val_loss: 0.9383 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 02201: val_acc did not improve from 0.86765\n",
+ "Epoch 2202/3000\n",
+ " - 39s - loss: 0.2339 - acc: 0.9748 - val_loss: 0.9644 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02202: val_acc did not improve from 0.86765\n",
+ "Epoch 2203/3000\n",
+ " - 39s - loss: 0.2373 - acc: 0.9718 - val_loss: 0.9537 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02203: val_acc did not improve from 0.86765\n",
+ "Epoch 2204/3000\n",
+ " - 39s - loss: 0.2358 - acc: 0.9733 - val_loss: 0.9451 - val_acc: 0.8622\n",
+ "\n",
+ "Epoch 02204: val_acc did not improve from 0.86765\n",
+ "Epoch 2205/3000\n",
+ " - 40s - loss: 0.2349 - acc: 0.9722 - val_loss: 0.9414 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02205: val_acc did not improve from 0.86765\n",
+ "Epoch 2206/3000\n",
+ " - 39s - loss: 0.2430 - acc: 0.9710 - val_loss: 0.9047 - val_acc: 0.8626\n",
+ "\n",
+ "Epoch 02206: val_acc did not improve from 0.86765\n",
+ "Epoch 2207/3000\n",
+ " - 40s - loss: 0.2321 - acc: 0.9730 - val_loss: 0.9673 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02207: val_acc did not improve from 0.86765\n",
+ "Epoch 2208/3000\n",
+ " - 39s - loss: 0.2349 - acc: 0.9725 - val_loss: 0.9252 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02208: val_acc did not improve from 0.86765\n",
+ "Epoch 2209/3000\n",
+ " - 39s - loss: 0.2306 - acc: 0.9743 - val_loss: 0.9461 - val_acc: 0.8622\n",
+ "\n",
+ "Epoch 02209: val_acc did not improve from 0.86765\n",
+ "Epoch 2210/3000\n",
+ " - 39s - loss: 0.2442 - acc: 0.9713 - val_loss: 0.9348 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02210: val_acc did not improve from 0.86765\n",
+ "Epoch 2211/3000\n",
+ " - 39s - loss: 0.2426 - acc: 0.9721 - val_loss: 0.9371 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02211: val_acc did not improve from 0.86765\n",
+ "Epoch 2212/3000\n",
+ " - 39s - loss: 0.2378 - acc: 0.9721 - val_loss: 0.9723 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02212: val_acc did not improve from 0.86765\n",
+ "Epoch 2213/3000\n",
+ " - 39s - loss: 0.2301 - acc: 0.9751 - val_loss: 0.9681 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02213: val_acc did not improve from 0.86765\n",
+ "Epoch 2214/3000\n",
+ " - 39s - loss: 0.2444 - acc: 0.9706 - val_loss: 0.9855 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02214: val_acc did not improve from 0.86765\n",
+ "Epoch 2215/3000\n",
+ " - 39s - loss: 0.2445 - acc: 0.9703 - val_loss: 0.9591 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02215: val_acc did not improve from 0.86765\n",
+ "Epoch 2216/3000\n",
+ " - 39s - loss: 0.2359 - acc: 0.9745 - val_loss: 0.9669 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02216: val_acc did not improve from 0.86765\n",
+ "Epoch 2217/3000\n",
+ " - 39s - loss: 0.2334 - acc: 0.9727 - val_loss: 0.9352 - val_acc: 0.8614\n",
+ "\n",
+ "Epoch 02217: val_acc did not improve from 0.86765\n",
+ "Epoch 2218/3000\n",
+ " - 39s - loss: 0.2380 - acc: 0.9707 - val_loss: 0.9582 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02218: val_acc did not improve from 0.86765\n",
+ "Epoch 2219/3000\n",
+ " - 40s - loss: 0.2337 - acc: 0.9718 - val_loss: 0.9612 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02219: val_acc did not improve from 0.86765\n",
+ "Epoch 2220/3000\n",
+ " - 39s - loss: 0.2393 - acc: 0.9730 - val_loss: 0.9411 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02220: val_acc did not improve from 0.86765\n",
+ "Epoch 2221/3000\n",
+ " - 39s - loss: 0.2367 - acc: 0.9697 - val_loss: 0.9605 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02221: val_acc did not improve from 0.86765\n",
+ "Epoch 2222/3000\n",
+ " - 39s - loss: 0.2362 - acc: 0.9703 - val_loss: 0.9287 - val_acc: 0.8641\n",
+ "\n",
+ "Epoch 02222: val_acc did not improve from 0.86765\n",
+ "Epoch 2223/3000\n",
+ " - 39s - loss: 0.2316 - acc: 0.9745 - val_loss: 0.9463 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02223: val_acc did not improve from 0.86765\n",
+ "Epoch 2224/3000\n",
+ " - 39s - loss: 0.2419 - acc: 0.9689 - val_loss: 0.9613 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02224: val_acc did not improve from 0.86765\n",
+ "Epoch 2225/3000\n",
+ " - 39s - loss: 0.2329 - acc: 0.9724 - val_loss: 0.9395 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 02225: val_acc did not improve from 0.86765\n",
+ "Epoch 2226/3000\n",
+ " - 39s - loss: 0.2434 - acc: 0.9706 - val_loss: 0.9340 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02226: val_acc did not improve from 0.86765\n",
+ "Epoch 2227/3000\n",
+ " - 39s - loss: 0.2328 - acc: 0.9716 - val_loss: 0.9440 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02227: val_acc did not improve from 0.86765\n",
+ "Epoch 2228/3000\n",
+ " - 39s - loss: 0.2380 - acc: 0.9718 - val_loss: 0.9648 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02228: val_acc did not improve from 0.86765\n",
+ "Epoch 2229/3000\n",
+ " - 39s - loss: 0.2365 - acc: 0.9715 - val_loss: 0.9581 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02229: val_acc did not improve from 0.86765\n",
+ "Epoch 2230/3000\n",
+ " - 39s - loss: 0.2364 - acc: 0.9725 - val_loss: 0.9576 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02230: val_acc did not improve from 0.86765\n",
+ "Epoch 2231/3000\n",
+ " - 39s - loss: 0.2321 - acc: 0.9722 - val_loss: 0.9319 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02231: val_acc did not improve from 0.86765\n",
+ "Epoch 2232/3000\n",
+ " - 39s - loss: 0.2298 - acc: 0.9754 - val_loss: 0.9285 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02232: val_acc did not improve from 0.86765\n",
+ "Epoch 2233/3000\n",
+ " - 39s - loss: 0.2407 - acc: 0.9713 - val_loss: 0.9410 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02233: val_acc did not improve from 0.86765\n",
+ "Epoch 2234/3000\n",
+ " - 39s - loss: 0.2410 - acc: 0.9716 - val_loss: 0.9212 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02234: val_acc did not improve from 0.86765\n",
+ "Epoch 2235/3000\n",
+ " - 39s - loss: 0.2364 - acc: 0.9715 - val_loss: 0.9463 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02235: val_acc did not improve from 0.86765\n",
+ "Epoch 2236/3000\n",
+ " - 39s - loss: 0.2395 - acc: 0.9695 - val_loss: 0.9439 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02236: val_acc did not improve from 0.86765\n",
+ "Epoch 2237/3000\n",
+ " - 39s - loss: 0.2340 - acc: 0.9709 - val_loss: 0.9286 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02237: val_acc did not improve from 0.86765\n",
+ "Epoch 2238/3000\n",
+ " - 39s - loss: 0.2371 - acc: 0.9722 - val_loss: 0.9477 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02238: val_acc did not improve from 0.86765\n",
+ "Epoch 2239/3000\n",
+ " - 39s - loss: 0.2282 - acc: 0.9731 - val_loss: 0.9587 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02239: val_acc did not improve from 0.86765\n",
+ "Epoch 2240/3000\n",
+ " - 39s - loss: 0.2267 - acc: 0.9740 - val_loss: 0.9578 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02240: val_acc did not improve from 0.86765\n",
+ "Epoch 2241/3000\n",
+ " - 39s - loss: 0.2365 - acc: 0.9749 - val_loss: 0.9466 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02241: val_acc did not improve from 0.86765\n",
+ "Epoch 2242/3000\n",
+ " - 39s - loss: 0.2370 - acc: 0.9728 - val_loss: 0.9706 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02242: val_acc did not improve from 0.86765\n",
+ "Epoch 2243/3000\n",
+ " - 40s - loss: 0.2349 - acc: 0.9739 - val_loss: 0.9677 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02243: val_acc did not improve from 0.86765\n",
+ "Epoch 2244/3000\n",
+ " - 39s - loss: 0.2303 - acc: 0.9727 - val_loss: 0.9443 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02244: val_acc did not improve from 0.86765\n",
+ "Epoch 2245/3000\n",
+ " - 39s - loss: 0.2318 - acc: 0.9745 - val_loss: 0.9391 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02245: val_acc did not improve from 0.86765\n",
+ "Epoch 2246/3000\n",
+ " - 39s - loss: 0.2316 - acc: 0.9724 - val_loss: 0.9383 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02246: val_acc did not improve from 0.86765\n",
+ "Epoch 2247/3000\n",
+ " - 39s - loss: 0.2374 - acc: 0.9715 - val_loss: 0.9841 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02247: val_acc did not improve from 0.86765\n",
+ "Epoch 2248/3000\n",
+ " - 39s - loss: 0.2327 - acc: 0.9751 - val_loss: 0.9544 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02248: val_acc did not improve from 0.86765\n",
+ "Epoch 2249/3000\n",
+ " - 39s - loss: 0.2334 - acc: 0.9713 - val_loss: 0.9532 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02249: val_acc did not improve from 0.86765\n",
+ "Epoch 2250/3000\n",
+ " - 39s - loss: 0.2449 - acc: 0.9736 - val_loss: 0.9496 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02250: val_acc did not improve from 0.86765\n",
+ "Epoch 2251/3000\n",
+ " - 39s - loss: 0.2412 - acc: 0.9703 - val_loss: 0.9364 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02251: val_acc did not improve from 0.86765\n",
+ "Epoch 2252/3000\n",
+ " - 39s - loss: 0.2331 - acc: 0.9709 - val_loss: 0.9697 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02252: val_acc did not improve from 0.86765\n",
+ "Epoch 2253/3000\n",
+ " - 39s - loss: 0.2366 - acc: 0.9722 - val_loss: 0.9391 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02253: val_acc did not improve from 0.86765\n",
+ "Epoch 2254/3000\n",
+ " - 39s - loss: 0.2298 - acc: 0.9725 - val_loss: 0.9516 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02254: val_acc did not improve from 0.86765\n",
+ "Epoch 2255/3000\n",
+ " - 39s - loss: 0.2364 - acc: 0.9715 - val_loss: 0.8996 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 02255: val_acc did not improve from 0.86765\n",
+ "Epoch 2256/3000\n",
+ " - 39s - loss: 0.2331 - acc: 0.9755 - val_loss: 0.9286 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02256: val_acc did not improve from 0.86765\n",
+ "Epoch 2257/3000\n",
+ " - 39s - loss: 0.2300 - acc: 0.9751 - val_loss: 0.9458 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02257: val_acc did not improve from 0.86765\n",
+ "Epoch 2258/3000\n",
+ " - 39s - loss: 0.2352 - acc: 0.9733 - val_loss: 0.9911 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 02258: val_acc did not improve from 0.86765\n",
+ "Epoch 2259/3000\n",
+ " - 39s - loss: 0.2475 - acc: 0.9712 - val_loss: 0.9554 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02259: val_acc did not improve from 0.86765\n",
+ "Epoch 2260/3000\n",
+ " - 39s - loss: 0.2407 - acc: 0.9706 - val_loss: 0.9421 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02260: val_acc did not improve from 0.86765\n",
+ "Epoch 2261/3000\n",
+ " - 39s - loss: 0.2250 - acc: 0.9749 - val_loss: 0.9583 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02261: val_acc did not improve from 0.86765\n",
+ "Epoch 2262/3000\n",
+ " - 39s - loss: 0.2343 - acc: 0.9740 - val_loss: 0.9705 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02262: val_acc did not improve from 0.86765\n",
+ "Epoch 2263/3000\n",
+ " - 39s - loss: 0.2349 - acc: 0.9743 - val_loss: 0.9594 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02263: val_acc did not improve from 0.86765\n",
+ "Epoch 2264/3000\n",
+ " - 39s - loss: 0.2313 - acc: 0.9727 - val_loss: 0.9568 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02264: val_acc did not improve from 0.86765\n",
+ "Epoch 2265/3000\n",
+ " - 39s - loss: 0.2436 - acc: 0.9706 - val_loss: 0.9479 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02265: val_acc did not improve from 0.86765\n",
+ "Epoch 2266/3000\n",
+ " - 39s - loss: 0.2275 - acc: 0.9758 - val_loss: 0.9625 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02266: val_acc did not improve from 0.86765\n",
+ "Epoch 2267/3000\n",
+ " - 39s - loss: 0.2402 - acc: 0.9725 - val_loss: 0.9445 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02267: val_acc did not improve from 0.86765\n",
+ "Epoch 2268/3000\n",
+ " - 39s - loss: 0.2369 - acc: 0.9709 - val_loss: 0.9346 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02268: val_acc did not improve from 0.86765\n",
+ "Epoch 2269/3000\n",
+ " - 39s - loss: 0.2377 - acc: 0.9715 - val_loss: 0.9488 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02269: val_acc did not improve from 0.86765\n",
+ "Epoch 2270/3000\n",
+ " - 39s - loss: 0.2398 - acc: 0.9719 - val_loss: 0.9507 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02270: val_acc did not improve from 0.86765\n",
+ "Epoch 2271/3000\n",
+ " - 39s - loss: 0.2332 - acc: 0.9731 - val_loss: 0.9596 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02271: val_acc did not improve from 0.86765\n",
+ "Epoch 2272/3000\n",
+ " - 39s - loss: 0.2426 - acc: 0.9715 - val_loss: 0.9287 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02272: val_acc did not improve from 0.86765\n",
+ "Epoch 2273/3000\n",
+ " - 39s - loss: 0.2332 - acc: 0.9737 - val_loss: 0.9244 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02273: val_acc did not improve from 0.86765\n",
+ "Epoch 2274/3000\n",
+ " - 39s - loss: 0.2369 - acc: 0.9713 - val_loss: 0.9526 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02274: val_acc did not improve from 0.86765\n",
+ "Epoch 2275/3000\n",
+ " - 39s - loss: 0.2307 - acc: 0.9730 - val_loss: 0.9783 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02275: val_acc did not improve from 0.86765\n",
+ "Epoch 2276/3000\n",
+ " - 39s - loss: 0.2444 - acc: 0.9700 - val_loss: 0.9924 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 02276: val_acc did not improve from 0.86765\n",
+ "Epoch 2277/3000\n",
+ " - 39s - loss: 0.2332 - acc: 0.9728 - val_loss: 0.9869 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02277: val_acc did not improve from 0.86765\n",
+ "Epoch 2278/3000\n",
+ " - 39s - loss: 0.2401 - acc: 0.9710 - val_loss: 0.9591 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02278: val_acc did not improve from 0.86765\n",
+ "Epoch 2279/3000\n",
+ " - 39s - loss: 0.2268 - acc: 0.9752 - val_loss: 0.9801 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02279: val_acc did not improve from 0.86765\n",
+ "Epoch 2280/3000\n",
+ " - 40s - loss: 0.2347 - acc: 0.9706 - val_loss: 0.9621 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02280: val_acc did not improve from 0.86765\n",
+ "Epoch 2281/3000\n",
+ " - 39s - loss: 0.2374 - acc: 0.9719 - val_loss: 0.9544 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02281: val_acc did not improve from 0.86765\n",
+ "Epoch 2282/3000\n",
+ " - 39s - loss: 0.2396 - acc: 0.9734 - val_loss: 0.9680 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02282: val_acc did not improve from 0.86765\n",
+ "Epoch 2283/3000\n",
+ " - 39s - loss: 0.2434 - acc: 0.9713 - val_loss: 0.9440 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02283: val_acc did not improve from 0.86765\n",
+ "Epoch 2284/3000\n",
+ " - 39s - loss: 0.2409 - acc: 0.9706 - val_loss: 0.9591 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02284: val_acc did not improve from 0.86765\n",
+ "Epoch 2285/3000\n",
+ " - 39s - loss: 0.2370 - acc: 0.9715 - val_loss: 0.9714 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 02285: val_acc did not improve from 0.86765\n",
+ "Epoch 2286/3000\n",
+ " - 39s - loss: 0.2458 - acc: 0.9716 - val_loss: 0.9103 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 02286: val_acc did not improve from 0.86765\n",
+ "Epoch 2287/3000\n",
+ " - 39s - loss: 0.2333 - acc: 0.9718 - val_loss: 0.9534 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02287: val_acc did not improve from 0.86765\n",
+ "Epoch 2288/3000\n",
+ " - 39s - loss: 0.2379 - acc: 0.9722 - val_loss: 0.9320 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02288: val_acc did not improve from 0.86765\n",
+ "Epoch 2289/3000\n",
+ " - 39s - loss: 0.2417 - acc: 0.9706 - val_loss: 0.9663 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02289: val_acc did not improve from 0.86765\n",
+ "Epoch 2290/3000\n",
+ " - 39s - loss: 0.2416 - acc: 0.9695 - val_loss: 0.9348 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02290: val_acc did not improve from 0.86765\n",
+ "Epoch 2291/3000\n",
+ " - 39s - loss: 0.2304 - acc: 0.9737 - val_loss: 0.9505 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02291: val_acc did not improve from 0.86765\n",
+ "Epoch 2292/3000\n",
+ " - 39s - loss: 0.2349 - acc: 0.9724 - val_loss: 0.9879 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02292: val_acc did not improve from 0.86765\n",
+ "Epoch 2293/3000\n",
+ " - 39s - loss: 0.2395 - acc: 0.9724 - val_loss: 0.9370 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02293: val_acc did not improve from 0.86765\n",
+ "Epoch 2294/3000\n",
+ " - 39s - loss: 0.2269 - acc: 0.9745 - val_loss: 0.9260 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 02294: val_acc did not improve from 0.86765\n",
+ "Epoch 2295/3000\n",
+ " - 39s - loss: 0.2341 - acc: 0.9719 - val_loss: 0.9530 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02295: val_acc did not improve from 0.86765\n",
+ "Epoch 2296/3000\n",
+ " - 39s - loss: 0.2326 - acc: 0.9739 - val_loss: 0.9625 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02296: val_acc did not improve from 0.86765\n",
+ "Epoch 2297/3000\n",
+ " - 39s - loss: 0.2346 - acc: 0.9728 - val_loss: 0.9527 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02297: val_acc did not improve from 0.86765\n",
+ "Epoch 2298/3000\n",
+ " - 39s - loss: 0.2298 - acc: 0.9751 - val_loss: 0.9355 - val_acc: 0.8622\n",
+ "\n",
+ "Epoch 02298: val_acc did not improve from 0.86765\n",
+ "Epoch 2299/3000\n",
+ " - 39s - loss: 0.2377 - acc: 0.9727 - val_loss: 0.9402 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 02299: val_acc did not improve from 0.86765\n",
+ "Epoch 2300/3000\n",
+ " - 39s - loss: 0.2352 - acc: 0.9703 - val_loss: 0.9541 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02300: val_acc did not improve from 0.86765\n",
+ "Epoch 2301/3000\n",
+ " - 39s - loss: 0.2377 - acc: 0.9697 - val_loss: 0.9476 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02301: val_acc did not improve from 0.86765\n",
+ "Epoch 2302/3000\n",
+ " - 39s - loss: 0.2423 - acc: 0.9713 - val_loss: 0.9606 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02302: val_acc did not improve from 0.86765\n",
+ "Epoch 2303/3000\n",
+ " - 39s - loss: 0.2370 - acc: 0.9739 - val_loss: 0.9781 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02303: val_acc did not improve from 0.86765\n",
+ "Epoch 2304/3000\n",
+ " - 39s - loss: 0.2329 - acc: 0.9734 - val_loss: 0.9507 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02304: val_acc did not improve from 0.86765\n",
+ "Epoch 2305/3000\n",
+ " - 39s - loss: 0.2299 - acc: 0.9740 - val_loss: 0.9564 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02305: val_acc did not improve from 0.86765\n",
+ "Epoch 2306/3000\n",
+ " - 39s - loss: 0.2347 - acc: 0.9706 - val_loss: 0.9826 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02306: val_acc did not improve from 0.86765\n",
+ "Epoch 2307/3000\n",
+ " - 40s - loss: 0.2338 - acc: 0.9752 - val_loss: 0.9546 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02307: val_acc did not improve from 0.86765\n",
+ "Epoch 2308/3000\n",
+ " - 39s - loss: 0.2329 - acc: 0.9734 - val_loss: 0.9902 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 02308: val_acc did not improve from 0.86765\n",
+ "Epoch 2309/3000\n",
+ " - 39s - loss: 0.2305 - acc: 0.9739 - val_loss: 0.9832 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02309: val_acc did not improve from 0.86765\n",
+ "Epoch 2310/3000\n",
+ " - 39s - loss: 0.2378 - acc: 0.9724 - val_loss: 0.9368 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02310: val_acc did not improve from 0.86765\n",
+ "Epoch 2311/3000\n",
+ " - 39s - loss: 0.2327 - acc: 0.9722 - val_loss: 0.9683 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02311: val_acc did not improve from 0.86765\n",
+ "Epoch 2312/3000\n",
+ " - 39s - loss: 0.2329 - acc: 0.9751 - val_loss: 0.9598 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02312: val_acc did not improve from 0.86765\n",
+ "Epoch 2313/3000\n",
+ " - 39s - loss: 0.2360 - acc: 0.9700 - val_loss: 0.9626 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02313: val_acc did not improve from 0.86765\n",
+ "Epoch 2314/3000\n",
+ " - 39s - loss: 0.2373 - acc: 0.9713 - val_loss: 0.9544 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02314: val_acc did not improve from 0.86765\n",
+ "Epoch 2315/3000\n",
+ " - 39s - loss: 0.2336 - acc: 0.9739 - val_loss: 0.9574 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02315: val_acc did not improve from 0.86765\n",
+ "Epoch 2316/3000\n",
+ " - 39s - loss: 0.2289 - acc: 0.9742 - val_loss: 0.9602 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02316: val_acc did not improve from 0.86765\n",
+ "Epoch 2317/3000\n",
+ " - 39s - loss: 0.2406 - acc: 0.9706 - val_loss: 0.9705 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02317: val_acc did not improve from 0.86765\n",
+ "Epoch 2318/3000\n",
+ " - 39s - loss: 0.2368 - acc: 0.9737 - val_loss: 0.9730 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 02318: val_acc did not improve from 0.86765\n",
+ "Epoch 2319/3000\n",
+ " - 39s - loss: 0.2417 - acc: 0.9697 - val_loss: 0.9522 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02319: val_acc did not improve from 0.86765\n",
+ "Epoch 2320/3000\n",
+ " - 39s - loss: 0.2344 - acc: 0.9742 - val_loss: 0.9510 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02320: val_acc did not improve from 0.86765\n",
+ "Epoch 2321/3000\n",
+ " - 39s - loss: 0.2352 - acc: 0.9721 - val_loss: 0.9601 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02321: val_acc did not improve from 0.86765\n",
+ "Epoch 2322/3000\n",
+ " - 39s - loss: 0.2391 - acc: 0.9703 - val_loss: 0.9777 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02322: val_acc did not improve from 0.86765\n",
+ "Epoch 2323/3000\n",
+ " - 39s - loss: 0.2274 - acc: 0.9746 - val_loss: 0.9618 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 02323: val_acc did not improve from 0.86765\n",
+ "Epoch 2324/3000\n",
+ " - 39s - loss: 0.2361 - acc: 0.9724 - val_loss: 0.9341 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 02324: val_acc did not improve from 0.86765\n",
+ "Epoch 2325/3000\n",
+ " - 39s - loss: 0.2343 - acc: 0.9731 - val_loss: 0.9430 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02325: val_acc did not improve from 0.86765\n",
+ "Epoch 2326/3000\n",
+ " - 39s - loss: 0.2407 - acc: 0.9701 - val_loss: 0.9950 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 02326: val_acc did not improve from 0.86765\n",
+ "Epoch 2327/3000\n",
+ " - 39s - loss: 0.2340 - acc: 0.9721 - val_loss: 0.9374 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02327: val_acc did not improve from 0.86765\n",
+ "Epoch 2328/3000\n",
+ " - 39s - loss: 0.2312 - acc: 0.9724 - val_loss: 0.9416 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02328: val_acc did not improve from 0.86765\n",
+ "Epoch 2329/3000\n",
+ " - 39s - loss: 0.2384 - acc: 0.9706 - val_loss: 0.9337 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02329: val_acc did not improve from 0.86765\n",
+ "Epoch 2330/3000\n",
+ " - 39s - loss: 0.2323 - acc: 0.9742 - val_loss: 0.9485 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02330: val_acc did not improve from 0.86765\n",
+ "Epoch 2331/3000\n",
+ " - 39s - loss: 0.2345 - acc: 0.9718 - val_loss: 0.9293 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02331: val_acc did not improve from 0.86765\n",
+ "Epoch 2332/3000\n",
+ " - 39s - loss: 0.2379 - acc: 0.9713 - val_loss: 0.9305 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02332: val_acc did not improve from 0.86765\n",
+ "Epoch 2333/3000\n",
+ " - 39s - loss: 0.2331 - acc: 0.9727 - val_loss: 0.9679 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02333: val_acc did not improve from 0.86765\n",
+ "Epoch 2334/3000\n",
+ " - 39s - loss: 0.2330 - acc: 0.9736 - val_loss: 0.9430 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02334: val_acc did not improve from 0.86765\n",
+ "Epoch 2335/3000\n",
+ " - 39s - loss: 0.2468 - acc: 0.9701 - val_loss: 0.9728 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02335: val_acc did not improve from 0.86765\n",
+ "Epoch 2336/3000\n",
+ " - 39s - loss: 0.2368 - acc: 0.9706 - val_loss: 0.9896 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 02336: val_acc did not improve from 0.86765\n",
+ "Epoch 2337/3000\n",
+ " - 40s - loss: 0.2454 - acc: 0.9684 - val_loss: 0.9457 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02337: val_acc did not improve from 0.86765\n",
+ "Epoch 2338/3000\n",
+ " - 39s - loss: 0.2317 - acc: 0.9755 - val_loss: 0.9464 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02338: val_acc did not improve from 0.86765\n",
+ "Epoch 2339/3000\n",
+ " - 39s - loss: 0.2358 - acc: 0.9739 - val_loss: 0.9594 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02339: val_acc did not improve from 0.86765\n",
+ "Epoch 2340/3000\n",
+ " - 39s - loss: 0.2298 - acc: 0.9768 - val_loss: 0.9532 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02340: val_acc did not improve from 0.86765\n",
+ "Epoch 2341/3000\n",
+ " - 39s - loss: 0.2338 - acc: 0.9739 - val_loss: 0.9779 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 02341: val_acc did not improve from 0.86765\n",
+ "Epoch 2342/3000\n",
+ " - 39s - loss: 0.2341 - acc: 0.9742 - val_loss: 0.9827 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02342: val_acc did not improve from 0.86765\n",
+ "Epoch 2343/3000\n",
+ " - 39s - loss: 0.2250 - acc: 0.9749 - val_loss: 0.9957 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 02343: val_acc did not improve from 0.86765\n",
+ "Epoch 2344/3000\n",
+ " - 39s - loss: 0.2408 - acc: 0.9709 - val_loss: 0.9604 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02344: val_acc did not improve from 0.86765\n",
+ "Epoch 2345/3000\n",
+ " - 39s - loss: 0.2307 - acc: 0.9737 - val_loss: 0.9637 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02345: val_acc did not improve from 0.86765\n",
+ "Epoch 2346/3000\n",
+ " - 39s - loss: 0.2378 - acc: 0.9724 - val_loss: 0.9627 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02346: val_acc did not improve from 0.86765\n",
+ "Epoch 2347/3000\n",
+ " - 39s - loss: 0.2288 - acc: 0.9754 - val_loss: 0.9457 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02347: val_acc did not improve from 0.86765\n",
+ "Epoch 2348/3000\n",
+ " - 39s - loss: 0.2279 - acc: 0.9745 - val_loss: 0.9546 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02348: val_acc did not improve from 0.86765\n",
+ "Epoch 2349/3000\n",
+ " - 40s - loss: 0.2394 - acc: 0.9701 - val_loss: 0.9547 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02349: val_acc did not improve from 0.86765\n",
+ "Epoch 2350/3000\n",
+ " - 39s - loss: 0.2361 - acc: 0.9727 - val_loss: 0.9519 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02350: val_acc did not improve from 0.86765\n",
+ "Epoch 2351/3000\n",
+ " - 39s - loss: 0.2267 - acc: 0.9761 - val_loss: 0.9496 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02351: val_acc did not improve from 0.86765\n",
+ "Epoch 2352/3000\n",
+ " - 39s - loss: 0.2281 - acc: 0.9731 - val_loss: 0.9460 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02352: val_acc did not improve from 0.86765\n",
+ "Epoch 2353/3000\n",
+ " - 39s - loss: 0.2221 - acc: 0.9764 - val_loss: 0.9533 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02353: val_acc did not improve from 0.86765\n",
+ "Epoch 2354/3000\n",
+ " - 39s - loss: 0.2286 - acc: 0.9740 - val_loss: 0.9180 - val_acc: 0.8614\n",
+ "\n",
+ "Epoch 02354: val_acc did not improve from 0.86765\n",
+ "Epoch 2355/3000\n",
+ " - 39s - loss: 0.2366 - acc: 0.9704 - val_loss: 0.9643 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02355: val_acc did not improve from 0.86765\n",
+ "Epoch 2356/3000\n",
+ " - 39s - loss: 0.2323 - acc: 0.9748 - val_loss: 0.9968 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 02356: val_acc did not improve from 0.86765\n",
+ "Epoch 2357/3000\n",
+ " - 39s - loss: 0.2421 - acc: 0.9728 - val_loss: 0.9898 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02357: val_acc did not improve from 0.86765\n",
+ "Epoch 2358/3000\n",
+ " - 39s - loss: 0.2383 - acc: 0.9716 - val_loss: 1.0030 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02358: val_acc did not improve from 0.86765\n",
+ "Epoch 2359/3000\n",
+ " - 39s - loss: 0.2393 - acc: 0.9698 - val_loss: 0.9627 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02359: val_acc did not improve from 0.86765\n",
+ "Epoch 2360/3000\n",
+ " - 39s - loss: 0.2366 - acc: 0.9730 - val_loss: 0.9595 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02360: val_acc did not improve from 0.86765\n",
+ "Epoch 2361/3000\n",
+ " - 39s - loss: 0.2356 - acc: 0.9719 - val_loss: 0.9889 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 02361: val_acc did not improve from 0.86765\n",
+ "Epoch 2362/3000\n",
+ " - 39s - loss: 0.2401 - acc: 0.9715 - val_loss: 0.9462 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02362: val_acc did not improve from 0.86765\n",
+ "Epoch 2363/3000\n",
+ " - 39s - loss: 0.2273 - acc: 0.9721 - val_loss: 0.9555 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02363: val_acc did not improve from 0.86765\n",
+ "Epoch 2364/3000\n",
+ " - 39s - loss: 0.2462 - acc: 0.9697 - val_loss: 0.9372 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02364: val_acc did not improve from 0.86765\n",
+ "Epoch 2365/3000\n",
+ " - 39s - loss: 0.2343 - acc: 0.9715 - val_loss: 0.9402 - val_acc: 0.8618\n",
+ "\n",
+ "Epoch 02365: val_acc did not improve from 0.86765\n",
+ "Epoch 2366/3000\n",
+ " - 39s - loss: 0.2407 - acc: 0.9716 - val_loss: 0.9570 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02366: val_acc did not improve from 0.86765\n",
+ "Epoch 2367/3000\n",
+ " - 39s - loss: 0.2407 - acc: 0.9724 - val_loss: 0.9670 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02367: val_acc did not improve from 0.86765\n",
+ "Epoch 2368/3000\n",
+ " - 39s - loss: 0.2348 - acc: 0.9709 - val_loss: 0.9481 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02368: val_acc did not improve from 0.86765\n",
+ "Epoch 2369/3000\n",
+ " - 39s - loss: 0.2352 - acc: 0.9718 - val_loss: 0.9554 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02369: val_acc did not improve from 0.86765\n",
+ "Epoch 2370/3000\n",
+ " - 39s - loss: 0.2406 - acc: 0.9712 - val_loss: 0.9645 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02370: val_acc did not improve from 0.86765\n",
+ "Epoch 2371/3000\n",
+ " - 39s - loss: 0.2305 - acc: 0.9727 - val_loss: 0.9433 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02371: val_acc did not improve from 0.86765\n",
+ "Epoch 2372/3000\n",
+ " - 39s - loss: 0.2354 - acc: 0.9734 - val_loss: 0.9649 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02372: val_acc did not improve from 0.86765\n",
+ "Epoch 2373/3000\n",
+ " - 39s - loss: 0.2365 - acc: 0.9727 - val_loss: 0.9369 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02373: val_acc did not improve from 0.86765\n",
+ "Epoch 2374/3000\n",
+ " - 39s - loss: 0.2338 - acc: 0.9725 - val_loss: 0.9547 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02374: val_acc did not improve from 0.86765\n",
+ "Epoch 2375/3000\n",
+ " - 39s - loss: 0.2344 - acc: 0.9721 - val_loss: 0.9469 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02375: val_acc did not improve from 0.86765\n",
+ "Epoch 2376/3000\n",
+ " - 39s - loss: 0.2364 - acc: 0.9701 - val_loss: 0.9474 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02376: val_acc did not improve from 0.86765\n",
+ "Epoch 2377/3000\n",
+ " - 39s - loss: 0.2332 - acc: 0.9728 - val_loss: 0.9655 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 02377: val_acc did not improve from 0.86765\n",
+ "Epoch 2378/3000\n",
+ " - 39s - loss: 0.2319 - acc: 0.9721 - val_loss: 0.9265 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 02378: val_acc did not improve from 0.86765\n",
+ "Epoch 2379/3000\n",
+ " - 39s - loss: 0.2421 - acc: 0.9695 - val_loss: 0.9320 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02379: val_acc did not improve from 0.86765\n",
+ "Epoch 2380/3000\n",
+ " - 39s - loss: 0.2383 - acc: 0.9712 - val_loss: 0.9467 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02380: val_acc did not improve from 0.86765\n",
+ "Epoch 2381/3000\n",
+ " - 39s - loss: 0.2349 - acc: 0.9754 - val_loss: 0.9249 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02381: val_acc did not improve from 0.86765\n",
+ "Epoch 2382/3000\n",
+ " - 39s - loss: 0.2360 - acc: 0.9727 - val_loss: 0.9564 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02382: val_acc did not improve from 0.86765\n",
+ "Epoch 2383/3000\n",
+ " - 39s - loss: 0.2383 - acc: 0.9704 - val_loss: 0.9617 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02383: val_acc did not improve from 0.86765\n",
+ "Epoch 2384/3000\n",
+ " - 39s - loss: 0.2382 - acc: 0.9722 - val_loss: 0.9808 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 02384: val_acc did not improve from 0.86765\n",
+ "Epoch 2385/3000\n",
+ " - 39s - loss: 0.2377 - acc: 0.9706 - val_loss: 0.9643 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02385: val_acc did not improve from 0.86765\n",
+ "Epoch 2386/3000\n",
+ " - 39s - loss: 0.2300 - acc: 0.9739 - val_loss: 0.9172 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 02386: val_acc did not improve from 0.86765\n",
+ "Epoch 2387/3000\n",
+ " - 39s - loss: 0.2377 - acc: 0.9721 - val_loss: 0.9827 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 02387: val_acc did not improve from 0.86765\n",
+ "Epoch 2388/3000\n",
+ " - 39s - loss: 0.2330 - acc: 0.9716 - val_loss: 0.9644 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02388: val_acc did not improve from 0.86765\n",
+ "Epoch 2389/3000\n",
+ " - 39s - loss: 0.2332 - acc: 0.9733 - val_loss: 0.9686 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02389: val_acc did not improve from 0.86765\n",
+ "Epoch 2390/3000\n",
+ " - 39s - loss: 0.2303 - acc: 0.9725 - val_loss: 0.9571 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02390: val_acc did not improve from 0.86765\n",
+ "Epoch 2391/3000\n",
+ " - 39s - loss: 0.2359 - acc: 0.9733 - val_loss: 0.9938 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 02391: val_acc did not improve from 0.86765\n",
+ "Epoch 2392/3000\n",
+ " - 39s - loss: 0.2348 - acc: 0.9730 - val_loss: 0.9525 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02392: val_acc did not improve from 0.86765\n",
+ "Epoch 2393/3000\n",
+ " - 39s - loss: 0.2350 - acc: 0.9722 - val_loss: 0.9352 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02393: val_acc did not improve from 0.86765\n",
+ "Epoch 2394/3000\n",
+ " - 39s - loss: 0.2408 - acc: 0.9712 - val_loss: 0.9257 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02394: val_acc did not improve from 0.86765\n",
+ "Epoch 2395/3000\n",
+ " - 39s - loss: 0.2330 - acc: 0.9755 - val_loss: 0.9573 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02395: val_acc did not improve from 0.86765\n",
+ "Epoch 2396/3000\n",
+ " - 39s - loss: 0.2319 - acc: 0.9743 - val_loss: 0.9582 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02396: val_acc did not improve from 0.86765\n",
+ "Epoch 2397/3000\n",
+ " - 39s - loss: 0.2382 - acc: 0.9722 - val_loss: 0.9603 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02397: val_acc did not improve from 0.86765\n",
+ "Epoch 2398/3000\n",
+ " - 39s - loss: 0.2355 - acc: 0.9739 - val_loss: 0.9519 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02398: val_acc did not improve from 0.86765\n",
+ "Epoch 2399/3000\n",
+ " - 40s - loss: 0.2421 - acc: 0.9692 - val_loss: 0.9390 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02399: val_acc did not improve from 0.86765\n",
+ "Epoch 2400/3000\n",
+ " - 39s - loss: 0.2384 - acc: 0.9721 - val_loss: 0.9542 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02400: val_acc did not improve from 0.86765\n",
+ "Epoch 2401/3000\n",
+ " - 39s - loss: 0.2338 - acc: 0.9713 - val_loss: 0.9516 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02401: val_acc did not improve from 0.86765\n",
+ "Epoch 2402/3000\n",
+ " - 39s - loss: 0.2322 - acc: 0.9724 - val_loss: 0.9321 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02402: val_acc did not improve from 0.86765\n",
+ "Epoch 2403/3000\n",
+ " - 39s - loss: 0.2332 - acc: 0.9742 - val_loss: 0.9507 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02403: val_acc did not improve from 0.86765\n",
+ "Epoch 2404/3000\n",
+ " - 39s - loss: 0.2259 - acc: 0.9778 - val_loss: 0.9700 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02404: val_acc did not improve from 0.86765\n",
+ "Epoch 2405/3000\n",
+ " - 39s - loss: 0.2346 - acc: 0.9733 - val_loss: 0.9633 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02405: val_acc did not improve from 0.86765\n",
+ "Epoch 2406/3000\n",
+ " - 40s - loss: 0.2353 - acc: 0.9703 - val_loss: 0.9576 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02406: val_acc did not improve from 0.86765\n",
+ "Epoch 2407/3000\n",
+ " - 39s - loss: 0.2391 - acc: 0.9728 - val_loss: 0.9422 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02407: val_acc did not improve from 0.86765\n",
+ "Epoch 2408/3000\n",
+ " - 39s - loss: 0.2293 - acc: 0.9728 - val_loss: 0.9483 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02408: val_acc did not improve from 0.86765\n",
+ "Epoch 2409/3000\n",
+ " - 38s - loss: 0.2352 - acc: 0.9721 - val_loss: 0.9434 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02409: val_acc did not improve from 0.86765\n",
+ "Epoch 2410/3000\n",
+ " - 39s - loss: 0.2354 - acc: 0.9728 - val_loss: 0.9679 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 02410: val_acc did not improve from 0.86765\n",
+ "Epoch 2411/3000\n",
+ " - 39s - loss: 0.2346 - acc: 0.9725 - val_loss: 0.9551 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 02411: val_acc did not improve from 0.86765\n",
+ "Epoch 2412/3000\n",
+ " - 39s - loss: 0.2420 - acc: 0.9716 - val_loss: 0.9242 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02412: val_acc did not improve from 0.86765\n",
+ "Epoch 2413/3000\n",
+ " - 39s - loss: 0.2378 - acc: 0.9716 - val_loss: 0.9776 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02413: val_acc did not improve from 0.86765\n",
+ "Epoch 2414/3000\n",
+ " - 39s - loss: 0.2318 - acc: 0.9737 - val_loss: 0.9731 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 02414: val_acc did not improve from 0.86765\n",
+ "Epoch 2415/3000\n",
+ " - 39s - loss: 0.2316 - acc: 0.9739 - val_loss: 0.9609 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02415: val_acc did not improve from 0.86765\n",
+ "Epoch 2416/3000\n",
+ " - 40s - loss: 0.2380 - acc: 0.9728 - val_loss: 0.9770 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 02416: val_acc did not improve from 0.86765\n",
+ "Epoch 2417/3000\n",
+ " - 39s - loss: 0.2372 - acc: 0.9718 - val_loss: 0.9951 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 02417: val_acc did not improve from 0.86765\n",
+ "Epoch 2418/3000\n",
+ " - 39s - loss: 0.2322 - acc: 0.9746 - val_loss: 0.9608 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02418: val_acc did not improve from 0.86765\n",
+ "Epoch 2419/3000\n",
+ " - 39s - loss: 0.2309 - acc: 0.9728 - val_loss: 0.9579 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02419: val_acc did not improve from 0.86765\n",
+ "Epoch 2420/3000\n",
+ " - 39s - loss: 0.2315 - acc: 0.9733 - val_loss: 0.9645 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02420: val_acc did not improve from 0.86765\n",
+ "Epoch 2421/3000\n",
+ " - 39s - loss: 0.2332 - acc: 0.9742 - val_loss: 0.9873 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 02421: val_acc did not improve from 0.86765\n",
+ "Epoch 2422/3000\n",
+ " - 39s - loss: 0.2319 - acc: 0.9727 - val_loss: 0.9648 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02422: val_acc did not improve from 0.86765\n",
+ "Epoch 2423/3000\n",
+ " - 39s - loss: 0.2371 - acc: 0.9728 - val_loss: 0.9491 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02423: val_acc did not improve from 0.86765\n",
+ "Epoch 2424/3000\n",
+ " - 39s - loss: 0.2364 - acc: 0.9710 - val_loss: 0.9632 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02424: val_acc did not improve from 0.86765\n",
+ "Epoch 2425/3000\n",
+ " - 39s - loss: 0.2204 - acc: 0.9777 - val_loss: 0.9756 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02425: val_acc did not improve from 0.86765\n",
+ "Epoch 2426/3000\n",
+ " - 39s - loss: 0.2391 - acc: 0.9728 - val_loss: 0.9553 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02426: val_acc did not improve from 0.86765\n",
+ "Epoch 2427/3000\n",
+ " - 39s - loss: 0.2260 - acc: 0.9752 - val_loss: 0.9708 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02427: val_acc did not improve from 0.86765\n",
+ "Epoch 2428/3000\n",
+ " - 39s - loss: 0.2416 - acc: 0.9701 - val_loss: 0.9735 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02428: val_acc did not improve from 0.86765\n",
+ "Epoch 2429/3000\n",
+ " - 39s - loss: 0.2320 - acc: 0.9709 - val_loss: 0.9532 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02429: val_acc did not improve from 0.86765\n",
+ "Epoch 2430/3000\n",
+ " - 39s - loss: 0.2377 - acc: 0.9721 - val_loss: 0.9513 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02430: val_acc did not improve from 0.86765\n",
+ "Epoch 2431/3000\n",
+ " - 39s - loss: 0.2286 - acc: 0.9730 - val_loss: 0.9829 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 02431: val_acc did not improve from 0.86765\n",
+ "Epoch 2432/3000\n",
+ " - 39s - loss: 0.2423 - acc: 0.9715 - val_loss: 0.9423 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02432: val_acc did not improve from 0.86765\n",
+ "Epoch 2433/3000\n",
+ " - 39s - loss: 0.2361 - acc: 0.9718 - val_loss: 0.9475 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02433: val_acc did not improve from 0.86765\n",
+ "Epoch 2434/3000\n",
+ " - 39s - loss: 0.2335 - acc: 0.9727 - val_loss: 0.9301 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02434: val_acc did not improve from 0.86765\n",
+ "Epoch 2435/3000\n",
+ " - 39s - loss: 0.2265 - acc: 0.9745 - val_loss: 0.9652 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02435: val_acc did not improve from 0.86765\n",
+ "Epoch 2436/3000\n",
+ " - 39s - loss: 0.2302 - acc: 0.9724 - val_loss: 0.9334 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02436: val_acc did not improve from 0.86765\n",
+ "Epoch 2437/3000\n",
+ " - 39s - loss: 0.2455 - acc: 0.9684 - val_loss: 0.9630 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02437: val_acc did not improve from 0.86765\n",
+ "Epoch 2438/3000\n",
+ " - 39s - loss: 0.2404 - acc: 0.9700 - val_loss: 0.9709 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02438: val_acc did not improve from 0.86765\n",
+ "Epoch 2439/3000\n",
+ " - 39s - loss: 0.2302 - acc: 0.9777 - val_loss: 0.9608 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02439: val_acc did not improve from 0.86765\n",
+ "Epoch 2440/3000\n",
+ " - 39s - loss: 0.2392 - acc: 0.9716 - val_loss: 0.9308 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02440: val_acc did not improve from 0.86765\n",
+ "Epoch 2441/3000\n",
+ " - 39s - loss: 0.2421 - acc: 0.9703 - val_loss: 0.9459 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02441: val_acc did not improve from 0.86765\n",
+ "Epoch 2442/3000\n",
+ " - 39s - loss: 0.2351 - acc: 0.9725 - val_loss: 0.9689 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02442: val_acc did not improve from 0.86765\n",
+ "Epoch 2443/3000\n",
+ " - 39s - loss: 0.2399 - acc: 0.9716 - val_loss: 0.9566 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02443: val_acc did not improve from 0.86765\n",
+ "Epoch 2444/3000\n",
+ " - 39s - loss: 0.2361 - acc: 0.9707 - val_loss: 0.9691 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02444: val_acc did not improve from 0.86765\n",
+ "Epoch 2445/3000\n",
+ " - 39s - loss: 0.2341 - acc: 0.9751 - val_loss: 0.9407 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02445: val_acc did not improve from 0.86765\n",
+ "Epoch 2446/3000\n",
+ " - 39s - loss: 0.2337 - acc: 0.9713 - val_loss: 0.9381 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02446: val_acc did not improve from 0.86765\n",
+ "Epoch 2447/3000\n",
+ " - 39s - loss: 0.2356 - acc: 0.9712 - val_loss: 0.9595 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02447: val_acc did not improve from 0.86765\n",
+ "Epoch 2448/3000\n",
+ " - 39s - loss: 0.2253 - acc: 0.9740 - val_loss: 0.9326 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02448: val_acc did not improve from 0.86765\n",
+ "Epoch 2449/3000\n",
+ " - 40s - loss: 0.2382 - acc: 0.9715 - val_loss: 0.9471 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02449: val_acc did not improve from 0.86765\n",
+ "Epoch 2450/3000\n",
+ " - 39s - loss: 0.2283 - acc: 0.9734 - val_loss: 0.9830 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02450: val_acc did not improve from 0.86765\n",
+ "Epoch 2451/3000\n",
+ " - 39s - loss: 0.2359 - acc: 0.9713 - val_loss: 0.9605 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02451: val_acc did not improve from 0.86765\n",
+ "Epoch 2452/3000\n",
+ " - 39s - loss: 0.2322 - acc: 0.9743 - val_loss: 0.9502 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02452: val_acc did not improve from 0.86765\n",
+ "Epoch 2453/3000\n",
+ " - 39s - loss: 0.2280 - acc: 0.9751 - val_loss: 0.9751 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02453: val_acc did not improve from 0.86765\n",
+ "Epoch 2454/3000\n",
+ " - 39s - loss: 0.2244 - acc: 0.9771 - val_loss: 0.9504 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02454: val_acc did not improve from 0.86765\n",
+ "Epoch 2455/3000\n",
+ " - 39s - loss: 0.2358 - acc: 0.9712 - val_loss: 0.9774 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 02455: val_acc did not improve from 0.86765\n",
+ "Epoch 2456/3000\n",
+ " - 39s - loss: 0.2354 - acc: 0.9722 - val_loss: 0.9662 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02456: val_acc did not improve from 0.86765\n",
+ "Epoch 2457/3000\n",
+ " - 39s - loss: 0.2338 - acc: 0.9730 - val_loss: 0.9646 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02457: val_acc did not improve from 0.86765\n",
+ "Epoch 2458/3000\n",
+ " - 39s - loss: 0.2313 - acc: 0.9730 - val_loss: 0.9670 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02458: val_acc did not improve from 0.86765\n",
+ "Epoch 2459/3000\n",
+ " - 39s - loss: 0.2394 - acc: 0.9712 - val_loss: 0.9832 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02459: val_acc did not improve from 0.86765\n",
+ "Epoch 2460/3000\n",
+ " - 39s - loss: 0.2352 - acc: 0.9719 - val_loss: 0.9609 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02460: val_acc did not improve from 0.86765\n",
+ "Epoch 2461/3000\n",
+ " - 39s - loss: 0.2389 - acc: 0.9704 - val_loss: 1.0131 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 02461: val_acc did not improve from 0.86765\n",
+ "Epoch 2462/3000\n",
+ " - 39s - loss: 0.2383 - acc: 0.9706 - val_loss: 0.9680 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02462: val_acc did not improve from 0.86765\n",
+ "Epoch 2463/3000\n",
+ " - 39s - loss: 0.2517 - acc: 0.9674 - val_loss: 0.9694 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02463: val_acc did not improve from 0.86765\n",
+ "Epoch 2464/3000\n",
+ " - 39s - loss: 0.2324 - acc: 0.9730 - val_loss: 0.9769 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02464: val_acc did not improve from 0.86765\n",
+ "Epoch 2465/3000\n",
+ " - 39s - loss: 0.2318 - acc: 0.9722 - val_loss: 0.9863 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 02465: val_acc did not improve from 0.86765\n",
+ "Epoch 2466/3000\n",
+ " - 39s - loss: 0.2409 - acc: 0.9706 - val_loss: 0.9598 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02466: val_acc did not improve from 0.86765\n",
+ "Epoch 2467/3000\n",
+ " - 40s - loss: 0.2349 - acc: 0.9745 - val_loss: 0.9682 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02467: val_acc did not improve from 0.86765\n",
+ "Epoch 2468/3000\n",
+ " - 39s - loss: 0.2204 - acc: 0.9760 - val_loss: 0.9870 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02468: val_acc did not improve from 0.86765\n",
+ "Epoch 2469/3000\n",
+ " - 39s - loss: 0.2322 - acc: 0.9731 - val_loss: 0.9713 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02469: val_acc did not improve from 0.86765\n",
+ "Epoch 2470/3000\n",
+ " - 39s - loss: 0.2292 - acc: 0.9733 - val_loss: 1.0103 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 02470: val_acc did not improve from 0.86765\n",
+ "Epoch 2471/3000\n",
+ " - 39s - loss: 0.2332 - acc: 0.9731 - val_loss: 0.9607 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02471: val_acc did not improve from 0.86765\n",
+ "Epoch 2472/3000\n",
+ " - 39s - loss: 0.2343 - acc: 0.9737 - val_loss: 0.9634 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02472: val_acc did not improve from 0.86765\n",
+ "Epoch 2473/3000\n",
+ " - 39s - loss: 0.2338 - acc: 0.9722 - val_loss: 0.9475 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02473: val_acc did not improve from 0.86765\n",
+ "Epoch 2474/3000\n",
+ " - 39s - loss: 0.2373 - acc: 0.9709 - val_loss: 0.9654 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02474: val_acc did not improve from 0.86765\n",
+ "Epoch 2475/3000\n",
+ " - 40s - loss: 0.2310 - acc: 0.9745 - val_loss: 0.9234 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02475: val_acc did not improve from 0.86765\n",
+ "Epoch 2476/3000\n",
+ " - 39s - loss: 0.2298 - acc: 0.9745 - val_loss: 0.9563 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02476: val_acc did not improve from 0.86765\n",
+ "Epoch 2477/3000\n",
+ "\n",
+ "Epoch 02479: val_acc did not improve from 0.86765\n",
+ "Epoch 2480/3000\n",
+ " - 39s - loss: 0.2391 - acc: 0.9727 - val_loss: 0.9650 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02480: val_acc did not improve from 0.86765\n",
+ "Epoch 2481/3000\n",
+ " - 39s - loss: 0.2312 - acc: 0.9743 - val_loss: 0.9271 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02481: val_acc did not improve from 0.86765\n",
+ "Epoch 2482/3000\n",
+ " - 39s - loss: 0.2331 - acc: 0.9710 - val_loss: 0.9862 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 02482: val_acc did not improve from 0.86765\n",
+ "Epoch 2483/3000\n",
+ " - 39s - loss: 0.2366 - acc: 0.9710 - val_loss: 0.9794 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02483: val_acc did not improve from 0.86765\n",
+ "Epoch 2484/3000\n",
+ " - 39s - loss: 0.2357 - acc: 0.9713 - val_loss: 0.9613 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02484: val_acc did not improve from 0.86765\n",
+ "Epoch 2485/3000\n",
+ " - 39s - loss: 0.2325 - acc: 0.9707 - val_loss: 0.9589 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02485: val_acc did not improve from 0.86765\n",
+ "Epoch 2486/3000\n",
+ " - 39s - loss: 0.2287 - acc: 0.9733 - val_loss: 0.9622 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02486: val_acc did not improve from 0.86765\n",
+ "Epoch 2487/3000\n",
+ " - 39s - loss: 0.2359 - acc: 0.9739 - val_loss: 0.9513 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02487: val_acc did not improve from 0.86765\n",
+ "Epoch 2488/3000\n",
+ " - 39s - loss: 0.2323 - acc: 0.9734 - val_loss: 0.9478 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02488: val_acc did not improve from 0.86765\n",
+ "Epoch 2489/3000\n",
+ " - 39s - loss: 0.2340 - acc: 0.9733 - val_loss: 0.9449 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02489: val_acc did not improve from 0.86765\n",
+ "Epoch 2490/3000\n",
+ " - 39s - loss: 0.2346 - acc: 0.9713 - val_loss: 0.9492 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02490: val_acc did not improve from 0.86765\n",
+ "Epoch 2491/3000\n",
+ " - 39s - loss: 0.2383 - acc: 0.9718 - val_loss: 0.9639 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02491: val_acc did not improve from 0.86765\n",
+ "Epoch 2492/3000\n",
+ " - 39s - loss: 0.2311 - acc: 0.9746 - val_loss: 0.9718 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02492: val_acc did not improve from 0.86765\n",
+ "Epoch 2493/3000\n",
+ " - 39s - loss: 0.2418 - acc: 0.9724 - val_loss: 0.9405 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02493: val_acc did not improve from 0.86765\n",
+ "Epoch 2494/3000\n",
+ " - 39s - loss: 0.2411 - acc: 0.9697 - val_loss: 0.9457 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02494: val_acc did not improve from 0.86765\n",
+ "Epoch 2495/3000\n",
+ " - 39s - loss: 0.2404 - acc: 0.9704 - val_loss: 0.9843 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02495: val_acc did not improve from 0.86765\n",
+ "Epoch 2496/3000\n",
+ " - 39s - loss: 0.2360 - acc: 0.9713 - val_loss: 0.9547 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02496: val_acc did not improve from 0.86765\n",
+ "Epoch 2497/3000\n",
+ " - 39s - loss: 0.2397 - acc: 0.9695 - val_loss: 0.9504 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02497: val_acc did not improve from 0.86765\n",
+ "Epoch 2498/3000\n",
+ " - 39s - loss: 0.2387 - acc: 0.9715 - val_loss: 0.9428 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02498: val_acc did not improve from 0.86765\n",
+ "Epoch 2499/3000\n",
+ " - 40s - loss: 0.2363 - acc: 0.9728 - val_loss: 0.9882 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02499: val_acc did not improve from 0.86765\n",
+ "Epoch 2500/3000\n",
+ " - 39s - loss: 0.2390 - acc: 0.9722 - val_loss: 1.0214 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 02500: val_acc did not improve from 0.86765\n",
+ "Epoch 2501/3000\n",
+ " - 39s - loss: 0.2362 - acc: 0.9710 - val_loss: 0.9527 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02501: val_acc did not improve from 0.86765\n",
+ "Epoch 2502/3000\n",
+ " - 39s - loss: 0.2280 - acc: 0.9746 - val_loss: 0.9583 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02502: val_acc did not improve from 0.86765\n",
+ "Epoch 2503/3000\n",
+ " - 39s - loss: 0.2270 - acc: 0.9739 - val_loss: 0.9613 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02503: val_acc did not improve from 0.86765\n",
+ "Epoch 2504/3000\n",
+ " - 39s - loss: 0.2322 - acc: 0.9727 - val_loss: 0.9712 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02504: val_acc did not improve from 0.86765\n",
+ "Epoch 2505/3000\n",
+ " - 39s - loss: 0.2387 - acc: 0.9722 - val_loss: 0.9256 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02505: val_acc did not improve from 0.86765\n",
+ "Epoch 2506/3000\n",
+ " - 39s - loss: 0.2358 - acc: 0.9701 - val_loss: 0.9520 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02506: val_acc did not improve from 0.86765\n",
+ "Epoch 2507/3000\n",
+ " - 39s - loss: 0.2352 - acc: 0.9749 - val_loss: 0.9447 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02507: val_acc did not improve from 0.86765\n",
+ "Epoch 2508/3000\n",
+ " - 39s - loss: 0.2270 - acc: 0.9757 - val_loss: 0.9507 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02508: val_acc did not improve from 0.86765\n",
+ "Epoch 2509/3000\n",
+ " - 39s - loss: 0.2275 - acc: 0.9746 - val_loss: 0.9457 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02509: val_acc did not improve from 0.86765\n",
+ "Epoch 2510/3000\n",
+ " - 39s - loss: 0.2292 - acc: 0.9746 - val_loss: 0.9665 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02510: val_acc did not improve from 0.86765\n",
+ "Epoch 2511/3000\n",
+ " - 39s - loss: 0.2254 - acc: 0.9751 - val_loss: 0.9572 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02511: val_acc did not improve from 0.86765\n",
+ "Epoch 2512/3000\n",
+ " - 39s - loss: 0.2385 - acc: 0.9737 - val_loss: 0.9690 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02512: val_acc did not improve from 0.86765\n",
+ "Epoch 2513/3000\n",
+ " - 39s - loss: 0.2411 - acc: 0.9716 - val_loss: 0.9671 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02513: val_acc did not improve from 0.86765\n",
+ "Epoch 2514/3000\n",
+ " - 39s - loss: 0.2288 - acc: 0.9713 - val_loss: 0.9640 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02514: val_acc did not improve from 0.86765\n",
+ "Epoch 2515/3000\n",
+ " - 39s - loss: 0.2337 - acc: 0.9731 - val_loss: 0.9903 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02515: val_acc did not improve from 0.86765\n",
+ "Epoch 2516/3000\n",
+ " - 39s - loss: 0.2399 - acc: 0.9710 - val_loss: 0.9713 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02516: val_acc did not improve from 0.86765\n",
+ "Epoch 2517/3000\n",
+ " - 39s - loss: 0.2250 - acc: 0.9771 - val_loss: 0.9441 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02517: val_acc did not improve from 0.86765\n",
+ "Epoch 2518/3000\n",
+ " - 39s - loss: 0.2322 - acc: 0.9713 - val_loss: 0.9495 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02518: val_acc did not improve from 0.86765\n",
+ "Epoch 2519/3000\n",
+ " - 39s - loss: 0.2402 - acc: 0.9691 - val_loss: 0.9484 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02519: val_acc did not improve from 0.86765\n",
+ "Epoch 2520/3000\n",
+ " - 39s - loss: 0.2294 - acc: 0.9745 - val_loss: 0.9395 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02520: val_acc did not improve from 0.86765\n",
+ "Epoch 2521/3000\n",
+ " - 39s - loss: 0.2316 - acc: 0.9725 - val_loss: 0.9487 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02521: val_acc did not improve from 0.86765\n",
+ "Epoch 2522/3000\n",
+ " - 40s - loss: 0.2313 - acc: 0.9740 - val_loss: 0.9455 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02522: val_acc did not improve from 0.86765\n",
+ "Epoch 2523/3000\n",
+ " - 39s - loss: 0.2362 - acc: 0.9742 - val_loss: 0.9453 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02523: val_acc did not improve from 0.86765\n",
+ "Epoch 2524/3000\n",
+ " - 39s - loss: 0.2449 - acc: 0.9698 - val_loss: 0.9585 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02524: val_acc did not improve from 0.86765\n",
+ "Epoch 2525/3000\n",
+ " - 39s - loss: 0.2297 - acc: 0.9727 - val_loss: 0.9688 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02525: val_acc did not improve from 0.86765\n",
+ "Epoch 2526/3000\n",
+ " - 39s - loss: 0.2304 - acc: 0.9737 - val_loss: 0.9475 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02526: val_acc did not improve from 0.86765\n",
+ "Epoch 2527/3000\n",
+ " - 39s - loss: 0.2365 - acc: 0.9733 - val_loss: 0.9605 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02527: val_acc did not improve from 0.86765\n",
+ "Epoch 2528/3000\n",
+ " - 39s - loss: 0.2277 - acc: 0.9734 - val_loss: 0.9569 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02528: val_acc did not improve from 0.86765\n",
+ "Epoch 2529/3000\n",
+ " - 39s - loss: 0.2334 - acc: 0.9736 - val_loss: 0.9647 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 02529: val_acc did not improve from 0.86765\n",
+ "Epoch 2530/3000\n",
+ " - 39s - loss: 0.2402 - acc: 0.9712 - val_loss: 0.9489 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02530: val_acc did not improve from 0.86765\n",
+ "Epoch 2531/3000\n",
+ " - 39s - loss: 0.2344 - acc: 0.9746 - val_loss: 0.9555 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02531: val_acc did not improve from 0.86765\n",
+ "Epoch 2532/3000\n",
+ " - 39s - loss: 0.2269 - acc: 0.9739 - val_loss: 0.9588 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 02532: val_acc did not improve from 0.86765\n",
+ "Epoch 2533/3000\n",
+ " - 39s - loss: 0.2416 - acc: 0.9695 - val_loss: 0.9719 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02533: val_acc did not improve from 0.86765\n",
+ "Epoch 2534/3000\n",
+ " - 39s - loss: 0.2311 - acc: 0.9725 - val_loss: 0.9566 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02534: val_acc did not improve from 0.86765\n",
+ "Epoch 2535/3000\n",
+ " - 39s - loss: 0.2356 - acc: 0.9713 - val_loss: 0.9693 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02535: val_acc did not improve from 0.86765\n",
+ "Epoch 2536/3000\n",
+ " - 39s - loss: 0.2348 - acc: 0.9749 - val_loss: 0.9646 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02536: val_acc did not improve from 0.86765\n",
+ "Epoch 2537/3000\n",
+ " - 39s - loss: 0.2419 - acc: 0.9686 - val_loss: 0.9614 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02537: val_acc did not improve from 0.86765\n",
+ "Epoch 2538/3000\n",
+ " - 39s - loss: 0.2293 - acc: 0.9745 - val_loss: 0.9490 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02538: val_acc did not improve from 0.86765\n",
+ "Epoch 2539/3000\n",
+ " - 39s - loss: 0.2218 - acc: 0.9760 - val_loss: 0.9688 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02539: val_acc did not improve from 0.86765\n",
+ "Epoch 2540/3000\n",
+ " - 39s - loss: 0.2240 - acc: 0.9733 - val_loss: 0.9535 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02540: val_acc did not improve from 0.86765\n",
+ "Epoch 2541/3000\n",
+ " - 39s - loss: 0.2318 - acc: 0.9748 - val_loss: 0.9751 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02541: val_acc did not improve from 0.86765\n",
+ "Epoch 2542/3000\n",
+ " - 39s - loss: 0.2210 - acc: 0.9755 - val_loss: 0.9443 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02542: val_acc did not improve from 0.86765\n",
+ "Epoch 2543/3000\n",
+ " - 39s - loss: 0.2189 - acc: 0.9751 - val_loss: 0.9744 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02543: val_acc did not improve from 0.86765\n",
+ "Epoch 2544/3000\n",
+ " - 39s - loss: 0.2370 - acc: 0.9722 - val_loss: 0.9557 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02544: val_acc did not improve from 0.86765\n",
+ "Epoch 2545/3000\n",
+ " - 39s - loss: 0.2291 - acc: 0.9728 - val_loss: 0.9393 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02545: val_acc did not improve from 0.86765\n",
+ "Epoch 2546/3000\n",
+ " - 39s - loss: 0.2310 - acc: 0.9740 - val_loss: 0.9878 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02546: val_acc did not improve from 0.86765\n",
+ "Epoch 2547/3000\n",
+ " - 40s - loss: 0.2358 - acc: 0.9713 - val_loss: 0.9725 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02547: val_acc did not improve from 0.86765\n",
+ "Epoch 2548/3000\n",
+ " - 39s - loss: 0.2383 - acc: 0.9710 - val_loss: 0.9541 - val_acc: 0.8614\n",
+ "\n",
+ "Epoch 02548: val_acc did not improve from 0.86765\n",
+ "Epoch 2549/3000\n",
+ " - 39s - loss: 0.2345 - acc: 0.9728 - val_loss: 0.9473 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02549: val_acc did not improve from 0.86765\n",
+ "Epoch 2550/3000\n",
+ " - 39s - loss: 0.2326 - acc: 0.9727 - val_loss: 0.9542 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02550: val_acc did not improve from 0.86765\n",
+ "Epoch 2551/3000\n",
+ " - 39s - loss: 0.2402 - acc: 0.9731 - val_loss: 0.9695 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02551: val_acc did not improve from 0.86765\n",
+ "Epoch 2552/3000\n",
+ " - 39s - loss: 0.2387 - acc: 0.9716 - val_loss: 0.9688 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02552: val_acc did not improve from 0.86765\n",
+ "Epoch 2553/3000\n",
+ " - 39s - loss: 0.2266 - acc: 0.9740 - val_loss: 0.9568 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02553: val_acc did not improve from 0.86765\n",
+ "Epoch 2554/3000\n",
+ " - 39s - loss: 0.2283 - acc: 0.9757 - val_loss: 0.9807 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 02554: val_acc did not improve from 0.86765\n",
+ "Epoch 2555/3000\n",
+ " - 39s - loss: 0.2342 - acc: 0.9739 - val_loss: 0.9877 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 02555: val_acc did not improve from 0.86765\n",
+ "Epoch 2556/3000\n",
+ " - 39s - loss: 0.2289 - acc: 0.9737 - val_loss: 1.0034 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02556: val_acc did not improve from 0.86765\n",
+ "Epoch 2557/3000\n",
+ " - 39s - loss: 0.2401 - acc: 0.9733 - val_loss: 0.9596 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02557: val_acc did not improve from 0.86765\n",
+ "Epoch 2558/3000\n",
+ " - 39s - loss: 0.2342 - acc: 0.9734 - val_loss: 0.9918 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02558: val_acc did not improve from 0.86765\n",
+ "Epoch 2559/3000\n",
+ "\n",
+ "Epoch 02561: val_acc did not improve from 0.86765\n",
+ "Epoch 2562/3000\n",
+ " - 39s - loss: 0.2339 - acc: 0.9731 - val_loss: 0.9548 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02562: val_acc did not improve from 0.86765\n",
+ "Epoch 2563/3000\n",
+ " - 39s - loss: 0.2365 - acc: 0.9728 - val_loss: 0.9750 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02563: val_acc did not improve from 0.86765\n",
+ "Epoch 2564/3000\n",
+ " - 39s - loss: 0.2299 - acc: 0.9724 - val_loss: 0.9559 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02564: val_acc did not improve from 0.86765\n",
+ "Epoch 2565/3000\n",
+ " - 40s - loss: 0.2439 - acc: 0.9719 - val_loss: 0.9723 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02565: val_acc did not improve from 0.86765\n",
+ "Epoch 2566/3000\n",
+ " - 39s - loss: 0.2289 - acc: 0.9742 - val_loss: 0.9365 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02566: val_acc did not improve from 0.86765\n",
+ "Epoch 2567/3000\n",
+ " - 39s - loss: 0.2319 - acc: 0.9752 - val_loss: 0.9556 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 02567: val_acc did not improve from 0.86765\n",
+ "Epoch 2568/3000\n",
+ " - 39s - loss: 0.2375 - acc: 0.9730 - val_loss: 0.9566 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02568: val_acc did not improve from 0.86765\n",
+ "Epoch 2569/3000\n",
+ " - 39s - loss: 0.2373 - acc: 0.9709 - val_loss: 0.9820 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02569: val_acc did not improve from 0.86765\n",
+ "Epoch 2570/3000\n",
+ " - 39s - loss: 0.2298 - acc: 0.9724 - val_loss: 0.9375 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02570: val_acc did not improve from 0.86765\n",
+ "Epoch 2571/3000\n",
+ " - 39s - loss: 0.2235 - acc: 0.9758 - val_loss: 0.9616 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02571: val_acc did not improve from 0.86765\n",
+ "Epoch 2572/3000\n",
+ " - 39s - loss: 0.2390 - acc: 0.9715 - val_loss: 0.9583 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02572: val_acc did not improve from 0.86765\n",
+ "Epoch 2573/3000\n",
+ " - 39s - loss: 0.2330 - acc: 0.9719 - val_loss: 0.9294 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 02573: val_acc did not improve from 0.86765\n",
+ "Epoch 2574/3000\n",
+ " - 39s - loss: 0.2295 - acc: 0.9721 - val_loss: 0.9768 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02574: val_acc did not improve from 0.86765\n",
+ "Epoch 2575/3000\n",
+ " - 39s - loss: 0.2446 - acc: 0.9692 - val_loss: 0.9572 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02575: val_acc did not improve from 0.86765\n",
+ "Epoch 2576/3000\n",
+ " - 39s - loss: 0.2368 - acc: 0.9724 - val_loss: 0.9435 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02576: val_acc did not improve from 0.86765\n",
+ "Epoch 2577/3000\n",
+ " - 39s - loss: 0.2305 - acc: 0.9724 - val_loss: 0.9729 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02577: val_acc did not improve from 0.86765\n",
+ "Epoch 2578/3000\n",
+ " - 39s - loss: 0.2372 - acc: 0.9724 - val_loss: 0.9760 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02578: val_acc did not improve from 0.86765\n",
+ "Epoch 2579/3000\n",
+ " - 39s - loss: 0.2335 - acc: 0.9725 - val_loss: 0.9668 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02579: val_acc did not improve from 0.86765\n",
+ "Epoch 2580/3000\n",
+ " - 39s - loss: 0.2306 - acc: 0.9740 - val_loss: 0.9427 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02580: val_acc did not improve from 0.86765\n",
+ "Epoch 2581/3000\n",
+ " - 39s - loss: 0.2285 - acc: 0.9733 - val_loss: 0.9472 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 02581: val_acc did not improve from 0.86765\n",
+ "Epoch 2582/3000\n",
+ " - 39s - loss: 0.2393 - acc: 0.9709 - val_loss: 0.9626 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02582: val_acc did not improve from 0.86765\n",
+ "Epoch 2583/3000\n",
+ " - 40s - loss: 0.2424 - acc: 0.9722 - val_loss: 0.9657 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02583: val_acc did not improve from 0.86765\n",
+ "Epoch 2584/3000\n",
+ " - 39s - loss: 0.2306 - acc: 0.9757 - val_loss: 0.9699 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02584: val_acc did not improve from 0.86765\n",
+ "Epoch 2585/3000\n",
+ " - 39s - loss: 0.2318 - acc: 0.9728 - val_loss: 0.9721 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02585: val_acc did not improve from 0.86765\n",
+ "Epoch 2586/3000\n",
+ " - 39s - loss: 0.2362 - acc: 0.9721 - val_loss: 0.9734 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02586: val_acc did not improve from 0.86765\n",
+ "Epoch 2587/3000\n",
+ " - 39s - loss: 0.2314 - acc: 0.9766 - val_loss: 0.9608 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02587: val_acc did not improve from 0.86765\n",
+ "Epoch 2588/3000\n",
+ " - 39s - loss: 0.2415 - acc: 0.9707 - val_loss: 0.9711 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02588: val_acc did not improve from 0.86765\n",
+ "Epoch 2589/3000\n",
+ " - 39s - loss: 0.2372 - acc: 0.9740 - val_loss: 0.9527 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02589: val_acc did not improve from 0.86765\n",
+ "Epoch 2590/3000\n",
+ " - 39s - loss: 0.2334 - acc: 0.9704 - val_loss: 0.9726 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02590: val_acc did not improve from 0.86765\n",
+ "Epoch 2591/3000\n",
+ " - 39s - loss: 0.2331 - acc: 0.9730 - val_loss: 0.9538 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02591: val_acc did not improve from 0.86765\n",
+ "Epoch 2592/3000\n",
+ " - 39s - loss: 0.2274 - acc: 0.9754 - val_loss: 0.9925 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02592: val_acc did not improve from 0.86765\n",
+ "Epoch 2593/3000\n",
+ " - 39s - loss: 0.2353 - acc: 0.9733 - val_loss: 0.9868 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 02593: val_acc did not improve from 0.86765\n",
+ "Epoch 2594/3000\n",
+ " - 39s - loss: 0.2317 - acc: 0.9725 - val_loss: 0.9892 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02594: val_acc did not improve from 0.86765\n",
+ "Epoch 2595/3000\n",
+ " - 39s - loss: 0.2418 - acc: 0.9695 - val_loss: 0.9467 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02595: val_acc did not improve from 0.86765\n",
+ "Epoch 2596/3000\n",
+ " - 39s - loss: 0.2364 - acc: 0.9716 - val_loss: 0.9501 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02596: val_acc did not improve from 0.86765\n",
+ "Epoch 2597/3000\n",
+ " - 39s - loss: 0.2284 - acc: 0.9752 - val_loss: 0.9447 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02597: val_acc did not improve from 0.86765\n",
+ "Epoch 2598/3000\n",
+ " - 39s - loss: 0.2284 - acc: 0.9728 - val_loss: 0.9595 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02598: val_acc did not improve from 0.86765\n",
+ "Epoch 2599/3000\n",
+ " - 39s - loss: 0.2378 - acc: 0.9718 - val_loss: 0.9990 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02599: val_acc did not improve from 0.86765\n",
+ "Epoch 2600/3000\n",
+ " - 39s - loss: 0.2321 - acc: 0.9736 - val_loss: 0.9632 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02600: val_acc did not improve from 0.86765\n",
+ "Epoch 2601/3000\n",
+ " - 39s - loss: 0.2301 - acc: 0.9728 - val_loss: 0.9759 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02601: val_acc did not improve from 0.86765\n",
+ "Epoch 2602/3000\n",
+ " - 39s - loss: 0.2300 - acc: 0.9731 - val_loss: 0.9669 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02602: val_acc did not improve from 0.86765\n",
+ "Epoch 2603/3000\n",
+ " - 39s - loss: 0.2305 - acc: 0.9737 - val_loss: 0.9272 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02603: val_acc did not improve from 0.86765\n",
+ "Epoch 2604/3000\n",
+ " - 39s - loss: 0.2328 - acc: 0.9733 - val_loss: 0.9628 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02604: val_acc did not improve from 0.86765\n",
+ "Epoch 2605/3000\n",
+ " - 39s - loss: 0.2414 - acc: 0.9718 - val_loss: 0.9475 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 02605: val_acc did not improve from 0.86765\n",
+ "Epoch 2606/3000\n",
+ " - 39s - loss: 0.2368 - acc: 0.9739 - val_loss: 0.9584 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02606: val_acc did not improve from 0.86765\n",
+ "Epoch 2607/3000\n",
+ " - 39s - loss: 0.2364 - acc: 0.9721 - val_loss: 0.9753 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02607: val_acc did not improve from 0.86765\n",
+ "Epoch 2608/3000\n",
+ " - 39s - loss: 0.2370 - acc: 0.9727 - val_loss: 0.9618 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02608: val_acc did not improve from 0.86765\n",
+ "Epoch 2609/3000\n",
+ " - 39s - loss: 0.2309 - acc: 0.9749 - val_loss: 1.0208 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 02609: val_acc did not improve from 0.86765\n",
+ "Epoch 2610/3000\n",
+ " - 39s - loss: 0.2277 - acc: 0.9736 - val_loss: 1.0076 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 02610: val_acc did not improve from 0.86765\n",
+ "Epoch 2611/3000\n",
+ " - 39s - loss: 0.2322 - acc: 0.9716 - val_loss: 0.9814 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 02611: val_acc did not improve from 0.86765\n",
+ "Epoch 2612/3000\n",
+ " - 39s - loss: 0.2326 - acc: 0.9745 - val_loss: 0.9355 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 02612: val_acc did not improve from 0.86765\n",
+ "Epoch 2613/3000\n",
+ " - 39s - loss: 0.2280 - acc: 0.9751 - val_loss: 0.9673 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02613: val_acc did not improve from 0.86765\n",
+ "Epoch 2614/3000\n",
+ " - 39s - loss: 0.2383 - acc: 0.9737 - val_loss: 0.9913 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02614: val_acc did not improve from 0.86765\n",
+ "Epoch 2615/3000\n",
+ " - 39s - loss: 0.2359 - acc: 0.9728 - val_loss: 0.9636 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02615: val_acc did not improve from 0.86765\n",
+ "Epoch 2616/3000\n",
+ " - 39s - loss: 0.2332 - acc: 0.9728 - val_loss: 0.9796 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02616: val_acc did not improve from 0.86765\n",
+ "Epoch 2617/3000\n",
+ " - 39s - loss: 0.2372 - acc: 0.9734 - val_loss: 0.9404 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02617: val_acc did not improve from 0.86765\n",
+ "Epoch 2618/3000\n",
+ " - 40s - loss: 0.2341 - acc: 0.9716 - val_loss: 0.9376 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 02618: val_acc did not improve from 0.86765\n",
+ "Epoch 2619/3000\n",
+ " - 39s - loss: 0.2453 - acc: 0.9712 - val_loss: 0.9704 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02619: val_acc did not improve from 0.86765\n",
+ "Epoch 2620/3000\n",
+ " - 39s - loss: 0.2276 - acc: 0.9733 - val_loss: 0.9285 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 02620: val_acc did not improve from 0.86765\n",
+ "Epoch 2621/3000\n",
+ " - 40s - loss: 0.2281 - acc: 0.9727 - val_loss: 0.9605 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02621: val_acc did not improve from 0.86765\n",
+ "Epoch 2622/3000\n",
+ " - 39s - loss: 0.2360 - acc: 0.9725 - val_loss: 0.9437 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02624: val_acc did not improve from 0.86765\n",
+ "Epoch 2625/3000\n",
+ " - 39s - loss: 0.2301 - acc: 0.9724 - val_loss: 0.9568 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02625: val_acc did not improve from 0.86765\n",
+ "Epoch 2626/3000\n",
+ " - 39s - loss: 0.2369 - acc: 0.9725 - val_loss: 0.9350 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 02626: val_acc did not improve from 0.86765\n",
+ "Epoch 2627/3000\n",
+ " - 39s - loss: 0.2249 - acc: 0.9737 - val_loss: 0.9489 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02627: val_acc did not improve from 0.86765\n",
+ "Epoch 2628/3000\n",
+ " - 39s - loss: 0.2349 - acc: 0.9718 - val_loss: 0.9521 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02628: val_acc did not improve from 0.86765\n",
+ "Epoch 2629/3000\n",
+ " - 40s - loss: 0.2266 - acc: 0.9748 - val_loss: 0.9827 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02629: val_acc did not improve from 0.86765\n",
+ "Epoch 2630/3000\n",
+ " - 39s - loss: 0.2345 - acc: 0.9712 - val_loss: 0.9961 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02630: val_acc did not improve from 0.86765\n",
+ "Epoch 2631/3000\n",
+ " - 39s - loss: 0.2252 - acc: 0.9737 - val_loss: 0.9493 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02631: val_acc did not improve from 0.86765\n",
+ "Epoch 2632/3000\n",
+ " - 39s - loss: 0.2273 - acc: 0.9740 - val_loss: 0.9706 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02632: val_acc did not improve from 0.86765\n",
+ "Epoch 2633/3000\n",
+ " - 39s - loss: 0.2311 - acc: 0.9725 - val_loss: 0.9526 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02633: val_acc did not improve from 0.86765\n",
+ "Epoch 2634/3000\n",
+ " - 39s - loss: 0.2361 - acc: 0.9713 - val_loss: 0.9754 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02634: val_acc did not improve from 0.86765\n",
+ "Epoch 2635/3000\n",
+ " - 39s - loss: 0.2327 - acc: 0.9727 - val_loss: 0.9644 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02635: val_acc did not improve from 0.86765\n",
+ "Epoch 2636/3000\n",
+ " - 39s - loss: 0.2446 - acc: 0.9715 - val_loss: 1.0119 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 02636: val_acc did not improve from 0.86765\n",
+ "Epoch 2637/3000\n",
+ " - 39s - loss: 0.2314 - acc: 0.9707 - val_loss: 0.9635 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02637: val_acc did not improve from 0.86765\n",
+ "Epoch 2638/3000\n",
+ " - 39s - loss: 0.2306 - acc: 0.9712 - val_loss: 0.9775 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02638: val_acc did not improve from 0.86765\n",
+ "Epoch 2639/3000\n",
+ " - 39s - loss: 0.2284 - acc: 0.9742 - val_loss: 1.0107 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02639: val_acc did not improve from 0.86765\n",
+ "Epoch 2640/3000\n",
+ " - 39s - loss: 0.2318 - acc: 0.9736 - val_loss: 0.9856 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02640: val_acc did not improve from 0.86765\n",
+ "Epoch 2641/3000\n",
+ " - 39s - loss: 0.2465 - acc: 0.9710 - val_loss: 0.9721 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02641: val_acc did not improve from 0.86765\n",
+ "Epoch 2642/3000\n",
+ " - 39s - loss: 0.2434 - acc: 0.9697 - val_loss: 0.9551 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02642: val_acc did not improve from 0.86765\n",
+ "Epoch 2643/3000\n",
+ " - 40s - loss: 0.2385 - acc: 0.9716 - val_loss: 0.9665 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02643: val_acc did not improve from 0.86765\n",
+ "Epoch 2644/3000\n",
+ " - 39s - loss: 0.2269 - acc: 0.9761 - val_loss: 0.9638 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02644: val_acc did not improve from 0.86765\n",
+ "Epoch 2645/3000\n",
+ " - 39s - loss: 0.2479 - acc: 0.9706 - val_loss: 0.9311 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02645: val_acc did not improve from 0.86765\n",
+ "Epoch 2646/3000\n",
+ " - 39s - loss: 0.2314 - acc: 0.9734 - val_loss: 0.9379 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02646: val_acc did not improve from 0.86765\n",
+ "Epoch 2647/3000\n",
+ " - 39s - loss: 0.2239 - acc: 0.9718 - val_loss: 0.9629 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02647: val_acc did not improve from 0.86765\n",
+ "Epoch 2648/3000\n",
+ " - 39s - loss: 0.2320 - acc: 0.9725 - val_loss: 0.9567 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02648: val_acc did not improve from 0.86765\n",
+ "Epoch 2649/3000\n",
+ " - 39s - loss: 0.2354 - acc: 0.9730 - val_loss: 0.9335 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02649: val_acc did not improve from 0.86765\n",
+ "Epoch 2650/3000\n",
+ " - 39s - loss: 0.2335 - acc: 0.9749 - val_loss: 0.9543 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02650: val_acc did not improve from 0.86765\n",
+ "Epoch 2651/3000\n",
+ " - 39s - loss: 0.2270 - acc: 0.9719 - val_loss: 0.9624 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02651: val_acc did not improve from 0.86765\n",
+ "Epoch 2652/3000\n",
+ " - 39s - loss: 0.2402 - acc: 0.9715 - val_loss: 0.9500 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02652: val_acc did not improve from 0.86765\n",
+ "Epoch 2653/3000\n",
+ " - 39s - loss: 0.2329 - acc: 0.9724 - val_loss: 0.9378 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02653: val_acc did not improve from 0.86765\n",
+ "Epoch 2654/3000\n",
+ " - 39s - loss: 0.2292 - acc: 0.9745 - val_loss: 0.9475 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02654: val_acc did not improve from 0.86765\n",
+ "Epoch 2655/3000\n",
+ " - 39s - loss: 0.2345 - acc: 0.9712 - val_loss: 0.9441 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02655: val_acc did not improve from 0.86765\n",
+ "Epoch 2656/3000\n",
+ " - 39s - loss: 0.2288 - acc: 0.9718 - val_loss: 0.9570 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02656: val_acc did not improve from 0.86765\n",
+ "Epoch 2657/3000\n",
+ " - 39s - loss: 0.2251 - acc: 0.9739 - val_loss: 0.9450 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02657: val_acc did not improve from 0.86765\n",
+ "Epoch 2658/3000\n",
+ " - 39s - loss: 0.2330 - acc: 0.9713 - val_loss: 0.9706 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02658: val_acc did not improve from 0.86765\n",
+ "Epoch 2659/3000\n",
+ " - 40s - loss: 0.2327 - acc: 0.9731 - val_loss: 0.9554 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02659: val_acc did not improve from 0.86765\n",
+ "Epoch 2660/3000\n",
+ " - 39s - loss: 0.2306 - acc: 0.9724 - val_loss: 0.9553 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02660: val_acc did not improve from 0.86765\n",
+ "Epoch 2661/3000\n",
+ " - 39s - loss: 0.2325 - acc: 0.9734 - val_loss: 0.9595 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02661: val_acc did not improve from 0.86765\n",
+ "Epoch 2662/3000\n",
+ " - 39s - loss: 0.2453 - acc: 0.9709 - val_loss: 0.9733 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02662: val_acc did not improve from 0.86765\n",
+ "Epoch 2663/3000\n",
+ " - 39s - loss: 0.2384 - acc: 0.9704 - val_loss: 0.9429 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02663: val_acc did not improve from 0.86765\n",
+ "Epoch 2664/3000\n",
+ " - 39s - loss: 0.2330 - acc: 0.9719 - val_loss: 0.9549 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02664: val_acc did not improve from 0.86765\n",
+ "Epoch 2665/3000\n",
+ " - 39s - loss: 0.2390 - acc: 0.9715 - val_loss: 0.9797 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02665: val_acc did not improve from 0.86765\n",
+ "Epoch 2666/3000\n",
+ " - 39s - loss: 0.2354 - acc: 0.9736 - val_loss: 1.0047 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02666: val_acc did not improve from 0.86765\n",
+ "Epoch 2667/3000\n",
+ " - 39s - loss: 0.2370 - acc: 0.9715 - val_loss: 0.9700 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02667: val_acc did not improve from 0.86765\n",
+ "Epoch 2668/3000\n",
+ " - 39s - loss: 0.2296 - acc: 0.9734 - val_loss: 0.9496 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02668: val_acc did not improve from 0.86765\n",
+ "Epoch 2669/3000\n",
+ " - 39s - loss: 0.2408 - acc: 0.9712 - val_loss: 0.9649 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02669: val_acc did not improve from 0.86765\n",
+ "Epoch 2670/3000\n",
+ " - 39s - loss: 0.2281 - acc: 0.9746 - val_loss: 0.9795 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02670: val_acc did not improve from 0.86765\n",
+ "Epoch 2671/3000\n",
+ " - 39s - loss: 0.2244 - acc: 0.9761 - val_loss: 0.9272 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 02671: val_acc did not improve from 0.86765\n",
+ "Epoch 2672/3000\n",
+ " - 39s - loss: 0.2326 - acc: 0.9722 - val_loss: 0.9374 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02672: val_acc did not improve from 0.86765\n",
+ "Epoch 2673/3000\n",
+ " - 39s - loss: 0.2323 - acc: 0.9710 - val_loss: 0.9724 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02673: val_acc did not improve from 0.86765\n",
+ "Epoch 2674/3000\n",
+ " - 40s - loss: 0.2306 - acc: 0.9731 - val_loss: 0.9914 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02674: val_acc did not improve from 0.86765\n",
+ "Epoch 2675/3000\n",
+ " - 39s - loss: 0.2359 - acc: 0.9719 - val_loss: 0.9399 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02675: val_acc did not improve from 0.86765\n",
+ "Epoch 2676/3000\n",
+ " - 39s - loss: 0.2298 - acc: 0.9704 - val_loss: 0.9915 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02676: val_acc did not improve from 0.86765\n",
+ "Epoch 2677/3000\n",
+ " - 39s - loss: 0.2227 - acc: 0.9760 - val_loss: 0.9702 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02677: val_acc did not improve from 0.86765\n",
+ "Epoch 2678/3000\n",
+ " - 39s - loss: 0.2262 - acc: 0.9751 - val_loss: 0.9777 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02678: val_acc did not improve from 0.86765\n",
+ "Epoch 2679/3000\n",
+ " - 40s - loss: 0.2392 - acc: 0.9719 - val_loss: 0.9818 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02679: val_acc did not improve from 0.86765\n",
+ "Epoch 2680/3000\n",
+ " - 39s - loss: 0.2251 - acc: 0.9752 - val_loss: 0.9696 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02680: val_acc did not improve from 0.86765\n",
+ "Epoch 2681/3000\n",
+ " - 39s - loss: 0.2224 - acc: 0.9761 - val_loss: 0.9685 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02681: val_acc did not improve from 0.86765\n",
+ "Epoch 2682/3000\n",
+ " - 39s - loss: 0.2326 - acc: 0.9704 - val_loss: 0.9678 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02682: val_acc did not improve from 0.86765\n",
+ "Epoch 2683/3000\n",
+ " - 39s - loss: 0.2365 - acc: 0.9707 - val_loss: 0.9343 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02683: val_acc did not improve from 0.86765\n",
+ "Epoch 2684/3000\n",
+ " - 39s - loss: 0.2335 - acc: 0.9734 - val_loss: 0.9685 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02684: val_acc did not improve from 0.86765\n",
+ "Epoch 2685/3000\n",
+ " - 39s - loss: 0.2233 - acc: 0.9755 - val_loss: 0.9516 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02685: val_acc did not improve from 0.86765\n",
+ "Epoch 2686/3000\n",
+ " - 39s - loss: 0.2297 - acc: 0.9739 - val_loss: 0.9428 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02686: val_acc did not improve from 0.86765\n",
+ "Epoch 2687/3000\n",
+ " - 39s - loss: 0.2323 - acc: 0.9727 - val_loss: 0.9537 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02687: val_acc did not improve from 0.86765\n",
+ "Epoch 2688/3000\n",
+ " - 39s - loss: 0.2285 - acc: 0.9739 - val_loss: 0.9591 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02688: val_acc did not improve from 0.86765\n",
+ "Epoch 2689/3000\n",
+ " - 39s - loss: 0.2355 - acc: 0.9722 - val_loss: 0.9604 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02689: val_acc did not improve from 0.86765\n",
+ "Epoch 2690/3000\n",
+ " - 39s - loss: 0.2258 - acc: 0.9748 - val_loss: 0.9452 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02690: val_acc did not improve from 0.86765\n",
+ "Epoch 2691/3000\n",
+ " - 39s - loss: 0.2362 - acc: 0.9727 - val_loss: 0.9565 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02691: val_acc did not improve from 0.86765\n",
+ "Epoch 2692/3000\n",
+ " - 39s - loss: 0.2396 - acc: 0.9710 - val_loss: 0.9376 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02692: val_acc did not improve from 0.86765\n",
+ "Epoch 2693/3000\n",
+ " - 39s - loss: 0.2388 - acc: 0.9725 - val_loss: 0.9516 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02693: val_acc did not improve from 0.86765\n",
+ "Epoch 2694/3000\n",
+ " - 39s - loss: 0.2372 - acc: 0.9721 - val_loss: 0.9493 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02694: val_acc did not improve from 0.86765\n",
+ "Epoch 2695/3000\n",
+ " - 39s - loss: 0.2377 - acc: 0.9722 - val_loss: 0.9456 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02695: val_acc did not improve from 0.86765\n",
+ "Epoch 2696/3000\n",
+ " - 39s - loss: 0.2368 - acc: 0.9737 - val_loss: 0.9571 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02696: val_acc did not improve from 0.86765\n",
+ "Epoch 2697/3000\n",
+ " - 39s - loss: 0.2282 - acc: 0.9745 - val_loss: 0.9480 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02697: val_acc did not improve from 0.86765\n",
+ "Epoch 2698/3000\n",
+ " - 39s - loss: 0.2303 - acc: 0.9722 - val_loss: 0.9625 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02698: val_acc did not improve from 0.86765\n",
+ "Epoch 2699/3000\n",
+ " - 39s - loss: 0.2290 - acc: 0.9728 - val_loss: 0.9528 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02699: val_acc did not improve from 0.86765\n",
+ "Epoch 2700/3000\n",
+ " - 39s - loss: 0.2361 - acc: 0.9719 - val_loss: 0.9597 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02700: val_acc did not improve from 0.86765\n",
+ "Epoch 2701/3000\n",
+ " - 39s - loss: 0.2233 - acc: 0.9712 - val_loss: 0.9371 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02701: val_acc did not improve from 0.86765\n",
+ "Epoch 2702/3000\n",
+ " - 39s - loss: 0.2393 - acc: 0.9698 - val_loss: 0.9522 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02702: val_acc did not improve from 0.86765\n",
+ "Epoch 2703/3000\n",
+ " - 39s - loss: 0.2281 - acc: 0.9725 - val_loss: 0.9647 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02703: val_acc did not improve from 0.86765\n",
+ "Epoch 2704/3000\n",
+ " - 39s - loss: 0.2312 - acc: 0.9736 - val_loss: 0.9552 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02704: val_acc did not improve from 0.86765\n",
+ "Epoch 2705/3000\n",
+ " - 39s - loss: 0.2316 - acc: 0.9742 - val_loss: 0.9813 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02705: val_acc did not improve from 0.86765\n",
+ "Epoch 2706/3000\n",
+ " - 39s - loss: 0.2355 - acc: 0.9733 - val_loss: 0.9436 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02706: val_acc did not improve from 0.86765\n",
+ "Epoch 2707/3000\n",
+ " - 39s - loss: 0.2252 - acc: 0.9752 - val_loss: 0.9429 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02707: val_acc did not improve from 0.86765\n",
+ "Epoch 2708/3000\n",
+ " - 39s - loss: 0.2308 - acc: 0.9737 - val_loss: 0.9871 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02708: val_acc did not improve from 0.86765\n",
+ "Epoch 2709/3000\n",
+ " - 39s - loss: 0.2356 - acc: 0.9719 - val_loss: 0.9443 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02709: val_acc did not improve from 0.86765\n",
+ "Epoch 2710/3000\n",
+ " - 39s - loss: 0.2421 - acc: 0.9724 - val_loss: 0.9841 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02710: val_acc did not improve from 0.86765\n",
+ "Epoch 2711/3000\n",
+ " - 39s - loss: 0.2298 - acc: 0.9722 - val_loss: 0.9216 - val_acc: 0.8614\n",
+ "\n",
+ "Epoch 02711: val_acc did not improve from 0.86765\n",
+ "Epoch 2712/3000\n",
+ " - 40s - loss: 0.2289 - acc: 0.9730 - val_loss: 0.9406 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02712: val_acc did not improve from 0.86765\n",
+ "Epoch 2713/3000\n",
+ " - 39s - loss: 0.2304 - acc: 0.9734 - val_loss: 0.9590 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02713: val_acc did not improve from 0.86765\n",
+ "Epoch 2714/3000\n",
+ " - 39s - loss: 0.2410 - acc: 0.9701 - val_loss: 0.9280 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02714: val_acc did not improve from 0.86765\n",
+ "Epoch 2715/3000\n",
+ " - 39s - loss: 0.2321 - acc: 0.9748 - val_loss: 0.9687 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02715: val_acc did not improve from 0.86765\n",
+ "Epoch 2716/3000\n",
+ " - 39s - loss: 0.2386 - acc: 0.9698 - val_loss: 0.9585 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02716: val_acc did not improve from 0.86765\n",
+ "Epoch 2717/3000\n",
+ " - 39s - loss: 0.2384 - acc: 0.9724 - val_loss: 0.9603 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02717: val_acc did not improve from 0.86765\n",
+ "Epoch 2718/3000\n",
+ " - 39s - loss: 0.2264 - acc: 0.9754 - val_loss: 0.9597 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02718: val_acc did not improve from 0.86765\n",
+ "Epoch 2719/3000\n",
+ " - 40s - loss: 0.2292 - acc: 0.9736 - val_loss: 0.9373 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 02719: val_acc did not improve from 0.86765\n",
+ "Epoch 2720/3000\n",
+ " - 39s - loss: 0.2347 - acc: 0.9716 - val_loss: 0.9777 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 02720: val_acc did not improve from 0.86765\n",
+ "Epoch 2721/3000\n",
+ " - 39s - loss: 0.2354 - acc: 0.9727 - val_loss: 0.9495 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02721: val_acc did not improve from 0.86765\n",
+ "Epoch 2722/3000\n",
+ " - 39s - loss: 0.2317 - acc: 0.9727 - val_loss: 0.9464 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02722: val_acc did not improve from 0.86765\n",
+ "Epoch 2723/3000\n",
+ " - 39s - loss: 0.2266 - acc: 0.9733 - val_loss: 0.9143 - val_acc: 0.8618\n",
+ "\n",
+ "Epoch 02723: val_acc did not improve from 0.86765\n",
+ "Epoch 2724/3000\n",
+ " - 39s - loss: 0.2252 - acc: 0.9710 - val_loss: 0.9688 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02724: val_acc did not improve from 0.86765\n",
+ "Epoch 2725/3000\n",
+ " - 39s - loss: 0.2332 - acc: 0.9739 - val_loss: 0.9580 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02725: val_acc did not improve from 0.86765\n",
+ "Epoch 2726/3000\n",
+ " - 39s - loss: 0.2284 - acc: 0.9740 - val_loss: 0.9702 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02726: val_acc did not improve from 0.86765\n",
+ "Epoch 2727/3000\n",
+ " - 39s - loss: 0.2313 - acc: 0.9736 - val_loss: 0.9765 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02727: val_acc did not improve from 0.86765\n",
+ "Epoch 2728/3000\n",
+ " - 40s - loss: 0.2333 - acc: 0.9715 - val_loss: 0.9656 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02728: val_acc did not improve from 0.86765\n",
+ "Epoch 2729/3000\n",
+ " - 39s - loss: 0.2292 - acc: 0.9746 - val_loss: 0.9784 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02729: val_acc did not improve from 0.86765\n",
+ "Epoch 2730/3000\n",
+ " - 39s - loss: 0.2341 - acc: 0.9724 - val_loss: 0.9792 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02730: val_acc did not improve from 0.86765\n",
+ "Epoch 2731/3000\n",
+ " - 39s - loss: 0.2259 - acc: 0.9724 - val_loss: 0.9777 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02731: val_acc did not improve from 0.86765\n",
+ "Epoch 2732/3000\n",
+ " - 39s - loss: 0.2327 - acc: 0.9725 - val_loss: 0.9455 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02732: val_acc did not improve from 0.86765\n",
+ "Epoch 2733/3000\n",
+ " - 39s - loss: 0.2322 - acc: 0.9730 - val_loss: 0.9740 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02733: val_acc did not improve from 0.86765\n",
+ "Epoch 2734/3000\n",
+ " - 39s - loss: 0.2301 - acc: 0.9751 - val_loss: 0.9649 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02734: val_acc did not improve from 0.86765\n",
+ "Epoch 2735/3000\n",
+ " - 39s - loss: 0.2362 - acc: 0.9722 - val_loss: 1.0015 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02735: val_acc did not improve from 0.86765\n",
+ "Epoch 2736/3000\n",
+ " - 39s - loss: 0.2335 - acc: 0.9730 - val_loss: 0.9797 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02736: val_acc did not improve from 0.86765\n",
+ "Epoch 2737/3000\n",
+ " - 40s - loss: 0.2323 - acc: 0.9733 - val_loss: 0.9533 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02737: val_acc did not improve from 0.86765\n",
+ "Epoch 2738/3000\n",
+ " - 39s - loss: 0.2390 - acc: 0.9698 - val_loss: 0.9201 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02738: val_acc did not improve from 0.86765\n",
+ "Epoch 2739/3000\n",
+ " - 39s - loss: 0.2349 - acc: 0.9730 - val_loss: 0.9690 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02739: val_acc did not improve from 0.86765\n",
+ "Epoch 2740/3000\n",
+ " - 39s - loss: 0.2298 - acc: 0.9743 - val_loss: 0.9390 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02740: val_acc did not improve from 0.86765\n",
+ "Epoch 2741/3000\n",
+ " - 39s - loss: 0.2262 - acc: 0.9740 - val_loss: 0.9592 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02741: val_acc did not improve from 0.86765\n",
+ "Epoch 2742/3000\n",
+ " - 40s - loss: 0.2363 - acc: 0.9727 - val_loss: 0.9982 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02742: val_acc did not improve from 0.86765\n",
+ "Epoch 2743/3000\n",
+ " - 39s - loss: 0.2293 - acc: 0.9737 - val_loss: 0.9353 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02743: val_acc did not improve from 0.86765\n",
+ "Epoch 2744/3000\n",
+ " - 39s - loss: 0.2383 - acc: 0.9725 - val_loss: 0.9360 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02744: val_acc did not improve from 0.86765\n",
+ "Epoch 2745/3000\n",
+ " - 38s - loss: 0.2312 - acc: 0.9745 - val_loss: 0.9840 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02745: val_acc did not improve from 0.86765\n",
+ "Epoch 2746/3000\n",
+ " - 39s - loss: 0.2343 - acc: 0.9725 - val_loss: 0.9587 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02746: val_acc did not improve from 0.86765\n",
+ "Epoch 2747/3000\n",
+ " - 39s - loss: 0.2340 - acc: 0.9686 - val_loss: 0.9858 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02747: val_acc did not improve from 0.86765\n",
+ "Epoch 2748/3000\n",
+ " - 39s - loss: 0.2379 - acc: 0.9707 - val_loss: 0.9561 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02748: val_acc did not improve from 0.86765\n",
+ "Epoch 2749/3000\n",
+ " - 39s - loss: 0.2385 - acc: 0.9707 - val_loss: 0.9698 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02749: val_acc did not improve from 0.86765\n",
+ "Epoch 2750/3000\n",
+ " - 39s - loss: 0.2313 - acc: 0.9716 - val_loss: 0.9570 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02750: val_acc did not improve from 0.86765\n",
+ "Epoch 2751/3000\n",
+ " - 39s - loss: 0.2345 - acc: 0.9716 - val_loss: 0.9611 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02751: val_acc did not improve from 0.86765\n",
+ "Epoch 2752/3000\n",
+ " - 40s - loss: 0.2372 - acc: 0.9727 - val_loss: 0.9495 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02752: val_acc did not improve from 0.86765\n",
+ "Epoch 2753/3000\n",
+ " - 39s - loss: 0.2393 - acc: 0.9703 - val_loss: 0.9457 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 02753: val_acc did not improve from 0.86765\n",
+ "Epoch 2754/3000\n",
+ " - 39s - loss: 0.2333 - acc: 0.9730 - val_loss: 0.9610 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02754: val_acc did not improve from 0.86765\n",
+ "Epoch 2755/3000\n",
+ " - 39s - loss: 0.2258 - acc: 0.9754 - val_loss: 0.9480 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02755: val_acc did not improve from 0.86765\n",
+ "Epoch 2756/3000\n",
+ " - 39s - loss: 0.2349 - acc: 0.9733 - val_loss: 0.9723 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 02756: val_acc did not improve from 0.86765\n",
+ "Epoch 2757/3000\n",
+ " - 39s - loss: 0.2274 - acc: 0.9749 - val_loss: 0.9482 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02757: val_acc did not improve from 0.86765\n",
+ "Epoch 2758/3000\n",
+ " - 39s - loss: 0.2399 - acc: 0.9716 - val_loss: 0.9574 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02758: val_acc did not improve from 0.86765\n",
+ "Epoch 2759/3000\n",
+ " - 39s - loss: 0.2212 - acc: 0.9769 - val_loss: 0.9981 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 02759: val_acc did not improve from 0.86765\n",
+ "Epoch 2760/3000\n",
+ " - 39s - loss: 0.2356 - acc: 0.9716 - val_loss: 0.9675 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02760: val_acc did not improve from 0.86765\n",
+ "Epoch 2761/3000\n",
+ " - 39s - loss: 0.2339 - acc: 0.9752 - val_loss: 0.9587 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02761: val_acc did not improve from 0.86765\n",
+ "Epoch 2762/3000\n",
+ " - 39s - loss: 0.2331 - acc: 0.9727 - val_loss: 0.9647 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02762: val_acc did not improve from 0.86765\n",
+ "Epoch 2763/3000\n",
+ " - 39s - loss: 0.2253 - acc: 0.9742 - val_loss: 0.9835 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 02763: val_acc did not improve from 0.86765\n",
+ "Epoch 2764/3000\n",
+ " - 39s - loss: 0.2326 - acc: 0.9725 - val_loss: 0.9442 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02764: val_acc did not improve from 0.86765\n",
+ "Epoch 2765/3000\n",
+ " - 39s - loss: 0.2255 - acc: 0.9739 - val_loss: 0.9449 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02765: val_acc did not improve from 0.86765\n",
+ "Epoch 2766/3000\n",
+ " - 39s - loss: 0.2257 - acc: 0.9734 - val_loss: 0.9768 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02766: val_acc did not improve from 0.86765\n",
+ "Epoch 2767/3000\n",
+ " - 39s - loss: 0.2390 - acc: 0.9722 - val_loss: 0.9520 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02767: val_acc did not improve from 0.86765\n",
+ "Epoch 2768/3000\n",
+ " - 39s - loss: 0.2307 - acc: 0.9725 - val_loss: 0.9682 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02768: val_acc did not improve from 0.86765\n",
+ "Epoch 2769/3000\n",
+ " - 39s - loss: 0.2255 - acc: 0.9754 - val_loss: 0.9531 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02769: val_acc did not improve from 0.86765\n",
+ "Epoch 2770/3000\n",
+ " - 39s - loss: 0.2317 - acc: 0.9721 - val_loss: 0.9927 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02770: val_acc did not improve from 0.86765\n",
+ "Epoch 2771/3000\n",
+ " - 39s - loss: 0.2404 - acc: 0.9712 - val_loss: 0.9565 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02771: val_acc did not improve from 0.86765\n",
+ "Epoch 2772/3000\n",
+ " - 39s - loss: 0.2238 - acc: 0.9730 - val_loss: 0.9801 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02772: val_acc did not improve from 0.86765\n",
+ "Epoch 2773/3000\n",
+ " - 39s - loss: 0.2399 - acc: 0.9725 - val_loss: 1.0138 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02773: val_acc did not improve from 0.86765\n",
+ "Epoch 2774/3000\n",
+ " - 39s - loss: 0.2293 - acc: 0.9757 - val_loss: 0.9712 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02774: val_acc did not improve from 0.86765\n",
+ "Epoch 2775/3000\n",
+ " - 39s - loss: 0.2332 - acc: 0.9737 - val_loss: 0.9653 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02775: val_acc did not improve from 0.86765\n",
+ "Epoch 2776/3000\n",
+ " - 39s - loss: 0.2315 - acc: 0.9737 - val_loss: 0.9522 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02776: val_acc did not improve from 0.86765\n",
+ "Epoch 2777/3000\n",
+ " - 39s - loss: 0.2372 - acc: 0.9703 - val_loss: 0.9803 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02777: val_acc did not improve from 0.86765\n",
+ "Epoch 2778/3000\n",
+ " - 39s - loss: 0.2357 - acc: 0.9721 - val_loss: 0.9863 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02778: val_acc did not improve from 0.86765\n",
+ "Epoch 2779/3000\n",
+ " - 39s - loss: 0.2375 - acc: 0.9698 - val_loss: 0.9741 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02779: val_acc did not improve from 0.86765\n",
+ "Epoch 2780/3000\n",
+ " - 39s - loss: 0.2430 - acc: 0.9694 - val_loss: 0.9541 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02780: val_acc did not improve from 0.86765\n",
+ "Epoch 2781/3000\n",
+ " - 39s - loss: 0.2271 - acc: 0.9725 - val_loss: 0.9639 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02781: val_acc did not improve from 0.86765\n",
+ "Epoch 2782/3000\n",
+ " - 39s - loss: 0.2245 - acc: 0.9709 - val_loss: 0.9456 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02782: val_acc did not improve from 0.86765\n",
+ "Epoch 2783/3000\n",
+ " - 39s - loss: 0.2386 - acc: 0.9706 - val_loss: 0.9717 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02783: val_acc did not improve from 0.86765\n",
+ "Epoch 2784/3000\n",
+ " - 39s - loss: 0.2321 - acc: 0.9728 - val_loss: 0.9664 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02784: val_acc did not improve from 0.86765\n",
+ "Epoch 2785/3000\n",
+ " - 39s - loss: 0.2333 - acc: 0.9724 - val_loss: 0.9771 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02785: val_acc did not improve from 0.86765\n",
+ "Epoch 2786/3000\n",
+ " - 39s - loss: 0.2416 - acc: 0.9716 - val_loss: 0.9461 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02786: val_acc did not improve from 0.86765\n",
+ "Epoch 2787/3000\n",
+ " - 39s - loss: 0.2264 - acc: 0.9742 - val_loss: 0.9749 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02787: val_acc did not improve from 0.86765\n",
+ "Epoch 2788/3000\n",
+ " - 39s - loss: 0.2315 - acc: 0.9739 - val_loss: 0.9714 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02788: val_acc did not improve from 0.86765\n",
+ "Epoch 2789/3000\n",
+ " - 39s - loss: 0.2305 - acc: 0.9737 - val_loss: 0.9372 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02789: val_acc did not improve from 0.86765\n",
+ "Epoch 2790/3000\n",
+ " - 39s - loss: 0.2289 - acc: 0.9752 - val_loss: 0.9451 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02790: val_acc did not improve from 0.86765\n",
+ "Epoch 2791/3000\n",
+ " - 40s - loss: 0.2313 - acc: 0.9739 - val_loss: 0.9513 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02791: val_acc did not improve from 0.86765\n",
+ "Epoch 2792/3000\n",
+ " - 39s - loss: 0.2336 - acc: 0.9730 - val_loss: 0.9678 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02792: val_acc did not improve from 0.86765\n",
+ "Epoch 2793/3000\n",
+ " - 39s - loss: 0.2345 - acc: 0.9712 - val_loss: 0.9570 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02793: val_acc did not improve from 0.86765\n",
+ "Epoch 2794/3000\n",
+ " - 39s - loss: 0.2396 - acc: 0.9694 - val_loss: 0.9643 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02794: val_acc did not improve from 0.86765\n",
+ "Epoch 2795/3000\n",
+ " - 39s - loss: 0.2318 - acc: 0.9721 - val_loss: 0.9542 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 02795: val_acc did not improve from 0.86765\n",
+ "Epoch 2796/3000\n",
+ " - 39s - loss: 0.2232 - acc: 0.9736 - val_loss: 0.9679 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02796: val_acc did not improve from 0.86765\n",
+ "Epoch 2797/3000\n",
+ " - 39s - loss: 0.2313 - acc: 0.9733 - val_loss: 0.9340 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02797: val_acc did not improve from 0.86765\n",
+ "Epoch 2798/3000\n",
+ " - 39s - loss: 0.2317 - acc: 0.9724 - val_loss: 0.9634 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02798: val_acc did not improve from 0.86765\n",
+ "Epoch 2799/3000\n",
+ " - 39s - loss: 0.2295 - acc: 0.9710 - val_loss: 0.9558 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02799: val_acc did not improve from 0.86765\n",
+ "Epoch 2800/3000\n",
+ " - 39s - loss: 0.2421 - acc: 0.9704 - val_loss: 0.9950 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02800: val_acc did not improve from 0.86765\n",
+ "Epoch 2801/3000\n",
+ " - 39s - loss: 0.2373 - acc: 0.9703 - val_loss: 0.9898 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02801: val_acc did not improve from 0.86765\n",
+ "Epoch 2802/3000\n",
+ " - 39s - loss: 0.2277 - acc: 0.9768 - val_loss: 0.9946 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02802: val_acc did not improve from 0.86765\n",
+ "Epoch 2803/3000\n",
+ " - 39s - loss: 0.2334 - acc: 0.9734 - val_loss: 0.9753 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02803: val_acc did not improve from 0.86765\n",
+ "Epoch 2804/3000\n",
+ " - 40s - loss: 0.2256 - acc: 0.9743 - val_loss: 0.9590 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02804: val_acc did not improve from 0.86765\n",
+ "Epoch 2805/3000\n",
+ " - 39s - loss: 0.2249 - acc: 0.9752 - val_loss: 0.9551 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02805: val_acc did not improve from 0.86765\n",
+ "Epoch 2806/3000\n",
+ " - 39s - loss: 0.2242 - acc: 0.9749 - val_loss: 0.9682 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02806: val_acc did not improve from 0.86765\n",
+ "Epoch 2807/3000\n",
+ " - 39s - loss: 0.2260 - acc: 0.9752 - val_loss: 0.9732 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02807: val_acc did not improve from 0.86765\n",
+ "Epoch 2808/3000\n",
+ " - 39s - loss: 0.2393 - acc: 0.9727 - val_loss: 0.9725 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02808: val_acc did not improve from 0.86765\n",
+ "Epoch 2809/3000\n",
+ " - 39s - loss: 0.2250 - acc: 0.9736 - val_loss: 0.9644 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02809: val_acc did not improve from 0.86765\n",
+ "Epoch 2810/3000\n",
+ " - 39s - loss: 0.2286 - acc: 0.9718 - val_loss: 0.9761 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02810: val_acc did not improve from 0.86765\n",
+ "Epoch 2811/3000\n",
+ " - 39s - loss: 0.2235 - acc: 0.9731 - val_loss: 0.9830 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02811: val_acc did not improve from 0.86765\n",
+ "Epoch 2812/3000\n",
+ " - 39s - loss: 0.2412 - acc: 0.9722 - val_loss: 0.9622 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02812: val_acc did not improve from 0.86765\n",
+ "Epoch 2813/3000\n",
+ " - 39s - loss: 0.2340 - acc: 0.9725 - val_loss: 0.9395 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02813: val_acc did not improve from 0.86765\n",
+ "Epoch 2814/3000\n",
+ " - 39s - loss: 0.2344 - acc: 0.9715 - val_loss: 0.9633 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02814: val_acc did not improve from 0.86765\n",
+ "Epoch 2815/3000\n",
+ " - 39s - loss: 0.2331 - acc: 0.9715 - val_loss: 0.9530 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02815: val_acc did not improve from 0.86765\n",
+ "Epoch 2816/3000\n",
+ " - 39s - loss: 0.2236 - acc: 0.9737 - val_loss: 0.9386 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02816: val_acc did not improve from 0.86765\n",
+ "Epoch 2817/3000\n",
+ " - 39s - loss: 0.2321 - acc: 0.9734 - val_loss: 0.9752 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02817: val_acc did not improve from 0.86765\n",
+ "Epoch 2818/3000\n",
+ " - 39s - loss: 0.2249 - acc: 0.9748 - val_loss: 0.9716 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02818: val_acc did not improve from 0.86765\n",
+ "Epoch 2819/3000\n",
+ " - 39s - loss: 0.2331 - acc: 0.9734 - val_loss: 0.9620 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02819: val_acc did not improve from 0.86765\n",
+ "Epoch 2820/3000\n",
+ " - 39s - loss: 0.2283 - acc: 0.9742 - val_loss: 0.9714 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02820: val_acc did not improve from 0.86765\n",
+ "Epoch 2821/3000\n",
+ " - 39s - loss: 0.2234 - acc: 0.9749 - val_loss: 0.9395 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 02821: val_acc did not improve from 0.86765\n",
+ "Epoch 2822/3000\n",
+ " - 39s - loss: 0.2314 - acc: 0.9742 - val_loss: 0.9568 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02822: val_acc did not improve from 0.86765\n",
+ "Epoch 2823/3000\n",
+ " - 39s - loss: 0.2337 - acc: 0.9722 - val_loss: 0.9408 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02823: val_acc did not improve from 0.86765\n",
+ "Epoch 2824/3000\n",
+ " - 39s - loss: 0.2421 - acc: 0.9709 - val_loss: 0.9897 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02824: val_acc did not improve from 0.86765\n",
+ "Epoch 2825/3000\n",
+ " - 40s - loss: 0.2299 - acc: 0.9742 - val_loss: 0.9846 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02825: val_acc did not improve from 0.86765\n",
+ "Epoch 2826/3000\n",
+ " - 39s - loss: 0.2317 - acc: 0.9730 - val_loss: 0.9619 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02826: val_acc did not improve from 0.86765\n",
+ "Epoch 2827/3000\n",
+ " - 39s - loss: 0.2388 - acc: 0.9719 - val_loss: 0.9683 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02827: val_acc did not improve from 0.86765\n",
+ "Epoch 2828/3000\n",
+ " - 39s - loss: 0.2302 - acc: 0.9736 - val_loss: 0.9739 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02828: val_acc did not improve from 0.86765\n",
+ "Epoch 2829/3000\n",
+ " - 39s - loss: 0.2337 - acc: 0.9721 - val_loss: 0.9677 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 02829: val_acc did not improve from 0.86765\n",
+ "Epoch 2830/3000\n",
+ " - 39s - loss: 0.2292 - acc: 0.9725 - val_loss: 0.9753 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02830: val_acc did not improve from 0.86765\n",
+ "Epoch 2831/3000\n",
+ " - 39s - loss: 0.2331 - acc: 0.9722 - val_loss: 0.9577 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02831: val_acc did not improve from 0.86765\n",
+ "Epoch 2832/3000\n",
+ " - 39s - loss: 0.2383 - acc: 0.9719 - val_loss: 0.9650 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02832: val_acc did not improve from 0.86765\n",
+ "Epoch 2833/3000\n",
+ " - 39s - loss: 0.2330 - acc: 0.9743 - val_loss: 0.9495 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02833: val_acc did not improve from 0.86765\n",
+ "Epoch 2834/3000\n",
+ " - 39s - loss: 0.2262 - acc: 0.9748 - val_loss: 0.9674 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02834: val_acc did not improve from 0.86765\n",
+ "Epoch 2835/3000\n",
+ " - 39s - loss: 0.2274 - acc: 0.9743 - val_loss: 0.9432 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02835: val_acc did not improve from 0.86765\n",
+ "Epoch 2836/3000\n",
+ " - 39s - loss: 0.2296 - acc: 0.9719 - val_loss: 0.9380 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 02836: val_acc did not improve from 0.86765\n",
+ "Epoch 2837/3000\n",
+ " - 40s - loss: 0.2370 - acc: 0.9719 - val_loss: 0.9320 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02837: val_acc did not improve from 0.86765\n",
+ "Epoch 2838/3000\n",
+ " - 39s - loss: 0.2253 - acc: 0.9761 - val_loss: 0.9912 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02838: val_acc did not improve from 0.86765\n",
+ "Epoch 2839/3000\n",
+ " - 39s - loss: 0.2315 - acc: 0.9745 - val_loss: 0.9500 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02839: val_acc did not improve from 0.86765\n",
+ "Epoch 2840/3000\n",
+ " - 39s - loss: 0.2382 - acc: 0.9712 - val_loss: 0.9481 - val_acc: 0.8614\n",
+ "\n",
+ "Epoch 02840: val_acc did not improve from 0.86765\n",
+ "Epoch 2841/3000\n",
+ " - 39s - loss: 0.2331 - acc: 0.9737 - val_loss: 0.9476 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02841: val_acc did not improve from 0.86765\n",
+ "Epoch 2842/3000\n",
+ " - 39s - loss: 0.2268 - acc: 0.9746 - val_loss: 0.9385 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 02842: val_acc did not improve from 0.86765\n",
+ "Epoch 2843/3000\n",
+ " - 39s - loss: 0.2340 - acc: 0.9701 - val_loss: 0.9426 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02843: val_acc did not improve from 0.86765\n",
+ "Epoch 2844/3000\n",
+ " - 40s - loss: 0.2358 - acc: 0.9721 - val_loss: 0.9756 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02844: val_acc did not improve from 0.86765\n",
+ "Epoch 2845/3000\n",
+ " - 40s - loss: 0.2263 - acc: 0.9740 - val_loss: 0.9541 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02845: val_acc did not improve from 0.86765\n",
+ "Epoch 2846/3000\n",
+ " - 39s - loss: 0.2284 - acc: 0.9733 - val_loss: 0.9917 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02846: val_acc did not improve from 0.86765\n",
+ "Epoch 2847/3000\n",
+ " - 39s - loss: 0.2190 - acc: 0.9771 - val_loss: 0.9949 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02847: val_acc did not improve from 0.86765\n",
+ "Epoch 2848/3000\n",
+ " - 39s - loss: 0.2227 - acc: 0.9761 - val_loss: 0.9788 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 02848: val_acc did not improve from 0.86765\n",
+ "Epoch 2849/3000\n",
+ " - 39s - loss: 0.2323 - acc: 0.9734 - val_loss: 0.9942 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02849: val_acc did not improve from 0.86765\n",
+ "Epoch 2850/3000\n",
+ " - 39s - loss: 0.2353 - acc: 0.9722 - val_loss: 0.9411 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02850: val_acc did not improve from 0.86765\n",
+ "Epoch 2851/3000\n",
+ " - 39s - loss: 0.2282 - acc: 0.9715 - val_loss: 0.9672 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02851: val_acc did not improve from 0.86765\n",
+ "Epoch 2852/3000\n",
+ " - 39s - loss: 0.2266 - acc: 0.9740 - val_loss: 0.9932 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02852: val_acc did not improve from 0.86765\n",
+ "Epoch 2853/3000\n",
+ " - 39s - loss: 0.2207 - acc: 0.9743 - val_loss: 0.9709 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02853: val_acc did not improve from 0.86765\n",
+ "Epoch 2854/3000\n",
+ " - 39s - loss: 0.2325 - acc: 0.9709 - val_loss: 1.0075 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 02854: val_acc did not improve from 0.86765\n",
+ "Epoch 2855/3000\n",
+ " - 40s - loss: 0.2341 - acc: 0.9712 - val_loss: 0.9482 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02855: val_acc did not improve from 0.86765\n",
+ "Epoch 2856/3000\n",
+ " - 39s - loss: 0.2214 - acc: 0.9777 - val_loss: 0.9615 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02856: val_acc did not improve from 0.86765\n",
+ "Epoch 2857/3000\n",
+ " - 39s - loss: 0.2329 - acc: 0.9742 - val_loss: 0.9856 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02857: val_acc did not improve from 0.86765\n",
+ "Epoch 2858/3000\n",
+ " - 39s - loss: 0.2344 - acc: 0.9709 - val_loss: 0.9973 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 02858: val_acc did not improve from 0.86765\n",
+ "Epoch 2859/3000\n",
+ " - 39s - loss: 0.2299 - acc: 0.9722 - val_loss: 0.9504 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02859: val_acc did not improve from 0.86765\n",
+ "Epoch 2860/3000\n",
+ " - 39s - loss: 0.2370 - acc: 0.9703 - val_loss: 0.9648 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02860: val_acc did not improve from 0.86765\n",
+ "Epoch 2861/3000\n",
+ " - 39s - loss: 0.2385 - acc: 0.9707 - val_loss: 0.9778 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02861: val_acc did not improve from 0.86765\n",
+ "Epoch 2862/3000\n",
+ " - 39s - loss: 0.2300 - acc: 0.9746 - val_loss: 1.0015 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02862: val_acc did not improve from 0.86765\n",
+ "Epoch 2863/3000\n",
+ " - 39s - loss: 0.2417 - acc: 0.9700 - val_loss: 0.9629 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02863: val_acc did not improve from 0.86765\n",
+ "Epoch 2864/3000\n",
+ " - 39s - loss: 0.2299 - acc: 0.9739 - val_loss: 0.9532 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02864: val_acc did not improve from 0.86765\n",
+ "Epoch 2865/3000\n",
+ " - 39s - loss: 0.2311 - acc: 0.9733 - val_loss: 0.9633 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02865: val_acc did not improve from 0.86765\n",
+ "Epoch 2866/3000\n",
+ " - 39s - loss: 0.2300 - acc: 0.9727 - val_loss: 0.9861 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02866: val_acc did not improve from 0.86765\n",
+ "Epoch 2867/3000\n",
+ " - 39s - loss: 0.2259 - acc: 0.9733 - val_loss: 1.0066 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 02867: val_acc did not improve from 0.86765\n",
+ "Epoch 2868/3000\n",
+ " - 39s - loss: 0.2293 - acc: 0.9728 - val_loss: 0.9920 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02868: val_acc did not improve from 0.86765\n",
+ "Epoch 2869/3000\n",
+ " - 39s - loss: 0.2339 - acc: 0.9718 - val_loss: 0.9812 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02869: val_acc did not improve from 0.86765\n",
+ "Epoch 2870/3000\n",
+ " - 39s - loss: 0.2257 - acc: 0.9751 - val_loss: 0.9495 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02870: val_acc did not improve from 0.86765\n",
+ "Epoch 2871/3000\n",
+ " - 39s - loss: 0.2357 - acc: 0.9725 - val_loss: 0.9786 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02871: val_acc did not improve from 0.86765\n",
+ "Epoch 2872/3000\n",
+ " - 40s - loss: 0.2267 - acc: 0.9752 - val_loss: 0.9455 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 02872: val_acc did not improve from 0.86765\n",
+ "Epoch 2873/3000\n",
+ " - 39s - loss: 0.2247 - acc: 0.9740 - val_loss: 0.9548 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02873: val_acc did not improve from 0.86765\n",
+ "Epoch 2874/3000\n",
+ " - 39s - loss: 0.2227 - acc: 0.9749 - val_loss: 0.9500 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02874: val_acc did not improve from 0.86765\n",
+ "Epoch 2875/3000\n",
+ " - 39s - loss: 0.2367 - acc: 0.9718 - val_loss: 0.9626 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02875: val_acc did not improve from 0.86765\n",
+ "Epoch 2876/3000\n",
+ " - 39s - loss: 0.2267 - acc: 0.9736 - val_loss: 0.9359 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02876: val_acc did not improve from 0.86765\n",
+ "Epoch 2877/3000\n",
+ " - 39s - loss: 0.2332 - acc: 0.9718 - val_loss: 0.9731 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02877: val_acc did not improve from 0.86765\n",
+ "Epoch 2878/3000\n",
+ " - 39s - loss: 0.2306 - acc: 0.9742 - val_loss: 0.9859 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02878: val_acc did not improve from 0.86765\n",
+ "Epoch 2879/3000\n",
+ " - 39s - loss: 0.2368 - acc: 0.9718 - val_loss: 0.9794 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02879: val_acc did not improve from 0.86765\n",
+ "Epoch 2880/3000\n",
+ " - 39s - loss: 0.2334 - acc: 0.9731 - val_loss: 0.9678 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02880: val_acc did not improve from 0.86765\n",
+ "Epoch 2881/3000\n",
+ " - 39s - loss: 0.2358 - acc: 0.9710 - val_loss: 0.9571 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02881: val_acc did not improve from 0.86765\n",
+ "Epoch 2882/3000\n",
+ " - 39s - loss: 0.2381 - acc: 0.9712 - val_loss: 0.9580 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02882: val_acc did not improve from 0.86765\n",
+ "Epoch 2883/3000\n",
+ " - 39s - loss: 0.2240 - acc: 0.9755 - val_loss: 0.9550 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02883: val_acc did not improve from 0.86765\n",
+ "Epoch 2884/3000\n",
+ " - 39s - loss: 0.2268 - acc: 0.9754 - val_loss: 0.9680 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02884: val_acc did not improve from 0.86765\n",
+ "Epoch 2885/3000\n",
+ " - 39s - loss: 0.2372 - acc: 0.9728 - val_loss: 0.9495 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02885: val_acc did not improve from 0.86765\n",
+ "Epoch 2886/3000\n",
+ " - 39s - loss: 0.2321 - acc: 0.9740 - val_loss: 0.9657 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02886: val_acc did not improve from 0.86765\n",
+ "Epoch 2887/3000\n",
+ " - 39s - loss: 0.2373 - acc: 0.9737 - val_loss: 0.9532 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02887: val_acc did not improve from 0.86765\n",
+ "Epoch 2888/3000\n",
+ " - 39s - loss: 0.2358 - acc: 0.9740 - val_loss: 0.9484 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02888: val_acc did not improve from 0.86765\n",
+ "Epoch 2889/3000\n",
+ " - 40s - loss: 0.2341 - acc: 0.9728 - val_loss: 0.9350 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02889: val_acc did not improve from 0.86765\n",
+ "Epoch 2890/3000\n",
+ " - 39s - loss: 0.2317 - acc: 0.9716 - val_loss: 0.9448 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02890: val_acc did not improve from 0.86765\n",
+ "Epoch 2891/3000\n",
+ " - 39s - loss: 0.2259 - acc: 0.9743 - val_loss: 0.9279 - val_acc: 0.8614\n",
+ "\n",
+ "Epoch 02891: val_acc did not improve from 0.86765\n",
+ "Epoch 2892/3000\n",
+ " - 39s - loss: 0.2273 - acc: 0.9754 - val_loss: 0.9367 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02892: val_acc did not improve from 0.86765\n",
+ "Epoch 2893/3000\n",
+ " - 39s - loss: 0.2318 - acc: 0.9745 - val_loss: 0.9468 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02893: val_acc did not improve from 0.86765\n",
+ "Epoch 2894/3000\n",
+ " - 39s - loss: 0.2291 - acc: 0.9725 - val_loss: 0.9390 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02894: val_acc did not improve from 0.86765\n",
+ "Epoch 2895/3000\n",
+ " - 39s - loss: 0.2332 - acc: 0.9704 - val_loss: 0.9466 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02895: val_acc did not improve from 0.86765\n",
+ "Epoch 2896/3000\n",
+ " - 39s - loss: 0.2315 - acc: 0.9739 - val_loss: 0.9739 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02896: val_acc did not improve from 0.86765\n",
+ "Epoch 2897/3000\n",
+ " - 39s - loss: 0.2418 - acc: 0.9716 - val_loss: 0.9661 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 02897: val_acc did not improve from 0.86765\n",
+ "Epoch 2898/3000\n",
+ " - 39s - loss: 0.2241 - acc: 0.9749 - val_loss: 0.9704 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02898: val_acc did not improve from 0.86765\n",
+ "Epoch 2899/3000\n",
+ " - 40s - loss: 0.2230 - acc: 0.9751 - val_loss: 0.9899 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02899: val_acc did not improve from 0.86765\n",
+ "Epoch 2900/3000\n",
+ " - 39s - loss: 0.2380 - acc: 0.9710 - val_loss: 0.9621 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02900: val_acc did not improve from 0.86765\n",
+ "Epoch 2901/3000\n",
+ " - 39s - loss: 0.2389 - acc: 0.9718 - val_loss: 0.9614 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02901: val_acc did not improve from 0.86765\n",
+ "Epoch 2902/3000\n",
+ " - 39s - loss: 0.2225 - acc: 0.9771 - val_loss: 0.9590 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02902: val_acc did not improve from 0.86765\n",
+ "Epoch 2903/3000\n",
+ " - 39s - loss: 0.2391 - acc: 0.9727 - val_loss: 0.9474 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02903: val_acc did not improve from 0.86765\n",
+ "Epoch 2904/3000\n",
+ " - 39s - loss: 0.2265 - acc: 0.9748 - val_loss: 0.9567 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02904: val_acc did not improve from 0.86765\n",
+ "Epoch 2905/3000\n",
+ " - 39s - loss: 0.2381 - acc: 0.9718 - val_loss: 0.9852 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 02905: val_acc did not improve from 0.86765\n",
+ "Epoch 2906/3000\n",
+ " - 39s - loss: 0.2324 - acc: 0.9730 - val_loss: 0.9607 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02906: val_acc did not improve from 0.86765\n",
+ "Epoch 2907/3000\n",
+ " - 39s - loss: 0.2314 - acc: 0.9734 - val_loss: 1.0116 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 02907: val_acc did not improve from 0.86765\n",
+ "Epoch 2908/3000\n",
+ " - 39s - loss: 0.2307 - acc: 0.9724 - val_loss: 0.9805 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02908: val_acc did not improve from 0.86765\n",
+ "Epoch 2909/3000\n",
+ " - 39s - loss: 0.2295 - acc: 0.9727 - val_loss: 0.9573 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02909: val_acc did not improve from 0.86765\n",
+ "Epoch 2910/3000\n",
+ " - 39s - loss: 0.2263 - acc: 0.9748 - val_loss: 0.9568 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02910: val_acc did not improve from 0.86765\n",
+ "Epoch 2911/3000\n",
+ " - 39s - loss: 0.2260 - acc: 0.9745 - val_loss: 0.9586 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02911: val_acc did not improve from 0.86765\n",
+ "Epoch 2912/3000\n",
+ " - 39s - loss: 0.2250 - acc: 0.9755 - val_loss: 0.9495 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02912: val_acc did not improve from 0.86765\n",
+ "Epoch 2913/3000\n",
+ " - 40s - loss: 0.2362 - acc: 0.9742 - val_loss: 0.9499 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02913: val_acc did not improve from 0.86765\n",
+ "Epoch 2914/3000\n",
+ " - 39s - loss: 0.2337 - acc: 0.9728 - val_loss: 0.9913 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02914: val_acc did not improve from 0.86765\n",
+ "Epoch 2915/3000\n",
+ " - 39s - loss: 0.2312 - acc: 0.9731 - val_loss: 0.9826 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02915: val_acc did not improve from 0.86765\n",
+ "Epoch 2916/3000\n",
+ " - 39s - loss: 0.2324 - acc: 0.9743 - val_loss: 0.9496 - val_acc: 0.8618\n",
+ "\n",
+ "Epoch 02916: val_acc did not improve from 0.86765\n",
+ "Epoch 2917/3000\n",
+ " - 39s - loss: 0.2273 - acc: 0.9731 - val_loss: 0.9620 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02917: val_acc did not improve from 0.86765\n",
+ "Epoch 2918/3000\n",
+ " - 39s - loss: 0.2282 - acc: 0.9734 - val_loss: 0.9752 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02918: val_acc did not improve from 0.86765\n",
+ "Epoch 2919/3000\n",
+ " - 39s - loss: 0.2291 - acc: 0.9731 - val_loss: 0.9876 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02919: val_acc did not improve from 0.86765\n",
+ "Epoch 2920/3000\n",
+ " - 39s - loss: 0.2338 - acc: 0.9715 - val_loss: 0.9653 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02920: val_acc did not improve from 0.86765\n",
+ "Epoch 2921/3000\n",
+ " - 39s - loss: 0.2340 - acc: 0.9730 - val_loss: 0.9607 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02921: val_acc did not improve from 0.86765\n",
+ "Epoch 2922/3000\n",
+ " - 39s - loss: 0.2349 - acc: 0.9716 - val_loss: 0.9667 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02922: val_acc did not improve from 0.86765\n",
+ "Epoch 2923/3000\n",
+ " - 39s - loss: 0.2267 - acc: 0.9742 - val_loss: 0.9733 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02923: val_acc did not improve from 0.86765\n",
+ "Epoch 2924/3000\n",
+ " - 39s - loss: 0.2456 - acc: 0.9675 - val_loss: 0.9573 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02924: val_acc did not improve from 0.86765\n",
+ "Epoch 2925/3000\n",
+ " - 39s - loss: 0.2346 - acc: 0.9733 - val_loss: 0.9471 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02925: val_acc did not improve from 0.86765\n",
+ "Epoch 2926/3000\n",
+ " - 39s - loss: 0.2264 - acc: 0.9737 - val_loss: 0.9529 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02926: val_acc did not improve from 0.86765\n",
+ "Epoch 2927/3000\n",
+ " - 39s - loss: 0.2306 - acc: 0.9733 - val_loss: 0.9598 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02927: val_acc did not improve from 0.86765\n",
+ "Epoch 2928/3000\n",
+ " - 39s - loss: 0.2318 - acc: 0.9727 - val_loss: 0.9442 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02928: val_acc did not improve from 0.86765\n",
+ "Epoch 2929/3000\n",
+ " - 39s - loss: 0.2328 - acc: 0.9710 - val_loss: 0.9497 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02929: val_acc did not improve from 0.86765\n",
+ "Epoch 2930/3000\n",
+ " - 39s - loss: 0.2325 - acc: 0.9728 - val_loss: 0.9955 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02930: val_acc did not improve from 0.86765\n",
+ "Epoch 2931/3000\n",
+ " - 39s - loss: 0.2299 - acc: 0.9722 - val_loss: 0.9671 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02931: val_acc did not improve from 0.86765\n",
+ "Epoch 2932/3000\n",
+ " - 39s - loss: 0.2281 - acc: 0.9737 - val_loss: 0.9763 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02932: val_acc did not improve from 0.86765\n",
+ "Epoch 2933/3000\n",
+ " - 39s - loss: 0.2330 - acc: 0.9730 - val_loss: 0.9726 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02933: val_acc did not improve from 0.86765\n",
+ "Epoch 2934/3000\n",
+ " - 39s - loss: 0.2255 - acc: 0.9733 - val_loss: 0.9675 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02934: val_acc did not improve from 0.86765\n",
+ "Epoch 2935/3000\n",
+ " - 39s - loss: 0.2258 - acc: 0.9730 - val_loss: 0.9709 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02935: val_acc did not improve from 0.86765\n",
+ "Epoch 2936/3000\n",
+ " - 39s - loss: 0.2321 - acc: 0.9733 - val_loss: 0.9410 - val_acc: 0.8618\n",
+ "\n",
+ "Epoch 02936: val_acc did not improve from 0.86765\n",
+ "Epoch 2937/3000\n",
+ " - 39s - loss: 0.2294 - acc: 0.9739 - val_loss: 0.9431 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02937: val_acc did not improve from 0.86765\n",
+ "Epoch 2938/3000\n",
+ " - 40s - loss: 0.2340 - acc: 0.9703 - val_loss: 0.9907 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02938: val_acc did not improve from 0.86765\n",
+ "Epoch 2939/3000\n",
+ " - 39s - loss: 0.2314 - acc: 0.9731 - val_loss: 0.9625 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 02939: val_acc did not improve from 0.86765\n",
+ "Epoch 2940/3000\n",
+ " - 39s - loss: 0.2297 - acc: 0.9731 - val_loss: 0.9419 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02940: val_acc did not improve from 0.86765\n",
+ "Epoch 2941/3000\n",
+ " - 39s - loss: 0.2371 - acc: 0.9712 - val_loss: 0.9525 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02941: val_acc did not improve from 0.86765\n",
+ "Epoch 2942/3000\n",
+ " - 39s - loss: 0.2405 - acc: 0.9692 - val_loss: 0.9610 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02942: val_acc did not improve from 0.86765\n",
+ "Epoch 2943/3000\n",
+ " - 39s - loss: 0.2374 - acc: 0.9713 - val_loss: 0.9391 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 02943: val_acc did not improve from 0.86765\n",
+ "Epoch 2944/3000\n",
+ " - 40s - loss: 0.2457 - acc: 0.9709 - val_loss: 0.9798 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02944: val_acc did not improve from 0.86765\n",
+ "Epoch 2945/3000\n",
+ " - 39s - loss: 0.2353 - acc: 0.9722 - val_loss: 0.9567 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02945: val_acc did not improve from 0.86765\n",
+ "Epoch 2946/3000\n",
+ " - 40s - loss: 0.2332 - acc: 0.9730 - val_loss: 0.9623 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02946: val_acc did not improve from 0.86765\n",
+ "Epoch 2947/3000\n",
+ " - 39s - loss: 0.2350 - acc: 0.9716 - val_loss: 0.9460 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02947: val_acc did not improve from 0.86765\n",
+ "Epoch 2948/3000\n",
+ " - 39s - loss: 0.2378 - acc: 0.9710 - val_loss: 0.9438 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02948: val_acc did not improve from 0.86765\n",
+ "Epoch 2949/3000\n",
+ " - 39s - loss: 0.2383 - acc: 0.9707 - val_loss: 0.9624 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02949: val_acc did not improve from 0.86765\n",
+ "Epoch 2950/3000\n",
+ " - 39s - loss: 0.2294 - acc: 0.9739 - val_loss: 0.9467 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02950: val_acc did not improve from 0.86765\n",
+ "Epoch 2951/3000\n",
+ " - 39s - loss: 0.2277 - acc: 0.9727 - val_loss: 0.9437 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02951: val_acc did not improve from 0.86765\n",
+ "Epoch 2952/3000\n",
+ " - 40s - loss: 0.2254 - acc: 0.9746 - val_loss: 0.9800 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02952: val_acc did not improve from 0.86765\n",
+ "Epoch 2953/3000\n",
+ " - 39s - loss: 0.2322 - acc: 0.9733 - val_loss: 0.9799 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02953: val_acc did not improve from 0.86765\n",
+ "Epoch 2954/3000\n",
+ " - 39s - loss: 0.2343 - acc: 0.9713 - val_loss: 0.9261 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 02954: val_acc did not improve from 0.86765\n",
+ "Epoch 2955/3000\n",
+ " - 39s - loss: 0.2244 - acc: 0.9749 - val_loss: 0.9527 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02955: val_acc did not improve from 0.86765\n",
+ "Epoch 2956/3000\n",
+ " - 39s - loss: 0.2334 - acc: 0.9721 - val_loss: 0.9262 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02956: val_acc did not improve from 0.86765\n",
+ "Epoch 2957/3000\n",
+ " - 39s - loss: 0.2262 - acc: 0.9754 - val_loss: 0.9614 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02957: val_acc did not improve from 0.86765\n",
+ "Epoch 2958/3000\n",
+ " - 39s - loss: 0.2311 - acc: 0.9728 - val_loss: 0.9471 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 02958: val_acc did not improve from 0.86765\n",
+ "Epoch 2959/3000\n",
+ " - 39s - loss: 0.2315 - acc: 0.9751 - val_loss: 0.9523 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02959: val_acc did not improve from 0.86765\n",
+ "Epoch 2960/3000\n",
+ " - 39s - loss: 0.2236 - acc: 0.9751 - val_loss: 0.9382 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 02960: val_acc did not improve from 0.86765\n",
+ "Epoch 2961/3000\n",
+ " - 39s - loss: 0.2317 - acc: 0.9733 - val_loss: 0.9633 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 02961: val_acc did not improve from 0.86765\n",
+ "Epoch 2962/3000\n",
+ " - 39s - loss: 0.2391 - acc: 0.9721 - val_loss: 0.9601 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 02962: val_acc did not improve from 0.86765\n",
+ "Epoch 2963/3000\n",
+ " - 39s - loss: 0.2304 - acc: 0.9722 - val_loss: 0.9303 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02963: val_acc did not improve from 0.86765\n",
+ "Epoch 2964/3000\n",
+ " - 39s - loss: 0.2282 - acc: 0.9748 - val_loss: 0.9444 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 02964: val_acc did not improve from 0.86765\n",
+ "Epoch 2965/3000\n",
+ " - 39s - loss: 0.2323 - acc: 0.9731 - val_loss: 0.9879 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 02965: val_acc did not improve from 0.86765\n",
+ "Epoch 2966/3000\n",
+ " - 40s - loss: 0.2234 - acc: 0.9733 - val_loss: 0.9473 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 02966: val_acc did not improve from 0.86765\n",
+ "Epoch 2967/3000\n",
+ " - 39s - loss: 0.2332 - acc: 0.9731 - val_loss: 0.9717 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02967: val_acc did not improve from 0.86765\n",
+ "Epoch 2968/3000\n",
+ " - 39s - loss: 0.2310 - acc: 0.9733 - val_loss: 0.9890 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02968: val_acc did not improve from 0.86765\n",
+ "Epoch 2969/3000\n",
+ " - 39s - loss: 0.2194 - acc: 0.9755 - val_loss: 0.9890 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02969: val_acc did not improve from 0.86765\n",
+ "Epoch 2970/3000\n",
+ " - 39s - loss: 0.2312 - acc: 0.9733 - val_loss: 0.9584 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02970: val_acc did not improve from 0.86765\n",
+ "Epoch 2971/3000\n",
+ " - 39s - loss: 0.2377 - acc: 0.9716 - val_loss: 0.9704 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02971: val_acc did not improve from 0.86765\n",
+ "Epoch 2972/3000\n",
+ " - 39s - loss: 0.2316 - acc: 0.9740 - val_loss: 0.9940 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 02972: val_acc did not improve from 0.86765\n",
+ "Epoch 2973/3000\n",
+ " - 39s - loss: 0.2244 - acc: 0.9734 - val_loss: 1.0011 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 02973: val_acc did not improve from 0.86765\n",
+ "Epoch 2974/3000\n",
+ " - 39s - loss: 0.2277 - acc: 0.9733 - val_loss: 1.0004 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 02974: val_acc did not improve from 0.86765\n",
+ "Epoch 2975/3000\n",
+ " - 39s - loss: 0.2374 - acc: 0.9727 - val_loss: 0.9763 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02975: val_acc did not improve from 0.86765\n",
+ "Epoch 2976/3000\n",
+ " - 39s - loss: 0.2355 - acc: 0.9715 - val_loss: 0.9598 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 02976: val_acc did not improve from 0.86765\n",
+ "Epoch 2977/3000\n",
+ " - 39s - loss: 0.2285 - acc: 0.9731 - val_loss: 0.9543 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02977: val_acc did not improve from 0.86765\n",
+ "Epoch 2978/3000\n",
+ " - 39s - loss: 0.2352 - acc: 0.9712 - val_loss: 0.9493 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02978: val_acc did not improve from 0.86765\n",
+ "Epoch 2979/3000\n",
+ " - 39s - loss: 0.2279 - acc: 0.9713 - val_loss: 0.9609 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 02979: val_acc did not improve from 0.86765\n",
+ "Epoch 2980/3000\n",
+ " - 39s - loss: 0.2357 - acc: 0.9721 - val_loss: 0.9717 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02980: val_acc did not improve from 0.86765\n",
+ "Epoch 2981/3000\n",
+ " - 39s - loss: 0.2157 - acc: 0.9775 - val_loss: 0.9468 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 02981: val_acc did not improve from 0.86765\n",
+ "Epoch 2982/3000\n",
+ " - 39s - loss: 0.2356 - acc: 0.9704 - val_loss: 0.9644 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02982: val_acc did not improve from 0.86765\n",
+ "Epoch 2983/3000\n",
+ " - 39s - loss: 0.2255 - acc: 0.9749 - val_loss: 0.9791 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 02983: val_acc did not improve from 0.86765\n",
+ "Epoch 2984/3000\n",
+ " - 39s - loss: 0.2235 - acc: 0.9748 - val_loss: 0.9988 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 02984: val_acc did not improve from 0.86765\n",
+ "Epoch 2985/3000\n",
+ " - 39s - loss: 0.2371 - acc: 0.9721 - val_loss: 0.9731 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 02985: val_acc did not improve from 0.86765\n",
+ "Epoch 2986/3000\n",
+ " - 39s - loss: 0.2378 - acc: 0.9712 - val_loss: 0.9274 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 02986: val_acc did not improve from 0.86765\n",
+ "Epoch 2987/3000\n",
+ " - 39s - loss: 0.2357 - acc: 0.9734 - val_loss: 0.9346 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02987: val_acc did not improve from 0.86765\n",
+ "Epoch 2988/3000\n",
+ " - 39s - loss: 0.2307 - acc: 0.9733 - val_loss: 0.9555 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02988: val_acc did not improve from 0.86765\n",
+ "Epoch 2989/3000\n",
+ " - 39s - loss: 0.2333 - acc: 0.9722 - val_loss: 1.0066 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 02989: val_acc did not improve from 0.86765\n",
+ "Epoch 2990/3000\n",
+ " - 39s - loss: 0.2357 - acc: 0.9707 - val_loss: 0.9499 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 02990: val_acc did not improve from 0.86765\n",
+ "Epoch 2991/3000\n",
+ " - 39s - loss: 0.2329 - acc: 0.9721 - val_loss: 0.9717 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 02991: val_acc did not improve from 0.86765\n",
+ "Epoch 2992/3000\n",
+ " - 39s - loss: 0.2421 - acc: 0.9710 - val_loss: 0.9940 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 02992: val_acc did not improve from 0.86765\n",
+ "Epoch 2993/3000\n",
+ " - 39s - loss: 0.2226 - acc: 0.9748 - val_loss: 0.9649 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02993: val_acc did not improve from 0.86765\n",
+ "Epoch 2994/3000\n",
+ " - 39s - loss: 0.2339 - acc: 0.9724 - val_loss: 0.9805 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 02994: val_acc did not improve from 0.86765\n",
+ "Epoch 2995/3000\n",
+ " - 39s - loss: 0.2266 - acc: 0.9746 - val_loss: 0.9752 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 02995: val_acc did not improve from 0.86765\n",
+ "Epoch 2996/3000\n",
+ " - 39s - loss: 0.2298 - acc: 0.9730 - val_loss: 0.9582 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 02996: val_acc did not improve from 0.86765\n",
+ "Epoch 2997/3000\n",
+ " - 39s - loss: 0.2279 - acc: 0.9746 - val_loss: 0.9703 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 02997: val_acc did not improve from 0.86765\n",
+ "Epoch 2998/3000\n",
+ " - 39s - loss: 0.2382 - acc: 0.9710 - val_loss: 0.9552 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 02998: val_acc did not improve from 0.86765\n",
+ "Epoch 2999/3000\n",
+ " - 39s - loss: 0.2220 - acc: 0.9740 - val_loss: 0.9601 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 02999: val_acc did not improve from 0.86765\n",
+ "Epoch 3000/3000\n",
+ " - 39s - loss: 0.2251 - acc: 0.9758 - val_loss: 0.9554 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 03000: val_acc did not improve from 0.86765\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Train \"LSTM_v2\": a TimeDistributed CNN feature extractor applied to each of\n",
+ "# 50 frames (27x15, single channel), followed by two LSTM layers and a softmax\n",
+ "# head. Training resumes from the saved v1 weights and checkpoints the best\n",
+ "# val_acc snapshot; progress is mirrored to TensorBoard.\n",
+ "# NOTE(review): this cell is not standalone -- it relies on names defined in\n",
+ "# earlier cells (tf, keras, Sequential, Conv2D, TimeDistributed, optimizers,\n",
+ "# regularizers, datetime, num_classes, x_train/y_train_one_hot,\n",
+ "# x_test/y_test_one_hot, LoggingTensorBoard, ModelCheckpoint,\n",
+ "# ReduceLROnPlateau) -- confirm against a fresh Restart & Run All.\n",
+ "\n",
+ "# Training hyperparameters.\n",
+ "batch_size = 50\n",
+ "epochs = 3000\n",
+ "timesteps = 50\n",
+ "data_dim = (27,15)\n",
+ "# L1/L2 regularization strengths applied to every Conv2D/LSTM kernel below.\n",
+ "l1v = 0.005\n",
+ "l2v = 0.015\n",
+ "\n",
+ "\n",
+ "# NOTE(review): the return value of tf.get_default_graph() is discarded, so\n",
+ "# this call appears to be a no-op leftover -- confirm it can be removed.\n",
+ "tf.get_default_graph()\n",
+ "model = Sequential()\n",
+ "\n",
+ "# Per-frame CNN feature extractor: two Conv2D+Conv2D+MaxPool+Dropout stages,\n",
+ "# each wrapped in TimeDistributed so the same weights run on every timestep.\n",
+ "# The first layer fixes the network input as (timesteps, 27, 15, 1).\n",
+ "model.add(TimeDistributed(Conv2D(64, kernel_size=(3,3), activation='relu', \n",
+ "          padding='same', kernel_regularizer=regularizers.l1_l2(l1v,l2v)),\n",
+ "          input_shape=(timesteps ,27, 15, 1)))\n",
+ "model.add(TimeDistributed(Conv2D(32, kernel_size=(3, 3), activation='relu',\n",
+ "          padding='same', kernel_regularizer=regularizers.l1_l2(l1v,l2v))))\n",
+ "model.add(TimeDistributed(MaxPooling2D(pool_size=(2,2), strides=None,\n",
+ "          padding='same', data_format='channels_last')))\n",
+ "model.add(TimeDistributed(Dropout(0.50)))\n",
+ "\n",
+ "model.add(TimeDistributed(Conv2D(32, kernel_size=(3, 3), activation='relu',\n",
+ "          padding='same', kernel_regularizer=regularizers.l1_l2(l1v,l2v))))\n",
+ "model.add(TimeDistributed(Conv2D(16, kernel_size=(3, 3), activation='relu',\n",
+ "          padding='same', kernel_regularizer=regularizers.l1_l2(l1v,l2v))))\n",
+ "model.add(TimeDistributed(MaxPooling2D(pool_size=(2,2), strides=None, padding='same', data_format='channels_last')))\n",
+ "model.add(TimeDistributed(Dropout(0.50)))\n",
+ "\n",
+ "# Flatten each frame's CNN features into a vector per timestep for the LSTMs.\n",
+ "model.add(TimeDistributed(Flatten()))\n",
+ "\n",
+ "# Temporal model: stacked LSTMs; the second returns only the final state.\n",
+ "# NOTE(review): input_shape on these LSTMs is presumably ignored by Keras\n",
+ "# because they are not the first layer (and data_dim is a tuple, so the shape\n",
+ "# would be nested anyway) -- the actual input size comes from Flatten above.\n",
+ "# Confirm, then consider deleting the dead input_shape/data_dim arguments.\n",
+ "model.add(keras.layers.LSTM(80, return_sequences=True, input_shape=(timesteps, data_dim), kernel_regularizer=regularizers.l1_l2(l1v,l2v)))\n",
+ "model.add(Dropout(0.5))\n",
+ "\n",
+ "model.add(keras.layers.LSTM(50, return_sequences=False, input_shape=(timesteps, data_dim), kernel_regularizer=regularizers.l1_l2(l1v,l2v)))\n",
+ "model.add(Dropout(0.5))\n",
+ "\n",
+ "# Softmax classification head; num_classes is defined in an earlier cell.\n",
+ "model.add(Dense(num_classes, activation='softmax'))\n",
+ "\n",
+ "# Optimizer choice; lr/decay are the legacy Keras 2.x argument names.\n",
+ "#optimizer = optimizers.Adagrad()\n",
+ "optimizer = optimizers.Adam(lr = 0.0001, decay=1e-6)\n",
+ "#optimizer = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.1)\n",
+ "model.compile(loss='categorical_crossentropy',\n",
+ "              optimizer=optimizer,\n",
+ "              metrics=['accuracy'])\n",
+ "\n",
+ "# Resume training from the finished v1 snapshot (architectures must match).\n",
+ "model.load_weights('./ModelSnapshots/LSTM-v1_DONE.h5')\n",
+ "    \n",
+ "#Broadcast progress to the tensorboard.\n",
+ "\n",
+ "# Build a human-readable summary of layers and hyperparameters; it is passed\n",
+ "# to the custom LoggingTensorBoard callback as text to log.\n",
+ "config = \"\"\n",
+ "for layer in model.layers:\n",
+ "    config += str(layer.output).split('\\\"')[1].split(\"/\")[0] + str(layer.output_shape) + \"\\n\\n\"\n",
+ "config += \"batchsize: \" + str(batch_size) + \"\\n\\n\" + \"epochs: \" + str(epochs) + \"\\n\\n\" \n",
+ "config += \"l1: \" + str(l1v) + \"\\n\\n\" + \"l2: \" + str(l2v) + \"\\n\\n\"\n",
+ "\n",
+ "model.summary()\n",
+ "current_name = \"LSTM_v2\"\n",
+ "readable_timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S')\n",
+ "# NOTE(review): hardcoded absolute log path; consider making it configurable.\n",
+ "tensorflowfolder = \"/srv/share/tensorboardfiles/\" + current_name \n",
+ "print(current_name + readable_timestamp)\n",
+ "logger = LoggingTensorBoard(settings_str_to_log = config, log_dir=tensorflowfolder, histogram_freq=0,\n",
+ "                            write_graph=True, write_images=True, update_freq = 'epoch')\n",
+ "\n",
+ "# Save a full-model snapshot whenever val_acc improves (checked every epoch).\n",
+ "storer = ModelCheckpoint(\"./ModelSnapshots/\" + current_name + readable_timestamp + '-{epoch:03d}.h5',\n",
+ "                         monitor='val_acc', verbose=1,\n",
+ "                         save_best_only=True, save_weights_only=False,\n",
+ "                         mode='auto', period=1)\n",
+ "\n",
+ "# Gently decay the learning rate (x0.95) after 30 epochs without val_loss\n",
+ "# improvement, down to a floor of 1e-5.\n",
+ "learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss', \n",
+ "                                            patience=30, \n",
+ "                                            verbose=1, \n",
+ "                                            factor=0.95, \n",
+ "                                            min_lr=0.00001)\n",
+ "\n",
+ "history = model.fit(x_train, y_train_one_hot,\n",
+ "                    batch_size=batch_size,\n",
+ "                    epochs=epochs,\n",
+ "                    verbose=2,\n",
+ "                    validation_data=(x_test, y_test_one_hot),\n",
+ "                    callbacks=[storer,logger, learning_rate_reduction])\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model.save(\"./ModelSnapshots/\" + current_name + \"_DONE.h5\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZIAAAEWCAYAAABMoxE0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAIABJREFUeJzs3Xd4FNX6wPHvm5ACJLQEaaFKb4I0FREQkKYoioqK7arY288Gtotce+9X0avYG4qioiBIUekISpFeQw0tQCCEJOf3x5lNZjfbQrIJ5f08T57MzpyZOZsy754uxhiUUkqpIxVV2hlQSil1bNNAopRSqkg0kCillCoSDSRKKaWKRAOJUkqpItFAopRSqkg0kCgVhIiMFpHHw0y7TkR6RjpPSh1tNJAopZQqEg0kSp0ARKRMaedBHb80kKhjnlOldJ+I/C0iGSLyPxGpJiI/icg+EZkkIpVd6QeIyBIR2SMiU0WkmetYWxH50znvCyDe517nishC59wZItI6zDz2F5EFIrJXRDaKyAif42c619vjHL/G2V9WRF4QkfUiki4ivzv7uolIqp+fQ09ne4SIjBGRj0VkL3CNiHQUkZnOPbaIyOsiEus6v4WI/CIiu0Rkm4g8KCLVReSAiCS50p0qImkiEhPOe1fHPw0k6nhxEdALaAycB/wEPAhUxf6d3wEgIo2Bz4C7nGPjge9FJNZ5qH4LfARUAb5yrotzblvgPeBGIAl4GxgnInFh5C8DuAqoBPQHbhaRC5zr1nXy+5qTpzbAQue854F2wBlOnu4HcsP8mZwPjHHu+QmQA9wNJAOnAz2AW5w8JAKTgJ+BmkBDYLIxZiswFbjEdd0rgc+NMYfDzIc6zmkgUceL14wx24wxm4DfgNnGmAXGmExgLNDWSXcp8KMx5hfnQfg8UBb7oD4NiAFeNsYcNsaMAea67jEUeNsYM9sYk2OM+QA45JwXlDFmqjFmkTEm1xjzNzaYdXUOXw5MMsZ85tx3pzFmoYhEAf8C7jTGbHLuOcMYcyjMn8lMY8y3zj0PGmPmG2NmGWOyjTHrsIHQk4dzga3GmBeMMZnGmH3GmNnOsQ+AIQAiEg1chg22SgEaSNTxY5tr+6Cf1wnOdk1gveeAMSYX2AjUco5tMt4zma53bdcF7nGqhvaIyB6gtnNeUCLSSUSmOFVC6cBN2JIBzjVW+zktGVu15u9YODb65KGxiPwgIlud6q4nw8gDwHdAcxGpjy31pRtj5hxhntRxSAOJOtFsxgYEAEREsA/RTcAWoJazz6OOa3sj8IQxppLrq5wx5rMw7vspMA6obYypCLwFeO6zETjZzzk7gMwAxzKAcq73EY2tFnPzndr7v8AyoJExpgK26s+dhwb+Mu6U6r7ElkquREsjyocGEnWi+RLoLyI9nMbie7DVUzOAmUA2cIeIxIjIhUBH17nvADc5pQsRkfJOI3piGPdNBHYZYzJFpCO2OsvjE6CniFwiImVEJElE2jilpfeAF0WkpohEi8jpTpvMCiDeuX8M8DAQqq0mEdgL7BeRpsDNrmM/ADVE5C4RiRORRBHp5Dr+IXANMAANJMqHBhJ1QjHGLMd+sn4N+4n/POA8Y0yWMSYLuBD7wNyFbU/5xnXuPOAG4HVgN7DKSRuOW4CRIrIPeBQb0DzX3QD0wwa1XdiG9lOcw/cCi7BtNbuAZ4AoY0y6c813saWpDMCrF5cf92ID2D5sUPzClYd92Gqr84CtwEqgu+v4H9hG/j+NMe7qPqUQXdhKKRUOEfkV+NQY825p50UdXTSQKKVCEpEOwC/YNp59pZ0fdXTRqi2lVFAi8gF2jMldGkSUP1oiUUopVSRaIlFKKVUkJ8REbsnJyaZevXqlnQ2llDqmzJ8/f4cxxnd8UgEnRCCpV68e8+bNK+1sKKXUMUVEwurqrVVbSimlikQDiVJKqSLRQKKUUqpITog2En8OHz5Mamoq
mZmZpZ2ViIqPjyclJYWYGF2DSCkVGSdsIElNTSUxMZF69erhPdnr8cMYw86dO0lNTaV+/fqlnR2l1HHqhK3ayszMJCkp6bgNIgAiQlJS0nFf6lJKla4TNpAAx3UQ8TgR3qNSqnSd0IFEKXXimLN2F8u37mP7vkw27TnIXxv3lHaWwrI6bT97Mw8X6pz9h7IjlBv/IhpIRKSPiCwXkVUiMszP8boiMllE/haRqSKS4uzvLiILXV+ZInKBc2y0iKx1HWsTyfcQKXv27OHNN98s9Hn9+vVjz55j4x9AqZJmjGHlNv/zSl7y9kx6vzydjk9MpvPTv3L+G38w/JtFTFm+vUDanfsP8cqkleTm+p+LMDsnt8h5XbwpnVXb9+e9zsrOZe2ODK80Yxek0uOFaQx+e1bevoxD2bwwcTlZ2f7zMGX5dlr+ewLz1u0qch7DFbFA4iz9+QbQF2gOXCYizX2SPQ98aIxpDYwEngIwxkwxxrQxxrQBzgYOABNd593nOW6MWRip9xBJgQJJdnbwTxLjx4+nUqVKkcqWOg6t3ZHBfV/9FfDht21vJoeyc0JeJ+NQNjv3Hyru7BXaroysgJ+43/tjHb1emk69YT/y4cx1Ia/12ZwNXPv+3ALB5MGxi3hp0gpmrdnJ9BVpZB62P5+d+w8xZdl2Gj70E/PXez+oU3cf4LM5GwCYsXoH57/xB78s3Rbw3ue+9js9X5yW9/qRbxfT/fmp7M7IAmDaijTu/uIvAJZu2ZuX7tXJK3nt11V8/af/dcxmrNoBwC//bGPWmp0cyIp86SSSvbY6AquMMWsARORz4HxgqStNc+D/nO0pwLd+rjMI+MkYcyCCeS1xw4YNY/Xq1bRp04aYmBji4+OpXLkyy5YtY8WKFVxwwQVs3LiRzMxM7rzzToYOHQrkT/eyf/9++vbty5lnnsmMGTOoVasW3333HWXLli3ld6aKW26uoe8rv3FXz0b0bVUDgHrDfgTgn5F9KBsb7ZU+/cBhysZGE1smipxcQ/fnpwJweac6tK1TOS/djv2HSCofS6cnJ3NqnUp8ffMZ3PbpAppUT+SOHo28rrl08176vfobAOue7p+3f+iH8zildiVu7d7QK78NHhzPfy5oyZWn1fXaf+unf7Jw4x7+fV4L+rSsTvrBw8THRBFXxvs9gC1dzFyzk2bVK/DshGUMapfCMz8tZ47zSfv1y9tyWoMkpizbzsZdB7ijRyP+80P+4+XR75YwuEMdHvt+CekHg1cNXfv+XL4YehqdGiSxYMNuJiyxAWDBxj08N2G533Mu+u9MfrzjTKJEaFajApe9M4uNuw7Su0V1Ln9nNgA3fDiPuQ/1ZMqy7Szdspf+rWuQm2tonZL/YbDesB95blBrvpi3EYBJ/2yjZ7NqXP3eHK/7XfjmH2xNz2Rzuu08M/ybRXRvchLVK8YD8NHMdTz+4z8cckoqM1fv5O1pa5j0f11peFJC0PdfVBGbRl5EBgF9jDHXO6+vBDoZY25zpfkUmG2MecVZH/trINkYs9OV5lfgRWPMD87r0cDp2HW2JwPDjDEFPiaJyFBgKECdOnXarV/vPWXMP//8Q7NmzQB47PslLN281/cSRdK8ZgX+fV6LgMfXrVvHueeey+LFi5k6dSr9+/dn8eLFed10d+3aRZUqVTh48CAdOnRg2rRpJCUleQWShg0bMm/ePNq0acMll1zCgAEDGDJkSIF7ud+rKn77D2VTPjYaEWH9zgy+/nMTd/dsFFZHh/d+X8vIH5ay4vG+7DmQRXauITkhjiH/m8295zShQdXyPD9hOZ/P3Uh8TBTT7+/OSYnxeYEE4KVLT6FX8+r8tXEPNSrGc/YL02iQXJ4nBrZi2da9PPa9fbh+e2tndu4/xHUfzOOVwW248/OF1KwYn/dgalWrIos2pQPQtXFVru1cDwMM/3oRW/fm9/y7tfvJvDFlNTOHn83pT/0KwIS7zqJuUjlyjeGtaWt4dfJKAFY/2Y+Dh3P4Y9UO
bvxovtd7f//aDlz7/ly7fU0H0vYfonG1RF6etIKpy9OO8Lfh7a0hp3LTx3+Gnf7Mhsn87nyiPxY0rZ7Ij3d0YfeBLNo/Pslvmt8f6E5K5XJHdH0RmW+MaR8qXWmPI7kXeF1ErgGmY9eezitji0gNoBUwwXXOcOya0rHAKOABbLWYF2PMKOc47du3P+oXXenYsaPXWI9XX32VsWPHArBx40ZWrlxJUlKS1zn169enTRvbRNSuXTvWrVtXYvk9Fsxbt4v3/ljL65edSlSU90P9xYnL+XDWeuY/3Ito17GcXMOPi7Zwbqsa5BrDnLW7KBdXht9XpnFr94Z5wWH2mp1cOmoWP9x+Jue+9jsP9GnKzd1Opu8rv3EgK4eUymWpEF+GZycsZ01aBn8MO5uaFePzzt+85yBfzUvlpUkrAPhp8Rbu/NzW0n5zyxnMWbuLS96e6ZXnzMO5dHxiMr/d391rv63++Mtr35odGVz2ziyvfR/OXMc3f24CyLuXJ4gAeUEEbLXKtBX+H+ZvTFkNkBdEAJ4Y/w/T/aQ/+cHxfq8B5AURgGtHzw2Yrih+WVqw/SOYYymIACzbui/ozxigbEzB0l5xi2Qg2QTUdr1OcfblMcZsBi4EEJEE4CJjjLsl+RJgrDHmsOucLc7mIRF5HxuMiiRYyaGklC9fPm976tSpTJo0iZkzZ1KuXDm6devmdyxIXFxc3nZ0dDQHDx4skbwerTIOZbNw4x46N0wGbLXC7gOH2X1+FkkJ9meVlZ3LrowsXv11FWAfHF0b58+S/cGMdYz8YSkzV+/gszkbva7frclJHM7J5YMZ6/h24WYAPpppS7rPT1zO6rT9HMiyn4PuH/O317mdn/6VYDwPdoAL35wRNO0NHx7ZTNaeIBIJ/oLI0SBQO8KJJP4YDyRzgUYiUh8bQAYDl7sTiEgysMsYk4stabznc43LnP3uc2oYY7aI/Wh3AbA4QvmPqMTERPbt89+7JD09ncqVK1OuXDmWLVvGrFmz/KY7nny3cBN3fr6QWcN75NX5uqUfOMy1o+fQpVFV7u7VOG//rowsfl22nR5NT6Ltf34B4I9hZ1OrUtm8ksaEJdt4cOwi7uvdpEB9t7seetxtnRnp1LH7BhGAN6euYvyirV77PPXaObmGMfNL5qG1bKuudqvCd0wHEmNMtojchq2WigbeM8YsEZGRwDxjzDigG/CUiBhs1datnvNFpB62RDPN59KfiEhVQICFwE2Reg+RlJSUROfOnWnZsiVly5alWrVqecf69OnDW2+9RbNmzWjSpAmnnXZaKea0ZHgewl2e/ZUL2tTi2s712X8om0vensnke7rS4wX7Z/Dnhj3c3O1kokSILRPFqU7wcFu/I4NZq3eyY7/t/fLg2EUAARtNPQa8/kfQ475BRKljQXRU5AclnxBrtrdv3974Lmx1IjVAl/Z7PZyTy8+Lt3Ju6xoBG6Cv/2Auk/7xX599+9kNec2pilKqtEz6v65e3XUBZgw7m49nreeHv7ewYVfhO5Y+dWErhn+zyGvf/65uz3UfzKNcbHReVSnAGScnUS42Ou//ZNp93ej63FSvc+c/3JPnJy7PK1HXqlSWP4adXeh8eYTb2K4j21XE5OYajDEM/2YRt3+2gHd+W0NurqHesB95aOwipi7fzkHnHyUmOvCfogaR0vPYgBZM+r+zeOcq/8+SO85uSH+nS3Jx6t+qhlc3Y4+rTq/LK4PbUC72yKprujRKZtxtnfNef3xdp7ztMTedHvC8Vwa3oeFJCSTGeVfixERHcX+fpkx3dYD4/YHuvHtVe4ae1YD3rmlPx3pV8o493N/7A93gDrW9XlcqF0OPZtVY93R/Jt59Fk2rJ3JXT9sVe3jfZrx7dQdGXdmOWcN7FOj2PeS0OiQlxPHkwFY8O6g1cx/qWaQgUhhaIjkBRPK9rtq+n8zDObSoWcGrtPHq5JW8+MsKalSMZ0t68EkjK5eLYfeBwk0BcawY3KE2n88t2N7iq3eL
anljF76/7UzOe/33vGOVy8Xw5yO92JmRxTvT1/D29DVe517RqQ6fzN5Q6Lz5thk1qFqeNWneI6vXPNkvr8fb9r2ZVE2MIzvXsD8zm8rlYwE4kJXt1eV3eN+m/LF6Jxe3S+H2zxYAMPmergz67wzqJJUvMDXJ3Id6kpwQS/3h+b2PVj/Zj+goYd2ODLo9P5XG1RJ49bK2NK1eIS/N7owsxi/eQv2k8pxSuxIrt+/ngjf+yMv3rLU7qV4hnj9W76RNSiWu+2AuE+46i8rlY2n57wnsP5TN3yPOofUIO9Z53dP9WbI5ne8WbubK0+rS5dkpefcadWU7zmlRnZ8Xb+GlX1bywb868tvKNC5unx8ItqQf5EBWDidX9R6zcSg7h8ysXLJycklOiGX7vkN0enJyXmlh4pKtrNmRwbodGTzUvxmJ8eEt+bB9XyYdn5gMQNXEOL688XTqJ5cPcVbhhFsi0UByAijO97p0817SDx7m6vfmkOUaKT2sb1Nu6noy63ZkUKFsjN+2i+PZfb2bMPSsBmQcymbI/2ZzV4/GtEqpSLUK8ezNPJz3sAK4tH1tFm9OZ+W2/YwY0IIyUcIlHWqTtu8QCXFlKBsbzfz1u9iVcZit6Qfp2bwaNSragaZP/LiUd35bC0ByQizt61bhqQtbsTn9IGvSMjjvlJoAnPPSNAa1S2HO2l0cys5l5/4sr9HRYB+c63dm0OOFaSTGl2HBo+ewJm0/uQZqVynL7ozDfjs+FEbPF6cxuENtru/SIG/f939t5vbPFvBgv6Zc2r4OFcvZB6dnbMyn13fiDKfnHcCmPQepXiE+rLr+CUu2sjptP7d0axg03V8b9zB2wSb+fV7zvADmrwS0L/Mwn8zewNAuDQp0IS+KT2dvoEujZGpXObLxHQBb0zM57anJVKsQx+wHexZb3tw0kLhoICnce83NNWTl5BJXJorsXONV7eQeCOfWqlZFujauyutTju1qqK6Nq1I/uTzXd6nPwawcer00nevPrE/H+lUY6jOgbuUTfRk1fQ2J8WW46vR6Qa+7dPNeXp60gjZ1KnFz15MxBnKNoUyQKj1/3piyiucmLOe+3k28RpOHkptrMMC7v61hS3om1SvGc1PXkwE7b5SIlEijrMeKbftoXC3Ra5/nb8vfAz2Ser80nTa1K/HMoNYlet+iysk13PrJn9xwVgPa1a0c+oQjoIHERQNJ4d5r56d/ZdOegyTElWH/oWyuPK0uYxds4rf7u+d1sT2aDDmtDh/PCl61M/SsBjzYz/4MAgVDgKn3dqOeq3pgUWo6TWskEhMdxew1O2lbpzLLt+5j9tqdXp+yS8rhnFy++TOVi9vVLtZPyEeDDk9MIm3foRIPJCqwY2Vku4qg7NxcogqxHokxhrT9h9i0xw5s9EyM99EsO+iupILInT0a8YpT3x7I5Z3qkFw+lld/XUXVhPzql+n3dees52zd9hWd6nBKSiW6NanKSRUKVtF42gRmP9iDSuViyDycS8Wy3vXTrVIq5m13apCUt8+9vyTFREdxaYc6pXLvSJt+X3eyc4s+q64qeRpISsmePXv49NNPueWWWwp97ssvv8zQoUMpVy54/erSzXspF5v/K/5y3kZqVizLmY2S/ab/+s9N3PvVX36PlZQbutTn7l6NuatnI1J3H+Tg4RxGjFtC/9Y1eGhs/tjTc5pXY9663QBEiR2EuDsjizpJ+T+Tkee3DFpd8+s93bxe+5s4UJUc2wtJfwfHIg0kpcQzjfyRBpIhQ4Z4BZJtezOJEqFqYpxX2gNZ2Xg+Y7un7XjzilP5/q/N/LTYDrJb82S/UgsiJ1ctz+q0DD66riNdGtnpSkQkryHy0xvsgMy4MtH8sWoHD/VvRnJCHLPW2Flgy0RHUatSWWpVsg3S393amRXb9gUMIp/e0IndGcdnLzGlSoMGklLinka+V69enHTSSXz55ZccOnSIgQMH8thjj5GRkcEll1xCamoqOTk5PPLII2zbto3NmzfT
vXt3kpOTmTLFVuNsc2Zn3ZlxiKTycQUCiq9bPvGeEbVBiInfjtTTF7ZimGvA1al1KtGiZsW86rJrzqjHiAHhzXU2qF0Kg9ql5L1OjLd/vpXKeVdHnVK7EqfUDrxmyxkn+y+RKaWOjAYSgJ+GwdZFodMVRvVW0PfpgIeffvppFi9ezMKFC5k4cSJjxoxhzpw5GGMYMGAA06dPJy0tjZo1a/Ljj7ZxOD09nYoVK/Liiy8yZcoU4hIqkrbvkFfQyMrOZUv6QSqWzf/Vpu4+yLRpq4v3/QXwYL+m1K5cjpudQNWrebW8QDLv4Z4kJ8Tx7M/LALi528k80KfpEd/r+i71KR8bzSXta4dOrJSKGA0kR4GJEycyceJE2rZtC8D+/ftZuXIlXbp04Z577uGBBx7g3HPPpUuXLl7neZblLO9nlK/vxH5P/7QsQrnPlxBXhqFnnYy7J2BSQlyBXjieLq+xhez66iuuTDTXdK4fOqFSKqI0kEDQkkNJMMYwfPhwbrzxxgLH/vzzT8aPH8/DDz9Mjx49ePTRRzEG0g9kQZT99a1K21/gvEh75qJWTPpne95Soi9degoDTqkFkDfC3VP15Cs+xgaQwvQoU0odvTSQlBL3NPK9e/fmkUce4YorriAhIYFNmzZxMBvE5FCr+kkMGTKESpUq8e6775J5OIfYsuVYnppGSp26Ie5SPLo2rkqfltU5kJXDf35YyvyHe5KUEMelHerQ/fmprN2RQeuUSl6N2+9f04FG1fwv73ntGfXZsS+L67toaUKp44EGklLinka+b9++dO8/kHYdOhFbJoqEhAQefu5NNqxbw2tPjSAXSCwXz2uvv8GKbfu46IpruOXKQVStVp3/ffl9seftso61Of3kZLo1qcpfG/dweoOkvOqo6870fvjn5NpqrGif0kX3picFvH7Z2GgePa95MedaKVVadGT7UeLvVDuRXeuUSl6vPWpWLMvm9CNbAXHbhjXcMG5L6ISOwows/nnxFu4f8zdzHupZIgvoKKVKjk4jf4wyxpCVXXB075EGkVCCTZ0djj4ta/D3iN4aRJQ6gWkgOcos2pTOsq17Qycsgi9vzA8e7etV4YmBLfn0ersuQ+MA7RpKKRXICd1GYowJuGJfJGXn2Dmwco1h+bZ91K585FNJh1I+Nho776v1yuA2dKxfhabVE/O6CF/RyTbav3xpG04/OSlieVFKHZ9O2DaStWvXkpiYSFJSUokGk1xjWLwpnbIx0Rw8nBP6hCKoGF+Gfel7mLk8lVq169K1cVXKOyu8HczK4UBWNkkJwUfAK6VOXDr7bwgpKSmkpqaSlpYWsXt4YrQ7TqXuLr62jtgyQla2/w8CyQmxHIiJZlP6YV6bvZuxHVrmBRGwPad8l+pUSqkjEdFAIiJ9gFewU3q+a4x52ud4XeA9oCqwCxhijEl1juUAnnlLNhhjBjj76wOfA0nAfOBKY0xWYfMWExND/fqRHcfQ4tGfqVg2hvF35o9I7/tB8U3FPu/hnhzKzqXz07967b+/TxN6tbaLHjXMyeWzOnULLP+plFLFJWKN7SISDbwB9AWaA5eJiO/ggeeBD40xrYGRwFOuYweNMW2crwGu/c8ALxljGgK7gesi9R6O1Krt+3nix6VkZOWwOT2TNiN/oc3IX0g/WPQZZ2/sahdTuuaMeiQnxFGrUlnu6NHIK42nzQPs+hUtapbO2hlKqRNDJEskHYFVxpg1ACLyOXA+sNSVpjnwf872FODbYBcU25hxNnC5s+sDYATw32LLdTHo+eI0v/u7Pje1yNdu6QSFzq41re/q0YjBHWoTHxNNfEyU1xokSikVaZHs/lsL2Oh6nersc/sLuNDZHggkioin21C8iMwTkVkicoGzLwnYY4zJDnJNAERkqHP+vEi2g5SEgW3z3+J5p9Tkl7vPolfzann7oqKEmpXKUqV8rAYRpVSJK+1xJPcCXUVkAdAV2AR4ujLVdXoLXA68LCInF+bCxphRxpj2xpj2
VatWLdZMl7SXLm3j9bpRtcRSyolSShUUyY+vmwD3QhEpzr48xpjNOCUSEUkALjLG7HGObXK+rxGRqUBb4GugkoiUcUolBa5Z2l4Nsda4UkodbyJZIpkLNBKR+iISCwwGxrkTiEiyiHjyMBzbgwsRqSwicZ40QGdgqbGDXqYAg5xzrga+i+B7CGr/oWzqDfuRz+ds4M8Nu7nt0z958ZcVhbrGmQ39r9ZXu0rZ4siiUkpFXMRKJMaYbBG5DZiA7f77njFmiYiMBOYZY8YB3YCnRMQA04FbndObAW+LSC422D1tjPE00j8AfC4ijwMLgP9F6j2EsjXdLm877JtFNDwpgVXbC78uyJMDW3HWc3a53Ju7nUxMlNC+XhWa1kik4xOT6duyOgA/3H6mzmellDoqRbRl1hgzHhjvs+9R1/YYYIyf82YArQJccw22R1ipcw80PJIgAlAuLj843HTWyVR0rT++7D99iHGmb29ZS7vwKqWOTqXd2H7Myc7JZcaqHQCsTcso9Pm9W1SjWY0Kea8rl4vN244t4/3riI+J9losSimljkYaSArptV9Xcfm7s/l95Q6u/3Be6BN8lImK4uPr8gtU7kARE61BQyl17NFAUkhrdthSyJD/zT6i88tES95EidUrxHsd09KHUupYpKPXwpR5OIcZq3dQ1Gd9xbK2DeT9azrQtIYdD/L2le34al5qqUxpr5RSRaWBJExPjv+HD2euP+Lzy8ZEM/DUWtzTqwngvaZ57xbV6d2iepHzqJRSpUGrtsLw0az1fD5nY+iELuVio3lsQIu81+XjonlyYCuvXllKKXU80EAShke+XUxWTsF11IMZcEpNLm6fQusUT7ddrbZSSh2fNJBEyKB2KZSLLcO7V4VcXEwppY5p2kYSAeue7p+3XaV8LI2rJfBAn6almCOllIocDSQh7M0s2mJUZaKjmHh312LKjVJKHX20aiuELXsySzsLSil1VNNAEoLBlHYWlFLqqKaBJASjcUQppYLSQBKCBhKllApOG9uL0W/3d2fH/kOlnQ2llCpRWiIJwV8bSZdG+asa3te7Sd527SrlaFunconkSymljhYaSELwV7XVs1k1AMbecga3dm9YwjlSSqmji1ZthXDua797vV6onaqJAAAgAElEQVT3dH+MMfRqXo2aley66j/d2YVKOoeWUuoEpYHkCIhIXhABvFY8VEqpE41WbQVhtMuWUkqFFNFAIiJ9RGS5iKwSkWF+jtcVkcki8reITBWRFGd/GxGZKSJLnGOXus4ZLSJrRWSh89UmUvnP1TiilFIhRSyQiEg08AbQF2gOXCYizX2SPQ98aIxpDYwEnnL2HwCuMsa0APoAL4tIJdd59xlj2jhfCyP1HnK1RKKUUiFFskTSEVhljFljjMkCPgfO90nTHPjV2Z7iOW6MWWGMWelsbwa2A1UjmFe/NI4opVRokQwktQD3soKpzj63v4ALne2BQKKIJLkTiEhHIBZY7dr9hFPl9ZKIxPm7uYgMFZF5IjIvLS3tiN7A4UIuZqWUUiei0m5svxfoKiILgK7AJiDHc1BEagAfAdcaYzxP9eFAU6ADUAV4wN+FjTGjjDHtjTHtq1Y9ssLMmPmpR3SeUkqdSCLZ/XcTUNv1OsXZl8eptroQQEQSgIuMMXuc1xWAH4GHjDGzXOdscTYPicj72GAUEXsOFG0tEqWUOhFEMpDMBRqJSH1sABkMXO5OICLJwC6ntDEceM/ZHwuMxTbEj/E5p4YxZouICHABsDhSb2DRpj1522NvOYP6yeUjdSullDpmRaxqyxiTDdwGTAD+Ab40xiwRkZEiMsBJ1g1YLiIrgGrAE87+S4CzgGv8dPP9REQWAYuAZODxSL2HSf9sz9tOToijUrnYSN1KKaWOWREd2W6MGQ+M99n3qGt7DDDGz3kfAx8HuObZxZzNsERHSWncVimljnql3dh+VKvlmgZFA4lSSvmngSSIuknl8rajRAOJUkr5o4EkCPeARC2R
KKWUfxpIwhStJRKllPJLA0kQSQn5vbSi9CellFJ+6eMxiOoV4vO2tWpLKaX800AShHvORm1sV0op/zSQBOGeRl4DiVJK+aeBJAjttaWUUqFpIAnCGEOlcjGse7q/BhKllApAA0kQuQY0fCilVHAaSIIwGG0bUUqpEMIKJCLyjYj0F5ETKvDkGtA4opRSwYUbGN7EriWyUkSeFpEmEczTUcMYEI0kSikVVFiBxBgzyRhzBXAqsA6YJCIzRORaEYmJZAZLkzFG20iUUiqEsKuqRCQJuAa4HlgAvIINLL9EJGdHAWN0/IhSSoUS1sJWIjIWaAJ8BJznWjf9CxGZF6nMlbZcY7SNRCmlQgh3hcRXjTFT/B0wxrQvxvwcVQxaIlFKqVDCrdpqLiKVPC9EpLKI3BKhPB013FOkKKWU8i/cQHKDMWaP54UxZjdwQ2SydBQxOn28UkqFEu5jMlpc/WBFJBqIDZLek66PiCwXkVUiMszP8boiMllE/haRqSKS4jp2tYisdL6udu1vJyKLnGu+KhHsn5trDKL9tpRSKqhwA8nP2Ib1HiLSA/jM2ReQE2zeAPoCzYHLRKS5T7LngQ+NMa2BkcBTzrlVgH8DnYCOwL9FpLJzzn+xpaFGzlefMN9Dodk2kkhdXSmljg/hBpIHgCnAzc7XZOD+EOd0BFYZY9YYY7KAz4HzfdI0B351tqe4jvcGfjHG7HKq0X4B+ohIDaCCMWaWMcYAHwIXhPkeCi1XByQqpVRIYfXaMsbkYksC/y3EtWsBG12vU7ElDLe/gAuxY1IGAonOeBV/59ZyvlL97C9ARIYCQwHq1KlTiGznM9r9VymlQgp3rq1GIjJGRJaKyBrPVzHc/16gq4gsALoCm4CcYrguxphRxpj2xpj2VatWPcJr6Oy/SikVSrhVW+9jSyPZQHdsldLHIc7ZBNR2vU5x9uUxxmw2xlxojGkLPOTs2xPk3E3OdsBrFied/VcppUILN5CUNcZMBsQYs94YMwLoH+KcuUAjEakvIrHAYGCcO4GIJLtmFB4OvOdsTwDOccarVAbOASY4I+r3ishpTm+tq4DvwnwPhZabq7P/KqVUKOGObD/kPPBXisht2FJAQrATjDHZTtoJQDTwnjFmiYiMBOYZY8YB3YCnRMQA04FbnXN3ich/sMEIYKQxZpezfQswGigL/OR8RYSWSJRSKrRwA8mdQDngDuA/2Oqtq4OeARhjxgPjffY96toeA4wJcO575JdQ3PvnAS3DzHeR5OrAdqWUCilkIHHGg1xqjLkX2A9cG/FcHSV09l+llAotZBuJMSYHOLME8nLU0e6/SikVWrhVWwtEZBzwFZDh2WmM+SYiuTpK6Oy/SikVWriBJB7YCZzt2meA4zqQ6HokSikVWrgj20+YdhG3Vy9rS662uCulVFDhrpD4PrYE4sUY869iz9FRpEL8cbscvVJKFZtwq7Z+cG3HY+fF2lz82VFKKXWsCbdq62v3axH5DPg9IjlSSil1TDnS9f8aAScVZ0aUUkodm8JtI9mHdxvJVuwaJUoppU5w4VZtJUY6I0oppY5N4a5HMlBEKrpeVxKRiK1MqJRS6tgRbhvJv40x6Z4Xzpoh/45MlpRSSh1Lwg0k/tKF23VYKaXUcSzcQDJPRF4UkZOdrxeB+ZHMmFJKqWNDuIHkdiAL+AL4HMjEWYRKKaXUiS3cXlsZwLAI50UppdQxKNxeW7+ISCXX68oiMiFy2VJKKXWsCLdqK9npqQWAMWY3OrJdKaUU4QeSXBGp43khIvXwMxuwLxHpIyLLRWSViBSoGhOROiIyRUQWiMjfItLP2X+FiCx0feWKSBvn2FTnmp5jGtCUUqoUhduF9yHgdxGZBgjQBRga7ARnrfc3gF5AKjBXRMYZY5a6kj0MfGmM+a+INAfGA/WMMZ8AnzjXaQV8a4xZ6DrvCmPMvDDzrpRSKoLCKpEYY34G2gPLgc+Ae4CDIU7rCKwyxqwxxmRhe3ud73tpoIKz
XRH/U9Nf5pyrlFLqKBTupI3XA3cCKcBC4DRgJt5L7/qqBWx0vU4FOvmkGQFMFJHbgfJATz/XuZSCAeh9EckBvgYeN8boMoZKKVVKwm0juRPoAKw3xnQH2gJ7gp8SlsuA0caYFKAf8JGI5OVJRDoBB4wxi13nXGGMaYWtXusCXOnvwiIyVETmici8tLS0YsiqUkopf8INJJnGmEwAEYkzxiwDmoQ4ZxNQ2/U6xdnndh3wJYAxZiZ29cVk1/HB2Kq0PMaYTc73fcCn2Cq0Aowxo4wx7Y0x7atWrRoiq0oppY5UuIEk1RlH8i3wi4h8B6wPcc5coJGI1BeRWGxQGOeTZgPQA0BEmmEDSZrzOgq4BFf7iIiUEZFkZzsGOBdYjFJKqVIT7sj2gc7mCBGZgm0Y/znEOdkichswAYgG3jPGLBGRkcA8Y8w4bKP9OyJyN7bh/RpXe8dZwEZjzBrXZeOACU4QiQYmAe+E8x6UUkpFhpwI7dTt27c38+Zpb2GllCoMEZlvjGkfKt2RrtmulFJKARpIlFJKFZEGkmNZeipMexZOgOpJpdTRSwNJcTAGtvxd8vf94kqY8gTsWFHy91ZKKYcGkuKw+Gt4uwssGVuy9z18wH7PzQkvfeZeWPd75PITSs5hGNUd1kzzf3ziIzDpsZLNk1KqyDSQFAdPiSBteQnfWJzvYVZtjb0JRveHfdsilqOg0jfC5j9h3O3+j894FX5/sWTzVBI+uRi+vKq0c3F0mvkGPF0ndDp1VNNAUiycB7rJLeHbSug0bjtX2u8HdwdOs28r/PFKwXaXL4bAiIqFu19ONhza79pRyPweL1ZOhKXflew9jYGDxTGLUQS48zbhQchMD5z20H6Y+oz9W1JHLQ0kxcEzPVhpNXqHe9+Ysvb74QzYudr/P+dX18Ivj8L2f7z3//N94fP19XXwVK3815kRerBlZ8GutZG5djA52fbneCT2bIDDmYU/7+Bu2L89dLoFH8MzdWH7ssLfI9LmjLJ5m+MaSxwo0E55AqY+CYu+Kpm8qSOigaQ4SCmVSApbtRUda7/vWguvnQpj/Swps2GG/Z5bDJ8Al35rvx/cA5v+hFHdnAOu/Gamw+YFsHFOwfM3L4APBkD2oeD3+f5OeLWNbQMqSZP+bX+O6b5TyAWQnmrfpzHwciv40me+UWNg7XTIDfJ39FxDeL5R8PvMew/G3Wa3d/ipbk1bAXtdKzYsGgPf3hreeygOy36038ffm7/PX9XfEzVh1pt2OyfA38D+NNiqsySVNg0kxUEK+UAH+Hk4fDQwdLpADu6G7Uvs9t9f2obsUKKcGXH2Og++xV/bB/zWRYHP2TALHq925PkE+PQSeKe7/2MvNrcB5n+98vd5Og98cgmsnQaPnwSp873P278dVky026t+sd8Ph1oiJwzG2AdrOKUFT8eF/VvDu/ZLLez73LfFvl450fv4W13gg/NgVFdbkhiZVLCk5Rvgf30cXmsHKyfl7/vh7oL3/vFeWzW5dBy80QFebJZ/7OvrYOHHsGOl/X17pM6HbUvCe2+hLP7GVc0Z4P/EXbLeutiWnEMZ1RXe6lxw/9rp9uf5/V2FzmoBubnw1xdFq15LWw4/P2g/CITzvwqwe719H26Z6fmlt9HnwuT/2OfIhIeOPG/FQANJYeQcDvBHcAQlkllvwupfvfdlptt/9nnvhz5/zL/yt2e8Grox1xjY5DyMc7Ly97/THd46s2D69X/YqocJD0G266E69Wnvn8GYf/lvLB3lChwbZxc8nnMYfn8ZsvYXPDb7LVtdlbHdex/A/A9gz0bbaeDTiyHL9bBxtxn9eI/9We7ZCDNeh1dPhR2r8o/n5vgv6Swfbx+sk0YUPOaxdrrteRYVnf9ewD7AR1S0/+AA05/z367kfoj//rJNM6IibHMC+ta/7YM9Nxv+GQcvNIPPLve+xrIf7YeA6c/BzlXwyUWwYXbB+62caPfNdaqR3KWg1VO807/eHt7rbf9WDmfCu2fD
f8+AFRMC/yx+ftBe4+2ugdP8eA+MuRZeOcUGvEBVsZ6/y72b4bcXvI95AuqIirZ3H9hreT4U5ebaYz8/aF9/cJ79Oc5/H8bdAcuDTg0YWGY6/P25Lb3PeNX+bl9oav9+jIFX29r7/rezd+/J0ed6/1+90RFmvQEZafnVk7Pfzv9ZGAMzXrP3y821P/9XWtv3kbHDpjmcCd/eYv/Xd66Gdb/Bb8/b58jM120+Jj0WfqAqRjrXVmF4HpjDNnjv/+1FmPwYdL4LeoXZfdXzDzzC1dC4bYn9xwV4wJlcuWylgudumg/v+FlTrNNN0HOE/WQeHQOxCbYuvnJd+yDd5dTndx0G0572yU+6d75C5t8nvft9ZOyA504OfG7FOnDWvfD9HeHdC6D1YOj3rP0dJNaEfX4W07xrsW2HKRNvH4r+PLrbPnwmPGgf0o/sAImGySOg41BbavC4/ldIaVfwGgV+RgI3TrddwPPSpBe+c4Jb+ar2odP8gvwqwsu+gM8uDXzOabfkVwUVRd0zYb1PN/FhG2D3OjipBcz7H2xbDPW72qDr8eguW6qp2sQG9X3bIL4iPOFToi0T7/3hxOPM/4Pm58OH5/tvT3t0F4ysYre73OMdbB7aCk9Ut9sPrINn6hU8f4RPo/7GubBpHpx2s329d4v9u/r7S/u3sHeTfZAHUud02DAz//XZj9i/652rbZUnwD3LIbG6999C68H2A8uhvXDzTKjW3AaDjwZCuWRI6QArfspPX6WBPWfqk/n7bvrDf0kM7M/3vlU2oJSrEjj/YQh3rq1w12xXkN+75JNL4OL3Iba8fZ3X2B6kRLJpPlQ/BaID/MiXfAvzR+e/fqau/e77xw/+gwjYT+2xCfZTCsAFb8G3N8G1P+UHESgYRMA+JCrXC5x/fwKNXwkWRPLOLeSnJonKv5+/IALw1TX2wRDM6+1gl2tC6dfbQ7kk+/v54xXvtO+eDadcDu2uhjqn2X2H9vm5qPEOIgDLf/KTrhAynMXYPEEEggcRKJ4gAgWDCOR/iGrcB1Y4n+7//NA7zahuthRw6cfQ7Dx4oTE06FbwWv6CCNiu38G6f38wIH/bt8SybWn+tr8g4mvrIvifsyDraTfbAOj+8OEpAQfjDiIAv/7HBpJPBuXve6FJwf/hv10rh3tK5J6q1AM7vIMI2L9XdxCBgp1h3LIzbenvwE64ZTac1DT0eykirdo6Eisn5P8zQX6Viqfu29fWxfbh/+vIwNf86mpYM6Xg/qXj4NmTbQNpOOa+m7+d6jRg//Fq6PNGn+f9gA3lhab5nw4LK31D4Xs7Lf0OdofomRUqiEDB97h7XX6Vnz9/fQofXWi357wDL7UMfQ+AzwaHl+5YsyJIFdFWZ3aHL4bYRnCANVOL797+ApzHu8FW/XaMqGiruF5o6l3tlLYicAm2sEZULPg35qlu88df1W44vr0p+PEDO+33NzvlV41FkJZIioOnRLLoK6h7BrQYCPGVXAHGaYw9kt4lnjrtTy+G2xfA/PeCp3dXCXiqLX0/4fiTvsHW94bLN2jOHmUbxvds8J/eV2E/PR/OCFwSi7TDGUWrpjoRPd+wtHPgn79S3RsdInvPWW8EPvbRQOj5mP1AUxiF6VV5YBeUTw6drgg0kByptdMh6wCceiVeA+1+uDu/18yg96HlhWCcKhmJsvWyaa5i6bo/7IjvUHavg5GVIa4QD7T5YTTaF5ef7iu5eyl1PJn078hev1LkZw7QQBIu3/aA+aPt16lXBh5hPuZaJ5A4bSdR0fn1sh6j+xUuH4eCjAJWSilfMfERv4W2kYRrqp8G6jwhpv7IdZVIlFLqOKNPtnClzg18LFSA8JRIlo8vvvyoY0ft04r3etdNCp1GlazkJqWdg1KlgSQcGTv896jyCDZ54v7tpTB1iioxN7pGHgcKGOe94n9/MKfdmj8Tga/qYfYcA7j8K7htPnS5N3RagDZXhH/t4lC5fuBjgz8t5psdwaShgz/zfl21KbT2
02Bf9/TA16hYu/D3DeSekp5hPDwRDSQi0kdElovIKhEZ5ud4HRGZIiILRORvEenn7K8nIgdFZKHz9ZbrnHYissi55qsihZ0C9wh8F2IeomCDOtf9HrnJClXpqn8W1DgFqjjjZvo9WzDNiHTbj39EOgx4zQ4sG5EOg3x63w1Phau+g37PQ9Nzoc+T0MRpP/N9mHkm3/QY/CncvxauGV+wM0bjcyC5IfR4xA7i86jkjFNq3Mc7fdUwxxycH6DXXUy5/Hz743mo1jnDPpBvc0r6DXvZAX5uFZwJP4ursdj35xZK03OhaT+42zVNTHQs9PFTzS3OLAd1O0Ovkfb30Psp24Pzzr9C3yupkf8A5Suxuv/9D6z3P+ashEQskIhINPAG0BdoDlwmIs19kj0MfGmMaQsMBtx/nauNMW2cL3en6f8CNwCNnC+f/4QICDbtOuT32fZnzLV2UkFVeFWbhU5TGJXrQctB0PWB4rleTDn7/YZf4fY/bVBp6OpMcZLPn/upV8GFb9vtlhfZf/x6zkDGuEQ7eK/jDTD4E7vvwlFwyyz7MPN9SJz7MlRv7byv+nYEc70AI5093B94Lv8CTr0aWl+Sv+/M/4PTb4Oh0+zsA77iXUGqxUB4ZCfc+JtPMBLvkkTLi2yp7e6ldjR2khN0z7rXvr/oGLh1DlzyIVzg+ve/cTrUbAN3LLDbN06H6yfnH+/9lP1+9sPB37P74XyTzziUckmBz6veOv/3UDElf/+g94KPFm8xEDrfCcM3wOm3wMWjbScb9+9v6FT7gcHj0o/h9nlw3qv22M0zvH/Wwdw8w/6N+JsBowRFstdWR2CVMWYNgIh8DpwPuIagYoAKznZFIMCQZUtEagAVjDGznNcfAhcARRxGHEKwuWseq5LfvfdEVL1V8EkfC+Pcl+EH1yR7jXt7d5UOR4PuthoyNsF7sNf9a+2DPybePlCnPeP//LJVoP8L9gOAPw17wZl3warJ+VNrlK2U/4885Ov8MSfBHlQeQ772ni/MLaYsnBQgmLa/FtpdY8fzVKiZvz+xuu3ZF5sIZeK8zzn5bDtyvP2/7HUHOANVx/zLvq+eTjfUmm3sAx7stB/7ttq5umq1t9W0a6ZArBNEa7S2wTlvoKLxrup1l7wq1rLjqwDiKuTvr+q0L1RpkL+vxine+8pWtt8905Kcfov9Ajs+yzMDwIOb4Unn59H7KZum13/sTArugAD2b3fQ+/Csn+q1QLUMyc7My3U72/noPMKpGBm20aaLS/SutmzmTMMSEw81nbFcdy+1z5Vda1yzZjtiyuWvjlqtBUeDSAaSWoB7gEQq0MknzQhgoojcDpQH3H1j64vIAmAv8LAx5jfnmqk+13QteJFPRIYCQwHq1Cli0TjY4J/jMYicdZ+dDDAc0XGh04Tj2p9tPbMnkDToDt2Gwx8vF+46V31rJ3ycNMJ7IJj7U6T7n/6OhXbCwUPpUK2l/dQqYs/fsx4qpNgHznkvw7IfoP119ng9PxNd5uVhHHw4ILwlkMvEFXzgh0vEO4gAXDnWDgxt7Wdkff0uthThO03Po7so0H7gCSRN+tk5oea+Yx/qfZ+lwOy9rS+GRj3Dm5rkvJftzy4lyEjyCimBj10zvuD9Lx5tZ9dNW2anLbrgLftePYEj0c/s1T0etSWyclVs5wVPt/y8ecZCzEF49Q92KpIna+SXDCF4NXe8K3hWa2nnFWszxH/auAT7vWZb+zt7o2N+kBmeagfnlq/q5x6VbN6DLRYWAaU9juQyYLQx5gUROR34SERaAluAOsaYnSLSDvhWRAoVeo0xo4BRYCdtLFIuw10T/XjRcpCtMghnNHdxdWn2rJXiMei90P3fG/fNH7Xf4sL8NoAysd7BIli9f5l4+NdPdhqUId/kn9fvefj5AVtt4HnQd7g+vPdS61RIqAZnF/PU3nXPhCZ9g6epWAvaXB74uL+53jyzGLuldLAP5rgEO6ngJR9Co94QFeD3HeUEHk9A6vMMLBlb
MF3Zyrb6LpCrxuWXUPzex8/9RWw7lGdOqTaXBT6/5wg7bYu7vah2B1u91LCn7Vjzckuo0cb7vF4jYZWrt1xUlC2VXfG1/X1P8cyFFeajRsT+TMMRXQbu+NN172i4cZr/tMOcyV5LeCaGSDa2bwLc3RVSnH1u1wFfAhhjZgLxQLIx5pAxZqezfz6wGmjsnO/+uOLvmsWvOBZ5OpZ4gsPFH9gqhov+Fzitv4cQ2NIE2EZV3/r9Tk6V0IWuFfJ8awZ8qwra/8v7dY9HbX20R7UW3r2ZzrjDlmquGQ//8jM/VFvnk2Bcgj333uXen1wbn2Pr54+ktBCXCPeuCF5qORLX/ghn3Fa81wyk/wu27cfT0N38/DAHtjkP0tNuguuCTD8fSIOugRuUi8OZd9tODb6anWerEivVtu+7v8+kkJ3vhKv9rBLaqKct1XS5x/69uducSpMn/+c8XiK3i2QgmQs0EpH6IhKLbUwf55NmA9ADQESaYQNJmohUdRrrEZEG2Eb1NcaYLcBeETnN6a11FRD5xbD9rTJ3PKrW0k6F76kHbnGBbeRsNchWAfkTX9F/47XnH6rVIO/95ZKh79M2uLS+xPXJzydweIKZRNlp4/v7zArr/kQJBXv8JFaz1Vz1OufXr7v1f8n2xolL9P++TnRl4qCWnyn0g6UH26HgWFerXeFHg1eoYf/e/P2tlYYO19v/2dNL5oNHxKq2jDHZInIbMAGIBt4zxiwRkZHAPGPMOOAe4B0RuRv7UeYaY4wRkbOAkSJyGMgFbjLG7HIufQswGiiLbWSPbEP77FERvXyJSagG+7cFT1O5XuD1VKoE6O8vUdD9Qbuam3t67Mr1bcOnZ6p9jzNu937dcSh8d0vBKew9VV0PbbOlE3cJ5WZn+m7Pvib9Q/dY8lUmtmDjqzpy0TF2TZDiajNTRRfofzYCItpGYowZD4z32feoa3spUOAJYIz5Gvg6wDXnAYUYkVVEx8tkhI3OsWNa/E3Ffspl8NdnwRsKfdU5w67v7qnPvvBt26Nn0Zf2tYh3EClTFrIPFqyianuF/fLlCSRlXG0nSQ1tT6Vqnm61TiAp7PgAFRn6ezhh6cj2E4Uxtu7XH0/3w8KMwL/mB9sw2t3VoNw3QJdae3H7LVCbii9/6W6fX7BEo5QqdRpIjnfnPGG/mxzbKPiga6hOXEUbXJIb29eNehY835+hznrlDbrmdxOF4AO1PKUdCRFIPEEtHJ7SimdMg1KqVGggORqdebdt0yiMQI18nrYHz+C42PK2QR1gwCu2YTG5kR2w1/46v5cooFqQmkV/4xeAvBJJqO7Cg963A7fC0fRcu/58CfVMUUr5p4HkaNTtQe+Rv6Fc8BY8sM7/sab97Yhx91QS3Ybbfc0vyN9Xrkp4o3MhePXUhW/7n/On95M2iASaiNAjOsZ74FaofHQfHv50EkqpiCjtAYnKHxHCHtgE/gdgnfl/cGifvVZ7n+k+YuIL7gvHxaNh1lvhBxy3jjcEH4imlDpmaSA5GklU+A3f5QKsxeyZN6k4tRjoPQhQKaXQqq3gtgSZ/jkhgqNvIfyuuHf9XXDf0KnFmROllApKA0kwc4IMRizqYLb6XQMfi4rGq2rruknQKsDUC74D/sD/ZG5KKRUhGkiCyQ1SveSZQjwc7tlBPQL1Xqp5qv3uLpHU7mB7JsVXgsQaoe+na8MrpUqQPnGCCdZOUZgG5/pn+bu4/7QDXvM+ft0v9ntiNTuzZ7Cut3ldhiO/aKRSSnloIAkm2FojgdowPMuuuvkLOhX8LKMy8O38GWw9ly/MeJK8QX8aSJRSJUcDSTC+65C4l371F0j6v+h/imrfqqYHt0DCSXbbHVA8q8dBfmmoQFAIpxFeA4lSquRoIAlmyTfer70Gyvl5oHe4zq5n4Ms3kLin9OjgGk3euLef6xcmKGiJRClV8jSQFEaUa14pT4mkSb+C6XqN9H7tr/G7ww120Sj3UpvuAHAk1VTmSIKPUkoVjQaSwvCa
GsR5aJfxswCOb/dbf4GkYi27aJS/9aTd1w8UFC7+AO70GUPiWUTKX5dgpZSKEA0khRHtp0Tib0FVkfkAAAuVSURBVO4o37YViYLGfez2g1vCu1egEolnf2x5qFzX+1jvJ+GB9TobrlKqROkUKYXhFTSCzGbr221YouDyLwp3r2rNYf9W/yUee1E/+YuGspUK7ldKqQjSEkkwvjPwutfSMEECSdP+dvr25ucHThPKxaPh6u+Dr/GhlFJHAS2RBFOgisrdGB6oey5QPhnu/Asmjwycxu38N+DwQe998RUDDGRUSqmjiwaSYHwHJHoFhDB6SOVVcYUIJG2HBD/ufdFCpFVKqciLaNWWiPQRkeUiskpEhvk5XkdEpojIAhH5W0T6Oft7ich8EVnkfD/bdc5U55oLna+TIvYGfEsk+Oue67zu+kDB8z0zBCcW40zBHYfa79VbFd81lVKqCCJWIhGRaOANoBeQCswVkXHGmKWuZA8DXxpj/isizYHxQD1gB3CeMWaziLQEJgDuOUWuMMbMi1Te8xRoNHeiRpUG+bP/Vm3mf0VAsAs5JZxUvGt4NOkb+H5KKVUKIlm11RFYZYxZAyAinwPnA+5AYgBPi3ZFYDOAMWaBK80SoKyIxBljDkUwvwX5Vm3VOcMuU5tQzTaC/2sipHQIfH5UNLS8MLJ5VEqpUhbJQFIL2Oh6nQp08kkzApgoIrcD5YGefq5zEfCnTxB5X0RygK+Bx40pOPGViAwFhgLUqVPnSN9DvlvnQHJj73aSOr5vRymlTjyl3f33MmC0MSYF6Ad8JJLfV1ZEWgDPADe6zrnCGNMK6OJ8XenvwsaYUcaY9saY9lWrFsNCT5Xr6RxWSinlRyQDySbAPYNhirPP7TrgSwBjzEwgHkgGEJEUYCxwlTFmtecEY8wm5/s+4FNsFVrk6WJRSinlVySfjnOBRiJSX0RigcHAOJ80G4AeACLSDBtI0kSkEvAjMMwY84cnsYiUERFPoIkBzgUWR/A9uGhpRCml/IlYIDHGZAO3YXtc/YPtnbVEREaKyAAn2T3ADSLyF/AZcI3T3nEb0BB41KebbxwwQUT+BhZiSzjvROo9eNESiVJK+RXRAYnGmPHYLr3ufY+6tpcCnf2c9zjweIDLtivOPIZN20eUUsov/ZgdLg0kSinllwaSYAa+Xdo5UEqpo54GkmD8rcuulFLKiwaSoDSQKKVUKBpIgvGda0sppVQBGkiC0aotpZQKSQNJUBpIlFIqFA0kwWiJRCmlQtJAEoy2kSilVEgaSJRSShWJBpJgkhqWdg6UUuqop4EkmPpdSjsHSil11NNAopRSqkg0kCillCoSDSRKKaWKRAOJUkqpIonowlbHhesmwbYSWs1XKaWOQRpIQqndwX4ppZTyS6u2lFJKFYkGEqWUUkUS0UAiIn1EZLmIrBKRYX6O1xGRKSKyQET+FpF+rmPDnfOWi0jvcK+plFKqZEUskIhINPAG0BdoDlwmIs19kj0MfGmMaQsMBt50zm3uvG4B9AHeFJHoMK+plFKqBEWyRNIRWGWMWWOMyQI+B873SWOACs52RWCzs30+8Lkx5pAxZi2wyrleONdUSilVgiIZSGoBG12vU519biOAISKSCowHbg9xbjjXBEBEhorIPBGZl5aWdqTvQSmlVAil3dh+GTDaGJMC9AM+EpFiyZMxZpQxpr0xpn3VqlWL45JKKaX8iOQ4kk1AbdfrFGef23XYNhCMMTNFJB5IDnFuqGsqpZQqQWIitJysiJQBVgA9sA/7ucDlxpglrjQ/AV8YY0aLSDNgMraqqjnwKbZNpKazvxEgoa4ZIC9pwPojfCvJwP+3d7chdlR3HMe/P20SrRGTWJVQimZV0BRiGkVstVIqjdQ3VUgx+BRtQdAIVRRqsQ/Wdy20hYI0VhqIbRA1NTQIksYoEV+YGHUT81DNA75Qokt9iE2hxcR/X5z/jbfL3t29d3b3ziy/Dwz33DNzL/9/ztycnTMzZ/7Z
42frxrnUz3TJA5xLXVXJ5eyIGHNIZ9KOSCLiqKS7gI3AicDqiNgt6SFge0RsAO4FHpV0D+XE+61Rerbdkp4E9gBHgZURcQxgpO8cRyw9j21J2h4Rl/T6+TpxLvUzXfIA51JXU5HLpB2RTBfeoeppuuQyXfIA51JXU5FLv0+2m5lZw7kjGdsf+x3ABHIu9TNd8gDnUleTnouHtszMrBIfkZiZWSXuSMzMrBJ3JKNo2kzDkt6W9IakQUnbs26epE2S9uXr3KyXpN9nbjslLelz7KslDUna1VbXdeySVuT2+yStqFEuD0p6N9tmsAkzXUv6Ss7OvUfSbkk/yvrGtcsouTSxXU6StE3Sjszll1m/QNLWjOsJSTOzfla+35/rzxkrx65FhJcRFsp9KgeAAWAmsANY2O+4xoj5beBLw+p+Ddyf5fuBX2X5GuBZyk2elwFb+xz7lcASYFevsQPzgIP5OjfLc2uSy4PAfSNsuzD3rVnAgtznTqzD/gfMB5Zk+VTKzcALm9guo+TSxHYRMDvLM4Ct+e/9JLA861cBd2T5TmBVlpdTbgLvmGMvMfmIpLPpMtPw94A1WV4DXNtW/1gULwNzJM3vR4AAEfEi8OGw6m5jvxrYFBEfRsRHwCZyCp6p1CGXTmo703VEHIqI17L8L2AvZeaJxrXLKLl0Uud2iYg4km9n5BLAt4F1WT+8XVrttQ64SpLonGPX3JF0Nu6ZhmskgL9LelXS7Vl3VkQcyvJ7wFlZbkJ+3cZe95zuyiGf1a3hIBqSSw6HfI3y12+j22VYLtDAdlF5PtMgMETpmA8AH0fE0RHiOh5zrj8MnM4E5uKOZHq5IiKWUB78tVLSle0roxzPNvJ67ybHnv4AnAssBg4Bv+lvOOMnaTbwV+DuiPikfV3T2mWEXBrZLhFxLCIWUyauvRS4oJ/xuCPpbDyzF9dKRLybr0PAesoO9n5ryCpfh3LzJuTXbey1zSki3s8f/2fAo3w+hFDrXCTNoPzHuzYins7qRrbLSLk0tV1aIuJj4AXg65ShxNb8ie1xHY85158GfMAE5uKOpLNXgPPzSoiZlJNUG/ocU0eSTpF0aqsMLAV2UWJuXSWzAvhbljcAt+SVNpcBh9uGK+qi29g3Akslzc0hiqVZ13fDzj9dR2kbKLkszytrFlBmud5GDfa/HEf/E7A3In7btqpx7dIpl4a2yxmS5mT5ZOA7lHM+LwDLcrPh7dJqr2XA83kk2SnH7k3l1QZNWyhXobxFGX98oN/xjBHrAOUKjB3A7la8lLHQzcA+4DlgXtYLeDhzewO4pM/xP04ZWviUMlb7w15iB35AOWm4H7itRrn8OWPdmT/g+W3bP5C5vAl8ty77H3AFZdhqJzCYyzVNbJdRcmliuywCXs+YdwE/z/oBSkewH3gKmJX1J+X7/bl+YKwcu108RYqZmVXioS0zM6vEHYmZmVXijsTMzCpxR2JmZpW4IzEzs0rckZjVnKRvSXqm33GYdeKOxMzMKnFHYjZBJN2Uz4kYlPRITqx3RNLv8rkRmyWdkdsulvRyTha4Xp8/0+M8Sc/lsyZek3Rufv1sSesk/UPS2rxT26wW3JGYTQBJFwLXA5dHmUzvGHAjcAqwPSK+CmwBfpEfeQz4cUQsotxZ3apfCzwcERcB36DcIQ9lttq7Kc+QGAAun/SkzMbpC2NvYmbjcBVwMfBKHiycTJnM8DPgidzmL8DTkk4D5kTElqxfAzyVc6V9OSLWA0TEfwDy+7ZFxDv5fhA4B3hp8tMyG5s7ErOJIWBNRPzk/yqlnw3brtc5if7bVj6Gf7tWIx7aMpsYm4Flks6E4881P5vyG2vNyHoD8FJEHAY+kvTNrL8Z2BLlyX3vSLo2v2OWpC9OaRZmPfBfNWYTICL2SPop5QmVJ1Bm/l0J/Bu4NNcNUc6jQJnWe1V2FAeB27L+ZuARSQ/ld3x/CtMw64ln/zWbRJKORMTsfsdhNpk8tGVmZpX4iMTMzCrxEYmZ
mVXijsTMzCpxR2JmZpW4IzEzs0rckZiZWSX/A9GUd7wrK+RIAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYUAAAEWCAYAAACJ0YulAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAIABJREFUeJzt3Xd4VFX6wPHvmxAIJfSANOkoqIgQEBUFGwL23te2dndd14Zr+WFva1ldFOvq2l37LqCAFFEEKSLSCT20hAAhBNLP749zZ+bOZGYyKZNJMu/nefLMndvmXGa4773nnPseMcaglFJKASTEugBKKaVqDw0KSimlvDQoKKWU8tKgoJRSykuDglJKKS8NCkoppbw0KCgVIRF5R0Qei3DdDSJySlX3o1RN06CglFLKS4OCUkopLw0Kql5xqm3uFpElIpInIm+JSHsRmSwiuSIyTURaudY/S0SWicgeEZkpIn1dy44SkUXOdp8AyQGfdYaILHa2nSMi/StZ5utFJF1EdonINyLS0ZkvIvKCiGSKyF4R+V1EDneWjRGR5U7ZtojIXZX6B1MqgAYFVR+dD5wK9AHOBCYDfwNSsb/5PwOISB/gI+AvzrJJwH9FpKGINAS+At4DWgP/cfaLs+1RwNvAjUAb4DXgGxFpVJGCishJwJPARUAHYCPwsbN4JHCCcxwtnHWynWVvATcaY1KAw4HpFflcpULRoKDqo5eNMTuMMVuA2cA8Y8yvxph84EvgKGe9i4GJxpipxpgi4O9AY+BYYCiQBLxojCkyxnwGzHd9xg3Aa8aYecaYEmPMu0CBs11FXA68bYxZZIwpAO4DjhGRbkARkAIcCogxZoUxZpuzXRHQT0SaG2N2G2MWVfBzlQpKg4Kqj3a4pg8Eed/Mme6IvTIHwBhTCmwGOjnLthj/jJEbXdNdgTudqqM9IrIH6OJsVxGBZdiHvRvoZIyZDvwTGA9kisjrItLcWfV8YAywUURmicgxFfxcpYLSoKDi2VbsyR2wdfjYE/sWYBvQyZnncbBrejPwuDGmpeuviTHmoyqWoSm2OmoLgDHmJWPMIKAfthrpbmf+fGPM2UA7bDXXpxX8XKWC0qCg4tmnwOkicrKIJAF3YquA5gA/A8XAn0UkSUTOA4a4tn0DuElEjnYahJuKyOkiklLBMnwEXCMiA5z2iCew1V0bRGSws/8kIA/IB0qdNo/LRaSFU+21Fyitwr+DUl4aFFTcMsasAq4AXgZ2YhulzzTGFBpjCoHzgKuBXdj2hy9c2y4ArsdW7+wG0p11K1qGacCDwOfYu5OewCXO4ubY4LMbW8WUDTzrLLsS2CAie4GbsG0TSlWZ6CA7SimlPPROQSmllJcGBaWUUl4aFJRSSnlpUFBKKeXVINYFqKi2bduabt26xboYSilVpyxcuHCnMSa1vPXqXFDo1q0bCxYsiHUxlFKqThGRjeWvpdVHSimlXDQoKKWU8tKgoJRSyqvOtSkEU1RUREZGBvn5+bEuSlQlJyfTuXNnkpKSYl0UpVQ9VS+CQkZGBikpKXTr1g3/pJb1hzGG7OxsMjIy6N69e6yLo5Sqp+pF9VF+fj5t2rSptwEBQERo06ZNvb8bUkrFVr0ICkC9Dgge8XCMSqnYqjdBISL5e6G4INalUEqpWiu+gsKutZC1stp3u2fPHl555ZUKbzdmzBj27NlT7eVRSqnKiq+gAGCqf4CqUEGhuLg47HaTJk2iZcuW1V4epZSqrHrR+yjWxo4dy9q1axkwYABJSUkkJyfTqlUrVq5cyerVqznnnHPYvHkz+fn53H777dxwww2AL2XHvn37GD16NMOGDWPOnDl06tSJr7/+msaNG8f4yJRS8abeBYWH/7uM5Vv3Bl9YuM++Nvy5Qvvs17E5/3fmYSGXP/XUUyxdupTFixczc+ZMTj/9dJYuXertOvr222/TunVrDhw4wODBgzn//PNp06aN
3z7WrFnDRx99xBtvvMFFF13E559/zhVXXFGhciqlVFXVu6BQGwwZMsTvWYKXXnqJL7/8EoDNmzezZs2aMkGhe/fuDBgwAIBBgwaxYcOGGiuvUkp51LugEO6Knq2/2tdW3aFx9OrymzZt6p2eOXMm06ZN4+eff6ZJkyaMGDEi6LMGjRo18k4nJiZy4MCBqJVPKaVCib+G5ihISUkhNzc36LKcnBxatWpFkyZNWLlyJXPnzq3h0imlVOTq3Z1CLLRp04bjjjuOww8/nMaNG9O+fXvvslGjRjFhwgT69u3LIYccwtChQ2NYUqWUCk+MMbEuQ4WkpaWZwEF2VqxYQd++fcvfuIaqj6Ip4mNVSikXEVlojEkrbz2tPlJKKeWlQaEi9u+C4sJYl0IppaJGg0KkTCns2QjZa2JdEqWUihoNCpHyNL2UFMW0GEopFU0aFDzyczSDqlIq7mlQ8Ni1DjJXxLoUSikVUxoU/FSue25lU2cDvPjii+zfv79S2yqlVHXToBCx0AFDg4JSqr6I7yeas1ZDcT506F+l3bhTZ5966qm0a9eOTz/9lIKCAs4991wefvhh8vLyuOiii8jIyKCkpIQHH3yQHdu3s3XrVk4cMYK2qanMmDGjmg5MKaUqp/4FhcljYfvvQRYYX+rsBo0hoQEUOvmKGqb4Twc66Ag47fGQH+lOnT1lyhQ+++wzfvnlF4wxnHXWWfzwww9kZWXRsWNHJk6cCNicSC0aN+D5vz/NjM/epO2hmv5CKRV78VN9VENdSadMmcKUKVM46qijGDhwICtXrmTNmjUcccQRTJ06lXvvvZfZs2fTokWL8DsqLYHdG7QLrFKqRtW/O4XRTwWfn7sDcrfaaU/uI08upI5H+U8HU1riTIRvjDbGcN9993HjjTeWWbZo0SImTZrEAw88wMknn8xDY+8MvaMDu+DAbpBEaNkl7GcqpVR1iZ87BYnert2ps0877TTefvtt9u2zVVVbtmwhMzOTrVu30qRJE6644gruvvtuFi1aZLdt1pTcfXnRK5xSSlVA/btTqKqiA2AMNGwSsCD0HYI7dfbo0aO57LLLOOaYYwBo1qwZ77//Punp6dx9990kJCSQlJTEq6++CsANl5/HqEv+SMcu3bShWSkVc3EUFCK8VchaaV9DVSOF8OGHH/q9v/322/3e9+zZk9NOO81/o8I8/nTtJfzpxmsh9ZAKfZ5SSkVD/FQfVVThfnvXoJRScUSDQig7V/nuGqD8h52LDrgao6tT3RoESala5Z0zbDf1SPzwLMx5ObrlqQPqTVAofwQ59/JqaHU2xv55prNW2vxJUVTXRslTqsYVF/iPebJhNsx7NbJtpz8GUx6o2ufvWBali8OaE7WgICJvi0imiCwNsVxE5CURSReRJSIysLKflZycTHZ2dviTZt7Oyu7eyt/j/37bYvsDAF9wKMyD/dn24blITuCm1De9e6OvW2ywVY0hOzub5OTkChZcqTjyWDt4LBW2Bz3tRKakGMa1gC9uqNh2mSvg1WNhxhMV2275N/Dx5RXbJoqi2dD8DvBP4N8hlo8Gejt/RwOvOq8V1rlzZzIyMsjKygq9Us5W30l4p4GkxrAn01m2wjddZjsnc+qeTf7zPOvvEkAgJ9O+bsqyn7MrAaScmLt3G5QWQUKi7+rC83kF++yzCg33QxPbZTU5OZnOnTuH36dSCiYcB+NyKrdtwV77uuQTOO912LoYWnUrf1z33G32NWN+6HWePwySm0OfUXDK/9l5n15ZTnly4aub4fTnoVm7iA6hKqIWFIwxP4hItzCrnA3829jL+7ki0lJEOhhjtlX0s5KSkujevXv4lZ4ebR8GA7jkI2jVFT65yL4flwPjQqSZGJdjv8i9Gf7zPOuP3QyJSfD4MZDYyAaC4gPwt63QsGn4Mnn20aCx3cazb4AFb8N3d8Cgq+GEu2HtDOhbzo+npuxaB/8cDDf/DKl9Yl0aVVFF+TbNS2Il/vsX7rfpYmrg5OS1
bQm8djzc9KNNOVNVuzfAP46EC9+Bw86180KNpWIMvD4cOg2C66eXXf7TP6DnyXDQ4fiqpcPUEuzNgL1A5nJfUHB/lgRUbS/9An7/D6yaBCkdYMyz5R5eVcWyTaETsNn1PsOZV4aI3CAiC0RkQdi7gbBc/9imxN7mRaIo3z8glNltgq+qyP2Fbp4HH15c9frFLQvh3TPhm9sgf2/V9hXM++fDovcqts2yL6G0GH77qPrLo6LDnS7l8fbwwfmV2887Y+DvvX3v10yFjAVl18vfC8/2ho1zKvc5hfshe62tiv3iejtv1WT/dYyxV/EV5dlm6Re+eY+5gpynWhh8tQtbFpbdjzEw9SF7V/L1rfDeOb5lJUWwbpb/+vnl3LmUlkD6NP9zxmfX2IBQg+pEQ7Mx5nVjTJoxJi01NbVyO0lIDPcBoZd9/0gEO3e2L873XfF/fDms/hb27fBftaQYvr3Pvzoq8OrAzrQv2393NWA7n3NgD+xx4mlRvq3//O3jCMoZRPo0G3CCMQZ2rQ9SNOdn424TAcjdbsuy8efwn1lSXOcb42qtglzYF1AVunsDPNrW/zeybmbl9u9u9yo6AB9cAG+eXHa9bYshLxOmh04kSUEurJwIG360PX/A/uZ2roFProCXB8KEYb5egHudNDWPptqT8JJP7VX88m/KL3fWKt90abF93bnavm7+xX/dd8/wTbuDaeZK//Xcv/9f33ftv9RebP37LJj5lG1r3L0Rnjq4bLnc557Zz9ntPro0+DH88nrw+dUslg+vbQHcSX06O/OixH2nUBp6tUB7gxTps+tcb0zwoFIUYoyE9Kkw9xVY7HrYraK9il45xuZxGpdj/+MBfP8oHHlJxfazr5y7rl/egMl3w/UzoJO7H4Dzb+n5d1zyqQ2enuD2yeVwT5ieWI+2gbaHwKgnYf8u6H9hxcqtgisthSedNqdxOb4ectnpdt6XN1b8NwK2N8+Kb+DwgLuLzfPCbOT8Fjb+6Ju1d5vNFJDsJIP85k/2rtMjP8dehKz8X/BdLvwXnPkilBTak3CTtnb+on9Dv7PKru/Oljx+CFw7BTocCZ87/389weatU0MfRqkrKLxytP13XTfTBovuw4Nvs/FHaNjMTmfMhyc6Bl9v62L/9oeZTgP1mu9sEJsbYa+pahbLO4VvgD84vZCGAjmVaU+I2LF/8k27rxog/Em5QZDePks/89+2Iid1z4m0vMAU7O6htMReLXkS+9kVPTuOvAwek8Ik5APY+JN93b3efrbn6t5TtjkvwWPt7e19zmbf3Y+n7aYoH+a95ttu9RT46hY7vXMVvH8efPHHipe7PKWlMOsZG3CiKXOFTbRY3bYssndcWxZVbLvAu9IJx9uqomC/4UBvnmo/c9o4yAmoLp3xuD2Rpk/zn++uZgkU7Pf7/KHwzyF2et5r/gEB7DMCoQKCh1/ZnN98+lT72/rxBf91Jwzzf79rnb17d9uxPPznBctS/O+z7R2SCXO3m3atU7Zpodd5fXjoqqF9mTYIxkA0u6R+BPwMHCIiGSJynYjcJCI3OatMAtYB6cAbwC3RKgsAvV1XAzMCb2nDnFATyruZMhW78/By/6eJ8IQ++V54vm/Abpz97N1S8TTb5a3vOS5JhCc6wT/TnPeun01xfujtZ/8dJt9je3EAfHghLP4gsrIZY09Sn/4hsvXd1n5vv+PJ94ReJ3OFDWL7suCnl8IH9jkvw/iAjgjrZ8MrQ+G5KDS0f/+wfV0zNfx6WatsVeL+XfDiETDe1Xnvp5dgx+/2qtpdlx0qUGY4VSg/vgAvHOa/LMepqnR36/70D5F3vXRvt2+7fQ333YTjLps7aH54oQ1oYRlIbOg/69VjoE2v0Jt4qpo8Cl3JKz0BLpjc7eWUxREqa4K7CquGRS0oGGMuNcZ0MMYkGWM6G2PeMsZMMMZMcJYbY8ytxpiexpgjjDFBWquqUbiTe0Fu6GXlPedmDGFP6su/LntnEm6/WxY6V9ZBVghs
aAvckedkErEwB/flzbbKAHw9qrxtGxE+/HfAebYj3L9vKGum2NflX9vXrNXhr0w9stf67nDCBaxXhtoT6Vc3w9QHYWvAVfnvn/mehJ3yAGSt8F++9POy+ywttb3ExrWwVW/lKSm2VQTuh63ydvrq+z3tYIX7bT174Lbjh8DTXe36ezZBgevkP/VB3/THl/mmn+nhm96xDBa+G7yxNn0arP/BTnsC5lc3+ZYv/9o3aBXA2umwM91VPtcxvXCY7+6xOm2YXbH1C3Lh4yD19dnpZed57N7o/959IZWziZB+/zSyMm0qp/0tBuInIV64huYFb4de9tsn4febsQBahHl24FvnxFKmz3SIE+sbJ9nXM14ouyzwjmT9D7ZnkkdgQ1h5gjZwO35ztXm4e17s2VT2Qb6yO3ZePA3SEdwJ7UyHfw6y3XofzCzbJjN+sH0tr+/5y662D/eFQEmRbWht0Ql6nuSb7wnYRQdgyoO2IfXq//nqnd316O4ug8Fu7ee+AlPut9PzXoMh14cu5zjXIEtF++3ntOxqG109EpPs66dX2pP0Q7t8v2N31cVed3VieVzfhacHXqe0squ97xz3uBxY9kXZ5YHec7p2tuoOBx/j//spzoenu/nex6qTQWXuTt46xf99XmV7P1aTA3vKf16iiuIoKIQ51HBXlKXlVLF8cH4EVUzYk8CVX5W/nsfq78rOKwoYdyHonQO20Xf2c/DQbkiohpvBn170Tb8YQT9xUwIT7/KdQNfNcPpxh3Bgt68uuaQA8rLL774XiaJ8WwUz62lIPRR+dbre3uPqUeW52ispsm0kgdwnhZJCaNAo9OftcD1FG+o3VVxo22jcVk4M3stNnACw1kmpXlrsCgquk3tmOfXi5Qlsi3D7Kci/STi715c9vkBvnBR+eW32zyABtCbt3gCNB0T1I+InKCS3CL0s3J1CJALrHUNZ+rnv6tl9tR3syimwQSyYUFfgP/3DV66EhmWXb1sCLQ8Of6dQVfNd1Servw1/PE93gxNdOWee7RFyVUqKfFfQbhkLIaW9/7zVk+0f+PfyeCbIg46RpEHJz7F3aykH+c9/OQ1u+Rm/u7+czbbqoUUX26i+czX0OsVene/e4L99qJQMU+6HQVfZ78kAz/SEO5baK0X3nUKk7TSh5GwOvcxdDVVdtlXi2QJl5WyGjhoUqke4oFBTt4TrZgb/D1je3UhIIYJCQgMbEBa8DT1G2LEa3AHgtePtk6HuLntugf3ca8KMx0Iv+98dvumJTo+pHcvg/Ddg8Ue2/jbwRFtR7l5Qofq9v3euvRsIrMLKXhO8Cucf/eGUcb4G0AGXBy9nSYinacG2rXguOgpzbRvCuJxKdm5Qdd7s56DvmeWvVwXxExRqg3C36ZWRGdD4uX+nf131t/fa15QO8Jel9oGfIy+280IFhPy9/k+s1gbuO7lF7/qmX6rYQEgRC5WLxlM9tO23IMuWBX82xd0jpjJX9J9dW3beu2fBxRV8Cl3VDydUstdWBWhQqEnVfXW3PuAx+sAeKh6522y9+pKP7V84T3UJv1zBayeUnResV0u0rJ9V8WcYVN13/ltw6Jiof0ydSHNRb5SXNTWaojzWg6ph7jw7yl/bCJ4dqY7EesEcd3v565z+fNl596y3iS/D6VwzjdwaFGpSJL2UqiRMw7H7KexgIn3YRqnarln74PO7HQ9/cNqLDjvPN/8PX0OXSmXt9zn4GNvb79RHoEWQHEduR11Rdl6T1nDSA8F7KDZMsa+BD95FiQaFmhQqH1Jt8NwhZdMbqLrPkxo6GjocCY2a+94PvdU33TNIkrxIHRMiQWNKh/K3DWyEPfwC+3rem/b5kx7D4f/2+F/Rt+xKmQuq/q4cUY1bh96/x/B7fN2/r5nom3/Perh3g+/9Q7ttt+Yxf/fNa9fPN93zRHggoOOLpyNKYpju0NVIg4LyCUxvoKyEIF1g64rep5Wdd9V/q2ffN/7gnyLCPX5I07aV3+9pj8N9TiJK
d9K54SEaWY92nrTucCScPd6/7c6bqsV10hexz3sc+2f7PrlF2edPznzRBouL3/fd4d+x3L4Pxv1AZMuDbfnv3WjvABq3gmu+hXNf9wWOIdfbOxQoWy3UwHVHcNi5vtxV4XpQViNtaK5PCiuRTkKVr8cIm3SttrtuKnQcaDsWvOg8LNj3TJt7ypTYJ+Cv/Aq6B2koD+ahXfCIc5V8xgs2T1TvU20yOM+JuF1fX4oQ98n4tCdswMhc4f9E9HXTyj4l7OZ5grxRM7hjma0KetQJMIed69892WP00/bPw5Nh4Pi7oOsx9vM7Dy673SnjYNgd9sR97gSbmmTQ1TZ9R1Jj+MsSu16b3rBqon0aHmybhSftdiiNmvm/73qM/XPrPtz+ux5xUej9XPiOzVSwYXblBkWqBA0KSpWn06DoBoWeJ9ncQW6SEFlvtV6n+DJxdnEStLV09SBr1Az+8FXwgaBadPF/buaP0+FN1xWvOzVMmqtrrDst+rC/+rrausvbtK29sl/4jn9Q6DIYuh5nq0w8Dzj+daXNoApwgav7cWD6mMatbBqN3evtKGxN2gZPX3P689B7JBzhVB2FSo2SkGgDAkDzjmVHQvNod6j98+7/Of/0MpUl4v/v6uYuc+DnR5lWHylVngaNIuvRUh53HqVrp/imTwp4avjyz+HutZHtc9TT9qG4o28Ov56If0C4Yznc/BOc+qh93/s06DzInowOPx9GRzjso+ekevxdkXe5vmYSnO6qU29eTlvB1RNtd0ywV869T7NpS5p3CD4saKNmvoAQDd1PsOUAGD4W7qtfbXEaFFTstD0kOvtt0iay9Zp3ggcCnt7uOLDsegkN4IaZkX/+uc4IWS27+uYNudE3KMyop+FgV2+XwLriBo1s9UWZciT512kffCy07QXnvAKjn4q8fGCrQpJb2GqgAVfAGa5ukhe8DUffYKdPGWd7xYTSpDXcvc6u4wkKp4yrWFkAOoZ5ELHbMN9JvuMAuPzT4KlOalK/c+x3ccLd0CgltmWpZvEVFG4P8iSqip0eI6p3fxe/D4eeAX/+Ff62Nfj3ndTEN33EBWUbGJt3hHNf8yWjA1uV425EBf8TvsepTlK73qfaK25PnTTAmGdcjZ7Of7sbZsGlH0ObnnDNZF9AErFB4Y/f+3q7HH4B/GmBfT/QGWOiaZjgd9qTcPYroZd7NGgI54wPnel32B3l959v2sa5C/FUUblOK626lV8GgKv+B38J8ZR9bSRiv4saquevSfXviMKJ9AeqakZ1Pcw34j6Y/bztBunuMtiwqW2czFwBl3/mVHUI/PfP8Pt/7NU7wJAbbBCY5wx/eOQl0P9ieNhJUeypt+40yJdGvPepcMrDNvXF204Pn2P/DENv8b+KfdCVaC+wJ0zHAYCT3KzrsXbI062LfHc6ndPgvDfseAfuRsozX7J18uGG1jwmumNWBdXBOZb2rl5sPUbA9dMhqWn4cTUaNSvbOKtiQkxFxweOsbS0NLNgQRXG43n3rLLpISLVdZj/mLOqaobfa9NvhDPqaV8Op0B9ne9ybJjBTiK14r92sPhDz4BLnIbTvGw7HsYZL/hOWJ7cUmM3+ap9PPPKG+thyyJ440T482JoHSRTa3GBHZ+j23FVP55Y2bUOWofJcuu2aZ7t6dOrCs80qIiJyEJjTLmPRcdX9RHAIVXIHRLNVNN1TYdqSN/bJ0gfeo8hN9qT7NE3+qqZPFf2Hhe/Vz0BAewTqQkN/MfybtrGZmINdgXrfmjr/Lfsw1Hl6TTQHlOwgAC2KqsuBwSIPCCAbVfRgFDrxFf1EYQfga08iUnQZShsnlt95amr2vSKLC9+w5TQz09Igq3yWfu9b96xf7ID0Zx4n7OO+B7yKSmCX16rWrlDadoWHsouf72ht8Lc8f4XCNHs6aJUDYu/O4UmrctfJ5TEhrbnQzwa7Awt+Yev7ZOsI4OMf3BPkBG3mqXa12sm+/bhJbZu3qPfObaefswztk96IHfjrztN
QE0a9UT51URK1WHxFxTahxkWEsKnNEhoUGOPmtcKx90OqX1tP/qRj9qnYXuMsOkEmneAW+b51u1/iQ24fc/y34fnEX1J8O+bDtCqq+0S6akWats7/J1cQoLtxnrOhPDjHyulKi3+qo9SnZPKVzf5z+9xoh1LOJzDz/N/n9IRcisyaHotEfRYXV0Kb1tgT9Dg62YJNlmXm/spy35OMAgcVLzHCDuGcNNU//nuq21PnpxIOj3c9kv56yilKi3+7hTA1xiW7DqBnfe6/7JgAh9s6nBk9ZYrmMpUVQRLv+sW2Oe+XT9bn/7QLvt5bSsx8pon8dppT8DIx33zT3nY3lG06WnfN25tUyO4eRKORT21uFKqPPEZFLxcV6bN2tkHicK1GZTpfRTl7rzuHi5u4Ybka9Tc9nkfdHXw5UdeZhtswZckrLjAVttUpRHe88xBoxQ49jabH6Zpqn04yn1Hce/6sjlmjr7J/rl7/iilYiI+g4KnETMwzcIho32pCCIx8jH/jJPJLUOvGyipaZCG1wDBqlMat7LldOswwKbrBftUaINGcOY/gufDaZQCJYV2etA1zudUYZjQyz+DPqPKBszBf4S70yPbR8MmNsulPrykVMzFZ1BI7WNHYBoTYdKvUNr29s9N3/9i2989Evdv9c9FH1SQoCAJtr+7W+se9oGou9f61+mf+nDw3XrK2GWITeh1WRV6VPU+FS77RJ/hUKqeiM+gAHYEpmBJx9wnN3eenHA8eXCOvhGu/TbyUafa9Q2/PNidQmqQbTyDhgQObBKY1wdsldEJd8GfFtmgduJ9NkgqpRTxHBQi4e55A3iH7DvnVTugiXd2wFXyWS/7qmbADi946BlwyYc2BfPpz9n5PYbbk/Mfvrav/S+21S4efUb67/f4O+GSECM/hTLgCl96ZID+F9oA4mn4VUopl/ju7uHJDNljhGum6wSfdp3Nmjn5Xv/BSAZcFrCjgKDQopMdzm/hv3zzPPl0Dj3df902PX0n6PNeh98+gflv2vz0gcMPnvSgLwA1bGbzxpTnnPH2deqD4ddTSiniPSg0bBq+y2dCgj2JTx4bfj+eE3Wofvb9wwy3F2zdJm1sTpjAOxD3+/PegI8vjXy/10yOrwfvlFKVotXa/dSyAAAep0lEQVRHgSrVYOrZJkRQqMjzDCLQ+5SA3DpBgorfCT6CMnc91j+lsVJKBaFBISLlPI9Q3p1CVZ3/Rtk7moOH+jKVas8fpVQ10aBQRpAT7LA77GtgqgYPzzMCDRpGp0jBJCTaZxHADleolFLVIL7bFCI1+Dr7F8oFb8PaGcFHdmt/RNSKRccBcOcqaNY+ep+hlIorUb1TEJFRIrJKRNJFpExrrYgcLCIzRORXEVkiIlUYAaeaVKYqpnGrssnywA7FeGMlR3mLVMpBWn2klKo2UQsKIpIIjAdGA/2AS0WkX8BqDwCfGmOOAi4BIhhpvA5JTKpaPiGllKph0bxTGAKkG2PWGWMKgY+BswPWMYAn61sLoBbkodarbqVU/IpmUOgEuJ74IsOZ5zYOuEJEMoBJQNA0mSJyg4gsEJEFWVlZ0SirUkopYt/76FLgHWNMZ2AM8J6IlCmTMeZ1Y0yaMSYtNTVED6DqovXzSqk4Fs2gsAXo4nrf2Znndh3wKYAx5mcgGahA7mqllFLVKZpBYT7QW0S6i0hDbEPyNwHrbAJOBhCRvtigEOP6Ib1TUErFr6gFBWNMMXAb8B2wAtvLaJmIPCIintHd7wSuF5HfgI+Aq42J1mPBFaXBQSkVf6L68JoxZhK2Adk97yHX9HLguGiWocK0TUEpFcdi3dBce2lwUErFIQ0KodSWWiyllKpBmvsokCTAcbdDv3NiXRKllKpxGhQCiQQZhlMppeKDVh8ppZTy0qCglFLKS4OCUkopLw0KSimlvDQoKKWU8tKgoJRSykuDglJKKS8NCkoppbw0KCillPLSoKCUUsoroqAgIreLSHOx3hKRRSIyMtqFU0opVbMi
vVO41hizFxgJtAKuBJ6KWqmUUkrFRKRBwTO4wBjgPWPMMnRoMqWUqnciDQoLRWQKNih8JyIpQGn0iqWUUioWIk2dfR0wAFhnjNkvIq2Ba6JXLKWUUrEQ6Z3CMcAqY8weEbkCeADIiV6xlFJKxUKkQeFVYL+IHAncCawF/h21UimllIqJSINCsTHGAGcD/zTGjAdSolcspZRSsRBpm0KuiNyH7Yp6vIgkAEnRK5ZSSqlYiPRO4WKgAPu8wnagM/Bs1EqllFIqJiIKCk4g+ABoISJnAPnGGG1TUEqpeibSNBcXAb8AFwIXAfNE5IJoFkwppVTNi7RN4X5gsDEmE0BEUoFpwGfRKphSSqmaF2mbQoInIDiyK7CtUkqpOiLSO4VvReQ74CPn/cXApOgUSSmlVKxEFBSMMXeLyPnAcc6s140xX0avWEoppWIh0jsFjDGfA59HsSxKKaViLGxQEJFcwARbBBhjTPOolEoppVRMhA0KxhhNZaGUUnFEexAppZTyimpQEJFRIrJKRNJFZGyIdS4SkeUiskxEPoxWWfbmF7F5135KS4PVhimllIIoBgURSQTGA6OBfsClItIvYJ3ewH3AccaYw4C/RKs8H87bxPHPzKCgWAeMU0qpUKJ5pzAESDfGrDPGFAIfY1Nvu10PjDfG7AYIeECuWiU4I0qXGr1TUEqpUKIZFDoBm13vM5x5bn2APiLyk4jMFZFRwXYkIjeIyAIRWZCVlVWpwiSIjQolGhSUUiqkWDc0NwB6AyOAS4E3RKRl4ErGmNeNMWnGmLTU1NRKfZAnKBitPVJKqZCiGRS2AF1c7zs789wygG+MMUXGmPXAamyQqHZafaSUUuWLZlCYD/QWke4i0hC4BPgmYJ2vsHcJiEhbbHXSumgUJiFBq4+UUqo8UQsKxphi4DbgO2AF8KkxZpmIPCIiZzmrfQdki8hyYAZwtzEmOxrl8VQf6Z2CUkqFFnHuo8owxkwiIJuqMeYh17QB/ur8RZW3TUFjglJKhRTrhuYa42lTKNGH15RSKqT4CQoJWn2klFLliZ+goNVHSilVrjgKCvZVq4+UUiq0OAoKWn2klFLliZ+g4G1TiHFBlFKqFoufoOBUH+3KK4xtQZRSqhaLo6Bgo8JFr/0c45IopVTtFXdBQSmlVGhxFBR80wcKS2JXEKWUqsXiKCj4osLrP0Ql555SStV5cRMUEl23CoUleqeglFLBxE1QcDcpCNq+oJRSwcRNUHBXH2mbs1JKBReXQUEppVRwcRMUGiX5DlXDg1JKBRc3QaFts0axLoJSStV6cRMUurZu4p1+aXo63cZOjGFplFKqdoqboJCQEL7SaM7ancxJ31lDpVFKqdopqmM01yWXvTEPgA1PnR7jkiilVOzEzZ1CMKWaR1sppfzEdVB45rtV5OYXxboYSilVa8R1UJgway0XvPozE5ds886797MlHPbQtzEslVJKxU7ctyms2pHLrR8u8r7/ZMHmGJZGKaViK67vFMJ5+8f1rM3ax9jPlzB95Y5YF0cppWqEmDo2kH1aWppZsGBBpbbN2L2fYU/PqNS22itJKVWXichCY0xaeevF1Z1C51ZNyl9JKaXiWFwFBaWUUuFpUFBKKeWlQUEppZSXBoUK+PiXTfy2eU+si6GUUlETd0Ghd7tmldru84UZjP3id84e/xMHCnWMZ6VU/RR3QWHqX4fzy99OrvB2d/7nN+/02eN/pKTUBM2dZIxh0abdVSqjUkrFSlSDgoiMEpFVIpIuImPDrHe+iBgRKbcPbXVo1zyZJeNG8srlA73zPr/52Ii3X71jHz3/Nokr355XZtkn8zdz3itz+Hbp9mopq1JK1aSopbkQkURgPHAqkAHMF5FvjDHLA9ZLAW4Hyp5ho6h5chJjjujA+ifHYEz54y0E81N6NiWlhvNe+Ym9+cXMuGsEa7P2AbAxO6+6i6yUUlEXzdxHQ4B0Y8w6ABH5GDgbWB6w3qPA08DdUSxLSCKCVGHQ5p5/
m+SdXrY1B3F2VreeE1dKKSua1UedAHd2uQxnnpeIDAS6GGNqxdiYb12Vxpe3HMuNw3tUavuPf9nM0i05AJTWsfQhSikFMWxoFpEE4HngzgjWvUFEFojIgqysrKiV6eS+7Tnq4FaUlFTuhF5capizNhuAvQeKq7NoSilVI6IZFLYAXVzvOzvzPFKAw4GZIrIBGAp8E6yx2RjzujEmzRiTlpqaGsUiW0UlpQA8dEa/Cm330S+bvNP5Rbbb6tjPl/il5lZKqdosmkFhPtBbRLqLSEPgEuAbz0JjTI4xpq0xppsxphswFzjLGFO5FKjV6IQ+NvCkdWtV6X28M2cDXy/ewsfzN/sN4qOUUrVZ1IKCMaYYuA34DlgBfGqMWSYij4jIWdH63Opwct/2rHx0FP07t6SB0yvp85uPqfB+bv94sXfaGENdS1OulIo/cTWeQmVs2JnHyu25jDr8IPIKitmQncedn/7GFUO78sBXSyu+v6dO5+H/LuNfP21gw1On8+6cDZzQJ5XubZtGofRKKWVFOp6CBoUq6Da2ap2m1jw+mt73T6ZN04YsfPDUaiqVUkqVpYPs1AGeBu3svELyCorLLPtu2XatclJK1SgNClXw+7iRVdrenVgv7bFpAJzy/CxO/PtMJsxcy43vLWTqch0fWilVc6L5RHO9l5KcxO0n92bRpt2cfGg7DuvUgqTEBM4Z/1NE2w9yAgHAgaISZq3OIj3TpsnYkZsPwLac/OovuFJKhaBBoYruOLVPmXkJAkESqJbr4f8u804v3bIXgP/7ZhntUhox+ogOlS6jUkpFSquPouCvTqB4/qIjK7TduixfEr3FrsF8bv5gEee9Etndh1JKVYUGhSi47aTebHjqdHq3S6m2fS7atMc7foMxhr35RdW2b6WU8tCgEEVHdG7Bv68dUm37e2n6GrqNnUj3+ybRf9wUMp12h/2FxXy+MEN7KimlqkzbFKLshD6pTL9zOFv2HODo7m14f+5G/nBMV3blFTLkie8rtK8Xp63xe79yWy7bc/I565+2aumHNVmkdWvNhJlr+WnsSdV2DEqp+KEPr8XQNf/6hRmropP1ddyZ/WjSsAEXDe5S/spKqXov0ofX9E4hhl64eAAfzNvE1OU7/BqWq8O4/9qxjLbmHODmET1p1CCxWvevlKqf9E6hlkjP3MfjE5dH5c7hlL7tefMq3wXCxuw88gpK6NexebV/llKqdtI0F3VMr3bN+Nc1Q3jvuiGMOeIgAM7ob59NaN20YZX2PW3FDvKLSnhy8gqmLd/B8GdnMual2fzx3QWsc8aUBsjeV8DRT0xj+da9Vfo8pVTdpXcKdYQ7+V6rJkns3l99XVL/dFIv7hx5CF8v3sLtHy/mrCM78tKlR1Xb/pVSsadtCvVM97ZNWb8zj/n3n0JqSqMqZ2h1e3l6OsbgrU4qLC6luKSUA0UlpCQnsWNvPoXFpXRp3aTaPlMpVTvpnUIdkV9UQlFJKSnJSQBMX7mDxZtzaN+8ESP7HcTgx6eVs4fKOXtAR75evBWwY0EopeomHU8hzuzOK+T6fy+gZ2ozbh7Rk5Ev/EChk5q7ulyc1oWkBkLfDs25/Oiu3vnfLdvO379bxee3HEtzJ2gppWoXDQpxbsueAxz31PSo7X/2PSfSpXUTjDF0v2+Sd/6Gp04nK7eAhg0SaNFYA4RStYW2KcS5g5onc9aRHTnp0Hb85RM7VvS8v53M0RV8ijqU45+ZAcCALi395i/fupcxL80GoFPLxrx77RB6tWvGjr35tGnakAaJ2uFNqdpM7xTiwNeLt3BYx+b0chL0zVyVyeZd+3nw62XlbFl1Zx3ZkbtGHsIJz87ghD6pPHtBf9o3T2bO2p389ZPf+P7O4TRt1IDiklJ63T+Zx845nCuGdg26rzU7cskvKuWIzi2iXm6l6hutPlLlKik15OYXMeCRqd55d57ah2uGdWfXvkJOeHZGVD73f38axhkv/+j9vJtH9CQ3v5ijHp1KSnID
Fj80kv2Fxd5GdQ9Pjytt8Faq4jQoqIjlFRSTmCAkJ/mnwpi1Oour3v6lRspw3sBOfLFoi9+89MdHk5ggLNi4m4OaJ3urrBY9eKr3gb69+UVsz8mna5sm7MorpEOLxgCszdpHp5aNyxyTUvFKg4KqNvlFJWTsPkD75o04YtyUSo8sV51WPzaaueuymTBrLXPWZjOoaysWbtzNPy4ZwD2fLaGguJQLB3Xm2QsrNtCRUvWVBgUVFcYYRIQ3Z6+jY8vGTJi1llP6tuf5qau96zx0Rj+WZOzhK+f5hlh677ohXPmWvdtxVzs99PVSZq7K4od7TvTOKy4pJb+4lGaNtP+Fqn80KKgaddJzM+l7UHPOPLIDow63OZumr9zB5wu3cMGgzizevId/fL+mnL1E3/1j+nL9CT287ROvXD6Qd+dsYNOu/bRs0pAV2/ay/skxiEiF9/3KzHSe+XZVhdo8SksNhSWlXP2vX/jbmL7079yyzDobs/PILyrlkIMqPpJfZm4+bZo2IjGh4sdTFcYYCopLtfquFtGgoGqVfQXFHP5/3/Gnk3rx8vR07/y7RvZBRPjH92vo2CKZDdn7Y1hK6/T+Hbh08MFMW7GDd+ZsAGxywqE92tCmaUOSkxK57cNFpCQnMeueEcxalcXD/13Olj0HAJh//yk88+1KmjZqQLNGDbh4cBce+GopfxvTl30FxQzq2gqw6UT6PDCZkw9tx/crMzmsY3Mm/vl4wAaLu/7zGxcP7sLFr88F/O90Fm3azVFdWnqD1+w1WSSKMLRHGyYt3cbR3duQmCAMfHQqNw7vwX2j+/odY35RCYkJQlJAF+H8ohKemLSCO07pQ6uARIzLt+5lf2Exad1ae+dtyt5PcsME2qUk+6370vdreH7qan64+0Q6tExm1fZcDu8U/V5j67L2sWnXfkYc0i7qn1XXaFBQtc6+gmKaJCUyaek2bvvwVw5pn8J3d5zgXb5jbz7Dnp5OWtfW/Lwum+WPnMYlr89lZL/2TF+ZyaJN1TvmRKy0aJxEzoHgCQ2H9WrLLSf25LI35pVZdlFaZz5dkMHp/Tswcck2AJ65oD9fLtrCz+uyAWjaMJG8whIAbhrekwmz1vptP/rwDjRLbsCFE3722/eEKwYx6vCD+M+Czdz92RIGdGnJtpwDHNuzLQMPbknbZo24+YNFgA1Ob85ex2MTV3i3v3JoVw5qkUxBcSlrs/Z5ywdw1TFdeffnjXx/53B6pjbzzs8rKOaN2eu49cReFJfYccfbN/cFl8279rN8215OO+wgsvcVsK+gmK5tmgb9d9u5r4C2zRpVuYdaYbHNApBzoIib3l/Iy5ceRYvGSTStgSrF3PwiEhOEomKDJFDt2QE0KKh6xRjDN79tpVPLxlzgnND6dWjOmsxcikrq1m+4tmqclMiBopJy10vr2ooFG3dX+nPOG9iJa47tzpn//LHMsteuHMShB6WwcnsuN763EICJfx7G6S/ZdV+9fCDH9mrLd8u207V1E1o2acjzU1fx3bId3DvqUJ7+diXge7J+Q3YeR3RqwYRZa3lx2hpuGdGTW0/sRV5BMSc9N4t9BcUc37stF6Z1oVubJlz82lySEoW9+cV+5Zp51wi6tbUBKTM3nx9W7+S8ozqREFAtt3nXfl6ctoabR/QkPTOXXu1SyMzNp2dqM2/AW7x5D93bNKVFkyR27isgY/cBBnRpSbexE70ZkEVg/ZO+wDZ/wy46tmxMp5aNK/3vrkFB1VvGGIpKDA0b2KqPxZv3sC+/mH4dm7Ny+14ue2MeX9xyLAMPbsVfPv6VyUu3869rBpe5+u7WpkmtqK5SdUNgyvq+HZpz9oCOPDV5ZUTbD+nWmjevTqP/uCk0SBCKXV34+nVozvJt/uOYXDCoM+t35jHqsIN4fJK9K1vz+OgyVX6R0qCgVIAvFmWwLSefKcu2c81x3TnnqE4UFpeyr6CYlOQGNEgQsvMKGfv57/Tv3MLbo+rGE3rw2g/rvPsZddhB
fLtsOwB/G3MoT0yK7KSgVFU9d+GRnD+oc6W21aCgVA0qKC5hXVYeCSJMW7GDa4/rTm5BEUkJCdz5n9/4eW02B4pKmHz78Vz51jy6t23K/A22CsYzVgbgbS+4cXgPduTk89XirfxxWHcM8NaP672fd86AjmzefYCFG3fTt0NzVjhXmYcelEJigrCsCqPnXTm0Kyf3bcfV/5pf+X8QFRVPnXcElww5uFLbalBQqhYpKTVk5uZ7n7gOtDZrH/lFJRzW0ddDZ1deIS99v4b7xhxKowaJzFqdxXE92/glFdyy5wAHNU+m70PfcvvJvbn1xF7eZUsybLXasb3a8uum3XRp3YSWjZO47cNf+XbZdlo3bUiCQIIIH14/lE278miYmMiw3m0B2/CZkpzEhRPmMH/DbqbccQJ92qeQva+A7XvzueS1ueQW2Lr3O07pQ4kxXDy4C8c9NZ27TzuEA4UlpGfu49tl2zmjfwd27M3n1hN78fjEFazJ9A0DWxHjLxvIy9PXsHJ7bqW2r+tOO6w9r11Z7nk9KA0KSqmgiktK2bhrv19PoMooLTUs37a33K6mnsDlflZiScYeDu/Ygicnr+DiwQdTVFLKQU5DbKumDckvKkEEvlu2gz9/9CsAKx4ZReOG9rmHTdn72ZCdxwNfLWXTrv28evlAPvxlE1t2H2DdzjyO7NyCr28bRn5RCa/OXMvE37fx6uUDOfWFHwD4/OZj+GT+Zn5el83Mu05kf2ExTRs2oKi0lItem0uTpERWbt9LcanhthN78WRAu8EjZx/G7DU7GdClJf06Nuca111VclIC+UX+Y5lcfWw3b/fmUO4ddSj7C4s5unsbbv1wUdAeamNHH8pNw3uG3U8oGhSUUvVCZm4+gpCa0qjMso3ZefxvyTZuGdETEcEYwwvT1nBRWmc6tyo7fOy4b5bxzpwNrH1iTIUe6MvZX8Te/KKQQ9Ju2XOAdVn7OL53ash9vDhtNS9OW8Orlw/k+D6pNGqQQFJiAttyDpDarFGZtPLbcg5w3itz2JaTT5OGibx11WCG9mhdqQcroZYEBREZBfwDSATeNMY8FbD8r8AfgWIgC7jWGLMx3D41KCil4smrM9cy8rD2Vb6zizQoRG3EExFJBMYDo4F+wKUi0i9gtV+BNGNMf+Az4JlolUcppeqim0f0rHJAqIhoDoM1BEg3xqwzxhQCHwNnu1cwxswwxng6is8FKtfXSimlVLWIZlDoBGx2vc9w5oVyHTA52AIRuUFEFojIgqysrGosolJKKbdaMWCuiFwBpAHPBltujHndGJNmjElLTQ3dkKOUUqpqopnlaQvQxfW+szPPj4icAtwPDDfGFESxPEoppcoRzTuF+UBvEekuIg2BS4Bv3CuIyFHAa8BZxpjMKJZFKaVUBKIWFIwxxcBtwHfACuBTY8wyEXlERM5yVnsWaAb8R0QWi8g3IXanlFKqBkQ1SbgxZhIwKWDeQ67pU6L5+UoppSqmVjQ0K6WUqh3qXJoLEckCwj71HEZbYGc1FieW9Fhqp/pyLPXlOECPxaOrMabc7pt1LihUhYgsiOQx77pAj6V2qi/HUl+OA/RYKkqrj5RSSnlpUFBKKeUVb0Hh9VgXoBrpsdRO9eVY6stxgB5LhcRVm4JSSqnw4u1OQSmlVBgaFJRSSnnFTVAQkVEiskpE0kVkbKzLUx4R2SAivzvpPxY481qLyFQRWeO8tnLmi4i85BzbEhEZGOOyvy0imSKy1DWvwmUXkauc9deIyFW16FjGicgW57tZLCJjXMvuc45llYic5pof09+fiHQRkRkislxElonI7c78Ove9hDmWuvi9JIvILyLym3MsDzvzu4vIPKdcnzj54xCRRs77dGd5t/KOscKMMfX+Dzsc6FqgB9AQ+A3oF+tylVPmDUDbgHnPAGOd6bHA0870GOxYFAIMBebFuOwnAAOBpZUtO9AaWOe8tnKmW9WSYxkH3BVk3X7Ob6sR0N35zSXWht8f0AEY6Eyn
AKud8ta57yXMsdTF70WAZs50EjDP+ff+FLjEmT8BuNmZvgWY4ExfAnwS7hgrU6Z4uVModxS4OuJs4F1n+l3gHNf8fxtrLtBSRDrEooAAxpgfgF0Bsyta9tOAqcaYXcaY3cBUYFT0S+8vxLGEcjbwsTGmwBizHkjH/vZi/vszxmwzxixypnOxSSo7UQe/lzDHEkpt/l6MMWaf8zbJ+TPASdghiqHs9+L5vj4DThYRIfQxVli8BIWKjgJXGxhgiogsFJEbnHntjTHbnOntQHtnui4cX0XLXtuP6TanWuVtT5ULdeRYnCqHo7BXpXX6ewk4FqiD34uIJIrIYiATG2TXAnuMzTQdWC5vmZ3lOUAbqvFY4iUo1EXDjDEDgdHArSJygnuhsfeMdbI/cV0uu+NVoCcwANgGPBfb4kRORJoBnwN/McbsdS+ra99LkGOpk9+LMabEGDMAOxDZEODQWJYnXoJCRKPA1SbGmC3OaybwJfbHssNTLeS8egYmqgvHV9Gy19pjMsbscP4jlwJv4LtNr9XHIiJJ2JPoB8aYL5zZdfJ7CXYsdfV78TDG7AFmAMdgq+s8Qxu4y+Uts7O8BZBNNR5LvASFckeBq01EpKmIpHimgZHAUmyZPb09rgK+dqa/Af7g9BgZCuS4qgRqi4qW/TtgpIi0cqoBRjrzYi6gveZc7HcD9lgucXqIdAd6A79QC35/Tr3zW8AKY8zzrkV17nsJdSx19HtJFZGWznRj4FRsG8kM4AJntcDvxfN9XQBMd+7wQh1jxdVkS3ss/7C9KVZj6+vuj3V5yilrD2xPgt+AZZ7yYusOvwfWANOA1sbXg2G8c2y/A2kxLv9H2Nv3Imzd5nWVKTtwLbbBLB24phYdy3tOWZc4/xk7uNa/3zmWVcDo2vL7A4Zhq4aWAIudvzF18XsJcyx18XvpD/zqlHkp8JAzvwf2pJ4O/Ado5MxPdt6nO8t7lHeMFf3TNBdKKaW84qX6SCmlVAQ0KCillPLSoKCUUspLg4JSSikvDQpKKaW8NCgoVYNEZISI/C/W5VAqFA0KSimlvDQoKBWEiFzh5LlfLCKvOUnL9onIC07e++9FJNVZd4CIzHUSsX0pvjEJeonINCdX/iIR6ensvpmIfCYiK0XkA+cJXaVqBQ0KSgUQkb7AxcBxxiYqKwEuB5oCC4wxhwGzgP9zNvk3cK8xpj/2iVrP/A+A8caYI4FjsU9Gg83q+RdsDvwewHFRPyilItSg/FWUijsnA4OA+c5FfGNsorhS4BNnnfeBL0SkBdDSGDPLmf8u8B8nd1UnY8yXAMaYfABnf78YYzKc94uBbsCP0T8spcqnQUGpsgR41xhzn99MkQcD1qtsjpgC13QJ+v9Q1SJafaRUWd8DF4hIO/COY9wV+//Fk7nyMuBHY0wOsFtEjnfmXwnMMnZEsAwROcfZRyMRaVKjR6FUJegVilIBjDHLReQB7Mh3CdgMqbcCecAQZ1kmtt0BbCrjCc5Jfx1wjTP/SuA1EXnE2ceFNXgYSlWKZklVKkIiss8Y0yzW5VAqmrT6SCmllJfeKSillPLSOwWllFJeGhSUUkp5aVBQSinlpUFBKaWUlwYFpZRSXv8P0RBOvomyUK8AAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "# summarize history for accuracy\n",
+ "plt.plot(history.history['acc'])\n",
+ "plt.plot(history.history['val_acc'])\n",
+ "plt.title('model accuracy')\n",
+ "plt.ylabel('accuracy')\n",
+ "plt.xlabel('epoch')\n",
+ "plt.legend(['train', 'test'], loc='upper left')\n",
+ "plt.show()\n",
+ "# summarize history for loss\n",
+ "plt.plot(history.history['loss'])\n",
+ "plt.plot(history.history['val_loss'])\n",
+ "plt.title('model loss')\n",
+ "plt.ylabel('loss')\n",
+ "plt.xlabel('epoch')\n",
+ "plt.legend(['train', 'test'], loc='upper left')\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_13_LSTM-Report.ipynb b/python/Step_13_LSTM-Report.ipynb
new file mode 100644
index 0000000..2313876
--- /dev/null
+++ b/python/Step_13_LSTM-Report.ipynb
@@ -0,0 +1,1052 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Using TensorFlow backend.\n"
+ ]
+ }
+ ],
+ "source": [
+ "## USE for Multi GPU Systems\n",
+ "#import os\n",
+ "#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+ "\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "import pandas as pd\n",
+ "import math\n",
+ "\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "%matplotlib inline\n",
+ "\n",
+ "# Importing SK-learn to calculate precision and recall\n",
+ "import sklearn\n",
+ "from sklearn import metrics \n",
+ "\n",
+ "# Used for graph export\n",
+ "from tensorflow.python.framework import graph_util\n",
+ "from tensorflow.python.framework import graph_io\n",
+ "from keras import backend as K\n",
+ "\n",
+ "\n",
+ "target_names = [\"tap\", \"twotap\", \"swipeleft\", \"swiperight\", \"swipeup\", \"swipedown\", \"twoswipeup\", \"twoswipedown\", \"circle\", \"arrowheadleft\", \"arrowheadright\", \"checkmark\", \"flashlight\", \"l\", \"lmirrored\", \"screenshot\", \"rotate\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[ 1 2 9 6 4 14 17 16 12 3 10 18 5] [13 8 11 15 7]\n"
+ ]
+ }
+ ],
+ "source": [
+ "df = pd.read_pickle(\"DataStudyCollection/df_lstm_norm50.pkl\")\n",
+ "\n",
+ "lst = df.userID.unique()\n",
+ "np.random.seed(42)\n",
+ "np.random.shuffle(lst)\n",
+ "test_ids = lst[-5:]\n",
+ "train_ids = lst[:-5]\n",
+ "print(train_ids, test_ids)\n",
+ "df.TaskID = df.TaskID % 17\n",
+ "\n",
+ "x = np.concatenate(df.Blobs.values).reshape(-1,50,27,15,1)\n",
+ "x = x / 255.0\n",
+ "\n",
+ "# convert class vectors to binary class matrices (one-hot notation)\n",
+ "num_classes = len(df.TaskID.unique())\n",
+ "y = utils.to_categorical(df.TaskID, num_classes)\n",
+ "\n",
+ "labels = sorted(df.TaskID.unique())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# If GPU is not available: \n",
+ "# GPU_USE = '/cpu:0'\n",
+ "#config = tf.ConfigProto(device_count = {\"GPU\": 1})\n",
+ "\n",
+ "\n",
+ "# If GPU is available: \n",
+ "config = tf.ConfigProto()\n",
+ "config.log_device_placement = True\n",
+ "config.allow_soft_placement = True\n",
+ "config.gpu_options.allow_growth=True\n",
+ "config.gpu_options.allocator_type = 'BFC'\n",
+ "\n",
+ "# Limit the maximum memory used\n",
+ "config.gpu_options.per_process_gpu_memory_fraction = 0.3\n",
+ "\n",
+ "# set session config\n",
+ "sess = tf.Session(config=config)\n",
+ "tf.keras.backend.set_session(sess)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model = tf.keras.models.load_model('./ModelSnapshots/LSTM-v2-00398.h5')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 47.2 s, sys: 6.47 s, total: 53.6 s\n",
+ "Wall time: 30.9 s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "lst = []\n",
+ "batch = 100\n",
+ "for i in range(0, len(x), batch):\n",
+ " _x = x[i : i+batch]\n",
+ " lst.extend(model.predict(_x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "9193"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(df)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"TaskIDPred\"] = lst\n",
+ "df.TaskIDPred = df.TaskIDPred.apply(lambda x: np.argmax(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_train = df[df.userID.isin(train_ids)]\n",
+ "df_test = df[df.userID.isin(test_ids)]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[323 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 11]\n",
+ " [ 1 443 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0]\n",
+ " [ 1 0 333 1 0 0 0 0 0 0 0 0 0 0 2 0 0]\n",
+ " [ 3 0 0 317 0 0 0 0 0 0 0 0 0 1 0 0 0]\n",
+ " [ 1 0 0 0 321 1 0 1 0 0 0 0 1 0 0 0 0]\n",
+ " [ 1 0 0 0 0 327 0 0 0 0 0 0 1 1 0 1 3]\n",
+ " [ 0 0 0 0 0 0 431 2 0 0 0 0 0 0 1 0 0]\n",
+ " [ 0 0 0 0 0 1 1 436 0 0 0 0 0 0 0 0 0]\n",
+ " [ 0 0 0 1 0 0 0 0 396 1 0 3 0 1 2 0 0]\n",
+ " [ 1 1 6 1 0 0 0 0 0 374 0 3 0 2 0 0 0]\n",
+ " [ 0 0 0 4 0 1 0 0 0 0 380 0 0 0 1 2 0]\n",
+ " [ 0 0 0 0 0 0 0 0 0 0 0 364 0 0 0 0 0]\n",
+ " [ 0 0 0 2 4 2 0 0 0 0 0 0 394 0 0 0 1]\n",
+ " [ 0 0 0 6 0 3 0 0 1 3 0 4 1 383 0 0 0]\n",
+ " [ 0 0 2 0 0 2 0 0 0 0 1 0 2 0 407 0 0]\n",
+ " [ 0 0 0 0 0 0 0 0 1 0 5 0 0 0 20 376 0]\n",
+ " [ 12 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 477]]\n",
+ "[[1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.9 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. ]]\n",
+ "Accuray: 0.979\n",
+ "Recall: 0.979\n",
+ "F1-Score: 0.978\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " tap 0.94 0.96 0.95 336\n",
+ " twotap 1.00 1.00 1.00 445\n",
+ " swipeleft 0.98 0.99 0.98 337\n",
+ " swiperight 0.95 0.99 0.97 321\n",
+ " swipeup 0.99 0.99 0.99 325\n",
+ " swipedown 0.97 0.98 0.97 334\n",
+ " twoswipeup 1.00 0.99 0.99 434\n",
+ " twoswipedown 0.99 1.00 0.99 438\n",
+ " circle 0.99 0.98 0.99 404\n",
+ " arrowheadleft 0.99 0.96 0.98 388\n",
+ "arrowheadright 0.98 0.98 0.98 388\n",
+ " checkmark 0.97 1.00 0.99 364\n",
+ " flashlight 0.98 0.98 0.98 403\n",
+ " l 0.99 0.96 0.97 401\n",
+ " lmirrored 0.94 0.98 0.96 414\n",
+ " screenshot 0.99 0.94 0.96 402\n",
+ " rotate 0.97 0.97 0.97 490\n",
+ "\n",
+ " micro avg 0.98 0.98 0.98 6624\n",
+ " macro avg 0.98 0.98 0.98 6624\n",
+ " weighted avg 0.98 0.98 0.98 6624\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "\n",
+ "print(sklearn.metrics.confusion_matrix(df_train.TaskID.values, df_train.TaskIDPred.values, labels=labels))\n",
+ "cm = sklearn.metrics.confusion_matrix(df_train.TaskID.values, df_train.TaskIDPred.values, labels=labels)\n",
+ "cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n",
+ "print(np.round(cm,1))\n",
+ "print(\"Accuray: %.3f\" % sklearn.metrics.accuracy_score(df_train.TaskID.values, df_train.TaskIDPred.values))\n",
+ "print(\"Recall: %.3f\" % metrics.recall_score(df_train.TaskID.values, df_train.TaskIDPred.values, average='macro'))\n",
+ "#print(\"Precision: %.2f\" % metrics.average_precision_score(df_train.TaskID.values, df_train.TaskIDPred.values))\n",
+ "print(\"F1-Score: %.3f\" % metrics.f1_score(df_train.TaskID.values, df_train.TaskIDPred.values, average='macro'))\n",
+ "print(sklearn.metrics.classification_report(df_train.TaskID.values, df_train.TaskIDPred.values, target_names=target_names))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[ 98 5 1 1 0 7 0 5 0 0 0 0 5 0 0 0 4]\n",
+ " [ 2 130 0 0 4 0 6 1 0 0 0 0 4 0 0 0 1]\n",
+ " [ 0 0 103 0 0 0 0 0 0 0 0 0 0 0 27 0 0]\n",
+ " [ 0 0 0 106 0 0 0 0 0 2 0 1 1 17 0 0 0]\n",
+ " [ 0 0 0 0 116 0 0 0 2 0 0 0 10 0 0 0 0]\n",
+ " [ 0 0 0 0 1 124 0 0 0 0 0 1 2 0 6 0 1]\n",
+ " [ 0 0 0 0 17 0 144 0 0 0 0 0 3 0 0 0 0]\n",
+ " [ 0 0 0 0 0 18 0 151 0 0 0 0 0 0 0 0 0]\n",
+ " [ 0 3 1 0 0 0 1 0 130 8 0 4 0 4 14 1 0]\n",
+ " [ 0 0 1 0 0 0 0 0 0 136 0 3 1 11 0 2 0]\n",
+ " [ 0 0 0 0 0 1 0 0 0 1 124 0 0 0 4 22 0]\n",
+ " [ 0 0 0 2 1 0 0 0 2 1 0 140 1 0 0 0 0]\n",
+ " [ 0 0 1 0 6 15 0 0 1 0 0 0 139 2 0 0 0]\n",
+ " [ 0 0 0 0 0 3 0 0 0 2 0 2 2 149 1 0 0]\n",
+ " [ 1 0 0 0 0 2 0 0 0 0 7 0 1 1 151 0 0]\n",
+ " [ 0 0 0 1 0 0 0 0 2 3 2 0 0 0 6 146 0]\n",
+ " [ 31 0 0 0 0 1 0 0 0 0 0 2 1 0 0 0 142]]\n",
+ "[[0.8 0. 0. 0. 0. 0.1 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0.9 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0.8 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.2 0. 0. ]\n",
+ " [0. 0. 0. 0.8 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.1 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0.9 0. 0. 0. 0. 0. 0. 0. 0.1 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0.9 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0.1 0. 0.9 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0.1 0. 0.9 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0.8 0. 0. 0. 0. 0. 0.1 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.9 0. 0. 0. 0.1 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.8 0. 0. 0. 0. 0.1 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0.1 0. 0. 0. 0. 0. 0. 0.8 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.9 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.9 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.9 0. ]\n",
+ " [0.2 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.8]]\n",
+ "Accuray: 0.868\n",
+ "Recall: 0.867\n",
+ "F1-Score: 0.868\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " tap 0.74 0.78 0.76 126\n",
+ " twotap 0.94 0.88 0.91 148\n",
+ " swipeleft 0.96 0.79 0.87 130\n",
+ " swiperight 0.96 0.83 0.89 127\n",
+ " swipeup 0.80 0.91 0.85 128\n",
+ " swipedown 0.73 0.92 0.81 135\n",
+ " twoswipeup 0.95 0.88 0.91 164\n",
+ " twoswipedown 0.96 0.89 0.93 169\n",
+ " circle 0.95 0.78 0.86 166\n",
+ " arrowheadleft 0.89 0.88 0.89 154\n",
+ "arrowheadright 0.93 0.82 0.87 152\n",
+ " checkmark 0.92 0.95 0.93 147\n",
+ " flashlight 0.82 0.85 0.83 164\n",
+ " l 0.81 0.94 0.87 159\n",
+ " lmirrored 0.72 0.93 0.81 163\n",
+ " screenshot 0.85 0.91 0.88 160\n",
+ " rotate 0.96 0.80 0.87 177\n",
+ "\n",
+ " micro avg 0.87 0.87 0.87 2569\n",
+ " macro avg 0.88 0.87 0.87 2569\n",
+ " weighted avg 0.88 0.87 0.87 2569\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(sklearn.metrics.confusion_matrix(df_test.TaskID.values, df_test.TaskIDPred.values, labels=labels))\n",
+ "cm = sklearn.metrics.confusion_matrix(df_test.TaskID.values, df_test.TaskIDPred.values, labels=labels)\n",
+ "cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n",
+ "print(np.round(cm,1))\n",
+ "print(\"Accuray: %.3f\" % sklearn.metrics.accuracy_score(df_test.TaskID.values, df_test.TaskIDPred.values))\n",
+ "print(\"Recall: %.3f\" % metrics.recall_score(df_test.TaskID.values, df_test.TaskIDPred.values, average='macro'))\n",
+ "#print(\"Precision: %.2f\" % metrics.average_precision_score(df_test.TaskID.values, df_test.TaskIDPred.values))\n",
+ "print(\"F1-Score: %.3f\" % metrics.f1_score(df_test.TaskID.values, df_test.TaskIDPred.values, average='macro'))\n",
+ "print(sklearn.metrics.classification_report(df_test.TaskID.values, df_test.TaskIDPred.values, target_names=target_names))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Export "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "output nodes names are: ['output_node0']\n"
+ ]
+ }
+ ],
+ "source": [
+ "output_node_prefix = \"output_node\"\n",
+ "num_output = 1\n",
+ "pred = [None]*num_output\n",
+ "pred_node_names = [None]*num_output\n",
+ "for i in range(num_output):\n",
+ " pred_node_names[i] = output_node_prefix+str(i)\n",
+ " pred[i] = tf.identity(model.outputs[i], name=pred_node_names[i])\n",
+ "print('output nodes names are: ', pred_node_names)\n",
+ "output_node_prefix = pred_node_names[0]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 12,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "model.inputs[0]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#sess = K.get_session()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "output_path = \"./Models/\"\n",
+ "output_file = \"LSTM.pb\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "time_distributed_10_input\n",
+ "time_distributed_10/kernel\n",
+ "time_distributed_10/kernel/read\n",
+ "time_distributed_10/bias\n",
+ "time_distributed_10/bias/read\n",
+ "time_distributed_10/Reshape/shape\n",
+ "time_distributed_10/Reshape\n",
+ "time_distributed_10/convolution\n",
+ "time_distributed_10/BiasAdd\n",
+ "time_distributed_10/Relu\n",
+ "time_distributed_10/Reshape_1/shape\n",
+ "time_distributed_10/Reshape_1\n",
+ "time_distributed_11/kernel\n",
+ "time_distributed_11/kernel/read\n",
+ "time_distributed_11/bias\n",
+ "time_distributed_11/bias/read\n",
+ "time_distributed_11/Reshape/shape\n",
+ "time_distributed_11/Reshape\n",
+ "time_distributed_11/convolution\n",
+ "time_distributed_11/BiasAdd\n",
+ "time_distributed_11/Relu\n",
+ "time_distributed_11/Reshape_1/shape\n",
+ "time_distributed_11/Reshape_1\n",
+ "time_distributed_12/Reshape/shape\n",
+ "time_distributed_12/Reshape\n",
+ "time_distributed_12/MaxPool\n",
+ "time_distributed_12/Reshape_1/shape\n",
+ "time_distributed_12/Reshape_1\n",
+ "time_distributed_13/Reshape/shape\n",
+ "time_distributed_13/Reshape\n",
+ "time_distributed_13/keras_learning_phase/input\n",
+ "time_distributed_13/keras_learning_phase\n",
+ "time_distributed_13/cond/Switch\n",
+ "time_distributed_13/cond/switch_t\n",
+ "time_distributed_13/cond/pred_id\n",
+ "time_distributed_13/cond/mul/y\n",
+ "time_distributed_13/cond/mul\n",
+ "time_distributed_13/cond/mul/Switch\n",
+ "time_distributed_13/cond/dropout/keep_prob\n",
+ "time_distributed_13/cond/dropout/Shape\n",
+ "time_distributed_13/cond/dropout/random_uniform/min\n",
+ "time_distributed_13/cond/dropout/random_uniform/max\n",
+ "time_distributed_13/cond/dropout/random_uniform/RandomUniform\n",
+ "time_distributed_13/cond/dropout/random_uniform/sub\n",
+ "time_distributed_13/cond/dropout/random_uniform/mul\n",
+ "time_distributed_13/cond/dropout/random_uniform\n",
+ "time_distributed_13/cond/dropout/add\n",
+ "time_distributed_13/cond/dropout/Floor\n",
+ "time_distributed_13/cond/dropout/div\n",
+ "time_distributed_13/cond/dropout/mul\n",
+ "time_distributed_13/cond/Switch_1\n",
+ "time_distributed_13/cond/Merge\n",
+ "time_distributed_13/Reshape_1/shape\n",
+ "time_distributed_13/Reshape_1\n",
+ "time_distributed_14/kernel\n",
+ "time_distributed_14/kernel/read\n",
+ "time_distributed_14/bias\n",
+ "time_distributed_14/bias/read\n",
+ "time_distributed_14/Reshape/shape\n",
+ "time_distributed_14/Reshape\n",
+ "time_distributed_14/convolution\n",
+ "time_distributed_14/BiasAdd\n",
+ "time_distributed_14/Relu\n",
+ "time_distributed_14/Reshape_1/shape\n",
+ "time_distributed_14/Reshape_1\n",
+ "time_distributed_15/kernel\n",
+ "time_distributed_15/kernel/read\n",
+ "time_distributed_15/bias\n",
+ "time_distributed_15/bias/read\n",
+ "time_distributed_15/Reshape/shape\n",
+ "time_distributed_15/Reshape\n",
+ "time_distributed_15/convolution\n",
+ "time_distributed_15/BiasAdd\n",
+ "time_distributed_15/Relu\n",
+ "time_distributed_15/Reshape_1/shape\n",
+ "time_distributed_15/Reshape_1\n",
+ "time_distributed_16/Reshape/shape\n",
+ "time_distributed_16/Reshape\n",
+ "time_distributed_16/MaxPool\n",
+ "time_distributed_16/Reshape_1/shape\n",
+ "time_distributed_16/Reshape_1\n",
+ "time_distributed_17/Reshape/shape\n",
+ "time_distributed_17/Reshape\n",
+ "time_distributed_17/cond/Switch\n",
+ "time_distributed_17/cond/switch_t\n",
+ "time_distributed_17/cond/pred_id\n",
+ "time_distributed_17/cond/mul/y\n",
+ "time_distributed_17/cond/mul\n",
+ "time_distributed_17/cond/mul/Switch\n",
+ "time_distributed_17/cond/dropout/keep_prob\n",
+ "time_distributed_17/cond/dropout/Shape\n",
+ "time_distributed_17/cond/dropout/random_uniform/min\n",
+ "time_distributed_17/cond/dropout/random_uniform/max\n",
+ "time_distributed_17/cond/dropout/random_uniform/RandomUniform\n",
+ "time_distributed_17/cond/dropout/random_uniform/sub\n",
+ "time_distributed_17/cond/dropout/random_uniform/mul\n",
+ "time_distributed_17/cond/dropout/random_uniform\n",
+ "time_distributed_17/cond/dropout/add\n",
+ "time_distributed_17/cond/dropout/Floor\n",
+ "time_distributed_17/cond/dropout/div\n",
+ "time_distributed_17/cond/dropout/mul\n",
+ "time_distributed_17/cond/Switch_1\n",
+ "time_distributed_17/cond/Merge\n",
+ "time_distributed_17/Reshape_1/shape\n",
+ "time_distributed_17/Reshape_1\n",
+ "time_distributed_18/Reshape/shape\n",
+ "time_distributed_18/Reshape\n",
+ "time_distributed_18/Shape\n",
+ "time_distributed_18/strided_slice/stack\n",
+ "time_distributed_18/strided_slice/stack_1\n",
+ "time_distributed_18/strided_slice/stack_2\n",
+ "time_distributed_18/strided_slice\n",
+ "time_distributed_18/Const\n",
+ "time_distributed_18/Prod\n",
+ "time_distributed_18/stack/0\n",
+ "time_distributed_18/stack\n",
+ "time_distributed_18/Reshape_1\n",
+ "time_distributed_18/Reshape_2/shape\n",
+ "time_distributed_18/Reshape_2\n",
+ "lstm_3/kernel\n",
+ "lstm_3/kernel/read\n",
+ "lstm_3/recurrent_kernel\n",
+ "lstm_3/recurrent_kernel/read\n",
+ "lstm_3/bias\n",
+ "lstm_3/bias/read\n",
+ "lstm_3/strided_slice/stack\n",
+ "lstm_3/strided_slice/stack_1\n",
+ "lstm_3/strided_slice/stack_2\n",
+ "lstm_3/strided_slice\n",
+ "lstm_3/strided_slice_1/stack\n",
+ "lstm_3/strided_slice_1/stack_1\n",
+ "lstm_3/strided_slice_1/stack_2\n",
+ "lstm_3/strided_slice_1\n",
+ "lstm_3/strided_slice_2/stack\n",
+ "lstm_3/strided_slice_2/stack_1\n",
+ "lstm_3/strided_slice_2/stack_2\n",
+ "lstm_3/strided_slice_2\n",
+ "lstm_3/strided_slice_3/stack\n",
+ "lstm_3/strided_slice_3/stack_1\n",
+ "lstm_3/strided_slice_3/stack_2\n",
+ "lstm_3/strided_slice_3\n",
+ "lstm_3/strided_slice_4/stack\n",
+ "lstm_3/strided_slice_4/stack_1\n",
+ "lstm_3/strided_slice_4/stack_2\n",
+ "lstm_3/strided_slice_4\n",
+ "lstm_3/strided_slice_5/stack\n",
+ "lstm_3/strided_slice_5/stack_1\n",
+ "lstm_3/strided_slice_5/stack_2\n",
+ "lstm_3/strided_slice_5\n",
+ "lstm_3/strided_slice_6/stack\n",
+ "lstm_3/strided_slice_6/stack_1\n",
+ "lstm_3/strided_slice_6/stack_2\n",
+ "lstm_3/strided_slice_6\n",
+ "lstm_3/strided_slice_7/stack\n",
+ "lstm_3/strided_slice_7/stack_1\n",
+ "lstm_3/strided_slice_7/stack_2\n",
+ "lstm_3/strided_slice_7\n",
+ "lstm_3/strided_slice_8/stack\n",
+ "lstm_3/strided_slice_8/stack_1\n",
+ "lstm_3/strided_slice_8/stack_2\n",
+ "lstm_3/strided_slice_8\n",
+ "lstm_3/strided_slice_9/stack\n",
+ "lstm_3/strided_slice_9/stack_1\n",
+ "lstm_3/strided_slice_9/stack_2\n",
+ "lstm_3/strided_slice_9\n",
+ "lstm_3/strided_slice_10/stack\n",
+ "lstm_3/strided_slice_10/stack_1\n",
+ "lstm_3/strided_slice_10/stack_2\n",
+ "lstm_3/strided_slice_10\n",
+ "lstm_3/strided_slice_11/stack\n",
+ "lstm_3/strided_slice_11/stack_1\n",
+ "lstm_3/strided_slice_11/stack_2\n",
+ "lstm_3/strided_slice_11\n",
+ "lstm_3/zeros_like\n",
+ "lstm_3/Sum/reduction_indices\n",
+ "lstm_3/Sum\n",
+ "lstm_3/ExpandDims/dim\n",
+ "lstm_3/ExpandDims\n",
+ "lstm_3/Tile/multiples\n",
+ "lstm_3/Tile\n",
+ "lstm_3/Tile_1/multiples\n",
+ "lstm_3/Tile_1\n",
+ "lstm_3/transpose/perm\n",
+ "lstm_3/transpose\n",
+ "lstm_3/Shape\n",
+ "lstm_3/strided_slice_12/stack\n",
+ "lstm_3/strided_slice_12/stack_1\n",
+ "lstm_3/strided_slice_12/stack_2\n",
+ "lstm_3/strided_slice_12\n",
+ "lstm_3/TensorArray\n",
+ "lstm_3/TensorArray_1\n",
+ "lstm_3/TensorArrayUnstack/Shape\n",
+ "lstm_3/TensorArrayUnstack/strided_slice/stack\n",
+ "lstm_3/TensorArrayUnstack/strided_slice/stack_1\n",
+ "lstm_3/TensorArrayUnstack/strided_slice/stack_2\n",
+ "lstm_3/TensorArrayUnstack/strided_slice\n",
+ "lstm_3/TensorArrayUnstack/range/start\n",
+ "lstm_3/TensorArrayUnstack/range/delta\n",
+ "lstm_3/TensorArrayUnstack/range\n",
+ "lstm_3/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3\n",
+ "lstm_3/time\n",
+ "lstm_3/while/maximum_iterations\n",
+ "lstm_3/while/iteration_counter\n",
+ "lstm_3/while/Enter\n",
+ "lstm_3/while/Enter_1\n",
+ "lstm_3/while/Enter_2\n",
+ "lstm_3/while/Enter_3\n",
+ "lstm_3/while/Enter_4\n",
+ "lstm_3/while/Merge\n",
+ "lstm_3/while/Merge_1\n",
+ "lstm_3/while/Merge_2\n",
+ "lstm_3/while/Merge_3\n",
+ "lstm_3/while/Merge_4\n",
+ "lstm_3/while/Less\n",
+ "lstm_3/while/Less/Enter\n",
+ "lstm_3/while/Less_1\n",
+ "lstm_3/while/Less_1/Enter\n",
+ "lstm_3/while/LogicalAnd\n",
+ "lstm_3/while/LoopCond\n",
+ "lstm_3/while/Switch\n",
+ "lstm_3/while/Switch_1\n",
+ "lstm_3/while/Switch_2\n",
+ "lstm_3/while/Switch_3\n",
+ "lstm_3/while/Switch_4\n",
+ "lstm_3/while/Identity\n",
+ "lstm_3/while/Identity_1\n",
+ "lstm_3/while/Identity_2\n",
+ "lstm_3/while/Identity_3\n",
+ "lstm_3/while/Identity_4\n",
+ "lstm_3/while/add/y\n",
+ "lstm_3/while/add\n",
+ "lstm_3/while/TensorArrayReadV3\n",
+ "lstm_3/while/TensorArrayReadV3/Enter\n",
+ "lstm_3/while/TensorArrayReadV3/Enter_1\n",
+ "lstm_3/while/MatMul\n",
+ "lstm_3/while/MatMul/Enter\n",
+ "lstm_3/while/MatMul_1\n",
+ "lstm_3/while/MatMul_1/Enter\n",
+ "lstm_3/while/MatMul_2\n",
+ "lstm_3/while/MatMul_2/Enter\n",
+ "lstm_3/while/MatMul_3\n",
+ "lstm_3/while/MatMul_3/Enter\n",
+ "lstm_3/while/BiasAdd\n",
+ "lstm_3/while/BiasAdd/Enter\n",
+ "lstm_3/while/BiasAdd_1\n",
+ "lstm_3/while/BiasAdd_1/Enter\n",
+ "lstm_3/while/BiasAdd_2\n",
+ "lstm_3/while/BiasAdd_2/Enter\n",
+ "lstm_3/while/BiasAdd_3\n",
+ "lstm_3/while/BiasAdd_3/Enter\n",
+ "lstm_3/while/MatMul_4\n",
+ "lstm_3/while/MatMul_4/Enter\n",
+ "lstm_3/while/add_1\n",
+ "lstm_3/while/mul/x\n",
+ "lstm_3/while/mul\n",
+ "lstm_3/while/add_2/y\n",
+ "lstm_3/while/add_2\n",
+ "lstm_3/while/Const\n",
+ "lstm_3/while/Const_1\n",
+ "lstm_3/while/clip_by_value/Minimum\n",
+ "lstm_3/while/clip_by_value\n",
+ "lstm_3/while/MatMul_5\n",
+ "lstm_3/while/MatMul_5/Enter\n",
+ "lstm_3/while/add_3\n",
+ "lstm_3/while/mul_1/x\n",
+ "lstm_3/while/mul_1\n",
+ "lstm_3/while/add_4/y\n",
+ "lstm_3/while/add_4\n",
+ "lstm_3/while/Const_2\n",
+ "lstm_3/while/Const_3\n",
+ "lstm_3/while/clip_by_value_1/Minimum\n",
+ "lstm_3/while/clip_by_value_1\n",
+ "lstm_3/while/mul_2\n",
+ "lstm_3/while/MatMul_6\n",
+ "lstm_3/while/MatMul_6/Enter\n",
+ "lstm_3/while/add_5\n",
+ "lstm_3/while/Tanh\n",
+ "lstm_3/while/mul_3\n",
+ "lstm_3/while/add_6\n",
+ "lstm_3/while/MatMul_7\n",
+ "lstm_3/while/MatMul_7/Enter\n",
+ "lstm_3/while/add_7\n",
+ "lstm_3/while/mul_4/x\n",
+ "lstm_3/while/mul_4\n",
+ "lstm_3/while/add_8/y\n",
+ "lstm_3/while/add_8\n",
+ "lstm_3/while/Const_4\n",
+ "lstm_3/while/Const_5\n",
+ "lstm_3/while/clip_by_value_2/Minimum\n",
+ "lstm_3/while/clip_by_value_2\n",
+ "lstm_3/while/Tanh_1\n",
+ "lstm_3/while/mul_5\n",
+ "lstm_3/while/TensorArrayWrite/TensorArrayWriteV3\n",
+ "lstm_3/while/TensorArrayWrite/TensorArrayWriteV3/Enter\n",
+ "lstm_3/while/add_9/y\n",
+ "lstm_3/while/add_9\n",
+ "lstm_3/while/NextIteration\n",
+ "lstm_3/while/NextIteration_1\n",
+ "lstm_3/while/NextIteration_2\n",
+ "lstm_3/while/NextIteration_3\n",
+ "lstm_3/while/NextIteration_4\n",
+ "lstm_3/while/Exit_2\n",
+ "lstm_3/TensorArrayStack/TensorArraySizeV3\n",
+ "lstm_3/TensorArrayStack/range/start\n",
+ "lstm_3/TensorArrayStack/range/delta\n",
+ "lstm_3/TensorArrayStack/range\n",
+ "lstm_3/TensorArrayStack/TensorArrayGatherV3\n",
+ "lstm_3/transpose_1/perm\n",
+ "lstm_3/transpose_1\n",
+ "dropout_7/cond/Switch\n",
+ "dropout_7/cond/switch_t\n",
+ "dropout_7/cond/pred_id\n",
+ "dropout_7/cond/mul/y\n",
+ "dropout_7/cond/mul\n",
+ "dropout_7/cond/mul/Switch\n",
+ "dropout_7/cond/dropout/keep_prob\n",
+ "dropout_7/cond/dropout/Shape\n",
+ "dropout_7/cond/dropout/random_uniform/min\n",
+ "dropout_7/cond/dropout/random_uniform/max\n",
+ "dropout_7/cond/dropout/random_uniform/RandomUniform\n",
+ "dropout_7/cond/dropout/random_uniform/sub\n",
+ "dropout_7/cond/dropout/random_uniform/mul\n",
+ "dropout_7/cond/dropout/random_uniform\n",
+ "dropout_7/cond/dropout/add\n",
+ "dropout_7/cond/dropout/Floor\n",
+ "dropout_7/cond/dropout/div\n",
+ "dropout_7/cond/dropout/mul\n",
+ "dropout_7/cond/Switch_1\n",
+ "dropout_7/cond/Merge\n",
+ "lstm_4/kernel\n",
+ "lstm_4/kernel/read\n",
+ "lstm_4/recurrent_kernel\n",
+ "lstm_4/recurrent_kernel/read\n",
+ "lstm_4/bias\n",
+ "lstm_4/bias/read\n",
+ "lstm_4/strided_slice/stack\n",
+ "lstm_4/strided_slice/stack_1\n",
+ "lstm_4/strided_slice/stack_2\n",
+ "lstm_4/strided_slice\n",
+ "lstm_4/strided_slice_1/stack\n",
+ "lstm_4/strided_slice_1/stack_1\n",
+ "lstm_4/strided_slice_1/stack_2\n",
+ "lstm_4/strided_slice_1\n",
+ "lstm_4/strided_slice_2/stack\n",
+ "lstm_4/strided_slice_2/stack_1\n",
+ "lstm_4/strided_slice_2/stack_2\n",
+ "lstm_4/strided_slice_2\n",
+ "lstm_4/strided_slice_3/stack\n",
+ "lstm_4/strided_slice_3/stack_1\n",
+ "lstm_4/strided_slice_3/stack_2\n",
+ "lstm_4/strided_slice_3\n",
+ "lstm_4/strided_slice_4/stack\n",
+ "lstm_4/strided_slice_4/stack_1\n",
+ "lstm_4/strided_slice_4/stack_2\n",
+ "lstm_4/strided_slice_4\n",
+ "lstm_4/strided_slice_5/stack\n",
+ "lstm_4/strided_slice_5/stack_1\n",
+ "lstm_4/strided_slice_5/stack_2\n",
+ "lstm_4/strided_slice_5\n",
+ "lstm_4/strided_slice_6/stack\n",
+ "lstm_4/strided_slice_6/stack_1\n",
+ "lstm_4/strided_slice_6/stack_2\n",
+ "lstm_4/strided_slice_6\n",
+ "lstm_4/strided_slice_7/stack\n",
+ "lstm_4/strided_slice_7/stack_1\n",
+ "lstm_4/strided_slice_7/stack_2\n",
+ "lstm_4/strided_slice_7\n",
+ "lstm_4/strided_slice_8/stack\n",
+ "lstm_4/strided_slice_8/stack_1\n",
+ "lstm_4/strided_slice_8/stack_2\n",
+ "lstm_4/strided_slice_8\n",
+ "lstm_4/strided_slice_9/stack\n",
+ "lstm_4/strided_slice_9/stack_1\n",
+ "lstm_4/strided_slice_9/stack_2\n",
+ "lstm_4/strided_slice_9\n",
+ "lstm_4/strided_slice_10/stack\n",
+ "lstm_4/strided_slice_10/stack_1\n",
+ "lstm_4/strided_slice_10/stack_2\n",
+ "lstm_4/strided_slice_10\n",
+ "lstm_4/strided_slice_11/stack\n",
+ "lstm_4/strided_slice_11/stack_1\n",
+ "lstm_4/strided_slice_11/stack_2\n",
+ "lstm_4/strided_slice_11\n",
+ "lstm_4/zeros_like\n",
+ "lstm_4/Sum/reduction_indices\n",
+ "lstm_4/Sum\n",
+ "lstm_4/ExpandDims/dim\n",
+ "lstm_4/ExpandDims\n",
+ "lstm_4/Tile/multiples\n",
+ "lstm_4/Tile\n",
+ "lstm_4/Tile_1/multiples\n",
+ "lstm_4/Tile_1\n",
+ "lstm_4/transpose/perm\n",
+ "lstm_4/transpose\n",
+ "lstm_4/Shape\n",
+ "lstm_4/strided_slice_12/stack\n",
+ "lstm_4/strided_slice_12/stack_1\n",
+ "lstm_4/strided_slice_12/stack_2\n",
+ "lstm_4/strided_slice_12\n",
+ "lstm_4/TensorArray\n",
+ "lstm_4/TensorArray_1\n",
+ "lstm_4/TensorArrayUnstack/Shape\n",
+ "lstm_4/TensorArrayUnstack/strided_slice/stack\n",
+ "lstm_4/TensorArrayUnstack/strided_slice/stack_1\n",
+ "lstm_4/TensorArrayUnstack/strided_slice/stack_2\n",
+ "lstm_4/TensorArrayUnstack/strided_slice\n",
+ "lstm_4/TensorArrayUnstack/range/start\n",
+ "lstm_4/TensorArrayUnstack/range/delta\n",
+ "lstm_4/TensorArrayUnstack/range\n",
+ "lstm_4/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3\n",
+ "lstm_4/time\n",
+ "lstm_4/while/maximum_iterations\n",
+ "lstm_4/while/iteration_counter\n",
+ "lstm_4/while/Enter\n",
+ "lstm_4/while/Enter_1\n",
+ "lstm_4/while/Enter_2\n",
+ "lstm_4/while/Enter_3\n",
+ "lstm_4/while/Enter_4\n",
+ "lstm_4/while/Merge\n",
+ "lstm_4/while/Merge_1\n",
+ "lstm_4/while/Merge_2\n",
+ "lstm_4/while/Merge_3\n",
+ "lstm_4/while/Merge_4\n",
+ "lstm_4/while/Less\n",
+ "lstm_4/while/Less/Enter\n",
+ "lstm_4/while/Less_1\n",
+ "lstm_4/while/Less_1/Enter\n",
+ "lstm_4/while/LogicalAnd\n",
+ "lstm_4/while/LoopCond\n",
+ "lstm_4/while/Switch\n",
+ "lstm_4/while/Switch_1\n",
+ "lstm_4/while/Switch_2\n",
+ "lstm_4/while/Switch_3\n",
+ "lstm_4/while/Switch_4\n",
+ "lstm_4/while/Identity\n",
+ "lstm_4/while/Identity_1\n",
+ "lstm_4/while/Identity_2\n",
+ "lstm_4/while/Identity_3\n",
+ "lstm_4/while/Identity_4\n",
+ "lstm_4/while/add/y\n",
+ "lstm_4/while/add\n",
+ "lstm_4/while/TensorArrayReadV3\n",
+ "lstm_4/while/TensorArrayReadV3/Enter\n",
+ "lstm_4/while/TensorArrayReadV3/Enter_1\n",
+ "lstm_4/while/MatMul\n",
+ "lstm_4/while/MatMul/Enter\n",
+ "lstm_4/while/MatMul_1\n",
+ "lstm_4/while/MatMul_1/Enter\n",
+ "lstm_4/while/MatMul_2\n",
+ "lstm_4/while/MatMul_2/Enter\n",
+ "lstm_4/while/MatMul_3\n",
+ "lstm_4/while/MatMul_3/Enter\n",
+ "lstm_4/while/BiasAdd\n",
+ "lstm_4/while/BiasAdd/Enter\n",
+ "lstm_4/while/BiasAdd_1\n",
+ "lstm_4/while/BiasAdd_1/Enter\n",
+ "lstm_4/while/BiasAdd_2\n",
+ "lstm_4/while/BiasAdd_2/Enter\n",
+ "lstm_4/while/BiasAdd_3\n",
+ "lstm_4/while/BiasAdd_3/Enter\n",
+ "lstm_4/while/MatMul_4\n",
+ "lstm_4/while/MatMul_4/Enter\n",
+ "lstm_4/while/add_1\n",
+ "lstm_4/while/mul/x\n",
+ "lstm_4/while/mul\n",
+ "lstm_4/while/add_2/y\n",
+ "lstm_4/while/add_2\n",
+ "lstm_4/while/Const\n",
+ "lstm_4/while/Const_1\n",
+ "lstm_4/while/clip_by_value/Minimum\n",
+ "lstm_4/while/clip_by_value\n",
+ "lstm_4/while/MatMul_5\n",
+ "lstm_4/while/MatMul_5/Enter\n",
+ "lstm_4/while/add_3\n",
+ "lstm_4/while/mul_1/x\n",
+ "lstm_4/while/mul_1\n",
+ "lstm_4/while/add_4/y\n",
+ "lstm_4/while/add_4\n",
+ "lstm_4/while/Const_2\n",
+ "lstm_4/while/Const_3\n",
+ "lstm_4/while/clip_by_value_1/Minimum\n",
+ "lstm_4/while/clip_by_value_1\n",
+ "lstm_4/while/mul_2\n",
+ "lstm_4/while/MatMul_6\n",
+ "lstm_4/while/MatMul_6/Enter\n",
+ "lstm_4/while/add_5\n",
+ "lstm_4/while/Tanh\n",
+ "lstm_4/while/mul_3\n",
+ "lstm_4/while/add_6\n",
+ "lstm_4/while/MatMul_7\n",
+ "lstm_4/while/MatMul_7/Enter\n",
+ "lstm_4/while/add_7\n",
+ "lstm_4/while/mul_4/x\n",
+ "lstm_4/while/mul_4\n",
+ "lstm_4/while/add_8/y\n",
+ "lstm_4/while/add_8\n",
+ "lstm_4/while/Const_4\n",
+ "lstm_4/while/Const_5\n",
+ "lstm_4/while/clip_by_value_2/Minimum\n",
+ "lstm_4/while/clip_by_value_2\n",
+ "lstm_4/while/Tanh_1\n",
+ "lstm_4/while/mul_5\n",
+ "lstm_4/while/TensorArrayWrite/TensorArrayWriteV3\n",
+ "lstm_4/while/TensorArrayWrite/TensorArrayWriteV3/Enter\n",
+ "lstm_4/while/add_9/y\n",
+ "lstm_4/while/add_9\n",
+ "lstm_4/while/NextIteration\n",
+ "lstm_4/while/NextIteration_1\n",
+ "lstm_4/while/NextIteration_2\n",
+ "lstm_4/while/NextIteration_3\n",
+ "lstm_4/while/NextIteration_4\n",
+ "lstm_4/while/Exit_1\n",
+ "lstm_4/while/Exit_2\n",
+ "lstm_4/sub/y\n",
+ "lstm_4/sub\n",
+ "lstm_4/TensorArrayReadV3\n",
+ "dropout_8/cond/Switch\n",
+ "dropout_8/cond/switch_t\n",
+ "dropout_8/cond/pred_id\n",
+ "dropout_8/cond/mul/y\n",
+ "dropout_8/cond/mul\n",
+ "dropout_8/cond/mul/Switch\n",
+ "dropout_8/cond/dropout/keep_prob\n",
+ "dropout_8/cond/dropout/Shape\n",
+ "dropout_8/cond/dropout/random_uniform/min\n",
+ "dropout_8/cond/dropout/random_uniform/max\n",
+ "dropout_8/cond/dropout/random_uniform/RandomUniform\n",
+ "dropout_8/cond/dropout/random_uniform/sub\n",
+ "dropout_8/cond/dropout/random_uniform/mul\n",
+ "dropout_8/cond/dropout/random_uniform\n",
+ "dropout_8/cond/dropout/add\n",
+ "dropout_8/cond/dropout/Floor\n",
+ "dropout_8/cond/dropout/div\n",
+ "dropout_8/cond/dropout/mul\n",
+ "dropout_8/cond/Switch_1\n",
+ "dropout_8/cond/Merge\n",
+ "dense_2/kernel\n",
+ "dense_2/kernel/read\n",
+ "dense_2/bias\n",
+ "dense_2/bias/read\n",
+ "dense_2/MatMul\n",
+ "dense_2/BiasAdd\n",
+ "dense_2/Softmax\n",
+ "output_node0\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "g = tf.GraphDef()\n",
+ "g.ParseFromString(open(output_path + output_file, \"rb\").read())\n",
+ "s = \"\"\n",
+ "for n in g.node:\n",
+ " s =s + str(n.name) + \"\\n\"\n",
+ "\n",
+ "print(s)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "INFO:tensorflow:Froze 16 variables.\n",
+ "INFO:tensorflow:Converted 16 variables to const ops.\n",
+ "Saved the frozen graph at: ./Models/LSTM.pb\n"
+ ]
+ }
+ ],
+ "source": [
+ "from tensorflow.python.framework import graph_util\n",
+ "from tensorflow.python.framework import graph_io\n",
+ "constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), pred_node_names)\n",
+ "\n",
+ "graph_io.write_graph(constant_graph, output_path, output_file, as_text=False)\n",
+ "\n",
+ "print('Saved the frozen graph at: ', (output_path + output_file))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_15_CNN-Ensemble-Report.ipynb b/python/Step_15_CNN-Ensemble-Report.ipynb
new file mode 100644
index 0000000..309448f
--- /dev/null
+++ b/python/Step_15_CNN-Ensemble-Report.ipynb
@@ -0,0 +1,427 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Using TensorFlow backend.\n"
+ ]
+ }
+ ],
+ "source": [
+ "## USE for Multi GPU Systems\n",
+ "#import os\n",
+ "#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+ "\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "import pandas as pd\n",
+ "import math\n",
+ "\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "%matplotlib inline\n",
+ "\n",
+ "# Importing SK-learn to calculate precision and recall\n",
+ "import sklearn\n",
+ "from sklearn import metrics\n",
+ "\n",
+ "target_names = [\"Knuckle\", \"Finger\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[ 1 2 9 6 4 14 17 16 12 3 10 18 5] [13 8 11 15 7]\n"
+ ]
+ }
+ ],
+ "source": [
+ "df = pd.read_pickle(\"DataStudyCollection/df_statistics.pkl\")\n",
+ "\n",
+ "lst = df.userID.unique()\n",
+ "np.random.seed(42)\n",
+ "np.random.shuffle(lst)\n",
+ "test_ids = lst[-5:]\n",
+ "train_ids = lst[:-5]\n",
+ "print(train_ids, test_ids)\n",
+ "df[\"GestureId\"] = df.TaskID % 17\n",
+ "\n",
+ "#df_train = dfAll[dfAll.userID.isin(train_ids)]\n",
+ "#df_test = dfAll[dfAll.userID.isin(test_ids)]\n",
+ "\n",
+ "x = np.concatenate(df.Blobs.values).reshape(-1,27,15,1)\n",
+ "x = x / 255.0\n",
+ "\n",
+ "# convert class vectors to binary class matrices (one-hot notation)\n",
+ "num_classes = len(df.TaskID.unique())\n",
+ "y = tf.keras.utils.to_categorical(df.TaskID, num_classes)\n",
+ "\n",
+ "labels = sorted(df.TaskID.unique())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x = np.stack(df.Blobs)\n",
+ "x = x.reshape(-1, 27, 15, 1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# convert class vectors to binary class matrices (one-hot notation)\n",
+ "num_classes = 2\n",
+ "y = tf.keras.utils.to_categorical(df.InputMethod, num_classes)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "Text(0.5, 1.0, 'Label for image 1 is: [1. 0.]')"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAALEAAAEICAYAAAAQmxXMAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAD1tJREFUeJzt3X2wXHV9x/H35yaBQIiFAGYgPIRmUp2UGeIMgm3BQhEE+hD8oymM0GhpY1XG2vpQdNoGqVXGKVUZLaNAIEWBQShD1BQIGRjaKRUCA5oUNBASSBoSIASChIck3/5xflc2l717d++evXu/8HnN3Nlz9jx9d+9nf3vO2bP7U0RgltlAvwsw65ZDbOk5xJaeQ2zpOcSWnkNs6fUsxJLulvTndS+rytWSnpd0X3dVgqQjJL0kaUK36xov6npMkq6R9JqkdTWV1sm2f6M8hl0j5WjEEEtaJ+kD9ZXXtROAU4HDIuK4blcWEU9GxH4Rsav70npH0smS7pL0wkihqvkxfS0iZjbUMV/Sf0t6WdLdna5M0l9LelrSi5IWS9q72XwR8YuI2A/4z5HWmXF34khgXUT8stMFJU3sQT1j5ZfAYuBzfa5jK/AN4JJOF5T0QeBC4BSq/+OvA1/qtqBRh1jSAZJ+JOmZ8tb+I0mHDZltlqT7yqvuVknTGpZ/X3lFb5P0sKST2tjm+cCVwG+Vt5ovlfv/QtJjkrZKWirp0IZlQtInJa0B1jRZ58wyz8QyfrekL5faXpL0Q0kHSvp+eRz3S5rZsPw3JT1Vpj0g6cSGaftIWlKen0ckfV7Shobph0q6uTyHT0j61HCPPSLui4hrgbVtPE9DH9NHJK2VtL1s58MjraNFHXdGxI3A/41i8QXAVRGxOiKeB/4R+MhoaxnUTUs8AFxN9Yo6AtgBfGvIPH8K/BlwCLATuAxA0gzgx8CXgWnAZ4GbJR3caoMRcRXwl8C95e1ykaTfA74KzC/bWQ/cMGTRs4DjgTltPrazgfOAGcAs4N7yWKcBjwCLGua9H5hbpl0H/EDS5DJtETCTqsU5FTh3cCFJA8APgYfLdk4BPl1aq9pImkL1vJ8REVOB3wYeKtOOKI3IEXVus4XfpHq8gx4Gpks6sJuVjjrEEfFcRNwcES9HxHbgn4DfHTLbtRGxqrz1/z0wvxxsnAssi4hlEbE7IpYDK4EzR1HKh4HFEfFgRLwKfIGqpZ7ZMM9XI2JrROxoc51XR8TjEfEC8B/A46UF2gn8AHjP4IwR8b3yXOyMiEuBvYF3lcnzga9ExPMRsYHyIi7eCxwcERdHxGsRsRa4guoFVLfdwNGS9omITRGxutT+ZETsHxFP9mCbzewHvNAwPjg8tZuVdrM7sa+k70haL+lF4B5g/yFHxE81DK8HJgEHUbXef1xagW2StlEdsB0yilIOLesGICJeAp6jat2a1dGOzQ3DO5qM7zc4IumzZVfhhfI4fo3qMQ7W1rjtxuEjgUOHPAdfBKZ3WGtLpQH5E6p3sE2Sfizp3XVuowMvAe9oGB8c3t7NSrvZnfgMVYtzfES8A3h/uV8N8xzeMHwE8DrwLNU/89rSCgz+TYmIjg8WqPbNjhwcKW+fBwIbG+bpyaV6Zf/381Qt7gERsT9V6zL4HGwCGo8TGp+Pp4AnhjwHUyNiNO9GLUXE7RFxKlUj8ShVi98Pq4FjGsaPATZHxHPdrLTdEE+SNLnhbyLVW8AOYFs5YFvUZLlzJc2RtC9wMXBTOe3zPeAPJX1Q0oSyzpOaHBi243rgo5LmltM1XwF+EhHrRrGuTk2l2td/Bpgo6R/Ys6W5EfhCOQieAVzQMO0+YLukvy0HgBMkHS3pvc02JGmg7GtPqkY1WdJeIxUoabqkeeXF/SpVa7h7NA+2rG9CqWMiMFDqmNTm4v8GnF8ysT/wd8A1o61lULshXkYV2MG/i6hOs+xD1bL+D3Bbk+WuLUU+DUwG
PgUQEU8B86jePp+hapU+10E9vxIRd1Ltb99M1fLNojf7lc3cTvW4f0G1S/MKe+4yXAxsAJ4A7gRuogoS5cX8B1QHhU9QPY9XUu2ONPN+qud+GW8cSN/RRo0DwN9QvWNtpTpu+Tjs8aFIJwd255VtXw6cWIZ/1bKX9Z3YbMGIuA34GnAX8CTVc7aoYdnVozlzIl8UP3YkfRw4OyKGHgCPS5KuAM6hesufNcbbnk115mcv4BMRcc2w8zrEvSPpEKrTa/cCs6lOK34rIr7R18LeYjJ/gpXBXsB3gKOAbVTnr/+1rxW9BbkltvQyXjthtocx353YS3vHZKYMP4M0/DRAE0a6urD1O0vsHNcXq70lbOf5ZyOi5SUEdaolxJJOB74JTACubPWhxWSmcPyE04Zf16TWJQ3sP9wZqGJX65Du2rqt9fK7HfJu3Rk3rR95rvp0vTtRPmb+NnAG1QU250hq90Ibs67VsU98HPBYRKyNiNeojsDn1bBes7bUEeIZ7Pkp1Qb2vPgGSQslrZS08vXqAyuz2ozJ2YmI+G5EHBsRx06i6bdRzEatjhBvZM+rsw5jzyvIzHqqjhDfD8yWdFS5qupsYGkN6zVrS9en2CJip6QLqK7omkD1LYvVrRca/krAgb1H2N2Y2uIcM8D2Eb4/6lNobzm1nCeOiGVUlwiajTl/7GzpOcSWnkNs6TnElp5DbOk5xJbemF9PrIEBBvbZZ/gZZrT+7ZDHzz2o5fRJ21tfj3z4ZS+1nL775ZdbTrfxxy2xpecQW3oOsaXnEFt6DrGl5xBbeg6xpTfufsYqRvhdiZ9/9PKu1v/7N/xRy+m7143Vj6ZbXdwSW3oOsaXnEFt6DrGl5xBbeg6xpecQW3rj7jzxwPMvtpx+9GWfaDn95UNa92717h0jdo1sybgltvQcYkvPIbb0HGJLzyG29BxiS88htvTG/Dxx7N7N7leG77dDr7zScvkjb9jQ3fZ3tF6/5VNXP3brgO3ALmBnRBxbx3rN2lFnS3xyRDxb4/rM2uJ9YkuvrhAHcIekByQtrGmdZm2pa3fihIjYKOmdwHJJj0bEPYMTS7AXAkxm35o2aVappSWOiI3ldgtwC1VXuY3T3Rmj9UwdHZRPkTR1cBg4DVjV7XrN2lXH7sR04BZJg+u7LiJua7lEi77kdj23tfXWRppubzt1dMa4FjimhlrMRsWn2Cw9h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSazvEkhZL2iJpVcN90yQtl7Sm3B7QmzLNhtdJS3wNcPqQ+y4EVkTEbGBFGTcbU22HuHTpNbTDjHnAkjK8BDirprrM2tZtnx3TI2JTGX6aqhOaN3E/dtZLtR3YRURQ9SzabJr7sbOe6TbEmyUdAlBut3Rfkllnug3xUmBBGV4A3Nrl+sw61skptuuBe4F3Sdog6XzgEuBUSWuAD5RxszHV9oFdRJwzzKRTaqrFbFT8iZ2l5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl120/dhdJ2ijpofJ3Zm/KNBtet/3YAXw9IuaWv2X1lGXWvm77sTPruzr2iS+Q9NOyu9G0W1xJCyWtlLTydV6tYZNmb+g2xJcDs4C5wCbg0mYzuR8766WuQhwRmyNiV0TsBq4AjqunLLP2dRXiwY4Yiw8Bq4ab16xX2u4CrPRjdxJwkKQNwCLgJElzqbrDXQd8rAc1mrXUbT92V9VYi9mo+BM7S88htvQcYkvPIbb0HGJLzyG29BxiS88htvQcYkvPIbb0HGJLzyG29BxiS88h
tvQcYkvPIbb0HGJLzyG29BxiS88htvQcYkvPIbb0HGJLzyG29BxiS88htvQcYkvPIbb0HGJLzyG29BxiS6+TfuwOl3SXpP+VtFrSX5X7p0laLmlNuW3a+YxZr3TSEu8EPhMRc4D3AZ+UNAe4EFgREbOBFWXcbMx00o/dpoh4sAxvBx4BZgDzgCVltiXAWXUXadZK290dNJI0E3gP8BNgekRsKpOeBqY3mX8hsBBgMvuOZpNmw+r4wE7SfsDNwKcj4sXGaRERVJ3QMOR+92NnPdNRiCVNogrw9yPi38vdmwe7Aiu3W+ot0ay1Ts5OiKq3pEci4l8aJi0FFpThBcCt9ZVnNrJO9ol/BzgP+Jmkh8p9XwQuAW6UdD6wHphfb4lmrXXSj91/ARpm8in1lGPWOX9iZ+k5xJaeQ2zpOcSWnkNs6TnElp5DbOk5xJaeQ2zpOcSWnkNs6TnElp5DbOk5xJaeQ2zpOcSWnkNs6TnElp5DbOk5xJaeQ2zpOcSWnkNs6TnElp5DbOk5xJaeQ2zpOcSWnkNs6TnElp5DbOk5xJZeWyFu0RHjRZI2Snqo/J3Z23LN3qzdX4of7IjxQUlTgQckLS/Tvh4R/9yb8sxG1laISz91m8rwdkmDHTGa9d1o+rGbyRsdMQJcIOmnkhYP16+zpIWSVkpa+TqvjrpYs2Y67cduaEeMlwOzgLlULfWlzZZzZ4zWS530Y/emjhgjYnNE7IqI3cAVwHG9KdNseO2enWjaEeNgT6LFh4BV9ZZnNrJ2z04M1xHjOZLmUvXnvA74WO0Vmo2g3bMTw3XEuKzecsw650/sLD2H2NJziC09h9jSc4gtPYfY0lNEjO0GpWeA9UPuPgh4dkwL6Yzr68yREXHwWG1szEPctAhpZUQc2+86huP6xjfvTlh6DrGlN15C/N1+FzAC1zeOjYt9YrNujJeW2GzUHGJLr68hlnS6pJ9LekzShf2spRlJ6yT9rPwcwcp+1wNQvsu4RdKqhvumSVouaU25bfpdx7eqvoVY0gTg28AZwByqC+zn9KueFk6OiLnj6DzsNcDpQ+67EFgREbOBFWX8baOfLfFxwGMRsTYiXgNuAOb1sZ4UIuIeYOuQu+cBS8rwEuCsMS2qz/oZ4hnAUw3jGxh/v2URwB2SHpC0sN/FtDC9/DYIwNPA9H4WM9ba/Y7d29UJEbFR0juB5ZIeLS3huBURIeltdd60ny3xRuDwhvHDyn3jRkRsLLdbgFsYvz9JsHnwm+fldkuf6xlT/Qzx/cBsSUdJ2gs4G1jax3r2IGlK+d05JE0BTmP8/iTBUmBBGV4A3NrHWsZc33YnImKnpAuA24EJwOKIWN2vepqYDtxS/eQGE4HrIuK2/pYEkq4HTgIOkrQBWARcAtwo6Xyqy1zn96/CseePnS09f2Jn6TnElp5DbOk5xJaeQ2zpOcSWnkNs6f0/kZhtd/D2o3sAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "i = 1\n",
+ "plt.imshow(x[i].reshape(27, 15)) # blob images are 27x15 pixels\n",
+ "plt.title(\"Label for image %i is: %s\" % (i, y[i]))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# If GPU is not available: \n",
+ "# GPU_USE = '/cpu:0'\n",
+ "#config = tf.ConfigProto(device_count = {\"GPU\": 1})\n",
+ "\n",
+ "\n",
+ "# If GPU is available: \n",
+ "config = tf.ConfigProto()\n",
+ "config.log_device_placement = True\n",
+ "config.allow_soft_placement = True\n",
+ "config.gpu_options.allow_growth=True\n",
+ "config.gpu_options.allocator_type = 'BFC'\n",
+ "\n",
+ "# Limit the maximum memory used\n",
+ "config.gpu_options.per_process_gpu_memory_fraction = 0.4\n",
+ "\n",
+ "# set session config\n",
+ "tf.keras.backend.set_session(tf.Session(config=config))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "loadpath = \"./ModelSnapshots/CNN-33767.h5\"\n",
+ "model = tf.keras.models.load_model(loadpath)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 1min 45s, sys: 12 s, total: 1min 57s\n",
+ "Wall time: 1min 12s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "lst = []\n",
+ "batch = 100\n",
+ "for i in range(0, len(x), batch):\n",
+ " _x = x[i: i+batch]\n",
+ " lst.extend(model.predict(_x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"InputMethodPred\"] = lst\n",
+ "df.InputMethodPred = df.InputMethodPred.apply(lambda x: np.argmax(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df = df.groupby([\"userID\", \"TaskID\", \"VersionID\"])[[\"InputMethodPred\", \"InputMethod\"]].agg(lambda x: x.tolist()).reset_index()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from collections import Counter"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.InputMethod = df.InputMethod.apply(lambda x: Counter(x).most_common()[0][0])\n",
+ "df.InputMethodPred = df.InputMethodPred.apply(lambda x: Counter(x).most_common()[0][0])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " | \n",
+ " userID | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ "
\n",
+ " \n",
+ " InputMethod | \n",
+ " InputMethodPred | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 4425 | \n",
+ " 4425 | \n",
+ " 4425 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 124 | \n",
+ " 124 | \n",
+ " 124 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 83 | \n",
+ " 83 | \n",
+ " 83 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 5935 | \n",
+ " 5935 | \n",
+ " 5935 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID TaskID VersionID\n",
+ "InputMethod InputMethodPred \n",
+ "0 0 4425 4425 4425\n",
+ " 1 124 124 124\n",
+ "1 0 83 83 83\n",
+ " 1 5935 5935 5935"
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.groupby([\"InputMethod\", \"InputMethodPred\"]).count()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_train = df[df.userID.isin(train_ids)]\n",
+ "df_test = df[df.userID.isin(test_ids)]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[3260 82]\n",
+ " [ 47 4274]]\n",
+ "[[0.97546379 0.02453621]\n",
+ " [0.01087711 0.98912289]]\n",
+ "Accuracy: 0.983\n",
+ "Recall: 0.989\n",
+ "Precision: 0.977\n",
+ "F1-Score: 0.985\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " Knuckle 0.99 0.98 0.98 3342\n",
+ " Finger 0.98 0.99 0.99 4321\n",
+ "\n",
+ " micro avg 0.98 0.98 0.98 7663\n",
+ " macro avg 0.98 0.98 0.98 7663\n",
+ "weighted avg 0.98 0.98 0.98 7663\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(sklearn.metrics.confusion_matrix(df_train.InputMethod.values, df_train.InputMethodPred.values, labels=[0, 1]))\n",
+ "cm = sklearn.metrics.confusion_matrix(df_train.InputMethod.values, df_train.InputMethodPred.values, labels=[0, 1], )\n",
+ "cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n",
+ "print(cm)\n",
+ "print(\"Accuracy: %.3f\" % sklearn.metrics.accuracy_score(df_train.InputMethod.values, df_train.InputMethodPred.values))\n",
+ "print(\"Recall: %.3f\" % metrics.recall_score(df_train.InputMethod.values, df_train.InputMethodPred.values))\n",
+ "print(\"Precision: %.3f\" % metrics.average_precision_score(df_train.InputMethod.values, df_train.InputMethodPred.values))\n",
+ "print(\"F1-Score: %.3f\" % metrics.f1_score(df_train.InputMethod.values, df_train.InputMethodPred.values))\n",
+ "print(sklearn.metrics.classification_report(df_train.InputMethod.values, df_train.InputMethodPred.values, target_names=target_names))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[1165 42]\n",
+ " [ 36 1661]]\n",
+ "[[0.96520298 0.03479702]\n",
+ " [0.02121391 0.97878609]]\n",
+ "Accuracy: 0.973\n",
+ "Recall: 0.979\n",
+ "Precision: 0.967\n",
+ "F1-Score: 0.977\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " Knuckle 0.97 0.97 0.97 1207\n",
+ " Finger 0.98 0.98 0.98 1697\n",
+ "\n",
+ " micro avg 0.97 0.97 0.97 2904\n",
+ " macro avg 0.97 0.97 0.97 2904\n",
+ "weighted avg 0.97 0.97 0.97 2904\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(sklearn.metrics.confusion_matrix(df_test.InputMethod.values, df_test.InputMethodPred.values, labels=[0, 1]))\n",
+ "cm = sklearn.metrics.confusion_matrix(df_test.InputMethod.values, df_test.InputMethodPred.values, labels=[0, 1], )\n",
+ "cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n",
+ "print(cm)\n",
+ "print(\"Accuracy: %.3f\" % sklearn.metrics.accuracy_score(df_test.InputMethod.values, df_test.InputMethodPred.values))\n",
+ "print(\"Recall: %.3f\" % metrics.recall_score(df_test.InputMethod.values, df_test.InputMethodPred.values))\n",
+ "print(\"Precision: %.3f\" % metrics.average_precision_score(df_test.InputMethod.values, df_test.InputMethodPred.values))\n",
+ "print(\"F1-Score: %.3f\" % metrics.f1_score(df_test.InputMethod.values, df_test.InputMethodPred.values))\n",
+ "print(sklearn.metrics.classification_report(df_test.InputMethod.values, df_test.InputMethodPred.values, target_names=target_names))"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_20_Paper_Figure.ipynb b/python/Step_20_Paper_Figure.ipynb
new file mode 100644
index 0000000..9d6dc52
--- /dev/null
+++ b/python/Step_20_Paper_Figure.ipynb
@@ -0,0 +1,494 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "%matplotlib inline"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df = pd.read_pickle(\"DataStudyCollection/df_statistics.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA8lJREFUeJzt2D1qVGEYhmFHJ/4VCppOrCyt7azF3hXYuAB7EdcjQgo7N+AKBMHCysJmCiMJYnKsD2M1zOFLbq5rAS9PcW4+OKtpmq4AHVdHDwD2S9QQI2qIETXEiBpi1kscfX7/1YX6pX765NHoCVvOr61GT5i5/fnb6AkzZ5vN6AkX3qfz9//9iLzUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXErJc4urpxfYmzO7v35vvoCVueHX4ZPWHm6MXT0RPmNpvRCy4tLzXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihpj1IlcPDhY5u6t3D49GT9jy+Pqt0RNmPty5OXoCe+KlhhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghZr3E0enkZImzO3v59vXoCVv+3F2NnjDz4OeP0RNm/o4ecIl5qSFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiFkvcXQ6/r3E2Z0dfvw6esK21Wr0gpmzX8ejJ7AnXmqIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTHrJY6en54ucXZ3F20PLMhLDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDzGqaptEbgD3yUkOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0x/wB+HS+RtgmxWAAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA91JREFUeJzt2DFqVGEYhlEnDsSAChZaWFiK2LkJSzegpZXrcA8uwspaa8FObLRUG9s4KCSGybW+jEUIuf76cE45xccLw8MPdzVN0yWgY2/0AOBiiRpiRA0xooYYUUPMeomjD/cf/1Of1PcOroyesGt/f/SCuePj0Qtmtj9+jp6w63Q7esHM69OXqz/97qWGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFmvcTRabtd4uy5nd69M3rCjs+Pro2eMHPz/enoCTPX3nwcPWHHdrMZPeFMvNQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmLWSxxdXb68xNlzO7x3dfSEHZ+evhg9Yeb+2yejJ8xcf/fv/WeXNpvRC87ESw0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ8x69IC/4caHw9ETdjx4/mz0hJlbX05GT5iZjo5GT/hveakhRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaohZL3F07+DKEmfP7+u30Qt23H61GT1hZvp1MnoCF8RLDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghZr3E0e1ms8TZlsPvoxcQ5aWGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFmNU3T6A3ABfJSQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTG/ASBTOQKG6jN+AAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA91JREFUeJzt2S1ulGEYhlGGDgUDKRIJVWUJeLaAx2FZA5ptYFgCCQbLBkhQJBQUP4GKIXQ+9JdBkNLpO71yjqx4cifNNW8ys5im6QrQcXX0AOB8iRpiRA0xooYYUUPMchtHH+492qmv1Bf7+6MnbNi7fTB6wsyu/Qqy/vZ99IQN02o1esLMq/XLxd/+7qWGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFmuY2ji+W1bZw9s8XR4egJG349/zF6wsz7d3dGT5i5/+zD6Akbfn/6PHrCP/FSQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIWY4ecBF+3r05esKGN0cvRk+YOTx+PHrC3I3roxdcWl5qiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpilts4Op2ebuPsmd16+3H0hA0Pnj4ZPWHm3vFq9IS59Xr0gkvLSw0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ8xy9ICLsP7ydfSEDQevV6MnzCz2duvzfTpdj55wae3WfxL4b6KGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFmuZWr69OtnD2r9cnJ6AmbdnETCV5qiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiFtM0jd4AnCMvNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BDzB5HPOlI/So2zAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA/pJREFUeJzt2T2OW2UYhmF7bAEphgjoIroBKVkAFXWyCdoUsBaWQkuVWQAdDQVdKqQoPwo/GiFNiH1SH5liZObwObeuq3Tx6pGlW59kr6dpWgEdZ6MHALdL1BAjaogRNcSIGmK2Sxx99OE3J/WT+tmdj0ZPOLD+7JPRE2b2r16PnjCzv7oaPeHQif1TdLn/Yf1vn3upIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qI2S5x9Ozu+RJnj/b7wy9HTzjw8ePfRk+YefP9/dETZj548vPoCYem3egFN+KlhhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQs13k6mazyNljPf96Gj3hwE8Pfhw9Yeare9+NnjDz6bQfPeG95aWGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFmu8jV6+tFzh7r88tp9IQDF/98O3rCzMWvf4+eMLc+wfdm2o1ecCMn+M0B/4WoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQsx094P9w/suL0RMOfPHsfPSEme3Lv0ZPmNltNqMnHJj2u9ETbsRLDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghZrvE0d0ffy5x9nintme1Wq2ejh4w93b0AG6NlxpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihpj1NE2jNwC3yEsNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xLwD3C47I4MwRxwAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA9ZJREFUeJzt2D1qFWEYhmETDoREwWCTwkosLW1cgSuwtBZ0F27CJWhhaekyLKzUJgFthAQt8jvWw7GQkOFLbq+rnOLlaW4+mI1pmm4BHZujBwBXS9QQI2qIETXEiBpiVkscfbr9/Fr9Ut/c2ho9Yd1123R2NnrBzMWv36MnrJlOT0ZPmPl48X7jb9+91BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xKyWOLq5s7PE2Uub7u+NnrDm8NHu6Akzu59+jp4ws7l/PnrCmvPTk9ET/omXGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDzGqJoxt3bi9x9tK+Pbs3esKazy/ejJ4w8/j1y9ETZvbefR894cbyUkOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUELMaPeB/9ergyegJM3e/nIyewBXxUkOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUELNa4uh0eLTE2Ut78PbH6Alrvn54OHrCzPbB/ugJM+fHx6Mn3FheaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMasljp4fHS1x9vKu255r6Gz0AK6MlxpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihpiNaZpGbwCukJcaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiPkDAyM3Nq0aV9kAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA75JREFUeJzt2C2OU2EYhmGGFCYEgSEZEgwkKFAYJB4cW8CzATZA2ACLGJbACnAgCIafZBCgK0YwKQd9UlSZk69z57pkxZtH9O6X9GCapktAx+XRA4DzJWqIETXEiBpiRA0xqyWOPrn9Yq/+Uj+7czR6wpZptV+/p1c+n4yeMPf7bPSCLZv1evSEmXd/3h786/P9+mYB/03UECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIWS1xdLp+bYmzO3v45uPoCVteH30YPWHm6eNnoyfMbL6djJ5wYXmpIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXErBa5enh1kbO7On7/aPSELV8f3Bw9YebL81ujJ8zcffl99IQLy0sNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUPMapGrP34ucnZX91+djp6w5fTwxugJM/d+fRo9YWYzesAF5qWGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFmtcTRzXq9xNnd7dseWJCXGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcQcTNM0egNwjrzUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ8xfwk8uZpcYacUAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA8hJREFUeJzt2LFtU2EYhlGcWITUIGigIBUTMAINK0DFNAxAwQw01FDSMQQSDSIBJAQRTWJfastUJle/8+icAT69xX38S15M03QD6DgYPQC4WqKGGFFDjKghRtQQs5zj6JPjZ3v1l/rhndujJ2xbLEYv2HD55evoCZvWq9EL9t779Zt/fkReaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYpZzHD04vjXH2Z39evxg9IQtf+7u1+/pvbcXoydsWJ2ejZ5wbe3XlwX8N1FDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BCznOPo+uH9Oc7u7MOr16MnbFlN69ETNjz9+Hz0hE2nZ6MXXFteaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYpZzHD38eT7H2Z2dvHsxesKWo09HoydsOPn2efSEDZejB1xjXmqIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmKWcxydfp/PcXZnj17eHD1h700XF6MncEW81BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYpZzHF19/zHH2d3t2x6YkZcaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooaYxTRNozcAV8hLDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcT8BePyM5XVDyMSAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA8xJREFUeJzt2KGKVGEch2FHhnWCsAgbrBbvQLCJyUsQjGLxXrwJMdoN2gxmg9hsCxYNsirjCnPMhzENe/h2X54nTvjzY+Dlg7Oapuka0HF99ADgYokaYkQNMaKGGFFDzHqJo4+On16qT+qrzWb0hD2rG0ejJ8zsvn0fPWFmt92OnnDpvdu9Xv3vdy81xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0x6yWOrjabJc4e7Nf9O6Mn7Dl9/Hf0hJm7L45HT5j7+Hn0givLSw0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIWa9yNXjm4ucPdTXJ39GT9jz5cHL0RNmHr56NnrCzNHoAVeYlxpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihpj1Ild//Fzk7KFuvbk9esKee2+fj54wc/Lh0+gJM7vRA64wLzXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTHrJY5O2+0SZw928v509IQ90+/L9R/tzs9HT+CCeKkhRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSslzi6Oztb4uzBLtseWJKXGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGmNU0TaM3ABfISw0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXE/AOiQTRpF6XfyQAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA71JREFUeJzt2DGKU2EYhlGTicLEapagoDuZykIQrN2NixB34BKsXIWCipUwIlYDERyTWF9iIXGuv/NwTpni403x8MNd7Pf7W0DHcvQA4HqJGmJEDTGihhhRQ8xqjqPny6f/1Sf1k7Oz0RMOLO6uR0+Y2F9djZ4wsf36bfSEQ7vt6AUTr3evFr/73UsNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUPMapary5NZzh7r7fMHoycc+PT4xegJE08+nI+eMPH90Xr0hAO7y8vRE/6IlxpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ8xqjqPLO7fnOHu008+z/M2/8vDNs9ETJpbv16MnTNxfvRs94cbyUkOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUELOa4+jux9UcZ4927+XH0RMOrU9HL5jaXIxeMLHdbEZPuLG81BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xKxmubrbznL2WD8vvoyeAP+MlxpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXELPb7/egNwDXyUkOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xvwCLTy8+CemsOgAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA9FJREFUeJzt2L1qFVEYhtFz4kkEA9oINjZW2gpegWAtWOUCvDALG7HR2juwsLQXtNMEiT8ETMZ6OBYhZNjxYa1yio+3edgw62maVkDHzugBwOUSNcSIGmJEDTGihpjNEkefbA6u1C/1nb3d0RO2rPdvjJ4wc3r0ffSEubPT0QuuvHdnr9f/+u6lhhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghZrPE0Wu3bi5x9sK+Pn0wesKWP88OR0+Y2Xl7f/SEmdsvP4yesGU6ORk94Vy81BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYjZLHF1f31vi7IX9uLsePWHLx0evRk+YeTgdjJ4ws/Nmf/SELacnJ6MnnIuXGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGmM0SR89+/lri7IXde/Fp9IQtj98/Hz1h5s6336MnzJweHo2e8N/yUkOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUELNZ4ujZ8fESZy/squ1ZrVar3c9fRk+YmUYP4NJ4qSFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUPMepqm0RuAS+SlhhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmL+AhRbNW5yBk8IAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA71JREFUeJzt2KGKVGEch2FnGVZEm2gTQVzQ5iWIweItWO3ejd1qd8NGm9Vk0GYTBWUXB3WP+TimYQ6f+/I88YQ/v/LywVlN03QJ6DgYPQDYL1FDjKghRtQQI2qIWS9x9PGVp//VL/WDa1dHT9h28/roBTOrsx+jJ8ycf/4yesKW89PT0RNmTs5frf713UsNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUPMeomj02azxNmdfX/yYPSELTeefxw9Yeb98dHoCTO3X3wbPeHC8lJDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaohZL3J1tVrk7K4+PRq9YNubuyejJ8zcuX9r9AT2xEsNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUPMeomjq8PDJc7u7Ojl2egJWx6+fjZ6wsy9D19HT5iZfv4aPeHC8lJDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BCzXuLotNkscXZ3b9+NXrDl8ugBf/k9egB746WGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMatpmkZvAPbISw0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXE/AFdwi3mpFIf/wAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA8FJREFUeJzt2D1qVGEYhuGTzCDptEtnFxS3Idor6Ios3IGLcAfa2caAvaIIWoiVPwOCMMf6MBYSMn7JzXWVp3h5mpsPzsE8zxPQcTh6AHCxRA0xooYYUUOMqCFmvY+j9w4fXapf6qsb10dP2LE9uTl6wsL22mr0hIXVm7ejJ+zYbjajJyy83D4/+Nt3LzXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTHrfRw9PDrax9lz+/rwzugJO06fPBs9YeHVr9ELlp7efzB6wq53H0Yv+CdeaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMeu9XF2t9nL2vObLNWeapml6/P7u6AkLZ69PRk9YuP378+gJV5aXGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGmPXoAf/D8YtPoyfs+HF6PHrCwq2fX0ZPWJi/fR894cryUkOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUELPex9HtZrOPs+d22fZM0zRNH0cPoMpLDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmIO5nkevQG4QF5qiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIeYPDXQwTAtceasAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA71JREFUeJzt2D1qVGEYhmEnmUgIqJ2NnfFnEdY2lhbuxaW4BisrCxXcg31ALIzYRAIyjWesh7Ea5uQbb66rPMXL09x8cBbr9foW0HE0egCwX6KGGFFDjKghRtQQs5zj6PPjVwf1S/3o7Gz0hC2LA9u0vr4ePWHDtFqNnnDwPkxvF//67qWGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFmOcfR43t35zi7s9/PnoyesOXq/GT0hA0P3n0bPWHD9PWw9vxPvNQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmKWowfchPuvL0ZP2PL54afREza8eP9y9AT2xEsNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUPMco6ji9PTOc7u7MvH89ETtjy683j0hA1P/1yOnsCeeKkhRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaohZznF0uvo1x9mdPXxzMXrCttsnoxdsmH78HD2BPfFSQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIWc5xdFqt5ji7s+n75egJcGO81BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xCzW6/XoDcAeeakhRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGmL9/Ay6J4ZxVMAAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA85JREFUeJzt2DFqVGEYhlEjozZB7NMEFdyDlaVi4wbsbASzEhehrVimdAkKomhhY2ehQkhSTZNc68tYDbn8ycM55RQfbzEPP9ydaZquAR3XRw8ALpaoIUbUECNqiBE1xKyWOPpk7+BSfVKfbu+OnrDp+HT0gpnp5HLtOV+vR0+49D6cv9/53+9eaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYlZLHD16tL/E2a3tvvg1esKGP4f3R0+Y2Xv7ffSEufV69IIry0sNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFmtcTR3w+nJc5u7c6z49ETNnz5cTh6wszTd49HT5g7Phm94MryUkOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUELNa4uj+4dkSZ7f26vPH0RM2PHjzcvSEmbunX0dP4IJ4qSFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiFktcfTm0XqJs1t7ffB89IQN937+HT1h7sYifwUG8FJDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaohZLXF0+vRtibNbuzV6wH+cjR5AlpcaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooaYnWmaRm8ALpCXGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaoj5B69YNYmBemy4AAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA8VJREFUeJzt2D1qVGEYhmHHTCUS04ighVrqOizch1buwCVYC27CBViIe7C2F4TY+JNIwmSO9WGshhy+8ea6yileHgZuPjiraZpuAB03Rw8ArpeoIUbUECNqiBE1xKyXOPr85MVBfVJf3TkePWHHdH4+esLM9sfP0RNmps1m9ISD93H7fvWv373UECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXErBe5+uDeImf39fXZ3dETdtz/9H30hLnfZ6MXzG02oxf8t7zUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpi1ksc/fPwZImze/v8+t3oCTueHr8aPWHm0dtvoyfMXF1cjJ7w3/JSQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQs17i6K0vp0uc3dvjDy9HT9hx+3L0grnp8sAGsTcvNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMesljk6/zpY4u7cnb45GT9i13Y5eMDMdHeB/xF681BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYtZLHL06PV3i7P4ObQ8syEsNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUPMapqm0RuAa+SlhhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmL+Ag0ZNoQqADd8AAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA8RJREFUeJzt2LGOTGEch2EzGStWIQhRiFJ0bkIhLsAliLtwIe6ARq/TUopJ1BKJhMgikV3rqCejmszJt948T3mKf37Nmy85i2mazgEdy9EDgP0SNcSIGmJEDTGihpjVHEcf3Hhypn6p/75za/SEM2/5Zj16wobp5Hj0hDPv1Z8Xi39991JDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BCzmuPodPP6HGd39vL5s9ETthwuD0ZP2PDw/qPREzacvv8wesJ/y0sNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFmNcfR5bfvc5zd2b3Xj0dP2HL65cLoCRvuflyPnsCeeKkhRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaohZzXF0unRxjrM7u3L55+gJW36sD0dP2HRwfvQC9sRLDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDzGqWq58+z3J2V1ef3h49Ycu1X19HT9h0fDJ6AXvipYYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUELOa4+jp0dEcZ3f39t3oBVum0QPI8lJDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BCzmKZp9AZgj7zUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ8xfJqYvv8PtX+kAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA/RJREFUeJzt2DGKFGkch+Fu7QmU1UBUjJQ9gAfwAF7B0MBAwQt5CGMjL+HCYqAsuywTiCAjkzgtdJdx0QYyTPnpy/OEFfz5JS8f1HqaphXQcWn0AOBiiRpiRA0xooYYUUPMZomjDy8/+qV+qV++dm30hAPrq1dGT5iZzs5GT5jZf/m19qxWq9W03Y6eMPN6/3L9ve9eaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYjZLHF1vjpY4e353bo1ecOD9k9ujJ8zce3U2esLM0d//jZ5wYLfdjp7wQ7zUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiNqMH/Ayn92+OnnDg3eMXoyfM3P/0fPSEmbvH10dPOHRyMnrBD/FSQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQs1ni6LTbLXH23K58/Dp6woFnxw9GT5j543g/esLc2Xb0gt+WlxpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihpjNIlen/SJnz+vor39GTzjw/9M/R0+YufHh39ETZnYnn0dP+G15qSFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xGwWuTpNi5w9r93p6egJh968Hb1gZj96ABfGSw0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ8x6mqbRG4AL5KWGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYr4BVeNAAZSwwgYAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA8hJREFUeJzt2D1qFWEYhmGjB4IWERREETdgZ5/aBVjHytL9uAlXIO7AUsTGP4xIwJQimoSTsR6OhYQMX3JzXeUUL09z88FsTdN0Bei4OnoAcL5EDTGihhhRQ4yoIWa1xNHH1/cu1C/1a3fvjJ6w4eeje6MnzNz49mv0hLl3H0cv2DAdHY2eMPP69OXWv757qSFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiFktcfTq9vYSZ8/sy96D0RM2vH/+YvSEmWf7u6MnzBw8vT96wob1h8+jJ/wXLzXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihpjVEken9XqJs2e28/V09IQNu2+fjJ4w833/9ugJMw//HIyecGl5qSFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiFktcXQ6Plni7JndevVp9IRNb3ZGL5i5+ftg9ISZ9Y/D0RMuLS81xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xqyWOTifHS5w9s/Xh4egJmy7iJhK81BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFma5qm0RuAc+SlhhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmL+AimdOMFSSmsTAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA/1JREFUeJzt2DFrXXUcx+GmvVksxBidhWLW4itwdBFfQJxd7NKxb0poRwffQPdCO8UgSBEEUZOQWHrv7Xw4DuGSwz9+eJ7xDD++y4c/nL3tdnsP6Lg/egBwu0QNMaKGGFFDjKghZrXE0a9XJ3fql/qDT49GT5j59/HnoydM7J+/Gz1hYu/1L6MnzGwuL0dPmPh58+Pef333UkOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUELNa5OpmvcjZXf3x7fHoCTM/PHsxesLEy3++GD1h4u3JZ6MnzGzOLkdPuBEvNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGmNUSRx8cHCxxdmcX31yMnjDz/ce/j54w8dVHp6MnTDx59HT0hJnV2a+jJ9yIlxpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihpjVEkc3V9dLnN3Z4fOHoyfMfLn/3egJE9evDkdPmDg+/W30hJn3owfckJcaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooaY1RJHt+v1Emd39slPb0ZPmDl6eTh6wtTVn6MXTGzOL0ZP+N/yUkOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiFktcnWzXuTsrtZ//T16wtxd3ESClxpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihpi97XY7egNwi7zUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ8wHM75AaUOPz7EAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA91JREFUeJzt2DtqVGEch2EnGYgWIlip4AUre9egthZa2dm6HzeRJWQXEhRUvCCIlZWEmNuxPoyFxBk+fXme8hR/fs3LB2cxTdMFoGNr9ABgvUQNMaKGGFFDjKghZrmJow+2nv5Tv9SX16+NnrDix/1boyfMXPp2MHrCzGL//egJK84OD0dPmNk721387ruXGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGmOUmjm5dvLiJs+f28fnd0RNWvH7xcvSEmcfvHo2eMHP05PLoCasOD0cv+CNeaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMctNHJ1OzzZx9tyOr0yjJ6x4c3QwesLMq/07oyfM3Dt5O3rCf8tLDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDzGKaprUffbjzbP1H/8L2zRujJ6z4efvq6AkzO5+/j54wc/rl6+gJK6bjo9ETZvbOdhe/++6lhhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghZrmJo9Px0SbOntvJh0+jJ6zY/sc2nYwewNp4qSFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUPMYpqm0RuANfJSQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTG/AKCLP+/TEaq6AAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAABAdJREFUeJzt2LGL13Ucx/F+x8+GTryg8Shw8B8QbbIxkMam9v6EdpcWVzedHR0dXZ2VQsQho6WCqChQrwPvfs5fvg7Hj/v2uXvyeIzf4c1refKB72qz2XwAdOyMHgCcLlFDjKghRtQQI2qIWS9x9Mv1N2fql/rO7kejJ8yctU2bNwejJ0wcvXo9esLc8dHoBROPjh+s3vfdSw0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ8x6iaOrC4uc3d7l/dELZv69sjd6wsTFn1+NnjCxev7T6Akzm8Oj0RNOxEsNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFmvcTRnY/3lji7td+/H71g7un1e6MnTNz48evREyb2vv1k9ISZt7/+NnrCiXipIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIWY8e8H/4Yv/l6AkzNz+7NnrCxK0XD0dPmLjz4VejJ5xbXmqIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmLWSxw9/vufJc5u7cntq6MnzPxx/7/REya+u/v56AkTn/71bPSEc8tLDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghZr3I1Z3VIme3tff4l9ETZi79sDt6wsTq4M/REyaODg9HTzi3vNQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSsNpvN6A3AKfJSQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTHvAMqWOe4STpmsAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAABAFJREFUeJzt2LFrnHUcx3GTu0AJhjhUqYNIwMXBqaurY5cOduj/0TF0de5cwVmdHCP6D3QsFFwcbbbADUa45JwfzkGOPPxyb16v8YYvHw7e/OA52Gw2HwAdh6MHAHdL1BAjaogRNcSIGmKWcxz9ZvHtvfqkvvjibPSELeuPT0ZPmDj683L0hIn1X+9HT7j3Lm5/PPiv373UECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXELOc4enh8PMfZnf3x8nT0hC0XX78aPWHi6XcvRk+YePT91egJW26vr0dP+F+81BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYpZzHD385OEcZ3d2/viX0RO2nB19OHrCxIMnl6MnTP2wGL1gb3mpIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIWc5y9fqfWc7u6verL0dP2PLs5LfREyZWfz8YPWHio5ub0RP2lpcaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooaY5RxHN+ubOc7u7M1PX42esOX8+Wr0hImjX09HT5haLEYv2FteaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMcs5jm5WqznO7uyz1+9GT9jy9ufPR0+Y+PTqfv1Ht+v16Al7y0sNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUPMwWazGb0BuENeaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCHmX+qLOZmn6i25AAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAABBlJREFUeJzt2D+rlnUcx3HP3R0HT4ODS4iKNYSzg1uNPQchGsTVubEHUIu0NrQ3BOEg+CDEpqKcBA1BoT8Ypzzncr64Gg43Xvz07es13sOXz3C/+cG1N03TKaBjM3oA8GqJGmJEDTGihhhRQ8x2jaOfnv7stfqkvrlwbvSEheMzB6MnzGz+eD56wszxw0ejJyxMh4ejJ8zcPf5+7/9+91JDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BCzXePoZn9/jbM7++3G+6MnLFz5+JfRE2Z+unN59ISZS7eejp6wcHR4OHrCiXipIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEbNc4On14fo2zO7t97evRExZufvDJ6Akz3z24NXrCzJc/fj56wtL9P0cvOBEvNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMds1ju798+8aZ3f2639nR09Y2Lx3MHrCzNX9d0dPmDneX+Wv+VbwUkOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUELNd5erjJ6uc3dUX314fPWHh3s/fjJ4w89Wzj0ZPmHnn2d+jJywcjR5wQl5qiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0x2zWOTi9erHF2Zxd/eDJ6wsKVo5ujJ8wc/D6NnjBz9q8Hoye8sbzUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXE7E3TNHoD8Ap5qSFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooaYl0upQEIS4h/yAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA+VJREFUeJzt2D1qFWEYhmFPPOJPglroAgQr7VIEXIC1rRuwsHEF7sM96BLcgGBjpZXYSxS0SiLJWA/HQg4ZvuTmusopXp7m5oNZTdN0BejYGT0AOF+ihhhRQ4yoIUbUELNe4ujT9fML9Uv96u290RM2rHZ3R0+YOf1+OHrCzPTnZPSEC+/92bvVv757qSFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiFkvcXR1bZGzW/v26vHoCRv2Dg5HT5i59/ru6Akz06fPoydcWl5qiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0x6yWO7ty8scTZre0dHI6esOHj/tvRE2b2n7wcPWHm/qfRCy4vLzXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTHrJY5Op2dLnN3a7ps7oydsePDsxegJMw8/H42eMLO6fn30hA3T8fHoCf/FSw0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ8x6iaPT0fESZ7d268PX0RM2PPpye/SEmenX79ETZs7OptETLi0vNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGmPUSR6c/J0uc3drpj5+jJ2y6iJtI8FJDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BCzmqZp9AbgHHmpIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihpi/ghc5L1BWVhIAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA9pJREFUeJzt2LGOTGEch2FHRiKh00g0QiQ6oRCVlntQuAYX4AZcg0qrVCp0bkEiKO0mRGUJa/eoT0Y12ZNvvXmecop/fsnMmy+ZaZ7nM0DH2dEDgJMlaogRNcSIGmJEDTGbNY4+uPj4VP2lPl25PHrClun7j9ETFo6+fhs9YWE+/D16wqn3+vjl9K/PvdQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcRs1jh6ePfmGmd3dv3Zu9ETtrz5dGP0hIUbT0YvWPqztz96wn/LSw0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIWazxtH9e+fXOLuzc/fn0RO2vP/0YvSEhYeXH42esLS3P3rBf8tLDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDzGaNoxc+z2ucTfk1H46esDAdHo2esOAXtDsvNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMZs1jl56u7/G2Z19fHp79IQtt57fGT1h4dqXD6MncEK81BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYjZrHJ0Ofq5xdmdXXx2MnrBl+nM8esLC/ON0fWfszksNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUPMNM/z6A3ACfJSQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTF/AX8OOewwYPNOAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA+1JREFUeJzt2L2OjGEch2GzO0S1gkItxFFodSoJcQZqlTNxAkqH4CB8RCORkA0FlY/4SNh91W9exWYZz7hzXeUU//wyyT1PMqtpmk4AHTujBwB/l6ghRtQQI2qIETXErDdx9Nrura36S321uzt6wsLOmb3RE+Z2tus7mj5/Hj1h4fD799ETZh4dPlz97nMvNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMevRA/6FnfPnRk9YeHv78ugJMz/2Ri+Yu/jgzegJC4ev90dPOBIvNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGmPVGrk7TRs4e1/vrl0ZPWHh67/7oCTPvD76MnjBz8/Hd0RMWTr/eHz3hSLzUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXErDdydbXayNnjOvvi2+gJC1ef3Rg9Yebd0wujJ8xcebI/esLCz9EDjshLDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDzHojV1fb9Vtx8vmr0RMWTt3ZGz1hZu/ry9ETZg4+fRo94b+1XfUBf0zUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXErDdy9fBgI2eP6+DDx9ETlrZxEwleaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYlbTNI3eAPxFXmqIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKgh5hdJrjn3aK68hQAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAABBJJREFUeJzt2LGKXGUch2FnnTibKIFFSGtjZR9yA8Eidbr0XoEXY2FhZSNegSB2goVFICRdigQLq5ikcHV2TurDUViGHL7dl+cpp/jz48DLB7OZpukDoONk9ADg/RI1xIgaYkQNMaKGmO0aR7/cPbpSf6mffPLx6AkLm91HoyfMTPv96Akzh1d/jZ6wcNW+0U+HHzb/9buXGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGmO0aR6f9v2ucPdr+i89GT1h4cf/W6Akzd37fj54wc+vnJ6MnLEz7q/WN/o+XGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDzHaNoye73Rpnj/by639GT1h4eu+70RNmfnx7e/SEmW8fPhg9Yenxs9ELLsVLDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDzHaNo5vT3Rpnj3bjw4vRExYupsPoCTN3T/8YPWHmm9unoycsXJcX8LrsBC5J1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiNmucXT6+3yNs0c7/f5s9ISFz//8avSEmU9/vTF6wsydJ89GT1i4GD3gkrzUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpitmscPZyfr3H2aGe/PB89YeHst5ujJ8xMr9+MnjBzeHO19lwnXmqIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmI20zSN3gC8R15qiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIeYdY2JDaF9OZsQAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA+lJREFUeJzt2TGOTWEch2F35hIakolI1IJGO5VSLEFhA2I5orICjSXoFUJiD8IkCiQKMnLnqE+OQiaOb7x5nvIW//yaN19yz2aapnNAx97oAcDfJWqIETXEiBpiRA0x2zWO3t97cKb+Ut+/fHn0hIXN1YPRE2Z2H45GT5iZjo9HT1g6Y1+KXp682Pzudy81xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0x2zWObs5fWOPsqb1/dGf0hIV7D1+PnjDz6unh6AkzB8/fjp6wMP08Hj3hj3ipIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEbNc4unfp4hpnT+3HtWn0hIUn19+MnjBz++bh6AkzB6MH/Me81BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xGzXOLr79m2Ns6d269nR6AkLd988Hj1h5sa7T6MnzOx2u9ET/lteaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYrajB/wLu/cfR09YuPL5y+gJMyfff4yeMLPZ3x89YWE62Y2e8Ee81BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYrarXJ2mVc6e1vTzePSEhd3Xs7eJBi81xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xm2maRm8A/iIvNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BDzCz6KPk4JUCCpAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA/dJREFUeJzt2LGKVGcch+GdcRRNmkXBLl26XIKdkEJsbLwK8XL0JrTQMuQKhLQJYpGQWEkUXFBX0JljfTgWy+L47b48TznFx28YXv4wq2maDoCO9egBwLclaogRNcSIGmJEDTGbfTz66/rumfpLfXXx0ugJC+urh6MnzB1/HL1gZvvu/egJS7vt6AUzv+8erb72uUsNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUPMZvSA72H1y8+jJyz8e+tw9ISZK/9PoyfMXH/81+gJC9u3R6MnnIhLDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghZrOXV9cX9vLsaT2/9+PoCQv/3H44esLM0e549ISZOy/vj56wcOm3P0ZPOBGXGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGmM3oAd/DtWdn72s+uPHT6AkzR59/GD1h5vKrD6MnLOxGDzghlxpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihpjNXl6ddnt59rSuP3kxesLC0z9vjp4ws/60HT1hZv33f6MnnFsuNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGmM1eXp2mvTx7WtvXb0ZPWFidsU1n6xc7ONiOHnCOudQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSspmkavQH4hlxqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIeYLUEY8gMmWSP8AAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA/ZJREFUeJzt2DFuXGUYhlGPc/FELpw9IFGwAXZAsoJU1KTIYlKkyxqooUFCQqJDSEhR6jRZAKQIxGP7ph5dkMzYV//w6JzSxadXM3rml7yZ5/kE6DgdPQC4X6KGGFFDjKghRtQQM61x9OvTp0f1L/XNdjt6wtLNUX1EJ/PucvQE/qMfb77b/NPfvdQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcRMaxx9cHGxxtmDvX/85egJC2fvr0dP2HP28+vRE/bMu6vRE5Zujus7+zdeaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMdMaR//+6os1zh7sl5evRk9Y+DjvRk/Y8+Tb56Mn7Nn+8OvoCf9bXmqIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmKmVY7+db3G2YN9/+Hh6AkLn08fRk/Yc3V+XL/v281m9ISleR694FaO65sE7kzUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qImVY5+ubtGmcP9uLZN6MnLFydPxg9Yc+j39+NnrDnevps9ISFeXc5esKteKkhRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcRMaxy9/uPPNc4ebPrpt9ETFlb54O/gavQA7o2XGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGmM08z6M3APfISw0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEfAJxZDrYSGDmAAAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA/NJREFUeJzt2LGKVGcch+HZ3SkMrFoEtNtrsAhBiJXoLQi5A+8gXZpcQ8BGbyApQyAkEFtvwMYrkEAshF3UeE7qw1gs4558k5fnKbf482OZdz6Yo3meN0DH8egBwNUSNcSIGmJEDTGihpjtGkcfnjw6qJ/Ut7dvjZ6w64troxcsTK//Gj1hYTo/Hz3h4P0+/XT0qb97qSFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiNmucfT49HSNs3t7+cPZ6Ak7vv3qxegJC78+uzd6wsLtHw/r/7PZbDab6ePoBZfipYYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUELNd4+jxzRtrnN3b47vPR0/Y8d2Xr0ZPWPj5mzujJywcPTkZPWHHPH0cPeFSvNQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcRs1zg6/f1mjbN7++X7+6Mn7Hj69YPRExbOfns3egJXxEsNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUPMdo2j08XFGmf3dvrHy9ETdlz/87C+T+f3H0ZPWJj+Oaw9/yeH9ckCPpuoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qI2a5ydZ5XObuv6e3b0RPgP+OlhhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKgh5mie59EbgCvkpYYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpi/gWimDjGN6I/ugAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAABAVJREFUeJzt2M2qjWEch2Fr7yXlYw+QwY5SSsZmUmYGDsBAzsDAYZg4BWcgOQDFhANwBgx85CMp5WNjGb+9Brud5eHuuobv4N9vcvfUu1itVvuAjo3RA4A/S9QQI2qIETXEiBpilus4ennj6j/1S33z2NHRE2YWW0dGT5j6tjN6wcSPN+9GT5hZ7XwbPWHiwc+7i99991JDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BCzHD3gb1gcOTx6wszbS9ujJ0xsPf86esLE8s270RP+W15qiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xy7Vc3dhcy9m9enbt5OgJM49u3B49YeLOh/OjJ0w8uXJm9ISZ7y9ejp6wK15qiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpilus4unHo4DrO7tmBC+9HT5g5sXlo9ISJm0efjp4w8fDsxdETZjZfvBw9YVe81BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xCzXcXT15es6zu7ZzuNjoyfM3Du3NXrCxPsf26MnTOz/8GX0hJmfowfskpcaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUPMcvSAv+HU/dejJ8zc+nh99ISJz8cXoydMnP70avSE/5aXGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGmMVqtRq9AfiDvNQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDzC8pATidXPUl+QAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA9pJREFUeJzt2cHKjGEch2HzzUSUwlLkLGzFwlE4AWVr4XRslZ2FnICNslE2kqz07VhgMfNavw2l4fWMu+tazuLfr5nunvq+1TRNZ4COk9EDgL9L1BAjaogRNcSIGmI2Sxy9e/7eUf1J/eTGtdET9mwvXRg9YWb99sPoCTPbL19GT9h3ZP8perF7svrZ515qiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiNkscPbl+dYmzB7v99PXoCXseXnk3esLMzUf3R0+Yufz45egJ/y0vNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGmM0iV9frRc6WvPy2HT1h5tzn3egJc6sjfG+m4/rNfuUIvzngT4gaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xm0Wufjpd5Oyhnj+4NXrCnmcX7oyeMHPx1fvRE2Z26/XoCXum3Xb0hN/ipYYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIWazxNHd129LnD3Y2TcfR0/Yc/ZkNXrC3JH9ZhzOSw0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIWazxNHp+/clzh5se3o6egL8M15qiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiVtM0jd4A/EVeaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCHmB5e8NvThaQtjAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA+5JREFUeJzt2KGKVWsch+G9Z3bScLCooCC2c9CkzWTxFsQLMYp3YPUmRLwAkycZxGCymgzCIKKGGfYs82IbZOPim3l5nrjCn195+WCtp2laAR0HowcAf5eoIUbUECNqiBE1xGyWOPpg8+hM/VI/uHhh9IRdN66NXjCz/nI0esLM6dHX0RN2TCfHoyfMvD59sf7ddy81xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xm0Wunm4XObuvn/f/Gz1hx62nH0ZPmPn/5Z3RE2auP3s3esK55aWGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BCzWeTqweEiZ/f1+d7Z2rNarVZvrr0dPWHmyaMfoyfMvH9+afSEHduT49ET/oiXGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGmM0iV0+3i5zd181X30dP2HH39sPRE2aOPv8zesLMv9PH0RPOLS81xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xm0WurteLnN3X4cdPoyfsuPL48ugJM1dPvoyeMLM9Ph494dzyUkOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiNkscnWaFjm7r+23b6Mn7DqLm0jwUkOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUELOepmn0BuAv8lJDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMb8AVtk7Rfu4C/MAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAA8NJREFUeJzt2SFuFGEch2G2TBF4EkgwICq5BTcAwwEQ3IA7oLkErhIOgEUWBSFAgoAgSA3bDnozCLLZyVfePI9c8c/PvPmSnc08z9eAjqPRA4DDEjXEiBpiRA0xooaYaY2jD48eX6m/1Kc7t0dPWDh/cHf0hB03z76NnrBj++nz6AlLV+xL0ZvLV5u//e6lhhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghZlrl6mazytl9fXl0f/SEhXfPX46esOPe6dPRE3acPPs6esLSfDF6wT/xUkOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiJnWOLq5cWONs3s7/jWPnrDw+vx49IQd08/roydwIF5qiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpipjWOzr+3a5zd263T96MnLLw4ezJ6wo6Tjx9GT9ixnS9HT/hveakhRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaoiZVrl6ebHK2X1dfP8xesLC5u3V2rQdPYCD8VJDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGmM08z6M3AAfkpYYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpi/gAA0TNCJbjRYgAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAD4CAYAAAA0L6C7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAABABJREFUeJzt2bGKVGcch2FnM7BrSBMhaBOxtZA0goKNjdcQL8NG8ArSeSVeghdgIUlhJYtoI6TIohYuy67sHuvhpJDF4zf78jzlFH9+DLx8MLOapukS0LEzegDwfYkaYkQNMaKGGFFDzHqJow/WD7fqJ/Wdvd3RE2ZWu9u1aTo+Hj1hw9nR0egJc1v2T9Hzs2er//vcSw0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ8x6iaM7e7tLnD23k7s3R0+YOfhju76jay8+j56wYfX369ETZqYvJ6MnfBMvNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGmPUSR3eu/LrE2XO78/Tl6Akzf119NXrChkf/3h49YcP+n7+PnjBz+ubd6AnfxEsNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUPMevSAH+HN4W+jJ8wcnB6OnrDh1s/vR0/YsP/TjdETLiwvNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMesljp7+d7DE2XP7+OTm6Akz9+89Hj1hwy/vz0ZP2HDl09vREy4sLzXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihpj1Ekenk5Mlzp7b+p/90RNmrr++PHrCVjv98Gn0hAvLSw0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xIgaYkQNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ8xqmqbRG4DvyEsNMaKGGFFDjKghRtQQI2qIETXEiBpiRA0xooYYUUOMqCFG1BAjaogRNcSIGmJEDTGihhhRQ4yoIUbUECNqiBE1xHwFOXw+G2qP3g0AAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "for y in df.Input.unique():\n",
+ " for x in df.userID.unique():\n",
+ " img = df[(df.userID == x) & (df.Input == y)].iloc[100].Blobs[1:7, 1:7]\n",
+ " plt.axis('off')\n",
+ " plt.imshow(img)\n",
+ " plt.savefig(\"./out/blob_%s_%i.pdf\" % (y,x) , bbox_inches='tight', transparent=False, pad_inches=0)\n",
+ " plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_32_ReadData-Evaluation.ipynb b/python/Step_32_ReadData-Evaluation.ipynb
new file mode 100644
index 0000000..bfba250
--- /dev/null
+++ b/python/Step_32_ReadData-Evaluation.ipynb
@@ -0,0 +1,366 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## This notebook creates one dataframe from all participants' data\n",
+ "## It also removes ~1% of the data, which is corrupted"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "\n",
+ "from scipy.odr import *\n",
+ "from scipy.stats import *\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import os\n",
+ "import time\n",
+ "import matplotlib.pyplot as plt\n",
+ "import ast\n",
+ "from multiprocessing import Pool, cpu_count\n",
+ "\n",
+ "import scipy\n",
+ "\n",
+ "from IPython import display\n",
+ "from matplotlib.patches import Rectangle\n",
+ "\n",
+ "from sklearn.metrics import mean_squared_error\n",
+ "import json\n",
+ "\n",
+ "import scipy.stats as st\n",
+ "from sklearn.metrics import r2_score\n",
+ "\n",
+ "\n",
+ "from matplotlib import cm\n",
+ "from mpl_toolkits.mplot3d import axes3d\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "import copy\n",
+ "\n",
+ "from sklearn.model_selection import LeaveOneOut, LeavePOut\n",
+ "\n",
+ "from multiprocessing import Pool"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def cast_to_int(row):\n",
+ " try:\n",
+ " return np.array([a if float(a) >= 0 else 0 for a in row[2:-1]], dtype=np.uint8)\n",
+ " except Exception as e:\n",
+ " return None\n",
+ " \n",
+ "def load_csv(file):\n",
+ " temp_df = pd.read_csv(file, delimiter=\";\")\n",
+ " temp_df.Image = temp_df.Image.str.split(',')\n",
+ " temp_df.Image = temp_df.Image.apply(cast_to_int)\n",
+ " return temp_df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "['DataStudyEvaluation/2_studyData.csv', 'DataStudyEvaluation/12_studyData.csv', 'DataStudyEvaluation/5_studyData.csv', 'DataStudyEvaluation/1_studyData.csv', 'DataStudyEvaluation/10_studyData.csv', 'DataStudyEvaluation/6_studyData.csv', 'DataStudyEvaluation/3_studyData.csv', 'DataStudyEvaluation/7_studyData.csv', 'DataStudyEvaluation/8_studyData.csv', 'DataStudyEvaluation/9_studyData.csv', 'DataStudyEvaluation/11_studyData.csv', 'DataStudyEvaluation/4_studyData.csv']\n",
+ "CPU times: user 1.35 s, sys: 786 ms, total: 2.14 s\n",
+ "Wall time: 1min 43s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "pool = Pool(cpu_count() - 2)\n",
+ "data_files = [\"DataStudyEvaluation/%s\" % file for file in os.listdir(\"DataStudyEvaluation\") if file.endswith(\".csv\") and \"studyData\" in file]\n",
+ "print(data_files)\n",
+ "df_lst = pool.map(load_csv, data_files)\n",
+ "dfAll = pd.concat(df_lst)\n",
+ "pool.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "608084"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df = dfAll[dfAll.Image.notnull()]\n",
+ "df = df[df.userID != \"userID\"]\n",
+ "df.userID = pd.to_numeric(df.userID)\n",
+ "len(df)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "loaded 610816 values\n",
+ "removed 2732 values (thats 0.447%)\n",
+ "new df has size 608084\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"loaded %s values\" % len(dfAll))\n",
+ "print(\"removed %s values (thats %s%%)\" % (len(dfAll) - len(df), round((len(dfAll) - len(df)) / len(dfAll) * 100, 3)))\n",
+ "print(\"new df has size %s\" % len(df))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df = df.reset_index(drop=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " Timestamp | \n",
+ " Current_Task | \n",
+ " Task_amount | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " RepetitionID | \n",
+ " Actual_Data | \n",
+ " Is_Pause | \n",
+ " Image | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 1553593631562 | \n",
+ " 0 | \n",
+ " 34 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " false | \n",
+ " false | \n",
+ " [3, 3, 3, 2, 0, 0, 1, 0, 0, 0, 1, 2, 1, 0, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 2 | \n",
+ " 1553593631595 | \n",
+ " 0 | \n",
+ " 34 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " false | \n",
+ " false | \n",
+ " [3, 3, 3, 2, 0, 0, 1, 0, 0, 0, 1, 222, 0, 0, 0... | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 1553593631634 | \n",
+ " 0 | \n",
+ " 34 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " false | \n",
+ " false | \n",
+ " [3, 3, 3, 2, 0, 0, 1, 0, 0, 0, 1, 222, 0, 0, 0... | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 2 | \n",
+ " 1553593631676 | \n",
+ " 0 | \n",
+ " 34 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " false | \n",
+ " false | \n",
+ " [3, 3, 3, 2, 0, 0, 1, 0, 0, 0, 1, 222, 0, 0, 0... | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 2 | \n",
+ " 1553593631716 | \n",
+ " 0 | \n",
+ " 34 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " false | \n",
+ " false | \n",
+ " [3, 3, 3, 2, 0, 0, 1, 0, 0, 0, 1, 222, 0, 0, 0... | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID Timestamp Current_Task Task_amount TaskID VersionID \\\n",
+ "0 2 1553593631562 0 34 0 0 \n",
+ "1 2 1553593631595 0 34 0 0 \n",
+ "2 2 1553593631634 0 34 0 0 \n",
+ "3 2 1553593631676 0 34 0 0 \n",
+ "4 2 1553593631716 0 34 0 0 \n",
+ "\n",
+ " RepetitionID Actual_Data Is_Pause \\\n",
+ "0 0 false false \n",
+ "1 0 false false \n",
+ "2 0 false false \n",
+ "3 0 false false \n",
+ "4 0 false false \n",
+ "\n",
+ " Image \n",
+ "0 [3, 3, 3, 2, 0, 0, 1, 0, 0, 0, 1, 2, 1, 0, 0, ... \n",
+ "1 [3, 3, 3, 2, 0, 0, 1, 0, 0, 0, 1, 222, 0, 0, 0... \n",
+ "2 [3, 3, 3, 2, 0, 0, 1, 0, 0, 0, 1, 222, 0, 0, 0... \n",
+ "3 [3, 3, 3, 2, 0, 0, 1, 0, 0, 0, 1, 222, 0, 0, 0... \n",
+ "4 [3, 3, 3, 2, 0, 0, 1, 0, 0, 0, 1, 222, 0, 0, 0... "
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([ 2, 12, 5, 1, 10, 6, 3, 7, 8, 9, 11, 4])"
+ ]
+ },
+ "execution_count": 11,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.userID.unique()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.userID = pd.to_numeric(df.userID)\n",
+ "df.TaskID = pd.to_numeric(df.TaskID)\n",
+ "df.VersionID = pd.to_numeric(df.VersionID)\n",
+ "df.Timestamp = pd.to_numeric(df.Timestamp)\n",
+ "df.Current_Task = pd.to_numeric(df.Current_Task)\n",
+ "df.Task_amount = pd.to_numeric(df.Task_amount)\n",
+ "df.RepetitionID = pd.to_numeric(df.RepetitionID)\n",
+ "df.loc[df.Actual_Data == \"false\", \"Actual_Data\"] = False\n",
+ "df.loc[df.Actual_Data == \"true\", \"Actual_Data\"] = True\n",
+ "df.loc[df.Is_Pause == \"false\", \"Is_Pause\"] = False\n",
+ "df.loc[df.Is_Pause == \"true\", \"Is_Pause\"] = True"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.to_pickle(\"DataStudyEvaluation/AllData.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_33_CNN_PreprocessData.ipynb b/python/Step_33_CNN_PreprocessData.ipynb
new file mode 100644
index 0000000..de77465
--- /dev/null
+++ b/python/Step_33_CNN_PreprocessData.ipynb
@@ -0,0 +1,962 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "from multiprocessing import Pool, cpu_count\n",
+ "\n",
+ "import copy\n",
+ "\n",
+ "import cv2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " Timestamp | \n",
+ " Current_Task | \n",
+ " Task_amount | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " RepetitionID | \n",
+ " Actual_Data | \n",
+ " Is_Pause | \n",
+ " Image | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 56454 | \n",
+ " 12 | \n",
+ " 1553865148939 | \n",
+ " 1 | \n",
+ " 510 | \n",
+ " 17 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 56455 | \n",
+ " 12 | \n",
+ " 1553865148981 | \n",
+ " 1 | \n",
+ " 510 | \n",
+ " 17 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 56456 | \n",
+ " 12 | \n",
+ " 1553865149021 | \n",
+ " 1 | \n",
+ " 510 | \n",
+ " 17 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 56457 | \n",
+ " 12 | \n",
+ " 1553865149060 | \n",
+ " 1 | \n",
+ " 510 | \n",
+ " 17 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 56458 | \n",
+ " 12 | \n",
+ " 1553865149099 | \n",
+ " 1 | \n",
+ " 510 | \n",
+ " 17 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, ... | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID Timestamp Current_Task Task_amount TaskID VersionID \\\n",
+ "56454 12 1553865148939 1 510 17 2 \n",
+ "56455 12 1553865148981 1 510 17 2 \n",
+ "56456 12 1553865149021 1 510 17 2 \n",
+ "56457 12 1553865149060 1 510 17 2 \n",
+ "56458 12 1553865149099 1 510 17 2 \n",
+ "\n",
+ " RepetitionID Actual_Data Is_Pause \\\n",
+ "56454 0 True False \n",
+ "56455 0 True False \n",
+ "56456 0 True False \n",
+ "56457 0 True False \n",
+ "56458 0 True False \n",
+ "\n",
+ " Image \n",
+ "56454 [0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, ... \n",
+ "56455 [0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, ... \n",
+ "56456 [0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, ... \n",
+ "56457 [0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, ... \n",
+ "56458 [0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, ... "
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dfAll = pd.read_pickle(\"DataStudyEvaluation/AllData.pkl\")\n",
+ "df = dfAll[(dfAll.Actual_Data == True) & (dfAll.Is_Pause == False)]\n",
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([12, 5, 1, 10, 6, 3, 7, 8, 9, 11])"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.userID.unique()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 12.2 s, sys: 2.34 s, total: 14.6 s\n",
+ "Wall time: 14.2 s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "def is_max(df):\n",
+ " df_temp = df.copy(deep=True)\n",
+ " max_version = df_temp.RepetitionID.max()\n",
+ " df_temp[\"IsMax\"] = np.where(df_temp.RepetitionID == max_version, True, False)\n",
+ " df_temp[\"MaxRepetition\"] = [max_version] * len(df_temp)\n",
+ " return df_temp\n",
+ "\n",
+ "df_grp = df.groupby([df.userID, df.TaskID, df.VersionID])\n",
+ "pool = Pool(cpu_count() - 1)\n",
+ "result_lst = pool.map(is_max, [grp for name, grp in df_grp])\n",
+ "df = pd.concat(result_lst)\n",
+ "pool.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.Image = df.Image.apply(lambda x: x.reshape(27, 15))\n",
+ "df.Image = df.Image.apply(lambda x: x.clip(min=0, max=255))\n",
+ "df.Image = df.Image.apply(lambda x: x.astype(np.uint8))\n",
+ "df[\"ImageSum\"] = df.Image.apply(lambda x: np.sum(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.to_pickle(\"DataStudyEvaluation/dfFiltered.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "recorded actual: 608084, used data: 413500\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"recorded actual: %s, used data: %s\" % (len(dfAll), len(df)))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df = pd.read_pickle(\"DataStudyEvaluation/dfFiltered.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " Timestamp | \n",
+ " Current_Task | \n",
+ " Task_amount | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " RepetitionID | \n",
+ " Actual_Data | \n",
+ " Is_Pause | \n",
+ " Image | \n",
+ " IsMax | \n",
+ " MaxRepetition | \n",
+ " ImageSum | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 178160 | \n",
+ " 1 | \n",
+ " 1553521741802 | \n",
+ " 16 | \n",
+ " 510 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [[0, 1, 1, 1, 0, 1, 0, 4, 1, 0, 2, 1, 1, 0, 1]... | \n",
+ " False | \n",
+ " 1 | \n",
+ " 286 | \n",
+ "
\n",
+ " \n",
+ " 178161 | \n",
+ " 1 | \n",
+ " 1553521741842 | \n",
+ " 16 | \n",
+ " 510 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [[0, 1, 1, 1, 0, 1, 0, 4, 1, 0, 2, 1, 1, 0, 1]... | \n",
+ " False | \n",
+ " 1 | \n",
+ " 319 | \n",
+ "
\n",
+ " \n",
+ " 178162 | \n",
+ " 1 | \n",
+ " 1553521741882 | \n",
+ " 16 | \n",
+ " 510 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [[0, 1, 1, 1, 0, 1, 0, 4, 1, 0, 2, 1, 1, 0, 1]... | \n",
+ " False | \n",
+ " 1 | \n",
+ " 72 | \n",
+ "
\n",
+ " \n",
+ " 178163 | \n",
+ " 1 | \n",
+ " 1553521741922 | \n",
+ " 16 | \n",
+ " 510 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [[0, 1, 1, 1, 0, 1, 0, 4, 1, 0, 2, 1, 1, 0, 1]... | \n",
+ " False | \n",
+ " 1 | \n",
+ " 288 | \n",
+ "
\n",
+ " \n",
+ " 178164 | \n",
+ " 1 | \n",
+ " 1553521741990 | \n",
+ " 16 | \n",
+ " 510 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [[0, 1, 1, 1, 0, 1, 0, 4, 1, 0, 2, 1, 1, 0, 1]... | \n",
+ " False | \n",
+ " 1 | \n",
+ " 308 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID Timestamp Current_Task Task_amount TaskID VersionID \\\n",
+ "178160 1 1553521741802 16 510 0 2 \n",
+ "178161 1 1553521741842 16 510 0 2 \n",
+ "178162 1 1553521741882 16 510 0 2 \n",
+ "178163 1 1553521741922 16 510 0 2 \n",
+ "178164 1 1553521741990 16 510 0 2 \n",
+ "\n",
+ " RepetitionID Actual_Data Is_Pause \\\n",
+ "178160 0 True False \n",
+ "178161 0 True False \n",
+ "178162 0 True False \n",
+ "178163 0 True False \n",
+ "178164 0 True False \n",
+ "\n",
+ " Image IsMax \\\n",
+ "178160 [[0, 1, 1, 1, 0, 1, 0, 4, 1, 0, 2, 1, 1, 0, 1]... False \n",
+ "178161 [[0, 1, 1, 1, 0, 1, 0, 4, 1, 0, 2, 1, 1, 0, 1]... False \n",
+ "178162 [[0, 1, 1, 1, 0, 1, 0, 4, 1, 0, 2, 1, 1, 0, 1]... False \n",
+ "178163 [[0, 1, 1, 1, 0, 1, 0, 4, 1, 0, 2, 1, 1, 0, 1]... False \n",
+ "178164 [[0, 1, 1, 1, 0, 1, 0, 4, 1, 0, 2, 1, 1, 0, 1]... False \n",
+ "\n",
+ " MaxRepetition ImageSum \n",
+ "178160 1 286 \n",
+ "178161 1 319 \n",
+ "178162 1 72 \n",
+ "178163 1 288 \n",
+ "178164 1 308 "
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Label if knuckle or finger\n",
+ "def f(row):\n",
+ " if row['TaskID'] < 17:\n",
+ " #val = \"Knuckle\"\n",
+ " val = 0\n",
+ " elif row['TaskID'] >= 17:\n",
+ " #val = \"Finger\"\n",
+ " val = 1\n",
+ " return val\n",
+ "df['InputMethod'] = df.apply(f, axis=1)\n",
+ "\n",
+ "def f(row):\n",
+ " if row['TaskID'] < 17:\n",
+ " val = \"Knuckle\"\n",
+ " elif row['TaskID'] >= 17:\n",
+ " val = \"Finger\"\n",
+ " return val\n",
+ "df['Input'] = df.apply(f, axis=1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Blob detection: pad the 27x15 image to 29x17, threshold, and return bounding-box crops of the largest contour(s)\n",
+ "def detect_blobs(image, task):\n",
+ "    #image = e.Image\n",
+ "    large = np.ones((29,17), dtype=np.uint8)\n",
+ "    large[1:28,1:16] = np.copy(image)\n",
+ "    temp, thresh = cv2.threshold(cv2.bitwise_not(large), 200, 255, cv2.THRESH_BINARY)\n",
+ "    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n",
+ "    contours = [a for a in contours if cv2.contourArea(a) > 8 and cv2.contourArea(a) < 255]\n",
+ "    lstBlob = []\n",
+ "    lstMin = []\n",
+ "    lstMax = []\n",
+ "    count = 0\n",
+ "    contours.sort(key=lambda a: cv2.contourArea(a))\n",
+ "    if len(contours) > 0:\n",
+ "        # if two finger or knuckle\n",
+ "        cont_count = 2 if task in [1, 6, 7, 18, 23, 24] and len(contours) > 1 else 1\n",
+ "        for i in range(1, cont_count + 1):\n",
+ "            max_contour = contours[-1 * i]\n",
+ "            xmax, ymax = np.max(max_contour.reshape(len(max_contour),2), axis=0)\n",
+ "            xmin, ymin = np.min(max_contour.reshape(len(max_contour),2), axis=0)\n",
+ "            #croped_im = np.zeros((27,15))\n",
+ "            blob = large[max(ymin - 1, 0):min(ymax + 1, large.shape[0]),max(xmin - 1, 0):min(xmax + 1, large.shape[1])]\n",
+ "            #croped_im[0:blob.shape[0],0:blob.shape[1]] = blob\n",
+ "            #return (1, [croped_im])\n",
+ "            lstBlob.append(blob)\n",
+ "            lstMin.append(xmax-xmin)\n",
+ "            lstMax.append(ymax-ymin)\n",
+ "            count = count + 1\n",
+ "        return (count, lstBlob, lstMin, lstMax)\n",
+ "    else:\n",
+ "        return (0, [np.zeros((29, 17))], 0, 0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 5.65 s, sys: 4.52 s, total: 10.2 s\n",
+ "Wall time: 9.76 s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "pool = Pool(cpu_count() - 1)\n",
+ "temp_blobs = pool.starmap(detect_blobs, zip(df.Image, df.TaskID))\n",
+ "pool.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"BlobCount\"] = [a[0] for a in temp_blobs]\n",
+ "df[\"BlobImages\"] = [a[1] for a in temp_blobs]\n",
+ "df[\"BlobW\"] = [a[2] for a in temp_blobs]\n",
+ "df[\"BlobH\"] = [a[3] for a in temp_blobs]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0 334475\n",
+ "1 73449\n",
+ "2 5576\n",
+ "Name: BlobCount, dtype: int64"
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.BlobCount.value_counts()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dfX = df[(df.BlobCount == 1)].copy(deep=True)\n",
+ "dfX.BlobImages = dfX.BlobImages.apply(lambda x : x[0])\n",
+ "dfX.BlobW = dfX.BlobW.apply(lambda x : x[0])\n",
+ "dfX.BlobH = dfX.BlobH.apply(lambda x : x[0])\n",
+ "\n",
+ "dfY = df[(df.BlobCount == 2)].copy(deep=True)\n",
+ "dfY.BlobImages = dfY.BlobImages.apply(lambda x : x[0])\n",
+ "dfY.BlobW = dfY.BlobW.apply(lambda x : x[0])\n",
+ "dfY.BlobH = dfY.BlobH.apply(lambda x : x[0])\n",
+ "\n",
+ "dfZ = df[(df.BlobCount == 2)].copy(deep=True)\n",
+ "dfZ.BlobImages = dfZ.BlobImages.apply(lambda x : x[1])\n",
+ "dfZ.BlobW = dfZ.BlobW.apply(lambda x : x[1])\n",
+ "dfZ.BlobH = dfZ.BlobH.apply(lambda x : x[1])\n",
+ "\n",
+ "df = dfX.append([dfY, dfZ])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Sample Size not Augmented: 84601\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"Sample Size not Augmented:\", len(df))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"BlobArea\"] = df[\"BlobW\"] * df[\"BlobH\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "count 84601.0\n",
+ "mean 16.5\n",
+ "std 5.9\n",
+ "min 12.0\n",
+ "25% 12.0\n",
+ "50% 16.0\n",
+ "75% 16.0\n",
+ "max 72.0\n",
+ "Name: BlobArea, dtype: float64"
+ ]
+ },
+ "execution_count": 18,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.BlobArea.describe().round(1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " count | \n",
+ " mean | \n",
+ " std | \n",
+ " min | \n",
+ " 25% | \n",
+ " 50% | \n",
+ " 75% | \n",
+ " max | \n",
+ "
\n",
+ " \n",
+ " Input | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " Finger | \n",
+ " 59879.0 | \n",
+ " 17.3 | \n",
+ " 5.8 | \n",
+ " 12.0 | \n",
+ " 12.0 | \n",
+ " 16.0 | \n",
+ " 20.0 | \n",
+ " 56.0 | \n",
+ "
\n",
+ " \n",
+ " Knuckle | \n",
+ " 24722.0 | \n",
+ " 14.8 | \n",
+ " 5.7 | \n",
+ " 12.0 | \n",
+ " 12.0 | \n",
+ " 12.0 | \n",
+ " 16.0 | \n",
+ " 72.0 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " count mean std min 25% 50% 75% max\n",
+ "Input \n",
+ "Finger 59879.0 17.3 5.8 12.0 12.0 16.0 20.0 56.0\n",
+ "Knuckle 24722.0 14.8 5.7 12.0 12.0 12.0 16.0 72.0"
+ ]
+ },
+ "execution_count": 19,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.groupby(\"Input\").BlobArea.describe().round(1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"BlobSum\"] = df.BlobImages.apply(lambda x: np.sum(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "count 84601.000000\n",
+ "mean 1238.209170\n",
+ "std 485.150602\n",
+ "min 467.000000\n",
+ "25% 930.000000\n",
+ "50% 1094.000000\n",
+ "75% 1383.000000\n",
+ "max 4275.000000\n",
+ "Name: BlobSum, dtype: float64"
+ ]
+ },
+ "execution_count": 21,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.BlobSum.describe()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 22,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYcAAAD8CAYAAACcjGjIAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAExRJREFUeJzt3XuMXOV5x/HvU3OzyAVz0cqyUe00ViMTtwRcIEoUrUAFA1HMHyQCoWBSGksFpKR1lZhGKrkhkVQUgkQSucHFpGkMIYlA4Ii64FVaVVzDxRhEWcARthysxFziRCHd5Okf824Y9t31zHhmdgb7+5FGe85z3jnzzGvv/nbOOTMbmYkkSc3+aNANSJKGj+EgSaoYDpKkiuEgSaoYDpKkiuEgSaoYDpKkiuEgSaoYDpKkyiGDbmB/HXvssblo0aKu9vGrX/2KI488sjcN9YH9dcf+umN/3RnG/h555JGfZ+ZxbQ3OzLfk7eSTT85ubdmypet99JP9dcf+umN/3RnG/oCHs82fsR5WkiRVDAdJUsVwkCRVDAdJUsVwkCRVDAdJUsVwkCRVDAdJUsVwkCRV3rIfn/FWtGjt3R2NX7Nsgks6vM9Mtl9zbk/2I+ng4CsHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVKl7XCIiDkR8WhE3FXWF0fEAxExHhG3RsRhpX54WR8v2xc17ePKUn8mIs5qqq8otfGIWNu7pydJ2h+dvHL4FPB00/pXgOsy893Ay8ClpX4p8HKpX1fGERFLgQuAE4AVwNdL4MwBbgTOBpYCF5axkqQBaSscImIhcC7wrbIewOnA7WXIBuC8sryyrFO2n1HGrwQ2ZubrmfkCMA6cUm7jmfl8Zv4W2FjGSpIGpN2P7L4e+Azw9rJ+DPBKZk6U9R3AgrK8AHgRIDMnIuLVMn4BcH/TPpvv8+KU+qnTNRERq4HVACMjI4yNjbXZ/vT27t3b9T46sWbZROtBTUbmdn6fmfTjec72/HXK/rpjf90Z9v5aaRkOEfFhYHdmPhIRo/1vaWaZuQ5YB7B8+fIcHe2unbGxMbrdRyc6/dsMa5ZNcO3W3vzJje0XjfZkP81me/46ZX/dsb/uDHt/rbTzk+cDwEci4hzgCOAdwNeAoyLikPLqYSGws4zfCRwP7IiIQ4B3Ar9oqk9qvs9MdUnSALQ855CZV2bmwsxcROOE8n2ZeRGwBTi/DFsF3FGW7yzrlO33ZWaW+gXlaqbFwBLgQeAhYEm5+umw8hh39uTZSZL2SzfHLD4LbIyILwOPAjeV+k3AtyNiHNhD44c9mbktIm4DngImgMsz83cAEXEFcA8wB1ifmdu66EuS1KWOwiEzx4Cxsvw8jSuNpo75DfDRGe5/NXD1NPVNwKZOepEk9Y/vkJYkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVQwHSVLFcJAkVVqGQ0QcEREPRsTjEbEtIr5Q6osj4oGIGI+IWyPisFI/vKyPl+2LmvZ1Zak/ExFnNdVXlNp4RKzt/dOUJHWinVcOrwOnZ+afAycCKyLiNOArwHWZ+W7gZeDSMv5S4OVSv66MIyKWAhcAJwArgK9HxJyImAPcCJwNLAUuLGMlSQPSMhyyYW9ZPbTcEjgduL3UNwDnleWVZZ2y/YyIiFLfmJmvZ+YLwDhwSrmNZ+bzmflbYGMZK0kakEPaGVR+u38EeDeN3/KfA17JzIkyZAewoCwvAF4EyMyJiHgVOKbU72/abfN9
XpxSP3WGPlYDqwFGRkYYGxtrp/0Z7d27t+t9dGLNsonWg5qMzO38PjPpx/Oc7fnrlP11x/66M+z9tdJWOGTm74ATI+Io4IfAe/ra1cx9rAPWASxfvjxHR0e72t/Y2Bjd7qMTl6y9u6Pxa5ZNcO3Wtv6JWtp+0WhP9tNstuevU/bXHfvrzrD310pHVytl5ivAFuD9wFERMfmTayGwsyzvBI4HKNvfCfyiuT7lPjPVJUkD0s7VSseVVwxExFzgL4GnaYTE+WXYKuCOsnxnWadsvy8zs9QvKFczLQaWAA8CDwFLytVPh9E4aX1nL56cJGn/tHPMYj6woZx3+CPgtsy8KyKeAjZGxJeBR4GbyvibgG9HxDiwh8YPezJzW0TcBjwFTACXl8NVRMQVwD3AHGB9Zm7r2TOUJHWsZThk5hPA+6apP0/jSqOp9d8AH51hX1cDV09T3wRsaqNfSdIs8B3SkqSK4SBJqhgOkqSK4SBJqhgOkqSK4SBJqhgOkqSK4SBJqhgOkqSK4SBJqhgOkqSK4SBJqhgOkqSK4SBJqhgOkqSK4SBJqhgOkqSK4SBJqhgOkqSK4SBJqhgOkqSK4SBJqhgOkqSK4SBJqhgOkqSK4SBJqhgOkqSK4SBJqhgOkqSK4SBJqhgOkqSK4SBJqhgOkqSK4SBJqhgOkqSK4SBJqhgOkqSK4SBJqrQMh4g4PiK2RMRTEbEtIj5V6kdHxOaIeLZ8nVfqERE3RMR4RDwRESc17WtVGf9sRKxqqp8cEVvLfW6IiOjHk5UktaedVw4TwJrMXAqcBlweEUuBtcC9mbkEuLesA5wNLCm31cA3oBEmwFXAqcApwFWTgVLGfLLpfiu6f2qSpP3VMhwyc1dm/qQs/xJ4GlgArAQ2lGEbgPPK8krglmy4HzgqIuYDZwGbM3NPZr4MbAZWlG3vyMz7MzOBW5r2JUkagI7OOUTEIuB9wAPASGbuKpt+BoyU5QXAi01321Fq+6rvmKYuSRqQQ9odGBFvA74PfDozX2s+LZCZGRHZh/6m9rCaxqEqRkZGGBsb62p/e/fu7XofnVizbKKj8SNzO7/PTPrxPGd7/jplf92xv+4Me3+ttBUOEXEojWD4Tmb+oJRfioj5mbmrHBraXeo7geOb7r6w1HYCo1PqY6W+cJrxlcxcB6wDWL58eY6Ojk43rG1jY2N0u49OXLL27o7Gr1k2wbVb287vfdp+0WhP9tNstuevU/bXHfvrzrD310o7VysFcBPwdGb+c9OmO4HJK45WAXc01S8uVy2dBrxaDj/dA5wZEfPKiegzgXvKttci4rTyWBc37UuSNADt/Fr6AeDjwNaIeKzU/gG4BrgtIi4Ffgp8rGzbBJwDjAO/Bj4BkJl7IuJLwENl3Bczc09Zvgy4GZgL/KjcJEkD0jIcMvO/gZned3DGNOMTuHyGfa0H1k9Tfxh4b6teJEmzw3dIS5IqhoMkqWI4SJIqhoMkqWI4SJIqhoMkqWI4SJIqhoMkqWI4SJIqhoMkqWI4SJIqhoMkqWI4SJIqvflLMhp6izr8Q0PtWLNsouUfMNp+zbk9f1xJ/ecrB0lSxXCQJFUMB0lSxXCQJFUMB0lSxXCQJFUMB0lSxXCQJFUMB0lSxXCQJFUMB0lSxXCQJFUMB0lSxXCQJFUMB0lSxXCQJFUMB0lSxXCQJFUMB0lSxXCQJFUOGXQDg7Bo7d0ArFk2wSVlWZL0Bl85SJIqhoMkqWI4SJIqhoMkqdIyHCJifUTsjognm2pHR8TmiHi2fJ1X6hERN0TEeEQ8EREnNd1nVRn/bESsaqqfHBFby31uiIjo9ZOUJHWmnVcONwMrptTWAvdm5hLg3rIOcDawpNxWA9+ARpgAVwGnAqcAV00GShnzyab7TX0sSdIsaxkOmfljYM+U8kpgQ1neAJzXVL8lG+4HjoqI+cBZwObM3JOZLwObgRVl2zsy8/7MTOCWpn1JkgZkf885jGTmrrL8M2CkLC8AXmwat6PU9lXf
MU1dkjRAXb8JLjMzIrIXzbQSEatpHK5iZGSEsbGx/drPmmUTAIzMfWN5GB0I/e3vv1Ev7N27d6CP34r9dcf++mt/w+GliJifmbvKoaHdpb4TOL5p3MJS2wmMTqmPlfrCacZPKzPXAesAli9fnqOjozMN3adLmt4hfe3W4X2T+IHQ3/aLRmenmWmMjY2xv/9HZoP9dcf++mt/DyvdCUxecbQKuKOpfnG5auk04NVy+Oke4MyImFdORJ8J3FO2vRYRp5WrlC5u2pckaUBa/loaEd+l8Vv/sRGxg8ZVR9cAt0XEpcBPgY+V4ZuAc4Bx4NfAJwAyc09EfAl4qIz7YmZOnuS+jMYVUXOBH5WbJGmAWoZDZl44w6YzphmbwOUz7Gc9sH6a+sPAe1v1IUmaPb5DWpJUMRwkSRXDQZJUMRwkSRXDQZJUMRwkSRXDQZJUMRwkSRXDQZJUMRwkSRXDQZJUMRwkSRXDQZJUMRwkSZXh/TNjOiAsKn91bxBuXnHkwB5beqvzlYMkqWI4SJIqhoMkqWI4SJIqhoMkqWI4SJIqhoMkqWI4SJIqhoMkqWI4SJIqhoMkqWI4SJIqhoMkqWI4SJIqhoMkqeLfc9ABa+vOV7lkgH9PopU1yyZ62t/2a87t2b4kXzlIkiqGgySpYjhIkiqGgySpYjhIkiqGgySp4qWs0gFiUY8v2+3kUlsvoz3w+MpBklQZmnCIiBUR8UxEjEfE2kH3I0kHs6EIh4iYA9wInA0sBS6MiKWD7UqSDl7Dcs7hFGA8M58HiIiNwErgqYF2JaktvT7f0Y41yyYYnfVHPXgMSzgsAF5sWt8BnDqgXiS9RQwilCYd6CfhIzMH3QMRcT6wIjP/uqx/HDg1M6+YMm41sLqs/inwTJcPfSzw8y730U/21x376479dWcY+/vjzDyunYHD8sphJ3B80/rCUnuTzFwHrOvVg0bEw5m5vFf76zX76479dcf+ujPs/bUyFCekgYeAJRGxOCIOAy4A7hxwT5J00BqKVw6ZORERVwD3AHOA9Zm5bcBtSdJBayjCASAzNwGbZvlhe3aIqk/srzv21x37686w97dPQ3FCWpI0XIblnIMkaYgc8OEQEdsjYmtEPBYRD5fa0RGxOSKeLV/nlXpExA3lIzyeiIiT+tDP+ojYHRFPNtU67iciVpXxz0bEqj739/mI2Fnm8LGIOKdp25Wlv2ci4qymel8+DiUijo+ILRHxVERsi4hPlfrA53AfvQ3F/EXEERHxYEQ8Xvr7QqkvjogHymPdWi4KISIOL+vjZfuiVn33qb+bI+KFpvk7sdRn/fuj7HtORDwaEXeV9aGYv57LzAP6BmwHjp1S+yqwtiyvBb5Sls8BfgQEcBrwQB/6+RBwEvDk/vYDHA08X77OK8vz+tjf54G/n2bsUuBx4HBgMfAcjQsK5pTldwGHlTFLe9TffOCksvx24H9LHwOfw330NhTzV+bgbWX5UOCBMie3AReU+jeBvynLlwHfLMsXALfuq+8+9nczcP4042f9+6Ps/++AfwfuKutDMX+9vh3wrxxmsBLYUJY3AOc11W/JhvuBoyJifi8fODN/DOzpsp+zgM2ZuSczXwY2Ayv62N9MVgIbM/P1zHwBGKfxUSh/+DiUzPwtMPlxKL3ob1dm/qQs/xJ4msY77Ac+h/vobSazOn9lDvaW1UPLLYHTgdtLfercTc7p7cAZERH76Ltf/c1k1r8/ImIhcC7wrbIeDMn89drBEA4J/EdEPBKNd1gDjGTmrrL8M2CkLE/3MR77+ubulU77GUSfV5SX7usnD9kMur/yMv19NH7DHKo5nNIbDMn8lUMijwG7afzQfA54JTMnpnmsP/RRtr8KHDOb/WXm5PxdXebvuog4fGp/U/ro57/t9cBngN+X9WMYovnrpYMhHD6YmSfR+MTXyyPiQ80bs/E6b2gu2Rq2fopvAH8CnAjsAq4dbDsQEW8Dvg98OjNfa9426Dmcprehmb/M/F1mnkjj
UwhOAd4zqF6mM7W/iHgvcCWNPv+CxqGizw6it4j4MLA7Mx8ZxOPPtgM+HDJzZ/m6G/ghjW+IlyYPF5Wvu8vwtj7Gow867WdW+8zMl8o37e+Bf+GNl8AD6S8iDqXxw/c7mfmDUh6KOZyut2Gbv9LTK8AW4P00DsdMvuep+bH+0EfZ/k7gF7Pc34pyuC4z83XgXxnc/H0A+EhEbKdxqO904GsM4fz1xKBPevTzBhwJvL1p+X9oHHv8J9588vKrZflc3nyC68E+9bWIN5/w7agfGr89vUDjZNu8snx0H/ub37T8tzSOlwKcwJtPrD1P42TqIWV5MW+cUD2hR70FcAtw/ZT6wOdwH70NxfwBxwFHleW5wH8BHwa+x5tPqF5Wli/nzSdUb9tX333sb37T/F4PXDPI74/yGKO8cUJ6KOav17eBN9DXJ9e42uPxctsGfK7UjwHuBZ4F/nPyP075T3YjjeOwW4HlfejpuzQOLfwfjWONl+5PP8Bf0TiRNQ58os/9fbs8/hM0PvOq+Yfd50p/zwBnN9XPoXG1znOT896j/j5I45DRE8Bj5XbOMMzhPnobivkD/gx4tPTxJPCPTd8nD5Z5+B5weKkfUdbHy/Z3teq7T/3dV+bvSeDfeOOKpln//mja/yhvhMNQzF+vb75DWpJUOeDPOUiSOmc4SJIqhoMkqWI4SJIqhoMkqWI4SJIqhoMkqWI4SJIq/w/fXLnLbRy6cwAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "df.BlobSum.hist()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0"
+ ]
+ },
+ "execution_count": 23,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Count blobs whose total intensity is low, i.e. pixels were only \"lightly\" hit\n",
+ "dfX = df[df.BlobSum <= 255]\n",
+ "len(dfX)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Sample Size augmented: 84601\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"Sample Size augmented:\", len(df))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def pasteToEmpty (blob):\n",
+ " croped_im = np.zeros((27,15))\n",
+ " croped_im[0:blob.shape[0],0:blob.shape[1]] = blob\n",
+ " return croped_im"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"Blobs\"] = df.BlobImages.apply(lambda x: pasteToEmpty(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.to_pickle(\"DataStudyEvaluation/df_statistics.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 28,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[[\"userID\", \"TaskID\", \"Blobs\", \"InputMethod\"]].to_pickle(\"DataStudyEvaluation/df_blobs_area.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# display blobs"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "userID 1\n",
+ "Timestamp 1553522289862\n",
+ "Current_Task 155\n",
+ "Task_amount 510\n",
+ "TaskID 0\n",
+ "VersionID 11\n",
+ "RepetitionID 0\n",
+ "Actual_Data True\n",
+ "Is_Pause False\n",
+ "Image [[1, 1, 1, 0, 0, 0, 0, 2, 1, 2, 2, 0, 3, 0, 0]...\n",
+ "IsMax True\n",
+ "MaxRepetition 0\n",
+ "ImageSum 933\n",
+ "InputMethod 0\n",
+ "Input Knuckle\n",
+ "BlobCount 1\n",
+ "BlobImages [[2, 2, 4, 5, 2], [1, 5, 11, 13, 5], [4, 9, 71...\n",
+ "BlobW 3\n",
+ "BlobH 4\n",
+ "BlobArea 12\n",
+ "BlobSum 710\n",
+ "Blobs [[2.0, 2.0, 4.0, 5.0, 2.0, 0.0, 0.0, 0.0, 0.0,...\n",
+ "Name: 191534, dtype: object\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAANoAAAFpCAYAAAD6NDa0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAECNJREFUeJzt3X/oXfV9x/HXy/xwkAZNDPsuGtvUYoM/oIla040gGQUXZTQWiihzS+dGWrZA/WvLWljFMli30sGwMGSVZtDZdrPWEOw0c90siFlj8EestUldJIZodGlMdEhNvu/9cc+3uzveb+4933Pu209ung/48r333Pf3fM69N6+cX+97jyNCAMbrnPd6AYCzAUEDEhA0IAFBAxIQNCABQQMSEDQgAUEDEhA0IAFBAxLMzxzMdlH9XrYb/w0ta6iLiKH/kFKDJknnnDO+lWjTec8laKdOnWpUPz093XgMTJ5W/+ptb7D9gu39trd2tVDApJlz0GzPk/Q1STdIulzSrbYv72rBgEnSZo12raT9EfFiRPxC0rckbexmsYDJ0iZoF0k62Hf/5WpaY7F6dVH10x/96Fjnr7Vrz676jDFKq6/xXI+i2f6UpA0R8YfV/d+VtDYittTqNkvaXN29moMhmDTjPup4SNLFffdXVNPqC3GPpHuk8g7vA1narF5+JOlS2x+0vVDSLZK2d7NYwGSZ8xotIk7a3iLpYUnzJN0bEc91tmTABJnzPtqcBrODfTRMmiI7Q5oEe/78Zou3cOHCRvVzCf3bb7/dqJ6gQaKpGEhB0IAEBA1IQNCABAQNSFBE0GLVqkb1TXsRpy+7TNHgCObJtWs1PTU1tuUpri+PXsfu62vSz6M1OXc1CYf333nnncZj4Mwyynm0ItZowKQjaEACggYkIGhAgvRex3nz5o1c2/RgyLp165ouTmOPP/54o3oOhkBijQakIGhAAoIGJCBoQAKCBiQoImjj/t7Ft1as0HSDo51N65v2RhbXl0evY/f1Nem9jk0O2S9YsKDR/K+77rqmi9RY08P7J06cGNOSoBT0OgKFIGhAAoIGJCBoQIL0Xscm33PY9IOct912W6P61157rVG9JO3atavx3wCs0YAEBA1IQNCABAQNSEDQgARFBK1pr+OpK65QnHfeSLX7li3T8XPPHXne/3XhhTq6eHGz5fnwhzV9wQWj/0FpfXn0OnZfX1P09dEWN/wHf/fddzeqn8vh/bvuuqtR/bFjxxqPgTMLvY5AIQgakICgAQkIGpCg6IMhixYtajT/NWvWNKo/cuRIo3pJOnjwYKP6t956q/EYOLNwMAQoBEEDEhA0IAFBAxIQNCABQQMSFBG0pk3FTb+w9NhHPqJ3GvRNvn3ZZTrZoEm46cXoi2uApam4+/oazqM1xHk01HEeDSgEQQMSEDQgAUEDEqQfDLGH7jf+UpMLy0vNrz7T5MtcZ5w6dapR/cmTJxuPgTMLB0OAQrT6SnDbBySdkHRK0smIuKaLhQImTRffvf+bEfF6B/MBJhabjkCCtkELSY/YftL25i4WCJhEbYO2LiKuknSDpD+2/a6LSNvebHu37d2zzSRWrWo06LgvLj/9oQ81qm+6PMX15dHr2H19TWeH923fKenNiPjKaWo4vI+JM9bD+7YX2V48c1vS9ZL2znV+wCRrc9RxStID1RpqvqR/jIh/6WSpgAlDZ0hDbDqijs4QoBDpa7S0wYAkrNGAQhA0IAFBAxIQNCABQQMSlBG00vrUqO+2PmOM0uprOLwPtMThfaAQBA1IQNCABAQNSEDQgAQEDUhA0IAEBA1IQNCABAQNSFBG0ErrU6O+2/qMMUqrr6HXEWiJXkegEAQNSEDQgAQEDUhA0IAEBA1IQNCABAQNSEDQgAQEDUhQRtBK61Ojvtv6jDFKq6+h1xFoiV5HoBAEDUhA0IAEBA1IQNCABAQNSEDQgAQEDUhA0IAEBA1I
UEbQSutTo77b+owxSquvodcRaIleR6AQBA1IQNCABAQNSEDQgAQEDUhA0IAEQ4Nm+17bR2zv7Zu21PZO2/uq30vGu5jAmW2UNdo3JG2oTdsq6dGIuFTSo9V9ALMYGrSIeEzS0drkjZK2Vbe3Sbqp4+UCJspc99GmIuJwdfsVSVOzFdrebHu37d2zzq20PjXqu63PGKO0+pqReh1tr5S0IyKurO4fi4jz+x7/eUQM3U+j1xGTaJy9jq/aXi5J1e8jc5wPcFaYa9C2S9pU3d4k6cFuFgeYTEM3HW3fJ2m9pGWSXpX0RUnfk/QdSe+X9JKkmyOifsBk0LzYdMTEGWXTkc+jAS3xeTSgEAQNSEDQgAQEDUhA0IAEBA1IUEbQSutTo77b+owxSquv4Twa0BLn0YBCEDQgAUEDEhA0IAFBAxIQNCABQQMSEDQgAUEDEhA0IEEZQSutT436buszxiitvoZeR6Aleh2BQhA0IAFBAxIQNCABQQMSEDQgAUEDEhA0IAFBAxIQNCBBGUErrU+N+m7rM8Yorb6GXkegJXodgUIQNCABQQMSEDQgAUEDEhA0IAFBAxIQNCABQQMSEDQgQRlBK61Pjfpu6zPGKK2+hl5HoCV6HYFCEDQgAUEDEhA0IAFBAxIQNCABQQMSDA2a7XttH7G9t2/anbYP2X6q+rlxvIsJnNlGWaN9Q9KGAdP/JiJWVz8PdbtYwGQZGrSIeEzS0YRlASZWm320LbafqTYtl7RaitL61Kjvtj5jjNLqa0bqdbS9UtKOiLiyuj8l6XVJIelLkpZHxO2z/O1mSZuru1e3WlqgQKP0Os4paKM+NqCWpmJMnLE1Fdte3nf3k5L2zlYLQJo/rMD2fZLWS1pm+2VJX5S03vZq9TYdD0j6zBiXETjj8Xk0oCU+jwYUgqABCQgakICgAQkIGpCAoAEJCBqQoIygldYQSn239RljlFZfwwlroCVOWAOFIGhAAoIGJCBoQAKCBiQgaEACggYkIGhAAoIGJCBoQIIyglZanxr13dZnjFFafQ29jkBL9DoChSBoQAKCBiQgaEACggYkIGhAAoIGJCBoQAKCBiQgaECCMoJWWp8a9d3WZ4xRWn0NvY5AS/Q6AoUgaEACggYkIGhAAoIGJCBoQAKCBiQgaEACggYkIGhAgjKCVlqfGvXd1meMUVp9Db2OQEv0OgKFIGhAAoIGJCBoQAKCBiQgaECCoUGzfbHtH9j+se3nbH+umr7U9k7b+6rfS8a/uMCZaeh5NNvLJS2PiD22F0t6UtJNkj4t6WhE/KXtrZKWRMSfDpkX59EwcTo5jxYRhyNiT3X7hKTnJV0kaaOkbVXZNvXCB2CARvtotldKWiNpl6SpiDhcPfSKpKlOlwyYICMHzfb7JN0v6Y6ION7/WPS2PwduFtrebHu37d2zzry0PjXqu63PGKO0+pqReh1tL5C0Q9LDEfHVatoLktZHxOFqP+7fI2LVkPmwj4aJ08k+mm1L+rqk52dCVtkuaVN1e5OkB+eykMDZYJSjjusk/VDSs5Kmq8mfV28/7TuS3i/pJUk3R8TRIfNijYaJM8oajY/JAC3xMRmgEAQNSEDQgAQEDUhA0IAEBA1IQNCABGUErbQ+Neq7rc8Yo7T6Gk5YAy1xwhooBEEDEhA0IAFBAxIQNCABQQMSEDQgAUEDEhA0IAFBAxKUEbTS+tSo77Y+Y4zS6mvodQRaotcRKARBAxIQNCABQQMSEDQgAUEDEhA0IAFBAxIQNCABQQMSlBG00vrUqO+2PmOM0upr6HUEWqLXESgEQQMSEDQgAUEDEhA0IAFBAxIQNCABQQMSEDQgAUEDEpQRtNL61Kjvtj5jjNLqa+h1BFqi1xEoBEEDEhA0IAFBAxIQNCABQQMSEDQgwdCg2b7Y9g9s/9j2c7Y/V02/0/Yh209VPzeOf3GBM9PQE9a2l0taHhF7bC+W9KSkmyTdLOnNiPjKyINxwhoTaJQT1vNHmMlhSYer2yds
Py/povaLB5w9Gu2j2V4paY2kXdWkLbafsX2v7SVzXorS+tSo77Y+Y4zS6mtG7nW0/T5J/yHpLyLiu7anJL0uKSR9Sb3Ny9sH/N1mSZuru1e3WlqgQKNsOo4UNNsLJO2Q9HBEfHXA4ysl7YiIK4fMh300TJxOmoptW9LXJT3fH7LqIMmMT0raO5eFBM4Goxx1XCfph5KelTRdTf68pFslrVZv0/GApM9UB05ONy/WaJg4nW06doWgYRLxeTSgEAQNSEDQgAQEDUhA0IAEBA1IUEbQSutTo77b+owxSquv4Twa0BLn0YBCEDQgAUEDEhA0IAFBAxIM/c6Qjr0u6aUB05dVj2U728Z9L8ee1HE/MEpR6uH9WRfC3h0R1zDu5I59to1bx6YjkICgAQlKCdo9jDvxY59t4/4/ReyjAZOulDUaMNFSg2Z7g+0XbO+3vXXA4+fa/nb1+K7q+yLbjjnwIh21mvW23+i7YMeftx23mu8B289W89w94HHb/tvq+T5j+6oOxlzV9zyesn3c9h21ms6eb/Ut1Uds7+2bttT2Ttv7qt8Dv8Xa9qaqZp/tTR2M+9e2f1K9lg/YPn+Wvz3t+zIWEZHyI2mepJ9JukTSQklPS7q8VvNHkv6uun2LpG93MO5ySVdVtxdL+umAcder9wWwXT/nA5KWnebxGyV9X5IlfUzSrjG85q9I+sC4nq+k6yRdJWlv37S/krS1ur1V0pcH/N1SSS9Wv5dUt5e0HPd6SfOr218eNO4o78s4fjLXaNdK2h8RL0bELyR9S9LGWs1GSduq2/8s6ePVF7jOWUQcjog91e0Tkkq6SMdGSf8QPU9IOr/2xbRtfVzSzyJiUJNAJyLiMUlHa5P738dt6l19qO63JO2MiKMR8XNJOyVtaDNuRDwSESeru09IWjHq/MYtM2gXSTrYd/9lvfsf/C9rqhfsDUkXdLUAAy7S0e/XbT9t+/u2r+hoyJD0iO0nq2sQ1I3ymrRxi6T7ZnlsHM93xlT835fpviJpakDNuJ/77eptLQwy7H3pXHYL1numukjH/ZLuiIjjtYf3qLd59WZ1QcXvSbq0g2HXRcQh278qaaftn1T/E4+d7YWSPiHpzwY8PK7n+y4REdkf+LX9BUknJX1zlpL09yVzjXZI0sV991dU0wbW2J4v6TxJ/9124OoiHfdL+mZEfLf+eEQcj4g3q9sPSVpge1nbcSPiUPX7iKQH1Nt87jfKazJXN0jaExGvDliusTzfPq/ObAJXv48MqBnLc7f9aUm/Lel3otohqxvhfelcZtB+JOlS2x+s/re9RdL2Ws12STNHnz4l6d9me7FGNdtFOmo1vzazL2j7WvVel1YBt73IvSukyvYi9XbU6xcC2S7p96qjjx+T9EYMuX5BA7dqls3GcTzfmv73cZOkBwfUPCzpettLqqOS11fT5sz2Bkl/IukTEfE/s9SM8r50L/PIi3pH2X6q3tHHL1TT7qpeGEn6FUn/JGm/pP+UdEkHY65Tb5v8GUlPVT83SvqspM9WNVskPafekdAnJP1GB+NeUs3v6WreM8+3f1xL+lr1ejwr6ZqOXudF6gXnvL5pY3m+6oX5sKR31NvP+gP19qsflbRP0r9KWlrVXiPp7/v+9vbqvd4v6fc7GHe/evt9M+/zzBHsCyU9dLr3Zdw/dIYACegMARIQNCABQQMSEDQgAUEDEhA0IAFBAxIQNCDB/wKQFtuEw/WlBAAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "plt.clf()\n",
+ "plt.figure(figsize=(6, 6))\n",
+ "ax = plt.gca()\n",
+ "data_point = 100\n",
+ "data = df.Blobs.iloc[data_point]\n",
+ "print(df.iloc[data_point])\n",
+ "plt.imshow(data, cmap='gray', vmin=0, vmax=255)\n",
+ "# Loop over data dimensions and create text annotations.\n",
+ "for i in range(0, data.shape[0]):\n",
+ " for j in range(0, data.shape[1]):\n",
+ " text = ax.text(j, i, int(data[i, j]),\n",
+ " ha=\"center\", va=\"center\", color=\"cyan\", fontsize=1)\n",
+ "plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_34_CNN-Report.ipynb b/python/Step_34_CNN-Report.ipynb
new file mode 100644
index 0000000..6dc51bc
--- /dev/null
+++ b/python/Step_34_CNN-Report.ipynb
@@ -0,0 +1,273 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "## USE for Multi GPU Systems\n",
+ "#import os\n",
+ "#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+ "\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "\n",
+ "import tensorflow as tf\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "# Importing SK-learn to calculate precision and recall\n",
+ "import sklearn\n",
+ "from sklearn import metrics\n",
+ "\n",
+ "target_names = [\"Knuckle\", \"Finger\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# the data, split between train and test sets\n",
+ "df = pd.read_pickle(\"DataStudyEvaluation/df_blobs_area.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([ 1, 3, 5, 6, 7, 8, 9, 10, 11, 12])"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.userID.unique()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x = np.vstack(df.Blobs)\n",
+ "x = x.reshape(-1, 27, 15, 1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# convert class vectors to binary class matrices (one-hot notation)\n",
+ "num_classes = 2\n",
+ "y = tf.keras.utils.to_categorical(df.InputMethod, num_classes)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "Text(0.5, 1.0, 'Label for image 1 is: [1. 0.]')"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAALEAAAEICAYAAAAQmxXMAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAD0RJREFUeJzt3XuwnHV9x/H3JyeBQBKEAGYgXEIz0U7qDHEGQVugIKLA1An+0RRGaGhpY1XG2nopOm2D1CrjlKodKSOXQETBQShD1BQIGRjqlAqBAU0KNRgSyDEXIFwSrgn59o/nd2Rz2LNn9+xzzp4vfF4zZ/bZfW7f3fPZ3z633Z8iArPMJvS6ALNuOcSWnkNs6TnElp5DbOk5xJbeqIVY0t2S/qLueVW5RtKzku7rrkqQdISkHZL6ul3WeFHXc5J0raTXJK2vqbRO1v2u8hxeHy5Hw4ZY0npJH6qvvK4dD5wKHBYRx3a7sIh4IiKmRsTr3Zc2eiSdLOkuSc8PF6qan9M3ImJWQx0LJP23pJck3d3pwiT9jaTNkl6QtETS3s2mi4hfRcRU4L+GW2bGzYkjgfUR8WKnM0qaOAr1jJUXgSXAF3pcxzbgW8Alnc4o6SPAhcApVP/H3wG+0m1BIw6xpAMk/UTSU+Wj/SeSDhs02WxJ95V33a2SpjfM//7yjn5O0sOSTmpjnecDVwEfKB81XymP/6WkxyRtk7RM0qEN84SkT0taC6xtssxZZZqJ5f7dkr5aatsh6ceSDpT0g/I87pc0q2H+b0t6sox7QNIJDeP2kbS0vD6PSPqipI0N4w+VdHN5DR+X9JmhnntE3BcR1wHr2nidBj+n8yStk7S9rOfjwy2jRR13RsSNwG9GMPtC4OqIWBMRzwL/BJw30loGdNMSTwCuoXpHHQG8DHxn0DR/Cvw5cAiwC/g3AEkzgZ8CXwWmA58HbpZ0cKsVRsTVwF8B95aPy8WSPgh8HVhQ1rMB+OGgWc8EjgPmtvnczgLOBWYCs4F7y3OdDjwCLG6Y9n5gXhl3PfAjSZPLuMXALKoW51TgnIGZJE0Afgw8XNZzCvDZ0lrVRtIUqtf99IiYBvw+8FAZd0RpRI6oc50t/B7V8x3wMDBD0oHdLHTEIY6IZyLi5oh4KSK2A/8M/OGgya6LiNXlo/8fgAVlZ+McYHlELI+I3RGxAlgFnDGCUj4OLImIByPiVeBLVC31rIZpvh4R2yLi5TaXeU1E/Doingf+E/h1aYF2AT8C3jswYUR8v7wWuyLiUmBv4N1l9ALgaxHxbERspLyJi/cBB0fExRHxWkSsA66kegPVbTfwHkn7RMSmiFhTan8iIvaPiCdGYZ3NTAWeb7g/MDytm4V2szmxr6TvStog6QXgHmD/QXvETzYMbwAmAQdRtd5/XFqB5yQ9R7XDdsgISjm0LBuAiNgBPEPVujWrox1bGoZfbnJ/6sAdSZ8vmwrPl+fxDqrnOFBb47obh48EDh30GnwZmNFhrS2VBuRPqD7BNkn6qaTfrXMdHdgB7Ndwf2B4ezcL7WZz4nNULc5xEbEfcGJ5XA3THN4wfASwE3ia6p95XWkFBv6mRETHOwtU22ZHDtwpH58HAv0N04zKpXpl+/eLVC3uARGxP1XrMvAabAIa9xMaX48ngccHvQbTImIkn0YtRcTtEXEqVSPxKFWL3wtrgKMb7h8NbImIZ7pZaLshniRpcsPfRKqPgJeB58oO2+Im850jaa6kfYGLgZvKYZ/vAx+V9BFJfWWZJzXZMWzHDcCfSZpXDtd8Dfh5RKwfwbI6NY1qW/8pYKKkf2TPluZG4EtlJ3gmcEHDuPuA7ZL+ruwA9kl6j6T3NVuRpAllW3tSdVeTJe01XIGSZkiaX97cr1K1hrtH8mTL8vpKHROBCaWOSW3O/j3g/JKJ/YG/B64daS0D2g3xcqrADvxdRHWYZR+qlvV/gNuazHddKXIzMBn4
DEBEPAnMp/r4fIqqVfpCB/X8VkTcSbW9fTNVyzeb0dmubOZ2quf9K6pNmlfYc5PhYmAj8DhwJ3ATVZAob+Y/otopfJzqdbyKanOkmROpXvvlvLEjfUcbNU4A/pbqE2sb1X7LJ2GPkyKd7NidW9Z9OXBCGf5ty16Wd0KzGSPiNuAbwF3AE1Sv2eKGedeM5MiJfFH82JH0SeCsiBi8AzwuSboSOJvqI3/2GK97DtWRn72AT0XEtUNO6xCPHkmHUB1euxeYQ3VY8TsR8a2eFvYWk/kMVgZ7Ad8FjgKeozp+/e89regtyC2xpZfx2gmzPYz55sRe2jsmM2WsV2tjaDvPPh0RLS8hqFMtIZZ0GvBtoA+4qtVJi8lM4Tid0s3Khhnf5YfL7nF9RWYKd8ZNG4afqj5db06U08yXAadTXWBztqR2L7Qx61od28THAo9FxLqIeI1qD3x+Dcs1a0sdIZ7JnmepNrLnxTdIWiRplaRVO6sTVma1GZOjExFxRUQcExHHTKLpt1HMRqyOEPez59VZh7HnFWRmo6qOEN8PzJF0VLmq6ixgWQ3LNWtL14fYImKXpAuorujqo/qWxZoRL3CYQ2h979iv5fjYuav1+Nd2th7vQ2zp1HKcOCKWU10iaDbmfNrZ0nOILT2H2NJziC09h9jSc4gtvd58PWnC0L842rff1CHHAbz0gXe1HL9zauv35f4/a32V4K5Nm1uOt/HHLbGl5xBbeg6xpecQW3oOsaXnEFt6DrGl15vjxK2u2Z3R+ucKLr6s9U/rnji55Wg+eF7rXskm+ThxOm6JLT2H2NJziC09h9jSc4gtPYfY0nOILb3eHCdu8dsS2vFSy1kXXfepluP7Xmm96iP7W3eZ5l+dyMctsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXXm+PELXox3f3Mtpazzr5imEXvGub3iXe82HoBlk5d/ditB7ZTnSvYFRHH1LFcs3bU2RKfHBFP17g8s7Z4m9jSqyvEAdwh6QFJi2papllb6tqcOD4i+iW9E1gh6dGIuGdgZAn2IoDJ7FvTKs0qtbTEEdFfbrcCt1B1lds43p0x2qipo4PyKZKmDQwDHwZWd7tcs3bVsTkxA7hF1TXCE4HrI+K2kS5s9yutLwje3f+bkS7a3qLq6IxxHXB0DbWYjYgPsVl6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl13aIJS2RtFXS6obHpktaIWltuT1gdMo0G1onLfG1wGmDHrsQWBkRc4CV5b7ZmGo7xKVLr8EdL88HlpbhpcCZNdVl1rZu++yYERGbyvBmqk5o3sT92Nloqm3HLiKCqmfRZuPcj52Nmm5DvEXSIQDldmv3JZl1ptsQLwMWluGFwK1dLs+sY50cYrsBuBd4t6SNks4HLgFOlbQW+FC5bzam2t6xi4izhxh1Sk21mI2Iz9hZeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbet32Y3eRpH5JD5W/M0anTLOhdduPHcA3I2Je+VteT1lm7eu2Hzuznqtjm/gCSb8omxtNu8WVtEjSKkmrdvJqDas0e0O3Ib4cmA3MAzYBlzabyP3Y2WjqKsQRsSUiXo+I3cCVwLH1lGXWvq5CPNARY/ExYPVQ05qNlra7ACv92J0EHCRpI7AYOEnSPKrucNcDnxiFGs1a6rYfu6trrMVsRHzGztJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09
h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NLrpB+7wyXdJel/Ja2R9Nfl8emSVkhaW26bdj5jNlo6aYl3AZ+LiLnA+4FPS5oLXAisjIg5wMpy32zMdNKP3aaIeLAMbwceAWYC84GlZbKlwJl1F2nWStvdHTSSNAt4L/BzYEZEbCqjNgMzmky/CFgEMJl9R7JKsyF1vGMnaSpwM/DZiHihcVxEBFUnNAx63P3Y2ajpKMSSJlEF+AcR8R/l4S0DXYGV2631lmjWWidHJ0TVW9IjEfGvDaOWAQvL8ELg1vrKMxteJ9vEfwCcC/xS0kPlsS8DlwA3Sjof2AAsqLdEs9Y66cfuZ4CGGH1KPeWYdc5n7Cw9h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSayvELTpivEhSv6SHyt8Zo1uu2Zu1+0vxAx0xPihpGvCApBVl3Dcj4l9Gpzyz4bUV4tJP3aYyvF3SQEeMZj03kn7sZvFGR4wAF0j6haQlQ/XrLGmRpFWSVu3k1REXa9ZMp/3YDe6I8XJgNjCPqqW+tNl87ozRRlMn/di9qSPGiNgSEa9HxG7gSuDY0SnTbGjtHp1o2hHjQE+ixceA1fWWZza8do9ODNUR49mS5lH157we+ETtFZoNo92jE0N1xLi83nLMOuczdpaeQ2zpOcSWnkNs6TnElp5DbOkpIsZ2hdJTwIZBDx8EPD2mhXTG9XXmyIg4eKxWNuYhblqEtCoijul1HUNxfeObNycsPYfY0hsvIb6i1wUMw/WNY+Nim9isG+OlJTYbMYfY0utpiCWdJun/JD0m6cJe1tKMpPWSfll+jmBVr+sBKN9l3CppdcNj0yWtkLS23Db9ruNbVc9CLKkPuAw4HZhLdYH93F7V08LJETFvHB2HvRY4bdBjFwIrI2IOsLLcf9voZUt8LPBYRKyLiNeAHwLze1hPChFxD7Bt0MPzgaVleClw5pgW1WO9DPFM4MmG+xsZf79lEcAdkh6QtKjXxbQwo/w2CMBmYEYvixlr7X7H7u3q+Ijol/ROYIWkR0tLOG5FREh6Wx037WVL3A8c3nD/sPLYuBER/eV2K3AL4/cnCbYMfPO83G7tcT1jqpchvh+YI+koSXsBZwHLeljPHiRNKb87h6QpwIcZvz9JsAxYWIYXArf2sJYx17PNiYjYJekC4HagD1gSEWt6VU8TM4Bbqp/cYCJwfUTc1tuSQNINwEnAQZI2AouBS4AbJZ1PdZnrgt5VOPZ82tnS8xk7S88htvQcYkvPIbb0HGJLzyG29BxiS+//AWtaZbVAuFrxAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "i = 1\n",
+ "plt.imshow(x[i].reshape(27, 15)) #np.sqrt(784) = 28\n",
+ "plt.title(\"Label for image %i is: %s\" % (i, y[i]))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# If GPU is not available: \n",
+ "# GPU_USE = '/cpu:0'\n",
+ "#config = tf.ConfigProto(device_count = {\"GPU\": 1})\n",
+ "\n",
+ "\n",
+ "# If GPU is available: \n",
+ "config = tf.ConfigProto()\n",
+ "config.log_device_placement = True\n",
+ "config.allow_soft_placement = True\n",
+ "config.gpu_options.allow_growth=True\n",
+ "config.gpu_options.allocator_type = 'BFC'\n",
+ "\n",
+ "# Limit the maximum memory used\n",
+ "config.gpu_options.per_process_gpu_memory_fraction = 0.4\n",
+ "\n",
+ "# set session config\n",
+ "tf.keras.backend.set_session(tf.Session(config=config))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/resource_variable_ops.py:435: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "Colocations handled automatically by placer.\n",
+ "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/layers/core.py:143: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
+ "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "Use tf.cast instead.\n"
+ ]
+ }
+ ],
+ "source": [
+ "loadpath = \"./ModelSnapshots/CNN-33767.h5\"\n",
+ "model = tf.keras.models.load_model(loadpath)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 14.2 s, sys: 1.25 s, total: 15.5 s\n",
+ "Wall time: 11 s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "lst = []\n",
+ "batch = 100\n",
+ "for i in range(0, len(x), batch):\n",
+ " _x = x[i: i+batch]\n",
+ " lst.extend(model.predict(_x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"InputMethodPred\"] = lst\n",
+ "df.InputMethodPred = df.InputMethodPred.apply(lambda x: np.argmax(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_eval = df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[21071 3651]\n",
+ " [ 2096 57783]]\n",
+ "[[0.85231777 0.14768223]\n",
+ " [0.03500392 0.96499608]]\n",
+ "Accuray: 0.932\n",
+ "Recall: 0.909\n",
+ "Precision: 0.932\n",
+ "F1-Score: 0.916\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " Knuckle 0.91 0.85 0.88 24722\n",
+ " Finger 0.94 0.96 0.95 59879\n",
+ "\n",
+ " accuracy 0.93 84601\n",
+ " macro avg 0.93 0.91 0.92 84601\n",
+ "weighted avg 0.93 0.93 0.93 84601\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(sklearn.metrics.confusion_matrix(df_eval.InputMethod.values, df_eval.InputMethodPred.values, labels=[0, 1]))\n",
+ "cm = sklearn.metrics.confusion_matrix(df_eval.InputMethod.values, df_eval.InputMethodPred.values, labels=[0, 1], )\n",
+ "cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n",
+ "print(cm)\n",
+ "print(\"Accuracy: %.3f\" % sklearn.metrics.accuracy_score(df_eval.InputMethod.values, df_eval.InputMethodPred.values))\n",
+ "print(\"Recall: %.3f\" % metrics.recall_score(df_eval.InputMethod.values, df_eval.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"Precision: %.3f\" % metrics.precision_score(df_eval.InputMethod.values, df_eval.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"F1-Score: %.3f\" % metrics.f1_score(df_eval.InputMethod.values, df_eval.InputMethodPred.values, average=\"macro\"))\n",
+ "print(sklearn.metrics.classification_report(df_eval.InputMethod.values, df_eval.InputMethodPred.values, target_names=target_names))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_36_LSTM_ReadData.ipynb b/python/Step_36_LSTM_ReadData.ipynb
new file mode 100644
index 0000000..9849912
--- /dev/null
+++ b/python/Step_36_LSTM_ReadData.ipynb
@@ -0,0 +1,283 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Filtering the data for the LSTM: removes all the rows, where we used the revert button, when the participant performed a wrong gesture\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "from multiprocessing import Pool, cpu_count"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " Timestamp | \n",
+ " Current_Task | \n",
+ " Task_amount | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " RepetitionID | \n",
+ " Actual_Data | \n",
+ " Is_Pause | \n",
+ " Image | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 8351 | \n",
+ " 2 | \n",
+ " 1553594010364 | \n",
+ " 1 | \n",
+ " 510 | \n",
+ " 28 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 8352 | \n",
+ " 2 | \n",
+ " 1553594010414 | \n",
+ " 1 | \n",
+ " 510 | \n",
+ " 28 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 8353 | \n",
+ " 2 | \n",
+ " 1553594010445 | \n",
+ " 1 | \n",
+ " 510 | \n",
+ " 28 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 8354 | \n",
+ " 2 | \n",
+ " 1553594010485 | \n",
+ " 1 | \n",
+ " 510 | \n",
+ " 28 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 8355 | \n",
+ " 2 | \n",
+ " 1553594010525 | \n",
+ " 1 | \n",
+ " 510 | \n",
+ " 28 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ... | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID Timestamp Current_Task Task_amount TaskID VersionID \\\n",
+ "8351 2 1553594010364 1 510 28 2 \n",
+ "8352 2 1553594010414 1 510 28 2 \n",
+ "8353 2 1553594010445 1 510 28 2 \n",
+ "8354 2 1553594010485 1 510 28 2 \n",
+ "8355 2 1553594010525 1 510 28 2 \n",
+ "\n",
+ " RepetitionID Actual_Data Is_Pause \\\n",
+ "8351 0 True False \n",
+ "8352 0 True False \n",
+ "8353 0 True False \n",
+ "8354 0 True False \n",
+ "8355 0 True False \n",
+ "\n",
+ " Image \n",
+ "8351 [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ... \n",
+ "8352 [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ... \n",
+ "8353 [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ... \n",
+ "8354 [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ... \n",
+ "8355 [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ... "
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dfAll = pd.read_pickle(\"DataStudyEvaluation/AllData.pkl\")\n",
+ "df_actual = dfAll[(dfAll.Actual_Data == True) & (dfAll.Is_Pause == False)]\n",
+ "df_actual.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "12"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(df_actual.userID.unique())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "all: 608084, actual data: 495142\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"all: %s, actual data: %s\" % (len(dfAll), len(df_actual)))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 23.3 s, sys: 3.08 s, total: 26.3 s\n",
+ "Wall time: 26 s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "# filter out all gestures where the revert button was pressed during the study and the gesture was repeated\n",
+ "def is_max(df):\n",
+ " df_temp = df.copy(deep=True)\n",
+ " max_version = df_temp.RepetitionID.max()\n",
+ " df_temp[\"IsMax\"] = np.where(df_temp.RepetitionID == max_version, True, False)\n",
+ " df_temp[\"MaxRepetition\"] = [max_version] * len(df_temp)\n",
+ " return df_temp\n",
+ "\n",
+ "df_filtered = df_actual.copy(deep=True)\n",
+ "df_grp = df_filtered.groupby([df_filtered.userID, df_filtered.TaskID, df_filtered.VersionID])\n",
+ "pool = Pool(cpu_count() - 1)\n",
+ "result_lst = pool.map(is_max, [grp for name, grp in df_grp])\n",
+ "df_filtered = pd.concat(result_lst)\n",
+ "df_filtered = df_filtered[df_filtered.IsMax == True]\n",
+ "pool.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_filtered.to_pickle(\"DataStudyEvaluation/df_lstm.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "actual: 495142, filtered data: 457271\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"actual: %s, filtered data: %s\" % (len(df_actual), len(df_filtered)))"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_37_LSTM_Preprocessing.ipynb b/python/Step_37_LSTM_Preprocessing.ipynb
new file mode 100644
index 0000000..27768c4
--- /dev/null
+++ b/python/Step_37_LSTM_Preprocessing.ipynb
@@ -0,0 +1,1303 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Preprocessing for LSTM: Blobdetection and Cutting"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "\n",
+ "from scipy.odr import *\n",
+ "from scipy.stats import *\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import os\n",
+ "import time\n",
+ "import matplotlib.pyplot as plt\n",
+ "import ast\n",
+ "from multiprocessing import Pool, cpu_count\n",
+ "\n",
+ "import scipy\n",
+ "\n",
+ "from IPython import display\n",
+ "from matplotlib.patches import Rectangle\n",
+ "\n",
+ "from sklearn.metrics import mean_squared_error\n",
+ "import json\n",
+ "\n",
+ "import scipy.stats as st\n",
+ "from sklearn.metrics import r2_score\n",
+ "\n",
+ "\n",
+ "from matplotlib import cm\n",
+ "from mpl_toolkits.mplot3d import axes3d\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "import copy\n",
+ "\n",
+ "from sklearn.model_selection import LeaveOneOut, LeavePOut\n",
+ "\n",
+ "from multiprocessing import Pool\n",
+ "import cv2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_filtered = pd.read_pickle(\"DataStudyEvaluation/df_lstm.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " Timestamp | \n",
+ " Current_Task | \n",
+ " Task_amount | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " RepetitionID | \n",
+ " Actual_Data | \n",
+ " Is_Pause | \n",
+ " Image | \n",
+ " IsMax | \n",
+ " MaxRepetition | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 178293 | \n",
+ " 1 | \n",
+ " 1553521747262 | \n",
+ " 16 | \n",
+ " 510 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 3, 1, 1, 1, 2, ... | \n",
+ " True | \n",
+ " 1 | \n",
+ "
\n",
+ " \n",
+ " 178294 | \n",
+ " 1 | \n",
+ " 1553521747302 | \n",
+ " 16 | \n",
+ " 510 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, ... | \n",
+ " True | \n",
+ " 1 | \n",
+ "
\n",
+ " \n",
+ " 178295 | \n",
+ " 1 | \n",
+ " 1553521747342 | \n",
+ " 16 | \n",
+ " 510 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, ... | \n",
+ " True | \n",
+ " 1 | \n",
+ "
\n",
+ " \n",
+ " 178296 | \n",
+ " 1 | \n",
+ " 1553521747388 | \n",
+ " 16 | \n",
+ " 510 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, ... | \n",
+ " True | \n",
+ " 1 | \n",
+ "
\n",
+ " \n",
+ " 178297 | \n",
+ " 1 | \n",
+ " 1553521747422 | \n",
+ " 16 | \n",
+ " 510 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, ... | \n",
+ " True | \n",
+ " 1 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID Timestamp Current_Task Task_amount TaskID VersionID \\\n",
+ "178293 1 1553521747262 16 510 0 2 \n",
+ "178294 1 1553521747302 16 510 0 2 \n",
+ "178295 1 1553521747342 16 510 0 2 \n",
+ "178296 1 1553521747388 16 510 0 2 \n",
+ "178297 1 1553521747422 16 510 0 2 \n",
+ "\n",
+ " RepetitionID Actual_Data Is_Pause \\\n",
+ "178293 1 True False \n",
+ "178294 1 True False \n",
+ "178295 1 True False \n",
+ "178296 1 True False \n",
+ "178297 1 True False \n",
+ "\n",
+ " Image IsMax \\\n",
+ "178293 [0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 3, 1, 1, 1, 2, ... True \n",
+ "178294 [0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, ... True \n",
+ "178295 [0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, ... True \n",
+ "178296 [0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, ... True \n",
+ "178297 [0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, ... True \n",
+ "\n",
+ " MaxRepetition \n",
+ "178293 1 \n",
+ "178294 1 \n",
+ "178295 1 \n",
+ "178296 1 \n",
+ "178297 1 "
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_filtered.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "12"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(df_filtered.userID.unique())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_filtered.Image = df_filtered.Image.apply(lambda x: x.reshape(27, 15))\n",
+ "df_filtered.Image = df_filtered.Image.apply(lambda x: x.clip(min=0, max=255))\n",
+ "df_filtered.Image = df_filtered.Image.apply(lambda x: x.astype(np.uint8))\n",
+ "df_filtered[\"ImageSum\"] = df_filtered.Image.apply(lambda x: np.sum(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# LSTM's new blob detection (only detects whether there are blobs)\n",
+ "def detect_blobs(image):\n",
+ " #image = image.reshape(27, 15)\n",
+ " large = np.ones((29,17), dtype=np.uint8)\n",
+ " large[1:28,1:16] = image\n",
+ " temp, thresh = cv2.threshold(cv2.bitwise_not(large), 200, 255, cv2.THRESH_BINARY)\n",
+ " contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n",
+ " contours = [a for a in contours if cv2.contourArea(a) > 8 and cv2.contourArea(a) < 255]\n",
+ " lstBlob = []\n",
+ " lstMin = []\n",
+ " lstMax = []\n",
+ " count = 0\n",
+ " return len(contours) > 0"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 1.93 s, sys: 581 ms, total: 2.51 s\n",
+ "Wall time: 2.71 s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "pool = Pool(cpu_count() - 1)\n",
+ "temp_blobs = pool.map(detect_blobs, df_filtered.Image)\n",
+ "pool.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_filtered[\"ContainsBlobs\"] = temp_blobs"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Label if knuckle or finger\n",
+ "def f(row):\n",
+ " if row['TaskID'] < 17:\n",
+ " #val = \"Knuckle\"\n",
+ " val = 0\n",
+ " elif row['TaskID'] >= 17:\n",
+ " #val = \"Finger\"\n",
+ " val = 1\n",
+ " return val\n",
+ "df_filtered['InputMethod'] = df_filtered.apply(f, axis=1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Problem: some timestamps are strings (XXXXE+XXXX) which is not accurate enough, switching to index instead\n",
+ "\"\"\"def cast_to_int(x):\n",
+ " if type(x) == int:\n",
+ " return x\n",
+ " x = str(x).replace(\",\", \".\")\n",
+ " return int(float(x))\n",
+ "\n",
+ "df_filtered.Timestamp = df_filtered.Timestamp.map(cast_to_int)\"\"\"\n",
+ "df_filtered.index = range(len(df_filtered))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "1\n",
+ "2\n",
+ "3\n",
+ "4\n",
+ "5\n",
+ "6\n",
+ "7\n",
+ "8\n",
+ "9\n",
+ "10\n",
+ "11\n",
+ "12\n",
+ "CPU times: user 1min 32s, sys: 60.2 ms, total: 1min 32s\n",
+ "Wall time: 1min 32s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "# trim image sequences down to only between first and last detected blob\n",
+ "UserIDs = []\n",
+ "TaskIDs = []\n",
+ "VersionIDs = []\n",
+ "Blobs = []\n",
+ "for userID in df_filtered.userID.unique():\n",
+ " print(userID)\n",
+ " for TaskID in df_filtered[df_filtered.userID == userID].TaskID.unique():\n",
+ " for VersionID in df_filtered[(df_filtered.userID == userID) & (df_filtered.TaskID == TaskID)].VersionID.unique():\n",
+ " first_blob = -1\n",
+ " last_blob = -1\n",
+ " for index, row in df_filtered[(df_filtered.userID == userID) & (df_filtered.TaskID == TaskID) & (df_filtered.VersionID == VersionID)].iterrows():\n",
+ " if row.ContainsBlobs:\n",
+ " last_blob = index\n",
+ " if first_blob == -1:\n",
+ " first_blob = index\n",
+ " if first_blob >= 0 and last_blob >= 0:\n",
+ " UserIDs.append(userID)\n",
+ " TaskIDs.append(TaskID)\n",
+ " VersionIDs.append(VersionID)\n",
+ " Blobs.append(df_filtered[(df_filtered.userID == userID) & (df_filtered.TaskID == TaskID) & (df_filtered.VersionID == VersionID) & (df_filtered.index >= first_blob) & (df_filtered.index <= last_blob)].Image.tolist())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "UserIDs = np.array(UserIDs, dtype=np.int64)\n",
+ "TaskIDs = np.array(TaskIDs, dtype=np.int64)\n",
+ "VersionIDs = np.array(VersionIDs, dtype=np.int64)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " Blobs | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " [[[0, 1, 1, 2, 1, 1, 2, 0, 1, 2, 1, 0, 0, 0, 0... | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 3 | \n",
+ " [[[0, 2, 191, 0, 0, 1, 2, 0, 1, 1, 2, 0, 1, 0,... | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 4 | \n",
+ " [[[0, 0, 0, 0, 1, 2, 2, 1, 0, 0, 0, 0, 1, 0, 2... | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 5 | \n",
+ " [[[0, 1, 0, 2, 2, 0, 1, 0, 3, 1, 1, 0, 0, 0, 0... | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 6 | \n",
+ " [[[1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0... | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID TaskID VersionID \\\n",
+ "0 1 0 2 \n",
+ "1 1 0 3 \n",
+ "2 1 0 4 \n",
+ "3 1 0 5 \n",
+ "4 1 0 6 \n",
+ "\n",
+ " Blobs \n",
+ "0 [[[0, 1, 1, 2, 1, 1, 2, 0, 1, 2, 1, 0, 0, 0, 0... \n",
+ "1 [[[0, 2, 191, 0, 0, 1, 2, 0, 1, 1, 2, 0, 1, 0,... \n",
+ "2 [[[0, 0, 0, 0, 1, 2, 2, 1, 0, 0, 0, 0, 1, 0, 2... \n",
+ "3 [[[0, 1, 0, 2, 2, 0, 1, 0, 3, 1, 1, 0, 0, 0, 0... \n",
+ "4 [[[1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0... "
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_all = pd.DataFrame()\n",
+ "df_lstm_all[\"userID\"] = UserIDs\n",
+ "df_lstm_all[\"TaskID\"] = TaskIDs\n",
+ "df_lstm_all[\"VersionID\"] = VersionIDs\n",
+ "df_lstm_all[\"Blobs\"] = Blobs\n",
+ "df_lstm_all.Blobs = df_lstm_all.Blobs.map(np.array)\n",
+ "df_lstm_all.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " Blobs | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " [[[0, 1, 1, 2, 1, 1, 2, 0, 1, 2, 1, 0, 0, 0, 0... | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 3 | \n",
+ " [[[0, 2, 191, 0, 0, 1, 2, 0, 1, 1, 2, 0, 1, 0,... | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 4 | \n",
+ " [[[0, 0, 0, 0, 1, 2, 2, 1, 0, 0, 0, 0, 1, 0, 2... | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 5 | \n",
+ " [[[0, 1, 0, 2, 2, 0, 1, 0, 3, 1, 1, 0, 0, 0, 0... | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 6 | \n",
+ " [[[1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0... | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID TaskID VersionID \\\n",
+ "0 1 0 2 \n",
+ "1 1 0 3 \n",
+ "2 1 0 4 \n",
+ "3 1 0 5 \n",
+ "4 1 0 6 \n",
+ "\n",
+ " Blobs \n",
+ "0 [[[0, 1, 1, 2, 1, 1, 2, 0, 1, 2, 1, 0, 0, 0, 0... \n",
+ "1 [[[0, 2, 191, 0, 0, 1, 2, 0, 1, 1, 2, 0, 1, 0,... \n",
+ "2 [[[0, 0, 0, 0, 1, 2, 2, 1, 0, 0, 0, 0, 1, 0, 2... \n",
+ "3 [[[0, 1, 0, 2, 2, 0, 1, 0, 3, 1, 1, 0, 0, 0, 0... \n",
+ "4 [[[1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0... "
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_all.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_lstm_all[\"Length\"] = df_lstm_all.Blobs.apply(lambda x: x.shape[0])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 16,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYAAAAD8CAYAAAB+UHOxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAEQRJREFUeJzt3W2MXGd5h/HrrhNCFaPYqenKcqw6qSxVLmlTs0pSFaF1oyZO8sFBQihRRByaylWbqCBRCVPUhkKRTFWompaGmsbCaSkm5UWxEtPUdbOK+BCITY3tJA3eglOyMrbAwbCAaEPvfphnw3S7k52d2Z235/pJoznznGfO3LfPev8+Z86MIzORJNXnp/pdgCSpPwwASaqUASBJlTIAJKlSBoAkVcoAkKRKGQCSVCkDQJIqZQBIUqUu6HcBr2TNmjW5YcOGjp///e9/n4svvnjpCuqTUekD7GVQjUovo9IHdNfLkSNHvpWZr11o3kAHwIYNGzh8+HDHz5+cnGRiYmLpCuqTUekD7GVQjUovo9IHdNdLRDzfzjxPAUlSpQwASaqUASBJlTIAJKlSBoAkVcoAkKRKGQCSVCkDQJIqZQBIUqUG+pPAvbJh56NtzTu16+ZlrkSSescjAEmqlAEgSZUyACSpUgaAJFXKAJCkShkAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVasEAiIj1EfF4RDwTEU9HxNvL+KURcTAiTpb71WU8IuK+iJiKiGMRsblpW9vL/JMRsX352pIkLaSdI4CXgHdm5ibgWuDuiNgE7AQOZeZG4FB5DHAjsLHcdgD3QyMwgHuBa4CrgXtnQ0OS1HsLBkBmns7ML5fl7wHPAuuAbcDeMm0vcEtZ3gY8mA1PAqsiYi1wA3AwM89l5ovAQWDrknYjSWpbZGb7kyM2AE8ArwP+MzNXlfEAXszMVRHxCLArM79Q1h0C3gVMAK/OzD8p438I/DAz/2zOa+ygceTA2NjY6/ft29dxczMzM6xcuXLBecenz7e1vSvXXdJxLd1ot49hYC+DaVR6GZU+oLtetmzZciQzxxea1/Z/CBMRK4HPAO/IzO82fuc3ZGZGRPtJ8goyczewG2B8fDwnJiY63tbk5CTtPP/Odv9DmNs7r6Ub7fYxDOxlMI1KL6PSB/Sml7auAoqIC2n88v9EZn62DJ8pp3Yo92fL+DSwvunpl5WxVuOSpD5o5yqgAB4Ans3MDzet2g/MXsmzHXi4afyOcjXQtcD5zDwNPAZcHxGry5u/15cxSVIftHMK6NeAtwLHI+JoGfsDYBfwUETcBTwPvKWsOwDcBEwBPwDeBpCZ5yLi/cBTZd77MvPcknQhSVq0BQOgvJkbLVZfN8/8BO5usa09wJ7FFChJWh5+EliSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVygCQpEoZAJJUKQNAkiplAEhSpQwASaqUASBJlTIAJKlSBoAkVcoAkKRKGQCSVCkDQJIqZQBIUqUMAEmqlAEgSZUyACSpUgaAJFXKAJCkShkAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVygCQpEoZAJJUKQNAkiplAEhSpQwASaqUASBJlTIAJKlSBoAkVWrBAIiIPRFxNiJONI29NyKmI+Joud3UtO7dETEVEc9FxA1N41vL2FRE7Fz6ViRJi9HOEcDHga3zjP95Zl5VbgcAImITcCvwi+U5fx0RKyJiBfAR4EZgE3BbmStJ6pMLFpqQmU9ExIY2t7cN2JeZPwK+HhFTwNVl3VRmfg0gIvaVuc8sumJJ0pLo5j2AeyLiWDlFtLqMrQO+0TTnhTLWalyS1CeRmQtPahwBPJKZryuPx4BvAQm8H1ibmb8ZEX8FPJmZf1/mPQB8vmxma2b+Vhl/K3BNZt4zz2vtAHYAjI2NvX7fvn0dNzczM8PKlSsXnHd8+nxb27ty3SUd
19KNdvsYBvYymEall1HpA7rrZcuWLUcyc3yheQueAppPZp6ZXY6IjwGPlIfTwPqmqZeVMV5hfO62dwO7AcbHx3NiYqKTEgGYnJykneffufPRtrZ36vbOa+lGu30MA3sZTKPSy6j0Ab3ppaNTQBGxtunhm4DZK4T2A7dGxEURcTmwEfgS8BSwMSIuj4hX0XijeH/nZUuSurXgEUBEfBKYANZExAvAvcBERFxF4xTQKeC3ATLz6Yh4iMabuy8Bd2fmj8t27gEeA1YAezLz6SXvRpLUtnauArptnuEHXmH+B4APzDN+ADiwqOoGzIZ2TxXtunmZK5Gk7vlJYEmqlAEgSZUyACSpUgaAJFXKAJCkShkAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVygCQpEoZAJJUKQNAkiplAEhSpQwASaqUASBJlTIAJKlSBoAkVcoAkKRKGQCSVCkDQJIqZQBIUqUMAEmqlAEgSZUyACSpUgaAJFXKAJCkShkAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVygCQpEoZAJJUKQNAkiq1YABExJ6IOBsRJ5rGLo2IgxFxstyvLuMREfdFxFREHIuIzU3P2V7mn4yI7cvTjiSpXe0cAXwc2DpnbCdwKDM3AofKY4AbgY3ltgO4HxqBAdwLXANcDdw7GxqSpP5YMAAy8wng3JzhbcDesrwXuKVp/MFseBJYFRFrgRuAg5l5LjNfBA7y/0NFktRDnb4HMJaZp8vyN4GxsrwO+EbTvBfKWKtxSVKfXNDtBjIzIyKXohiAiNhB4/QRY2NjTE5OdrytmZmZtp7/zitf6vg15tNNzfNpt49hYC+DaVR6GZU+oDe9dBoAZyJibWaeLqd4zpbxaWB907zLytg0MDFnfHK+DWfmbmA3wPj4eE5MTMw3rS2Tk5O08/w7dz7a8WvM59TtC7/mYrTbxzCwl8E0Kr2MSh/Qm146PQW0H5i9kmc78HDT+B3laqBrgfPlVNFjwPURsbq8+Xt9GZMk9cmCRwAR8Uka/3pfExEv0LiaZxfwUETcBTwPvKVMPwDcBEwBPwDeBpCZ5yLi/cBTZd77MnPuG8uSpB5aMAAy87YWq66bZ24Cd7fYzh5gz6KqkyQtGz8JLEmVMgAkqVIGgCRVygCQpEoZAJJUKQNAkiplAEhSpQwASaqUASBJler620AH2fHp80v+RW+SNCo8ApCkShkAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVygCQpEoZAJJUKQNAkiplAEhSpQwASaqUASBJlTIAJKlSBoAkVcoAkKRKGQCSVCkDQJIqZQBIUqUMAEmqlAEgSZUyACSpUgaAJFXKAJCkShkAklQpA0CSKmUASFKlDABJqlRXARARpyLieEQcjYjDZezSiDgYESfL/eoyHhFxX0RMRcSxiNi8FA1IkjqzFEcAWzLzqswcL493AocycyNwqDwGuBHYWG47gPuX4LUlSR1ajlNA24C9ZXkvcEvT+IPZ8CSwKiLWLsPrS5LaEJnZ+ZMjvg68CCTwN5m5OyK+k5mryvoAXszMVRHxCLArM79Q1h0C3pWZh+dscweNIwTGxsZev2/fvo7rO3vuPGd+2PHTO3blukuWdHszMzOsXLlySbfZL/YymEall1HpA7rrZcuWLUeazsq0dEFHW/+JN2TmdET8LHAwIv69eWVmZkQsKmEyczewG2B8fDwnJiY6Lu4vP/EwHzrebYuLd+r2iSXd3uTkJN38OQwSexlMo9LLqPQBvemlq9+OmTld7s9GxOeAq4EzEbE2M0+XUzxny/RpYH3T0y8rYyNnw85H25p3atfNy1yJJLXW8XsAEXFxRLxmdhm4HjgB7Ae2l2nbgYfL8n7gjnI10LXA+cw83XHlkqSudHMEMAZ8rnGanwuAf8jMf4qIp4CHIuIu4HngLWX+AeAmYAr4AfC2Ll5bktSljgMgM78G/PI8498GrptnPIG7
O309SdLS8pPAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVygCQpEoZAJJUKQNAkiplAEhSpQwASaqUASBJlTIAJKlSBoAkVcoAkKRKGQCSVCkDQJIqZQBIUqUMAEmqlAEgSZW6oN8F1GzDzkfbmvfxrRcvcyWSauQRgCRVygCQpEoZAJJUKQNAkiplAEhSpQwASaqUl4EOgePT57mzzUtGT+26eZmrkTQqPAKQpEoZAJJUKQNAkiplAEhSpQwASaqUASBJlfIy0BHT7jeMermoJI8AJKlSBoAkVarnp4AiYivwF8AK4G8zc1eva5CniiT1+AggIlYAHwFuBDYBt0XEpl7WIElq6PURwNXAVGZ+DSAi9gHbgGd6XIfa1O6RQrv87y2lwdHrAFgHfKPp8QvANT2uQUPAU1TS8hu4y0AjYgewozyciYjnutjcGuBb3VfVX783In0AbPng0vYSH1yqLXVkZPYLo9PLqPQB3fXyc+1M6nUATAPrmx5fVsZelpm7gd1L8WIRcTgzx5diW/00Kn2AvQyqUellVPqA3vTS68tAnwI2RsTlEfEq4FZgf49rkCTR4yOAzHwpIu4BHqNxGeiezHy6lzVIkhp6/h5AZh4ADvTo5ZbkVNIAGJU+wF4G1aj0Mip9QA96icxc7teQJA0gvwpCkio1kgEQEVsj4rmImIqInf2uZ7Ei4lREHI+IoxFxuIxdGhEHI+JkuV/d7zrnExF7IuJsRJxoGpu39mi4r+ynYxGxuX+V/18t+nhvREyX/XI0Im5qWvfu0sdzEXFDf6qeX0Ssj4jHI+KZiHg6It5exodxv7TqZaj2TUS8OiK+FBFfKX38cRm/PCK+WOr9VLlYhoi4qDyeKus3LEkhmTlSNxpvLv8HcAXwKuArwKZ+17XIHk4Ba+aM/SmwsyzvBD7Y7zpb1P5GYDNwYqHagZuAzwMBXAt8sd/1L9DHe4Hfn2fupvJzdhFwefn5W9HvHprqWwtsLsuvAb5aah7G/dKql6HaN+XPdmVZvhD4Yvmzfgi4tYx/FPidsvy7wEfL8q3Ap5aijlE8Anj56yYy87+A2a+bGHbbgL1leS9wSx9raSkznwDOzRluVfs24MFseBJYFRFre1PpK2vRRyvbgH2Z+aPM/DowRePncCBk5unM/HJZ/h7wLI1P5Q/jfmnVSysDuW/Kn+1MeXhhuSXw68Cny/jcfTK7rz4NXBcR0W0doxgA833dxCv9gAyiBP45Io6UT0YDjGXm6bL8TWCsP6V1pFXtw7iv7imnRfY0nYYbmj7KqYNfofEvzqHeL3N6gSHbNxGxIiKOAmeBgzSOTr6TmS+VKc21vtxHWX8e+JluaxjFABgFb8jMzTS+NfXuiHhj88psHAcO5eVbw1w7cD/w88BVwGngQ/0tZ3EiYiXwGeAdmfnd5nXDtl/m6WXo9k1m/jgzr6LxjQhXA7/Q6xpGMQAW/LqJQZeZ0+X+LPA5Gj8cZ2YPw8v92f5VuGitah+qfZWZZ8pf2v8BPsZPTiUMfB8RcSGNX5ifyMzPluGh3C/z9TLM+yYzvwM8DvwqjdNts5/Paq715T7K+kuAb3f72qMYAEP9dRMRcXFEvGZ2GbgeOEGjh+1l2nbg4f5U2JFWte8H7ihXnVwLnG86JTFw5pwHfxON/QKNPm4tV2pcDmwEvtTr+lop54ofAJ7NzA83rRq6/dKql2HbNxHx2ohYVZZ/GvgNGu9nPA68uUybu09m99WbgX8tR23d6fe74ctxo3EVw1dpnFN7T7/rWWTtV9C4auErwNOz9dM433cIOAn8C3Bpv2ttUf8naRyC/zeNc5h3taqdxpUQHyn76Tgw3u/6F+jj70qdx8pfyLVN899T+ngOuLHf9c/p5Q00Tu8cA46W201Dul9a9TJU+wb4JeDfSr0ngD8q41fQCKgp4B+Bi8r4q8vjqbL+iqWow08CS1KlRvEUkCSpDQaAJFXKAJCkShkAklQpA0CSKmUASFKl
DABJqpQBIEmV+l9aEnWcfWzNswAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "df_lstm_all.Length.hist(range=(0,300), bins=30)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.05110421609782807"
+ ]
+ },
+ "execution_count": 17,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(df_lstm_all[df_lstm_all.Length > 50]) / len(df_lstm_all)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "count 5479.0\n",
+ "mean 21.2\n",
+ "std 15.5\n",
+ "min 1.0\n",
+ "25% 13.0\n",
+ "50% 18.0\n",
+ "75% 26.0\n",
+ "max 251.0\n",
+ "Name: Length, dtype: float64"
+ ]
+ },
+ "execution_count": 18,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_all.Length.describe().round(1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "lengths = []\n",
+ "for index, row in df_lstm_all.iterrows():\n",
+ " lengths.append(row.Blobs.shape[0])\n",
+ "df_lstm_all[\"BlobCount\"] = lengths\n",
+ "# add a column for pure gesture recognition without finger/knuckle\n",
+ "df_lstm_all[\"GestureOnly\"] = df_lstm_all.TaskID % 17"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "count 5479.000000\n",
+ "mean 21.239460\n",
+ "std 15.541015\n",
+ "min 1.000000\n",
+ "25% 13.000000\n",
+ "50% 18.000000\n",
+ "75% 26.000000\n",
+ "max 251.000000\n",
+ "Name: BlobCount, dtype: float64"
+ ]
+ },
+ "execution_count": 20,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_all.BlobCount.describe()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " mean | \n",
+ " std | \n",
+ "
\n",
+ " \n",
+ " GestureOnly | \n",
+ " | \n",
+ " | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 19.000000 | \n",
+ " 15.626834 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 18.387387 | \n",
+ " 12.717864 | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 13.418006 | \n",
+ " 11.615571 | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 13.192182 | \n",
+ " 10.096861 | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 13.439344 | \n",
+ " 9.632580 | \n",
+ "
\n",
+ " \n",
+ " 5 | \n",
+ " 13.157407 | \n",
+ " 9.709874 | \n",
+ "
\n",
+ " \n",
+ " 6 | \n",
+ " 17.198813 | \n",
+ " 8.017407 | \n",
+ "
\n",
+ " \n",
+ " 7 | \n",
+ " 18.627841 | \n",
+ " 7.706187 | \n",
+ "
\n",
+ " \n",
+ " 8 | \n",
+ " 30.096463 | \n",
+ " 14.204850 | \n",
+ "
\n",
+ " \n",
+ " 9 | \n",
+ " 19.134375 | \n",
+ " 8.786072 | \n",
+ "
\n",
+ " \n",
+ " 10 | \n",
+ " 20.289308 | \n",
+ " 10.179677 | \n",
+ "
\n",
+ " \n",
+ " 11 | \n",
+ " 19.311526 | \n",
+ " 20.655269 | \n",
+ "
\n",
+ " \n",
+ " 12 | \n",
+ " 20.683230 | \n",
+ " 10.314326 | \n",
+ "
\n",
+ " \n",
+ " 13 | \n",
+ " 20.357367 | \n",
+ " 9.820602 | \n",
+ "
\n",
+ " \n",
+ " 14 | \n",
+ " 21.581538 | \n",
+ " 11.342465 | \n",
+ "
\n",
+ " \n",
+ " 15 | \n",
+ " 27.737654 | \n",
+ " 13.548982 | \n",
+ "
\n",
+ " \n",
+ " 16 | \n",
+ " 51.783708 | \n",
+ " 19.654648 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " mean std\n",
+ "GestureOnly \n",
+ "0 19.000000 15.626834\n",
+ "1 18.387387 12.717864\n",
+ "2 13.418006 11.615571\n",
+ "3 13.192182 10.096861\n",
+ "4 13.439344 9.632580\n",
+ "5 13.157407 9.709874\n",
+ "6 17.198813 8.017407\n",
+ "7 18.627841 7.706187\n",
+ "8 30.096463 14.204850\n",
+ "9 19.134375 8.786072\n",
+ "10 20.289308 10.179677\n",
+ "11 19.311526 20.655269\n",
+ "12 20.683230 10.314326\n",
+ "13 20.357367 9.820602\n",
+ "14 21.581538 11.342465\n",
+ "15 27.737654 13.548982\n",
+ "16 51.783708 19.654648"
+ ]
+ },
+ "execution_count": 21,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_all.groupby(df_lstm_all.GestureOnly)[\"BlobCount\"].agg([\"mean\", \"std\"])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "before: 5479\n",
+ "after: 5043\n",
+ "ratio: 7.957656506661799\n"
+ ]
+ }
+ ],
+ "source": [
+ "# filter on gesture lengths\n",
+ "print(\"before: %s\" % len(df_lstm_all))\n",
+ "df_lstm = df_lstm_all[(df_lstm_all.BlobCount <= 100) & (df_lstm_all.BlobCount >= 5)]\n",
+ "print(\"after: %s\" % len(df_lstm))\n",
+ "print(\"ratio: %s\" % ((len(df_lstm_all) - len(df_lstm)) / len(df_lstm_all) * 100))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "count 5043.000000\n",
+ "mean 22.512195\n",
+ "std 13.430134\n",
+ "min 5.000000\n",
+ "25% 14.000000\n",
+ "50% 19.000000\n",
+ "75% 27.000000\n",
+ "max 99.000000\n",
+ "Name: BlobCount, dtype: float64"
+ ]
+ },
+ "execution_count": 23,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm.BlobCount.describe()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "lengths = []\n",
+ "for index, row in df_lstm.iterrows():\n",
+ " lengths.append(row.Blobs.shape[0])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \n",
+ "A value is trying to be set on a copy of a slice from a DataFrame.\n",
+ "Try using .loc[row_indexer,col_indexer] = value instead\n",
+ "\n",
+ "See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n",
+ " \"\"\"Entry point for launching an IPython kernel.\n"
+ ]
+ }
+ ],
+ "source": [
+ "df_lstm[\"BlobCount\"] = lengths"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "count 5043.000000\n",
+ "mean 22.512195\n",
+ "std 13.430134\n",
+ "min 5.000000\n",
+ "25% 14.000000\n",
+ "50% 19.000000\n",
+ "75% 27.000000\n",
+ "max 99.000000\n",
+ "Name: BlobCount, dtype: float64"
+ ]
+ },
+ "execution_count": 27,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm.BlobCount.describe()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 28,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def lerp(a, b, c=0.5):\n",
+ " return c * b + (1.0 - c) * a\n",
+ "\n",
+ "#Svens new Blob detection\n",
+ "def detect_blobs_return_old(image, task):\n",
+ " #image = e.Image\n",
+ " large = np.ones((29,17), dtype=np.uint8)\n",
+ " large[1:28,1:16] = np.copy(image)\n",
+ " temp, thresh = cv2.threshold(cv2.bitwise_not(large), 205, 255, cv2.THRESH_BINARY)\n",
+ " contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n",
+ " contours = [a for a in contours if cv2.contourArea(a) > 8 and cv2.contourArea(a) < 255]\n",
+ " lstBlob = []\n",
+ " lstCenter = []\n",
+ " lstMin = []\n",
+ " lstMax = []\n",
+ " count = 0\n",
+ " contours.sort(key=lambda a: cv2.contourArea(a))\n",
+ " if len(contours) > 0:\n",
+ " # if two finger or knuckle\n",
+ " cont_count = 2 if task in [1, 6, 7, 18, 23, 24] and len(contours) > 1 else 1\n",
+ " for i in range(1, cont_count + 1):\n",
+ " max_contour = contours[-1 * i]\n",
+ " xmax, ymax = np.max(max_contour.reshape(len(max_contour),2), axis=0)\n",
+ " xmin, ymin = np.min(max_contour.reshape(len(max_contour),2), axis=0)\n",
+ " M = cv2.moments(max_contour)\n",
+ " cX = int(M[\"m10\"] / M[\"m00\"]) - 1\n",
+ " cY = int(M[\"m01\"] / M[\"m00\"]) - 1\n",
+ " #croped_im = np.zeros((27,15))\n",
+ " blob = large[max(ymin - 1, 0):min(ymax + 1, large.shape[0]),max(xmin - 1, 0):min(xmax + 1, large.shape[1])]\n",
+ " #croped_im[0:blob.shape[0],0:blob.shape[1]] = blob\n",
+ " #return (1, [croped_im])\n",
+ " lstBlob.append(blob)\n",
+ " lstCenter.append((cY, cX))\n",
+ " lstMin.append(xmax-xmin)\n",
+ " lstMax.append(ymax-ymin)\n",
+ " count = count + 1\n",
+ " return (count, lstBlob, lstCenter)\n",
+ " else:\n",
+ " return (0, [np.zeros((29, 19))], 0, 0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# descides whether or not a normalization is neccessary\n",
+ "# and cuts or adds zeros\n",
+ "def normalize_blobs(blobs, new_len=50):\n",
+ " new_count = new_len - blobs.shape[0]\n",
+ " if new_count == 0:\n",
+ " return blobs\n",
+ " elif new_count > 0:\n",
+ " temp = np.array([np.zeros((27, 15))] * new_count)\n",
+ " return np.append(blobs, temp, axis=0)\n",
+ " else:\n",
+ " return blobs[0:new_len]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 1.48 s, sys: 236 ms, total: 1.71 s\n",
+ "Wall time: 1.71 s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "# normalizes all image sequences\n",
+ "df_lstm_norm = df_lstm.copy(deep=True)\n",
+ "new_blobs = []\n",
+ "for index, row in df_lstm.iterrows():\n",
+ " new_blobs.append(normalize_blobs(row.Blobs, 50))\n",
+ "\n",
+ "df_lstm_norm.Blobs = new_blobs\n",
+ "\n",
+ "lengths = []\n",
+ "for index, row in df_lstm_norm.iterrows():\n",
+ " lengths.append(row.Blobs.shape[0])\n",
+ "df_lstm_norm[\"BlobCount\"] = lengths"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 31,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "count 5043.0\n",
+ "mean 50.0\n",
+ "std 0.0\n",
+ "min 50.0\n",
+ "25% 50.0\n",
+ "50% 50.0\n",
+ "75% 50.0\n",
+ "max 50.0\n",
+ "Name: BlobCount, dtype: float64"
+ ]
+ },
+ "execution_count": 31,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_norm.BlobCount.describe()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 32,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_lstm_norm.to_pickle(\"DataStudyEvaluation/df_lstm_norm50.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 33,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " Blobs | \n",
+ " Length | \n",
+ " BlobCount | \n",
+ " GestureOnly | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " [[[0.0, 1.0, 1.0, 2.0, 1.0, 1.0, 2.0, 0.0, 1.0... | \n",
+ " 25 | \n",
+ " 50 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 3 | \n",
+ " [[[0.0, 2.0, 191.0, 0.0, 0.0, 1.0, 2.0, 0.0, 1... | \n",
+ " 12 | \n",
+ " 50 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 4 | \n",
+ " [[[0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 2.0, 1.0, 0.0... | \n",
+ " 14 | \n",
+ " 50 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 5 | \n",
+ " [[[0.0, 1.0, 0.0, 2.0, 2.0, 0.0, 1.0, 0.0, 3.0... | \n",
+ " 11 | \n",
+ " 50 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 6 | \n",
+ " [[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0... | \n",
+ " 16 | \n",
+ " 50 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID TaskID VersionID \\\n",
+ "0 1 0 2 \n",
+ "1 1 0 3 \n",
+ "2 1 0 4 \n",
+ "3 1 0 5 \n",
+ "4 1 0 6 \n",
+ "\n",
+ " Blobs Length BlobCount \\\n",
+ "0 [[[0.0, 1.0, 1.0, 2.0, 1.0, 1.0, 2.0, 0.0, 1.0... 25 50 \n",
+ "1 [[[0.0, 2.0, 191.0, 0.0, 0.0, 1.0, 2.0, 0.0, 1... 12 50 \n",
+ "2 [[[0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 2.0, 1.0, 0.0... 14 50 \n",
+ "3 [[[0.0, 1.0, 0.0, 2.0, 2.0, 0.0, 1.0, 0.0, 3.0... 11 50 \n",
+ "4 [[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0... 16 50 \n",
+ "\n",
+ " GestureOnly \n",
+ "0 0 \n",
+ "1 0 \n",
+ "2 0 \n",
+ "3 0 \n",
+ "4 0 "
+ ]
+ },
+ "execution_count": 33,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_norm.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])"
+ ]
+ },
+ "execution_count": 34,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_norm.userID.unique()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_38_LSTM-Report.ipynb b/python/Step_38_LSTM-Report.ipynb
new file mode 100644
index 0000000..83a97b5
--- /dev/null
+++ b/python/Step_38_LSTM-Report.ipynb
@@ -0,0 +1,378 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/usr/local/lib/python3.6/dist-packages/requests/__init__.py:91: RequestsDependencyWarning: urllib3 (1.25.2) or chardet (3.0.4) doesn't match a supported version!\n",
+ " RequestsDependencyWarning)\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "'1.13.1'"
+ ]
+ },
+ "execution_count": 1,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "## USE for Multi GPU Systems\n",
+ "#import os\n",
+ "#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+ "\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "import pandas as pd\n",
+ "import math\n",
+ "\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "%matplotlib inline\n",
+ "\n",
+ "# Importing SK-learn to calculate precision and recall\n",
+ "import sklearn\n",
+ "from sklearn import metrics\n",
+ "\n",
+ "from sklearn.utils.multiclass import unique_labels\n",
+ "\n",
+ "target_names = [\"tap\", \"twotap\", \"swipeleft\", \"swiperight\", \"swipeup\", \"swipedown\", \"twoswipeup\", \"twoswipedown\", \"circle\", \"arrowheadleft\", \"arrowheadright\", \"checkmark\", \"flashlight\", \"l\", \"lmirrored\", \"screenshot\", \"rotate\"]\n",
+ "\n",
+ "\n",
+ "target_names = [\"Tap\", \"Two tap\", \"Swipe left\", \"Swipe right\", \"Swipe up\", \"Swipe down\",\n",
+ " \"Two swipe up\", \"Two swipe down\", \"Circle\", \"Arrowhead left\", \"Arrowhead right\",\n",
+ " \"$\\checkmark$\", \"$\\Gamma$\", \"L\", \"L mirrored\", \"S\", \"Rotate\"]\n",
+ "\n",
+ "\n",
+ "tf.__version__"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df = pd.read_pickle(\"DataStudyEvaluation/df_lstm_norm50.pkl\")\n",
+ "\n",
+ "df.TaskID = df.TaskID % 17\n",
+ "\n",
+ "x = np.concatenate(df.Blobs.values).reshape(-1,50,27,15,1)\n",
+ "x = x / 255.0\n",
+ "\n",
+ "# convert class vectors to binary class matrices (one-hot notation)\n",
+ "num_classes = len(df.TaskID.unique())\n",
+ "y = tf.keras.utils.to_categorical(df.TaskID, num_classes)\n",
+ "\n",
+ "labels = sorted(df.TaskID.unique())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# If GPU is not available: \n",
+ "# GPU_USE = '/cpu:0'\n",
+ "#config = tf.ConfigProto(device_count = {\"GPU\": 1})\n",
+ "\n",
+ "# If GPU is available: \n",
+ "config = tf.ConfigProto()\n",
+ "config.log_device_placement = True\n",
+ "config.allow_soft_placement = True\n",
+ "config.gpu_options.allow_growth=True\n",
+ "config.gpu_options.allocator_type = 'BFC'\n",
+ "\n",
+ "# Limit the maximum memory used\n",
+ "config.gpu_options.per_process_gpu_memory_fraction = 0.3\n",
+ "\n",
+ "# set session config\n",
+ "sess = tf.Session(config=config)\n",
+ "tf.keras.backend.set_session(sess)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/resource_variable_ops.py:435: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "Colocations handled automatically by placer.\n",
+ "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/layers/core.py:143: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
+ "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "Use tf.cast instead.\n"
+ ]
+ }
+ ],
+ "source": [
+ "model = tf.keras.models.load_model('./ModelSnapshots/LSTM-v2-00398.h5')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 30.4 s, sys: 3.28 s, total: 33.7 s\n",
+ "Wall time: 19.6 s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "lst = []\n",
+ "batch = 100\n",
+ "for i in range(0, len(x), batch):\n",
+ " _x = x[i : i+batch]\n",
+ " lst.extend(model.predict(_x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"TaskIDPred\"] = lst\n",
+ "df.TaskIDPred = df.TaskIDPred.apply(lambda x: np.argmax(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_eval = df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[221 2 0 0 2 1 0 0 1 0 0 0 2 0 0 0 43]\n",
+ " [ 3 297 0 0 0 1 1 0 0 0 0 0 0 0 0 0 13]\n",
+ " [ 3 1 239 3 0 0 0 0 0 1 0 0 0 0 24 0 1]\n",
+ " [ 2 1 1 244 0 0 0 5 1 0 0 3 5 5 3 0 4]\n",
+ " [ 2 0 0 2 222 0 0 0 0 0 0 1 27 0 0 0 6]\n",
+ " [ 5 0 0 0 0 246 0 1 1 0 0 2 4 21 0 2 3]\n",
+ " [ 0 3 0 0 4 0 306 1 0 0 0 0 1 0 1 0 0]\n",
+ " [ 0 6 0 0 1 12 2 306 0 0 0 0 6 1 0 0 0]\n",
+ " [ 0 0 2 0 0 0 0 0 273 9 0 10 2 1 1 0 0]\n",
+ " [ 1 0 4 1 0 0 0 0 11 249 2 4 0 9 0 8 1]\n",
+ " [ 0 0 0 6 0 0 0 0 0 2 267 1 0 0 2 14 0]\n",
+ " [ 1 0 0 1 4 0 0 1 19 1 0 247 4 0 1 0 0]\n",
+ " [ 1 0 2 3 18 1 0 0 4 7 0 0 239 7 0 8 8]\n",
+ " [ 0 0 0 3 1 6 0 0 5 5 0 7 2 272 0 0 0]\n",
+ " [ 1 0 1 0 0 6 0 0 0 0 5 3 0 3 278 3 4]\n",
+ " [ 0 0 8 0 0 1 0 0 6 10 5 0 3 1 21 250 1]\n",
+ " [ 15 0 0 0 0 0 1 0 0 0 0 0 18 0 1 0 312]]\n",
+ "[[0.8 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.2]\n",
+ " [0. 0.9 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0.9 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.1 0. 0. ]\n",
+ " [0. 0. 0. 0.9 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0.9 0. 0. 0. 0. 0. 0. 0. 0.1 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0.9 0. 0. 0. 0. 0. 0. 0. 0.1 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0.9 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0.9 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.9 0. 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.9 0. 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0.1 0. 0. 0.9 0. 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0.1 0. 0. 0. 0. 0. 0. 0. 0.8 0. 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.9 0. 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.9 0. 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.1 0.8 0. ]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.1 0. 0. 0. 0.9]]\n",
+ "Accuray: 0.886\n",
+ "Recall: 0.885\n",
+ "F1-Score: 0.886\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " Tap 0.87 0.81 0.84 272\n",
+ " Two tap 0.96 0.94 0.95 315\n",
+ " Swipe left 0.93 0.88 0.90 272\n",
+ " Swipe right 0.93 0.89 0.91 274\n",
+ " Swipe up 0.88 0.85 0.87 260\n",
+ " Swipe down 0.90 0.86 0.88 285\n",
+ " Two swipe up 0.99 0.97 0.98 316\n",
+ " Two swipe down 0.97 0.92 0.94 334\n",
+ " Circle 0.85 0.92 0.88 298\n",
+ " Arrowhead left 0.88 0.86 0.87 290\n",
+ "Arrowhead right 0.96 0.91 0.94 292\n",
+ " $\\checkmark$ 0.89 0.89 0.89 279\n",
+ " $\\Gamma$ 0.76 0.80 0.78 298\n",
+ " L 0.85 0.90 0.88 301\n",
+ " L mirrored 0.84 0.91 0.87 304\n",
+ " S 0.88 0.82 0.85 306\n",
+ " Rotate 0.79 0.90 0.84 347\n",
+ "\n",
+ " accuracy 0.89 5043\n",
+ " macro avg 0.89 0.88 0.89 5043\n",
+ " weighted avg 0.89 0.89 0.89 5043\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(sklearn.metrics.confusion_matrix(df_eval.TaskID.values, df_eval.TaskIDPred.values, labels=labels))\n",
+ "cm = sklearn.metrics.confusion_matrix(df_eval.TaskID.values, df_eval.TaskIDPred.values, labels=labels)\n",
+ "cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n",
+ "print(np.round(cm,1))\n",
+ "print(\"Accuray: %.3f\" % sklearn.metrics.accuracy_score(df_eval.TaskID.values, df_eval.TaskIDPred.values))\n",
+ "print(\"Recall: %.3f\" % metrics.recall_score(df_eval.TaskID.values, df_eval.TaskIDPred.values, average=\"macro\"))\n",
+ "#print(\"Precision: %.3f\" % metrics.average_precision_score(df_eval.TaskID.values, df_eval.TaskIDPred.values))\n",
+ "print(\"F1-Score: %.3f\" % metrics.f1_score(df_eval.TaskID.values, df_eval.TaskIDPred.values, average=\"macro\"))\n",
+ "print(sklearn.metrics.classification_report(df_eval.TaskID.values, df_eval.TaskIDPred.values, target_names=target_names))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Normalized confusion matrix\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAagAAAGoCAYAAAATsnHAAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzsnXl4VNX5xz8vCZsgmyDIoojsmxACKIsCrkVUKqiAbFLFBXFvf7a1ltalKlSpWlux1n1rabWCigiKsu+IorIJyCKyI4EEksn7+2NucIhJmMycyRzC+3me82TmLt/5npM7eXPn3jlfUVUMwzAMwzfKJNuAYRiGYRSEFSjDMAzDS6xAGYZhGF5iBcowDMPwEitQhmEYhpdYgTIMwzC8xAqUYRiG4SVWoAzDMAwvsQJlGIZheElqsg0c75SpcKKWqVTLiVbbhjWc6IgTFfe4nPPE1z4aJY8dV9FzMCfXic7mjd+ye9eOow6XFagkU6ZSLSpf/EcnWrNfuMaJjoifbzOX03L52kej5LHjKnrWb9/vRKf/xd2j2s4+4jMMwzC8xAqUYRiG4SVWoAzDMAwvsQLlITdf3Jy5j/RhzsN9+MeobpQvW4brL2jKkj9fzp5XB1Ojcvlia95w/QhOq1eb9HZt4vY39YMptG3VjFbNGzP20Ye90Crt/fNVy0dPLrXsuCqc395xE13bNOTSnh2PWP7Kc3+jd/f29OmRztj7743LG6pqrZAGnAQsC9pWYHPE83IuXiOlxuladdDLh1vzURN1/ff7tPaw17TqoJf1v3PX601/n63dfz1Z29z6X92wbZ+ePvJfR+yT1w4cyi20TZ0+Q2fPX6QtW7YqcrsDh3I1M1sLbRlZOXp6o0b65cq1unf/QW3Tpq0u+WxFkfu40nLVv6L6mMz+HWtaPnqy4yqxWl9tyTjcXvrvFJ04ZZY2btbi8LIX/v2untWth362bqd+tSVDZy3/5oh98lqrtu01mr+PdgZVBKq6U1XbqWo74O/A43nPVfVQol43JUWoUC6FlDJCxfIpfLc7k+UbdvPtjtjvoOnW/RxqVI//NvSFCxZwxhmNOb1RI8qVK8eVVw9g8qT/JV2rtPfPRy0fPbnWsuOqcDqe1Y1q1asfseyNl/7B9bfcRbny4U95Tqp5cky+8rACFSMiMklEFovIChG5LliWKiJ7ROSJYPmHInJScXS/253JU+9+yRdP/JyVf+3HDwey+fjz7xLTiRjYsmUz9es3OPy8Xr36bN68OelarvC1fz5q+ejJtZYrfO2f67Fav3YNi+fP5upLejDkiov4fNnimLXAClQ8DFPVDkBH4E4RyftXoiowW1VbAXOB3+XfUURGisgiEVmUm/XDEeuqnlCO3h0acObtb9P8lv9QqXwqV3U9PcFdMQzDiJ+cUA579+zmjckf88vfPcgdNwyN63tmVqBi5w4R+YxwEaoPnBEszwH+HTx+BeiWf0dVnaCq6aqaXqZClSPW9Whdhw3bM9i57yA5IWXSwm/p1KRm4npRTOrWrcemTRsPP9+8eRP16tVLupYrfO2fj1o+enKt5Qpf++d6rOqcUo8Lel+GiNC2fTplypRh964dMetZgYoBETkfOAc4S1XPBJYDFQrZvFj/PmzauZ/0xjWpWC4FgHNb1WHVlh+OslfJkd6xI2vWrGb9unUcOnSIf7/5Bpf0uSzpWq7wtX8+avnoybWWK3ztn+uxOu/iPsyf/SkA69auJvvQIarXiP0fbJvqKDaqArtUNVNEWhH+mC+PVOAKYCIwCJhVHOHFa3fyzoJv+eTB3uSElM837OKFj1Zzw0XNuLVPS2pXrcjshy/hw2VbuPUf86LWHTZ4EJ9+OoOdO3bQ+PQG3HvfGIZf+4viWAMgNTWVx//yFJdechGhUIhhw0fQslWrYuu41irt/fNRy0dPrrXsuCqcu24azoK5M9mzayc9OjTllrt+yxUD
hnLvnTdxac+OlC1bjj/95Zm4pn8Sl/NQlWZEZAyQoarjRKQC8D+gAbCS8O3ovwHmATuAF4Hzge+Aq1V1Z2G6qSc1Uldz8X1nc/FFja99NEoeO66ix+VcfF98tsQmi3WFqo6JeJwFXJR/GxFJDdbfVnLODMMwSid2DcowDMPwEjuDcoiq5gDVku3DMAyjNGBnUIZhGIaX2BlUkmnbsAaznndzc0ONbr9yorN79lgnOq4p7RegjeRgx1X01Kla2LdpikfZlOjOjewMyjAMw/ASK1CGYRiGl1iBMgzDMLzECpRhGIbhJVagDMMwDC+xAuUxN44cwWn1a5PePra46VFXd2PRa3ex+PW7uGXAkZOq3zboHDLnj+WkqicUW9eHuOlEavnoyVctHz251PLRk89aoVCIc85O5+p+jibnTXas+rEQzQ7cCVRIhPf2aR10/8HcAtsH02borHmLtEXLVoVuE9kqdLr7cEsbMFa/WPOdVu/+a6109q90+vxV2vKKP2mFTndr4z7369S5X+uGLbu03gX3HbFfhU53exs3bTHmfmn56Mn6l1it3ftzimwP/Gms9rtygF54ce8it2vXvsPxE/leAtHsd1J4nEbCiCduunnD2ixc8S2ZB7MJhXKZufQb+vYIn4k9esdl/Papd2OaJNOXuOlEafnoyVctHz251PLRk89amzdvYuqU9xg6fERM+xdEqShQhSEivxaRm4PHT4rI1ODxhSLyYvB4sIh8LiJfiMhDBWjcAZwMzBSRacGyCUEi7goRuS9i200i8kigN19EGpVEPwtixTdb6drudGpUOYGK5ctycZfm1K9dlT7ntGLL9r18vjq2GHlf46YtxrzktXz05FLLR08+a/3mV3fyhwcfpkwZd2WlVBcoYCbQPXicBlQTkZRg2aciUh94AOgJtAe6ikifSAFVfRzYBnRX1fODxfeoajpwJnCBiLSM2GWXqrYBngEeK8hUZOT7jh3bnXQ0PyvXb+PPL33MpCev552/XMdnq7ZQrlwqvxrWiz8+MzUhr2kYxvHJlPcnU7PWybRr38GpbmkvUAuBjiJSDcgInqcRLlAzgc7AR6q6Q1WzgdcIJ+UejYEisgRYArQAIgvU68HPV4EuBe0cGfles2atGLoVHS9OWkjXYX/hghv/xp59B/jqm+85rW4NFrxyB1+/9WvqnVyVuS/dTu0aJ0at6WvctMWYl7yWj55cavnoyVet+XPnMOXdSbRtcQa/GHYNMz/5mJEjhsbkKZJSXaBU9SDhGyaGArMJF6XzgNNUdVUsmiLSBLgN6KWqbYEpHHl9ypsEyFrVKwHQoHY1Lu/RhlfeXcRpP/sDzX/+J5r//E9s3raXs4eO5/td+6LW9DVu2mLMS17LR08utXz05KvW7//4ECtWb2D5V2t57sVX6X5uTyb886WYPEVyPEwWOxO4m3CRWg2MJZx8CzAfGCciJwF7gQHAuAI09gEnAnuAKsHzH0TkFMLBhVMitr060BhIuCjGzLAhg5gZxE03adSAe383hmHFiJt+/eGh1KhaieycELePfYu9GVnx2AH8iZtOlJaPnnzV8tGTSy0fPfmslQhKXeR7ZDR78Pwi4B2gqqpmicg3wHhVfSJYPxj4P0CASar66wI07wBuBDYCFwAvEf54cAOwH5ioqq+IyCbgFaA3kAkMVNVvivKb1iFdZ81dGH/HgZO6l+7ZzA3DSC5Zh0JOdHp268zSJYuOv8j3yGj24PkHQPmI543yrX+FcFEpSvNx4PGIRUOK2PxhVb0nWr+GYRhGwZTqa1CGYRjGsUupO4NKJqpaP9keDMMwSgtWoEoRrq4dVe/z+NE3ipKd79zuTKtMGUs+NY4fdma4mAQHTqpczokOQIVyKU50on0r20d8hmEYhpdYgTIMwzC8xAqUYRiG4SVWoAzDMAwvsQJlGIZheIkVKI+JN1E3P/EkZ47+eXsWPzOURX8fwov3/IzyZVPo0a4Bc54axLy/XsP0P19Fo1Oq
FkvTp/4lQud40PLRk0stHz0B/OPvT3Le2e3pdXY7/vG3J7zw5bJ/h0l2Gm6Uiba/BVYAywmn5HaOcr8/Auc7eP0ewOQotns98HgHMByoW5KJuq6SMytc9NgRrdGgZ3Tdd3u02qV/0QoXPaYTP1mp142boqs27tIzr3tBK1z0mN765DR9aeoXP9m3KL/J6p+vaaXHmpaPnkpT/zbtPlhgmzZ7iTZr3lJXb96t67fv127n9tSZi1cUur2PY5WWVkoSdUXkbKAPkBbMHn4+4Tnxjoqq3qeq0xLpLw8RqQN0VNW2wdRIw4G68WjGk6ibn3iTM1NTylCxXCopZYSK5VP5bmcGilLlhPB3LKpUKs93O/cXy5NP/XOtczxo+ejJpZaPngDWrPqadumdqHjCCaSmpnJW13N4f9LbSfXlsn+ReF+ggFOAHUF0BkF20xYR6Sgi/wUQkctFJFNEyolIhWBCWETkBRHpHzxeLyKPBmm3C0SkcbC8loj8R0QWBq1rUWZEpJKI/DPQWCoilwerpgL1RGSZiPwOSAdeDZ5XTMjIFIN4kjO37NzP+ImLWfXydax7bSQ/7D/I9CXfcvPj03jr/r6sefk6BvVqwbh/uZn0NhYsRbXktXz05FLLR08AzVq0ZMHcWezetZPMAwf46MMpbNm8Kam+XPYvkmOhQE0FGojIKhF5WkTODZYvBdoFj7sDXwAdCc8yPr8Qrb1B2u1TwPhg2V+Ax1W1I9AP+MdR/PyWcMhhJ8JJvGNFpBJwGbBWVdup6v3AIuCa4HlmMfvsFdUql6fP2Y1oMfyfNLrmWSpVKMuAXs0ZfUV7fv67t2k85B+8/OEKHhkZTdajYRjx0KRZC26+7W4GXXEJg/tfSqvWbUlJcTPDg294X6BUNQPoAIwEtgNvishwVc0B1opIC6AT4Xj1c/gxLbcgXo/4eXbw+HzgKRFZRjiWo4qIVC7C0oXAPcH2MwiHFZ5anD6VROR7fuJJzuzV/lTWf/8DO/ZmkhPK5e3Zazi7ZV3anF6LhSu3AjDxk1Wc1SKuTzTjwlJUS17LR08utXz0lMfAIdfy/ox5/Oe96VStVp1GZzRJqi/X/cvD+wIFoKohVZ2hqr8HbiF8pgPwKfAzIBuYBnQLWmEFSgt4XAY4KzjTaaeq9YKiWBgC9IvY/lRV/aqY/SmRyPdI4knO3LhtH52an0LF8uGpG3u2O5Wvv91FlUrlaVyvGgC90k5l5cZdCfN/NCxFteS1fPTkUstHT3ns2L4NgM0bv+X9yW/T98oBSfXlun95eD9ZrIg0A3JVdXWwqB3hoEAIF6KXgJdUdXuQjFub8Md9BXE18HDwc26wbCowmnDSLiLSTlWXFWHpA2C0iIxWVRWR9qq6tIDt8lJ4YybeRN1I4knOXLhyK2/NXM3cp64hJ5TLZ2u389z7n7N5xz5ev/dSclXZk5HFDY99WCxPvvQvETrHg5aPnlxq+egpj5FDB7B7905SU8vy4Ni/ULVqtaT6SlQyr/eJuiLSAXgSqAbkAGuAkaq6I7j5YA9wqapOFZEJQB1VvSzY9wXCt4dPFJH1wJuEz7gOEk67XSMiNYG/Ai0IF+xPVfXGfB56AHerap/gNccDXQiffa0LljcMXqt1sE8/4CHCybpnF3YdymWirqvZvm02c8NIPj7OZu6Krp3TWby4FCTqqupiwsWgoHWZHJmWOzLf+uH5dhmrqv+Xb5sdhM+oivIwg/D1przXvKGAbdYDrSOe/wf4T1G6hmEYRuEcE9egDMMwjOMP78+gXKGqDZPtwTAMw4geO4MyDMMwvOS4OYPyGfHs2v/uyXc406r+s0edae1+/1fOtHwkJ5TrTCs1pXT/75mb6+7mrlxPbxSrfkLZZFv4CSFH4x6tSuk+ig3DMIxjFitQhmEYhpdYgTIMwzC8xAqUYRiG4SVWoAzDMAwvsQLlMTdcP4LT6tUmvV3pikQffUU6i58dwaIJ1/Liby6lfNkUzm13KnOeHsaiCdfy
7C97kxLDtEa+9C8RWps2bqT3heeR3q41Hdu34emnSl/MtyutG0eO4LT6tUlvH9/7xuWYu9Ry1b88XI37TSNH0LB+bTo68gX4E/lOkmPdC9CtC0yMYruMQpb3BVoebf/2aR30wKHcAtvU6TN09vxF2rJlq0K3iWw+RjtXOP+RI1qjq/+q67bs1mq9/6wVzn9EJ874SkeOfU83fr9XWw+boBXOf0QffHm23jDuvZ/s62P/XGrtywoV2lav26Qz5y7UfVkh3bJ9j57RuIkuXPp5oduX9rHafzC30PbBtBk6a94ibdGyVZHb5TVXY+7y97cvK+Ssfy7HPeNgbqFtSoSvorbLOJir7Y+lyHffYt1FJFVVt6hq/zhk+gIt4/FRWiPRU1PKULF8Xnx8WQ5kZXMoJ8SazbsB+Gjxevp2b1rivlzquNaqc8optGufBsCJJ55Is+bN2RJjYmlpHytX7xuXY+5Sy8e/C3m+qjvylYcXBQoPYt1FZLiIvCMiHwHTRaShiHwRrDtBRP4lIl+KyFsiMl9E0iP2fVBEPhOReSJSW0S6EE7YHRtEvp+R2OE7Or5EO2/ZmcH4iQtZ9eqNrHtzFD/sP8jET74mNaUMaU3rAPDzc5pSv1aVEvXlWse1ViQb1q9n+bJlpHfqnFRfx8JYuSLeMU+UVrz4Pu6+FChfYt3TgP6qem6+5TcDu1W1JfA7wgm/eVQC5qnqmYQDFK9X1TmE03l/qeFQw7WRYslI1PWFcHx8Y1oMeYZGA54Ox8ef15KhD07i0Rt7MvPJIew7cIhQrrtZFUoTGRkZDB54JQ+Pe4wqVYpXxI3YcDnm9vsrHl5MdaSqGUHuU3egJ+FY93tU9QURKSjWPYXoYt3zgo3OB1rKj3MKVRGRyvrT5NwPVbWgWNhuhIscqvqFiCyPWHcImBw8XgxcEEV/JwATIJwHdbTtXeBLtHOvtIas37qXHXvD8Vhvz1rFWS3r8cb0Lzn/zvCv7rwODWlSv3gfFfjSv0RpAWRnZzN4QH+uGjCIy/teEbPO8TBWrnA15q61XOHruOfhyxmUL7Hu+2Ownq16eDKvEJ4U/fz4Eu28cdsPdGpR98f4+PansfLbndSqdgIA5cqmcNfVnXl2clGhxu59udZxraWqjLrhOpo1b8Ho2+KbK7G0j5UrXI65Sy2X+DjukXhRoESkmYg0iViUP9b9dmCuqm4HTgKaUXSse97P/LHuea/XLv9OR2E2cFWwb0sgmvso4498HzyIHud0YdWqlTQ+vQEvPP9czFqRkczt2rSg35VXxR3tHIvOwq+/462ZK5kb3FJepozw3HufcceVnVj63C9Y+My1vDd3DZ8s+7ZEfbnWca01d85sXn/tFT6Z8TFdOqXRpVMaH0x5L6m+fB2rYUMG0fPcLqxetZImjRrwYozvG5dj7lLLVf/A7bgPHzKIXoGvpnH6ysOLyHdPYt2HA+mqekvwvGGg21pEKgEvEr4r72ugEXClqq4WkQxVrRzs0x/oo6rDgxsxng189M9/HSqPtA7pOnuem8h38W1adGw28+Jgs5lHz/Ewm3kZR+/nMjF8p7AwXM1m3v3sjiyJIvLdiwLliqBApWs4xt2lbgpQVlWzgjvypgHNVPVQvNpWoKLHClT0WIEqhpanfwOtQHl6vcRDTgA+FpGygAA3uyhOhmEYRuGUqgKlCYp1V9V9QPpRNzQMwzCcUbo/BzAMwzCOWUrVGdSxiquPwD28BMXOd3/pTKt6/wnOtHZPHOlMyxWl/bqRS1we67sysp1pnVylvDMtH3F57S8a7B1hGIZheIkVKMMwDMNLrEAZhmEYXmIFyjAMw/ASK1CGYRiGl1iB8hhfo519iuYefWkbFj/Rn0V/6c+Ld/aifNkUJtx6Ll89M4B5j1/BvMevoO3pJxVL08cYc1+1fPSUp3Vmq+a0btGEccXUunv0SNKaNeCCrmmHl+3ZvYtrrujNuR1bcc0Vvdm7Z3dMnnwdq3i1Nm3cyCUXnUfH
9q3plBZfpP0RuI5KL4mGZ/Hw8bT2aR28jHb2MZq7wuXPHNEaXfuyrtu6V6td+Q+tcPkzOnHWGr3uLx/rS9O/1oEPT/3J9pHNxxjzY00rmZ4OHMottO3LzNbTGzXSFV+v0T0ZWdqmTVtdvOyLQrffsDPriPavSR/q5I/matPmLQ8vu2H0nfp/v7tfN+zM0v/73f164+i7frLfhp1ZXo6VS60fMkMFtlXfbNJP5yzUHzJDunlbONJ+wZLPC93+mIp8Lw6+xcMnEh+jnX2L5k5NKUPFckF8fLlUvtsVS2LKj/gaY+6jlo+eABYtPFKr/1VXF0urc5fuVKte/YhlH743iX4DBgPQb8Bgpr73TrE8+TpWrrQKjLTfEn8y7zFXoPAnHv6piOeTRaRH8DhDRB4XkRUiMl1EaiV6QKLBx5jveNmy6wDj317OqmcHse75wfxw4BDTl4W9jBnckQXj+/HoiLMplxr9Ye5rjLmPWj56AtiyeTP16tc/QiveP5Y7tm+jdp1TADi5dh12bN9WPE++jlUC3s8bNgSR9h3jj7Q/FguUL/HwhVEJWKSqrYBPgN/n3+B4jnx3SbVK5ejT6TRa3PA6jUa8Eo6PP7cx9728gDNH/Ytud79F9crlueuK4sZ/GUbhiIif07Z4QEZGBkMGXsnDY91E2h9zBUrDSbgdgJHAdsLx8MNVNQcoKB6+O9HFw58dPD4feEpElgHvEMTDF8NiLuFMKoBXCKf/5u/DBFVNV9X0mjVL5gTLx5jveOl1Zj3Wb9vHjh+yyAkpb89dx1nNa7N1dzhO/lBOLi99tJL0JtGPsa8x5j5q+egJoG69emzetOkIrbp14ztGa9Y6me+3fgfA91u/o7jvW2/HyqFWdnY2gwf256qrB3GZo0j7Y65AgRfx8DkcOXYVirJ71A6VAD7GfMfLxu0ZdGp6MhXLpQDQs209Vm7aQ53qFQ9vc1nnhnz5bfR3XPkaY+6jlo+eADqkH6k18V9vxn2Mnv+zPvznjVcA+M8br3BB70uLtb+vY+VKS1UZdeN1NGvWglscRtofc5PFikgzIFdVVweL8sfDvwS8pKrbReQkoDZFx8M/TMHx8GOD12unqsvy7bceuFlEygD1CJ+x5VEG6A+8AQwCZsXQTSAc7Tzz0xns3LGDJo0acO/vxjDs2l/EpBUZ7RwKhRg2fETcMd/x6ED8/Vu4ejtvzVnH3Mf6kRPK5bN1O3nug6/4330/o2bVigiwfN1ORv+9sP9PforL/pV2LR895Wk9Nv5JLrvkYkK5IYYOu7ZYWqOvH8Lc2TPZvXMHnVufwR333MvNt93NzSOu4c1XX6Be/VN5+p+vFtuTr2PlQmvenNm88dortGrdhq6dwzdL3PeHB7jo4t4x+crjmEvU9SQeXgh/fNcB+AqoDoxR1RkikgFMAC4EtgFXq2qhF5rSOqTrrLluEnVdJme6wuXsxydd9awzLR9nMzeix+Xfre373GWPlvbZzLNz3KQ+n9u1U+lM1FXVxUCXQtZlAuUjno/Mt354vl3Gqur/5dtmB+EzqqI8KHBNEevvLGp/wzAM4+gck9egDMMwjNLPMXcG5QpNXDx8ce74MwzDMArBzqAMwzAMLzluz6B8ItfRBd8y+HeThMsbN1ze2FC971+dae1+e5QzLaPkOalyOWdaew+4i4+vekJZJzoubyhJKeEbsewMyjAMw/ASK1CGYRiGl1iBMgzDMLzECpRhGIbhJVagDMMwDC+xAuUxmzZupPeF55HerjUd28cfo3w8xHzHqjX68jNZ/NeBLPrrAF785QWULxuegHbMkM4sf+Yalv5tIDdf2rZEPR0LWj56uuH6EZxWrzbp7drE5QfgppEjaFi/Nh3bx6e1ZvVKzuuWfrg1rn8SE56O/f3s41jdOHIEp9WvTXqcYxVJ0ubiE5HfEp5MNUQ4ouIGVS0stylyvz8Snh/PaTKuiMwA7lbVRS51j0Zah3T9dM6CAtdt/e47tm79jnbt09i3
bx/dz+7IG//+L81btCxw+9SUwv/fCIVCtGnZlHff/5B69evT7ayOvPjK67RoWbBWonWSrRV5m3ndkyox/ZEraH/za2QdCvHK/13ElEUbEIFz29bj+senowq1qlZk+97Mn2gVdpt5aRkrHz0V9Xdr1sxPqVS5MtdfO4xFyz4/6msXNV3krJmfUrlyZa4fMYyFS4+ulZGVc9RtQqEQ7Zo35L3ps2hw6mmFblfYbebJHKuiysVhrRHDWHSUsep2dseo5uJLyhnU8RTbHg8FxijHmHZZ2mO+49VKTZEfo+PLh6PjR/ZuzUOvLzr8piyoOCXSk+9aPnoC6Nb9HGpUrxHTvgVpVXeklcfMGR/R8PRGRRanovB5rFxp5ZGsj/h8iG2vKCJviMhXIvIWUDFi3cBA8wsReSRYdqWIPBY8vi3CTyMRmR3h5w8isiTYv7mrAduwPohR7hRbjHKpj/mOQ2vLzv2Mf2sZq54fxrqXrw1Hxy/dyOl1qtK/e2NmPX4lb4/pwxl1q5aYp2NBy0dPxwJv//df9O1f5HzURXI8jVWyCpQPse03AQdUtQXhWPYOACJSF3gE6BV46SgifQlnTXWP8LZTROoFjz+N0N2hqmnA34C7CzJ8ROT79qNHvmdkZDB44JU8PM5NjLJxJNUqladP59Np8YuXaDT0BSqVT2VAj6aUL5vCwewQ3e74N89/8CXP3NYr2VaNY5xDhw4x9b3JXNa339E3NpJToDyJbT+HcKYTqrocWB4s7wjMUNXtgZ9XgXNUdStQWUROBBoArxXi7b/Bz8VAw0L6/2Pke62io6Ozs7MZPKA/Vw0YxOVxxCiX+pjvOLR6tavP+u9/CKLjc3l77jec1aIOm3dk8PactQD8b+43tG54Uol5Oha0fPTkOx99OIU2Z7an1sm1Y9Y4XsYKkngXnwex7bEwB7gWWMmPZ1RnA7MjtjkY/AwR51yHqsqoG66jWfMWjI4zRrm0x3zHo7VxewadmtWhYvnwr6vnmfVZuXE3k+Z9w7lt6wPQvU1d1mzeU2KejgUtHz35zlsT34zr4z04fsYKkneTRDMRaRKxKH9s++3A3CCJ9iSgGUXHtuf9zB/bnvd67fLvRLgQDgrWtwby7iFeAJwrIjVFJAXia8HZAAAgAElEQVQYCHwS4e3uYN+lQE/goKruPVqfY2HunNm8/torfDLjY7p0SqNLpzQ+mPJeTFqR0c7t2rSg35VXxR3zHY+OT1oLV33PW7PXMnf8VSz66wDKiPDclBWMm7iEvl0asfCpAdw/7GxuevLjEvN0LGj56Alg2OBB9DinC6tWraTx6Q144fnnYtIBGD5kEL3O7cLqVStp2qgBL8ahtX//fj79eDqXXNo3Zg3wd6yGDRlEz2CsmsQ5Vnkk5TZzT2LbKwLPA2cSjm2vB4xS1UUiMhD4DSDAu3mpuyJyRuC1maquEpGpwNeqemuwfj2QHvQjHRinqj2KGouibjMvLkXdZm4cic1mfmzj8u9WUbeZF5dobjOPFh9nM3clFe1t5kmJ2/Aktj0TGFDIutf58dpW5PK18GOmhapemG99w4jHi4AeRXkwDMMwCsf+5TYMwzC85JgOLExUbLthGIaRfOwMyjAMw/CSY/oMqjQguLu5IdfR1V6XMe0uCTm8mu3yxobqHW9xorN74VNOdI4HRNwdoykOD3dXNza4xOVYuZKKVsbOoAzDMAwvsQJlGIZheIkVKMMwDMNLrEAZhmEYXmIFynNcJWe6TLv0MUXVVfKpC0+jBvZg0b9/w+KJv+WWQT0AePnha5n3xj3Me+Mevn73D8x7454S95UILR89udTy0ZOvWi49HUZVnTfC8+ctC9pWYHPE83KJeM0oPP0c+GUyXruolpbWQTOztcCWkZWjpzdqpF+uXKt79x/UNm3a6pLPVhS6/f6DuYW2D6bN0FnzFmmLlq2K3G7/wdxC9WPx5FIr42BuoW1KRP+K2i6vuexfhXajDre0fg/oF6s3a/WzbtdK
HUbr9HlfactLf3/ENuNfmqZ/eHrSEcsqtBvl7bgfS56sf8fGWKWlddBo/j4m5AxKVXdqMJM48HfC2Ux5M4sfSsRrRuHpLVUdm4zXjhUfkzN9TlF1kXwar6fmp9dh4RfryczKJhTKZebiNfTtdeRcxf0uSONfUxaXqK9EaPnoyaWWj5581XLpKZIS/YhPRH4tIjcHj58MJltFRC4UkReDx4Mj0mwfKkRnrIh8KSLLReQREUmNSLitKSK5ItIleD5HRE4XketEZHyw7BUR+ZuILA5CE38WLE8VkceCdN7lInJdAa/dOMiZynt+j4jcGzyeJSLjRWRZ0If0eMbLx+TM0p6iGq+nFWu30LV9Y2pUrUTFCmW5uFsr6tepfnh917Qz+H7XPtZ+e/SgSpe+EqHloyeXWj568lUrUe/lkv6i7kxgFPA0kAaUDSItugOfikh94AEgHdgLTBORPqo6OU9ARGoDvYFWqqoiUk1Vc0TkGxFpRngG88VAdxFZCtRW1XUFfFmtAeFwwibB6zQGfgFsU9VOIlIemCciU1X122L0sbyqthORXoSTfAuK+jBKKSvXfc+fX/iQSU+P4kDWIT5buYlQKPfw+qsuTuffUxYl0aFhHDuU9E0SCwlHqFcDMoLnafyYStsZ+EhVd6hqNj+m1kayC8gFnhWRnwP7g+Uzg23PAf4UaBYVFf8vVc1V1ZXARsKF6kLg2uAMaT7hOJAmhexfGK8DqOpHwMkFJPkeEfm+fUfh/0n7mJxZ2lNUXXh68e25dL3mUS74xXj2/HCA1Ru2AZCSUobLe53JxA+WJMWXay0fPbnU8tGTr1qJei+XaIFS1YOEb5gYSjiFdiZwHnCaqq6KUiOb8BnW20Bf4N1g1aeEi1I6MBmoSbhYRZPEm/dcgJsjrpedrqrT822Xw5HjViEK3fx9OBz5Xqtm4ZHvPiZnlvYUVReealUP/0/SoE51Lu91Jm++Hz5j6tW5GavWf8/mbcVL5nXly7WWj55cavnoyVetRL2XkzEXX14q7VBgNTAWmBesmw+ME5GTCH/ENwAYF7mziJwIVFDVySIyh3D8et6+LwCrVfWQiHwOXA9cXIiPK0XkFcJnSA0CLx8AN4vIJ8HHhs2Ab1U1M2K/rUBdEakOZAKXAJFXA68GZopID+B7Vd1PjEQmZ4ZCIYYNHxF7cuaQQcz8dAY7d+ygSaMG3Pu7MQy79hdJ9eRSa3hE/5o2asBvk9i/18ddR41qlcjOCXH7w/9ib0b48Lnyog7FvjnCpS/XWj56cqnloydftVx6iiThiboiMgbIUNVxwfOLgHeAqqqaFdzcMF5VnwjWDwb+j/DZzCRV/XU+vfrAfwmHGpYBHlXVl4N1c4EPVfU+ERkKPA7UDK5VXQe0VtXbg8K0D+gEVAZuV9X3g+thDwJ9gpfbBlyuqvvyebiT8LW0zcA6wkXxARGZRfhjy55ACnCthoMLC6VDh3SdPd/NNQmbLDZ6Uhz20SaLNYzi0bVzOoujSNRNSuR7sgkK1ERVfdux7izgFlVddtSNA6xARY8VKMMoHURboGwmCcMwDMNLjss8KFUdnCDdbonQNQzDOB6xMyjDMAzDS47LM6jSSq6j64llos67LFlcXjfKyg4509q14EknOtV7/d6JDsDuj/7gTKu0kxPxRep4cZWObYSx0TQMwzC8xAqUYRiG4SVWoAzDMAwvsQJlGIZheIkVKMMwDMNLrEB5jqsY5U0bN9L7wvNIb9eaju3b8PRTTyTdk89aoVCIc85K5+or4pvw8obrR3Bavdqkt4stin5U/7NY9MLNLH5xFLdceRYAD910IctevoUFz9/Emw8MoGrl/PMVHx0fY75903L5nnHlyWetEo985ziKbiecQ3V7SffHZeT7vqxQoW31uk06c+5C3ZcV0i3b9+gZjZvowqWfF7jtsRY3HYvW7gM5RbYHHh6r/a4aoBde3Puo2x44lFtomzp9hs6ev0hbtmxV5HYHDuVqhe73HdHS
hj6lX6zdqtXPv18r9Rij0xeu0ZYDxusld76olXqM0Qrd79Nxr87Uca/O/Mm+PsZ8+6rl4j1ztPdOaRkrVzpOIt/VotuTissY5TqnnEK79mkAnHjiiTRr3pwtMSRe+hg37Vpr86ZNTJ3yHkOHj4hp/0i6dT+HGjFG0Tc/rSYLv9pM5sEgPn7ZBvqe04LpC9ceDkFcsGIj9WpVKZaujzHfPmq5es+49OSrlleR76Uhuj3Y7r5gv1lEBBOKSJqIzA/2/Y+IVBWRU0RkQbC+g4ioiNQNnn8jIhUCP38JvH4TBCrGTKJilDesX8/yZctI79Q5qZ581frNr+7kDw88TJkyyf0EfMW6bXRteyo1qlSkYvmyXHxWE+qfXPWIbYb2TuODeauLpetjzLevWnnE855x7clHrUT9rYr1HTiTcDgghBNxqxUS3d4TaA90FZE+kQL5otvbAn9S1RwgL7q9Gz9Gt1ckiG4vwEtedPulwIQgqn0kQXR7sG6UiJya7/U7Af2AMwlnOnWKWP0KcGfgayXwO1X9DqgiIpWCfi4KvJ0BbFbVrGDfk4GuhMMU/1TQ4EWbqJsIMjIyGDzwSh4e9xhVqhTvP+/jgSnvTaZmrZNpl9Yh2VZYuWEHf35tNpP+PJR3xg3mszVbCeX+OOvBr4acQyiUyxsfLk+iy9KPvWeSR6wFqjREt58D/EdVM1V1LzAJIAhLrKCqs4PtXozwPhfoEnh6KFie1+c83tYwy4ECM481ykRd1zHK2dnZDB7Qn6sGDOLyvlfEpOFj3LRLrfnz5jDl3Um0bX4Gvxh6DTM/+ZiRI4bG5MkFL767hK7XP8MFo59nz75MVm/cCcDgi9vR++ymDL//P8XW9DHm21ctF+8Z15581PIq8l1LR3R7LHwaeKlHuKC1J3ymF+ntYMTjuCaPcxmjrKqMuuE6mjVvwejb7vDCk49av//jQ6xYs4HlX6/luZdepfu5PZnwz5di8uSCWtUqAdDg5Kpcfk4L3pz2ORd0asydg7rS/9evkXkwu9iaPsZ8+6jl6j3j0pOvWj5Gvh/r0e2fAs+IyKNAOcIpuk+o6k4RyRSRLqo6BxgCfBLR5z8QPjvMEZF9wAXAncUYt6hxGaM8d85sXn/tFVq1bkOXTuELv7//4wNcdHHvpHnyVcslwwYP4tMgir7x6Q24974xDC9GFP3r919NjaoVyc7J5fbH32VvRhaP396b8uVSmfxY+MxuwZebuPXPk6PW9DHm20ctV+8Zl5581Up65HspjW6/DxgMfE/4jHCeqo4XkTTgb0BFYA3h6Pa9wT5bgHtV9Z/B/n1VNS1Yd0RSr4hkqGrlosbVZaKuq1mZj4cZmV3OZl4+1c141ThvjBMdsNnMi4PNZl7yHBeR74mKbi9JrEAlBytQRh5WoEoei3w3DMMwjmmO6cBCTVB0u2EYhpF87AzKMAzD8JJj+gzKOBJXn3+7vC4p4md8fIWyKcm28BNcXjdyGR///VR3WuUcXa876PAaoitPhnvsN2MYhmF4iRUowzAMw0usQBmGYRheYgXKMAzD8BIrUIZhGIaXWIHyHB+jneONMU+EJ5daPnqKVysR0fFZWVn07HYWXTu1p3NaGx66f0yx9s+Pq7HKysqiV/ez6No5jbM6tI3LV2k/1l1qlXjke2luQB3gDWAt4dyp9wjPVD6xmDovAP1j9eEy8t1VJPPRosmLE2Ne2qOrfdVyFR1foft9ujczVGDbcyBHN2/fq3szQ7rjhyztkN5Jp82YXej2ezPdRaLvOZBTaNu9P1s3bdujew7k6Pa9mdohvaN+OGNWodu7OtaLOt5Ly3HlSsdJ5HtpRcJfznkLmKGqZ6hqB+DXgKpq/wK2T8r3xXyMdob4YswT5cnH6GpftBIVHS8iVK4cngs5Ozub7JzsmL/35nKsfuIrOweJMfmmNB/rLrW8inwvBfQEslX173kLVPUzYKOIfAEgIsNF5B0R+QiYHiz7vyDG/jMR+ck5bBAF/0kQ
Qf+BiJwSj0kfo51d4mP/fPQUr1aiouMBQqEQ3Tqn0fjUOvTsdb4Xkeg/+upAk9NOoed558XsyxW+HAuJ0krU35fjdSaJ1oQ/1jsaaUBbVd0lIj8DLgc6q+oBETni3yoRKQs8STjWY7uIXE048mNEflERGUk4lp4Gp56af7VhOCUyOv5A1iGn0fEpKSnMmr+EPXv2MPjqfny54gtatmrt0n5MhH0tDvsa4I8vo3gcr2dQ0fKhqu4KHp8PPK+qBwAilufRjHDh+zCImr8XqF+QqCYh8j1Rkczx4GP/fPTkQisR0fGRVKtWje7n9mDa1A9i2j9Rx2e1atXofk4Ppn8Ymy9X+HQsJELLq8j3UsAKoEMU2+0vhqYAK/THmPk2qnphbPbC+Bjt7BIf++ejJxdaiYiO37F9O3v27AEgMzOTj6dPo2mzZsXWAbdjld/XjI+m0aRpbL5c4dOxkAgtHyPfj2U+Ah4SkZGqOgFARNoCVYvY50PgPhF5Ne8jvnxnUSuBWiJytqrODT7ya6qqK2I16WO0M8QfY54ITz5GV/uklYjo+K1bv+PG668lNxQiNzeXn/e7kot79zn6jgXgcqy2bv2Om64fQSg3hObm0veK/jH7Ks3HukutpEe+lzZEpC4wnvCZVBawHrgdeEtVW4vIcCBdVW+J2OceYChwCHhPVX8jIi8Ak1V1ooi0A54gXOhSgfGq+mxRPlwm6rrC5THh62zmpR2bzTx6XM5mbsd7dESbqHu8nkGhqluAqwpY1TpY/wLh7zhF7vMw8HC+ZcMjHi8j/F0qwzAMI06O12tQhmEYhudYgTIMwzC8xAqUYRiG4SXH7TUoo3ByHd43k2LXjJPCd1Puc6ZV++oJzrR2TbzBiU5qip83Nri8eaN82RQnOodyco++UZS4vKEkGuwMyjAMw/ASK1CGYRiGl1iBMgzDMLzECpRhGIbhJVagPMfH5MybRo6gYf3adGxfOlNGffTkWisUCnHO2elc3a/486WNvqwti5+8ikVPXMWLd513+GL+mMGdWP70AJY+dTU39ynezOEuk2t9PD5dpvy69uUqFTkRibpOCpSI9BURFZHmLvSieL31IlIzAboviEhBgYUFLs+3TXMRWSYiS4NcqJvj9RMKhbj91lH8b9L7LF3+Jf9+43W++vLLpGtdM2Q4b096P6Z9E+XJlZaPnlxrAfz9r0/QtFnx3651a1Ti5j6t6XrXf0i/9V+klCnDld0bM+S8ZtSvWYkzR71B+1ve5N8z1xRLd8jQ4bw9Of5jCvw8PsuXL887709j9vwlzJy3mOkffsDCBfO88DVpyjRmL1jKrPlLmDb1AxbOL74v18dnHq7OoAYCs4KfPyF/Iq2EKW1nb30Jx8W3B3YCcRcoH5MzIZwyWr2Upoz66Mm11ubNm5g65T2GDv9JVFlUpKaUoWK5VFLKCBXLp/Ldrv2MvLgVD72xmLxpHLfvzSqWpqvk2jwt345Plym/CfUVYyqyt4m6IlIZ6Ab8AhgQsbyHiMwUkXeAL0WkoYisFJGXgC+ABiIyMEio/UJEHgn2u1JEHgse3yYi3wSPG4nI7IiXHi0iS4L9mwfbVBKRf4rIguBM5vJgecPAy5KgdQmWi4g8FfiaBpwcRX9/kporIr0JTzR7k4h8THi+vjOCM6qxsY6tj8mZLvGxfz56cq31m1/dyR8efJgyZYr/9t+yaz/j3/qMVf8YzLoXhvLDgUNMX7aJ0+tUoX/3xsz68xW8fV9vzjilqGCAYwNfU34T4yu+VORE/X1xcRZzOTBFVVcBO0UkMmcpDbhNVZsGz5sAT6tqKyAbeAToBbQDOopIX2Am0D3YvnugWS94/GmE9g5VTQP+BtwdLPst8JGqdiIc6z5WRCoB24ALgu2vJjzjOMDPCQcNtiQ8S3mXojoakZrbX1U7AP8EHlTV94C/A4+rak/gHmBtkAv1y6OMn2GUGFPen0zNWifTrn00cWg/pVqlcvTp3JAWI1+l0bUvU6l8
KgPObUL5sikcPJRDt7v+y/NTv+KZ0T3cGi8F5KX8rli9gcWLFvLlii+SbQn4MRX5yzXfssQjX+CmQA0E3ggev8GRH/MtUNV1Ec83qGreB5wdgRmqul1Vc4BXgXNUdStQWUROBBoArxGeIbw74eKVx3+Dn4uBhsHjC4F7gkTbGUAF4FSgLPCsiHwO/JtwQSLQfV1VQ8Hs5h8dpa9Rp+YWhYiMFJFFIrJo+47thW7nY3KmS3zsn4+eXGrNnzuHKe9Oom2LM/jFsGuY+cnHjBwxNOr9e51Zn/Xf/8COH7LICeXy9rx1nNW8Dpt3ZvD23PBb/X/z1tG6oZuP65KJrym/CfUVYyqyl4m6IlKD8BnQP0RkPfBL4Cr58UPM/Im00SbUzgGuJRwCmHdGdTYQ+RHfweBniB+nbBKgX0Sq7amq+hVwB/A9cCaQDpSL0kd+nKTmRhv57mNypkt87J+Pnlxq/f6PD7Fi9QaWf7WW5158le7n9mTCP1+Kev+NOzLo1Kw2FcuF33I929Zj5abdTJq/nnPb1AWge+u6rNmyt9jefMPXlN9E+oo1FdnXRN3+wMuqeniCLRH5hB8/oiuKBcATwd14uwmfeT0ZrJsJ/DFoSwl/XJepqkc76j8gfG1qtKqqiLRX1aWEAwQ3qWquiAwD8ia5+hS4QUReJHz9qSfhM7bCiDY1dx9w4lFH4Cj4mJwJMHzIIGYGKaNNGzXgt78bw7BSkjLqoyfXWvGwcNU23przDXMf70dOSPnsmx0898GXVCyfyvN3nsfoy9qyPyubm576pFi6rpJrwc/j02XKr2tfLlKRvUzUDW4IeERVp0QsuxVoAbwJ3K2qfYLlDQknz7aO2HYg8BvCZybvqur/BcvPANYAzVR1lYhMBb5W1VuD9esJp93uEJF0YJyq9hCRioRTcrsQPjtcp6p9RKQJ8B9AgSnAKFWtHJzpPQlcAHxL+LrYP1V1Yr5+vsBRUnNFZAyQoarjgn1eA9oC7xd1HcrHRN2Qw9liU8rYbLHJIOuQu0lLTxlYZCh0sXA1WazTCY0dHqM2WWx0RJuoe9xGvvuCFSgjEViBih4rUNFT0gWqtH0XyTAMwyglWIEyDMMwvMQKlGEYhuElVqAMwzAML7HId+MnuLxonBNyd4HWZcx3aadCOTcX2MHdjQ0ANc7/oxOd3dN/70THNa5ubABwdQOby/egRb4bhmEYBlagDMMwDE+xAmUYhmF4iRUowzAMw0usQHmOj5HhrnQ2bdxI7wvPI71dazq2b8PTTz1x9J1KwJePY+6rVrxR7aP6dWbR8zex+IWbuKV/OIfoih4tWfzCTez/+D7Smp0Sk25pPhbiHfP8tGvZmG6d2nHu2R3o1T22jCpITOQ7qhpVI5wYq0DzaPeJpwHrgZoJ0H2BcJ7T0ba7ERh6lG2GA08Vsu430fhJS+ugmdlaYMvIytHTGzXSL1eu1b37D2qbNm11yWcrCt2+qOZKq7g6+7JChbbV6zbpzLkLdV9WSLds36NnNG6iC5d+Xuj2PvbveNA6cCi30DZ1+gydPX+RtmzZqsjt8lqFc8YcbmnD/qpfrP1eq1/wgFbq+QedvnCtthz4Fz1zyFPa5pon9ZMl67TL9c8csU9eOx6OBVdjvjMju8jW4NTTdNX674663c6MbGf9S0vroNH8fSzOGdRxE+suIqmq+ndVjT6H4Kf8Jl4fPkaGu/RU55RTaNc+DYATTzyRZs2bsyXGFE4f+3c8aMUT1d78tFos/GozmQdzCIWUmZ9toO85LVi5YQerN+6MSRNK/7EQz5gniqRGvh8Pse4iMkNExovIIuA2ERkjIncH6zqKyPK8CHcRiYycrCsiU0RktYg8Gmz/MFAx2P7VaMa4IHyMDE9UtPOG9etZvmxZ0mOwfRxzn7XiYcW6bXRteyo1qlSkYvlULj6rMfVPjj8qvrQfC64REfpf/jN6devEi/+MbWLgRPUv2i/qHo51F5GdItJBVRcH69KA1qq6LojU
aAIMU9V5IlKXcKx7B8KZT1MjYt1/Fex/1Fh3EbmZcKz7dfwY6z5CRKoBC4LCkxfrnhXEa7xOOJwwMta9NvAl4aj2giinqukAQXxGHs8D1wcZUPk/XG0HtCccoLhSRJ5U1XtE5BZVbVfQi4jISGAkQINTTy3EyvFDRkYGgwdeycPjHqNKlSrJtmOUECs37ODPr81m0rjBHMjK5rM13xNy+KVSIzre/XAGdevWY/u2bfS77GKaNG1Ol27RRPolnmg/gjteYt3fzL8gKIInqurcYFH+QMPpqrpXVbMIF7/TitAHok/U9TEy3HW0c3Z2NoMH9OeqAYO4vO8VMev42L/jQSteXnxvKV1HPssFt77Ann2ZrN4U+0d7eZT2Y8E1deuGfdQ6+WQuubQvSxYvjEkjKZHvx1mse7TeIzkY8TjSZ9z4GBnu0pOqMuqG62jWvAWjb7sjJg3Xvnwcc5+14qVWtRMAaHByFS7v3oI3p30et2ZpPxZcsn//fvbt23f48ccffUiLlsVPwk1m5PvxFut+BKq6R0T2iUhnVZ1PxDW4o5AtImVVNTva18qPj5HhLj3NnTOb1197hVat29ClU/hmid//8QEuurh30nz5OOY+a8Ub1f76/VdRo8oJZOeEuH38e+zNOMhl3Zvz2K0/o2a1E/jvw4NYvmYrl/0y+ku5pf1YiHfMI9m+7XuGDuwPQE5OiH5XDeC8Cy4qtk7SIt+Po1j3GUFfFgXPxxBEuItIZ+BZIBf4JPDVVUSGB49vCfaZHPicEdwQchmwRFWvKWx8fUzUdYlNFnvsc7S/EcWhtE8W6xJX457pMF35hPJuPiCyyHeHiEhlVc0IHt8DnKKqt7nQtgIVPVagkoMVqORgBcriNqLlEhH5NeHx2kD4C7qGYRhGArECFQWq+iYF3OFnGIZhJA77zMQwDMPwEjuDMhKKy+tGoVx310Jcpgb7yN4DMd88+hOqnlDWmZara0fVe49zogOwY/JdzrRcHlc/fpMnPlxdN0oGdgZlGIZheIkVKMMwDMNLrEAZhmEYXmIFyjAMw/ASK1CGYRiGl1iB8hwfI6d99HTTyBE0rF+bju3jj8H2sX8utdasXsl53dIPt8b1T2LC008k1ZMLrdFXdGDxhOEsmjCcF399CeXLpnBuuwbM+esQFk0YzrO//Fmx77Kz4yo5ng7jOlLdWvFaaY98d6mVcTC30DZl2gydNW+RtmjZqsjt8pqP/XOptXXvoaja5l2ZWuvk2rrw89WFbuNj/ypcMPaI1mjA33Tdd3u02iWPa4ULxurEGV/ryHHv68Zte7X18H9ohQvG6oMvz9Eb/vz+T/a146rk/y4kIvLdKGF8jJz20ROEY7CrO4jB9rV/iYrUnjnjIxqe3ogGpx41xiyhnlxopaYIFcunklIm/PNAVjaHsnNZs3k3AB8tWU/fbk2LpWnHVcl7isQKVIIQkVQRKTyNMAp8jJz20ZNLfO1fosbq7f/+i779r066p3i1tuzMYPy/F7HqlZGse+MmfjhwkImfrCQ1pQxpTWoD8PPuTalf68SY/MWLT2OVCK1EHZ9WoBKAiJQFPiQcdV/Q+pEiskhEFm3fsb1kzRlGwKFDh5j63mQu69sv2Vbiplrl8vTp0pgWQ5+l0cC/U6lCWQac14KhD03i0Rt7MvOJa9iXecjpbCRG4jl258Dwm7HABxqRoRWJqk4AJkA4bqMwER8jp3305BJf+5eIsfrowym0ObM9tU6unXRP8Wr1an8a67fuZcfeTADenrWas1rW443pX3H+XW8AcF6H02hSL/6P62LBp7FKhFbSIt+N6JEw5wP1VTXu21h8jJz20ZNLfO1fIsbqrYlvxvzxnmtP8Wpt3P4DnZqfQsVg3rme7U9j5bc7D0fKlyubwl1XdeLZd5fF5C9efBqrRGglM/LdiAIRuQ7oBZwKXOpC08fIaR89AQwfMoiZQQx200YN+O3vxjAshhhsX/vnOlJ7//79fPrxdMaOfzpmDZ/6
t/Drrbw1cxVznx5CTkj5bM33PPfecsYM78bPOjeijAjPTl7GJ8s2Hl0sAjuuSt5TJJao6wgRaZxQzcoAACAASURBVAYsAPqq6sfR7lfaE3VdYrOZR4+vs5m74niYzbw0Y4m6JYyqrgSqJtuHYRhGacGuQRmGYRheYgXKMAzD8BIrUIZhGIaX2DUo4ye4vHHG5T04mYdCzrQqVyjdh77L/mXn5DrTKpvq5n/i3e/d7UQH4JRrX3Wm9e2zA51puRqrQw5/f66I1pGdQRmGYRheYgXKMAzD8BIrUIZhGIaXWIEyDMMwvMQKlGEYhuElVqA8x8do5xuuH8Fp9WqT3i6+GOwbR47gtPq1SXcQpw2wd88erh18NWentaZLhzYsnD83Jh0fx9yllqsY800bN3LJRefRsX1rOqW14emnYouNz8OXsbrp4ubM+dMlzPnTJfzj5q6UL1uGU2tV4sMxF7F43GU8N6obZVOK96fT17HKysqiZ7ez6NqpPZ3T2vDQ/WOSqpMfK1AeEwqFuP3WUfxv0vssXf4l/37jdb768sukaw0ZOpy3J78f076RDB4ynLcnxa+Tx29+dQe9zr+QuUu+YMbcxTRt1qLYGr6OuUutaxyNe2pqKg8+PJaFS79g+idzePaZp/n6q+T3Lx6tU6pX5IYLm9Hrvil0+fW7lCkjXHFWQ8Zc3Z6/TfmaDne/w979hxjS44xiefJ1rMqXL8+kKdOYvWAps+YvYdrUD1g4f17SdPJjBcoxIhISkWUR7cZYtXyMdoZwDHYNBzHYrnQAfti7l3lzZjF42AgAypUrR9Vq1Yqt4+uYu/79uYgxr3PKKbRrnwbAiSeeSLPmzdmyJbYUVZ/GKrWMUKFcCillhBPKpfL9nkzOaVmb/y34FoDXZ31D77T6xfLk61iJCJUrVwYgOzub7JxsRIo/4a0rnfxYgXJPpqq2i2h/j1XIx2hnX9mwYR0n1azJ6Bt/Qc+u6dw+aiT79+8vto6vY+7772/DhvUsX7aM9I6dY9rfl7H6bncmT773FZ+P78vXT17BD5mHWLZuF3sPZB+eTX/LrgPUrXFCTN7Ar7GC8BlZt85pND61Dj17nU96p9h8udKJxApUErDId/eEcnJYvmwp1153Ax/PXsQJlSrxxGOPJtvWcUFGRgZDBl7Jw2Mfo0qVKsm2ExdVTyhH7w71aXfn/2hx6385oXwq57c9xZm+j2OVkpLCrPlL+HLNtyxZtJAvV3yRVJ1IrEAlAVWdoKrpqppeq2atQrfzMdrZV06pV5+69erTIfiv9NLL+7F82dJi6/g65r7+/rKzsxk8sD9XXT2Iy/peEbOOL2PVo3UdNmzPYOe+g+SElEkLN9K5aS2qnlD2cNZT3RonsGXXgWL78nGsIqlWrRrdz+3BtKkfeKEDVqC8xsdoZ1+pXbsOdevVZ82qlQDM/OQjmjUv/k0Svo65j78/VWXUjdfRrFkLbrntjri0fBmrTTv3k35GTSqWSwHg3FZ1+HrzXmZ+9T2XdzoVgIHdGvH+kk3F8uTrWO3Yvp09e/YAkJmZycfTp9G0WbOk6eSndM+YeYzjY7QzwLDBg/g0iMFufHoD7r1vDMNjiMEeFhGn3aRRA+6NMU47jz+NG8+N1w0l+9AhTmvYiCf+9o9ia/g65i61XMWYz5szmzdee4VWrdvQtXP4BoD7/vAAF13cu9havozV4rU7eWfht8y4/2eEcpXl63fz4sdrmLpsC8+N6spv+5/J8g27ePmTtcXy5OtYbd36HTdefy25oRC5ubn8vN+VXNy7T9J08mOR744RkQxVrRzt9j5Gvvs6m/kBm808avIu6Lsg16GWqxm6XWKzmZc853btxNIoIt/9O1oMwzAMAytQzinO2ZNhGIZROFagDMMwDC+xAmUYhmF4Sem+UmzEhIspSvJwecOFyxsbckLuLhynFnPi0JLA5bg7PByc4TKGfss/BznTqnHRn5xp7Z76Gyc65Ty8MSVaR/45NwzDMAys
QBmGYRieYgXKMAzD8BIrUIZhGIaXWIHyHF9SRhOh4zpR15WvTRs30vvC80hv15qO7eNLP/Xx9+eqfy7HCdz2z1V6bbzp0aOu6Mii565n8T+v55Z+HQGofmIFJj86kM9fupHJjw6kWuUKxdb18bhy6ekwqmrNcQMyot02La2DZmZrgS0jK0dPb9RIv1y5VvfuP6ht2rTVJZ+tKHT7oporreLq7D+YW2j7YNoMnTVvkbZo2arI7fKaS1/7skKFttXrNunMuQt1X1ZIt2zfo2c0bqILl35e6PY+/v5c9s+ljqv+/ZAZKrSt+maTfjpnof6QGdLN28K+Fiz5vNDtDxzKLbRNnT5DZ89fpC1btipyu7xWoeeDh1vatRP0i2+2afWLH9FK5z2k0xd9oy2veVr//PocvXfCR1qh54N674SPdNxrc47YL6/5eFy50klL66DR/H20MyiP8SllNBGeXCbquvRVYPppDIFwPv7+wF3/XOlACfQvxvTaeI7R5qedxMKvNpN5MIdQrjLzs2/p270Zfbo25ZUPlgPwygfLubRb02Lp+nhcufQUiRUoj/ElZTRRnlySKF8b1gfppzGkg/r4+8tPPP1zqZOw/sWZXhsPK9Ztp2ubBtSoUpGK5VO5uPMZ1D+5CidXr8TWXeG056279nNy9UrF0vXxuErU78++qJsERGQkMBKgwamnJtmNURgZGRkMHnglD4/zJ/3UJa765+s4JTu9duW3O/nzG/OY9OgADmRl89nabYRyf/oFY0uUKBw7g0oCx3Kirq/Jrq59ZWdnM3hAf64aMIjLY0w/9fH3l4eL/rnUSUj/HKTXxsuL739G1xuf54LbX2HPvkxWb9zFtt37qVMjfNZUp0Yltu8pXjqvj8dVov4uWIHyGF9SRhPlySUufakqo264jmbNWzA6jvRTH39/4K5/rnQgAf1zlF4bL7WqnQBAg5OrcHn35rw5fQXvzlnN4IvaAjD4orZMnr2qWJo+HleJ+rtgH/F5jC8po4ny5DJR16WvuXNm83qQftqlU/hi++//WPz0Ux9/f/D/7Z15vB3z+cffn9xQQSNBgiLWxNIiqyCJnaK1NGhF7W3V0tp+3astqrZSbZW2lhZV+1qKBJGNiCyyiC3UTkgsQSSR3Dy/P75zkpPr3Jszc74nZ869z/v1mte9M2fmc56ZO3ee+X6/z/d54p1fLB2Ie34xq9dWWj36prMOZs2OHVjY2MhpfxrKnLkLuPimsdzw629w9L7b8do7czjinLtS2ZTH+yqmTcV4Rd0qIGkx8FbRpj+Y2R9K7ZvHiroxiVmNtV27eFlLW3uy2JjnF5NY1ypmstj2DfHuqzwmi80jA/r3ZWIZFXW9BVUFzCx/TyzHcZw6wx+kjuM4Ti5xB+U4juPkEndQjuM4Ti5xB+U4juPkEg+ScKpKzMg7p3zyGFkI8aILGyLeV4pY0/69B38eTavzHmdH0XnvoV9H0QGIFZNbrk4+72LHcRynzeMOynEcx8kl7qAcx3GcXOIOynEcx8kl7qByTmsv7ZxHrdZe8j2mVh6v+QnHH8dGG6xD317ZyrQXE/OaV2rXyQf3Z8I/T2TitSfyg0NCfavBu27NxGtPZO6jv6b3FuutcJuKOfH449h4g3XoF0GrQG4dlKRPIulcLWnrGFoZvvsVSWtnPb6xsZHTTjmZe+59gKemPsNtN9/Es888U1OtPNoUW6t9+/acd+HvmTD5aYaPepwr/3YFzz3r16qaNsW65gBHHHkMd9/7QKZji4l5fpXatfUmXTj2670ZdMJVbP+dv7Hvjj3YdP3OTH/5XQ771a2MmfLqCrepKd+OqFUgtw4qFmb2XTP73F0lqaGl9eaQtMJC81t7aee8arX2ku95vBdilo+vpEx7MbHLmFdWPr7L0vLxjcboKa9y0M5b8fyrs5nx+ns1samUVudIWgXq1kFJOkvSdZJGS3pV0mBJF0maJulBSSsl+42Q1Df5/RNJl0iaAuyYtHAulDQJ
OFRST0lPSJoq6S5JnYs0/ihpAnCqpC6S7pA0PlkGJPutJWmYpOmSrgYqmmDR2ks751WrmNZY8j2P90IxscrQV0q1zi8L019+lwHbdltaPn6Hzdmg6xo1sWVFUu8TdTcDdgO2BsYCB5vZTyTdBXwNuLvJ/qsB48zs/2DJBL33zKx3sj4V+KGZjZR0DvAb4LTk2JXNrODobgQuNbMxkroBQ4Gtkv3HmNk5kr4GlCwc4yXf64O8ljJvzfg1L83zr87mkhsf496Ljwjl4198h8acllSJSb07qAfMbKGkaUAD8GCyfRqwcYn9G4E7mmy7BUDSGkAnMxuZbL8OuK3pfgl7AlsXzUDvKGl1YGdgMICZ/VfSB6WMNrMrgSsh1INq7uRae2nnvGpB6y75nsd7AeKVj49FtcqYZ+W6+5/iuvufAuDs7+3Om7M+qpktK4q67eJLWABgZouBhba0+uJiSjvf+WbW2GTb3DK/q3i/dsAOZtYzWdY3syhBHcW09tLOedVq7SXf83gvxCwfH4tqlTHPyrLl47filoen1cyWFUW9O6homNkc4ANJg5JNRwIjm9l9GPDDwoqknsmvo4DDk237Ap0rsam4jHLPbbbi4EO/GaW0cyVaebQptlahlPnIEY+y0/a92Wn73gx98P6a2pRHrTxec4Cjjzyc3XbZiRkvPE/3TTfkun9ek0kn5vnFsOum336TSdedxO3nD+G0P97PnE8WcMCgLXnxttPp/+UNuPOCw/nP77+9Qm0q5pgjD2f3RKtHhVoFclvyfXll0yWdBXxiZhcn65+Y2epNP5M0AviRmU0o3ifZ7xWgr5nNTtZ7An8DVgX+BxxrZh8UayT7rQ1cThh3ag+MMrMTJK0F3ASsDzwO7A30KeiXorWXfM8rrb3ke16Jdd3bRUzwGjOh8eLF8Z6na+11ThSdPCaLHbRjPybVc8n35ZVNN7OzmqyvXuozM9u11D7J+sZN1icDO5T4rl2brM8GvlViv/cITslxHMepEH/1cxzHcXKJOyjHcRwnl7iDchzHcXJJbsegnNqxcFG8AIKV2ufzHai1BzbEDAJpjDjwH6sSbl4DG2La9cq9v4iis94xN0TRAXjn+iOj6JR7lVr3f6njOI5Tt7iDchzHcXKJOyjHcRwnl7iDchzHcXKJOyjHcRwnl7iDyjl5K/M9f/58dh24Aztt34vte2/D7357Vs1tiq2VR5tiasUqrz5//nx2H7QDA/r3Zoc+23JeBfdCzJLvEO9a5bF8/IsznmePgX2XLJtvsBZXXlH+9Tpp36144qL9GXvh/lzzg4F8YaV2XHXyQCZcfABjL9yfvxy/I+0b0kcjxrzXl2BmvkRcgF8C04GpwGSgf0v79+7dx+YttJLLJ/MX2SabbmrPPP+SzZm7wLbZZlubNGV6s/u3tKTR+mheY7PLnE8X2Vuz5thH8xrtvY/mW5++29vDIx5rdv9YNsU8v3qzKYvWx/Mbm11mvPyGjR473j6e32hvzfrQNtu8u41/alqz+3/46aKSywdzF9ob735oH366yGbNmWd9+vazh0aMaXb/Dz9dFM2mmNdq7oLFzS5DHx5hY56YYFtt/eUW9yssMe+FmXM+W+7y5vvzrEvXdWz8tBnN7tNxyPVLli1Ous1eeedj63rUv63jkOvtzrEv2wl/HWMHX/jwkn1ue+x/dvo1TyxzXGGJdX69e/excp6n3oKKiKQdga8Dvc1sW0LdqNdbPqp58ljmWxKrrx5SGi5cuJBFixaijIk783h+ebQptlas8upN74WFCxehjEWkY5Z8j3mt8lo+vsDoEcPZeJNN2bDbRmUf09AgOqzcQEM70WHl9sz8YB4PTV6al3viS+/xpTVXTWVHtc7PHVRc1gNmm1mhTtVsM3trOcc0Sx7LfAM0NjYyoH9vNuu2Lrvtvif9MpbmzuP55dGm2FrFVFpevbGxkYH9+9B9o/XYbY89opRpr9SmPJVqL1Atm+6+81YOOuRzeaub5e0P
5nHZf5/h6csG88IVh/DRvIUMn/b2ks/bN4jDBm7Cw1PSPbaqdX7uoOIyDNhQ0guSrpC0S6mdJB0vaYKkCbNmz1rBJlZOQ0MDj42bxLMvvsbECeN5ZvrTtTbJyUCM8uoNDQ2MGTeR6TNejXIveMn38vnss88Ydv99HHDQwWUf02m1lflanw3Z9tS72OLk21n1C+355oBNlnz+h2P789hz7zL2+XerYXJq3EFFxEJV3T7A8cAs4BZJx5TY70oz62tmfbus3aVZvTyW+S6mU6dODNplVx4eNrTmNuWxjHletSB+efVOnToxaOddeeShbPdCTJvyVqodqmPT8IceZJvtetGl6zplH7PrV9bl1Xc/4b2PF7Co0bh3/Gv07xGeQT8dvC1rdVyFX9yQvj5dta65O6jImFmjmY0ws98APwDKf71pQh7LfM+eNYsPP/wQgHnz5vHoIw/TfYstampTTK082hRbyyKVV296L4wY/jDde2S7F2LZBPkr1V4tm+66/ZZU3XsAr8/+lL7d16bDyg0A7PLldXn+zTkctevm7LHtenznstFkqWFbrWvuyWIjImkLYLGZzUg29QRezapXXHK6sbGRo485LkqZ70q0Zs58mxO+dyyNjY0sXryYbxx8KPvu9/Wa2hRTK482xdYqlFf/8le2YaftQ2DCb845l6/us18qnZkz3+bE7x1H4+JGbPFiDhp8CPtkvBdi2QRxr9XRRx7O6FEjeG/2bLpvuiFn/uosjj72OzW1CWDu3LmMevQRfv/HK1IdN/Gl2dwz7lVGnfc1FjUaU195n2uHz+Dtfw7h9dlzeejsfQC4d/xrXHTXtLJ1Y59fgdyWfK9HJPUBLgM6AYuAF4Hj663ke1vIZt7aae3ZzGNmo89rNvM5ny6MotPjhJuj6EC8bOYD+vdlYj2XfK9HzGwisFOt7XAcx2kN+Out4ziOk0vcQTmO4zi5xB2U4ziOk0t8DKrGLDaY91ljFK1C6GileGBD/RMziKB9nNsqt8xbGOf/D2C1L8R7pK6x6kpRdGIFNgB0/ur5UXQWzJhZ1n7+JHIcx3FyiTsox3EcJ5e4g3Icx3FyiTsox3EcJ5e4g6oDGhsb2WXHvhx2cGW5rbzibP3alFetPNoUW6vX1pszaPue7LpjH/YYlL2cSF7PrxKtkwf3ZcLV32XiNd/lB4P7AdD5i6tw30WHMe2673PfRYfRafVVshtX6wq0K6jKbSOhuu3TwL1Ap+XsvzFweBm6Ze3X0tKzVx97f+6iFpffnv97O/jQw2zvffZrcT+vOOvn59cqvdbsTxa2uGzYbSN7/pW3l7vf7E8W5vL8Ymqtsvt5S5bex11pT//vXeu870W22p7n2yMT/mdbH/FXu+TmsXbmlcNtld3PszOvHG4X3/T4Msetsvt5pi+u7xV1i5hnZj3N7CvA+8DJy9l/Y+DwMnTL3S8zb775Bg89eD9HHnNcRTpecbZ+bcqrVh5tiq0Vi7yeXyVaW3Zbm/HPvcW8BYtoXGyMnvo6Bw3qwdd36s4Nw0Ki2RuGTWP/AT0y2QZts4tvLLA+gAK/l/S0pGmSCrnrLwAGSZos6XRJG0saLWlSsuzUzH4Nid54SVMlfb9SY3/xkzM463cX0K5dZX8qrzhbvzblVSuPNsXWglDa/pAD92X3gdtz3T+uqrlNedGa/sosBmyzIWt27ECHL7Rnn/6bsUGXjnTtvBoz358LwMz359K182qZbIM2NlFXUgOwB3BNsmkwoSTGdsDawHhJo4CfAT8ys68nx60K7GVm8yV1B24C+pbY73hgjpn1k/QF4DFJw8zs5Sz2Dn3gPrp06UrPXn0YM2pExrN2HKcS/vvQCNb70vrMevddDjlgH7r32JKdBg6qtVk15/nX3uOSm8dy74Xf4tP5C5ny4jslM99XUjGjrbSgOkiaDMwE1gEeSrYPBG5Kigy+A4wE+pU4fiXgKknTgNuArZv5nr2Bo5LvGgesBXRvulNxyffZLZR8Hzf2
cR74771st9VmfPfobzN65KN8/7ijyjrhpnjF2fq1Ka9aebQpthbAel8Kx3bp2pX99j+ISRPH19SmPGld98BUBpx4LXud/m8+/GQ+M954n3c/mMu6a4ZW07prrsasDz/NZBu0HQc1z8x6AhsBYvljUE05HXiH0NLqC6zczH4CfpiMd/U0s03MbFjTnYpLvq/dQsn3X59zHtNnvMqUZ1/i6uv+zaBdduPv/7g+pekBrzhbvzblVSuPNsXWmjt3Lh9//PGS30cMf4ittk5fiC+v51epVpdOqwKwYdeOHDhwC255ZDr/fXwGR+y9DQBH7L0N9z0+oyWJFmlTXXxm9qmkU4C7JV0BjAa+L+k6YE1gZ+DHhDGqLxYdugbwhpktlnQ0UMhO9nGT/YYCJ0oabmYLJfUA3jSzudU9s+XjFWfr16a8auXRpthas959h6OHHALAokWNHPzNw9hjr6/W1KY8ad101mDW7NiBhYsaOe3PQ5kzdwEX3/wEN/zqII7edztee2cOR/z27ky2QRupqCvpEzNbvWj9XuBW4AbgImBfwIBzzewWSSsRnM1awLXAfcAdyT4PAieb2eol9vsTcC6wP6E1NQs4yMzmNGdbr959bfiYcVHOM1ayWMdpS8xdsCiaVsxksXkkWrLY8Zez+KM3lltRt004qDzjDspxaos7qPJZ0Q6qrYxBOY7jOHWGOyjHcRwnl7iDchzHcXKJOyjHcRwnl3iQRI2RNAt4dTm7rQ3MjvSVrV0rjzbF1MqjTTG18mhTXrXyaFO5WhuZWfOTQBPcQdUBkiaYWV/Xqk+bYmrl0aaYWnm0Ka9aebQptpZ38TmO4zi5xB2U4ziOk0vcQdUHV7rWCtfJq1YebYqplUeb8qqVR5uiavkYlOM4jpNLvAXlOI7j5BJ3UI7jOE4ucQflVERSOXi528rU2qScbfWOpFUldai1HY6Td9xB1QmSOku6VNKTksZJukRS54xaG0u6S9I7kmZKukPSxhlNG1vmtnK4o8S229OKSFpL0mWSJkmaKOlPktbKYpCkHpKukjRM0vDCklGrt6SngBeAFxPbemfRyiOSLixnWxk6/ypnW5laa0vaO8uxJbSOj6GznO84rcz91mxpyfC9/SStW7R+lKR7JP05i14s3EHVDzcDHwHfBo5Ifr8lo9ZNwH+ADYFuwL3JtrKRtK6kPkAHSb2Sh29vSbsCq6bU2lLSwcAakgYXLccAq6TRSrgZeBc4GDiEUJcr67W6DZgEnEkoZllYsvBP4Awz28DM1gf+L9mWCkkHSnpW0hxJH0n6WNJHGW1C0jqSrpH0QLK+taTvZJDaq8S2fTPoLFMxT1ID0CeDDsAHwM8k/STj8cWcEEFjeZxR5n4TgQnJz1mEl54Zye8TM3zv34HPACTtDFwAXA/MoYKoPEkbSdoz+b2DpC8u75hlMDNf6mABni5nW5laU0tsm5JS42jgUUJV4eHJ748C9wCDU2odSHhQv5f8LCx/BnaKdK2mZbxWEyP+DZ8qZ1sZOi8C20S06wHgm4V7gFBpu+zrBZwITAM+BaYWLS8DN6TQ+XlyPy0ivIB9lKy/B5xfwfm1By4lFCldI+bfL/YCvJ5y/6uA/YrW9wX+nuF7pxT9fjlwVtH65Izn8j1gPPBSst4deCSVRrUvuC9xFkK13kOK1gcDl2bUugD4EbABobz9GcB5QEegY5kapyY/z4x4jjtG0vkDcBihh6Bd8vC9OKPWWcBJwHrAmoUlo9alyT//QGBA4oAvAbYFtk2h81jke2t88vOpom1lP5SAXsDGhFb4RkVL1uuU2RktR3dPYBSwc8bjJ1XDribf8VrK/T/3IpHm5aLomKeB9snvzxVfI7K/CE8GVm5yX6WyzedB1QmSPgDWABYSysm3JzS/AczMyu4nlvR6Cx+bmXUrQ2OymfWUNMnMooyjSOpCeOvamHB+BYOOS6nzMbAasDjZ1A6Yu1TOOqbQernEZjOzTdPYlGiNbuFjM7Ody9T5
I9AFuBtYUCTwn7Q2JXojCN2hD5lZb0k7ABea2S5lHj/RzPpIesTM9shiQwnN9QlOrvg+GBVB90ZgXzNLPX4rqZGl99EyH5Hivkruz1IPXgEdzKzssryShgKjgRuSTd8mOJevlquR6PwS2I+Q5LUb0NvMTNLmwHVmNiCNXqI5zsz6S3rKzHpJak9w8tuWq9G66xO3LtaOJWRmG0aQeVbSDOBLkqYWbS/8s5Z9ExZxD+Gf7WGgMathZpaun7tlrWhRhGY2KJLUWgTne0CxPGFcMQtnJMduJukxgvM7JMXx7ST9Augh6XNjKGb2hzTGSLqA0AJ+hqX3gRFaP5XyCKFVnBoza4jw/VHvT2AI8BvgLpZeoyEZbPqdpEcIPQXDbGnLpR3ww4y2jUzuiw6S9iL0RNybRsBbUHWEpDWAzSgKHDCzxzNqbQls3UTrxpQa6wJDWfZBWdBaXgmRUnqTzaxn2uNK6JRsiWR5A5c0BhhJcJyPmdnHFdj1i2bsOi+lzspm9llWO5rRbA9sQXjBeN7MFqY4dgvgIOA04G9NPzezs1Pa8jyhy3PBcnd2AJC0mpmVat3VDEntgO8AexPuq6FmdlUqDXdQ9UESVXUGYcxoGtAPeMLMds2gdSbhptmS4GC+Cowxs8EZbesAdDOz57McX6RzLvC4md1foU7xW9oqwPaEYIfdM2htAgxKlh0IXWqjzez0DFo/bWLX14DpZnZsSp2XgDcITrPgOD/JYE+Lf28zuzOl3r5m9kBaO0roPAAcmuWc2hqSdgKuBlY3s26StgO+b2Yn1dg0JJ1qZn9a3rYWNdxB1QeSphEetGOTsZ8vA+eY2cEZtXoS+oO3k7QecG3afutEa3/gYmBlM9tEUs/Ers+1qlrQKPTJizB2tIClY22pxoya0d8Q+GOWa5Ucvx6wC8FJ7UYYyN6nEpsS3VWABzO+ZGya2DOAEN79nqWswSOppRB3yzD2tw4h2OZLZravpK0JgS/XlHn8ZYT7YH1gO0J3XPEYnfpEzgAAFopJREFU2ylp7GkLSBpH6I79j5n1SrY9bWZfqa1lUGp8ujAeVa6Gj0HVD/PNbJ6kQhfP9KRrJQvzzKxR0qJkXsJMwoB0Fs4iOM4RAGY2WSmzP0Tuky/FG8BWWQ5MWiuzgRuBa4Afmtnilo8qmy8QIinT2rQuYV5QP8KcoeeBx9LqpG25lcG1hOkBv0zWXyDMPyvLQRHm9UCYx5N1PK3NYWavSyrelHn8NgaShgCHA5tIKv47fhF4P42WO6icI6m9mS0C3pbUiTDIOFTS+4QHbxaeSrT+QXgofAQ8mVFroZnNafIPkqlZrtJZFeYArybXoFydwps4hEHenoTJtln4MyEsfAghnHqkpFFm9lJaIYUsEgW7GggD0qnGnxLeIswvOQ84pVKHKek84CIz+zBZ7wz8n5mdmVJqbTO7VdLPAcxsURL5VhZmdl3K73Pg9aSbzyStBJwKPFtjmx4H3iYEdl1StP1jwvy4svEuvpzTTDN5D0LI+X8rHUhOwkg7mlmmB7ikawhdMT8jhCqfAqxkZqln3Ut6AuhNGGMD2IYwP2MN4EQzG1amztFFq4uAV8wsdQujiebqwLEk88eyRHRJ2qyJXTOz/P0UMngMJHTxfYkwb2Vk1gd8qW6XLNMHKg1XL9KZxudfcuYQXqbONbP30ui1ZiStTZgjuSehS3wYYY5iq7hG7qByTto+2zI1h5nZ3svbVqbWqoQunSWROsBvzWx+Bq07gV+Z2fRkfWvgHOAnwJ0xIvwy2HQJwRmsTsgxOJoQJPG/FW1LE7s6EMafdiZk9Wgws9TdhYnWVKBfwVkm2hPM7MstH/k5nd7AZcBXCC8WXQiTy9O9NUsXEbqpClGlhxHSZ80EBprZ/mn0WisKKaBOMbNLa21LKZIXlMsI3esrE3oN5qYZU3YHlXMkvUHIjFCSNHNMJK1MiB4bTXjoFvrlOgIPm9mWFZhaMaUGdwvbYoWgZ7DpEIJDemdF
f3dzJAPjX2RZh5m6y7FI76fA/izNC3gsYdD9ogxamcPVizRK9RpMSlpl08xsm7SarRVJ482sX63tKIWkCYSXi9uAvsBRQA8z+3m5Gj4GlX8aCG/vWt6OZXAyIVS9KzC9SPMjSsxfaYkklLvZt5s0UXxFTJf0V0KyV4BvAc8olO9I/aCLgZndLumAorlVI80s1WTDKnCgmc2MJWZmF0qaQugmgtACHlru8S2Eq/eQlDpcHWiQtL2ZPZno9yP8H0DoGnWWMkbSXwjBKEvmQWXtso+Nmb0oqcHMGoF/JuOwZTsob0HlnCxjAWVonmZmf6xQo8VxBTMbmUGzA2G2+cBk02PAFcB8YNW082IkrWpmn6a1o4nG+YQoxX8nm4YQcteVnHRbht4GQHczezRxvO3TTrBMIi9/RejegzCR+FzLMIk46SZ62Mx2S3tskUbscPV+hACewovZR8B3CS9VXzOzW7Pa2tqQ9GiJzZZlzl9sJI0ivPRcTeiefRs4xsy2K1vDHVS+qcYYVGsn5uTFZHymZyFSLnmgP2UZUjlJOg74ASGj9maSegBXmNmeyzm0qc5thBDuQlDEkcBWZpYmPVGx3iOEDPRzlrvzCkQhcwp5s8spD0kbAe8Qxp9OJwQ7XZ6mO9q7+PJPlOSbeUbSrWb2zWait8jgDC4lZMf4T3L8FDWT/qhMOrF0/sYaFeicQmiNjUvsekFS1ww63c3s0KL1X0maXIFdnwDTJD3Est1EK3RirKQjzOwGNcnnV5jCkGa8ta2QOPHfsGxr+pycOPWDLGSNmA+cDSGTBCHqsCzcQeUcM0s1sa1OOTX5+fVYghEnL55PmDf2KKG7aWdCSH0W5pvZZwW7ktZYlrHF+ZJ2MLMnEp0dCA+BrNyZLLVmteRntSdutyb+QYiY/GayfiQh2CVT2rLIHM3nndExJbY1izuoNoqk/Sh667IKc6hVMt5jZm8nD+trKxkLKSLa5EUzuymZ31OIlPppBQEKjylUdl1F0m6EoJX7MuicBPwrGcMSoVDgURltys0EWTP7e3IffJTX0Okcspktm8Lr7Apb0xWj5jNJdCRlJgkfg2qDSPodYQ5N8TyTxzNkDog93hNlLKTJ5MV2hLlZqSYvqnRWiyVkiZJKHr7Hs+ycsb9nzQQhac3Elkyt7Fhdqy1E8RV00iadfdLMtk9zTFtF0ljgx2Y2JlkfQCjOuWMNbdoI2ITQ+1Dc2/AxoZp3+Vlh3EG1PZKB/15J6Gdh7kqqQmJFWtGSVUq6h5BOqKZjIYktheioVQhzOKYQnMq2hEmsmR4ASYuuO8EhzEj1zyq1eB3M7M8pbRlEqCvVNGXWhoQsFy+WqVOI4usK7AQMT9Z3I7z4pOq6lXQpsBI5DZ3OE8kL4fUsHRv9ADg67eToaqGQQLjQ+/Ckmb2b5njv4mu7dCTczFBhn3/E8Z4oYyEKmb7/RCiPYYQJraenyf5Q6GpMslv0NrNpyfpXyFjwTtI+wJXAawRnt4Gk71mZKZwImRkgOLjtWVr87euEwItUDgr4KfBza1K7S1JHQqBJWRkbLEk6K2kYsLWZvZ2sr0dIIJuWwoTsc4q/Bqh56HSeUKi3tIWFigQdAczsoxqbtQRJhxIqHYwg3O+XSfqxmd1etohlqDXvS30vwBHAy4SuuWuAl4DDM2rdTnhrnkR46/0RcHONz+8JwmBx+2Q5AhiXUWt6OdvK1HqOMJO+sN4DeDaDzihC/sTCekfCOGJanfEtfDYtg96zTdbbZTk/X1Jd8wm1tqEF26YAXYvWuwBT0mh4C6oNYiGU91Ggf7Lp12b2Zka5EwitlfUJWbaHEgb/a8mqZvavovUbJP04o9ZUSVcDNyTr3yZlRuYiPjGzFworFsLMs1RBXYdlo/YWAOtm0OnUwmcdMug9ImkocFOy/i3g4Qw6Tvk8LOlHfL47NA/Rv+1s2S699wgvLWXjY1BtkGTMYBQhh1tZ4wz1hKQLCd2XNxO6hr4FdAZ+D+n+eRWK
Cp7I0ojHUcBfLVsy3CsI9Z9uTew6FHiT4NQxs7JqIEn6NfAN4I5k0zeAu8zs3JT23AQMtyZluCV9F9jLzL6VRi85djAhyzrAKDO7K62GUz6SXi6x2cxs0xVuTBMk/Z4wZlv8wjLNzH5StoY7qLaHpL1YWsa8G6GMwSgzuzyDVsXjPSU0K0pR1Mw/bYGa/fNK+lcLH5uZlR0qnqQDWuI0zWx8BnvWAe4CPiMUCYQQELIy8A2LmO/PiU8yBrWjVVhKppokLyyF1GWj076wuINqoyhENfQhZKo4GfjMzDbPoPMEcDlL35IOI1Sd7d/8Uc1qRQtZd8onmZNViLqcbmbDW9q/BZ2KyivEDldvC9RTKrTEoQ4xs38vd+fCMe6g2h7JOMEahKqso4ExZvZWRq2p1iQ8XdIUS5EQsui4ikLWJe1uZsObe9DV6gEn6f/M7BJJJVP1mNkZpbbXG6qwvELscPW2gKSLCb0Wd1pOHuZJROHJhHHp/xCmjZxMCKCaYmYHlqvlQRJtkxcI8426E5I5zpQ028w+y6D1gKSfsex4z/1ZJ5FaZSHruxAeaqXCo40KQtgr7HYsJMecnvX76wWroLyCxQ9Xbwt8n1BCZ5Gk+YRwbiu31Vol/kUYAx5LyEL/i8Sug8wsVZYLb0G1YZJEk0cR3my6mlnqyK2Y4z2SbicUZ/wLIcLwVKCvmR2W1q5YRM6UsZE1mXNUgV0Vl+2IjSKUV0h0njWzrYrW2xG6Hrdq4TAnJ6ioqGSSPeVtoFuWwCJvQbUhJLU3s0WSTiAESPQjhIZfT+jqS42ZbRLRxCgh65JeIsyFKlSbraTlEjMz+o0K2cvHJbaNMrPUOQJVVLYD2AzYiFA3K1XZjipwJCGM+AeE8gobAge3eERpPFx9OUja0syeay4ll9U268aS4qJm1ijpjSzOCbwF1abQ0rLZPyM8IMdn7NbL7XgPQNKi6E9wwgMIJcinmtk3MmiNM7P+xYPRWcfYkmNXSWzbGfge0MHMurR81Oc0JpOU7Siy6XNjgbVAoehkNzN7vkIdD1dvAUlXmtnxymHBQkmNLJ2TJcKcuk/J0P3oLai2hQDM7IIIWtHHeyKGrDcS3uIaCbnm3k2WLETLjJ5EuQ0iOKe1gQfJ1nKNVbYjKpL2J6S2WZmQybonoTbRAWm1khccj9prBjM7PvkZI/t/VMysIZaWt6DaEJLeIIzxlMRqXBAuVsi6pE+BaYRzfdhSZDEvoVVxZvQirUbCnLPzgfssRaLYJjqXEIJbjiWU3jiZkHi2rGCEaiFpIiFf3oiilt2S8YgUOhWFq7clkpeTrwEbU9TgqPX/ciy8BdW2aABWJ+LbduTxnlgpioYQJgeeBHxX0uOEbqJH0gqZ2WxCeqMYrEPoctwZOE3SZ8BjZnZ2Sp2fEMp2PEdo0Q0F/h7JxkpYaGZzmkRhZnkD/gslwtUrN69Vci8h7dU0Qm9Bq8JbUG2IwhhUZM2Y4z3RUhQlelsC+wKnkT1KMWqmDEndCd2jg5Kfb5rZgAw6mct2VAtJ1wCPEGoAHUwocb+SmZ2QUmeCmfUtHlerpwmpK5K8jD1WC29BtS2qMU4Rc7ynULb6+022H0Z4EJcVsi7pDmA7wvyj0YQ38HEZbbqR0O1YcLiHEbogs2TKeKnIpn8CJ5jZvAw6lZbtqBY/BH5JSF57E6Fl99sMOp9KWhmYLOkiQphyqiSjbYgHJO2dg799VfAWVBtC0pppWyFlaEYb74mFpL7AU8lk0Uq1YmbKaIhk03PAAZZkRpfUA7intcwTUqjI+g5h/Ol0Qjj9FdYKExtXiqRvEDLttyO8KOZhom403EE5FSHpQMJ4z/aEpKOpx3tihawrJFB93ZIkp5KOInQ1vQqclcU5x+x2lHQ+IUDiU+C/hMJ8p5vZjSltmmBmfZe3bUWTOMof8fkB+9Qhz7HC1Vs7yUT5AwlZwlvdw9wdlBOFSsZ7JJ1tZr8pysVWjJnZcWXqTAL2NLP3k8m0NxO6
nXoCW5nZIeXaVKQZM1PGZDPrKekg4CBCippHM2RaiFK2IzaSpgB/I2RGX9JSNLOJzR5UWmdJuLqZVRSu3tpJsnfsamatLkAC3EE5FVJivGc0YQJpppnjFdqypOtN0uXALDM7K1mfbGY9Wzp+Bdj3tJl9RdKVwN1mdn8WuxSxbEdMJE00sz4xdIgQrt4WkHQtYWz2AcLYH+Bh5o5T4HzijfdUGrLeUEjnRCgjcnzRZ6nu9SplynhA0tOE1sXJyRyrBcs5ptR3H5nhu6uGksTAwL2STiLUmCp+WKbtWo0Vrt4WeDlZVk6WVoW3oJxMVGm8p6KQdUm/BPYDZhMKMfY2M5O0OXBdmnDuWN2OJXS7Au9byIm4OrCGmb1Z5rG5LNuRdIMapaNEU3WDJnpRwtWd+scdlJOJKo33tCcksN2FEHixFsFBNQ07b0ljB2A9YJgl2b2TwfvVrbYJNCtG0kFmdrek75T63MyuWdE2VQNJqxLC1fcmOL2hwG9r0W3s1BZ3UE4mqjHek9OQ9ZiZMqKgiGU7YiJpDDCScK0eM7OPa2ySU+e4g3IykYyl9Ey6qp4DjjezUYXPrMwquE00Kw5Zj03MTBkRbXqMUHW2orIdsZG0CeE6DSJk3lhAcOqnp9SJFq7eFpF0mpn9sdZ2xMCDJJys3ASMlDQbmEeSlTsZ75mTRdDM7gHuaRKy/hNCuv5aETNTBpL2I+TiAxhpZg+k1TCzAVq2bMdQSanLdsTGzF5WqOr6WbLsRkj4mpbbCOHqV5OuorITOANoFQ7KW1BOZmKP9+QpZL3IppiZ0X9HaIUVJuYeBjxuZmem1GlatmMaoaXSUvh51Um6Q2cTzm80MDnL/JxY4eptFUmvm9mGtbYjBu6gnNwQM0VRLGJ2O0qaCvQqnF8SFDKpaSqlMnSilO2IjaRTCddqQ0Km9ZGEa/VSmccXwtVPIbRSKw1Xb5NIes3MutXajhi4g3JqTjVC1mNTSaaMIo2pwC5m9kGy3pnQzZfWQa3N0rIdfQiOM0vZjqqQhM8fSxhH2sDKLGAXO1y9NSPpY0rPDROhSnOrGL5xB+XUnGqErEe0LVq3o6QjCNm9HyE8SHYFfmUpc/ElWlHKdsREoZDiQELNsbEsjXzMVJrEcdxBOTUnzymKYnc7SlqfpaU6xpU7SbeJRnHZjoLDTF22IzaSDiE4pHcq1PFwdQdwB+XkgGqErEewqRqZMv4JjCI8xDOXjlCksh3VQNIBLBuleG8GjSjh6k7940XAnDxQCFm/h0gh6xH4O2Fsh6Tb8QLg+sSeKzNq3ghsAlwl6SVJt0g6OYPOuZI6SmovaaikdyQdntGmaCiUEzkVeCZZTpF0XlodM3sZeIjQFToKWJVs4epOneMtKCcX5C1FUbW6HRUyoPYhJLM9GfjMzDZPqRGlbEdskiCQnoXQckkNhO7RtEEgUcLVnfqnVUR6OPWPmT1RYtsLtbAlIVpm9AKShhKqw44nPHh3MLO3MkgVvn8/4NYkuCQvb5qdgEL35xoZNf5MCLYYAvQitK7LDld3Wg/uoBynNNEzZQAvEB643QklzWdKmm1mn6XUiVK2owqcDzwl6VFClOLOhIzkqTCzPwF/KgpXP4tQoLGscHWn9eBdfI7TDNXqdpS0BnAUYZ5Q1jlVmct2VIOk63IDYBEhIz3Ak4Ugk5RaHq7uAO6gHKfqFLoKJZ1AiEzrB7zF0gfvsJoaGAlFqnobK1zdqX+8i89xqs+TQG/C+MwVwPgM3Xr1wCRJ/cxsfCUiZna7pAOS6EnIGK7u1D/egnKcKiPpKTPrVWs7qk0yh21zwlyxuYRxKMsQxXc+Iffhv5NNQwhO/RcRzXXqAHdQjlNlJL1ByIZeEjNr9rMWNCsu2xEbSRuV2p62uGKscHWn/vEuPsepPg2EAf9SSVBTU6Jsx48lDUhbtiMmiRMZamZbRpKMEa7u1DnegnKcKiNpkpn1jqgXpWxHbJJMID80s9cq1BlCyNyxTLi6md1S
uZVOPeEtKMepPlFaTk3oCHyQ/P7FKuhnoTMwXdKThDEoAMzsgHIFknD1MYQcfIVw9Z9mCVd36h93UI5TffaIrHcRIWJumbIdkb8jCxXbYGYm6f4kXP0/EWxy6hjv4nOcOiRG2Y5qI2kgMMTMUiXElXQd8JdKw9Wd+sdbUI5TZ8Qq21ENJPUCDgcOBV4G7sgg0x/4tqSKwtWd+scdlOPUHzcSMlIcJakbMAEYZWaX18KYJP3TkGSZDdxC6J3ZLaPkV2PZ5tQ33sXnOHVIjLIdEW1ZTEjb9J1Ci07S/8xs0wxaDcD0iOHqTh3jBQsdp85IynaMBY4mdKPtUCvnlDAYeBt4VNJVkvYgY+RiEjr/fNIydNo43sXnOPVHrLIdUTCzu4G7Ja0GHAicBnSV9FfgrgzJcCsOV3daB97F5zh1SoyyHdVCUmdCoMS3zCxVmL2kXUptN7ORMWxz6gd3UI5TJ7SVsh1NyRqu7tQ/3sXnOPVDWynbEStc3alz3EE5Tv0gADO7oNaGVIMqhKs7dY538TlOnVCNsh15Ima4utM68BaU49QPUct25JDBwGGEcPUHgZtpvefqlIG3oBynTohdtiOvFIWrDwF2B64nW7i6U+e4g3KcOqGtlI4vppJwdaf+cQflOHWCpDXN7P3l7+k4rQN3UI7jOE4u8Vx8juM4Ti5xB+U4juPkEndQjuM4Ti5xB+U4juPkkv8HteE1637uDEsAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "def plot_confusion_matrix(y_true, y_pred, classes,\n",
+ " normalize=False,\n",
+ " title=None,\n",
+ " cmap=plt.cm.Blues):\n",
+ " \"\"\"\n",
+ " This function prints and plots the confusion matrix.\n",
+ " Normalization can be applied by setting `normalize=True`.\n",
+ " \"\"\"\n",
+ " if not title:\n",
+ " if normalize:\n",
+ " title = 'Normalized confusion matrix'\n",
+ " else:\n",
+ " title = 'Confusion matrix, without normalization'\n",
+ "\n",
+ " # Compute confusion matrix\n",
+ " cm = sklearn.metrics.confusion_matrix(y_true, y_pred)\n",
+ " # Only use the labels that appear in the data\n",
+ " #classes = classes[unique_labels(y_true, y_pred)]\n",
+ " if normalize:\n",
+ " cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n",
+ " print(\"Normalized confusion matrix\")\n",
+ " else:\n",
+ " print('Confusion matrix, without normalization')\n",
+ "\n",
+ " #print(cm)\n",
+ "\n",
+ " fig, ax = plt.subplots(figsize=(6,6))\n",
+ " im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n",
+ " #ax.figure.colorbar(im, ax=ax)\n",
+ " # We want to show all ticks...\n",
+ " ax.set(xticks=np.arange(cm.shape[1]),\n",
+ " yticks=np.arange(cm.shape[0]),\n",
+ " # ... and label them with the respective list entries\n",
+ " xticklabels=classes, yticklabels=classes)\n",
+ " #title=title,\n",
+ " #ylabel='True label',\n",
+ " #xlabel='Predicted label')\n",
+ "\n",
+ " # Rotate the tick labels and set their alignment.\n",
+ " plt.setp(ax.get_xticklabels(), rotation=90, ha=\"right\",\n",
+ " rotation_mode=\"anchor\")\n",
+ "\n",
+ " # Loop over data dimensions and create text annotations.\n",
+ " fmt = '.0f' if normalize else 'd'\n",
+ " thresh = cm.max() / 2.\n",
+ " for i in range(cm.shape[0]):\n",
+ " for j in range(cm.shape[1]):\n",
+ " ax.text(j, i, format(cm[i, j]*100, fmt),\n",
+ " ha=\"center\", va=\"center\",\n",
+ " color=\"white\" if cm[i, j] > thresh else \"black\")\n",
+ " fig.tight_layout()\n",
+ " fig.savefig(\"./out/conf_matrix.pdf\", bbox_inches='tight', transparent=False, pad_inches=0)\n",
+ " return ax\n",
+ "\n",
+ "plot_confusion_matrix(df_eval.TaskID.values, df_eval.TaskIDPred.values, classes=target_names, normalize=True,\n",
+ " title='Normalized confusion matrix')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_39_CNN-Ensemble-Report.ipynb b/python/Step_39_CNN-Ensemble-Report.ipynb
new file mode 100644
index 0000000..9b1c991
--- /dev/null
+++ b/python/Step_39_CNN-Ensemble-Report.ipynb
@@ -0,0 +1,396 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/usr/local/lib/python3.6/dist-packages/requests/__init__.py:91: RequestsDependencyWarning: urllib3 (1.25.2) or chardet (3.0.4) doesn't match a supported version!\n",
+ " RequestsDependencyWarning)\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "'1.13.1'"
+ ]
+ },
+ "execution_count": 1,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "## USE for Multi GPU Systems\n",
+ "#import os\n",
+ "#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+ "\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "%matplotlib inline\n",
+ "import pandas as pd\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "# Importing SK-learn to calculate precision and recall\n",
+ "import sklearn\n",
+ "from sklearn import metrics\n",
+ "\n",
+ "from collections import Counter\n",
+ "\n",
+ "target_names = [\"Knuckle\", \"Finger\"]\n",
+ "\n",
+ "tf.__version__"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df = pd.read_pickle(\"DataStudyEvaluation/df_statistics.pkl\")\n",
+ "\n",
+ "lst = df.userID.unique()\n",
+ "df[\"GestureId\"] = df.TaskID % 17\n",
+ "\n",
+ "#df_train = dfAll[dfAll.userID.isin(train_ids)]\n",
+ "#df_test = dfAll[dfAll.userID.isin(test_ids)]\n",
+ "\n",
+ "x = np.concatenate(df.Blobs.values).reshape(-1,27,15,1)\n",
+ "x = x / 255.0\n",
+ "\n",
+ "# convert class vectors to binary class matrices (one-hot notation)\n",
+ "num_classes = len(df.TaskID.unique())\n",
+ "y = tf.keras.utils.to_categorical(df.TaskID, num_classes)\n",
+ "\n",
+ "labels = sorted(df.TaskID.unique())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x = np.stack(df.Blobs)\n",
+ "x = x.reshape(-1, 27, 15, 1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# convert class vectors to binary class matrices (one-hot notation)\n",
+ "num_classes = 2\n",
+ "y = tf.keras.utils.to_categorical(df.InputMethod, num_classes)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "Text(0.5, 1.0, 'Label for image 1 is: [1. 0.]')"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAALEAAAEICAYAAAAQmxXMAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAD0RJREFUeJzt3XuwnHV9x/H3JyeBQBKEAGYgXEIz0U7qDHEGQVugIKLA1An+0RRGaGhpY1XG2nopOm2D1CrjlKodKSOXQETBQShD1BQIGRjqlAqBAU0KNRgSyDEXIFwSrgn59o/nd2Rz2LNn9+xzzp4vfF4zZ/bZfW7f3fPZ3z633Z8iArPMJvS6ALNuOcSWnkNs6TnElp5DbOk5xJbeqIVY0t2S/qLueVW5RtKzku7rrkqQdISkHZL6ul3WeFHXc5J0raTXJK2vqbRO1v2u8hxeHy5Hw4ZY0npJH6qvvK4dD5wKHBYRx3a7sIh4IiKmRsTr3Zc2eiSdLOkuSc8PF6qan9M3ImJWQx0LJP23pJck3d3pwiT9jaTNkl6QtETS3s2mi4hfRcRU4L+GW2bGzYkjgfUR8WKnM0qaOAr1jJUXgSXAF3pcxzbgW8Alnc4o6SPAhcApVP/H3wG+0m1BIw6xpAMk/UTSU+Wj/SeSDhs02WxJ95V33a2SpjfM//7yjn5O0sOSTmpjnecDVwEfKB81XymP/6WkxyRtk7RM0qEN84SkT0taC6xtssxZZZqJ5f7dkr5aatsh6ceSDpT0g/I87pc0q2H+b0t6sox7QNIJDeP2kbS0vD6PSPqipI0N4w+VdHN5DR+X9JmhnntE3BcR1wHr2nidBj+n8yStk7S9rOfjwy2jRR13RsSNwG9GMPtC4OqIWBMRzwL/BJw30loGdNMSTwCuoXpHHQG8DHxn0DR/Cvw5cAiwC/g3AEkzgZ8CXwWmA58HbpZ0cKsVRsTVwF8B95aPy8WSPgh8HVhQ1rMB+OGgWc8EjgPmtvnczgLOBWYCs4F7y3OdDjwCLG6Y9n5gXhl3PfAjSZPLuMXALKoW51TgnIGZJE0Afgw8XNZzCvDZ0lrVRtIUqtf99IiYBvw+8FAZd0RpRI6oc50t/B7V8x3wMDBD0oHdLHTEIY6IZyLi5oh4KSK2A/8M/OGgya6LiNXlo/8fgAVlZ+McYHlELI+I3RGxAlgFnDGCUj4OLImIByPiVeBLVC31rIZpvh4R2yLi5TaXeU1E/Doingf+E/h1aYF2AT8C3jswYUR8v7wWuyLiUmBv4N1l9ALgaxHxbERspLyJi/cBB0fExRHxWkSsA66kegPVbTfwHkn7RMSmiFhTan8iIvaPiCdGYZ3NTAWeb7g/MDytm4V2szmxr6TvStog6QXgHmD/QXvETzYMbwAmAQdRtd5/XFqB5yQ9R7XDdsgISjm0LBuAiNgBPEPVujWrox1bGoZfbnJ/6sAdSZ8vmwrPl+fxDqrnOFBb47obh48EDh30GnwZmNFhrS2VBuRPqD7BNkn6qaTfrXMdHdgB7Ndwf2B4ezcL7WZz4nNULc5xEbEfcGJ5XA3THN4wfASwE3ia6p95XWkFBv6mRETHOwtU22ZHDtwpH58HAv0N04zKpXpl+/eLVC3uARGxP1XrMvAabAIa9xMaX48ngccHvQbTImIkn0YtRcTtEXEqVSPxKFWL3wtrgKMb7h8NbImIZ7pZaLshniRpcsPfRKqPgJeB58oO2+Im850jaa6kfYGLgZvKYZ/vAx+V9BFJfWWZJzXZMWzHDcCfSZpXDtd8Dfh5RKwfwbI6NY1qW/8pYKKkf2TPluZG4EtlJ3gmcEHDuPuA7ZL+ruwA9kl6j6T3NVuRpAllW3tSdVeTJe01XIGSZkiaX97cr1K1hrtH8mTL8vpKHROBCaWOSW3O/j3g/JKJ/YG/B64daS0D2g3xcqrADvxdRHWYZR+qlvV/gNuazHddKXIzMBn4
DEBEPAnMp/r4fIqqVfpCB/X8VkTcSbW9fTNVyzeb0dmubOZ2quf9K6pNmlfYc5PhYmAj8DhwJ3ATVZAob+Y/otopfJzqdbyKanOkmROpXvvlvLEjfUcbNU4A/pbqE2sb1X7LJ2GPkyKd7NidW9Z9OXBCGf5ty16Wd0KzGSPiNuAbwF3AE1Sv2eKGedeM5MiJfFH82JH0SeCsiBi8AzwuSboSOJvqI3/2GK97DtWRn72AT0XEtUNO6xCPHkmHUB1euxeYQ3VY8TsR8a2eFvYWk/kMVgZ7Ad8FjgKeozp+/e89regtyC2xpZfx2gmzPYz55sRe2jsmM2WsV2tjaDvPPh0RLS8hqFMtIZZ0GvBtoA+4qtVJi8lM4Tid0s3Khhnf5YfL7nF9RWYKd8ZNG4afqj5db06U08yXAadTXWBztqR2L7Qx61od28THAo9FxLqIeI1qD3x+Dcs1a0sdIZ7JnmepNrLnxTdIWiRplaRVO6sTVma1GZOjExFxRUQcExHHTKLpt1HMRqyOEPez59VZh7HnFWRmo6qOEN8PzJF0VLmq6ixgWQ3LNWtL14fYImKXpAuorujqo/qWxZoRL3CYQ2h979iv5fjYuav1+Nd2th7vQ2zp1HKcOCKWU10iaDbmfNrZ0nOILT2H2NJziC09h9jSc4gtvd58PWnC0L842rff1CHHAbz0gXe1HL9zauv35f4/a32V4K5Nm1uOt/HHLbGl5xBbeg6xpecQW3oOsaXnEFt6DrGl15vjxK2u2Z3R+ucKLr6s9U/rnji55Wg+eF7rXskm+ThxOm6JLT2H2NJziC09h9jSc4gtPYfY0nOILb3eHCdu8dsS2vFSy1kXXfepluP7Xmm96iP7W3eZ5l+dyMctsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXXm+PELXox3f3Mtpazzr5imEXvGub3iXe82HoBlk5d/ditB7ZTnSvYFRHH1LFcs3bU2RKfHBFP17g8s7Z4m9jSqyvEAdwh6QFJi2papllb6tqcOD4i+iW9E1gh6dGIuGdgZAn2IoDJ7FvTKs0qtbTEEdFfbrcCt1B1lds43p0x2qipo4PyKZKmDQwDHwZWd7tcs3bVsTkxA7hF1TXCE4HrI+K2kS5s9yutLwje3f+bkS7a3qLq6IxxHXB0DbWYjYgPsVl6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl13aIJS2RtFXS6obHpktaIWltuT1gdMo0G1onLfG1wGmDHrsQWBkRc4CV5b7ZmGo7xKVLr8EdL88HlpbhpcCZNdVl1rZu++yYERGbyvBmqk5o3sT92Nloqm3HLiKCqmfRZuPcj52Nmm5DvEXSIQDldmv3JZl1ptsQLwMWluGFwK1dLs+sY50cYrsBuBd4t6SNks4HLgFOlbQW+FC5bzam2t6xi4izhxh1Sk21mI2Iz9hZeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbet32Y3eRpH5JD5W/M0anTLOhdduPHcA3I2Je+VteT1lm7eu2Hzuznqtjm/gCSb8omxtNu8WVtEjSKkmrdvJqDas0e0O3Ib4cmA3MAzYBlzabyP3Y2WjqKsQRsSUiXo+I3cCVwLH1lGXWvq5CPNARY/ExYPVQ05qNlra7ACv92J0EHCRpI7AYOEnSPKrucNcDnxiFGs1a6rYfu6trrMVsRHzGztJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09
h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NLrpB+7wyXdJel/Ja2R9Nfl8emSVkhaW26bdj5jNlo6aYl3AZ+LiLnA+4FPS5oLXAisjIg5wMpy32zMdNKP3aaIeLAMbwceAWYC84GlZbKlwJl1F2nWStvdHTSSNAt4L/BzYEZEbCqjNgMzmky/CFgEMJl9R7JKsyF1vGMnaSpwM/DZiHihcVxEBFUnNAx63P3Y2ajpKMSSJlEF+AcR8R/l4S0DXYGV2631lmjWWidHJ0TVW9IjEfGvDaOWAQvL8ELg1vrKMxteJ9vEfwCcC/xS0kPlsS8DlwA3Sjof2AAsqLdEs9Y66cfuZ4CGGH1KPeWYdc5n7Cw9h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSayvELTpivEhSv6SHyt8Zo1uu2Zu1+0vxAx0xPihpGvCApBVl3Dcj4l9Gpzyz4bUV4tJP3aYyvF3SQEeMZj03kn7sZvFGR4wAF0j6haQlQ/XrLGmRpFWSVu3k1REXa9ZMp/3YDe6I8XJgNjCPqqW+tNl87ozRRlMn/di9qSPGiNgSEa9HxG7gSuDY0SnTbGjtHp1o2hHjQE+ixceA1fWWZza8do9ODNUR49mS5lH157we+ETtFZoNo92jE0N1xLi83nLMOuczdpaeQ2zpOcSWnkNs6TnElp5DbOkpIsZ2hdJTwIZBDx8EPD2mhXTG9XXmyIg4eKxWNuYhblqEtCoijul1HUNxfeObNycsPYfY0hsvIb6i1wUMw/WNY+Nim9isG+OlJTYbMYfY0utpiCWdJun/JD0m6cJe1tKMpPWSfll+jmBVr+sBKN9l3CppdcNj0yWtkLS23Db9ruNbVc9CLKkPuAw4HZhLdYH93F7V08LJETFvHB2HvRY4bdBjFwIrI2IOsLLcf9voZUt8LPBYRKyLiNeAHwLze1hPChFxD7Bt0MPzgaVleClw5pgW1WO9DPFM4MmG+xsZf79lEcAdkh6QtKjXxbQwo/w2CMBmYEYvixlr7X7H7u3q+Ijol/ROYIWkR0tLOG5FREh6Wx037WVL3A8c3nD/sPLYuBER/eV2K3AL4/cnCbYMfPO83G7tcT1jqpchvh+YI+koSXsBZwHLeljPHiRNKb87h6QpwIcZvz9JsAxYWIYXArf2sJYx17PNiYjYJekC4HagD1gSEWt6VU8TM4Bbqp/cYCJwfUTc1tuSQNINwEnAQZI2AouBS4AbJZ1PdZnrgt5VOPZ82tnS8xk7S88htvQcYkvPIbb0HGJLzyG29BxiS+//AWtaZbVAuFrxAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "i = 1\n",
+ "plt.imshow(x[i].reshape(27, 15)) # blob images are 27x15 capacitive frames\n",
+ "plt.title(\"Label for image %i is: %s\" % (i, y[i]))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# If GPU is not available: \n",
+ "# GPU_USE = '/cpu:0'\n",
+ "#config = tf.ConfigProto(device_count = {\"GPU\": 1})\n",
+ "\n",
+ "\n",
+ "# If GPU is available: \n",
+ "config = tf.ConfigProto()\n",
+ "config.log_device_placement = True\n",
+ "config.allow_soft_placement = True\n",
+ "config.gpu_options.allow_growth=True\n",
+ "config.gpu_options.allocator_type = 'BFC'\n",
+ "\n",
+ "# Limit the maximum memory used\n",
+ "config.gpu_options.per_process_gpu_memory_fraction = 0.4\n",
+ "\n",
+ "# set session config\n",
+ "tf.keras.backend.set_session(tf.Session(config=config))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/resource_variable_ops.py:435: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "Colocations handled automatically by placer.\n",
+ "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/layers/core.py:143: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
+ "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "Use tf.cast instead.\n"
+ ]
+ }
+ ],
+ "source": [
+ "loadpath = \"./ModelSnapshots/CNN-33767.h5\"\n",
+ "model = tf.keras.models.load_model(loadpath)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 16 s, sys: 1.39 s, total: 17.4 s\n",
+ "Wall time: 12.5 s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "lst = []\n",
+ "batch = 100\n",
+ "for i in range(0, len(x), batch):\n",
+ " _x = x[i: i+batch]\n",
+ " lst.extend(model.predict(_x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"InputMethodPred\"] = lst\n",
+ "df.InputMethodPred = df.InputMethodPred.apply(lambda x: np.argmax(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df = df.groupby([\"userID\", \"TaskID\", \"VersionID\"])[[\"InputMethodPred\", \"InputMethod\"]].agg(lambda x: x.tolist()).reset_index()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.InputMethod = df.InputMethod.apply(lambda x: Counter(x).most_common()[0][0])\n",
+ "df.InputMethodPred = df.InputMethodPred.apply(lambda x: Counter(x).most_common()[0][0])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " | \n",
+ " userID | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ "
\n",
+ " \n",
+ " InputMethod | \n",
+ " InputMethodPred | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 1882 | \n",
+ " 1882 | \n",
+ " 1882 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 102 | \n",
+ " 102 | \n",
+ " 102 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 28 | \n",
+ " 28 | \n",
+ " 28 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 2499 | \n",
+ " 2499 | \n",
+ " 2499 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID TaskID VersionID\n",
+ "InputMethod InputMethodPred \n",
+ "0 0 1882 1882 1882\n",
+ " 1 102 102 102\n",
+ "1 0 28 28 28\n",
+ " 1 2499 2499 2499"
+ ]
+ },
+ "execution_count": 12,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.groupby([\"InputMethod\", \"InputMethodPred\"]).count()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[1882 102]\n",
+ " [ 28 2499]]\n",
+ "[[0.94858871 0.05141129]\n",
+ " [0.01108033 0.98891967]]\n",
+ "Accuracy: 0.971\n",
+ "Recall: 0.969\n",
+ "Precision: 0.956\n",
+ "F1-Score: 0.971\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " Knuckle 0.99 0.95 0.97 1984\n",
+ " Finger 0.96 0.99 0.97 2527\n",
+ "\n",
+ " accuracy 0.97 4511\n",
+ " macro avg 0.97 0.97 0.97 4511\n",
+ "weighted avg 0.97 0.97 0.97 4511\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(sklearn.metrics.confusion_matrix(df.InputMethod.values, df.InputMethodPred.values, labels=[0, 1]))\n",
+ "cm = sklearn.metrics.confusion_matrix(df.InputMethod.values, df.InputMethodPred.values, labels=[0, 1], )\n",
+ "cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n",
+ "print(cm)\n",
+ "print(\"Accuracy: %.3f\" % sklearn.metrics.accuracy_score(df.InputMethod.values, df.InputMethodPred.values))\n",
+ "print(\"Recall: %.3f\" % metrics.recall_score(df.InputMethod.values, df.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"Avg. precision (AP): %.3f\" % metrics.average_precision_score(df.InputMethod.values, df.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"F1-Score: %.3f\" % metrics.f1_score(df.InputMethod.values, df.InputMethodPred.values, average=\"macro\"))\n",
+ "print(sklearn.metrics.classification_report(df.InputMethod.values, df.InputMethodPred.values, target_names=target_names))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_40_Questions.ipynb b/python/Step_40_Questions.ipynb
new file mode 100644
index 0000000..2e81be0
--- /dev/null
+++ b/python/Step_40_Questions.ipynb
@@ -0,0 +1,356 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 31,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "\n",
+ "colorDic = {\"blue\" : \"#6599FF\", \"yellow\" : \"#FFAD33\", \"purple\": \"#683b96\", \"green\" : \"#198D6D\", \"red\" : \"#FF523F\"}\n",
+ "colors = list(colorDic.values())\n",
+ "\n",
+ "lables = [\"Finger\", \"Knuckle\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 50,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dfTLX = pd.read_csv(\"./DataStudyEvaluation/Knuckle Study - TLX.csv\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 51,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dfTLX['Score'] = dfTLX['Mental Demand'] + dfTLX['Physical Demand'] + dfTLX['Temporal Demand'] + dfTLX['Performance'] + dfTLX['Effort'] + dfTLX['Frustration']\n",
+ "dfTLX['Score'] = (dfTLX['Score']-6) / 6.0 "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 58,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 58,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAEnCAYAAAC5ebgKAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAIABJREFUeJzt3X2cT3X+//HHq5lhiKQam6UyWlfDDDPGRSQyQhJRrfpqkSS1adsuRL+VqVXrW2or+q58JTZCErr87tCyKBmDiYmpSU0i6zKTwWB4//6Y8VlXYy4+n/GZOfO8325z63POvM85rzPGq7f3eZ/X25xziIhI+XdBsAMQEZHAUEIXEfEIJXQREY9QQhcR8QgldBERj1BCFxHxCCV0ERGPUEIXEfEIJXQREY8ILayBmU0FegI7nXPN8ve9ANwMHAE2A3c75/YVdq7LLrvM1atXz6+ARUQqmjVr1ux2zkUU1s4Ke/XfzK4DsoG/n5TQuwL/dM7lmtl/AzjnnijsYvHx8S4lJaUo8YuISD4zW+Ociy+sXaFDLs65ZcDe0/YlOedy8ze/AOqWKEoREQmYQIyhDwY+KeibZjbUzFLMLGXXrl0BuJyIiJyNXwndzP4fkAvMLKiNc26ycy7eORcfEVHoEJCIiJRQoQ9FC2Jmg8h7WJrg/KjBe/ToUbZu3UpOTk5JTyHlWHh4OHXr1iUsLCzYoYiUeyVK6GbWHRgBdHTOHfQngK1bt1K9enXq1auHmflzKilnnHPs2bOHrVu3EhkZGexwRMq9QodczGwWsBJoZGZbzeweYCJQHVhkZqlmNqmkAeTk5HDppZcqmVdAZsall16qf52JBEihPXTn3J1n2f1GIINQMq+49GcvEjh6U1RExCNK/FC0tNQb+VFAz5c57qZC21SrVo3s7GwyMzNp0qQJjRs3Jicnh+rVq/PAAw8waNCggMYkIsW3deTyIrWrO65DKUdSdpW5hB5sV199NevWrQPgu+++o2/fvjjnuPvuu4McmYjIuWnI5Rzq16/PSy+9xKuvvhrsUERECqWEXoi4uDjS09ODHYaISKGU0AvhxztTIiLnlRJ6IdatW0eTJk2CHYaISKGU0M8hMzOTxx57jOHDhwc7FBGRQpW5WS5FmWZYmjZv3kxsbKxv2uJDDz2kaYsiUi6UuYQeDNnZ2QDUq1ePQ4cOBTkaEZGS0ZCLiIhHKKGLiHiEErqIiEcooYuIeIQSuoiIRyihi4h4RNmbtphYI8Dnyyq0iZnRv39/ZsyYAUBubi61a9emTZs2fPjhhyW67HPPPceTTz5ZaLt69eqRkpLCZZdddsb+6tWrA3Ds2DH69u3Ln/70J8LDw0sUTyBNmzaNlJQUJk6cGOxQROQk6qEDF154IWlpab456IsWLaJOnTp+nfO5557zO64lS5awYcMGkpOT+e6777jvvvv8PqeIeJcSer4ePXrw0Ud5i2vMmjWLO+/8z8p7Bw4cYPDgwbRu3ZrY2FgWLlwI5PVU+/btS/fu3WnQoAEjRowAYOTIkRw6dIgWLVrQv39/AG655RZatmxJ06ZNmTx5crFiq1atGpMmTWLBggXs3bsXgBdeeIFWrVoRExPDmDFjgLxSBY0bN2bQoEE0bNiQ/v37s3jxYtq3b0+DBg1ITk4GIDk5mWuuuYbY2FjatWvH119/fc77AXjzzTdp2LAhrVu35rPPPiv2z1dESp8Ser477riD2bNnk5OTw/r162nTpo3ve88++yydO3cmOTmZJUuW8Pjjj3PgwAEAUlNTmTNnDhs2bGDOnDn8+OOPjBs3jipVqpCamsrMmTMBmDp1KmvWrCElJYVXX32VPXv2FCu+iy66iMjISDIyMkhKSiIjI4Pk5GRSU1NZs2YNy5YtA+Dbb7/l0UcfJT09nfT0dN5++21WrFjB+PHjff9qaNy4McuXL2fdunU888wzpwwNne1+tm/fzpgxY/js
s89YsWIFGzdu9OtnLSKlo+yNoQdJTEwMmZmZzJo1ix49epzyvaSkJN5//33Gjx8PQE5ODlu2bAEgISGBGjXyxv2joqL44YcfuOKKK844/6uvvsr8+fMB+PHHH8nIyODSSy8tVownSvkmJSWRlJREbGwskFe6ICMjgyuvvJLIyEiio6MBaNq0KQkJCZgZ0dHRZGZmApCVlcXAgQPJyMjAzDh69KjvGme7n927d9OpUyciIiIA6NevH998802xYheR0qeEfpJevXrx2GOPsXTp0lN60M455s2bR6NGjU5pv2rVKipXruzbDgkJITc394zzLl26lMWLF7Ny5UqqVq1Kp06dyMnJKVZs+/fvJzMzk4YNG+KcY9SoUWeMqWdmZp4SzwUXXODbvuCCC3yxjR49muuvv5758+eTmZlJp06dfMcU5X5EpGzSkMtJBg8ezJgxY3w93BO6devGhAkTfD3kE2uOnktYWJiv55uVlUXNmjWpWrUq6enpfPHFF8WKKzs7mwceeIBbbrmFmjVr0q1bN6ZOneorKrZt2zZ27txZ5PNlZWX5HvpOmzat0PZt2rThX//6F3v27OHo0aPMnTu3WPGLyPlR9nroRZhmWFrq1q3LQw89dMb+0aNH8/DDDxMTE8Px48eJjIwsdDrj0KFDiYmJIS4ujqlTpzJp0iSaNGlCo0aNaNu2bZHiuf7663HOcfz4cfr06cPo0aMB6Nq1K5s2beKaa64B8h6azpgxg5CQkCKdd8SIEQwcOJCxY8dy002FlyuuXbs2iYmJXHPNNVx88cW0aNGiSNcRkfPLzucSa/Hx8S4lJeWUfZs2bdKKQBWcfgekKLaOXF6kdnXHdSjlSM4/M1vjnIsvrJ2GXEREPKLQhG5mU81sp5mlnbTvEjNbZGYZ+f+tWbphiohIYYrSQ58GdD9t30jgU+dcA+DT/G0REQmiQhO6c24ZsPe03b2B6fmfpwO3BDguEREpppKOof/KObc9//O/gV8FKB4RESkhvx+KurxpMgVOlTGzoWaWYmYpu3bt8vdyIiJSgJLOQ99hZrWdc9vNrDZQ4FstzrnJwGTIm7ZY2Imjp0cX1qRYNgzcUGibkJAQoqOjyc3NpUmTJkyfPp2dO3fSs2dP0tLSCj3+XCZNmkTVqlUZMGBAsY+tVq2a7+Whs8V79OhRQkNDGTBgAH/84x+54ILgT1pKTEykWrVqPPbYY8EORaTCKWkGeB8YmP95ILAwMOEEx4lCWmlpaVSqVIlJkyYF7NzDhg0rUTI/lxPxfvXVVyxatIhPPvmEp59+OqDXEJHypyjTFmcBK4FGZrbVzO4BxgE3mFkG0CV/2xM6dOjAt99+C+QtLHHvvffStGlTunbtyqFDh9i8eTNxcXG+9hkZGb7tkSNHEhUVRUxMjK+HmpiY6Cvq9e2339KlSxeaN29OXFwcmzdvJjs7m4SEBOLi4oiOjvaV5i2qWrVqMXnyZCZOnIhzjmPHjvH444/7Suu+/vrrQF49mY4dO9K7d2/q16/PyJEjmTlzJq1btyY6OprNmzcD8MEHH9CmTRtiY2Pp0qULO3bs8N3H4MGD6dSpE/Xr1+fVV1/1xfDss8/SsGFDrr32Wl8pXhE5/wodcnHO3VnAtxICHEvQ5ebm8sknn9C9e94szYyMDGbNmsX//u//8tvf/pZ58+Zx1113UaNGDVJTU2nRogVvvvkmd999N3v27GH+/Pmkp6djZuzbt++M8/fv35+RI0fSp08fcnJyOH78OJUqVWL+/PlcdNFF7N69m7Zt29KrVy/MrMhx169fn2PHjrFz504WLlxIjRo1WL16NYcPH6Z9+/Z07doVgC+//JJNmzZxySWXUL9+fYYMGUJycjKvvPIKEyZM4OWXX+baa6/liy++wMyYMmUKzz//PC+++CIA6enpLFmyhP3799OoUSPuv/9+1q9fz+zZs0lNTSU3N5e4uDhatmwZgD8NESmuslfLJQhOLEYBeT30e+65h59++onI
yEjf/pYtW/rKzw4ZMoQ333yTl156iTlz5pCcnEyNGjUIDw/nnnvuoWfPnvTs2fOUa+zfv59t27bRp08fAN9SckePHuXJJ59k2bJlXHDBBWzbto0dO3Zw+eWXl+hekpKSWL9+Pe+++y6QV4grIyODSpUq0apVK2rXrg3A1Vdf7Uv00dHRLFmyBICtW7fSr18/tm/fzpEjR4iMjPSd+6abbqJy5cpUrlyZWrVqsWPHDpYvX06fPn2oWrUqkFexUkSCQwmd/4xJn+70UrInlqi79dZbefrpp+ncuTMtW7b01TVPTk7m008/5d1332XixIn885//LPTaM2fOZNeuXaxZs4awsDDq1atX7NK63333HSEhIdSqVQvnHBMmTKBbt26ntFm6dGmRSusOHz6cRx55hF69erF06VISExML/HmotK5I2aKEXgLh4eF069aN+++/nzfeeAPIK3F78OBBevToQfv27alfv/4px1SvXp26deuyYMECbrnlFg4fPsyxY8fIysqiVq1ahIWFsWTJEn744YdixbJr1y6GDRvGgw8+iJnRrVs3/va3v9G5c2fCwsL45ptvirU+6smldadPn15Ia7juuusYNGgQo0aNIjc3lw8++EBrn0qxvNivZ+GNgH6RT5RyJOVfmUvoRZlmWBb079+f+fPn+4Yt9u/fT+/evcnJycE5x0svvXTGMW+99Rb33XcfTz31FGFhYcydO5f+/ftz8803Ex0dTXx8PI0bNy702ieGiE5MW/zd737HI488AuQNB2VmZhIXF4dzjoiICBYsWFDk+0pMTOT222+nZs2adO7cme+///6c7ePi4ujXrx/NmzenVq1atGrVqsjXEpHAUvncEho/fjxZWVn8+c9/DnYo5V55/R2QwAh0D70il88tcz308qBPnz5s3ry5SGPkIiLnixJ6CZxY7FlEpCwJ/rviIiISEEroIiIeoYQuIuIRSugiIh5R5h6Kbmoc2OlrTdI3nfP7e/bsISEhryzNv//9b0JCQoiIiADy3vysVKlSQOMJhGuvvZaJEyf6yhKcvH/Xrl1UrlyZI0eOcMMNNzB27Fhq1KgRpEj/Y/HixUycOLFYc+JFpHjKXEI/3y699FLfa/9lpZa3cw7nXInqm8+ZM4cWLVpw5MgRRowYQd++ffn0009LIUqRgr02TFN6g0FDLucwffp0WrduTYsWLXjggQc4fvw4ubm5XHzxxTzyyCM0bdqUbt26sWrVKjp27Ej9+vX5+OOPAZgyZQp9+vShY8eONGjQgLFjx/rO+/zzz9OsWTOaNWvGhAkTgLzSulFRUfTv35+mTZuyfft2hg4dSnx8PE2bNuWZZ54pVuyVKlVi/PjxZGRk8NVXX/l9P5s3b6ZDhw7ExsbSsmVLVq1aBeT1vBMSEujbty+NGjU6pfb7Rx99RKNGjYiLiyt2WWARKT4l9AKkpaUxf/58Pv/8c19p2NmzZwN59U5uvPFGvvrqKypVqkRiYiKffvopc+fO5amnnvKdIzk5mQULFpCamsrbb79Namoqq1atYubMmaxevZqVK1fyP//zP2zYkFfuID09nT/+8Y9s3LiROnXqMG7cOFJSUvjyyy9ZtGgRGzduLNY9hIaGEhMTQ3p6ut/3U7t2bRYtWsS6deuYOXMmDz30kO86a9euZeLEiWzcuJFNmzbxxRdfcPDgQe677z4+/vhj1qxZw08//eTXn4eIFK7CD7kUZPHixaxevZr4+Ly3bQ8dOsQVV1wB5FVnvOGGG4C80rM1atQgNDSU6OhoX4ldgG7dulGzZk0AbrnlFlasWMHhw4e59dZbqVKlim//8uXL6dq1K1dffbXvegCzZs3ijTfeIDc3l59++omNGzcSFRVVrPs4UdrB3/s5fPgwDz74IF9++SWhoaG+BTEA2rZty69//WsAWrRoQWZmJqGhoTRs2JCrr74ayKt98/e//71YsYtI8SihF8A5x+DBg8+o1ZKbm3vKg9KCytACZyxSUdiiFRde
eKHvc0ZGBq+88grJyclcfPHF3HXXXcUuq5ubm0taWhpNmjRhy5Ytft3Piy++yBVXXMGMGTM4evQo1apV8x2jsroiZYOGXArQpUsX3nnnHXbv3g3kzYbZsmVLsc6RlJTEvn37OHjwIAsXLqR9+/Z06NCB+fPnc+jQIbKzs1m4cCEdOpxZTOiXX36hevXqXHTRRWzfvp1//OMfxbr2kSNHeOKJJ/jNb35DVFSU3/eTlZVF7dq1MTOmT59OYUXdoqKiyMjI4Pvvv8c5x6xZs4oVv4gUX5nroRc2zfB8iY6OZsyYMXTp0oXjx48TFhbGpEmTfEMLRdGqVSt69+7NTz/9xMCBA33TDO+8805fmdn777+f6Oho3zqmJ8TFxREVFUXjxo256qqraN++fZGu2a9fPypXrszhw4fp2rUr7733XkDu58EHH+S2225j6tSpvpWLzqVq1apMmjSJG2+8kQsvvJD27dsX+3+IIlI8Kp9bSqZMmUJaWhovv/xysEMp87z6O1CRFWfaYs7PZ64dcDYqn1t4+VwNuYiIeESZG3LxiiFDhgQ7BBGpYNRDFxHxCCV0ERGPUEIXEfEIJXQREY8ocw9FA12l7feTOhfaJiQkhOjoaHJzc2nSpAnTp0+natWqRb7G8uXLGTZsGGFhYaxcudL3Wr+IyPnkVw/dzP5oZl+ZWZqZzTKz8EAFdj5VqVKF1NRU0tLSqFSpEpMmTSrysceOHWPmzJmMGjWK1NTUIiVzvRovIqWhxAndzOoADwHxzrlmQAhwR6ACC5YOHTr43tqcMWOGr9zsfffdx7FjxwCoVq0ajz76KM2bN+cvf/kL77zzDqNHj6Z///4453j88cdp1qwZ0dHRzJkzB4ClS5fSoUMHevXqRVRUFJmZmTRu3JhBgwbRsGFD+vfvz+LFi2nfvj0NGjQgOTkZyKvYeM011xAbG0u7du34+uuvAZg2bRp9+/ale/fuNGjQgBEjRvju4f/+7/+Ii4ujefPmvsU7Dhw4wODBg2ndujWxsbEqZyviQf4OuYQCVczsKFAVKNc1UnNzc/nkk0/o3r07mzZtYs6cOXz22WeEhYXxwAMPMHPmTAYMGMCBAwdo06YNL774IpBXy7xnz57cdtttzJs3j9TUVL788kt2795Nq1atuO6664C8MrNpaWlERkaSmZnJt99+y9y5c5k6dSqtWrXi7bffZsWKFbz//vs899xzLFiwgMaNG7N8+XJCQ0NZvHgxTz75JPPmzQMgNTWVdevWUblyZRo1asTw4cMJDw/n3nvvZdmyZURGRrJ3714Ann32WTp37szUqVPZt28frVu3pkuXLqcUBBOR8q3ECd05t83MxgNbgENAknMu6fR2ZjYUGApw5ZVXlvRyperQoUO+OisdOnTgnnvuYfLkyaxZs8ZXc+XQoUPUqlULyBtzv/XWW896rhUrVnDnnXcSEhLCr371Kzp27Mjq1au56KKLaN26NZGRkb62kZGRREdHA9C0aVMSEhIws1PK1mZlZTFw4EAyMjIwM44ePeo7PiEhwbe8XFRUFD/88AM///wz1113ne86l1xyCZBXKOz9999n/PjxAOTk5LBlyxa9ci/iISVO6GZWE+gNRAL7gLlmdpdzbsbJ7Zxzk4HJkFfLxY9YS82JMfSTOecYOHAgf/nLX85oHx4eTkhISLGvc3pv+OQCVwWVrR09ejTXX3898+fPJzMzk06dOp31+MLK1jrnmDdvHo0aNSp23CJSPvjzULQL8L1zbpdz7ijwHtAuMGEFX0JCAu+++y47d+4EYO/evfzwww+FHtehQwfmzJnDsWPH2LVrF8uWLaN169YljiMrK4s6deoAeePmhWnbti3Lli3j+++/98UNeYttTJgwwVf2dt26dSWOSUTKJn/G0LcAbc2sKnlDLglAyrkPKVxRphmeD1FRUYwdO5auXbv6ys2+9tprXHXVVec8rk+fPqxcuZLmzZtjZjz//PNcfvnlpKenlyiOESNGMHDg
QMaOHctNN91UaPuIiAgmT55M3759OX78OLVq1WLRokWMHj2ahx9+mJiYGI4fP05kZCQffvhhiWISkbLJr/K5ZvY00A/IBdYBQ5xzhwtqX5HK50rR6XfAe1Q+N7CKWj7Xr1kuzrkxwBh/ziEiIoGhV/9FRDxCCV1ExCOU0EVEPEIJXUTEI5TQRUQ8osyVz32xX8+Anu/ROYXPtT5RPveEO+64g5EjR55RFvepp57i448/pkePHrzwwgtFuv7SpUupVKkS7dp55p0rESmjylxCD4azvfoP+Mri3nXXXQBMnjyZvXv3Fvm1/9zcXJYuXUq1atWU0EWk1CmhF2DKlCm88847/OMf/+CTTz5h//79ZGdn07JlS0aNGkWbNm0YPHgwu3fvJiIigjfffJMrr7ySQYMGER4ezrp166hTpw6ff/45ISEhzJgxgwkTJtChg/deehCRskEJnVOrLQKMGjWKIUOGsGLFCl9ZXMirg36iJ3/zzTczcOBABg4cyNSpU3nooYdYsGABAFu3bvUl8sTERKpVq8Zjjz12/m9MRCoUJXQKHnI5l5UrV/Lee+8B8Lvf/e6UBSZuv/32ElVjFBHxh2a5lAItGiEiwaCEXkLt2rVj9uzZQN7D04LGxqtXr87+/fvPZ2giUkGVuSGXokwzDLTTx9C7d+/OuHHjznnMhAkTuPvuu3nhhRd8D0XP5uabb+a2225j4cKFeigqIqWqzCX0YDix+PPpTl9QIjs72/f5qquu4p//PLNE6OnHNGzYkPXr1/sdo4hIYTTkIiLiEUroIiIeoYQuIuIRSugiIh6hhC4i4hGa5SIinpKYmFgqbcuDMpfQt45cHtDzFWUF8NPL5y5YsIB69er5dd0FCxbQsGFDoqKiinXc6eV2J02aRNWqVRkwYIBf8YiI95W5hB4Mp9dyWb91H+u37vNt5+bmEhp65o8qpu7FBZ5zwYIF9OzZ86wJvaDzAWeU2x02bFiR70O8r97Ij4rcNnPcTaUYiZRFGkMvwMJ33uahu+9kSL9eDL2jN6tXruDBQf1833/uT4/7XiIaOXIkUVFRxMTE8Nhjj/H555/z/vvv8/jjj9OiRQs2b95Mp06dePjhh4mPj+eVV17hgw8+oE2bNsTGxtKlSxd27NhBZmYmkyZN4q9//SstWrRg+fLlJCYmMn78eABSU1Np27YtMTEx9OnTh59//hmATp068cQTT9C6dWsaNmzI8uWB/VeOiJQP6qFz6qv/kZGRPD0h7zX+TWnreTdpBTVq1mT1yhVnPXbPnj3Mnz+f9PR0zIx9+/Zx8cUX06tXr1NK7wIcOXKElJQUAH7++We++OILzIwpU6bw/PPP8+KLLzJs2LBTyu1++umnvuMHDBjAhAkT6NixI0899RRPP/00L7/8MpDX609OTubjjz/m6aefZvHixYH/QYlImaaEztmHXADaduhEjZo1z3lsjRo1CA8P55577qFnz5707FnwEnr9+v2nh79161b69evH9u3bOXLkCJGRkee8TlZWFvv27aNjx44ADBw4kNtvv933/b59+wLQsmVLMjMzz3kuEfEmDbmcQ5WqVX2fQ0JCcMeP+7aPHD4MQGhoKMnJydx22218+OGHdO/evcDznVxWd/jw4Tz44INs2LCB119/nZycHL9irVy5si/O3Nxcv84lIuWTeuhF9Ou6V/BdxtccOXyYnJwcVn32L3p160x2djYHDx6kR48etG/fnvr16wOFl83NysqiTp06AEyfPt23v3r16vzyyy9ntK9RowY1a9Zk+fLldOjQgbfeesvXWxc5q8QaRWyXVbpxyHnjV0I3s4uBKUAzwAGDnXMr/TlnUaYZBsPlv65L1563cGuXdvz6iqto3DQGgP3799O7d29ycnJwzvHSSy8BcMcdd3Dvvffy6quv8u67755xvsTERG6//XZq1qxJ586d+f7774Ezy+2ebPr06QwbNoyDBw9Sv379Akv2ikjFZM65kh9sNh1Y7pybYmaV
gKrOuX0FtY+Pj3cnHgqesGnTJpo0aVLiGErDyVMWz+Vc0xal6Mri70BZVaxpi+H/VbSGpdBDf23YmaWlC5Lz80tFatcv8okitZsS/mnhjfKVlxeLzGyNcy6+sHYl7qGbWQ3gOmAQgHPuCHCkpOcTERH/+PNQNBLYBbxpZuvMbIqZnbGYppkNNbMUM0vZtWuXH5cTEZFz8SehhwJxwN+cc7HAAWDk6Y2cc5Odc/HOufiIiAg/LiciIufiT0LfCmx1zq3K336XvAQvIiJBUOKE7pz7N/CjmTXK35UAbAxIVCIiUmz+zkMfDszMn+HyHXC3/yGJiEhJ+JXQnXOpQKFTaYoj0NOIinq+Z599lrfffpuQkBCOHHP8adxfiYkN6K2JiJQqvSkKrFy5kg8//JC1a9dSuXJl/rV+M0ePlHwG5rnK44qIlBbVcgG2b9/OZZdd5quHUvOSS6l1eW3SUtcy4Jau3N71Wv6rZwIHsvdzOCeH0Y/8nlu7tCM2NpYlS5YAMG3aNHr16kXnzp1JSEgA4IUXXqBVq1bExMQwZsyYoN2fiFQM6kYCXbt25ZlnnqFhw4Z06dKF+OtvonnLVoz4/WCef20qzVrEkb3/FyqHV2HmG5MwM+Yt/pxK2f+ma9eufPPNNwCsXbuW9evXc8kll5CUlERGRgbJyck45+jVqxfLli3juuuuC/LdiohXqYcOVKtWjTVr1jB58mQiIiIY8fvBzJ05jYhav6JZi7yZmNWqX0RoaCjrVn/BTX1/C0Djxo256qqrfAn9hhtu4JJLLgEgKSmJpKQkYmNjiYuLIz09nYyMjKDcn4hUDOqh5wsJCaFTp0506tSJi35dn9nTpxT7HCeXx3XOMWrUKO67775ineNQWlqR21Zp1qxY5xYRb1MPHfj6669P6T1//dUG6v+mIbt27iAtdS0AB7L3k5ubS1zra/h4/lwAvvnmG7Zs2UKjRo3OOGe3bt2YOnUq2dnZAGzbto2dO3eeh7sRkYqqzPXQg1H9LDs7m+HDh7Nv3z5CQ0OJqHMVT/33y/T+bX/GPfUEh3MOUTm8CpNnzaffgHsY++Sj3NqlHdWqVGbatGm+h6kn69q1K5s2beKaa64B8oZ1ZsyYQa1atc737YlIBVGLnsSmAAAK0ElEQVTmEnowtGzZks8//9y3faJ8bs1LLmXG+4vOaP/nl14DTi2fO2jQIAYNGnRKuz/84Q/84Q9/KIWIRUTOpCEXERGPUA9dpIKLnh5d5LYbBm4oxUjEX2Wih+7PqklSvunPXiRwgp7Qw8PD2bNnj/5iV0DOOfbs2UN4eHiwQxHxhKAPudStW5etW7dSllYz2vHzoSK127S/SsCvfXTHjiK3DQsJCfj1z7fw8HDq1q0b7DBEPCHoCT0sLIzIyMhgh3GKG4u4EG/muJsCfu1NffoWuW2T9E0Bv76IlF9BH3IREZHAUEIXEfEIJXQREY9QQhcR8QgldBERj1BCFxHxCCV0ERGPUEIXEfEIJXQREY9QQhcR8QgldBERj1BCFxHxCCV0ERGP8LvaopmFACnANudcT/9DEpGyalPjJkVr2Om10g1EzioQPfQ/AKrjKiISZH4ldDOrC9wETAlMOCIiUlL+9tBfBkYAxwtqYGZDzSzFzFLK0qpEIiJeU+KEbmY9gZ3OuTXnauecm+yci3fOxUdERJT0ciIiUgh/eujtgV5mlgnMBjqb2YyARCUiIsVW4oTunBvlnKvrnKsH3AH80zl3V8AiExGRYtE8dBERj/B7HjqAc24psDQQ5xIRkZJRD11ExCOU0EVEPEIJXUTEI5TQRUQ8QgldRMQjlNBFRDxCCV1ExCOU0EVEPEIJXUTEI5TQRUQ8QgldRMQjlNBFRDxCCV1ExCOU0EVEPEIJXUTEIwJSD128Y+vI5UVqV3dch1KORESKSz10ERGPUEIXEfEIDbmUY68N+2eR2v1+UudSjkREygIl9ArgxX49i9y2X+QTpRiJiJQmDbmIiHiEErqI
iEcooYuIeIQSuoiIRyihi4h4hGa5+COxRjHaZpVeHCIi+NFDN7MrzGyJmW00s6/M7A+BDExERIrHnx56LvCoc26tmVUH1pjZIufcxgDF5inR06OL1O6dUo5DRLyrxD1059x259za/M/7gU1AnUAFJiIixROQh6JmVg+IBVYF4nwiIlJ8fj8UNbNqwDzgYefcL2f5/lBgKMCVV17p7+WkjEhMTCyVtiJScn710M0sjLxkPtM5997Z2jjnJjvn4p1z8REREf5cTkREzsGfWS4GvAFscs69FLiQRESkJPzpobcHfgd0NrPU/K8eAYpLRESKqcRj6M65FYAFMBYREfGDXv0XEfEIJXQREY9QQhcR8QgldBERj1BCFxHxCCV0ERGPUEIXEfEIJXQREY9QQhcR8QgldBERj1BCFxHxCCV0ERGPUEIXEfEIJXQREY9QQhcR8QgldBERj1BCFxHxCCV0ERGPUEIXEfEIJXQREY9QQhcR8QgldBERj1BCFxHxCCV0ERGPUEIXEfEIJXQREY9QQhcR8Qi/ErqZdTezr83sWzMbGaigRESk+Eqc0M0sBHgNuBGIAu40s6hABSYiIsXjTw+9NfCtc+4759wRYDbQOzBhiYhIcZlzrmQHmt0GdHfODcnf/h3Qxjn34GnthgJD8zcbAV+XPFw5zWXA7mAHIXIW+t0MrKuccxGFNQot7Sicc5OByaV9nYrIzFKcc/HBjkPkdPrdDA5/hly2AVectF03f5+IiASBPwl9NdDAzCLNrBJwB/B+YMISEZHiKvGQi3Mu18weBP4BhABTnXNfBSwyKQoNZUlZpd/NICjxQ1ERESlb9KaoiIhHKKGLiHiEErqIiEcooYuIeIQSejliZiFmNj7YcYhI2aSEXo44544B1wY7DpGzMbOGZvapmaXlb8eY2Z+CHVdFommL5YyZ/Q2oA8wFDpzY75x7L2hBiQBm9i/gceB151xs/r4051yz4EZWcZR6LRcJuHBgD9D5pH0OUEKXYKvqnEs2s5P35QYrmIpICb2ccc7dHewYRAqw28yuJq+DcaIi6/bghlSxaAy9nNE4pZRhvwdeBxqb2TbgYeD+4IZUsWgMvZzROKWUdWZ2IXCBc25/sGOpaDTkUv5onFLKFDN7pID9ADjnXjqvAVVgSujlj8YppaypHuwAJI+GXMoZM6tPXmnSdsDPwPfAXc65zGDGJWJmlzjn9p62L9I5932wYqpolNDLKY1TSlljZp8BNzrnfsnfbgLM1fOd80dDLuXM6eOV+eOUWcAa51xqUIISyfMc8IGZ3UTegvB/B/oHN6SKRQm9/InP//ogf7snsB4YZmZznXPPBy0yqdCccx+ZWRiQRN64eh/n3DdBDqtC0ZBLOWNmy4Aezrns/O1qwEdAd/J66VHBjE8qHjObQP5D+nwJwGYgE8A591AQwqqQ1EMvf2oBh0/aPgr8yjl3yMwOF3CMSGlKOW17TVCiECX0cmgmsMrMFuZv3wy8nf+QdGPwwpKKyjk3HXwP6nPyq4JiZiFA5WDGVtFoyKUcMrNW5E1bBPjMOXd6D0nkvDOzL4Aupw0HJjnn2p37SAkU9dDLp7XANvL//MzsSufcluCGJEL4iWQO4JzLNrOqwQyoolFCL2fMbDgwBtgBHAOMvAdSMcGMSwQ4YGZxzrm1AGbWEjgU5JgqFA25lDNm9i3Qxjm3J9ixiJwsfyhwNvATeR2Ny4F+zjk9JD1P1EMvf34k70UikTLFObfazBqT91IRwNfOuaPBjKmiUQ+9nDGzN8j7C/MRJ01fVEU7CTYzG3C2/c65v5/vWCoq9dDLny35X5Xyv0TKilYnfQ4n7wWjteSVAJDzQD10ESkVZnYxMNs51z3YsVQU6qGXE2b2snPuYTP7gFNfswbAOdcrCGGJnMsBIDLYQVQkSujlx1v5/x0f1ChECnBaZ+MCIAp4J3gRVTwacikn9PKQlHVm1vGkzVzgB+fc1mDFUxEpoZcTZrbWOReX/3me
c+7WYMckImXLBcEOQIrs5FWh6wctCpECmFlfM8swsywz+8XM9pvZL8GOqyLRGHr54Qr4LFJWPA/c7JzbFOxAKiol9PKjeX5vx4AqJ/V8DHDOuYuCF5oIADuUzINLY+giEhBm9gp59VsWcOpbzO8FLagKRj10EQmUi4CDQNeT9jlACf08UQ9dRPxiZlc4534s4Hs9nXMfnu+YKirNchERfy0ys3qn7zSzu4FXzns0FZgSuoj46xEgycwanNhhZqPy93cs8CgJOI2hi4hfnHMfm9lh4BMzuwUYArQGrnPO/Rzc6CoWjaGLSECYWQdgPvA58FvnXE6QQ6pwlNBFxC9mtp+82SwGVAaOctJ6t3pH4vxRQhcR8Qg9FBUR8QgldBERj1BCF88ys0vNLDX/699mtu2k7SKvx2pmK8ysRWnGKhIImrYonuWc2wO0ADCzRCDbOacVn8Sz1EOXCsnMPjCzNWb2lZkNyd8XamZvmdkGM0szs4dOOybEzGbk/89BpMxRD10qqoHOub1mVhVIMbN5QEPgMudcNPhWrT8hDJgFrHHO/ff5D1ekcOqhS0X1RzP7ElgJ1AWuBr4FGpnZq2bWDcg6qf0UlMyljFNClwrHzLoA1wFtnXPNgfVAeP6YewywHPg98PpJh30OJJhZ5fMdr0hRKaFLRVQD2OucO2RmTYFWAGYWQd7LdnOBp4C4k455HVgMzDYzDVVKmaSELhXRR0BVM9sIjAVW5e+/AlhmZqnAm8CTJx/knHse2AhMMzP93ZEyR6/+i4h4hHoZIiIeoYQuIuIRSugiIh6hhC4i4hFK6CIiHqGELiLiEUroIiIe8f8BCDR4OGQ5FbYAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ "