diff --git a/python/ModelSnapshots/CNN-33767.h5 b/python/ModelSnapshots/CNN-33767.h5
new file mode 100644
index 0000000..a4aa55f
Binary files /dev/null and b/python/ModelSnapshots/CNN-33767.h5 differ
diff --git a/python/ModelSnapshots/LSTM-v1-01605.h5 b/python/ModelSnapshots/LSTM-v1-01605.h5
new file mode 100644
index 0000000..536c217
Binary files /dev/null and b/python/ModelSnapshots/LSTM-v1-01605.h5 differ
diff --git a/python/ModelSnapshots/LSTM-v2-00398.h5 b/python/ModelSnapshots/LSTM-v2-00398.h5
new file mode 100644
index 0000000..f74ae40
Binary files /dev/null and b/python/ModelSnapshots/LSTM-v2-00398.h5 differ
diff --git a/python/Models/CNN.pb b/python/Models/CNN.pb
new file mode 100644
index 0000000..6f968d8
Binary files /dev/null and b/python/Models/CNN.pb differ
diff --git a/python/Models/LSTM.pb b/python/Models/LSTM.pb
new file mode 100644
index 0000000..76d6eab
Binary files /dev/null and b/python/Models/LSTM.pb differ
diff --git a/python/Step_01_UserData.ipynb b/python/Step_01_UserData.ipynb
new file mode 100644
index 0000000..7bc7721
--- /dev/null
+++ b/python/Step_01_UserData.ipynb
@@ -0,0 +1,340 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "\n",
+ "from scipy.odr import *\n",
+ "from scipy.stats import *\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import os\n",
+ "import time\n",
+ "import matplotlib.pyplot as plt\n",
+ "from multiprocessing import Pool"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def cast_to_int(row):\n",
+ " try:\n",
+ " return np.array([a if float(a) >= 0 else 0 for a in row[2:-1]], dtype=np.uint8)\n",
+ " except Exception as e:\n",
+ " return None\n",
+ " \n",
+ "def load_csv(file):\n",
+ " temp_df = pd.read_csv(file, header=None, names = [\"UserID\", \"Age\", \"Gender\"], delimiter=\";\")\n",
+ " return temp_df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 298 ms, sys: 443 ms, total: 741 ms\n",
+ "Wall time: 937 ms\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "pool = Pool(os.cpu_count() - 2)\n",
+ "data_files = [\"DataStudyCollection/%s\" % file for file in os.listdir(\"DataStudyCollection\") if file.endswith(\".csv\") and \"userData\" in file]\n",
+ "df_lst = pool.map(load_csv, data_files)\n",
+ "dfAll = pd.concat(df_lst)\n",
+ "dfAll = dfAll.sort_values(\"UserID\")\n",
+ "dfAll = dfAll.reset_index(drop=True)\n",
+ "pool.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "24.166666666666668"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dfAll.Age.mean()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "1.4245742398014511"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dfAll.Age.std()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "21"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dfAll.Age.min()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "26"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dfAll.Age.max()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "
\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " UserID | \n",
+ " Age | \n",
+ " Gender | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 1 | \n",
+ " 23 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 2 | \n",
+ " 24 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 3 | \n",
+ " 25 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 4 | \n",
+ " 25 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 5 | \n",
+ " 26 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 5 | \n",
+ " 6 | \n",
+ " 23 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 6 | \n",
+ " 7 | \n",
+ " 21 | \n",
+ " female | \n",
+ "
\n",
+ " \n",
+ " 7 | \n",
+ " 8 | \n",
+ " 24 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 8 | \n",
+ " 9 | \n",
+ " 24 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 9 | \n",
+ " 10 | \n",
+ " 24 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 10 | \n",
+ " 11 | \n",
+ " 25 | \n",
+ " female | \n",
+ "
\n",
+ " \n",
+ " 11 | \n",
+ " 12 | \n",
+ " 26 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 12 | \n",
+ " 13 | \n",
+ " 22 | \n",
+ " female | \n",
+ "
\n",
+ " \n",
+ " 13 | \n",
+ " 14 | \n",
+ " 24 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 14 | \n",
+ " 15 | \n",
+ " 24 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 15 | \n",
+ " 16 | \n",
+ " 26 | \n",
+ " female | \n",
+ "
\n",
+ " \n",
+ " 16 | \n",
+ " 17 | \n",
+ " 26 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ " 17 | \n",
+ " 18 | \n",
+ " 23 | \n",
+ " male | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " UserID Age Gender\n",
+ "0 1 23 male\n",
+ "1 2 24 male\n",
+ "2 3 25 male\n",
+ "3 4 25 male\n",
+ "4 5 26 male\n",
+ "5 6 23 male\n",
+ "6 7 21 female\n",
+ "7 8 24 male\n",
+ "8 9 24 male\n",
+ "9 10 24 male\n",
+ "10 11 25 female\n",
+ "11 12 26 male\n",
+ "12 13 22 female\n",
+ "13 14 24 male\n",
+ "14 15 24 male\n",
+ "15 16 26 female\n",
+ "16 17 26 male\n",
+ "17 18 23 male"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dfAll"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_02_ReadData.ipynb b/python/Step_02_ReadData.ipynb
new file mode 100644
index 0000000..b657d6a
--- /dev/null
+++ b/python/Step_02_ReadData.ipynb
@@ -0,0 +1,338 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "## This notebook creates one dataframe from all participants' data\n",
+    "## It also removes ~0.4% of the data, as it is corrupted"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "\n",
+ "from scipy.odr import *\n",
+ "from scipy.stats import *\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import os\n",
+ "import time\n",
+ "import matplotlib.pyplot as plt\n",
+ "import ast\n",
+ "from multiprocessing import Pool, cpu_count\n",
+ "\n",
+ "import scipy\n",
+ "\n",
+ "from IPython import display\n",
+ "from matplotlib.patches import Rectangle\n",
+ "\n",
+ "from sklearn.metrics import mean_squared_error\n",
+ "import json\n",
+ "\n",
+ "import scipy.stats as st\n",
+ "from sklearn.metrics import r2_score\n",
+ "\n",
+ "\n",
+ "from matplotlib import cm\n",
+ "from mpl_toolkits.mplot3d import axes3d\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "import copy\n",
+ "\n",
+ "from sklearn.model_selection import LeaveOneOut, LeavePOut\n",
+ "\n",
+ "from multiprocessing import Pool"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def cast_to_int(row):\n",
+ " try:\n",
+ " return np.array([a if float(a) >= 0 else 0 for a in row[2:-1]], dtype=np.uint8)\n",
+ " except Exception as e:\n",
+ " return None\n",
+ " \n",
+ "def load_csv(file):\n",
+ " temp_df = pd.read_csv(file, delimiter=\";\")\n",
+ " temp_df.Image = temp_df.Image.str.split(',')\n",
+ " temp_df.Image = temp_df.Image.apply(cast_to_int)\n",
+ " return temp_df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "['DataStudyCollection/17_studyData.csv', 'DataStudyCollection/2_studyData.csv', 'DataStudyCollection/12_studyData.csv', 'DataStudyCollection/15_studyData.csv', 'DataStudyCollection/5_studyData.csv', 'DataStudyCollection/1_studyData.csv', 'DataStudyCollection/14_studyData.csv', 'DataStudyCollection/10_studyData.csv', 'DataStudyCollection/13_studyData.csv', 'DataStudyCollection/18_studyData.csv', 'DataStudyCollection/6_studyData.csv', 'DataStudyCollection/16_studyData.csv', 'DataStudyCollection/3_studyData.csv', 'DataStudyCollection/7_studyData.csv', 'DataStudyCollection/8_studyData.csv', 'DataStudyCollection/9_studyData.csv', 'DataStudyCollection/11_studyData.csv', 'DataStudyCollection/4_studyData.csv']\n",
+ "CPU times: user 1.86 s, sys: 1.03 s, total: 2.89 s\n",
+ "Wall time: 17.3 s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "pool = Pool(cpu_count() - 2)\n",
+ "data_files = [\"DataStudyCollection/%s\" % file for file in os.listdir(\"DataStudyCollection\") if file.endswith(\".csv\") and \"studyData\" in file]\n",
+ "print(data_files)\n",
+ "df_lst = pool.map(load_csv, data_files)\n",
+ "dfAll = pd.concat(df_lst)\n",
+ "pool.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "1010014"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df = dfAll[dfAll.Image.notnull()]\n",
+ "len(df)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "loaded 1013841 values\n",
+ "removed 3827 values (thats 0.377%)\n",
+ "new df has size 1010014\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"loaded %s values\" % len(dfAll))\n",
+ "print(\"removed %s values (thats %s%%)\" % (len(dfAll) - len(df), round((len(dfAll) - len(df)) / len(dfAll) * 100, 3)))\n",
+ "print(\"new df has size %s\" % len(df))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df = df.reset_index(drop=True)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " Timestamp | \n",
+ " Current_Task | \n",
+ " Task_amount | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " RepetitionID | \n",
+ " Actual_Data | \n",
+ " Is_Pause | \n",
+ " Image | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 17 | \n",
+ " 1547138602677 | \n",
+ " 0 | \n",
+ " 34 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " False | \n",
+ " False | \n",
+ " [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 17 | \n",
+ " 1547138602697 | \n",
+ " 0 | \n",
+ " 34 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " False | \n",
+ " False | \n",
+ " [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 17 | \n",
+ " 1547138602796 | \n",
+ " 0 | \n",
+ " 34 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " False | \n",
+ " False | \n",
+ " [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 17 | \n",
+ " 1547138602817 | \n",
+ " 0 | \n",
+ " 34 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " False | \n",
+ " False | \n",
+ " [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 17 | \n",
+ " 1547138602863 | \n",
+ " 0 | \n",
+ " 34 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " False | \n",
+ " False | \n",
+ " [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID Timestamp Current_Task Task_amount TaskID VersionID \\\n",
+ "0 17 1547138602677 0 34 0 0 \n",
+ "1 17 1547138602697 0 34 0 0 \n",
+ "2 17 1547138602796 0 34 0 0 \n",
+ "3 17 1547138602817 0 34 0 0 \n",
+ "4 17 1547138602863 0 34 0 0 \n",
+ "\n",
+ " RepetitionID Actual_Data Is_Pause \\\n",
+ "0 0 False False \n",
+ "1 0 False False \n",
+ "2 0 False False \n",
+ "3 0 False False \n",
+ "4 0 False False \n",
+ "\n",
+ " Image \n",
+ "0 [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... \n",
+ "1 [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... \n",
+ "2 [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... \n",
+ "3 [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... \n",
+ "4 [1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, ... "
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.to_pickle(\"DataStudyCollection/AllData.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]"
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "sorted(df.userID.unique())"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_05_CNN_PreprocessData.ipynb b/python/Step_05_CNN_PreprocessData.ipynb
new file mode 100644
index 0000000..bfafe51
--- /dev/null
+++ b/python/Step_05_CNN_PreprocessData.ipynb
@@ -0,0 +1,966 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "\n",
+ "from scipy.odr import *\n",
+ "from scipy.stats import *\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import os\n",
+ "import time\n",
+ "import matplotlib.pyplot as plt\n",
+ "import ast\n",
+ "from multiprocessing import Pool, cpu_count\n",
+ "\n",
+ "import scipy\n",
+ "\n",
+ "from IPython import display\n",
+ "from matplotlib.patches import Rectangle\n",
+ "\n",
+ "from sklearn.metrics import mean_squared_error\n",
+ "import json\n",
+ "\n",
+ "import scipy.stats as st\n",
+ "from sklearn.metrics import r2_score\n",
+ "\n",
+ "\n",
+ "from matplotlib import cm\n",
+ "from mpl_toolkits.mplot3d import axes3d\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "import copy\n",
+ "\n",
+ "from sklearn.model_selection import LeaveOneOut, LeavePOut\n",
+ "\n",
+ "from multiprocessing import Pool\n",
+ "import cv2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " Timestamp | \n",
+ " Current_Task | \n",
+ " Task_amount | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " RepetitionID | \n",
+ " Actual_Data | \n",
+ " Is_Pause | \n",
+ " Image | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 7919 | \n",
+ " 17 | \n",
+ " 1547138928692 | \n",
+ " 1 | \n",
+ " 680 | \n",
+ " 6 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 7920 | \n",
+ " 17 | \n",
+ " 1547138928735 | \n",
+ " 1 | \n",
+ " 680 | \n",
+ " 6 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 7921 | \n",
+ " 17 | \n",
+ " 1547138928773 | \n",
+ " 1 | \n",
+ " 680 | \n",
+ " 6 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 7922 | \n",
+ " 17 | \n",
+ " 1547138928813 | \n",
+ " 1 | \n",
+ " 680 | \n",
+ " 6 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... | \n",
+ "
\n",
+ " \n",
+ " 7923 | \n",
+ " 17 | \n",
+ " 1547138928861 | \n",
+ " 1 | \n",
+ " 680 | \n",
+ " 6 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID Timestamp Current_Task Task_amount TaskID VersionID \\\n",
+ "7919 17 1547138928692 1 680 6 2 \n",
+ "7920 17 1547138928735 1 680 6 2 \n",
+ "7921 17 1547138928773 1 680 6 2 \n",
+ "7922 17 1547138928813 1 680 6 2 \n",
+ "7923 17 1547138928861 1 680 6 2 \n",
+ "\n",
+ " RepetitionID Actual_Data Is_Pause \\\n",
+ "7919 0 True False \n",
+ "7920 0 True False \n",
+ "7921 0 True False \n",
+ "7922 0 True False \n",
+ "7923 0 True False \n",
+ "\n",
+ " Image \n",
+ "7919 [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... \n",
+ "7920 [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... \n",
+ "7921 [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... \n",
+ "7922 [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... \n",
+ "7923 [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... "
+ ]
+ },
+ "execution_count": 10,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dfAll = pd.read_pickle(\"DataStudyCollection/AllData.pkl\")\n",
+ "df = dfAll[(dfAll.Actual_Data == True) & (dfAll.Is_Pause == False)]\n",
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 39 s, sys: 5.78 s, total: 44.8 s\n",
+ "Wall time: 43.3 s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "def is_max(df):\n",
+ " df_temp = df.copy(deep=True)\n",
+ " max_version = df_temp.RepetitionID.max()\n",
+ " df_temp[\"IsMax\"] = np.where(df_temp.RepetitionID == max_version, True, False)\n",
+ " df_temp[\"MaxRepetition\"] = [max_version] * len(df_temp)\n",
+ " return df_temp\n",
+ "\n",
+ "df_grp = df.groupby([df.userID, df.TaskID, df.VersionID])\n",
+ "pool = Pool(cpu_count() - 1)\n",
+ "result_lst = pool.map(is_max, [grp for name, grp in df_grp])\n",
+ "df = pd.concat(result_lst)\n",
+ "pool.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.Image = df.Image.apply(lambda x: x.reshape(27, 15))\n",
+ "df.Image = df.Image.apply(lambda x: x.clip(min=0, max=255))\n",
+ "df.Image = df.Image.apply(lambda x: x.astype(np.uint8))\n",
+ "df[\"ImageSum\"] = df.Image.apply(lambda x: np.sum(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.to_pickle(\"DataStudyCollection/dfFiltered.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "recorded actual: 1010014, used data: 851455\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"recorded actual: %s, used data: %s\" % (len(dfAll), len(df)))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df = pd.read_pickle(\"DataStudyCollection/dfFiltered.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " Timestamp | \n",
+ " Current_Task | \n",
+ " Task_amount | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " RepetitionID | \n",
+ " Actual_Data | \n",
+ " Is_Pause | \n",
+ " Image | \n",
+ " IsMax | \n",
+ " MaxRepetition | \n",
+ " ImageSum | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 291980 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... | \n",
+ " True | \n",
+ " 0 | \n",
+ " 307 | \n",
+ "
\n",
+ " \n",
+ " 291981 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... | \n",
+ " True | \n",
+ " 0 | \n",
+ " 222 | \n",
+ "
\n",
+ " \n",
+ " 291982 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... | \n",
+ " True | \n",
+ " 0 | \n",
+ " 521 | \n",
+ "
\n",
+ " \n",
+ " 291983 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... | \n",
+ " True | \n",
+ " 0 | \n",
+ " 318 | \n",
+ "
\n",
+ " \n",
+ " 291984 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... | \n",
+ " True | \n",
+ " 0 | \n",
+ " 373 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID Timestamp Current_Task Task_amount TaskID VersionID \\\n",
+ "291980 1 1,54515E+12 33 680 0 2 \n",
+ "291981 1 1,54515E+12 33 680 0 2 \n",
+ "291982 1 1,54515E+12 33 680 0 2 \n",
+ "291983 1 1,54515E+12 33 680 0 2 \n",
+ "291984 1 1,54515E+12 33 680 0 2 \n",
+ "\n",
+ " RepetitionID Actual_Data Is_Pause \\\n",
+ "291980 0 True False \n",
+ "291981 0 True False \n",
+ "291982 0 True False \n",
+ "291983 0 True False \n",
+ "291984 0 True False \n",
+ "\n",
+ " Image IsMax \\\n",
+ "291980 [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... True \n",
+ "291981 [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... True \n",
+ "291982 [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... True \n",
+ "291983 [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... True \n",
+ "291984 [[0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0]... True \n",
+ "\n",
+ " MaxRepetition ImageSum \n",
+ "291980 0 307 \n",
+ "291981 0 222 \n",
+ "291982 0 521 \n",
+ "291983 0 318 \n",
+ "291984 0 373 "
+ ]
+ },
+ "execution_count": 16,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# Label whether the touch is a knuckle (0) or a finger (1)\n",
+ "def f(row):\n",
+ " if row['TaskID'] < 17:\n",
+ " #val = \"Knuckle\"\n",
+ " val = 0\n",
+ " elif row['TaskID'] >= 17:\n",
+ " #val = \"Finger\"\n",
+ " val = 1\n",
+ " return val\n",
+ "df['InputMethod'] = df.apply(f, axis=1)\n",
+ "\n",
+ "def f(row):\n",
+ " if row['TaskID'] < 17:\n",
+ " val = \"Knuckle\"\n",
+ " elif row['TaskID'] >= 17:\n",
+ " val = \"Finger\"\n",
+ " return val\n",
+ "df['Input'] = df.apply(f, axis=1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# Sven's new blob detection\n",
+ "def detect_blobs(image, task):\n",
+ " #image = e.Image\n",
+ " large = np.ones((29,17), dtype=np.uint8)\n",
+ " large[1:28,1:16] = np.copy(image)\n",
+ " temp, thresh = cv2.threshold(cv2.bitwise_not(large), 200, 255, cv2.THRESH_BINARY)\n",
+ " contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n",
+ " contours = [a for a in contours if cv2.contourArea(a) > 8 and cv2.contourArea(a) < 255]\n",
+ " lstBlob = []\n",
+ " lstMin = []\n",
+ " lstMax = []\n",
+ " count = 0\n",
+ " contours.sort(key=lambda a: cv2.contourArea(a))\n",
+ " if len(contours) > 0:\n",
+ " # if two finger or knuckle\n",
+ " cont_count = 2 if task in [1, 6, 7, 18, 23, 24] and len(contours) > 1 else 1\n",
+ " for i in range(1, cont_count + 1):\n",
+ " max_contour = contours[-1 * i]\n",
+ " xmax, ymax = np.max(max_contour.reshape(len(max_contour),2), axis=0)\n",
+ " xmin, ymin = np.min(max_contour.reshape(len(max_contour),2), axis=0)\n",
+ " #croped_im = np.zeros((27,15))\n",
+ " blob = large[max(ymin - 1, 0):min(ymax + 1, large.shape[0]),max(xmin - 1, 0):min(xmax + 1, large.shape[1])]\n",
+ " #croped_im[0:blob.shape[0],0:blob.shape[1]] = blob\n",
+ " #return (1, [croped_im])\n",
+ " lstBlob.append(blob)\n",
+ " lstMin.append(xmax-xmin)\n",
+ " lstMax.append(ymax-ymin)\n",
+ " count = count + 1\n",
+ " return (count, lstBlob, lstMin, lstMax)\n",
+ " else:\n",
+ " return (0, [np.zeros((29, 19))], 0, 0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 11.9 s, sys: 7.51 s, total: 19.4 s\n",
+ "Wall time: 18.6 s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "pool = Pool(os.cpu_count()-2)\n",
+ "temp_blobs = pool.starmap(detect_blobs, zip(df.Image, df.TaskID))\n",
+ "pool.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"BlobCount\"] = [a[0] for a in temp_blobs]\n",
+ "df[\"BlobImages\"] = [a[1] for a in temp_blobs]\n",
+ "df[\"BlobW\"] = [a[2] for a in temp_blobs]\n",
+ "df[\"BlobH\"] = [a[3] for a in temp_blobs]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0 710145\n",
+ "1 128117\n",
+ "2 13193\n",
+ "Name: BlobCount, dtype: int64"
+ ]
+ },
+ "execution_count": 20,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.BlobCount.value_counts()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dfX = df[(df.BlobCount == 1)].copy(deep=True)\n",
+ "dfX.BlobImages = dfX.BlobImages.apply(lambda x : x[0])\n",
+ "dfX.BlobW = dfX.BlobW.apply(lambda x : x[0])\n",
+ "dfX.BlobH = dfX.BlobH.apply(lambda x : x[0])\n",
+ "\n",
+ "dfY = df[(df.BlobCount == 2)].copy(deep=True)\n",
+ "dfY.BlobImages = dfY.BlobImages.apply(lambda x : x[0])\n",
+ "dfY.BlobW = dfY.BlobW.apply(lambda x : x[0])\n",
+ "dfY.BlobH = dfY.BlobH.apply(lambda x : x[0])\n",
+ "\n",
+ "dfZ = df[(df.BlobCount == 2)].copy(deep=True)\n",
+ "dfZ.BlobImages = dfZ.BlobImages.apply(lambda x : x[1])\n",
+ "dfZ.BlobW = dfZ.BlobW.apply(lambda x : x[1])\n",
+ "dfZ.BlobH = dfZ.BlobH.apply(lambda x : x[1])\n",
+ "\n",
+ "df = dfX.append([dfY, dfZ])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+      "Sample Size not Augmented: 154503\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(\"Sample Size not Augmented:\", len(df))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"BlobArea\"] = df[\"BlobW\"] * df[\"BlobH\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "count 154503.0\n",
+ "mean 15.8\n",
+ "std 5.1\n",
+ "min 12.0\n",
+ "25% 12.0\n",
+ "50% 16.0\n",
+ "75% 16.0\n",
+ "max 110.0\n",
+ "Name: BlobArea, dtype: float64"
+ ]
+ },
+ "execution_count": 24,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.BlobArea.describe().round(1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " count | \n",
+ " mean | \n",
+ " std | \n",
+ " min | \n",
+ " 25% | \n",
+ " 50% | \n",
+ " 75% | \n",
+ " max | \n",
+ "
\n",
+ " \n",
+ " Input | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " Finger | \n",
+ " 110839.0 | \n",
+ " 16.6 | \n",
+ " 5.3 | \n",
+ " 12.0 | \n",
+ " 12.0 | \n",
+ " 16.0 | \n",
+ " 16.0 | \n",
+ " 110.0 | \n",
+ "
\n",
+ " \n",
+ " Knuckle | \n",
+ " 43664.0 | \n",
+ " 13.7 | \n",
+ " 3.7 | \n",
+ " 12.0 | \n",
+ " 12.0 | \n",
+ " 12.0 | \n",
+ " 16.0 | \n",
+ " 72.0 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " count mean std min 25% 50% 75% max\n",
+ "Input \n",
+ "Finger 110839.0 16.6 5.3 12.0 12.0 16.0 16.0 110.0\n",
+ "Knuckle 43664.0 13.7 3.7 12.0 12.0 12.0 16.0 72.0"
+ ]
+ },
+ "execution_count": 27,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.groupby(\"Input\").BlobArea.describe().round(1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"BlobSum\"] = df.BlobImages.apply(lambda x: np.sum(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.BlobSum.describe()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 27,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYwAAAD8CAYAAABkbJM/AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAG2hJREFUeJzt3X+MXfV55/H3p3YgDgnYhuyV17bWrmIlcvCGwAgcJYpm48bYpIr5I8kaodqwbrxaSJN0LXXNVlorP5DIqpSClNBawcWO0jiUJsUCU9druFrtSjY/AsEYwnoCJh7L4AQb2EmUpJN99o/zDLmMr5mvZ87MnBt9XtLVnPOc7zn3uXeu/Znz496riMDMzGwsvzfdDZiZWW9wYJiZWREHhpmZFXFgmJlZEQeGmZkVcWCYmVkRB4aZmRVxYJiZWREHhpmZFZk53Q2M10UXXRSLFi2a7ja6+vnPf85555033W2MyX3Wpxd6BPdZp17oEd7c5+OPP/6ziHj3uDcWET15u+yyy6KpHn744eluoYj7rE8v9BjhPuvUCz1GvLlP4LGYwP+7PiRlZmZFHBhmZlbEgWFmZkUcGGZmVsSBYWZmRRwYZmZWxIFhZmZFHBhmZlbEgWFmZkWKPhpE0p8CfwwEcBC4HpgH7AQuBB4H/igifi3pXGAHcBnwCvDvI+JIbucmYAPwG+DzEbEn66uA24EZwDcj4pa6HmA3izY/MJmbZ9OyYa7rch9HbvnEpN6vmdlkGnMPQ9J84PNAX0RcTPWf+lrga8BtEfEe4BRVEJA/T2X9thyHpKW53vuBVcA3JM2QNAP4OrAaWApck2PNzKxBSg9JzQRmSZoJvAM4DnwMuDeXbweuzuk1OU8uXyFJWd8ZEb+KiBeAAeDyvA1ExPMR8WuqvZY1E3tYZmZWtzEDIyKOAX8B/IQqKF6jOgT1akQM57BBYH5OzweO5rrDOf7Czvqodc5UNzOzBhnzHIakOVR/8S8GXgX+nuqQ0pSTtBHYCNBqtWi32+PazqZlw2MPmoDWrO73Md5+J8vQ0FDjeuqmF/rshR7BfdapF3qEevssOen9B8ALEfFTAEnfAz4MzJY0M/ciFgDHcvwxYCEwmIewLqA6+T1SH9G5zpnqbxIRW4GtAH19fdHf31/Q/um6nZCu06Zlw9x68PSn9si1/ZN6v2er3W4z3udwKvVCn73QI7jPOvVCj1BvnyXnMH4CLJf0jjwXsQJ4BngY+FSOWQ/cl9O7cp5c/lB+DvsuYK2kcyUtBpYAjwCPAkskLZZ0DtWJ8V0Tf2hmZlanMfcwIuKApHuBHwDDwBNUf+U/AOyU9NWs3ZWr3AV8S9IAcJIqAIiIQ5LuoQqbYeDGiPgNgKTPAXuorsDaFhGH6nuIZmZWh6L3YUTEFmDLqPLzVFc4jR77S+DTZ9jOzcDNXeq7gd0lvZiZ2fTwO73NzKyIA8PMzIo4MMzMrIgDw8zMijgwzMysiAPDzMyKODDMzKyIA8PMzIo4MMzMrIgDw8zMijgwzMysiAPDzMyKODDMzKyIA8PMzIo4MMzMrIgDw8zMiowZGJLeK+nJjtvrkr4oaa6kvZIO5885OV6S7pA0IOkpSZd2bGt9jj8saX1H/TJJB3OdO/KrYM3MrEHGDIyIeC4iLomIS4DLgF8A3wc2A/siYgmwL+cBVlN9X/cSYCNwJ4CkuVTf2ncF1Tf1bRkJmRzz2Y71VtXy6MzMrDZne0hqBfDjiHgRWANsz/p24OqcXgPsiMp+YLakecCVwN6IOBkRp4C9wKpcdn5E7I+IAHZ0bMvMzBribANjLfCdnG5FxPGcfglo5fR84GjHOoNZe6v6YJe6mZk1yMzSgZLOAT4J3DR6WUSEpKizsTP0sJHqMBetVot2uz2u7WxaNlxjV6drzep+H+Ptd7IMDQ01rqdueqHPXugR3GedeqFHqLfP4sCgOjfxg4h4OedfljQv
Io7nYaUTWT8GLOxYb0HWjgH9o+rtrC/oMv40EbEV2ArQ19cX/f393YaN6brND4xrvVKblg1z68HTn9oj1/ZP6v2erXa7zXifw6nUC332Qo/gPuvUCz1CvX2ezSGpa/jt4SiAXcDIlU7rgfs66uvyaqnlwGt56GoPsFLSnDzZvRLYk8tel7Q8r45a17EtMzNriKI9DEnnAR8H/mNH+RbgHkkbgBeBz2R9N3AVMEB1RdX1ABFxUtJXgEdz3Jcj4mRO3wDcDcwCHsybmZk1SFFgRMTPgQtH1V6humpq9NgAbjzDdrYB27rUHwMuLunFzMymh9/pbWZmRRwYZmZWxIFhZmZFHBhmZlbEgWFmZkUcGGZmVsSBYWZmRRwYZmZWxIFhZmZFzubDB22CFk3yhx6+lSO3fGLa7tvMfjd4D8PMzIo4MMzMrIgDw8zMijgwzMysiAPDzMyKODDMzKyIA8PMzIoUBYak2ZLulfQjSc9K+pCkuZL2SjqcP+fkWEm6Q9KApKckXdqxnfU5/rCk9R31yyQdzHXuyO/2NjOzBindw7gd+KeIeB/wAeBZYDOwLyKWAPtyHmA1sCRvG4E7ASTNBbYAVwCXA1tGQibHfLZjvVUTe1hmZla3MQND0gXAR4G7ACLi1xHxKrAG2J7DtgNX5/QaYEdU9gOzJc0DrgT2RsTJiDgF7AVW5bLzI2J/fh/4jo5tmZlZQ5R8NMhi4KfA30r6APA48AWgFRHHc8xLQCun5wNHO9YfzNpb1Qe71E8jaSPVXgutVot2u13Q/uk2LRse13qlWrMm/z7OVrfnamhoaNzP4VTqhT57oUdwn3XqhR6h3j5LAmMmcCnwJxFxQNLt/PbwEwAREZKilo7eQkRsBbYC9PX1RX9//7i2c90kf6bTpmXD3HqwWR/TdeTa/tNq7Xab8T6HU6kX+uyFHsF91qkXeoR6+yw5hzEIDEbEgZy/lypAXs7DSeTPE7n8GLCwY/0FWXur+oIudTMza5AxAyMiXgKOSnpvllYAzwC7gJErndYD9+X0LmBdXi21HHgtD13tAVZKmpMnu1cCe3LZ65KW59VR6zq2ZWZmDVF63ORPgG9LOgd4HrieKmzukbQBeBH4TI7dDVwFDAC/yLFExElJXwEezXFfjoiTOX0DcDcwC3gwb2Zm1iBFgRERTwJ9XRat6DI2gBvPsJ1twLYu9ceAi0t6MTOz6eF3epuZWREHhpmZFXFgmJlZEQeGmZkVcWCYmVkRB4aZmRVxYJiZWREHhpmZFXFgmJlZEQeGmZkVcWCYmVkRB4aZmRVxYJiZWREHhpmZFXFgmJlZEQeGmZkVKQoMSUckHZT0pKTHsjZX0l5Jh/PnnKxL0h2SBiQ9JenSju2sz/GHJa3vqF+W2x/IdVX3AzUzs4k5mz2MfxcRl0TEyDfvbQb2RcQSYF/OA6wGluRtI3AnVAEDbAGuAC4HtoyETI75bMd6q8b9iMzMbFJM5JDUGmB7Tm8Hru6o74jKfmC2pHnAlcDeiDgZEaeAvcCqXHZ+ROzPr3fd0bEtMzNriNLACOCfJT0uaWPWWhFxPKdfAlo5PR842rHuYNbeqj7YpW5mZg0ys3DcRyLimKR/BeyV9KPOhRERkqL+9t4sw2ojQKvVot1uj2s7m5YN19jV6VqzJv8+zla352poaGjcz+FU6oU+e6FHcJ916oUeod4+iwIjIo7lzxOSvk91DuJlSfMi4ngeVjqRw48BCztWX5C1Y0D/qHo76wu6jO/Wx1ZgK0BfX1/09/d3Gzam6zY/MK71Sm1aNsytB0uzeGocubb/tFq73Wa8z+FU6oU+e6FHcJ916oUeod4+xzwkJek8Se8amQZWAk8Du4CRK53WA/fl9C5gXV4ttRx4LQ9d7QFWSpqTJ7tXAnty2euSlufVUes6tmVmZg1R8mdwC/h+Xuk6E/i7iPgnSY8C90jaALwIfCbH7wauAgaAXwDXA0TESUlfAR7NcV+OiJM5fQNwNzALeDBv
ZmbWIGMGRkQ8D3ygS/0VYEWXegA3nmFb24BtXeqPARcX9GtmZtPE7/Q2M7MiDgwzMyviwDAzsyIODDMzK+LAMDOzIg4MMzMr4sAwM7MiDgwzMyviwDAzsyIODDMzK+LAMDOzIg4MMzMr4sAwM7MiDgwzMyviwDAzsyIODDMzK+LAMDOzIsWBIWmGpCck3Z/ziyUdkDQg6buSzsn6uTk/kMsXdWzjpqw/J+nKjvqqrA1I2lzfwzMzs7qczR7GF4BnO+a/BtwWEe8BTgEbsr4BOJX123IckpYCa4H3A6uAb2QIzQC+DqwGlgLX5FgzM2uQosCQtAD4BPDNnBfwMeDeHLIduDqn1+Q8uXxFjl8D7IyIX0XEC8AAcHneBiLi+Yj4NbAzx5qZWYPMLBz3V8CfAe/K+QuBVyNiOOcHgfk5PR84ChARw5Jey/Hzgf0d2+xc5+io+hXdmpC0EdgI0Gq1aLfbhe2/2aZlw2MPmoDWrMm/j7PV7bkaGhoa93M4lXqhz17oEdxnnXqhR6i3zzEDQ9IfAici4nFJ/bXc6zhFxFZgK0BfX1/094+vnes2P1BjV6fbtGyYWw+WZvHUOHJt/2m1drvNeJ/DqdQLffZCj+A+69QLPUK9fZb8r/Zh4JOSrgLeDpwP3A7MljQz9zIWAMdy/DFgITAoaSZwAfBKR31E5zpnqpuZWUOMeQ4jIm6KiAURsYjqpPVDEXEt8DDwqRy2Hrgvp3flPLn8oYiIrK/Nq6gWA0uAR4BHgSV51dU5eR+7anl0ZmZWm4kcN/kvwE5JXwWeAO7K+l3AtyQNACepAoCIOCTpHuAZYBi4MSJ+AyDpc8AeYAawLSIOTaAvMzObBGcVGBHRBto5/TzVFU6jx/wS+PQZ1r8ZuLlLfTew+2x6MTOzqeV3epuZWREHhpmZFXFgmJlZEQeGmZkVcWCYmVkRB4aZmRVxYJiZWREHhpmZFXFgmJlZEQeGmZkVcWCYmVkRB4aZmRVxYJiZWREHhpmZFXFgmJlZEQeGmZkVGTMwJL1d0iOSfijpkKQvZX2xpAOSBiR9N79elfwK1u9m/YCkRR3buinrz0m6sqO+KmsDkjbX/zDNzGyiSvYwfgV8LCI+AFwCrJK0HPgacFtEvAc4BWzI8RuAU1m/LcchaSnV17W+H1gFfEPSDEkzgK8Dq4GlwDU51szMGmTMwIjKUM6+LW8BfAy4N+vbgatzek3Ok8tXSFLWd0bEryLiBWCA6iteLwcGIuL5iPg1sDPHmplZgxSdw8g9gSeBE8Be4MfAqxExnEMGgfk5PR84CpDLXwMu7KyPWudMdTMza5CZJYMi4jfAJZJmA98H3jepXZ2BpI3ARoBWq0W73R7XdjYtGx570AS0Zk3+fZytbs/V0NDQuJ/DqdQLffZCj+A+69QLPUK9fRYFxoiIeFXSw8CHgNmSZuZexALgWA47BiwEBiXNBC4AXumoj+hc50z10fe/FdgK0NfXF/39/WfT/huu2/zAuNYrtWnZMLcePKundtIdubb/tFq73Wa8z+FU6oU+e6FHcJ916oUeod4+S66SenfuWSBpFvBx4FngYeBTOWw9cF9O78p5cvlDERFZX5tXUS0GlgCPAI8CS/Kqq3OoTozvquPBmZlZfUr+DJ4HbM+rmX4PuCci7pf0DLBT0leBJ4C7cvxdwLckDQAnqQKAiDgk6R7gGWAYuDEPdSHpc8AeYAawLSIO1fYIzcysFmMGRkQ8BXywS/15qiucRtd/CXz6DNu6Gbi5S303sLugXzMzmyZ+p7eZmRVxYJiZWREHhpmZFXFgmJlZEQeGmZkVcWCYmVkRB4aZmRVxYJiZWREHhpmZFXFgmJlZEQeGmZkVcWCYmVkRB4aZmRVxYJiZWREHhpmZFXFgmJlZkZKvaF0o6WFJz0g6JOkLWZ8raa+kw/lzTtYl6Q5JA5KeknRpx7bW5/jDktZ31C+TdDDXuUOSJuPBmpnZ+JXsYQwDmyJiKbAcuFHSUmAz
sC8ilgD7ch5gNdX3dS8BNgJ3QhUwwBbgCqpv6tsyEjI55rMd662a+EMzM7M6jRkYEXE8In6Q0/8XeBaYD6wBtuew7cDVOb0G2BGV/cBsSfOAK4G9EXEyIk4Be4FVuez8iNgfEQHs6NiWmZk1xFmdw5C0iOr7vQ8ArYg4noteAlo5PR842rHaYNbeqj7YpW5mZg0ys3SgpHcC/wB8MSJe7zzNEBEhKSahv9E9bKQ6zEWr1aLdbo9rO5uWDdfY1elasyb/Ps5Wt+dqaGho3M/hVOqFPnuhR3CfdeqFHqHePosCQ9LbqMLi2xHxvSy/LGleRBzPw0onsn4MWNix+oKsHQP6R9XbWV/QZfxpImIrsBWgr68v+vv7uw0b03WbHxjXeqU2LRvm1oPFWTwljlzbf1qt3W4z3udwKvVCn73QI7jPOvVCj1BvnyVXSQm4C3g2Iv6yY9EuYORKp/XAfR31dXm11HLgtTx0tQdYKWlOnuxeCezJZa9LWp73ta5jW2Zm1hAlfwZ/GPgj4KCkJ7P2X4FbgHskbQBeBD6Ty3YDVwEDwC+A6wEi4qSkrwCP5rgvR8TJnL4BuBuYBTyYNzMza5AxAyMi/hdwpvdFrOgyPoAbz7CtbcC2LvXHgIvH6sXMzKaP3+ltZmZFHBhmZlbEgWFmZkUcGGZmVsSBYWZmRRwYZmZWxIFhZmZFHBhmZlbEgWFmZkUcGGZmVsSBYWZmRRwYZmZWxIFhZmZFHBhmZlbEgWFmZkUcGGZmVsSBYWZmRUq+03ubpBOSnu6ozZW0V9Lh/Dkn65J0h6QBSU9JurRjnfU5/rCk9R31yyQdzHXuyO/1NjOzhinZw7gbWDWqthnYFxFLgH05D7AaWJK3jcCdUAUMsAW4Argc2DISMjnmsx3rjb4vMzNrgDEDIyL+J3ByVHkNsD2ntwNXd9R3RGU/MFvSPOBKYG9EnIyIU8BeYFUuOz8i9ud3ge/o2JaZmTXIzHGu14qI4zn9EtDK6fnA0Y5xg1l7q/pgl3pXkjZS7bnQarVot9vjan7TsuFxrVeqNWvy7+NsdXuuhoaGxv0cTqVe6LMXegT3Wade6BHq7XO8gfGGiAhJUUczBfe1FdgK0NfXF/39/ePaznWbH6ixq9NtWjbMrQcn/NTW6si1/afV2u02430Op1Iv9NkLPYL7rFMv9Aj19jneq6RezsNJ5M8TWT8GLOwYtyBrb1Vf0KVuZmYNM97A2AWMXOm0Hrivo74ur5ZaDryWh672ACslzcmT3SuBPbnsdUnL8+qodR3bMjOzBhnzuImk7wD9wEWSBqmudroFuEfSBuBF4DM5fDdwFTAA/AK4HiAiTkr6CvBojvtyRIycSL+B6kqsWcCDeTMzs4YZMzAi4pozLFrRZWwAN55hO9uAbV3qjwEXj9WHmZlNL7/T28zMijgwzMysiAPDzMyKODDMzKyIA8PMzIo06+3INmkWdXl3+6Zlw5P+rvcjt3xiUrdvZlPHexhmZlbEgWFmZkUcGGZmVsSBYWZmRRwYZmZWxIFhZmZFHBhmZlbEgWFmZkUcGGZmVsSBYWZmRRrz0SCSVgG3AzOAb0bELdPcktWg20eSnK3xfoSJP5bErF6N2MOQNAP4OrAaWApcI2np9HZlZmadmrKHcTkwEBHPA0jaCawBnpnWrqyn1bF3U6pzL8h7Nva7qimBMR842jE/CFwxTb2YTchUBlUnB5VNtqYERhFJG4GNOTsk6bnp7OdMPg8XAT+b7j7G4j7r04Qe9bWiYdPeZ6Fe6LMXeoQ39/lvJrKhpgTGMWBhx/yCrL1JRGwFtk5VU+Ml6bGI6JvuPsbiPuvTCz2C+6xTL/QI9fbZiJPewKPAEkmLJZ0DrAV2TXNPZmbWoRF7GBExLOlzwB6qy2q3RcShaW7LzMw6NCIwACJiN7B7uvuoSeMPmyX3WZ9e6BHcZ516oUeosU9FRF3bMjOz32FNOYdhZmYN58AoJGmbpBOSnu6ozZW0V9Lh/Dkn
65J0h6QBSU9JurRjnfU5/rCk9TX3uFDSw5KekXRI0hca2ufbJT0i6YfZ55eyvljSgeznu3kBBJLOzfmBXL6oY1s3Zf05SVfW2Wduf4akJyTd3+Aej0g6KOlJSY9lrVG/89z+bEn3SvqRpGclfahpfUp6bz6PI7fXJX2xgX3+af7beVrSd/Lf1OS/NiPCt4Ib8FHgUuDpjtp/Bzbn9Gbgazl9FfAgIGA5cCDrc4Hn8+ecnJ5TY4/zgEtz+l3A/6H6qJWm9SngnTn9NuBA3v89wNqs/zXwn3L6BuCvc3ot8N2cXgr8EDgXWAz8GJhR8+/9PwN/B9yf803s8Qhw0ahao37neR/bgT/O6XOA2U3ss6PfGcBLVO9daEyfVG90fgGY1fGavG4qXpu1P8m/yzdgEW8OjOeAeTk9D3gup/8GuGb0OOAa4G866m8aNwn93gd8vMl9Au8AfkD1zv6fATOz/iFgT07vAT6U0zNznICbgJs6tvXGuJp6WwDsAz4G3J/32agec5tHOD0wGvU7By6g+k9OTe5zVG8rgf/dtD757SdjzM3X2v3AlVPx2vQhqYlpRcTxnH4JaOV0t486mf8W9drlbucHqf56b1yfeajnSeAEsJfqr5tXI2K4y32+0U8ufw24cAr6/Cvgz4D/l/MXNrBHgAD+WdLjqj4NAZr3O18M/BT42zzE901J5zWwz05rge/kdGP6jIhjwF8APwGOU73WHmcKXpsOjJpEFdGNuORM0juBfwC+GBGvdy5rSp8R8ZuIuITqr/jLgfdNc0tvIukPgRMR8fh091LgIxFxKdWnPd8o6aOdCxvyO59JdUj3zoj4IPBzqkM7b2hInwDk8f9PAn8/etl095nnT9ZQhfC/Bs4DVk3FfTswJuZlSfMA8ueJrJ/po06KPgJlIiS9jSosvh0R32tqnyMi4lXgYapd6NmSRt4b1Hmfb/STyy8AXpnkPj8MfFLSEWAn1WGp2xvWI/DGX5xExAng+1QB3LTf+SAwGBEHcv5eqgBpWp8jVgM/iIiXc75Jff4B8EJE/DQi/gX4HtXrddJfmw6MidkFjFz9sJ7qnMFIfV1eQbEceC13Z/cAKyXNyb8SVmatFpIE3AU8GxF/2eA+3y1pdk7PojrP8ixVcHzqDH2O9P8p4KH8K28XsDavAlkMLAEeqaPHiLgpIhZExCKqQxMPRcS1TeoRQNJ5kt41Mk31u3qahv3OI+Il4Kik92ZpBdXXFzSqzw7X8NvDUSP9NKXPnwDLJb0j/82PPJeT/9qcjJNFv4s3qhfPceBfqP5a2kB1HHAfcBj4H8DcHCuqL4T6MXAQ6OvYzn8ABvJ2fc09foRqV/kp4Mm8XdXAPv8t8ET2+TTw37L++/mCHaA6FHBu1t+e8wO5/Pc7tvXn2f9zwOpJ+t3389urpBrVY/bzw7wdAv486436nef2LwEey9/7P1JdPdTEPs+j+gv8go5ao/oEvgT8KP/9fIvqSqdJf236nd5mZlbEh6TMzKyIA8PMzIo4MMzMrIgDw8zMijgwzMysiAPDzMyKODDMzKyIA8PMzIr8fxQhh7kF0BR2AAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "df.BlobSum.hist()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 28,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0"
+ ]
+ },
+ "execution_count": 28,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+    "# Small blobs where the pixels are only \"lightly\" hit (total BlobSum <= 255)\n",
+ "dfX = df[df.BlobSum <= 255]\n",
+ "len(dfX)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# Augmenting by flipping along both axes (4x the data)\n",
+ "df[\"Version\"] = \"Normal\"\n",
+ "dfFlipped = df.copy(deep=True)\n",
+ "dfFlipped.BlobImages = dfFlipped.BlobImages.apply(lambda x: np.flipud(x))\n",
+ "dfFlipped[\"Version\"] = \"FlippedUD\"\n",
+ "df = df.append(dfFlipped)\n",
+ "dfFlipped = df.copy(deep=True)\n",
+ "dfFlipped.BlobImages = dfFlipped.BlobImages.apply(lambda x: np.fliplr(x))\n",
+ "dfFlipped[\"Version\"] = \"FlippedLR\"\n",
+ "df = df.append(dfFlipped)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Sample Size argumented: 618012\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"Sample Size argumented:\", len(df))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 31,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def pasteToEmpty (blob):\n",
+ " croped_im = np.zeros((27,15))\n",
+ " croped_im[0:blob.shape[0],0:blob.shape[1]] = blob\n",
+ " return croped_im"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 32,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"Blobs\"] = df.BlobImages.apply(lambda x: pasteToEmpty(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.to_pickle(\"DataStudyCollection/df_statistics.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 35,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[[\"userID\", \"TaskID\", \"Version\", \"Blobs\", \"InputMethod\"]].to_pickle(\"DataStudyCollection/df_blobs_area.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "# Display example blobs"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 36,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "userID 1\n",
+ "Timestamp 1,54515E+12\n",
+ "Current_Task 121\n",
+ "Task_amount 680\n",
+ "TaskID 0\n",
+ "VersionID 7\n",
+ "RepetitionID 0\n",
+ "Actual_Data True\n",
+ "Is_Pause False\n",
+ "Image [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]...\n",
+ "IsMax True\n",
+ "MaxRepetition 0\n",
+ "ImageSum 1495\n",
+ "BlobCount 1\n",
+ "BlobImages [[2, 2, 11, 11, 2], [2, 9, 40, 42, 9], [4, 13,...\n",
+ "BlobW 3\n",
+ "BlobH 4\n",
+ "BlobArea 12\n",
+ "BlobSum 1071\n",
+ "Version Normal\n",
+ "Blobs [[2.0, 2.0, 11.0, 11.0, 2.0, 0.0, 0.0, 0.0, 0....\n",
+ "InputMethod 0\n",
+ "Input Knuckle\n",
+ "Name: 299548, dtype: object\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAANoAAAFpCAYAAAD6NDa0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAEEtJREFUeJzt3W2MXOV5xvHrwi+AHINfCFsXHBwkHJkS4YCNU2RFriJRg6qYSBECVa0DVE7UIoVPrZugBjWq1LRRKlWKVKEG4kopSVpCsCxScGkS8gUaY/FiAsQO2AqWsQVOMSTUxPjuhznbTk9mvXP2nLl5dvz/SaudOXPveZ6Z8eU5L/fMOCIEYLTOeLcnAJwOCBqQgKABCQgakICgAQkIGpCAoAEJCBqQgKABCQgakGBu5mC2R9rvdcYZo/9/4+TJkyMfA7NLRHi6mtSgSaMNw9lnnz2ydU966623GtUTTEgtNx1tb7T9gu19trd2NSlg3Mw4aLbnSPqKpGslXSrpJtuXdjUxYJy0eUW7StK+iHgxIt6W9A1Jm7qZFjBe2gTtAkk/67v+crWssVi9eqT1J1etUswdfne0aX3T+WjdutOrPmOM0uprPNM3ftr+hKSNEfFH1fU/kLQuIm6r1W2RtKW6eiUHQzBuRn3U8aCk5X3XL6yW1Sdxl6S7pNEf3gdK1ebl5UeSLrH9ftvzJd0oaXs30wLGy4xf0SLihO3bJD0kaY6kuyPi2c5mBoyRGe+jzWgwO9hHw7gpsjOkiTPPPLNR/fLly6cv6jOT0L/00kuN6psGE+OJpmIgAUEDEhA0IAFBAxIQNCBBEUFr3Lu4dm2j+v9etUonli4duv4Xa9bonXPPHdl8iuvLo9ex+/qaos+jNT28f9FFFzWq5/A+ujDMebQiXtGAcUfQgAQEDUhA0IAERX8K1twG73KWpJtvvrlR/fnnn9+oXpK2bm32GUQcDIHEKxqQgqABCQgakICgAQkIGpCgiKA17XU8sW6dTk5MDFX74vLlOrZgwdDrfn5iQr+YP7/RfN5eu1bvNDmCWVpfHr2O3dfXpPc6Njlk3/QzQO64445G9RmH9w8fPtx4DMwu9DoChSBoQAKCBiQgaECC9F7HJh8oOmfOnEbrXrlyZaP6hQsXNqqXpLPOOqvx3wC8ogEJCBqQgKABCQgakICgAQmKCFrTXsd3Vq7UySE/p3HP0qX6ZYO2r6cWLdLRhr2Ox1evbvSd18X15dHr2H19TdGf63jOOec0Wv8999zTqH4mh/dvvfXWRvUHDhxoPAZmF3odgUIQNCABQQMSEDQgQdFv/Gz6JReXX355o/oFDd55PWn37t2N6l977bXGY2B24WAIUAiCBiQgaEACggYkIGhAAoIGJCgiaI2/LH7VqkZNvG9ceqneXrJk6PqjH/ygji9aNHT9r66+Wiff+96h64trgKWpuPv6Gs6jNcR5NNRxHg0oBEEDEhA0IAFBAxIU/Q7rph+gOm/evKbzaVQvScePH29Uf+LEicZjYHbhYAhQiFYfCW57v6Q3JL0j6URErOliUsC46eKz938nIl7tYD3A2GLTEUjQNmgh6WHbT9je0sWEgHHUNmjrI+IKSddK+hPbH6kX2N5ie5ftXVOtpHGv49q1zepXrlQ0+L7qpr2UTedfXF8evY7d19d0dnjf9p2S3oyIL52ihsP7GDsjPbxve4HthZOXJV0jac9M1weMszZHHSck3V+9KsyV9M8R8W+dzAoYM3SGNMSmI+roDAEKkf6KljYYkIRXNKAQBA1IQNCABAQNSEDQgARlBK20PjXqu63PGKO0+hoO7wMtcXgfKARBAxIQNCABQQMSEDQgAUEDEhA0IAFBAxIQNCABQQMSlBG00vrUqO+2PmOM0upr6HUEWqLXESgEQQMSEDQgAUEDEhA0IAFBAxIQNCABQQMSEDQgAUEDEpQRtNL61Kjvtj5jjNLqa+h1
BFqi1xEoBEEDEhA0IAFBAxIQNCABQQMSEDQgAUEDEhA0IAFBAxKUEbTS+tSo77Y+Y4zS6mvodQRaotcRKARBAxIQNCABQQMSEDQgAUEDEhA0IMG0QbN9t+0jtvf0LVtie6ftvdXvxaOdJjC7DfOK9jVJG2vLtkp6JCIukfRIdR3AFKYNWkQ8KulobfEmSduqy9skXd/xvICxMtN9tImIOFRdfkXSxFSFtrfY3mV715RrK61Pjfpu6zPGKK2+ZqheR9srJO2IiMuq6/8VEYv6bv95REy7n0avI8bRKHsdD9teJknV7yMzXA9wWphp0LZL2lxd3izpgW6mA4ynaTcdbd8raYOk8yQdlvR5Sd+R9C1J75N0QNINEVE/YDJoXWw6YuwMs+nI+9GAlng/GlAIggYkIGhAAoIGJCBoQAKCBiQoI2il9alR3219xhil1ddwHg1oifNoQCEIGpCAoAEJCBqQgKABCQgakICgAQkIGpCAoAEJCBqQoIygldanRn239RljlFZfQ68j0BK9jkAhCBqQgKABCQgakICgAQkIGpCAoAEJCBqQgKABCQgakKCMoJXWp0Z9t/UZY5RWX0OvI9ASvY5AIQgakICgAQkIGpCAoAEJCBqQgKABCQgakICgAQkIGpCgjKCV1qdGfbf1GWOUVl9DryPQEr2OQCEIGpCAoAEJCBqQgKABCQgakICgAQmmDZrtu20fsb2nb9mdtg/afrL6uW600wRmt2Fe0b4maeOA5X8XEaurnwe7nRYwXqYNWkQ8KulowlyAsdVmH+02209Xm5aLW82itD416rutzxijtPqaoXodba+QtCMiLquuT0h6VVJI+oKkZRFxyxR/u0XSlurqla1mCxRomF7HGQVt2NsG1NJUjLEzsqZi28v6rn5c0p6pagFIc6crsH2vpA2SzrP9sqTPS9pge7V6m477JX1qhHMEZj3ejwa0xPvRgEIQNCABQQMSEDQgAUEDEhA0IAFBAxKUEbTSGkKp77Y+Y4zS6ms4YQ20xAlroBAEDUhA0IAEBA1IQNCABAQNSEDQgAQEDUhA0IAEBA1IUEbQSutTo77b+owxSquvodcRaIleR6AQBA1IQNCABAQNSEDQgAQEDUhA0IAEBA1IQNCABAQNSFBG0ErrU6O+2/qMMUqrr6HXEWiJXkegEAQNSEDQgAQEDUhA0IAEBA1IQNCABAQNSEDQgAQEDUhQRtBK61Ojvtv6jDFKq6+h1xFoiV5HoBAEDUhA0IAEBA1IQNCABAQNSDBt0Gwvt/092z+2/aztz1TLl9jeaXtv9Xvx6KcLzE7TnkezvUzSsojYbXuhpCckXS/pk5KORsRf294qaXFE/Nk06+I8GsZOJ+fRIuJQROyuLr8h6TlJF0jaJGlbVbZNvfABGKDRPprtFZI+JOlxSRMRcai66RVJE53ODBgjQwfN9nsk3Sfp9og41n9b9LY/B24W2t5ie5ftXVOuvLQ+Neq7rc8Yo7T6mqF6HW3Pk7RD0kMR8eVq2QuSNkTEoWo/7vsR8YFp1sM+GsZOJ/toti3pq5KemwxZZbukzdXlzZIemMkkgdPBMEcd10v6oaRnJJ2sFn9Wvf20b0l6n6QDkm6IiKPTrItXNIydYV7ReJsM0BJvkwEKQdCABAQNSEDQgAQEDUhA0IAEBA1IUEbQSutTo77b+owxSquv4YQ10BInrIFCEDQgAUEDEhA0IAFBAxIQNCABQQMSEDQgAUEDEhA0IEEZQSutT436buszxiitvoZeR6Aleh2BQhA0IAFBAxIQNCABQQMSEDQgAUEDEhA0IAFBAxIQNCBBGUErrU+N+m7rM8Yorb6GXkegJXodgUIQNCABQQMSEDQgAUEDEhA0IAFBAxIQNCABQQMSEDQgQRlBK61Pjfpu6zPGKK2+hl5HoCV6HYFCEDQgAUEDEhA0IAFBAxIQNCABQQMSTBs028ttf8/2j20/a/sz1fI7bR+0/WT1c93opwvMTtOesLa9TNKy
iNhte6GkJyRdL+kGSW9GxJeGHowT1hhDw5ywnjvESg5JOlRdfsP2c5IuaD894PTRaB/N9gpJH5L0eLXoNttP277b9uIZz6K0PjXqu63PGKO0+pqhex1tv0fSDyT9VUR82/aEpFclhaQvqLd5ecuAv9siaUt19cpWswUKNMym41BBsz1P0g5JD0XElwfcvkLSjoi4bJr1sI+GsdNJU7FtS/qqpOf6Q1YdJJn0cUl7ZjJJ4HQwzFHH9ZJ+KOkZSSerxZ+VdJOk1eptOu6X9KnqwMmp1sUrGsZOZ5uOXSFoGEe8Hw0oBEEDEhA0IAFBAxIQNCABQQMSlBG00vrUqO+2PmOM0uprOI8GtMR5NKAQBA1IQNCABAQNSEDQgATTfmZIx16VdGDA8vOq27KdbuO+m2OP67gXDVOUenh/yknYuyJiDeOO79in27h1bDoCCQgakKCUoN3FuGM/9uk27v9TxD4aMO5KeUUDxlpq0GxvtP2C7X22tw64/Uzb36xuf7z6vMi2Yw78ko5azQbbr/d9YcdftB23Wu9+289U69w14Hbb/vvq/j5t+4oOxvxA3/140vYx27fXajq7v9WnVB+xvadv2RLbO23vrX4P/BRr25urmr22N3cw7t/afr56LO+3vWiKvz3l8zISEZHyI2mOpJ9KuljSfElPSbq0VvPHkv6hunyjpG92MO4ySVdUlxdK+smAcTeo9wGwXd/n/ZLOO8Xt10n6riRL+rCkx0fwmL8i6aJR3V9JH5F0haQ9fcv+RtLW6vJWSV8c8HdLJL1Y/V5cXV7cctxrJM2tLn9x0LjDPC+j+Ml8RbtK0r6IeDEi3pb0DUmbajWbJG2rLv+rpI9WH+A6YxFxKCJ2V5ffkFTSl3RskvRP0fOYpEW1D6Zt66OSfhoRg5oEOhERj0o6Wlvc/zxuU+/bh+p+V9LOiDgaET+XtFPSxjbjRsTDEXGiuvqYpAuHXd+oZQbtAkk/67v+sn79H/z/1lQP2OuSlnY1gQFf0tHvt20/Zfu7tn+royFD0sO2n6i+g6BumMekjRsl3TvFbaO4v5Mm4v8+TPcVSRMDakZ9329Rb2thkOmel85lt2C9a6ov6bhP0u0Rcax28271Nq/erL5Q8TuSLulg2PURcdD2+ZJ22n6++p945GzPl/QxSX8+4OZR3d9fExGR/YZf25+TdELS16coSX9eMl/RDkpa3nf9wmrZwBrbcyWdK+m1tgNXX9Jxn6SvR8S367dHxLGIeLO6/KCkebbPaztuRBysfh+RdL96m8/9hnlMZupaSbsj4vCAeY3k/vY5PLkJXP0+MqBmJPfd9icl/Z6k349qh6xuiOelc5lB+5GkS2y/v/rf9kZJ22s12yVNHn36hKT/mOrBGtZUX9JRq/mNyX1B21ep97i0CrjtBe59Q6psL1BvR73+RSDbJf1hdfTxw5Jej2m+v6CBmzTFZuMo7m9N//O4WdIDA2oeknSN7cXVUclrqmUzZnujpD+V9LGI+OUUNcM8L93LPPKi3lG2n6h39PFz1bK/rB4YSTpL0r9I2ifpPyVd3MGY69XbJn9a0pPVz3WSPi3p01XNbZKeVe9I6GOSru5g3Iur9T1VrXvy/vaPa0lfqR6PZySt6ehxXqBecM7tWzaS+6temA9J+pV6+1m3qrdf/YikvZL+XdKSqnaNpH/s+9tbqud6n6SbOxh3n3r7fZPP8+QR7N+U9OCpnpdR/9AZAiSgMwRIQNCABAQNSEDQgAQEDUhA0IAEBA1IQNCABP8DxXjFZ2QGIr8AAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "plt.clf()\n",
+ "plt.figure(figsize=(6, 6))\n",
+ "ax = plt.gca()\n",
+ "data_point = 100\n",
+ "data = df.Blobs.iloc[data_point]\n",
+ "print(df.iloc[data_point])\n",
+ "plt.imshow(data, cmap='gray', vmin=0, vmax=255)\n",
+ "# Loop over data dimensions and create text annotations.\n",
+ "for i in range(0, data.shape[0]):\n",
+ " for j in range(0, data.shape[1]):\n",
+ " text = ax.text(j, i, int(data[i, j]),\n",
+ " ha=\"center\", va=\"center\", color=\"cyan\", fontsize=1)\n",
+ "plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_06_CNN_Baseline.ipynb b/python/Step_06_CNN_Baseline.ipynb
new file mode 100644
index 0000000..6b92955
--- /dev/null
+++ b/python/Step_06_CNN_Baseline.ipynb
@@ -0,0 +1,890 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "## Uncomment on multi-GPU systems to restrict TensorFlow to a single GPU\n",
+ "#import os\n",
+ "#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+ "\n",
+ "%matplotlib inline\n",
+ "\n",
+ "from scipy.odr import *\n",
+ "from scipy.stats import *\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import os\n",
+ "import time\n",
+ "import matplotlib.pyplot as plt\n",
+ "import ast\n",
+ "from multiprocessing import Pool\n",
+ "\n",
+ "import scipy\n",
+ "\n",
+ "from IPython import display\n",
+ "from matplotlib.patches import Rectangle\n",
+ "\n",
+ "from sklearn.metrics import mean_squared_error\n",
+ "import json\n",
+ "\n",
+ "import scipy.stats as st\n",
+ "from sklearn.metrics import r2_score\n",
+ "\n",
+ "\n",
+ "from matplotlib import cm\n",
+ "from mpl_toolkits.mplot3d import axes3d\n",
+ "import matplotlib.pyplot as plt\n",
+ "from matplotlib.patches import Ellipse\n",
+ "\n",
+ "import copy\n",
+ "\n",
+ "from sklearn.model_selection import LeaveOneOut, LeavePOut\n",
+ "\n",
+ "from multiprocessing import Pool\n",
+ "import cv2\n",
+ "\n",
+ "import sklearn\n",
+ "import random\n",
+ "from sklearn import neighbors\n",
+ "from sklearn import svm\n",
+ "from sklearn import tree\n",
+ "from sklearn import ensemble\n",
+ "from sklearn.model_selection import GridSearchCV\n",
+ "from sklearn.metrics import classification_report\n",
+ "\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "import pandas as pd\n",
+ "import math\n",
+ "\n",
+ "# Importing matplotlib to plot images.\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "%matplotlib inline\n",
+ "\n",
+ "# Importing SK-learn to calculate precision and recall\n",
+ "import sklearn\n",
+ "from sklearn import metrics\n",
+ "from sklearn.model_selection import train_test_split, cross_val_score, LeaveOneGroupOut\n",
+ "from sklearn.utils import shuffle\n",
+ "from sklearn.model_selection import GridSearchCV\n",
+ "from sklearn.metrics.pairwise import euclidean_distances\n",
+ "from sklearn.metrics import confusion_matrix\n",
+ "from sklearn.metrics import accuracy_score\n",
+ "\n",
+ "import pickle as pkl\n",
+ "import h5py\n",
+ "\n",
+ "from pathlib import Path\n",
+ "import os.path\n",
+ "import sys\n",
+ "import datetime\n",
+ "import time\n",
+ "\n",
+ "import skimage\n",
+ "\n",
+ "target_names = [\"Knuckle\", \"Finger\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from skimage import measure\n",
+ "from skimage.measure import find_contours, approximate_polygon, \\\n",
+ " subdivide_polygon, EllipseModel, LineModelND"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def getEllipseParams(img):\n",
+ " points = np.argwhere(img > 40)\n",
+ " \n",
+ " contours = skimage.measure.find_contours(img, 40)\n",
+ " points_to_approx = []\n",
+ " highest_val = 0\n",
+ " for n, contour in enumerate(contours):\n",
+ " if (len(contour) > highest_val):\n",
+ " points_to_approx = contour\n",
+ " highest_val = len(contour) \n",
+ " \n",
+ " try:\n",
+ " contour = np.fliplr(points_to_approx)\n",
+ " except Exception as inst:\n",
+ " return [-1, -1, -1, -1, -1]\n",
+ " \n",
+ "\n",
+ " ellipse = skimage.measure.fit.EllipseModel()\n",
+ " ellipse.estimate(contour)\n",
+ " try:\n",
+ " xc, yc, a, b, theta = ellipse.params \n",
+ " except Exception as int:\n",
+ " return [-1, -1, -1, -1, -1]\n",
+ " \n",
+ " return [xc, yc, a, b, theta]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[ 1 2 9 6 4 14 17 16 12 3 10 18 5] [13 8 11 15 7]\n",
+ "13 : 5\n",
+ "0.7222222222222222 : 0.2777777777777778\n"
+ ]
+ }
+ ],
+ "source": [
+ "# the data, split between train and test sets\n",
+ "df = pd.read_pickle(\"DataStudyCollection/df_statistics.pkl\")\n",
+ "\n",
+ "lst = df.userID.unique()\n",
+ "np.random.seed(42)\n",
+ "np.random.shuffle(lst)\n",
+ "test_ids = lst[-5:]\n",
+ "train_ids = lst[:-5]\n",
+ "\n",
+ "df[\"Set\"] = \"Test\"\n",
+ "df.loc[df.userID.isin(train_ids), \"Set\"] = \"Train\"\n",
+ "print(train_ids, test_ids)\n",
+ "print(len(train_ids), \":\", len(test_ids))\n",
+ "print(len(train_ids) / len(lst), \":\", len(test_ids)/ len(lst))\n",
+ "\n",
+ "#df_train = df[df.userID.isin(train_ids)]\n",
+ "#df_test = df[df.userID.isin(test_ids) & (df.Version == \"Normal\")]\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAJ4AAAD8CAYAAACGuR0qAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAADetJREFUeJzt3XuQVPWZxvHvO8AwMAy3gCNBRbCQDV4yuoSoYArvqNkga0K8ZXHXCmrFXbWyu2VlqzS7+YdaNWx243pJQImV6LoaSjZFokhijHeZaBBULuE+AgPKZYIyzDDv/tFnKsNlunu6e/rt6Xk+VVPdfW79UvVwus85/TuvuTsixVYRXYD0TgqehFDwJISCJyEUPAmh4EkIBU9CKHgSQsGTEH2L+WaV1t+rqO58AbO061ufDP9PDrWlnZ3xGo2u4uStid273H1kpuXyCp6ZTQd+APQBfuzuc9MtX0U1X6y4uPPtVVamfb+KmkFp5/uf9qefnyFY3tycdr5k9oI/vSmb5XL+qDWzPsADwOXAROBaM5uY6/akd8nnO95kYJ27r3f3g8CTwIzClCXlLp/gjQa2dHi9NZl2GDObY2bLzWx5C/ook5RuP6p190fcfZK7T+pH/+5+O+kh8gleA3Bih9cnJNNEMsoneG8B481srJlVAtcAiwtTlpS7nE+nuHurmd0GPEfqdMoCd1+VdiWztKdMKgYOTL96htMp1jf9P6d1R2Pa+VI8eZ3Hc/clwJIC1SK9iC6ZSQgFT0IoeBKiqD8SKKR+lX353NljOHnCKCoqjLY2Z/PKzbxfv4HmT1uiy5MMelzwaoYO5Ou3XsSV151L1cCjj5BbDrbyxtKVPPEfv2T9Kp1WLFU9KnjnXnIa3773GqprBgDwx/caWP2HzTR/2kJlVT8mnHEC404bzdQr65h6ZR2/fuZNHvjOU3zSdCC4cjlSaQVv5PBOZ13x9S/yrXtm0Keigpc2bOTe3/2O9xp3HrbMwHpj5OBqbpz2l8w67/NcePVkTvnSeG6b/ywbd+7msw/uTfv2bQcU0GLpEQcXZ08Zzz/860z6VFQw7+VX+Ntnfn5U6Nrt3Lefexe/xNX3Pc77DY2MGTmM+bd+lVHDaopctaRT8sEbMqyaf5w7C4B5r7zKD19/I6v1Nu/aw+wf/g9vrtvCcUMG8dA3Z1IzLM2vn6WoSj541956IcNH1rDirfX8d5aha/fpwVbuePT/WLttF+NqP8Mt/35DN1UpXVXSwauuqWL6174AwIPfW0xbDmMimg40c/ujiznQ0srF10+lbpp+JF0KSjp4508/g6oBlbz96lo2rNme83a2fLSXh5e+DqC9Xoko6eCdOXkcAK++kP5HL9l47MV6Ptq+h7Gnn8iZ5/9F3tuT/JR08M6YNBaAFW9tyHtbrYfaWDL/1wB85ZZL8t6e5Keo5/EMsDRjZ33An69EVPbvy8hRQzl4sJVNDXvwAZWsveHBtNuvbz6Ydv4Pvnoj3/iXv2by5XX0H11LS3PrYfPbtupKR7GU7B6vZnDq6kTT3k8yjofN1kc79vLHVQ30r6rkc2efXJBtSm5KNniDaqoA2N9U2JFpq95aD8CEujEF3a50TckG75NPUh+bAwcVdmTahxtTVzxGjBpa0O1K15Rs8PZ8nLodxdACX23Ys7Mptd0R6cdvSPcq2eC1HGxl7+799O3Xh5HHDynYdv+071MABiXfISVGyQYPYM17HwJwWt1JBdtmdRK4/fqpVKiSDt47b6YOBL4wZXzBtlkzNDWEsmnPJwXbpnRdUc/jOelvFVbRcPi419eefIVv3nkZUy+cyEN3/oSz/+3WtNs/OCT9/fVOOrCasafWArBt/XZcv78LU9J7vA83NFL/m1VUDazk0uvOK8g2zzg3tfdc+dragmxPclPSwQNYPP9FAGb9/WUMqa7Ka1ujxx3HSaeO4tP9B1j7h80FqE5yVfLBe+P5
d1nxyhqGjqjhjplfymtbV825EIDfLlpOa8uhQpQnOSr54AH85z/9lIPNLcw873S+ck5uv6ebcPwILrt+Cm1tbSx6eFmBK5Su6hHB27puB4/c/TQAd19/KeefPrZL6w/o15f7rrmCyv79+OXjL7Np9bbuKFO6oEcED+AXj/6W+c+9Sd8+Fcy7eQbXXXBWVusNGVjFj2+6mlNqP8Om1dt45O7/7eZKJRulNbwxg/969mUM+LvLJvPPX7uAS846lYeXvMbrHxx9oGAGl585gTsvm8LoYUP4cPc+vnfjQ7rLQImwYnboHmzDPV27gYpBGe5/V9kPgClfPovb77+BwcNTy2/ftIt3X1vD9i0f4w4jPzuMuvMnMGrMCADWvbuFe/7mIXatTn8kq3G1+XvBn65390mZlsu3z8VGoAk4BLRm84aF8Mov3ubtF9/nr26axsxbLub4MSM4PglZR7s+3M3j9y/hhafeoC1D8xUprrz2eEnwJrn7rmyWL9Qe77B1Koxxp5/AaeeMZ8iIwZgZ+3bv54P6Dax5ZxNtbX/+97Xt0Z0EultR9niloK3NWbdiC+tWbMH6HR1MKU35HtU68LyZ1ZvZnEIUJL1Dvnu8qe7eYGbHAUvN7AN3f6njAkkg5wBUkf7m2tJ75LXHc/eG5LERWESqzdSRy6jBihwlnyZ61WZW0/4cuBRYWajCpLzl81FbCyxKxsn2BX7m7r/KuFaao+i2pqY8ypGeJJ8GK+uBzxewFulFesy1WikvCp6EUPAkhIInIRQ8CaHgSQgFT0IoeBJCwZMQCp6EUPAkhIInIRQ8CaHgSQgFT0IoeBJCwZMQCp6EUPAkhIInIRQ8CaHgSQgFT0IoeBJCwZMQCp6EUPAkhIInIRQ8CaHgSQgFT0JkDJ6ZLTCzRjNb2WHacDNbamZrk8dh3VumlJts9niPAdOPmHYXsMzdxwPLktciWcsYvOQu7h8fMXkGsDB5vhC4qsB1SZnL9Tterbu3997cTup+yCJZy/vgwlM9qTq9o7aZzTGz5Wa2vIXmfN9OykSuwdthZqMAksfGzhZUnws5llyDtxiYnTyfDTxbmHKkt8jmdMoTwGvABDPbamY3AXOBS8xsLXBx8lokaxn7XLj7tZ3MuqjAtUgvoisXEkLBkxAKnoRQ8CSEgichFDwJoeBJCAVPQih4EkLBkxAKnoRQ8CSEgichFDwJoeBJCAVPQih4EkLBkxAKnoRQ8CSEgichFDwJoeBJCAVPQih4EkLBkxAKnoRQ8CSEgichFDwJoeBJCAVPQuTaYOW7ZtZgZu8kf1d0b5lSbnJtsAIwz93rkr8lhS1Lyl2uDVZE8pLPd7zbzGxF8lHcaS8z9bmQY8k1eA8CpwB1wDbg/s4WVJ8LOZacgufuO9z9kLu3AT8CJhe2LCl3OQWvvatPYiawsrNlRY4lY5+LpMHKNGCEmW0F7gGmmVkdqR5mG4Gbu7FGKUO5NliZ3w21SC+iKxcSQsGTEAqehFDwJISCJyEUPAmh4EkIBU9CKHgSQsGTEAqehFDwJISCJyEUPAmh4EkIBU9CKHgSQsGTEAqehFDwJISCJyEUPAmh4EkIBU9CKHgSQsGTEAqehFDwJISCJyEUPAmh4EmIbPpcnGhmvzGz98xslZndnkwfbmZLzWxt8tjpDbhFjpTNHq8V+La7TwTOAb5lZhOBu4Bl7j4eWJa8FslKNn0utrn775PnTcD7wGhgBrAwWWwhcFV3FSnlJ+OtaDsys5OBs4A3gFp335bM2g7UdrLOHGAOQBUDc61TykzWBxdmNgh4BrjD3fd1nOfuTupG3EdRnws5lqyCZ2b9SIXup+7+82Tyjva2A8ljY/eUKOUom6NaI3WX9/fd/fsdZi0GZifPZwPPFr48KVfZfMebAnwDeNfM3kmmfQeYCzxlZjcBm4BZ3VOilKNs+ly8DFgnsy8qbDnSW+jKhYRQ8CSEgichFDwJoeBJCAVPQih4EkLBkxAKnoRQ8CSEgich
FDwJoeBJCAVPQih4EkLBkxAKnoRQ8CSEgichFDwJoeBJCAVPQih4EkLBkxAKnoRQ8CSEgichFDwJoeBJCAVPQih4EiKfPhffNbMGM3sn+bui+8uVcpHNHUHb+1z83sxqgHozW5rMm+fu93VfeVKusrkj6DZgW/K8ycza+1yI5KxL3/GO6HMBcJuZrTCzBWopJV2RT5+LB4FTgDpSe8T7O1lvjpktN7PlLTQXoGQpBzn3uXD3He5+yN3bgB8Bk4+1rhqsyLHk3OeivblKYiawsvDlSbnKp8/FtWZWR6qV1Ebg5m6pUMpSPn0ulhS+HOktdOVCQih4EkLBkxAKnoRQ8CSEgichFDwJYe5evDcz20mqqXK7EcCuohXQdaVeH5RejWPcfWSmhYoavKPe3Gy5u08KKyCDUq8PekaNx6KPWgmh4EmI6OA9Evz+mZR6fdAzajxK6Hc86b2i93jSS4UEz8ymm9lqM1tnZndF1JCJmW00s3eToZvLS6CeBWbWaGYrO0wbbmZLzWxt8thjxr0UPXhm1gd4ALgcmEjqB6UTi11Hli5w97oSOV3xGDD9iGl3AcvcfTywLHndI0Ts8SYD69x9vbsfBJ4EZgTU0aO4+0vAx0dMngEsTJ4vBK4qalF5iAjeaGBLh9dbKc1xug48b2b1ZjYnuphO1CbjngG2A7WRxXRFNmMuequp7t5gZscBS83sg2SvU5Lc3c2sx5yiiNjjNQAndnh9QjKtpLh7Q/LYCCyik+GbwXa0j/ZLHhuD68laRPDeAsab2VgzqwSuARYH1NEpM6tO7hODmVUDl1KawzcXA7OT57OBZwNr6ZKif9S6e6uZ3QY8B/QBFrj7qmLXkUEtsCg1pJi+wM/c/VeRBZnZE8A0YISZbQXuAeYCT5nZTaR+9TMrrsKu0ZULCaErFxJCwZMQCp6EUPAkhIInIRQ8CaHgSQgFT0L8Pyp94TKMVNfuAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "fig, ax = plt.subplots(1)\n",
+ "img = df.iloc[0].Blobs\n",
+ "xc, yc, a, b, theta = getEllipseParams(img)\n",
+ "ax.imshow(img)\n",
+ "e = Ellipse(xy=[xc,yc], width=a*2, height=b*2, angle=math.degrees(theta), fill=False, lw=2, edgecolor='w')\n",
+ "ax.add_artist(e)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "lst = df.Blobs.apply(lambda x: getEllipseParams(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "lst2 = np.vstack(lst.values)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(618012, 5)"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "lst2.shape"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"XC\"] = lst2[:,0]\n",
+ "df[\"YC\"] = lst2[:,1]\n",
+ "df[\"EllipseW\"] = lst2[:,2]\n",
+ "df[\"EllipseH\"] = lst2[:,3]\n",
+ "df[\"EllipseTheta\"] = lst2[:,4]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"Area\"] = df[\"EllipseW\"] * df[\"EllipseH\"] * np.pi\n",
+ "df[\"AvgCapa\"] = df.Blobs.apply(lambda x: np.mean(x))\n",
+ "df[\"SumCapa\"] = df.Blobs.apply(lambda x: np.sum(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[8, 11, 6, 7, 16, 15, 14, 10, 9, 2, 3, 13, 17, 5, 12, 1, 4]"
+ ]
+ },
+ "execution_count": 11,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "lst = list(range(1, df.userID.max()))\n",
+ "SEED = 42#448\n",
+ "random.seed(SEED)\n",
+ "random.shuffle(lst)\n",
+ "lst"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dfY = df[df.Set == \"Train\"].copy(deep=True)\n",
+ "dfT = df[(df.Set == \"Test\") & (df.Version == \"Normal\")].copy(deep=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "minmax = min(len(dfY[dfY.Input == \"Finger\"]), len(dfY[dfY.Input == \"Knuckle\"]))\n",
+ "dfX = dfY[dfY.Input == \"Finger\"].sample(minmax)\n",
+ "dfZ = dfY[dfY.Input == \"Knuckle\"].sample(minmax)\n",
+ "dfY = pd.concat([dfX,dfZ])\n",
+ "\n",
+ "minmax = min(len(dfT[dfT.Input == \"Finger\"]), len(dfT[dfT.Input == \"Knuckle\"]))\n",
+ "dfX = dfT[dfT.Input == \"Finger\"].sample(minmax)\n",
+ "dfZ = dfT[dfT.Input == \"Knuckle\"].sample(minmax)\n",
+ "dfT = pd.concat([dfX,dfZ])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " Timestamp | \n",
+ " Current_Task | \n",
+ " Task_amount | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " RepetitionID | \n",
+ " Actual_Data | \n",
+ " Is_Pause | \n",
+ " Image | \n",
+ " ... | \n",
+ " InputMethod | \n",
+ " Set | \n",
+ " XC | \n",
+ " YC | \n",
+ " EllipseW | \n",
+ " EllipseH | \n",
+ " EllipseTheta | \n",
+ " Area | \n",
+ " AvgCapa | \n",
+ " SumCapa | \n",
+ "
\n",
+ " \n",
+ " Input | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " Finger | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " ... | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ "
\n",
+ " \n",
+ " Knuckle | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " ... | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ " 9421 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
2 rows × 31 columns
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID Timestamp Current_Task Task_amount TaskID VersionID \\\n",
+ "Input \n",
+ "Finger 9421 9421 9421 9421 9421 9421 \n",
+ "Knuckle 9421 9421 9421 9421 9421 9421 \n",
+ "\n",
+ " RepetitionID Actual_Data Is_Pause Image ... InputMethod Set \\\n",
+ "Input ... \n",
+ "Finger 9421 9421 9421 9421 ... 9421 9421 \n",
+ "Knuckle 9421 9421 9421 9421 ... 9421 9421 \n",
+ "\n",
+ " XC YC EllipseW EllipseH EllipseTheta Area AvgCapa SumCapa \n",
+ "Input \n",
+ "Finger 9421 9421 9421 9421 9421 9421 9421 9421 \n",
+ "Knuckle 9421 9421 9421 9421 9421 9421 9421 9421 \n",
+ "\n",
+ "[2 rows x 31 columns]"
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dfT.groupby(\"Input\").count()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# FEATURE SET: sum of capacitance, avg of capacitance, ellipse area, ellipse width, height and theta."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "features = [\"SumCapa\", \"AvgCapa\", \"Area\", \"EllipseW\", \"EllipseH\", \"EllipseTheta\"]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "# ZeroR baseline (always predicts a single constant class)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dfT[\"InputMethodPred\"] = 1"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[ 0 9421]\n",
+ " [ 0 9421]]\n",
+ "Accuray: 0.50\n",
+ "Recall: 0.50\n",
+ "Precision: 0.50\n",
+ "F1-Score: 0.33\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " Knuckle 0.00 0.00 0.00 9421\n",
+ " Finger 0.50 1.00 0.67 9421\n",
+ "\n",
+ " micro avg 0.50 0.50 0.50 18842\n",
+ " macro avg 0.25 0.50 0.33 18842\n",
+ "weighted avg 0.25 0.50 0.33 18842\n",
+ "\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/usr/local/lib/python3.6/dist-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n",
+ " 'precision', 'predicted', average, warn_for)\n",
+ "/usr/local/lib/python3.6/dist-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples.\n",
+ " 'precision', 'predicted', average, warn_for)\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(confusion_matrix(dfT.InputMethod.values, dfT.InputMethodPred.values, labels=[0, 1]))\n",
+ "print(\"Accuray: %.2f\" % accuracy_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(\"Recall: %.2f\" % metrics.recall_score(dfT.InputMethod.values, dfT.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"Precision: %.2f\" % metrics.average_precision_score(dfT.InputMethod.values, dfT.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"F1-Score: %.2f\" % metrics.f1_score(dfT.InputMethod.values, dfT.InputMethodPred.values, average=\"macro\"))\n",
+ "print(classification_report(dfT.InputMethod.values, dfT.InputMethodPred.values, target_names=target_names))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# DecisionTreeClassifier"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Fitting 5 folds for each of 240 candidates, totalling 1200 fits\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "[Parallel(n_jobs=30)]: Using backend LokyBackend with 30 concurrent workers.\n",
+ "[Parallel(n_jobs=30)]: Done 140 tasks | elapsed: 10.4s\n",
+ "[Parallel(n_jobs=30)]: Done 390 tasks | elapsed: 31.4s\n",
+ "[Parallel(n_jobs=30)]: Done 740 tasks | elapsed: 1.3min\n",
+ "[Parallel(n_jobs=30)]: Done 1200 out of 1200 | elapsed: 2.4min finished\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'max_depth': 22, 'min_samples_split': 2} 0.8120637794585754\n",
+ "[[7409 2012]\n",
+ " [3096 6325]]\n",
+ "Accuray: 0.73\n",
+ "Recall: 0.73\n",
+ "Precision: 0.67\n",
+ "F1-Score: 0.73\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " Knuckle 0.71 0.79 0.74 9421\n",
+ " Finger 0.76 0.67 0.71 9421\n",
+ "\n",
+ " micro avg 0.73 0.73 0.73 18842\n",
+ " macro avg 0.73 0.73 0.73 18842\n",
+ "weighted avg 0.73 0.73 0.73 18842\n",
+ "\n",
+ "CPU times: user 7.26 s, sys: 3.38 s, total: 10.6 s\n",
+ "Wall time: 2min 29s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "param_grid = {'max_depth': range(2,32,1),\n",
+ " 'min_samples_split':range(2,10,1)}\n",
+ "#TODO: Create Baseline for different ML stuff\n",
+ "clf = GridSearchCV(tree.DecisionTreeClassifier(), \n",
+ " param_grid,\n",
+ " cv=5 , n_jobs=os.cpu_count()-2, verbose=1)\n",
+ "clf.fit(dfY[features].values, dfY.InputMethod.values)\n",
+ "print(clf.best_params_, clf.best_score_)\n",
+ "dfT[\"InputMethodPred\"] = clf.predict(dfT[features].values) \n",
+ "\n",
+ "print(confusion_matrix(dfT.InputMethod.values, dfT.InputMethodPred.values, labels=[0, 1]))\n",
+ "print(\"Accuray: %.3f\" % accuracy_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(\"Recall: %.3f\" % metrics.recall_score(dfT.InputMethod.values, dfT.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"Precision: %.3f\" % metrics.average_precision_score(dfT.InputMethod.values, dfT.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"F1-Score: %.3f\" % metrics.f1_score(dfT.InputMethod.values, dfT.InputMethodPred.values, average=\"macro\"))\n",
+ "print(classification_report(dfT.InputMethod.values, dfT.InputMethodPred.values, target_names=target_names))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# RandomForestClassifier"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Fitting 5 folds for each of 180 candidates, totalling 900 fits\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "[Parallel(n_jobs=94)]: Using backend LokyBackend with 94 concurrent workers.\n",
+ "[Parallel(n_jobs=94)]: Done 12 tasks | elapsed: 1.2min\n",
+ "[Parallel(n_jobs=94)]: Done 262 tasks | elapsed: 4.0min\n",
+ "[Parallel(n_jobs=94)]: Done 612 tasks | elapsed: 9.2min\n",
+ "[Parallel(n_jobs=94)]: Done 900 out of 900 | elapsed: 12.8min finished\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'max_depth': 60, 'n_estimators': 63} 0.8669582104371696\n",
+ "[[8175 1246]\n",
+ " [2765 6656]]\n",
+ "Accuray: 0.79\n",
+ "Recall: 0.71\n",
+ "Precision: 0.74\n",
+ "F1-Score: 0.77\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " Knuckle 0.75 0.87 0.80 9421\n",
+ " Finger 0.84 0.71 0.77 9421\n",
+ "\n",
+ " micro avg 0.79 0.79 0.79 18842\n",
+ " macro avg 0.79 0.79 0.79 18842\n",
+ "weighted avg 0.79 0.79 0.79 18842\n",
+ "\n",
+ "CPU times: user 42.1 s, sys: 834 ms, total: 42.9 s\n",
+ "Wall time: 13min 28s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "param_grid = {'n_estimators': range(55,64,1),\n",
+ " 'max_depth': range(50,70,1)}\n",
+ "#TODO: Create Baseline for different ML stuff\n",
+ "clf = GridSearchCV(ensemble.RandomForestClassifier(), \n",
+ " param_grid,\n",
+ " cv=5 , n_jobs=os.cpu_count()-2, verbose=1)\n",
+ "clf.fit(dfY[features].values, dfY.InputMethod.values)\n",
+ "print(clf.best_params_, clf.best_score_)\n",
+ "dfT[\"InputMethodPred\"] = clf.predict(dfT[features].values) \n",
+ "\n",
+ "print(confusion_matrix(dfT.InputMethod.values, dfT.InputMethodPred.values, labels=[0, 1]))\n",
+ "print(\"Accuray: %.2f\" % accuracy_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(\"Recall: %.2f\" % metrics.recall_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(\"Precision: %.2f\" % metrics.average_precision_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(\"F1-Score: %.2f\" % metrics.f1_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(classification_report(dfT.InputMethod.values, dfT.InputMethodPred.values, target_names=target_names))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# kNN"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Fitting 5 folds for each of 62 candidates, totalling 310 fits\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "[Parallel(n_jobs=94)]: Using backend LokyBackend with 94 concurrent workers.\n",
+ "[Parallel(n_jobs=94)]: Done 12 tasks | elapsed: 17.7s\n",
+ "[Parallel(n_jobs=94)]: Done 310 out of 310 | elapsed: 1.5min finished\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'n_neighbors': 2} 0.800546827088748\n",
+ "[[8187 1234]\n",
+ " [4318 5103]]\n",
+ "Accuray: 0.71\n",
+ "Recall: 0.54\n",
+ "Precision: 0.67\n",
+ "F1-Score: 0.65\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " Knuckle 0.65 0.87 0.75 9421\n",
+ " Finger 0.81 0.54 0.65 9421\n",
+ "\n",
+ " micro avg 0.71 0.71 0.71 18842\n",
+ " macro avg 0.73 0.71 0.70 18842\n",
+ "weighted avg 0.73 0.71 0.70 18842\n",
+ "\n",
+ "CPU times: user 1.74 s, sys: 300 ms, total: 2.04 s\n",
+ "Wall time: 1min 30s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "param_grid = {'n_neighbors': range(2,64,1),\n",
+ " #weights': ['uniform', 'distance']\n",
+ " }\n",
+ "#TODO: Create Baseline for different ML stuff\n",
+ "clf = GridSearchCV(neighbors.KNeighborsClassifier(),\n",
+ " param_grid,\n",
+ " cv=5 , n_jobs=os.cpu_count()-2, verbose=1)\n",
+ "clf.fit(dfY[features].values, dfY.InputMethod.values)\n",
+ "print(clf.best_params_, clf.best_score_)\n",
+ "dfT[\"InputMethodPred\"] = clf.predict(dfT[features].values) \n",
+ "\n",
+ "print(confusion_matrix(dfT.InputMethod.values, dfT.InputMethodPred.values, labels=[0, 1]))\n",
+ "print(\"Accuray: %.2f\" % accuracy_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(\"Recall: %.2f\" % metrics.recall_score(dfT.InputMethod.values, dfT.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"Precision: %.2f\" % metrics.average_precision_score(dfT.InputMethod.values, dfT.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"F1-Score: %.2f\" % metrics.f1_score(dfT.InputMethod.values, dfT.InputMethodPred.values, average=\"macro\"))\n",
+ "print(classification_report(dfT.InputMethod.values, dfT.InputMethodPred.values, target_names=target_names))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# SVM"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Fitting 5 folds for each of 9 candidates, totalling 45 fits\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "[Parallel(n_jobs=94)]: Using backend LokyBackend with 94 concurrent workers.\n",
+ "[Parallel(n_jobs=94)]: Done 42 out of 45 | elapsed: 1056.5min remaining: 75.5min\n",
+ "[Parallel(n_jobs=94)]: Done 45 out of 45 | elapsed: 1080.5min finished\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'C': 10.0, 'gamma': 10.0} 0.8256943024851795\n",
+ "CPU times: user 2h 42min 9s, sys: 23.6 s, total: 2h 42min 33s\n",
+ "Wall time: 20h 43min 1s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "C_range = np.logspace(1, 3,3)\n",
+ "gamma_range = np.logspace(-1, 1, 3)\n",
+ "param_grid = dict(gamma=gamma_range, C=C_range)\n",
+ "clf = GridSearchCV(sklearn.svm.SVC(), \n",
+ " param_grid,\n",
+ " cv=5 , n_jobs=os.cpu_count()-2, verbose=1)\n",
+ "clf.fit(dfY[features].values, dfY.InputMethod.values)\n",
+ "print(clf.best_params_, clf.best_score_)\n",
+ "\n",
+ "dfT[\"InputMethodPred\"] = clf.predict(dfT[features].values)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'C': 10.0, 'gamma': 10.0} 0.8256943024851795\n",
+ "[[7106 2315]\n",
+ " [2944 6477]]\n",
+ "Accuray: 0.72\n",
+ "Recall: 0.69\n",
+ "Precision: 0.66\n",
+ "F1-Score: 0.71\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " Knuckle 0.71 0.75 0.73 9421\n",
+ " Finger 0.74 0.69 0.71 9421\n",
+ "\n",
+ " micro avg 0.72 0.72 0.72 18842\n",
+ " macro avg 0.72 0.72 0.72 18842\n",
+ "weighted avg 0.72 0.72 0.72 18842\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(clf.best_params_, clf.best_score_)\n",
+ "print(confusion_matrix(dfT.InputMethod.values, dfT.InputMethodPred.values, labels=[0, 1]))\n",
+ "print(\"Accuray: %.2f\" % accuracy_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(\"Recall: %.2f\" % metrics.recall_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(\"Precision: %.2f\" % metrics.average_precision_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(\"F1-Score: %.2f\" % metrics.f1_score(dfT.InputMethod.values, dfT.InputMethodPred.values))\n",
+ "print(classification_report(dfT.InputMethod.values, dfT.InputMethodPred.values, target_names=target_names))"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_07_CNN.ipynb b/python/Step_07_CNN.ipynb
new file mode 100644
index 0000000..73c53ba
--- /dev/null
+++ b/python/Step_07_CNN.ipynb
@@ -0,0 +1,13240 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Using TensorFlow backend.\n"
+ ]
+ }
+ ],
+ "source": [
+ "## USE for Multi GPU Systems\n",
+ "#import os\n",
+ "#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+ "\n",
+ "from keras.models import Sequential, load_model\n",
+ "from keras.layers import *\n",
+ "from keras import optimizers\n",
+ "from keras import utils\n",
+ "from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau\n",
+ "import keras\n",
+ "\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "import pandas as pd\n",
+ "import math\n",
+ "\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "# Importing matplotlib to plot images.\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "%matplotlib inline\n",
+ "\n",
+ "# Importing SK-learn to calculate precision and recall\n",
+ "import sklearn\n",
+ "from sklearn import metrics\n",
+ "from sklearn.model_selection import train_test_split, cross_val_score, LeaveOneGroupOut\n",
+ "from sklearn.utils import shuffle \n",
+ "\n",
+ "# Used for graph export\n",
+ "from tensorflow.python.framework import graph_util\n",
+ "from tensorflow.python.framework import graph_io\n",
+ "from keras import backend as K\n",
+ "from keras import regularizers\n",
+ "\n",
+ "import pickle as pkl\n",
+ "import h5py\n",
+ "\n",
+ "from pathlib import Path\n",
+ "import os.path\n",
+ "import sys\n",
+ "import datetime\n",
+ "import time\n",
+ "\n",
+ "from keras.callbacks import Callback"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class LoggingTensorBoard(TensorBoard): \n",
+ "\n",
+ " def __init__(self, log_dir, settings_str_to_log, **kwargs):\n",
+ " super(LoggingTensorBoard, self).__init__(log_dir, **kwargs)\n",
+ "\n",
+ " self.settings_str = settings_str_to_log\n",
+ "\n",
+ " def on_train_begin(self, logs=None):\n",
+ " TensorBoard.on_train_begin(self, logs=logs)\n",
+ "\n",
+ " tensor = tf.convert_to_tensor(self.settings_str)\n",
+ " summary = tf.summary.text (\"Run_Settings\", tensor)\n",
+ "\n",
+ " with tf.Session() as sess:\n",
+ " s = sess.run(summary)\n",
+ " self.writer.add_summary(s)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[ 1 2 9 6 4 14 17 16 12 3 10 18 5] [13 8 11 15 7]\n",
+ "13 : 5\n",
+ "0.7222222222222222 : 0.2777777777777778\n"
+ ]
+ }
+ ],
+ "source": [
+ "# the data, split between train and test sets\n",
+ "dfAll = pd.read_pickle(\"DataStudyCollection/df_blobs_area.pkl\")\n",
+ "\n",
+ "lst = dfAll.userID.unique()\n",
+ "np.random.seed(42)\n",
+ "np.random.shuffle(lst)\n",
+ "test_ids = lst[-5:]\n",
+ "train_ids = lst[:-5]\n",
+ "print(train_ids, test_ids)\n",
+ "print(len(train_ids), \":\", len(test_ids))\n",
+ "print(len(train_ids) / len(lst), \":\", len(test_ids)/ len(lst))\n",
+ "\n",
+ "df_train = dfAll[dfAll.userID.isin(train_ids)]\n",
+ "df_test = dfAll[dfAll.userID.isin(test_ids) & (dfAll.Version == \"Normal\")]\n",
+ "\n",
+ "df_train2 = df_train[['Blobs', 'InputMethod']].copy()\n",
+ "df_test2 = df_test[['Blobs', 'InputMethod']].copy()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "618012"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(dfAll)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x_train = np.vstack(df_train2.Blobs)\n",
+ "x_test = np.vstack(df_test2.Blobs)\n",
+ "y_train = df_train2.InputMethod.values\n",
+ "y_test = df_test2.InputMethod.values\n",
+ "\n",
+ "x_train = x_train.reshape(-1, 27, 15, 1)\n",
+ "x_test = x_test.reshape(-1, 27, 15, 1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# convert class vectors to binary class matrices (one-hot notation)\n",
+ "num_classes = 2\n",
+ "y_train_one_hot = utils.to_categorical(df_train2.InputMethod, num_classes)\n",
+ "y_test_one_hot = utils.to_categorical(df_test2.InputMethod, num_classes)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "Text(0.5, 1.0, 'Label for image 1 is: 0')"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAKEAAAEICAYAAAA3NZQkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAADntJREFUeJzt3XusHPV5xvHv4xsG44SLiQXmYoIQkYuEW1GTKjSBYMChTUxUlYBKZSKo0zQovdAkJG0DSavKikrTpEpRAjg4JAFRKMVNKWAsKIraBkxECAQSE2PAri+AbWyHq+23f8zvpOPD2Yv3ct7js89HWu3Mzvxm3t3znJmd2dn9KSIwyzQhuwAzh9DSOYSWziG0dA6hpXMILd2YCqGkByRd1uu2qnxT0lZJD3VXJUg6VtJOSRO7XdZYkfmc+hJCSWslze/Hsjt0OnA2cHREzOt2YRHxXEQcHBG7uy+tfySdKel+SS9LWtts3l4+J0l/KmmjpO2Slko6oNn8Y2pL2EfHAWsj4hf72lDSpD7UM1p+ASwFPjVaK5R0LnAlcBbV6/5O4AvN2oxqCCUdKul7kl4ou8bvSTp62GwnSHqo/BfdKemwWvt3S/ovSdsk/UjSGW2s81LgeuA3yu7mC+XxP5D0tKQtkpZLOqrWJiR9QtJqYPUIy5xd5plUxh+Q9Deltp2S/k3S4ZK+U57Hw5Jm19p/RdLzZdojkn6zNu1AScvK6/OkpE9LWlebfpSk28tr+IykTzZ67hHxUETcBKxp43Ua/pwukbRG0o6ynt9rtYxiEXBDRDwREVuBvwYuadoiInp+A9YC80d4/HDgd4CDgOnAPwP/Wpv+ALAeOBmYBtwOfLtMmwW8BJxH9c9zdhk/otb2sgb1XAJ8vzb+fuBF4NeAA4B/BB6sTQ9gBXAYcOAIy5td5plUW/fTwAnA24GfAD8D5gOTgG8B36y1v7i8FpOAK4CNwNQybQnwn8ChwNHAY8C6Mm0C8AjweWAK1VZmDXBui7/HfKo9QbN5fvmcymu/HTipTDsS+JUyfCywDTi2wXJ+BHykNj6jLPfwhusezRCOMN9cYOuwEC6pjc8B3gAmAp8BbhrW/h5gUQchvAH4Um38YOBNYHYthO9v5w9WW/df1KZfA/xHbfyDwKNNlrcVOKUM7xUq4LJaCE8DnhvW9rP1gPcwhNuoNhhv+SdssZyfAwtq45PLcmc3ajPau+ODJH1d0rOStgMPAocMOyJ7vjb8LNWTmEH1/uJ3y654m6RtVAccR3ZQylFl2QBExE6qreqsBnW0Y1Nt+NURxg8eGpH052VX+3J5Hm+neo5DtdXXXR8+Djhq2GvwOWDmPtbaVFTvnT8C/CGwQdK/S3pXm813Am+rjQ8N72jUYLQPTK4ATgJOi4i3Ae8tj6s2zzG14WOptlAvUv0xboqIQ2q3aRGxpIM6/pfqD1qtXJpGtXtcX5unL5cXlfd/nwYuAA6NiEOAl/n/12AD1W54SP31eB54ZthrMD0izut1nRFxT0ScTfVP/hRwXZtNnwBOqY2fAmyKiJcaNehnCCdLmlq7TaJ6H/gqsK0ccFw1QruLJc2RdBDwReC2qE4bfBv4oKRzJU0syzxjhAObdtwMfFTS3HL64G+BH0TE2k6e6D6aDuwCXgAmSfo8e285bgU+Ww7iZgGX16Y9BOyQ9JlyADNR0smSfn2kFUmaIGkq1d5E5TWb0qpASTMlLSz/nK9Tbd32tPn8vgVcWv6GhwB/CdzYrEE/Q3gXVeCGblcD/wAcSLVl+x/g7hHa3URV9EZgKvBJgIh4HlhItft5gWqr8Ck6eA4RcR/wV1QHPhuoDigu3NfldOgequf9M6q3BK+x9y73i8A64BngPuA2qiBQ/hl/m+q99DNUr+P1VLvzkbyX6rW/i2qv8ipwbxs1TgD+jGqPsQV4H/Bx2Ouk9rEjNYyIu4EvAfcDz5XnONLG5pdU
3jzaGCXp48CFEfG+7Fr6ZVBOVu83JB0p6T1lV3oS1fvoO7Lr6qf9+dOA8WoK8HXgeKrTJLcA/5RaUZ95d2zpvDu2dKO6O56iA2Iq0xrPIDWeBmhiq6uMmm/VY9eYvuhlXNjB1hcj4oh9adNVCCUtAL5C9bHa9a1OHE9lGqdNPKfx8iY3L2fCIY3ORBS7m4ds95ZtzdvvcUi7dV/c9mzrufbW8e64fNT2NeADVJ/xXiRpTqfLs8HVzXvCecDTEbEmIt6gOopb2JuybJB0E8JZ7H2mfx17XwAAgKTFklZJWvVmdeLfbC99PzqOiG9ExKkRcepkml7lbQOqmxCuZ+8rPI5m76tQzNrSTQgfBk6UdHy5MuNCYHlvyrJB0vEpmojYJelyqqtCJgJLI+KJ1g0bXxE04YAWu+vpTc4xAuxo8T0mn4IZk7o6TxgRd1FdJmTWMX9sZ+kcQkvnEFo6h9DSOYSWziG0dKN6PaEmTGDCgQc2nmFW8+9w//ziGU2nT97R/HrEY766s+n0Pa+80nS69Ye3hJbOIbR0DqGlcwgtnUNo6RxCS+cQWrox9TMg0eJ7xT/96LVdLf+3bvlQ0+l71j7X1fKtM94SWjqH0NI5hJbOIbR0DqGlcwgtnUNo6cbUecIJW7c3nX7yV/+o6fRXjmzey8G7Xm3ZxZsl8JbQ0jmEls4htHQOoaVzCC2dQ2jpHEJLN6rnCWPPHva81vh3q/Xaa03bH3fLuu7W/2rz5VuObvsxWUvVo/duYFdEnNqLomyw9GJLeGZEvNiD5diA8ntCS9dtCAO4V9Ijkhb3oiAbPN3ujk+PiPWS3gGskPRURDxYn6GEczHAVA7qcnU2HnW1JYyI9eV+M1Xv5PNGmMed6VhT3XSwOE3S9KFh4Bzg8V4VZoOjm93xTOAOVX0UTwK+GxF3t2zVpC+R3S9tad621XTbL3XTmc4a4JQe1mIDyqdoLJ1DaOkcQkvnEFo6h9DSOYSWziG0dA6hpXMILZ1DaOkcQkvnEFo6h9DSOYSWziG0dA6hpXMILZ1DaOkcQkvnEFo6h9DSOYSWziG0dA6hpXMILZ1DaOkcQkvnEFo6h9DSOYSWziG0dA6hpWsZQklLJW2W9HjtscMkrZC0utwf2t8ybTxrZ0t4I7Bg2GNXAisj4kRgZRk360jLEJYuIYb/WPRCYFkZXgac3+O6bIB0+pvVMyNiQxneSPUj6iNyPybWStcHJhERVD07NZrufkysqU5DuEnSkQDlfnPvSrJB02kIlwOLyvAi4M7elGODqJ1TNDcD/w2cJGmdpEuBJcDZklYD88u4WUdaHphExEUNJp3V41psQPkTE0vnEFo6h9DSOYSWziG0dA6hpXMILZ1DaOkcQkvnEFo6h9DSOYSWziG0dA6hpXMILZ1DaOkcQkvnEFo6h9DSOYSWziG0dA6hpXMILZ1DaOkcQkvnEFo6h9DSOYSWziG0dA6hpXMILV2n/ZhcLWm9pEfL7bz+lmnjWaf9mAB8OSLmlttdvS3LBkmn/ZiY9Uw37wkvl/RY2V037FZM0mJJqyStepPXu1idjVedhvBa4ARgLrABuKbRjO7HxFrpKIQRsSkidkfEHuA6YF5vy7JB0lEIhzrSKT4MPN5oXrNWWnYhUfoxOQOYIWkdcBVwhqS5VN2JrQU+1scabZzrtB+TG/pQiw0of2Ji6RxCS+cQWjqH0NI5hJbOIbR0DqGlcwgtnUNo6RxCS+cQWjqH0NI5hJbOIbR0DqGlcwgtnUNo6RxCS+cQWjqH0NI5hJbOIbR0DqGlcwgtnUNo6RxCS+cQWjqH0NI5hJbOIbR0DqGla6cfk2Mk3S/pJ5KekPTH5fHDJK2QtLrcN/zxdLNm2tkS7gKuiIg5wLuBT0iaA1wJrIyIE4GVZdxsn7XTj8mGiPhhGd4BPAnMAhYCy8psy4Dz+1WkjW8tfy64TtJs4FeBHwAzI2JDmbQRmNmgzWJgMcBUDuq0ThvH2j4wkXQwcDvwJxGx
vT4tIoLqR9Tfwv2YWCtthVDSZKoAfici/qU8vGmoK4lyv7k/Jdp4187Rsah+rf/JiPj72qTlwKIyvAi4s/fl2SBo5z3he4DfB34s6dHy2OeAJcCtki4FngUu6E+JNt6104/J9wE1mHxWb8uxQeRPTCydQ2jpHEJL5xBaOofQ0jmEls4htHQOoaVzCC2dQ2jpHEJL5xBaOofQ0jmEls4htHQOoaVzCC2dQ2jpHEJL5xBaOofQ0jmEls4htHQOoaVzCC2dQ2jpHEJL5xBaOofQ0jmEls4htHQOoaXrpjOdqyWtl/RouZ3X/3JtPGrn54KHOtP5oaTpwCOSVpRpX46Iv+tfeTYI2vm54A3AhjK8Q9JQZzpmPbFP7wmHdaYDcLmkxyQtbdS3naTFklZJWvUmr3dVrI1P3XSmcy1wAjCXakt5zUjt3JmOtdJxZzoRsSkidkfEHuA6YF7/yrTxrOPOdIZ6cyo+DDze+/JsEHTTmc5FkuZS9Wm3FvhYXyq0ca+bznTu6n05Noj8iYmlcwgtnUNo6RxCS+cQWjqH0NIpIkZvZdILVB10D5kBvDhqBey7sV4fjL0aj4uII/alwaiG8C0rl1ZFxKlpBbQw1uuD/aPGVrw7tnQOoaXLDuE3ktffylivD/aPGptKfU9oBvlbQjOH0PKlhFDSAkk/lfS0pCszamhF0lpJPy5fZ101BupZKmmzpMdrjx0maYWk1eV+xO/5jHWjHkJJE4GvAR8A5lBdHDtntOto05kRMXeMnIe7EVgw7LErgZURcSKwsozvdzK2hPOApyNiTUS8AdwCLEyoY78SEQ8CW4Y9vBBYVoaXAeePalE9khHCWcDztfF1jM3vMQdwr6RHJC3OLqaBmeV74QAbgZmZxXSqne+YDKrTI2K9pHcAKyQ9VbZGY1JEhKT98nxbxpZwPXBMbfzo8tiYEhHry/1m4A7G5ldaNw1967Hcb06upyMZIXwYOFHS8ZKmABcCyxPqaEjStPK7O0iaBpzD2PxK63JgURleBNyZWEvHRn13HBG7JF0O3ANMBJZGxBOjXUcLM4E7qq9cMwn4bkTcnVmQpJuBM4AZktYBVwFLgFslXUp1idwFeRV2zh/bWTp/YmLpHEJL5xBaOofQ0jmEls4htHQOoaX7P9Bb5SQoQkqOAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "i = 1\n",
+ "plt.imshow(x_train[i].reshape(27, 15)) #np.sqrt(784) = 28\n",
+ "plt.title(\"Label for image %i is: %s\" % (i, y_train[i]))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# If GPU is not available: \n",
+ "# GPU_USE = '/cpu:0'\n",
+ "#config = tf.ConfigProto(device_count = {\"GPU\": 1})\n",
+ "\n",
+ "\n",
+ "# If GPU is available: \n",
+ "config = tf.ConfigProto()\n",
+ "config.log_device_placement = True\n",
+ "config.allow_soft_placement = True\n",
+ "config.gpu_options.allow_growth=True\n",
+ "config.gpu_options.allocator_type = 'BFC'\n",
+ "\n",
+ "# Limit the maximum memory used\n",
+ "config.gpu_options.per_process_gpu_memory_fraction = 0.2\n",
+ "\n",
+ "# set session config\n",
+ "tf.keras.backend.set_session(tf.Session(config=config))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "Colocations handled automatically by placer.\n",
+ "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
+ "CNN\n",
+ "_________________________________________________________________\n",
+ "Layer (type) Output Shape Param # \n",
+ "=================================================================\n",
+ "conv2d_1 (Conv2D) (None, 27, 15, 128) 1280 \n",
+ "_________________________________________________________________\n",
+ "batch_normalization_1 (Batch (None, 27, 15, 128) 512 \n",
+ "_________________________________________________________________\n",
+ "conv2d_2 (Conv2D) (None, 27, 15, 64) 73792 \n",
+ "_________________________________________________________________\n",
+ "batch_normalization_2 (Batch (None, 27, 15, 64) 256 \n",
+ "_________________________________________________________________\n",
+ "max_pooling2d_1 (MaxPooling2 (None, 14, 8, 64) 0 \n",
+ "_________________________________________________________________\n",
+ "dropout_1 (Dropout) (None, 14, 8, 64) 0 \n",
+ "_________________________________________________________________\n",
+ "conv2d_3 (Conv2D) (None, 14, 8, 64) 36928 \n",
+ "_________________________________________________________________\n",
+ "batch_normalization_3 (Batch (None, 14, 8, 64) 256 \n",
+ "_________________________________________________________________\n",
+ "conv2d_4 (Conv2D) (None, 14, 8, 32) 18464 \n",
+ "_________________________________________________________________\n",
+ "batch_normalization_4 (Batch (None, 14, 8, 32) 128 \n",
+ "_________________________________________________________________\n",
+ "max_pooling2d_2 (MaxPooling2 (None, 7, 4, 32) 0 \n",
+ "_________________________________________________________________\n",
+ "dropout_2 (Dropout) (None, 7, 4, 32) 0 \n",
+ "_________________________________________________________________\n",
+ "flatten_1 (Flatten) (None, 896) 0 \n",
+ "_________________________________________________________________\n",
+ "dense_1 (Dense) (None, 140) 125580 \n",
+ "_________________________________________________________________\n",
+ "dropout_3 (Dropout) (None, 140) 0 \n",
+ "_________________________________________________________________\n",
+ "dense_2 (Dense) (None, 70) 9870 \n",
+ "_________________________________________________________________\n",
+ "dropout_4 (Dropout) (None, 70) 0 \n",
+ "_________________________________________________________________\n",
+ "dense_3 (Dense) (None, 2) 142 \n",
+ "=================================================================\n",
+ "Total params: 267,208\n",
+ "Trainable params: 266,632\n",
+ "Non-trainable params: 576\n",
+ "_________________________________________________________________\n",
+ "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "Use tf.cast instead.\n",
+ "Train on 465844 samples, validate on 38042 samples\n",
+ "Epoch 1/100000\n",
+ " - 22s - loss: 10.6384 - acc: 0.8243 - val_loss: 1.4424 - val_acc: 0.2476\n",
+ "\n",
+ "Epoch 00001: val_acc improved from -inf to 0.24765, saving model to ./ModelSnapshots/CNN-001.h5\n",
+ "Epoch 2/100000\n",
+ " - 19s - loss: 0.6649 - acc: 0.9111 - val_loss: 0.6993 - val_acc: 0.9254\n",
+ "\n",
+ "Epoch 00002: val_acc improved from 0.24765 to 0.92542, saving model to ./ModelSnapshots/CNN-002.h5\n",
+ "Epoch 3/100000\n",
+ " - 19s - loss: 0.5491 - acc: 0.9172 - val_loss: 0.4966 - val_acc: 0.9273\n",
+ "\n",
+ "Epoch 00003: val_acc improved from 0.92542 to 0.92732, saving model to ./ModelSnapshots/CNN-003.h5\n",
+ "Epoch 4/100000\n",
+ " - 20s - loss: 0.5172 - acc: 0.9224 - val_loss: 0.4912 - val_acc: 0.9173\n",
+ "\n",
+ "Epoch 00004: val_acc did not improve from 0.92732\n",
+ "Epoch 5/100000\n",
+ " - 19s - loss: 0.5110 - acc: 0.9224 - val_loss: 0.6533 - val_acc: 0.7959\n",
+ "\n",
+ "Epoch 00005: val_acc did not improve from 0.92732\n",
+ "Epoch 6/100000\n",
+ " - 19s - loss: 0.5013 - acc: 0.9244 - val_loss: 0.4590 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 00006: val_acc improved from 0.92732 to 0.93449, saving model to ./ModelSnapshots/CNN-006.h5\n",
+ "Epoch 7/100000\n",
+ " - 19s - loss: 0.4876 - acc: 0.9258 - val_loss: 0.8813 - val_acc: 0.7317\n",
+ "\n",
+ "Epoch 00007: val_acc did not improve from 0.93449\n",
+ "Epoch 8/100000\n",
+ " - 19s - loss: 0.4874 - acc: 0.9272 - val_loss: 0.4632 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 00008: val_acc did not improve from 0.93449\n",
+ "Epoch 9/100000\n",
+ " - 18s - loss: 0.4859 - acc: 0.9262 - val_loss: 0.4528 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00009: val_acc did not improve from 0.93449\n",
+ "Epoch 10/100000\n",
+ " - 19s - loss: 0.4795 - acc: 0.9274 - val_loss: 0.5400 - val_acc: 0.8862\n",
+ "\n",
+ "Epoch 00010: val_acc did not improve from 0.93449\n",
+ "Epoch 11/100000\n",
+ " - 18s - loss: 0.4747 - acc: 0.9271 - val_loss: 0.4750 - val_acc: 0.9164\n",
+ "\n",
+ "Epoch 00011: val_acc did not improve from 0.93449\n",
+ "Epoch 12/100000\n",
+ " - 18s - loss: 0.4673 - acc: 0.9282 - val_loss: 0.6994 - val_acc: 0.8365\n",
+ "\n",
+ "Epoch 00012: val_acc did not improve from 0.93449\n",
+ "Epoch 13/100000\n",
+ " - 18s - loss: 0.4585 - acc: 0.9291 - val_loss: 0.4540 - val_acc: 0.9170\n",
+ "\n",
+ "Epoch 00013: val_acc did not improve from 0.93449\n",
+ "Epoch 14/100000\n",
+ " - 18s - loss: 0.4556 - acc: 0.9278 - val_loss: 0.4327 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 00014: val_acc did not improve from 0.93449\n",
+ "Epoch 15/100000\n",
+ " - 19s - loss: 0.4564 - acc: 0.9284 - val_loss: 0.4179 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 00015: val_acc did not improve from 0.93449\n",
+ "Epoch 16/100000\n",
+ " - 19s - loss: 0.4476 - acc: 0.9295 - val_loss: 0.4111 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 00016: val_acc did not improve from 0.93449\n",
+ "Epoch 17/100000\n",
+ " - 19s - loss: 0.4486 - acc: 0.9287 - val_loss: 0.4264 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 00017: val_acc did not improve from 0.93449\n",
+ "Epoch 18/100000\n",
+ " - 20s - loss: 0.4490 - acc: 0.9292 - val_loss: 0.4275 - val_acc: 0.9255\n",
+ "\n",
+ "Epoch 00018: val_acc did not improve from 0.93449\n",
+ "Epoch 19/100000\n",
+ " - 19s - loss: 0.4426 - acc: 0.9300 - val_loss: 0.4185 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 00019: val_acc did not improve from 0.93449\n",
+ "Epoch 20/100000\n",
+ " - 19s - loss: 0.4419 - acc: 0.9299 - val_loss: 0.4256 - val_acc: 0.9255\n",
+ "\n",
+ "Epoch 00020: val_acc did not improve from 0.93449\n",
+ "Epoch 21/100000\n",
+ " - 19s - loss: 0.4459 - acc: 0.9296 - val_loss: 0.4994 - val_acc: 0.9231\n",
+ "\n",
+ "Epoch 00021: val_acc did not improve from 0.93449\n",
+ "Epoch 22/100000\n",
+ " - 19s - loss: 0.4388 - acc: 0.9309 - val_loss: 0.4264 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 00022: val_acc did not improve from 0.93449\n",
+ "Epoch 23/100000\n",
+ " - 19s - loss: 0.4330 - acc: 0.9315 - val_loss: 0.3966 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 00023: val_acc improved from 0.93449 to 0.93675, saving model to ./ModelSnapshots/CNN-023.h5\n",
+ "Epoch 24/100000\n",
+ " - 19s - loss: 0.4331 - acc: 0.9308 - val_loss: 0.4026 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 00024: val_acc did not improve from 0.93675\n",
+ "Epoch 25/100000\n",
+ " - 20s - loss: 0.4322 - acc: 0.9312 - val_loss: 0.4607 - val_acc: 0.9141\n",
+ "\n",
+ "Epoch 00025: val_acc did not improve from 0.93675\n",
+ "Epoch 26/100000\n",
+ " - 19s - loss: 0.4345 - acc: 0.9300 - val_loss: 0.4082 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 00026: val_acc did not improve from 0.93675\n",
+ "Epoch 27/100000\n",
+ " - 19s - loss: 0.4337 - acc: 0.9312 - val_loss: 0.4118 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 00027: val_acc did not improve from 0.93675\n",
+ "Epoch 28/100000\n",
+ " - 19s - loss: 0.4307 - acc: 0.9316 - val_loss: 0.4559 - val_acc: 0.9112\n",
+ "\n",
+ "Epoch 00028: val_acc did not improve from 0.93675\n",
+ "Epoch 29/100000\n",
+ " - 18s - loss: 0.4348 - acc: 0.9312 - val_loss: 0.4312 - val_acc: 0.9201\n",
+ "\n",
+ "Epoch 00029: val_acc did not improve from 0.93675\n",
+ "Epoch 30/100000\n",
+ " - 19s - loss: 0.4290 - acc: 0.9310 - val_loss: 0.4249 - val_acc: 0.9152\n",
+ "\n",
+ "Epoch 00030: val_acc did not improve from 0.93675\n",
+ "Epoch 31/100000\n",
+ " - 19s - loss: 0.4317 - acc: 0.9312 - val_loss: 0.4365 - val_acc: 0.9219\n",
+ "\n",
+ "Epoch 00031: val_acc did not improve from 0.93675\n",
+ "Epoch 32/100000\n",
+ " - 19s - loss: 0.4269 - acc: 0.9315 - val_loss: 0.3956 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 00032: val_acc did not improve from 0.93675\n",
+ "Epoch 33/100000\n",
+ " - 19s - loss: 0.4252 - acc: 0.9313 - val_loss: 0.4402 - val_acc: 0.9201\n",
+ "\n",
+ "Epoch 00033: val_acc did not improve from 0.93675\n",
+ "Epoch 34/100000\n",
+ " - 19s - loss: 0.4258 - acc: 0.9323 - val_loss: 0.3936 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 00034: val_acc did not improve from 0.93675\n",
+ "Epoch 35/100000\n",
+ " - 19s - loss: 0.4266 - acc: 0.9311 - val_loss: 0.4042 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00035: val_acc did not improve from 0.93675\n",
+ "Epoch 36/100000\n",
+ " - 19s - loss: 0.4259 - acc: 0.9322 - val_loss: 0.4084 - val_acc: 0.9268\n",
+ "\n",
+ "Epoch 00036: val_acc did not improve from 0.93675\n",
+ "Epoch 37/100000\n",
+ " - 19s - loss: 0.4240 - acc: 0.9318 - val_loss: 0.4365 - val_acc: 0.9198\n",
+ "\n",
+ "Epoch 00037: val_acc did not improve from 0.93675\n",
+ "Epoch 38/100000\n",
+ " - 19s - loss: 0.4194 - acc: 0.9327 - val_loss: 0.4481 - val_acc: 0.8921\n",
+ "\n",
+ "Epoch 00038: val_acc did not improve from 0.93675\n",
+ "Epoch 39/100000\n",
+ " - 19s - loss: 0.4211 - acc: 0.9312 - val_loss: 0.3893 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 00039: val_acc did not improve from 0.93675\n",
+ "Epoch 40/100000\n",
+ " - 19s - loss: 0.4197 - acc: 0.9324 - val_loss: 0.4019 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 00040: val_acc did not improve from 0.93675\n",
+ "Epoch 41/100000\n",
+ " - 19s - loss: 0.4215 - acc: 0.9311 - val_loss: 0.4159 - val_acc: 0.9282\n",
+ "\n",
+ "Epoch 00041: val_acc did not improve from 0.93675\n",
+ "Epoch 42/100000\n",
+ " - 19s - loss: 0.4193 - acc: 0.9319 - val_loss: 0.6113 - val_acc: 0.8096\n",
+ "\n",
+ "Epoch 00042: val_acc did not improve from 0.93675\n",
+ "Epoch 43/100000\n",
+ " - 19s - loss: 0.4186 - acc: 0.9320 - val_loss: 1.0623 - val_acc: 0.7866\n",
+ "\n",
+ "Epoch 00043: val_acc did not improve from 0.93675\n",
+ "Epoch 44/100000\n",
+ " - 19s - loss: 0.4189 - acc: 0.9330 - val_loss: 0.4174 - val_acc: 0.9302\n",
+ "\n",
+ "Epoch 00044: val_acc did not improve from 0.93675\n",
+ "Epoch 45/100000\n",
+ " - 19s - loss: 0.4156 - acc: 0.9320 - val_loss: 0.4213 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 00045: val_acc did not improve from 0.93675\n",
+ "Epoch 46/100000\n",
+ " - 19s - loss: 0.4156 - acc: 0.9320 - val_loss: 0.4395 - val_acc: 0.9141\n",
+ "\n",
+ "Epoch 00046: val_acc did not improve from 0.93675\n",
+ "Epoch 47/100000\n",
+ " - 19s - loss: 0.4143 - acc: 0.9324 - val_loss: 0.3903 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 00047: val_acc did not improve from 0.93675\n",
+ "Epoch 48/100000\n",
+ " - 19s - loss: 0.4120 - acc: 0.9323 - val_loss: 0.4650 - val_acc: 0.8933\n",
+ "\n",
+ "Epoch 00048: val_acc did not improve from 0.93675\n",
+ "Epoch 49/100000\n",
+ " - 19s - loss: 0.4158 - acc: 0.9317 - val_loss: 0.4139 - val_acc: 0.9272\n",
+ "\n",
+ "Epoch 00049: val_acc did not improve from 0.93675\n",
+ "Epoch 50/100000\n",
+ " - 19s - loss: 0.4154 - acc: 0.9321 - val_loss: 0.4007 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 00050: val_acc did not improve from 0.93675\n",
+ "Epoch 51/100000\n",
+ " - 19s - loss: 0.4142 - acc: 0.9324 - val_loss: 0.4773 - val_acc: 0.8966\n",
+ "\n",
+ "Epoch 00051: val_acc did not improve from 0.93675\n",
+ "Epoch 52/100000\n",
+ " - 19s - loss: 0.4123 - acc: 0.9323 - val_loss: 0.9801 - val_acc: 0.8088\n",
+ "\n",
+ "Epoch 00052: val_acc did not improve from 0.93675\n",
+ "Epoch 53/100000\n",
+ " - 19s - loss: 0.4135 - acc: 0.9315 - val_loss: 0.4215 - val_acc: 0.9125\n",
+ "\n",
+ "Epoch 00053: val_acc did not improve from 0.93675\n",
+ "Epoch 54/100000\n",
+ " - 19s - loss: 0.4129 - acc: 0.9327 - val_loss: 0.3969 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 00054: val_acc did not improve from 0.93675\n",
+ "Epoch 55/100000\n",
+ " - 19s - loss: 0.4087 - acc: 0.9330 - val_loss: 0.6396 - val_acc: 0.8209\n",
+ "\n",
+ "Epoch 00055: val_acc did not improve from 0.93675\n",
+ "Epoch 56/100000\n",
+ " - 19s - loss: 0.4110 - acc: 0.9319 - val_loss: 0.3968 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 00056: val_acc did not improve from 0.93675\n",
+ "Epoch 57/100000\n",
+ " - 19s - loss: 0.4119 - acc: 0.9323 - val_loss: 0.4063 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 00057: val_acc did not improve from 0.93675\n",
+ "Epoch 58/100000\n",
+ " - 19s - loss: 0.4097 - acc: 0.9324 - val_loss: 0.4219 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 00058: val_acc did not improve from 0.93675\n",
+ "Epoch 59/100000\n",
+ " - 19s - loss: 0.4094 - acc: 0.9327 - val_loss: 0.4067 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 00059: val_acc did not improve from 0.93675\n",
+ "Epoch 60/100000\n",
+ " - 19s - loss: 0.4070 - acc: 0.9337 - val_loss: 0.3934 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 00060: val_acc did not improve from 0.93675\n",
+ "Epoch 61/100000\n",
+ " - 19s - loss: 0.4090 - acc: 0.9326 - val_loss: 0.4402 - val_acc: 0.9150\n",
+ "\n",
+ "Epoch 00061: val_acc did not improve from 0.93675\n",
+ "Epoch 62/100000\n",
+ " - 19s - loss: 0.4079 - acc: 0.9330 - val_loss: 0.4429 - val_acc: 0.9156\n",
+ "\n",
+ "Epoch 00062: val_acc did not improve from 0.93675\n",
+ "Epoch 63/100000\n",
+ " - 19s - loss: 0.4093 - acc: 0.9323 - val_loss: 0.4007 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 00063: val_acc did not improve from 0.93675\n",
+ "Epoch 64/100000\n",
+ " - 19s - loss: 0.4081 - acc: 0.9333 - val_loss: 0.4042 - val_acc: 0.9201\n",
+ "\n",
+ "Epoch 00064: val_acc did not improve from 0.93675\n",
+ "Epoch 65/100000\n",
+ " - 19s - loss: 0.4104 - acc: 0.9319 - val_loss: 0.3883 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 00065: val_acc did not improve from 0.93675\n",
+ "Epoch 66/100000\n",
+ " - 19s - loss: 0.4095 - acc: 0.9326 - val_loss: 0.3803 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 00066: val_acc did not improve from 0.93675\n",
+ "Epoch 67/100000\n",
+ " - 19s - loss: 0.4099 - acc: 0.9329 - val_loss: 0.3824 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 00067: val_acc improved from 0.93675 to 0.93723, saving model to ./ModelSnapshots/CNN-067.h5\n",
+ "Epoch 68/100000\n",
+ " - 19s - loss: 0.4103 - acc: 0.9330 - val_loss: 0.3824 - val_acc: 0.9377\n",
+ "\n",
+ "Epoch 00068: val_acc improved from 0.93723 to 0.93773, saving model to ./ModelSnapshots/CNN-068.h5\n",
+ "Epoch 69/100000\n",
+ " - 19s - loss: 0.4068 - acc: 0.9335 - val_loss: 0.3933 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 00069: val_acc did not improve from 0.93773\n",
+ "Epoch 70/100000\n",
+ " - 19s - loss: 0.4102 - acc: 0.9331 - val_loss: 0.4003 - val_acc: 0.9268\n",
+ "\n",
+ "Epoch 00070: val_acc did not improve from 0.93773\n",
+ "Epoch 71/100000\n",
+ " - 19s - loss: 0.4072 - acc: 0.9332 - val_loss: 0.3831 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 00071: val_acc did not improve from 0.93773\n",
+ "Epoch 72/100000\n",
+ " - 19s - loss: 0.4106 - acc: 0.9330 - val_loss: 0.4424 - val_acc: 0.9048\n",
+ "\n",
+ "Epoch 00072: val_acc did not improve from 0.93773\n",
+ "Epoch 73/100000\n",
+ " - 19s - loss: 0.4077 - acc: 0.9335 - val_loss: 0.4400 - val_acc: 0.9091\n",
+ "\n",
+ "Epoch 00073: val_acc did not improve from 0.93773\n",
+ "Epoch 74/100000\n",
+ " - 19s - loss: 0.4126 - acc: 0.9320 - val_loss: 0.4159 - val_acc: 0.9256\n",
+ "\n",
+ "Epoch 00074: val_acc did not improve from 0.93773\n",
+ "Epoch 75/100000\n",
+ " - 19s - loss: 0.4082 - acc: 0.9329 - val_loss: 0.4983 - val_acc: 0.8585\n",
+ "\n",
+ "Epoch 00075: val_acc did not improve from 0.93773\n",
+ "Epoch 76/100000\n",
+ " - 19s - loss: 0.4097 - acc: 0.9330 - val_loss: 0.4032 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00076: val_acc did not improve from 0.93773\n",
+ "Epoch 77/100000\n",
+ " - 19s - loss: 0.4094 - acc: 0.9335 - val_loss: 0.4593 - val_acc: 0.8998\n",
+ "\n",
+ "Epoch 00077: val_acc did not improve from 0.93773\n",
+ "Epoch 78/100000\n",
+ " - 19s - loss: 0.4086 - acc: 0.9326 - val_loss: 0.5056 - val_acc: 0.8838\n",
+ "\n",
+ "Epoch 00078: val_acc did not improve from 0.93773\n",
+ "Epoch 79/100000\n",
+ " - 19s - loss: 0.4061 - acc: 0.9329 - val_loss: 0.4159 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 00079: val_acc did not improve from 0.93773\n",
+ "Epoch 80/100000\n",
+ " - 19s - loss: 0.4060 - acc: 0.9338 - val_loss: 0.4024 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 00080: val_acc did not improve from 0.93773\n",
+ "Epoch 81/100000\n",
+ " - 19s - loss: 0.4033 - acc: 0.9339 - val_loss: 0.3978 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 00081: val_acc did not improve from 0.93773\n",
+ "Epoch 82/100000\n",
+ " - 19s - loss: 0.4089 - acc: 0.9327 - val_loss: 0.4198 - val_acc: 0.9193\n",
+ "\n",
+ "Epoch 00082: val_acc did not improve from 0.93773\n",
+ "Epoch 83/100000\n",
+ " - 19s - loss: 0.4096 - acc: 0.9330 - val_loss: 0.3905 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 00083: val_acc did not improve from 0.93773\n",
+ "Epoch 84/100000\n",
+ " - 19s - loss: 0.4063 - acc: 0.9332 - val_loss: 0.4277 - val_acc: 0.9073\n",
+ "\n",
+ "Epoch 00084: val_acc did not improve from 0.93773\n",
+ "Epoch 85/100000\n",
+ " - 19s - loss: 0.4064 - acc: 0.9324 - val_loss: 0.4027 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 00085: val_acc did not improve from 0.93773\n",
+ "Epoch 86/100000\n",
+ " - 19s - loss: 0.4039 - acc: 0.9337 - val_loss: 0.4472 - val_acc: 0.9010\n",
+ "\n",
+ "Epoch 00086: val_acc did not improve from 0.93773\n",
+ "Epoch 87/100000\n",
+ " - 19s - loss: 0.4078 - acc: 0.9336 - val_loss: 0.3936 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 00087: val_acc did not improve from 0.93773\n",
+ "Epoch 88/100000\n",
+ " - 18s - loss: 0.4056 - acc: 0.9333 - val_loss: 0.4580 - val_acc: 0.9131\n",
+ "\n",
+ "Epoch 00088: val_acc did not improve from 0.93773\n",
+ "Epoch 89/100000\n",
+ " - 19s - loss: 0.4075 - acc: 0.9333 - val_loss: 0.3881 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 00089: val_acc did not improve from 0.93773\n",
+ "Epoch 90/100000\n",
+ " - 19s - loss: 0.4054 - acc: 0.9340 - val_loss: 0.3874 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 00090: val_acc did not improve from 0.93773\n",
+ "Epoch 91/100000\n",
+ " - 19s - loss: 0.4079 - acc: 0.9332 - val_loss: 0.3935 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00091: val_acc did not improve from 0.93773\n",
+ "Epoch 92/100000\n",
+ " - 19s - loss: 0.4062 - acc: 0.9340 - val_loss: 0.4137 - val_acc: 0.9250\n",
+ "\n",
+ "Epoch 00092: val_acc did not improve from 0.93773\n",
+ "Epoch 93/100000\n",
+ " - 19s - loss: 0.4079 - acc: 0.9328 - val_loss: 0.6338 - val_acc: 0.8380\n",
+ "\n",
+ "Epoch 00093: val_acc did not improve from 0.93773\n",
+ "Epoch 94/100000\n",
+ " - 19s - loss: 0.4096 - acc: 0.9331 - val_loss: 0.3798 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 00094: val_acc improved from 0.93773 to 0.93817, saving model to ./ModelSnapshots/CNN-094.h5\n",
+ "Epoch 95/100000\n",
+ " - 19s - loss: 0.4054 - acc: 0.9335 - val_loss: 0.3844 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 00095: val_acc did not improve from 0.93817\n",
+ "Epoch 96/100000\n",
+ " - 19s - loss: 0.4075 - acc: 0.9331 - val_loss: 0.4462 - val_acc: 0.9086\n",
+ "\n",
+ "Epoch 00096: val_acc did not improve from 0.93817\n",
+ "Epoch 97/100000\n",
+ " - 19s - loss: 0.4097 - acc: 0.9325 - val_loss: 0.4847 - val_acc: 0.8882\n",
+ "\n",
+ "Epoch 00097: val_acc did not improve from 0.93817\n",
+ "Epoch 98/100000\n",
+ " - 20s - loss: 0.4076 - acc: 0.9339 - val_loss: 0.4225 - val_acc: 0.9201\n",
+ "\n",
+ "Epoch 00098: val_acc did not improve from 0.93817\n",
+ "Epoch 99/100000\n",
+ " - 19s - loss: 0.4072 - acc: 0.9334 - val_loss: 0.3894 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 00099: val_acc did not improve from 0.93817\n",
+ "Epoch 100/100000\n",
+ " - 19s - loss: 0.4018 - acc: 0.9346 - val_loss: 0.3839 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 00100: val_acc did not improve from 0.93817\n",
+ "Epoch 101/100000\n",
+ " - 19s - loss: 0.4070 - acc: 0.9328 - val_loss: 0.3996 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 00101: val_acc did not improve from 0.93817\n",
+ "Epoch 102/100000\n",
+ " - 19s - loss: 0.4043 - acc: 0.9342 - val_loss: 0.4956 - val_acc: 0.9097\n",
+ "\n",
+ "Epoch 00102: val_acc did not improve from 0.93817\n",
+ "Epoch 103/100000\n",
+ " - 19s - loss: 0.4066 - acc: 0.9332 - val_loss: 0.4098 - val_acc: 0.9266\n",
+ "\n",
+ "Epoch 00103: val_acc did not improve from 0.93817\n",
+ "Epoch 104/100000\n",
+ " - 19s - loss: 0.4034 - acc: 0.9344 - val_loss: 0.4489 - val_acc: 0.9175\n",
+ "\n",
+ "Epoch 00104: val_acc did not improve from 0.93817\n",
+ "Epoch 105/100000\n",
+ " - 19s - loss: 0.4046 - acc: 0.9340 - val_loss: 0.4827 - val_acc: 0.8891\n",
+ "\n",
+ "Epoch 00105: val_acc did not improve from 0.93817\n",
+ "Epoch 106/100000\n",
+ " - 19s - loss: 0.4058 - acc: 0.9343 - val_loss: 0.3853 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 00106: val_acc did not improve from 0.93817\n",
+ "Epoch 107/100000\n",
+ " - 19s - loss: 0.4066 - acc: 0.9336 - val_loss: 0.4043 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 00107: val_acc did not improve from 0.93817\n",
+ "Epoch 108/100000\n",
+ " - 19s - loss: 0.4074 - acc: 0.9341 - val_loss: 0.3846 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 00108: val_acc did not improve from 0.93817\n",
+ "Epoch 109/100000\n",
+ " - 19s - loss: 0.4036 - acc: 0.9355 - val_loss: 0.4847 - val_acc: 0.8968\n",
+ "\n",
+ "Epoch 00109: val_acc did not improve from 0.93817\n",
+ "Epoch 110/100000\n",
+ " - 19s - loss: 0.4043 - acc: 0.9343 - val_loss: 0.4023 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 00110: val_acc did not improve from 0.93817\n",
+ "Epoch 111/100000\n",
+ " - 19s - loss: 0.4075 - acc: 0.9342 - val_loss: 0.3929 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00111: val_acc did not improve from 0.93817\n",
+ "Epoch 112/100000\n",
+ " - 19s - loss: 0.4058 - acc: 0.9347 - val_loss: 0.4070 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 00112: val_acc did not improve from 0.93817\n",
+ "Epoch 113/100000\n",
+ " - 19s - loss: 0.4045 - acc: 0.9348 - val_loss: 0.4018 - val_acc: 0.9273\n",
+ "\n",
+ "Epoch 00113: val_acc did not improve from 0.93817\n",
+ "Epoch 114/100000\n",
+ " - 19s - loss: 0.4049 - acc: 0.9341 - val_loss: 0.3926 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 00114: val_acc did not improve from 0.93817\n",
+ "Epoch 115/100000\n",
+ " - 19s - loss: 0.4055 - acc: 0.9339 - val_loss: 0.3904 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 00115: val_acc did not improve from 0.93817\n",
+ "Epoch 116/100000\n",
+ " - 19s - loss: 0.4059 - acc: 0.9332 - val_loss: 0.4077 - val_acc: 0.9229\n",
+ "\n",
+ "Epoch 00116: val_acc did not improve from 0.93817\n",
+ "Epoch 117/100000\n",
+ " - 19s - loss: 0.4051 - acc: 0.9333 - val_loss: 0.4226 - val_acc: 0.9146\n",
+ "\n",
+ "Epoch 00117: val_acc did not improve from 0.93817\n",
+ "Epoch 118/100000\n",
+ " - 19s - loss: 0.4034 - acc: 0.9341 - val_loss: 0.3989 - val_acc: 0.9256\n",
+ "\n",
+ "Epoch 00118: val_acc did not improve from 0.93817\n",
+ "Epoch 119/100000\n",
+ " - 19s - loss: 0.4010 - acc: 0.9349 - val_loss: 0.3932 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 00119: val_acc did not improve from 0.93817\n",
+ "Epoch 120/100000\n",
+ " - 19s - loss: 0.4042 - acc: 0.9342 - val_loss: 0.7033 - val_acc: 0.8155\n",
+ "\n",
+ "Epoch 00120: val_acc did not improve from 0.93817\n",
+ "Epoch 121/100000\n",
+ " - 19s - loss: 0.4066 - acc: 0.9339 - val_loss: 0.4062 - val_acc: 0.9259\n",
+ "\n",
+ "Epoch 00121: val_acc did not improve from 0.93817\n",
+ "Epoch 122/100000\n",
+ " - 19s - loss: 0.4026 - acc: 0.9342 - val_loss: 0.3889 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 00122: val_acc did not improve from 0.93817\n",
+ "Epoch 123/100000\n",
+ " - 19s - loss: 0.4031 - acc: 0.9350 - val_loss: 0.4046 - val_acc: 0.9289\n",
+ "\n",
+ "Epoch 00123: val_acc did not improve from 0.93817\n",
+ "Epoch 124/100000\n",
+ " - 19s - loss: 0.4050 - acc: 0.9346 - val_loss: 0.3852 - val_acc: 0.9400\n",
+ "\n",
+ "Epoch 00124: val_acc improved from 0.93817 to 0.94001, saving model to ./ModelSnapshots/CNN-124.h5\n",
+ "Epoch 125/100000\n",
+ " - 19s - loss: 0.4052 - acc: 0.9337 - val_loss: 0.3885 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 00125: val_acc did not improve from 0.94001\n",
+ "Epoch 126/100000\n",
+ " - 19s - loss: 0.4039 - acc: 0.9345 - val_loss: 0.3948 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00126: val_acc did not improve from 0.94001\n",
+ "Epoch 127/100000\n",
+ " - 19s - loss: 0.4060 - acc: 0.9342 - val_loss: 0.4544 - val_acc: 0.9040\n",
+ "\n",
+ "Epoch 00127: val_acc did not improve from 0.94001\n",
+ "Epoch 128/100000\n",
+ " - 19s - loss: 0.4082 - acc: 0.9342 - val_loss: 0.4008 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 00128: val_acc did not improve from 0.94001\n",
+ "Epoch 129/100000\n",
+ " - 20s - loss: 0.4062 - acc: 0.9346 - val_loss: 0.4203 - val_acc: 0.9238\n",
+ "\n",
+ "Epoch 00129: val_acc did not improve from 0.94001\n",
+ "Epoch 130/100000\n",
+ " - 20s - loss: 0.4027 - acc: 0.9348 - val_loss: 0.3881 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 00130: val_acc did not improve from 0.94001\n",
+ "Epoch 131/100000\n",
+ " - 19s - loss: 0.4049 - acc: 0.9349 - val_loss: 0.3957 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 00131: val_acc did not improve from 0.94001\n",
+ "Epoch 132/100000\n",
+ " - 19s - loss: 0.4051 - acc: 0.9347 - val_loss: 0.4183 - val_acc: 0.9228\n",
+ "\n",
+ "Epoch 00132: val_acc did not improve from 0.94001\n",
+ "Epoch 133/100000\n",
+ " - 19s - loss: 0.4037 - acc: 0.9346 - val_loss: 0.4063 - val_acc: 0.9268\n",
+ "\n",
+ "Epoch 00133: val_acc did not improve from 0.94001\n",
+ "Epoch 134/100000\n",
+ " - 19s - loss: 0.4056 - acc: 0.9343 - val_loss: 0.4185 - val_acc: 0.9207\n",
+ "\n",
+ "Epoch 00134: val_acc did not improve from 0.94001\n",
+ "Epoch 135/100000\n",
+ " - 19s - loss: 0.4050 - acc: 0.9345 - val_loss: 0.4389 - val_acc: 0.9144\n",
+ "\n",
+ "Epoch 00135: val_acc did not improve from 0.94001\n",
+ "Epoch 136/100000\n",
+ " - 19s - loss: 0.4048 - acc: 0.9348 - val_loss: 0.3881 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 00136: val_acc did not improve from 0.94001\n",
+ "Epoch 137/100000\n",
+ " - 19s - loss: 0.4058 - acc: 0.9343 - val_loss: 0.4193 - val_acc: 0.9176\n",
+ "\n",
+ "Epoch 00137: val_acc did not improve from 0.94001\n",
+ "Epoch 138/100000\n",
+ " - 19s - loss: 0.4031 - acc: 0.9353 - val_loss: 0.4260 - val_acc: 0.9274\n",
+ "\n",
+ "Epoch 00138: val_acc did not improve from 0.94001\n",
+ "Epoch 139/100000\n",
+ " - 19s - loss: 0.4023 - acc: 0.9349 - val_loss: 0.3853 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 00139: val_acc did not improve from 0.94001\n",
+ "Epoch 140/100000\n",
+ " - 19s - loss: 0.4062 - acc: 0.9345 - val_loss: 0.3913 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 00140: val_acc did not improve from 0.94001\n",
+ "Epoch 141/100000\n",
+ " - 19s - loss: 0.4038 - acc: 0.9350 - val_loss: 0.3837 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 00141: val_acc did not improve from 0.94001\n",
+ "Epoch 142/100000\n",
+ " - 19s - loss: 0.4040 - acc: 0.9346 - val_loss: 0.3926 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 00142: val_acc did not improve from 0.94001\n",
+ "Epoch 143/100000\n",
+ " - 19s - loss: 0.4034 - acc: 0.9347 - val_loss: 0.6062 - val_acc: 0.8273\n",
+ "\n",
+ "Epoch 00143: val_acc did not improve from 0.94001\n",
+ "Epoch 144/100000\n",
+ " - 19s - loss: 0.4063 - acc: 0.9343 - val_loss: 0.3798 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 00144: val_acc did not improve from 0.94001\n",
+ "Epoch 145/100000\n",
+ " - 19s - loss: 0.4047 - acc: 0.9352 - val_loss: 0.3861 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 00145: val_acc did not improve from 0.94001\n",
+ "Epoch 146/100000\n",
+ " - 19s - loss: 0.4028 - acc: 0.9352 - val_loss: 0.7171 - val_acc: 0.7992\n",
+ "\n",
+ "Epoch 00146: val_acc did not improve from 0.94001\n",
+ "Epoch 147/100000\n",
+ " - 19s - loss: 0.4042 - acc: 0.9352 - val_loss: 0.4404 - val_acc: 0.9158\n",
+ "\n",
+ "Epoch 00147: val_acc did not improve from 0.94001\n",
+ "Epoch 148/100000\n",
+ " - 20s - loss: 0.4044 - acc: 0.9348 - val_loss: 0.3975 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 00148: val_acc did not improve from 0.94001\n",
+ "Epoch 149/100000\n",
+ " - 19s - loss: 0.4013 - acc: 0.9355 - val_loss: 0.3931 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 00149: val_acc did not improve from 0.94001\n",
+ "Epoch 150/100000\n",
+ " - 19s - loss: 0.4066 - acc: 0.9340 - val_loss: 0.4519 - val_acc: 0.9091\n",
+ "\n",
+ "Epoch 00150: val_acc did not improve from 0.94001\n",
+ "Epoch 151/100000\n",
+ " - 19s - loss: 0.4036 - acc: 0.9349 - val_loss: 0.3926 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 00151: val_acc did not improve from 0.94001\n",
+ "Epoch 152/100000\n",
+ " - 18s - loss: 0.4053 - acc: 0.9345 - val_loss: 0.4576 - val_acc: 0.8998\n",
+ "\n",
+ "Epoch 00152: val_acc did not improve from 0.94001\n",
+ "Epoch 153/100000\n",
+ " - 19s - loss: 0.4064 - acc: 0.9338 - val_loss: 0.3841 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 00153: val_acc did not improve from 0.94001\n",
+ "Epoch 154/100000\n",
+ " - 19s - loss: 0.4039 - acc: 0.9346 - val_loss: 0.3943 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 00154: val_acc did not improve from 0.94001\n",
+ "Epoch 155/100000\n",
+ " - 19s - loss: 0.4018 - acc: 0.9355 - val_loss: 0.4134 - val_acc: 0.9281\n",
+ "\n",
+ "Epoch 00155: val_acc did not improve from 0.94001\n",
+ "Epoch 156/100000\n",
+ " - 19s - loss: 0.4061 - acc: 0.9344 - val_loss: 0.3877 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 00156: val_acc did not improve from 0.94001\n",
+ "Epoch 157/100000\n",
+ " - 19s - loss: 0.4050 - acc: 0.9347 - val_loss: 0.4376 - val_acc: 0.9163\n",
+ "\n",
+ "Epoch 00157: val_acc did not improve from 0.94001\n",
+ "Epoch 158/100000\n",
+ " - 19s - loss: 0.4067 - acc: 0.9346 - val_loss: 0.4217 - val_acc: 0.9178\n",
+ "\n",
+ "Epoch 00158: val_acc did not improve from 0.94001\n",
+ "Epoch 159/100000\n",
+ " - 18s - loss: 0.4059 - acc: 0.9350 - val_loss: 0.3867 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 00159: val_acc did not improve from 0.94001\n",
+ "Epoch 160/100000\n",
+ " - 19s - loss: 0.4033 - acc: 0.9346 - val_loss: 0.4266 - val_acc: 0.9223\n",
+ "\n",
+ "Epoch 00160: val_acc did not improve from 0.94001\n",
+ "Epoch 161/100000\n",
+ " - 19s - loss: 0.4081 - acc: 0.9344 - val_loss: 0.4022 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 00161: val_acc did not improve from 0.94001\n",
+ "Epoch 162/100000\n",
+ " - 19s - loss: 0.4047 - acc: 0.9348 - val_loss: 0.3967 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 00162: val_acc did not improve from 0.94001\n",
+ "Epoch 163/100000\n",
+ " - 19s - loss: 0.4053 - acc: 0.9340 - val_loss: 0.4013 - val_acc: 0.9269\n",
+ "\n",
+ "Epoch 00163: val_acc did not improve from 0.94001\n",
+ "Epoch 164/100000\n",
+ " - 19s - loss: 0.4024 - acc: 0.9351 - val_loss: 0.3972 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 00164: val_acc did not improve from 0.94001\n",
+ "Epoch 165/100000\n",
+ " - 19s - loss: 0.4046 - acc: 0.9351 - val_loss: 0.3931 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 00165: val_acc did not improve from 0.94001\n",
+ "Epoch 166/100000\n",
+ " - 19s - loss: 0.4044 - acc: 0.9351 - val_loss: 0.3920 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 00166: val_acc did not improve from 0.94001\n",
+ "Epoch 167/100000\n",
+ " - 19s - loss: 0.4069 - acc: 0.9351 - val_loss: 0.3930 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 00167: val_acc did not improve from 0.94001\n",
+ "Epoch 168/100000\n",
+ " - 19s - loss: 0.4038 - acc: 0.9362 - val_loss: 0.3903 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 00168: val_acc did not improve from 0.94001\n",
+ "Epoch 169/100000\n",
+ " - 19s - loss: 0.4116 - acc: 0.9342 - val_loss: 0.4017 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 00169: val_acc did not improve from 0.94001\n",
+ "Epoch 170/100000\n",
+ " - 19s - loss: 0.4063 - acc: 0.9350 - val_loss: 0.5106 - val_acc: 0.8779\n",
+ "\n",
+ "Epoch 00170: val_acc did not improve from 0.94001\n",
+ "Epoch 171/100000\n",
+ " - 19s - loss: 0.4066 - acc: 0.9343 - val_loss: 0.3971 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 00171: val_acc did not improve from 0.94001\n",
+ "Epoch 172/100000\n",
+ " - 19s - loss: 0.4063 - acc: 0.9344 - val_loss: 0.3943 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 00172: val_acc did not improve from 0.94001\n",
+ "Epoch 173/100000\n",
+ " - 19s - loss: 0.4046 - acc: 0.9355 - val_loss: 0.4040 - val_acc: 0.9304\n",
+ "\n",
+ "Epoch 00173: val_acc did not improve from 0.94001\n",
+ "Epoch 174/100000\n",
+ " - 19s - loss: 0.4047 - acc: 0.9351 - val_loss: 0.3935 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 00174: val_acc did not improve from 0.94001\n",
+ "Epoch 175/100000\n",
+ " - 19s - loss: 0.4063 - acc: 0.9345 - val_loss: 0.4480 - val_acc: 0.9054\n",
+ "\n",
+ "Epoch 00175: val_acc did not improve from 0.94001\n",
+ "Epoch 176/100000\n",
+ " - 19s - loss: 0.4079 - acc: 0.9340 - val_loss: 0.3912 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 00176: val_acc did not improve from 0.94001\n",
+ "Epoch 177/100000\n",
+ " - 19s - loss: 0.4044 - acc: 0.9352 - val_loss: 0.4139 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 00177: val_acc did not improve from 0.94001\n",
+ "Epoch 178/100000\n",
+ " - 19s - loss: 0.4071 - acc: 0.9350 - val_loss: 0.5654 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00178: val_acc did not improve from 0.94001\n",
+ "Epoch 179/100000\n",
+ " - 19s - loss: 0.4083 - acc: 0.9349 - val_loss: 0.4514 - val_acc: 0.9079\n",
+ "\n",
+ "Epoch 00179: val_acc did not improve from 0.94001\n",
+ "Epoch 180/100000\n",
+ " - 19s - loss: 0.4057 - acc: 0.9348 - val_loss: 0.3918 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 00180: val_acc did not improve from 0.94001\n",
+ "Epoch 181/100000\n",
+ " - 18s - loss: 0.4035 - acc: 0.9356 - val_loss: 0.4328 - val_acc: 0.9174\n",
+ "\n",
+ "Epoch 00181: val_acc did not improve from 0.94001\n",
+ "Epoch 182/100000\n",
+ " - 19s - loss: 0.4064 - acc: 0.9345 - val_loss: 0.4011 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 00182: val_acc did not improve from 0.94001\n",
+ "Epoch 183/100000\n",
+ " - 18s - loss: 0.4069 - acc: 0.9349 - val_loss: 0.4212 - val_acc: 0.9218\n",
+ "\n",
+ "Epoch 00183: val_acc did not improve from 0.94001\n",
+ "Epoch 184/100000\n",
+ " - 18s - loss: 0.4054 - acc: 0.9346 - val_loss: 0.4712 - val_acc: 0.9023\n",
+ "\n",
+ "Epoch 00184: val_acc did not improve from 0.94001\n",
+ "Epoch 185/100000\n",
+ " - 19s - loss: 0.4077 - acc: 0.9348 - val_loss: 0.4396 - val_acc: 0.9240\n",
+ "\n",
+ "Epoch 00185: val_acc did not improve from 0.94001\n",
+ "Epoch 186/100000\n",
+ " - 19s - loss: 0.4068 - acc: 0.9348 - val_loss: 0.3882 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 00186: val_acc did not improve from 0.94001\n",
+ "Epoch 187/100000\n",
+ " - 18s - loss: 0.4056 - acc: 0.9346 - val_loss: 0.4323 - val_acc: 0.9222\n",
+ "\n",
+ "Epoch 00187: val_acc did not improve from 0.94001\n",
+ "Epoch 188/100000\n",
+ " - 19s - loss: 0.4043 - acc: 0.9350 - val_loss: 0.4514 - val_acc: 0.9051\n",
+ "\n",
+ "Epoch 00188: val_acc did not improve from 0.94001\n",
+ "Epoch 189/100000\n",
+ " - 19s - loss: 0.4041 - acc: 0.9353 - val_loss: 0.4244 - val_acc: 0.9177\n",
+ "\n",
+ "Epoch 00189: val_acc did not improve from 0.94001\n",
+ "Epoch 190/100000\n",
+ " - 18s - loss: 0.4080 - acc: 0.9352 - val_loss: 0.3979 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 00190: val_acc did not improve from 0.94001\n",
+ "Epoch 191/100000\n",
+ " - 19s - loss: 0.4092 - acc: 0.9340 - val_loss: 0.4226 - val_acc: 0.9241\n",
+ "\n",
+ "Epoch 00191: val_acc did not improve from 0.94001\n",
+ "Epoch 192/100000\n",
+ " - 18s - loss: 0.4050 - acc: 0.9345 - val_loss: 0.4195 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 00192: val_acc did not improve from 0.94001\n",
+ "Epoch 193/100000\n",
+ " - 19s - loss: 0.4084 - acc: 0.9343 - val_loss: 0.4304 - val_acc: 0.9150\n",
+ "\n",
+ "Epoch 00193: val_acc did not improve from 0.94001\n",
+ "Epoch 194/100000\n",
+ " - 19s - loss: 0.4068 - acc: 0.9352 - val_loss: 0.4290 - val_acc: 0.9168\n",
+ "\n",
+ "Epoch 00194: val_acc did not improve from 0.94001\n",
+ "Epoch 195/100000\n",
+ " - 18s - loss: 0.4046 - acc: 0.9353 - val_loss: 0.3933 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 00195: val_acc did not improve from 0.94001\n",
+ "Epoch 196/100000\n",
+ " - 19s - loss: 0.4057 - acc: 0.9354 - val_loss: 0.3930 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00196: val_acc did not improve from 0.94001\n",
+ "Epoch 197/100000\n",
+ " - 19s - loss: 0.4042 - acc: 0.9353 - val_loss: 0.3876 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 00197: val_acc did not improve from 0.94001\n",
+ "Epoch 198/100000\n",
+ " - 19s - loss: 0.4050 - acc: 0.9355 - val_loss: 0.4373 - val_acc: 0.9078\n",
+ "\n",
+ "Epoch 00198: val_acc did not improve from 0.94001\n",
+ "Epoch 199/100000\n",
+ " - 19s - loss: 0.4090 - acc: 0.9347 - val_loss: 0.3874 - val_acc: 0.9386\n",
+ "\n",
+ "Epoch 00199: val_acc did not improve from 0.94001\n",
+ "Epoch 200/100000\n",
+ " - 19s - loss: 0.4077 - acc: 0.9345 - val_loss: 0.3863 - val_acc: 0.9386\n",
+ "\n",
+ "Epoch 00200: val_acc did not improve from 0.94001\n",
+ "Epoch 201/100000\n",
+ " - 19s - loss: 0.4075 - acc: 0.9353 - val_loss: 0.4163 - val_acc: 0.9216\n",
+ "\n",
+ "Epoch 00201: val_acc did not improve from 0.94001\n",
+ "Epoch 202/100000\n",
+ " - 19s - loss: 0.4082 - acc: 0.9346 - val_loss: 0.4323 - val_acc: 0.9231\n",
+ "\n",
+ "Epoch 00202: val_acc did not improve from 0.94001\n",
+ "Epoch 203/100000\n",
+ " - 18s - loss: 0.4042 - acc: 0.9353 - val_loss: 0.4121 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 00203: val_acc did not improve from 0.94001\n",
+ "Epoch 204/100000\n",
+ " - 19s - loss: 0.4067 - acc: 0.9346 - val_loss: 0.4085 - val_acc: 0.9257\n",
+ "\n",
+ "Epoch 00204: val_acc did not improve from 0.94001\n",
+ "Epoch 205/100000\n",
+ " - 18s - loss: 0.4089 - acc: 0.9341 - val_loss: 0.4019 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 00205: val_acc did not improve from 0.94001\n",
+ "Epoch 206/100000\n",
+ " - 19s - loss: 0.4062 - acc: 0.9346 - val_loss: 0.3963 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 00206: val_acc did not improve from 0.94001\n",
+ "Epoch 207/100000\n",
+ " - 19s - loss: 0.4070 - acc: 0.9352 - val_loss: 0.3899 - val_acc: 0.9398\n",
+ "\n",
+ "Epoch 00207: val_acc did not improve from 0.94001\n",
+ "Epoch 208/100000\n",
+ " - 19s - loss: 0.4096 - acc: 0.9352 - val_loss: 0.3982 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 00208: val_acc did not improve from 0.94001\n",
+ "Epoch 209/100000\n",
+ " - 19s - loss: 0.4100 - acc: 0.9337 - val_loss: 0.4350 - val_acc: 0.9122\n",
+ "\n",
+ "Epoch 00209: val_acc did not improve from 0.94001\n",
+ "Epoch 210/100000\n",
+ " - 19s - loss: 0.4054 - acc: 0.9352 - val_loss: 0.5032 - val_acc: 0.8937\n",
+ "\n",
+ "Epoch 00210: val_acc did not improve from 0.94001\n",
+ "Epoch 211/100000\n",
+ " - 19s - loss: 0.4053 - acc: 0.9352 - val_loss: 0.4182 - val_acc: 0.9237\n",
+ "\n",
+ "Epoch 00211: val_acc did not improve from 0.94001\n",
+ "Epoch 212/100000\n",
+ " - 18s - loss: 0.4083 - acc: 0.9349 - val_loss: 0.3891 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 00212: val_acc did not improve from 0.94001\n",
+ "Epoch 213/100000\n",
+ " - 19s - loss: 0.4043 - acc: 0.9358 - val_loss: 0.4356 - val_acc: 0.9087\n",
+ "\n",
+ "Epoch 00213: val_acc did not improve from 0.94001\n",
+ "Epoch 214/100000\n",
+ " - 18s - loss: 0.4056 - acc: 0.9347 - val_loss: 0.4195 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 00214: val_acc did not improve from 0.94001\n",
+ "Epoch 215/100000\n",
+ " - 19s - loss: 0.4049 - acc: 0.9344 - val_loss: 0.6084 - val_acc: 0.8258\n",
+ "\n",
+ "Epoch 00215: val_acc did not improve from 0.94001\n",
+ "Epoch 216/100000\n",
+ " - 19s - loss: 0.4108 - acc: 0.9343 - val_loss: 0.4112 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 00216: val_acc did not improve from 0.94001\n",
+ "Epoch 217/100000\n",
+ " - 18s - loss: 0.4115 - acc: 0.9340 - val_loss: 0.3907 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 00217: val_acc did not improve from 0.94001\n",
+ "Epoch 218/100000\n",
+ " - 19s - loss: 0.4093 - acc: 0.9350 - val_loss: 0.5302 - val_acc: 0.8689\n",
+ "\n",
+ "Epoch 00218: val_acc did not improve from 0.94001\n",
+ "Epoch 219/100000\n",
+ " - 18s - loss: 0.4070 - acc: 0.9349 - val_loss: 0.4213 - val_acc: 0.9148\n",
+ "\n",
+ "Epoch 00219: val_acc did not improve from 0.94001\n",
+ "Epoch 220/100000\n",
+ " - 19s - loss: 0.4057 - acc: 0.9344 - val_loss: 0.4425 - val_acc: 0.9089\n",
+ "\n",
+ "Epoch 00220: val_acc did not improve from 0.94001\n",
+ "Epoch 221/100000\n",
+ " - 18s - loss: 0.4078 - acc: 0.9340 - val_loss: 0.5564 - val_acc: 0.8558\n",
+ "\n",
+ "Epoch 00221: val_acc did not improve from 0.94001\n",
+ "Epoch 222/100000\n",
+ " - 19s - loss: 0.4050 - acc: 0.9345 - val_loss: 0.4350 - val_acc: 0.9088\n",
+ "\n",
+ "Epoch 00222: val_acc did not improve from 0.94001\n",
+ "Epoch 223/100000\n",
+ " - 19s - loss: 0.4054 - acc: 0.9348 - val_loss: 0.4403 - val_acc: 0.9155\n",
+ "\n",
+ "Epoch 00223: val_acc did not improve from 0.94001\n",
+ "Epoch 224/100000\n",
+ " - 19s - loss: 0.4058 - acc: 0.9351 - val_loss: 0.4111 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 00224: val_acc did not improve from 0.94001\n",
+ "Epoch 225/100000\n",
+ " - 18s - loss: 0.4075 - acc: 0.9342 - val_loss: 0.4473 - val_acc: 0.9033\n",
+ "\n",
+ "Epoch 00225: val_acc did not improve from 0.94001\n",
+ "Epoch 226/100000\n",
+ " - 19s - loss: 0.4074 - acc: 0.9340 - val_loss: 0.4620 - val_acc: 0.9073\n",
+ "\n",
+ "Epoch 00226: val_acc did not improve from 0.94001\n",
+ "Epoch 227/100000\n",
+ " - 19s - loss: 0.4047 - acc: 0.9353 - val_loss: 0.3895 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 00227: val_acc did not improve from 0.94001\n",
+ "Epoch 228/100000\n",
+ " - 18s - loss: 0.4057 - acc: 0.9345 - val_loss: 0.3968 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 00228: val_acc did not improve from 0.94001\n",
+ "Epoch 229/100000\n",
+ " - 19s - loss: 0.4063 - acc: 0.9340 - val_loss: 0.5012 - val_acc: 0.8788\n",
+ "\n",
+ "Epoch 00229: val_acc did not improve from 0.94001\n",
+ "Epoch 230/100000\n",
+ " - 19s - loss: 0.4055 - acc: 0.9351 - val_loss: 0.3875 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 00230: val_acc did not improve from 0.94001\n",
+ "Epoch 231/100000\n",
+ " - 19s - loss: 0.4070 - acc: 0.9348 - val_loss: 0.4155 - val_acc: 0.9180\n",
+ "\n",
+ "Epoch 00231: val_acc did not improve from 0.94001\n",
+ "Epoch 232/100000\n",
+ " - 19s - loss: 0.4087 - acc: 0.9341 - val_loss: 0.4521 - val_acc: 0.9038\n",
+ "\n",
+ "Epoch 00232: val_acc did not improve from 0.94001\n",
+ "Epoch 233/100000\n",
+ " - 19s - loss: 0.4078 - acc: 0.9341 - val_loss: 0.4023 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 00233: val_acc did not improve from 0.94001\n",
+ "Epoch 234/100000\n",
+ " - 19s - loss: 0.4039 - acc: 0.9347 - val_loss: 0.3896 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 00234: val_acc did not improve from 0.94001\n",
+ "\n",
+ "Epoch 00234: ReduceLROnPlateau reducing learning rate to 0.0009500000451225787.\n",
+ "Epoch 235/100000\n",
+ " - 18s - loss: 0.3999 - acc: 0.9338 - val_loss: 0.3818 - val_acc: 0.9386\n",
+ "\n",
+ "Epoch 00235: val_acc did not improve from 0.94001\n",
+ "Epoch 236/100000\n",
+ " - 19s - loss: 0.3963 - acc: 0.9350 - val_loss: 0.4515 - val_acc: 0.8875\n",
+ "\n",
+ "Epoch 00236: val_acc did not improve from 0.94001\n",
+ "Epoch 237/100000\n",
+ " - 19s - loss: 0.3957 - acc: 0.9354 - val_loss: 0.4109 - val_acc: 0.9161\n",
+ "\n",
+ "Epoch 00237: val_acc did not improve from 0.94001\n",
+ "Epoch 238/100000\n",
+ " - 20s - loss: 0.3957 - acc: 0.9353 - val_loss: 0.3963 - val_acc: 0.9250\n",
+ "\n",
+ "Epoch 00238: val_acc did not improve from 0.94001\n",
+ "Epoch 239/100000\n",
+ " - 19s - loss: 0.3949 - acc: 0.9346 - val_loss: 0.3813 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 00239: val_acc did not improve from 0.94001\n",
+ "Epoch 240/100000\n",
+ " - 19s - loss: 0.3967 - acc: 0.9353 - val_loss: 0.4712 - val_acc: 0.8910\n",
+ "\n",
+ "Epoch 00240: val_acc did not improve from 0.94001\n",
+ "Epoch 241/100000\n",
+ " - 18s - loss: 0.3932 - acc: 0.9360 - val_loss: 0.4536 - val_acc: 0.8974\n",
+ "\n",
+ "Epoch 00241: val_acc did not improve from 0.94001\n",
+ "Epoch 242/100000\n",
+ " - 19s - loss: 0.3959 - acc: 0.9354 - val_loss: 0.4080 - val_acc: 0.9175\n",
+ "\n",
+ "Epoch 00242: val_acc did not improve from 0.94001\n",
+ "Epoch 243/100000\n",
+ " - 19s - loss: 0.3991 - acc: 0.9340 - val_loss: 0.3888 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 00243: val_acc did not improve from 0.94001\n",
+ "Epoch 244/100000\n",
+ " - 19s - loss: 0.3988 - acc: 0.9338 - val_loss: 0.3851 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 00244: val_acc did not improve from 0.94001\n",
+ "Epoch 245/100000\n",
+ " - 19s - loss: 0.3952 - acc: 0.9355 - val_loss: 0.4672 - val_acc: 0.8829\n",
+ "\n",
+ "Epoch 00245: val_acc did not improve from 0.94001\n",
+ "Epoch 246/100000\n",
+ " - 19s - loss: 0.3952 - acc: 0.9356 - val_loss: 0.3993 - val_acc: 0.9198\n",
+ "\n",
+ "Epoch 00246: val_acc did not improve from 0.94001\n",
+ "Epoch 247/100000\n",
+ " - 19s - loss: 0.3936 - acc: 0.9355 - val_loss: 0.3858 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00247: val_acc did not improve from 0.94001\n",
+ "Epoch 248/100000\n",
+ " - 19s - loss: 0.3959 - acc: 0.9349 - val_loss: 0.4098 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 00248: val_acc did not improve from 0.94001\n",
+ "Epoch 249/100000\n",
+ " - 19s - loss: 0.3936 - acc: 0.9351 - val_loss: 0.7285 - val_acc: 0.7529\n",
+ "\n",
+ "Epoch 00249: val_acc did not improve from 0.94001\n",
+ "Epoch 250/100000\n",
+ " - 19s - loss: 0.3956 - acc: 0.9344 - val_loss: 0.5881 - val_acc: 0.8154\n",
+ "\n",
+ "Epoch 00250: val_acc did not improve from 0.94001\n",
+ "Epoch 251/100000\n",
+ " - 19s - loss: 0.3964 - acc: 0.9343 - val_loss: 0.4024 - val_acc: 0.9281\n",
+ "\n",
+ "Epoch 00251: val_acc did not improve from 0.94001\n",
+ "Epoch 252/100000\n",
+ " - 18s - loss: 0.3950 - acc: 0.9346 - val_loss: 0.4116 - val_acc: 0.9261\n",
+ "\n",
+ "Epoch 00252: val_acc did not improve from 0.94001\n",
+ "Epoch 253/100000\n",
+ " - 19s - loss: 0.3923 - acc: 0.9358 - val_loss: 0.3834 - val_acc: 0.9323\n",
+ "\n",
+ "Epoch 00253: val_acc did not improve from 0.94001\n",
+ "Epoch 254/100000\n",
+ " - 19s - loss: 0.3927 - acc: 0.9358 - val_loss: 0.3759 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 00254: val_acc did not improve from 0.94001\n",
+ "Epoch 255/100000\n",
+ " - 19s - loss: 0.3951 - acc: 0.9345 - val_loss: 0.6460 - val_acc: 0.8246\n",
+ "\n",
+ "Epoch 00255: val_acc did not improve from 0.94001\n",
+ "Epoch 256/100000\n",
+ " - 19s - loss: 0.3931 - acc: 0.9351 - val_loss: 0.3781 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 00256: val_acc did not improve from 0.94001\n",
+ "Epoch 257/100000\n",
+ " - 19s - loss: 0.3945 - acc: 0.9349 - val_loss: 0.3822 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 00257: val_acc did not improve from 0.94001\n",
+ "Epoch 258/100000\n",
+ " - 19s - loss: 0.3909 - acc: 0.9350 - val_loss: 0.3879 - val_acc: 0.9309\n",
+ "\n",
+ "Epoch 00258: val_acc did not improve from 0.94001\n",
+ "Epoch 259/100000\n",
+ " - 19s - loss: 0.3951 - acc: 0.9346 - val_loss: 0.4041 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 00259: val_acc did not improve from 0.94001\n",
+ "Epoch 260/100000\n",
+ " - 19s - loss: 0.3962 - acc: 0.9344 - val_loss: 0.3822 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 00260: val_acc did not improve from 0.94001\n",
+ "Epoch 261/100000\n",
+ " - 19s - loss: 0.3928 - acc: 0.9355 - val_loss: 0.4050 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 00261: val_acc did not improve from 0.94001\n",
+ "Epoch 262/100000\n",
+ " - 19s - loss: 0.3973 - acc: 0.9340 - val_loss: 0.3784 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 00262: val_acc did not improve from 0.94001\n",
+ "Epoch 263/100000\n",
+ " - 19s - loss: 0.3965 - acc: 0.9344 - val_loss: 0.3824 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 00263: val_acc did not improve from 0.94001\n",
+ "Epoch 264/100000\n",
+ " - 19s - loss: 0.3953 - acc: 0.9341 - val_loss: 0.3855 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00264: val_acc did not improve from 0.94001\n",
+ "Epoch 265/100000\n",
+ " - 19s - loss: 0.3925 - acc: 0.9350 - val_loss: 0.3810 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 00265: val_acc did not improve from 0.94001\n",
+ "Epoch 266/100000\n",
+ " - 19s - loss: 0.3953 - acc: 0.9344 - val_loss: 0.3863 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 00266: val_acc did not improve from 0.94001\n",
+ "Epoch 267/100000\n",
+ " - 19s - loss: 0.3946 - acc: 0.9345 - val_loss: 0.3758 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 00267: val_acc did not improve from 0.94001\n",
+ "Epoch 268/100000\n",
+ " - 19s - loss: 0.3936 - acc: 0.9353 - val_loss: 0.3837 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 00268: val_acc did not improve from 0.94001\n",
+ "Epoch 269/100000\n",
+ " - 19s - loss: 0.3935 - acc: 0.9351 - val_loss: 0.3979 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 00269: val_acc did not improve from 0.94001\n",
+ "Epoch 270/100000\n",
+ " - 19s - loss: 0.3910 - acc: 0.9350 - val_loss: 0.3745 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 00270: val_acc did not improve from 0.94001\n",
+ "Epoch 271/100000\n",
+ " - 19s - loss: 0.3931 - acc: 0.9346 - val_loss: 0.3989 - val_acc: 0.9223\n",
+ "\n",
+ "Epoch 00271: val_acc did not improve from 0.94001\n",
+ "Epoch 272/100000\n",
+ " - 19s - loss: 0.3926 - acc: 0.9347 - val_loss: 0.5013 - val_acc: 0.8768\n",
+ "\n",
+ "Epoch 00272: val_acc did not improve from 0.94001\n",
+ "Epoch 273/100000\n",
+ " - 19s - loss: 0.3951 - acc: 0.9344 - val_loss: 0.3885 - val_acc: 0.9395\n",
+ "\n",
+ "Epoch 00273: val_acc did not improve from 0.94001\n",
+ "Epoch 274/100000\n",
+ " - 19s - loss: 0.3927 - acc: 0.9351 - val_loss: 0.4362 - val_acc: 0.8934\n",
+ "\n",
+ "Epoch 00274: val_acc did not improve from 0.94001\n",
+ "Epoch 275/100000\n",
+ " - 19s - loss: 0.3941 - acc: 0.9343 - val_loss: 0.3803 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 00275: val_acc did not improve from 0.94001\n",
+ "Epoch 276/100000\n",
+ " - 19s - loss: 0.3939 - acc: 0.9344 - val_loss: 0.3769 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 00276: val_acc did not improve from 0.94001\n",
+ "Epoch 277/100000\n",
+ " - 19s - loss: 0.3940 - acc: 0.9347 - val_loss: 0.4209 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 00277: val_acc did not improve from 0.94001\n",
+ "Epoch 278/100000\n",
+ " - 19s - loss: 0.3951 - acc: 0.9351 - val_loss: 0.3839 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 00278: val_acc did not improve from 0.94001\n",
+ "Epoch 279/100000\n",
+ " - 19s - loss: 0.3933 - acc: 0.9353 - val_loss: 0.4531 - val_acc: 0.8966\n",
+ "\n",
+ "Epoch 00279: val_acc did not improve from 0.94001\n",
+ "Epoch 280/100000\n",
+ " - 19s - loss: 0.3966 - acc: 0.9338 - val_loss: 0.4798 - val_acc: 0.9050\n",
+ "\n",
+ "Epoch 00280: val_acc did not improve from 0.94001\n",
+ "Epoch 281/100000\n",
+ " - 19s - loss: 0.3929 - acc: 0.9349 - val_loss: 0.3854 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00281: val_acc did not improve from 0.94001\n",
+ "Epoch 282/100000\n",
+ " - 19s - loss: 0.3959 - acc: 0.9346 - val_loss: 0.3824 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 00282: val_acc did not improve from 0.94001\n",
+ "Epoch 283/100000\n",
+ " - 19s - loss: 0.3933 - acc: 0.9348 - val_loss: 0.4169 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 00283: val_acc did not improve from 0.94001\n",
+ "Epoch 284/100000\n",
+ " - 19s - loss: 0.3967 - acc: 0.9346 - val_loss: 0.3699 - val_acc: 0.9394\n",
+ "\n",
+ "Epoch 00284: val_acc did not improve from 0.94001\n",
+ "Epoch 285/100000\n",
+ " - 19s - loss: 0.3967 - acc: 0.9342 - val_loss: 0.3907 - val_acc: 0.9323\n",
+ "\n",
+ "Epoch 00285: val_acc did not improve from 0.94001\n",
+ "Epoch 286/100000\n",
+ " - 19s - loss: 0.3922 - acc: 0.9353 - val_loss: 0.3721 - val_acc: 0.9386\n",
+ "\n",
+ "Epoch 00286: val_acc did not improve from 0.94001\n",
+ "Epoch 287/100000\n",
+ " - 19s - loss: 0.3950 - acc: 0.9348 - val_loss: 0.3805 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 00287: val_acc did not improve from 0.94001\n",
+ "Epoch 288/100000\n",
+ " - 19s - loss: 0.3988 - acc: 0.9337 - val_loss: 0.4103 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 00288: val_acc did not improve from 0.94001\n",
+ "Epoch 289/100000\n",
+ " - 19s - loss: 0.3923 - acc: 0.9350 - val_loss: 0.6771 - val_acc: 0.7532\n",
+ "\n",
+ "Epoch 00289: val_acc did not improve from 0.94001\n",
+ "Epoch 290/100000\n",
+ " - 19s - loss: 0.3953 - acc: 0.9342 - val_loss: 0.3949 - val_acc: 0.9258\n",
+ "\n",
+ "Epoch 00290: val_acc did not improve from 0.94001\n",
+ "Epoch 291/100000\n",
+ " - 19s - loss: 0.3964 - acc: 0.9346 - val_loss: 0.3964 - val_acc: 0.9282\n",
+ "\n",
+ "Epoch 00291: val_acc did not improve from 0.94001\n",
+ "Epoch 292/100000\n",
+ " - 19s - loss: 0.3964 - acc: 0.9346 - val_loss: 0.4180 - val_acc: 0.9215\n",
+ "\n",
+ "Epoch 00292: val_acc did not improve from 0.94001\n",
+ "Epoch 293/100000\n",
+ " - 19s - loss: 0.3980 - acc: 0.9333 - val_loss: 0.3960 - val_acc: 0.9231\n",
+ "\n",
+ "Epoch 00293: val_acc did not improve from 0.94001\n",
+ "Epoch 294/100000\n",
+ " - 19s - loss: 0.3964 - acc: 0.9341 - val_loss: 0.5895 - val_acc: 0.8239\n",
+ "\n",
+ "Epoch 00294: val_acc did not improve from 0.94001\n",
+ "Epoch 295/100000\n",
+ " - 19s - loss: 0.3968 - acc: 0.9340 - val_loss: 0.4205 - val_acc: 0.9074\n",
+ "\n",
+ "Epoch 00295: val_acc did not improve from 0.94001\n",
+ "Epoch 296/100000\n",
+ " - 18s - loss: 0.3944 - acc: 0.9349 - val_loss: 0.3864 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 00296: val_acc did not improve from 0.94001\n",
+ "Epoch 297/100000\n",
+ " - 19s - loss: 0.3945 - acc: 0.9343 - val_loss: 0.4374 - val_acc: 0.9185\n",
+ "\n",
+ "Epoch 00297: val_acc did not improve from 0.94001\n",
+ "Epoch 298/100000\n",
+ " - 19s - loss: 0.3960 - acc: 0.9348 - val_loss: 0.3795 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 00298: val_acc did not improve from 0.94001\n",
+ "Epoch 299/100000\n",
+ " - 19s - loss: 0.3981 - acc: 0.9342 - val_loss: 0.3761 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 00299: val_acc did not improve from 0.94001\n",
+ "Epoch 300/100000\n",
+ " - 18s - loss: 0.3946 - acc: 0.9354 - val_loss: 0.3748 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 00300: val_acc did not improve from 0.94001\n",
+ "Epoch 301/100000\n",
+ " - 19s - loss: 0.3937 - acc: 0.9351 - val_loss: 0.4768 - val_acc: 0.8688\n",
+ "\n",
+ "Epoch 00301: val_acc did not improve from 0.94001\n",
+ "Epoch 302/100000\n",
+ " - 19s - loss: 0.3950 - acc: 0.9350 - val_loss: 0.3795 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 00302: val_acc did not improve from 0.94001\n",
+ "Epoch 303/100000\n",
+ " - 19s - loss: 0.3967 - acc: 0.9351 - val_loss: 0.5872 - val_acc: 0.8118\n",
+ "\n",
+ "Epoch 00303: val_acc did not improve from 0.94001\n",
+ "Epoch 304/100000\n",
+ " - 19s - loss: 0.3973 - acc: 0.9352 - val_loss: 0.4045 - val_acc: 0.9153\n",
+ "\n",
+ "Epoch 00304: val_acc did not improve from 0.94001\n",
+ "Epoch 305/100000\n",
+ " - 19s - loss: 0.3947 - acc: 0.9346 - val_loss: 0.3971 - val_acc: 0.9232\n",
+ "\n",
+ "Epoch 00305: val_acc did not improve from 0.94001\n",
+ "Epoch 306/100000\n",
+ " - 19s - loss: 0.3961 - acc: 0.9339 - val_loss: 0.4000 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 00306: val_acc did not improve from 0.94001\n",
+ "Epoch 307/100000\n",
+ " - 19s - loss: 0.3961 - acc: 0.9343 - val_loss: 0.3832 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 00307: val_acc did not improve from 0.94001\n",
+ "Epoch 308/100000\n",
+ " - 19s - loss: 0.3940 - acc: 0.9352 - val_loss: 0.3918 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 00308: val_acc did not improve from 0.94001\n",
+ "Epoch 309/100000\n",
+ " - 18s - loss: 0.3968 - acc: 0.9337 - val_loss: 0.3899 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00309: val_acc did not improve from 0.94001\n",
+ "Epoch 310/100000\n",
+ " - 19s - loss: 0.3944 - acc: 0.9354 - val_loss: 0.3990 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 00310: val_acc did not improve from 0.94001\n",
+ "Epoch 311/100000\n",
+ " - 18s - loss: 0.3946 - acc: 0.9346 - val_loss: 0.3739 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 00311: val_acc did not improve from 0.94001\n",
+ "Epoch 312/100000\n",
+ " - 19s - loss: 0.3966 - acc: 0.9340 - val_loss: 0.4089 - val_acc: 0.9274\n",
+ "\n",
+ "Epoch 00312: val_acc did not improve from 0.94001\n",
+ "Epoch 313/100000\n",
+ " - 18s - loss: 0.3938 - acc: 0.9348 - val_loss: 0.4106 - val_acc: 0.9172\n",
+ "\n",
+ "Epoch 00313: val_acc did not improve from 0.94001\n",
+ "Epoch 314/100000\n",
+ " - 19s - loss: 0.3967 - acc: 0.9338 - val_loss: 0.3881 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 00314: val_acc did not improve from 0.94001\n",
+ "Epoch 315/100000\n",
+ " - 19s - loss: 0.3963 - acc: 0.9345 - val_loss: 0.4076 - val_acc: 0.9191\n",
+ "\n",
+ "Epoch 00315: val_acc did not improve from 0.94001\n",
+ "Epoch 316/100000\n",
+ " - 18s - loss: 0.3965 - acc: 0.9346 - val_loss: 0.3757 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 00316: val_acc did not improve from 0.94001\n",
+ "Epoch 317/100000\n",
+ " - 19s - loss: 0.3945 - acc: 0.9345 - val_loss: 0.3892 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 00317: val_acc did not improve from 0.94001\n",
+ "Epoch 318/100000\n",
+ " - 18s - loss: 0.3951 - acc: 0.9349 - val_loss: 0.5040 - val_acc: 0.8924\n",
+ "\n",
+ "Epoch 00318: val_acc did not improve from 0.94001\n",
+ "Epoch 319/100000\n",
+ " - 19s - loss: 0.3951 - acc: 0.9347 - val_loss: 0.3891 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 00319: val_acc did not improve from 0.94001\n",
+ "Epoch 320/100000\n",
+ " - 19s - loss: 0.3968 - acc: 0.9343 - val_loss: 0.3818 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 00320: val_acc did not improve from 0.94001\n",
+ "Epoch 321/100000\n",
+ " - 18s - loss: 0.3981 - acc: 0.9340 - val_loss: 0.3808 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 00321: val_acc did not improve from 0.94001\n",
+ "Epoch 322/100000\n",
+ " - 18s - loss: 0.3976 - acc: 0.9346 - val_loss: 0.3913 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 00322: val_acc did not improve from 0.94001\n",
+ "Epoch 323/100000\n",
+ " - 19s - loss: 0.3951 - acc: 0.9347 - val_loss: 0.4147 - val_acc: 0.9132\n",
+ "\n",
+ "Epoch 00323: val_acc did not improve from 0.94001\n",
+ "Epoch 324/100000\n",
+ " - 18s - loss: 0.3959 - acc: 0.9348 - val_loss: 0.4095 - val_acc: 0.9180\n",
+ "\n",
+ "Epoch 00324: val_acc did not improve from 0.94001\n",
+ "Epoch 325/100000\n",
+ " - 19s - loss: 0.3998 - acc: 0.9346 - val_loss: 0.3957 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 00325: val_acc did not improve from 0.94001\n",
+ "Epoch 326/100000\n",
+ " - 18s - loss: 0.3982 - acc: 0.9344 - val_loss: 0.3826 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 00326: val_acc did not improve from 0.94001\n",
+ "Epoch 327/100000\n",
+ " - 18s - loss: 0.3960 - acc: 0.9347 - val_loss: 0.3958 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 00327: val_acc did not improve from 0.94001\n",
+ "Epoch 328/100000\n",
+ " - 19s - loss: 0.3934 - acc: 0.9357 - val_loss: 0.5840 - val_acc: 0.8825\n",
+ "\n",
+ "Epoch 00328: val_acc did not improve from 0.94001\n",
+ "Epoch 329/100000\n",
+ " - 18s - loss: 0.3960 - acc: 0.9343 - val_loss: 0.3772 - val_acc: 0.9393\n",
+ "\n",
+ "Epoch 00329: val_acc did not improve from 0.94001\n",
+ "Epoch 330/100000\n",
+ " - 19s - loss: 0.3957 - acc: 0.9349 - val_loss: 0.3994 - val_acc: 0.9233\n",
+ "\n",
+ "Epoch 00330: val_acc did not improve from 0.94001\n",
+ "Epoch 331/100000\n",
+ " - 19s - loss: 0.3977 - acc: 0.9338 - val_loss: 0.4528 - val_acc: 0.8848\n",
+ "\n",
+ "Epoch 00331: val_acc did not improve from 0.94001\n",
+ "Epoch 332/100000\n",
+ " - 19s - loss: 0.3950 - acc: 0.9349 - val_loss: 0.3940 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 00332: val_acc did not improve from 0.94001\n",
+ "Epoch 333/100000\n",
+ " - 19s - loss: 0.4000 - acc: 0.9333 - val_loss: 0.4483 - val_acc: 0.9236\n",
+ "\n",
+ "Epoch 00333: val_acc did not improve from 0.94001\n",
+ "Epoch 334/100000\n",
+ " - 19s - loss: 0.3962 - acc: 0.9344 - val_loss: 0.3880 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 00334: val_acc did not improve from 0.94001\n",
+ "Epoch 335/100000\n",
+ " - 19s - loss: 0.3970 - acc: 0.9340 - val_loss: 0.4581 - val_acc: 0.9046\n",
+ "\n",
+ "Epoch 00335: val_acc did not improve from 0.94001\n",
+ "Epoch 336/100000\n",
+ " - 19s - loss: 0.3953 - acc: 0.9348 - val_loss: 0.3917 - val_acc: 0.9267\n",
+ "\n",
+ "Epoch 00336: val_acc did not improve from 0.94001\n",
+ "Epoch 337/100000\n",
+ " - 19s - loss: 0.3937 - acc: 0.9352 - val_loss: 0.4428 - val_acc: 0.8998\n",
+ "\n",
+ "Epoch 00337: val_acc did not improve from 0.94001\n",
+ "Epoch 338/100000\n",
+ " - 19s - loss: 0.3956 - acc: 0.9347 - val_loss: 0.3827 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 00338: val_acc did not improve from 0.94001\n",
+ "Epoch 339/100000\n",
+ " - 19s - loss: 0.3988 - acc: 0.9343 - val_loss: 0.3867 - val_acc: 0.9309\n",
+ "\n",
+ "Epoch 00339: val_acc did not improve from 0.94001\n",
+ "Epoch 340/100000\n",
+ " - 19s - loss: 0.3958 - acc: 0.9342 - val_loss: 0.3905 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 00340: val_acc did not improve from 0.94001\n",
+ "Epoch 341/100000\n",
+ " - 19s - loss: 0.3951 - acc: 0.9344 - val_loss: 0.4146 - val_acc: 0.9179\n",
+ "\n",
+ "Epoch 00341: val_acc did not improve from 0.94001\n",
+ "Epoch 342/100000\n",
+ " - 19s - loss: 0.3957 - acc: 0.9340 - val_loss: 0.4159 - val_acc: 0.9167\n",
+ "\n",
+ "Epoch 00342: val_acc did not improve from 0.94001\n",
+ "Epoch 343/100000\n",
+ " - 19s - loss: 0.3945 - acc: 0.9353 - val_loss: 0.3909 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 00343: val_acc did not improve from 0.94001\n",
+ "Epoch 344/100000\n",
+ " - 19s - loss: 0.3969 - acc: 0.9347 - val_loss: 0.3812 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 00344: val_acc did not improve from 0.94001\n",
+ "Epoch 345/100000\n",
+ " - 18s - loss: 0.3976 - acc: 0.9345 - val_loss: 0.3926 - val_acc: 0.9269\n",
+ "\n",
+ "Epoch 00345: val_acc did not improve from 0.94001\n",
+ "Epoch 346/100000\n",
+ " - 19s - loss: 0.3954 - acc: 0.9349 - val_loss: 0.3950 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00346: val_acc did not improve from 0.94001\n",
+ "Epoch 347/100000\n",
+ " - 18s - loss: 0.3981 - acc: 0.9338 - val_loss: 0.4860 - val_acc: 0.9099\n",
+ "\n",
+ "Epoch 00347: val_acc did not improve from 0.94001\n",
+ "Epoch 348/100000\n",
+ " - 18s - loss: 0.3988 - acc: 0.9344 - val_loss: 0.3988 - val_acc: 0.9282\n",
+ "\n",
+ "Epoch 00348: val_acc did not improve from 0.94001\n",
+ "Epoch 349/100000\n",
+ " - 19s - loss: 0.3965 - acc: 0.9343 - val_loss: 0.5226 - val_acc: 0.8434\n",
+ "\n",
+ "Epoch 00349: val_acc did not improve from 0.94001\n",
+ "Epoch 350/100000\n",
+ " - 18s - loss: 0.3960 - acc: 0.9346 - val_loss: 0.4704 - val_acc: 0.8918\n",
+ "\n",
+ "Epoch 00350: val_acc did not improve from 0.94001\n",
+ "Epoch 351/100000\n",
+ " - 19s - loss: 0.3937 - acc: 0.9347 - val_loss: 0.3920 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 00351: val_acc did not improve from 0.94001\n",
+ "Epoch 352/100000\n",
+ " - 18s - loss: 0.3948 - acc: 0.9349 - val_loss: 0.4805 - val_acc: 0.8873\n",
+ "\n",
+ "Epoch 00352: val_acc did not improve from 0.94001\n",
+ "Epoch 353/100000\n",
+ " - 19s - loss: 0.3989 - acc: 0.9339 - val_loss: 0.3707 - val_acc: 0.9422\n",
+ "\n",
+ "Epoch 00353: val_acc improved from 0.94001 to 0.94225, saving model to ./ModelSnapshots/CNN-353.h5\n",
+ "Epoch 354/100000\n",
+ " - 19s - loss: 0.3969 - acc: 0.9347 - val_loss: 0.3850 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 00354: val_acc did not improve from 0.94225\n",
+ "Epoch 355/100000\n",
+ " - 19s - loss: 0.3982 - acc: 0.9344 - val_loss: 0.3779 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 00355: val_acc did not improve from 0.94225\n",
+ "Epoch 356/100000\n",
+ " - 19s - loss: 0.3979 - acc: 0.9341 - val_loss: 0.4924 - val_acc: 0.8875\n",
+ "\n",
+ "Epoch 00356: val_acc did not improve from 0.94225\n",
+ "Epoch 357/100000\n",
+ " - 19s - loss: 0.3966 - acc: 0.9347 - val_loss: 0.4153 - val_acc: 0.9132\n",
+ "\n",
+ "Epoch 00357: val_acc did not improve from 0.94225\n",
+ "Epoch 358/100000\n",
+ " - 19s - loss: 0.3974 - acc: 0.9344 - val_loss: 0.4070 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 00358: val_acc did not improve from 0.94225\n",
+ "Epoch 359/100000\n",
+ " - 19s - loss: 0.3972 - acc: 0.9346 - val_loss: 0.3953 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 00359: val_acc did not improve from 0.94225\n",
+ "Epoch 360/100000\n",
+ " - 19s - loss: 0.3957 - acc: 0.9347 - val_loss: 0.3962 - val_acc: 0.9309\n",
+ "\n",
+ "Epoch 00360: val_acc did not improve from 0.94225\n",
+ "Epoch 361/100000\n",
+ " - 19s - loss: 0.3957 - acc: 0.9347 - val_loss: 0.4077 - val_acc: 0.9202\n",
+ "\n",
+ "Epoch 00361: val_acc did not improve from 0.94225\n",
+ "Epoch 362/100000\n",
+ " - 19s - loss: 0.3961 - acc: 0.9345 - val_loss: 0.6163 - val_acc: 0.7919\n",
+ "\n",
+ "Epoch 00362: val_acc did not improve from 0.94225\n",
+ "Epoch 363/100000\n",
+ " - 19s - loss: 0.3992 - acc: 0.9335 - val_loss: 0.4250 - val_acc: 0.9272\n",
+ "\n",
+ "Epoch 00363: val_acc did not improve from 0.94225\n",
+ "Epoch 364/100000\n",
+ " - 19s - loss: 0.3949 - acc: 0.9352 - val_loss: 0.3871 - val_acc: 0.9270\n",
+ "\n",
+ "Epoch 00364: val_acc did not improve from 0.94225\n",
+ "Epoch 365/100000\n",
+ " - 19s - loss: 0.3974 - acc: 0.9344 - val_loss: 0.3863 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 00365: val_acc did not improve from 0.94225\n",
+ "Epoch 366/100000\n",
+ " - 19s - loss: 0.3988 - acc: 0.9340 - val_loss: 0.4407 - val_acc: 0.9095\n",
+ "\n",
+ "Epoch 00366: val_acc did not improve from 0.94225\n",
+ "Epoch 367/100000\n",
+ " - 18s - loss: 0.3930 - acc: 0.9349 - val_loss: 0.4103 - val_acc: 0.9203\n",
+ "\n",
+ "Epoch 00367: val_acc did not improve from 0.94225\n",
+ "Epoch 368/100000\n",
+ " - 19s - loss: 0.3969 - acc: 0.9346 - val_loss: 0.3766 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 00368: val_acc did not improve from 0.94225\n",
+ "Epoch 369/100000\n",
+ " - 18s - loss: 0.3956 - acc: 0.9348 - val_loss: 0.4009 - val_acc: 0.9281\n",
+ "\n",
+ "Epoch 00369: val_acc did not improve from 0.94225\n",
+ "Epoch 370/100000\n",
+ " - 19s - loss: 0.3948 - acc: 0.9347 - val_loss: 0.3784 - val_acc: 0.9383\n",
+ "\n",
+ "Epoch 00370: val_acc did not improve from 0.94225\n",
+ "Epoch 371/100000\n",
+ " - 18s - loss: 0.3975 - acc: 0.9337 - val_loss: 0.4197 - val_acc: 0.9266\n",
+ "\n",
+ "Epoch 00371: val_acc did not improve from 0.94225\n",
+ "Epoch 372/100000\n",
+ " - 19s - loss: 0.3972 - acc: 0.9345 - val_loss: 0.3916 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 00372: val_acc did not improve from 0.94225\n",
+ "Epoch 373/100000\n",
+ " - 18s - loss: 0.3983 - acc: 0.9340 - val_loss: 0.4921 - val_acc: 0.8681\n",
+ "\n",
+ "Epoch 00373: val_acc did not improve from 0.94225\n",
+ "Epoch 374/100000\n",
+ " - 19s - loss: 0.3991 - acc: 0.9347 - val_loss: 0.4028 - val_acc: 0.9305\n",
+ "\n",
+ "Epoch 00374: val_acc did not improve from 0.94225\n",
+ "Epoch 375/100000\n",
+ " - 19s - loss: 0.3969 - acc: 0.9340 - val_loss: 0.3990 - val_acc: 0.9242\n",
+ "\n",
+ "Epoch 00375: val_acc did not improve from 0.94225\n",
+ "Epoch 376/100000\n",
+ " - 19s - loss: 0.3990 - acc: 0.9343 - val_loss: 0.4141 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 00376: val_acc did not improve from 0.94225\n",
+ "Epoch 377/100000\n",
+ " - 19s - loss: 0.3959 - acc: 0.9345 - val_loss: 0.4142 - val_acc: 0.9103\n",
+ "\n",
+ "Epoch 00377: val_acc did not improve from 0.94225\n",
+ "Epoch 378/100000\n",
+ " - 19s - loss: 0.3974 - acc: 0.9341 - val_loss: 0.3783 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 00378: val_acc did not improve from 0.94225\n",
+ "Epoch 379/100000\n",
+ " - 18s - loss: 0.3963 - acc: 0.9343 - val_loss: 0.3905 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 00379: val_acc did not improve from 0.94225\n",
+ "Epoch 380/100000\n",
+ " - 19s - loss: 0.3949 - acc: 0.9351 - val_loss: 0.3983 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 00380: val_acc did not improve from 0.94225\n",
+ "Epoch 381/100000\n",
+ " - 19s - loss: 0.3978 - acc: 0.9345 - val_loss: 0.4146 - val_acc: 0.9204\n",
+ "\n",
+ "Epoch 00381: val_acc did not improve from 0.94225\n",
+ "Epoch 382/100000\n",
+ " - 18s - loss: 0.3962 - acc: 0.9348 - val_loss: 0.3957 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 00382: val_acc did not improve from 0.94225\n",
+ "Epoch 383/100000\n",
+ " - 19s - loss: 0.3955 - acc: 0.9342 - val_loss: 0.3834 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 00383: val_acc did not improve from 0.94225\n",
+ "Epoch 384/100000\n",
+ " - 18s - loss: 0.3956 - acc: 0.9343 - val_loss: 0.3966 - val_acc: 0.9236\n",
+ "\n",
+ "Epoch 00384: val_acc did not improve from 0.94225\n",
+ "Epoch 385/100000\n",
+ " - 19s - loss: 0.3942 - acc: 0.9350 - val_loss: 0.4539 - val_acc: 0.8910\n",
+ "\n",
+ "Epoch 00385: val_acc did not improve from 0.94225\n",
+ "Epoch 386/100000\n",
+ " - 18s - loss: 0.3998 - acc: 0.9334 - val_loss: 0.4513 - val_acc: 0.8985\n",
+ "\n",
+ "Epoch 00386: val_acc did not improve from 0.94225\n",
+ "Epoch 387/100000\n",
+ " - 18s - loss: 0.3960 - acc: 0.9342 - val_loss: 0.4464 - val_acc: 0.9122\n",
+ "\n",
+ "Epoch 00387: val_acc did not improve from 0.94225\n",
+ "Epoch 388/100000\n",
+ " - 19s - loss: 0.3954 - acc: 0.9344 - val_loss: 0.5134 - val_acc: 0.8496\n",
+ "\n",
+ "Epoch 00388: val_acc did not improve from 0.94225\n",
+ "Epoch 389/100000\n",
+ " - 19s - loss: 0.3965 - acc: 0.9345 - val_loss: 0.3970 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 00389: val_acc did not improve from 0.94225\n",
+ "Epoch 390/100000\n",
+ " - 19s - loss: 0.3975 - acc: 0.9341 - val_loss: 0.3818 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 00390: val_acc did not improve from 0.94225\n",
+ "Epoch 391/100000\n",
+ " - 19s - loss: 0.3958 - acc: 0.9345 - val_loss: 0.3894 - val_acc: 0.9275\n",
+ "\n",
+ "Epoch 00391: val_acc did not improve from 0.94225\n",
+ "Epoch 392/100000\n",
+ " - 18s - loss: 0.3951 - acc: 0.9342 - val_loss: 0.3928 - val_acc: 0.9265\n",
+ "\n",
+ "Epoch 00392: val_acc did not improve from 0.94225\n",
+ "Epoch 393/100000\n",
+ " - 19s - loss: 0.3982 - acc: 0.9346 - val_loss: 0.4041 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 00393: val_acc did not improve from 0.94225\n",
+ "Epoch 394/100000\n",
+ " - 18s - loss: 0.3956 - acc: 0.9343 - val_loss: 0.4016 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 00394: val_acc did not improve from 0.94225\n",
+ "Epoch 395/100000\n",
+ " - 19s - loss: 0.3951 - acc: 0.9346 - val_loss: 0.4112 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 00395: val_acc did not improve from 0.94225\n",
+ "Epoch 396/100000\n",
+ " - 18s - loss: 0.3957 - acc: 0.9352 - val_loss: 0.4031 - val_acc: 0.9238\n",
+ "\n",
+ "Epoch 00396: val_acc did not improve from 0.94225\n",
+ "Epoch 397/100000\n",
+ " - 19s - loss: 0.3960 - acc: 0.9349 - val_loss: 0.4604 - val_acc: 0.9044\n",
+ "\n",
+ "Epoch 00397: val_acc did not improve from 0.94225\n",
+ "Epoch 398/100000\n",
+ " - 19s - loss: 0.3996 - acc: 0.9339 - val_loss: 0.4998 - val_acc: 0.8609\n",
+ "\n",
+ "Epoch 00398: val_acc did not improve from 0.94225\n",
+ "Epoch 399/100000\n",
+ " - 18s - loss: 0.3953 - acc: 0.9351 - val_loss: 0.3926 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 00399: val_acc did not improve from 0.94225\n",
+ "Epoch 400/100000\n",
+ " - 19s - loss: 0.4040 - acc: 0.9328 - val_loss: 0.3926 - val_acc: 0.9264\n",
+ "\n",
+ "Epoch 00400: val_acc did not improve from 0.94225\n",
+ "Epoch 401/100000\n",
+ " - 19s - loss: 0.3983 - acc: 0.9335 - val_loss: 0.4238 - val_acc: 0.9105\n",
+ "\n",
+ "Epoch 00401: val_acc did not improve from 0.94225\n",
+ "Epoch 402/100000\n",
+ " - 19s - loss: 0.3954 - acc: 0.9351 - val_loss: 0.5009 - val_acc: 0.8659\n",
+ "\n",
+ "Epoch 00402: val_acc did not improve from 0.94225\n",
+ "Epoch 403/100000\n",
+ " - 19s - loss: 0.3988 - acc: 0.9343 - val_loss: 0.4153 - val_acc: 0.9237\n",
+ "\n",
+ "Epoch 00403: val_acc did not improve from 0.94225\n",
+ "Epoch 404/100000\n",
+ " - 19s - loss: 0.3961 - acc: 0.9341 - val_loss: 0.4492 - val_acc: 0.9110\n",
+ "\n",
+ "Epoch 00404: val_acc did not improve from 0.94225\n",
+ "Epoch 405/100000\n",
+ " - 19s - loss: 0.4000 - acc: 0.9335 - val_loss: 0.4161 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 00405: val_acc did not improve from 0.94225\n",
+ "Epoch 406/100000\n",
+ " - 18s - loss: 0.3975 - acc: 0.9341 - val_loss: 0.3899 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00406: val_acc did not improve from 0.94225\n",
+ "Epoch 407/100000\n",
+ " - 19s - loss: 0.3998 - acc: 0.9331 - val_loss: 0.4004 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 00407: val_acc did not improve from 0.94225\n",
+ "Epoch 408/100000\n",
+ " - 18s - loss: 0.3968 - acc: 0.9341 - val_loss: 0.4631 - val_acc: 0.9163\n",
+ "\n",
+ "Epoch 00408: val_acc did not improve from 0.94225\n",
+ "Epoch 409/100000\n",
+ " - 19s - loss: 0.3943 - acc: 0.9343 - val_loss: 0.5393 - val_acc: 0.8576\n",
+ "\n",
+ "Epoch 00409: val_acc did not improve from 0.94225\n",
+ "Epoch 410/100000\n",
+ " - 19s - loss: 0.3970 - acc: 0.9345 - val_loss: 0.3905 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 00410: val_acc did not improve from 0.94225\n",
+ "Epoch 411/100000\n",
+ " - 18s - loss: 0.3971 - acc: 0.9347 - val_loss: 0.3771 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 00411: val_acc did not improve from 0.94225\n",
+ "Epoch 412/100000\n",
+ " - 19s - loss: 0.3975 - acc: 0.9340 - val_loss: 0.4578 - val_acc: 0.9153\n",
+ "\n",
+ "Epoch 00412: val_acc did not improve from 0.94225\n",
+ "Epoch 413/100000\n",
+ " - 19s - loss: 0.3981 - acc: 0.9340 - val_loss: 0.4280 - val_acc: 0.9183\n",
+ "\n",
+ "Epoch 00418: val_acc did not improve from 0.94225\n",
+ "Epoch 419/100000\n",
+ " - 19s - loss: 0.3979 - acc: 0.9340 - val_loss: 0.3753 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 00419: val_acc did not improve from 0.94225\n",
+ "Epoch 420/100000\n",
+ " - 19s - loss: 0.3985 - acc: 0.9334 - val_loss: 0.4093 - val_acc: 0.9185\n",
+ "\n",
+ "Epoch 00420: val_acc did not improve from 0.94225\n",
+ "Epoch 421/100000\n",
+ " - 19s - loss: 0.3973 - acc: 0.9341 - val_loss: 0.4216 - val_acc: 0.9139\n",
+ "\n",
+ "Epoch 00421: val_acc did not improve from 0.94225\n",
+ "Epoch 422/100000\n",
+ " - 19s - loss: 0.3983 - acc: 0.9345 - val_loss: 0.3978 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 00422: val_acc did not improve from 0.94225\n",
+ "Epoch 423/100000\n",
+ " - 19s - loss: 0.3984 - acc: 0.9348 - val_loss: 0.3900 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 00423: val_acc did not improve from 0.94225\n",
+ "Epoch 424/100000\n",
+ " - 19s - loss: 0.3988 - acc: 0.9341 - val_loss: 0.4112 - val_acc: 0.9127\n",
+ "\n",
+ "Epoch 00424: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 00424: ReduceLROnPlateau reducing learning rate to 0.0009025000152178108.\n",
+ "Epoch 425/100000\n",
+ " - 19s - loss: 0.3903 - acc: 0.9339 - val_loss: 0.4279 - val_acc: 0.9038\n",
+ "\n",
+ "Epoch 00425: val_acc did not improve from 0.94225\n",
+ "Epoch 426/100000\n",
+ " - 18s - loss: 0.3873 - acc: 0.9352 - val_loss: 0.3781 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00426: val_acc did not improve from 0.94225\n",
+ "Epoch 427/100000\n",
+ " - 19s - loss: 0.3891 - acc: 0.9344 - val_loss: 0.4225 - val_acc: 0.9219\n",
+ "\n",
+ "Epoch 00427: val_acc did not improve from 0.94225\n",
+ "Epoch 428/100000\n",
+ " - 19s - loss: 0.3854 - acc: 0.9352 - val_loss: 0.6165 - val_acc: 0.8236\n",
+ "\n",
+ "Epoch 00428: val_acc did not improve from 0.94225\n",
+ "Epoch 429/100000\n",
+ " - 19s - loss: 0.3911 - acc: 0.9331 - val_loss: 0.5023 - val_acc: 0.8787\n",
+ "\n",
+ "Epoch 00429: val_acc did not improve from 0.94225\n",
+ "Epoch 430/100000\n",
+ " - 19s - loss: 0.3876 - acc: 0.9348 - val_loss: 0.5066 - val_acc: 0.8492\n",
+ "\n",
+ "Epoch 00430: val_acc did not improve from 0.94225\n",
+ "Epoch 431/100000\n",
+ " - 19s - loss: 0.3878 - acc: 0.9342 - val_loss: 0.3706 - val_acc: 0.9410\n",
+ "\n",
+ "Epoch 00431: val_acc did not improve from 0.94225\n",
+ "Epoch 432/100000\n",
+ " - 18s - loss: 0.3916 - acc: 0.9337 - val_loss: 0.3999 - val_acc: 0.9116\n",
+ "\n",
+ "Epoch 00432: val_acc did not improve from 0.94225\n",
+ "Epoch 433/100000\n",
+ " - 18s - loss: 0.3887 - acc: 0.9338 - val_loss: 0.4852 - val_acc: 0.8689\n",
+ "\n",
+ "Epoch 00433: val_acc did not improve from 0.94225\n",
+ "Epoch 434/100000\n",
+ " - 19s - loss: 0.3842 - acc: 0.9355 - val_loss: 0.4900 - val_acc: 0.9019\n",
+ "\n",
+ "Epoch 00434: val_acc did not improve from 0.94225\n",
+ "Epoch 435/100000\n",
+ " - 19s - loss: 0.3870 - acc: 0.9350 - val_loss: 0.5658 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00435: val_acc did not improve from 0.94225\n",
+ "Epoch 436/100000\n",
+ " - 18s - loss: 0.3904 - acc: 0.9339 - val_loss: 0.3719 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 00436: val_acc did not improve from 0.94225\n",
+ "Epoch 437/100000\n",
+ " - 19s - loss: 0.3926 - acc: 0.9327 - val_loss: 0.3680 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 00437: val_acc did not improve from 0.94225\n",
+ "Epoch 438/100000\n",
+ " - 18s - loss: 0.3857 - acc: 0.9353 - val_loss: 0.3827 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 00438: val_acc did not improve from 0.94225\n",
+ "Epoch 439/100000\n",
+ " - 19s - loss: 0.3866 - acc: 0.9351 - val_loss: 0.4315 - val_acc: 0.9057\n",
+ "\n",
+ "Epoch 00439: val_acc did not improve from 0.94225\n",
+ "Epoch 440/100000\n",
+ " - 18s - loss: 0.3884 - acc: 0.9341 - val_loss: 0.3742 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 00440: val_acc did not improve from 0.94225\n",
+ "Epoch 441/100000\n",
+ " - 19s - loss: 0.3892 - acc: 0.9344 - val_loss: 0.3746 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 00441: val_acc did not improve from 0.94225\n",
+ "Epoch 442/100000\n",
+ " - 19s - loss: 0.3875 - acc: 0.9345 - val_loss: 0.4422 - val_acc: 0.8977\n",
+ "\n",
+ "Epoch 00442: val_acc did not improve from 0.94225\n",
+ "Epoch 443/100000\n",
+ " - 19s - loss: 0.3891 - acc: 0.9339 - val_loss: 0.3710 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 00443: val_acc did not improve from 0.94225\n",
+ "Epoch 444/100000\n",
+ " - 18s - loss: 0.3869 - acc: 0.9352 - val_loss: 0.4202 - val_acc: 0.9119\n",
+ "\n",
+ "Epoch 00444: val_acc did not improve from 0.94225\n",
+ "Epoch 445/100000\n",
+ " - 19s - loss: 0.3880 - acc: 0.9342 - val_loss: 0.4037 - val_acc: 0.9171\n",
+ "\n",
+ "Epoch 00445: val_acc did not improve from 0.94225\n",
+ "Epoch 446/100000\n",
+ " - 19s - loss: 0.3894 - acc: 0.9348 - val_loss: 0.3825 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 00446: val_acc did not improve from 0.94225\n",
+ "Epoch 447/100000\n",
+ " - 19s - loss: 0.3883 - acc: 0.9342 - val_loss: 0.3763 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00447: val_acc did not improve from 0.94225\n",
+ "Epoch 448/100000\n",
+ " - 19s - loss: 0.3861 - acc: 0.9343 - val_loss: 0.4339 - val_acc: 0.9250\n",
+ "\n",
+ "Epoch 00448: val_acc did not improve from 0.94225\n",
+ "Epoch 449/100000\n",
+ " - 19s - loss: 0.3891 - acc: 0.9337 - val_loss: 0.3682 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 00449: val_acc did not improve from 0.94225\n",
+ "Epoch 450/100000\n",
+ " - 19s - loss: 0.3899 - acc: 0.9343 - val_loss: 0.3819 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 00450: val_acc did not improve from 0.94225\n",
+ "Epoch 451/100000\n",
+ " - 19s - loss: 0.3861 - acc: 0.9348 - val_loss: 0.4300 - val_acc: 0.9040\n",
+ "\n",
+ "Epoch 00451: val_acc did not improve from 0.94225\n",
+ "Epoch 452/100000\n",
+ " - 19s - loss: 0.3881 - acc: 0.9336 - val_loss: 0.3857 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 00452: val_acc did not improve from 0.94225\n",
+ "Epoch 453/100000\n",
+ " - 18s - loss: 0.3847 - acc: 0.9352 - val_loss: 0.3745 - val_acc: 0.9304\n",
+ "\n",
+ "Epoch 00453: val_acc did not improve from 0.94225\n",
+ "Epoch 454/100000\n",
+ " - 19s - loss: 0.3858 - acc: 0.9345 - val_loss: 0.3701 - val_acc: 0.9377\n",
+ "\n",
+ "Epoch 00454: val_acc did not improve from 0.94225\n",
+ "Epoch 455/100000\n",
+ " - 18s - loss: 0.3853 - acc: 0.9345 - val_loss: 0.4384 - val_acc: 0.9218\n",
+ "\n",
+ "Epoch 00455: val_acc did not improve from 0.94225\n",
+ "Epoch 456/100000\n",
+ " - 19s - loss: 0.3889 - acc: 0.9342 - val_loss: 0.4469 - val_acc: 0.9017\n",
+ "\n",
+ "Epoch 00456: val_acc did not improve from 0.94225\n",
+ "Epoch 457/100000\n",
+ " - 19s - loss: 0.3877 - acc: 0.9349 - val_loss: 0.3737 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 00457: val_acc did not improve from 0.94225\n",
+ "Epoch 458/100000\n",
+ " - 19s - loss: 0.3918 - acc: 0.9333 - val_loss: 0.4124 - val_acc: 0.9175\n",
+ "\n",
+ "Epoch 00458: val_acc did not improve from 0.94225\n",
+ "Epoch 459/100000\n",
+ " - 19s - loss: 0.3857 - acc: 0.9352 - val_loss: 0.3875 - val_acc: 0.9305\n",
+ "\n",
+ "Epoch 00459: val_acc did not improve from 0.94225\n",
+ "Epoch 460/100000\n",
+ " - 19s - loss: 0.3884 - acc: 0.9342 - val_loss: 0.4006 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 00460: val_acc did not improve from 0.94225\n",
+ "Epoch 461/100000\n",
+ " - 19s - loss: 0.3885 - acc: 0.9345 - val_loss: 0.3791 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 00461: val_acc did not improve from 0.94225\n",
+ "Epoch 462/100000\n",
+ " - 19s - loss: 0.3882 - acc: 0.9349 - val_loss: 0.4018 - val_acc: 0.9286\n",
+ "\n",
+ "Epoch 00462: val_acc did not improve from 0.94225\n",
+ "Epoch 463/100000\n",
+ " - 19s - loss: 0.3898 - acc: 0.9341 - val_loss: 0.3822 - val_acc: 0.9274\n",
+ "\n",
+ "Epoch 00463: val_acc did not improve from 0.94225\n",
+ "Epoch 464/100000\n",
+ " - 19s - loss: 0.3886 - acc: 0.9343 - val_loss: 0.3959 - val_acc: 0.9243\n",
+ "\n",
+ "Epoch 00464: val_acc did not improve from 0.94225\n",
+ "Epoch 465/100000\n",
+ " - 18s - loss: 0.3904 - acc: 0.9340 - val_loss: 0.3818 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 00465: val_acc did not improve from 0.94225\n",
+ "Epoch 466/100000\n",
+ " - 19s - loss: 0.3878 - acc: 0.9346 - val_loss: 0.3951 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 00466: val_acc did not improve from 0.94225\n",
+ "Epoch 467/100000\n",
+ " - 18s - loss: 0.3900 - acc: 0.9335 - val_loss: 0.3747 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 00467: val_acc did not improve from 0.94225\n",
+ "Epoch 468/100000\n",
+ " - 19s - loss: 0.3896 - acc: 0.9346 - val_loss: 0.3802 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 00468: val_acc did not improve from 0.94225\n",
+ "Epoch 469/100000\n",
+ " - 18s - loss: 0.3893 - acc: 0.9340 - val_loss: 0.3893 - val_acc: 0.9273\n",
+ "\n",
+ "Epoch 00469: val_acc did not improve from 0.94225\n",
+ "Epoch 470/100000\n",
+ " - 18s - loss: 0.3892 - acc: 0.9344 - val_loss: 0.4014 - val_acc: 0.9155\n",
+ "\n",
+ "Epoch 00470: val_acc did not improve from 0.94225\n",
+ "Epoch 471/100000\n",
+ " - 19s - loss: 0.3885 - acc: 0.9348 - val_loss: 0.3714 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 00471: val_acc did not improve from 0.94225\n",
+ "Epoch 472/100000\n",
+ " - 18s - loss: 0.3882 - acc: 0.9349 - val_loss: 0.3981 - val_acc: 0.9246\n",
+ "\n",
+ "Epoch 00472: val_acc did not improve from 0.94225\n",
+ "Epoch 473/100000\n",
+ " - 19s - loss: 0.3850 - acc: 0.9349 - val_loss: 0.3910 - val_acc: 0.9223\n",
+ "\n",
+ "Epoch 00473: val_acc did not improve from 0.94225\n",
+ "Epoch 474/100000\n",
+ " - 19s - loss: 0.3878 - acc: 0.9345 - val_loss: 0.4567 - val_acc: 0.8801\n",
+ "\n",
+ "Epoch 00474: val_acc did not improve from 0.94225\n",
+ "Epoch 475/100000\n",
+ " - 18s - loss: 0.3892 - acc: 0.9351 - val_loss: 0.3816 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 00475: val_acc did not improve from 0.94225\n",
+ "Epoch 476/100000\n",
+ " - 19s - loss: 0.3904 - acc: 0.9338 - val_loss: 0.3739 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 00476: val_acc did not improve from 0.94225\n",
+ "Epoch 477/100000\n",
+ " - 18s - loss: 0.3913 - acc: 0.9344 - val_loss: 0.4849 - val_acc: 0.8807\n",
+ "\n",
+ "Epoch 00477: val_acc did not improve from 0.94225\n",
+ "Epoch 478/100000\n",
+ " - 19s - loss: 0.3912 - acc: 0.9345 - val_loss: 0.3807 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 00478: val_acc did not improve from 0.94225\n",
+ "Epoch 479/100000\n",
+ " - 19s - loss: 0.3887 - acc: 0.9345 - val_loss: 0.3844 - val_acc: 0.9225\n",
+ "\n",
+ "Epoch 00479: val_acc did not improve from 0.94225\n",
+ "Epoch 480/100000\n",
+ " - 19s - loss: 0.3876 - acc: 0.9352 - val_loss: 0.3716 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 00480: val_acc did not improve from 0.94225\n",
+ "Epoch 481/100000\n",
+ " - 19s - loss: 0.3880 - acc: 0.9348 - val_loss: 0.3883 - val_acc: 0.9248\n",
+ "\n",
+ "Epoch 00481: val_acc did not improve from 0.94225\n",
+ "Epoch 482/100000\n",
+ " - 19s - loss: 0.3882 - acc: 0.9342 - val_loss: 0.3689 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 00482: val_acc did not improve from 0.94225\n",
+ "Epoch 483/100000\n",
+ " - 19s - loss: 0.3893 - acc: 0.9338 - val_loss: 0.3841 - val_acc: 0.9282\n",
+ "\n",
+ "Epoch 00483: val_acc did not improve from 0.94225\n",
+ "Epoch 484/100000\n",
+ " - 19s - loss: 0.3870 - acc: 0.9343 - val_loss: 0.3907 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 00484: val_acc did not improve from 0.94225\n",
+ "Epoch 485/100000\n",
+ " - 19s - loss: 0.3912 - acc: 0.9337 - val_loss: 0.3848 - val_acc: 0.9285\n",
+ "\n",
+ "Epoch 00485: val_acc did not improve from 0.94225\n",
+ "Epoch 486/100000\n",
+ " - 19s - loss: 0.3901 - acc: 0.9346 - val_loss: 0.3797 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 00486: val_acc did not improve from 0.94225\n",
+ "Epoch 487/100000\n",
+ " - 19s - loss: 0.3894 - acc: 0.9337 - val_loss: 0.4391 - val_acc: 0.8926\n",
+ "\n",
+ "Epoch 00487: val_acc did not improve from 0.94225\n",
+ "Epoch 488/100000\n",
+ " - 18s - loss: 0.3860 - acc: 0.9350 - val_loss: 0.3975 - val_acc: 0.9171\n",
+ "\n",
+ "Epoch 00488: val_acc did not improve from 0.94225\n",
+ "Epoch 489/100000\n",
+ " - 19s - loss: 0.3867 - acc: 0.9348 - val_loss: 0.3714 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 00489: val_acc did not improve from 0.94225\n",
+ "Epoch 490/100000\n",
+ " - 19s - loss: 0.3890 - acc: 0.9337 - val_loss: 0.4128 - val_acc: 0.9181\n",
+ "\n",
+ "Epoch 00490: val_acc did not improve from 0.94225\n",
+ "Epoch 491/100000\n",
+ " - 19s - loss: 0.3918 - acc: 0.9331 - val_loss: 0.5274 - val_acc: 0.8722\n",
+ "\n",
+ "Epoch 00491: val_acc did not improve from 0.94225\n",
+ "Epoch 492/100000\n",
+ " - 19s - loss: 0.3885 - acc: 0.9347 - val_loss: 0.3829 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 00492: val_acc did not improve from 0.94225\n",
+ "Epoch 493/100000\n",
+ " - 19s - loss: 0.3897 - acc: 0.9348 - val_loss: 0.3878 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 00493: val_acc did not improve from 0.94225\n",
+ "Epoch 494/100000\n",
+ " - 19s - loss: 0.3904 - acc: 0.9340 - val_loss: 0.3814 - val_acc: 0.9273\n",
+ "\n",
+ "Epoch 00494: val_acc did not improve from 0.94225\n",
+ "Epoch 495/100000\n",
+ " - 18s - loss: 0.3881 - acc: 0.9350 - val_loss: 0.3973 - val_acc: 0.9255\n",
+ "\n",
+ "Epoch 00495: val_acc did not improve from 0.94225\n",
+ "Epoch 496/100000\n",
+ " - 19s - loss: 0.3894 - acc: 0.9341 - val_loss: 0.3813 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 00496: val_acc did not improve from 0.94225\n",
+ "Epoch 497/100000\n",
+ " - 18s - loss: 0.3891 - acc: 0.9340 - val_loss: 0.3759 - val_acc: 0.9390\n",
+ "\n",
+ "Epoch 00497: val_acc did not improve from 0.94225\n",
+ "Epoch 498/100000\n",
+ " - 19s - loss: 0.3914 - acc: 0.9332 - val_loss: 0.3937 - val_acc: 0.9267\n",
+ "\n",
+ "Epoch 00498: val_acc did not improve from 0.94225\n",
+ "Epoch 499/100000\n",
+ " - 19s - loss: 0.3878 - acc: 0.9348 - val_loss: 0.4182 - val_acc: 0.9042\n",
+ "\n",
+ "Epoch 00499: val_acc did not improve from 0.94225\n",
+ "Epoch 500/100000\n",
+ " - 18s - loss: 0.3901 - acc: 0.9348 - val_loss: 0.4485 - val_acc: 0.9114\n",
+ "\n",
+ "Epoch 00500: val_acc did not improve from 0.94225\n",
+ "Epoch 501/100000\n",
+ " - 19s - loss: 0.3908 - acc: 0.9344 - val_loss: 0.5070 - val_acc: 0.8653\n",
+ "\n",
+ "Epoch 00501: val_acc did not improve from 0.94225\n",
+ "Epoch 502/100000\n",
+ " - 18s - loss: 0.3889 - acc: 0.9345 - val_loss: 0.3704 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 00502: val_acc did not improve from 0.94225\n",
+ "Epoch 503/100000\n",
+ " - 19s - loss: 0.3891 - acc: 0.9344 - val_loss: 0.3717 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 00503: val_acc did not improve from 0.94225\n",
+ "Epoch 504/100000\n",
+ " - 18s - loss: 0.3907 - acc: 0.9339 - val_loss: 0.3746 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 00504: val_acc did not improve from 0.94225\n",
+ "Epoch 505/100000\n",
+ " - 19s - loss: 0.3897 - acc: 0.9342 - val_loss: 0.4514 - val_acc: 0.8870\n",
+ "\n",
+ "Epoch 00505: val_acc did not improve from 0.94225\n",
+ "Epoch 506/100000\n",
+ " - 19s - loss: 0.3895 - acc: 0.9337 - val_loss: 0.3828 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 00506: val_acc did not improve from 0.94225\n",
+ "Epoch 507/100000\n",
+ " - 19s - loss: 0.3919 - acc: 0.9334 - val_loss: 0.3786 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00507: val_acc did not improve from 0.94225\n",
+ "Epoch 508/100000\n",
+ " - 19s - loss: 0.3893 - acc: 0.9347 - val_loss: 0.3916 - val_acc: 0.9272\n",
+ "\n",
+ "Epoch 00508: val_acc did not improve from 0.94225\n",
+ "Epoch 509/100000\n",
+ " - 19s - loss: 0.3862 - acc: 0.9350 - val_loss: 0.4134 - val_acc: 0.9084\n",
+ "\n",
+ "Epoch 00509: val_acc did not improve from 0.94225\n",
+ "Epoch 510/100000\n",
+ " - 18s - loss: 0.3901 - acc: 0.9338 - val_loss: 0.3840 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 00510: val_acc did not improve from 0.94225\n",
+ "Epoch 511/100000\n",
+ " - 19s - loss: 0.3891 - acc: 0.9340 - val_loss: 0.3769 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 00511: val_acc did not improve from 0.94225\n",
+ "Epoch 512/100000\n",
+ " - 18s - loss: 0.3912 - acc: 0.9340 - val_loss: 0.3935 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 00512: val_acc did not improve from 0.94225\n",
+ "Epoch 513/100000\n",
+ " - 19s - loss: 0.3886 - acc: 0.9351 - val_loss: 0.5641 - val_acc: 0.8408\n",
+ "\n",
+ "Epoch 00513: val_acc did not improve from 0.94225\n",
+ "Epoch 514/100000\n",
+ " - 18s - loss: 0.3890 - acc: 0.9346 - val_loss: 0.4265 - val_acc: 0.9001\n",
+ "\n",
+ "Epoch 00514: val_acc did not improve from 0.94225\n",
+ "Epoch 515/100000\n",
+ " - 20s - loss: 0.3876 - acc: 0.9345 - val_loss: 0.3850 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 00515: val_acc did not improve from 0.94225\n",
+ "Epoch 516/100000\n",
+ " - 19s - loss: 0.3865 - acc: 0.9347 - val_loss: 0.4356 - val_acc: 0.8957\n",
+ "\n",
+ "Epoch 00516: val_acc did not improve from 0.94225\n",
+ "Epoch 517/100000\n",
+ " - 18s - loss: 0.3890 - acc: 0.9341 - val_loss: 0.3715 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 00517: val_acc did not improve from 0.94225\n",
+ "Epoch 518/100000\n",
+ " - 19s - loss: 0.3864 - acc: 0.9343 - val_loss: 0.4276 - val_acc: 0.9041\n",
+ "\n",
+ "Epoch 00518: val_acc did not improve from 0.94225\n",
+ "Epoch 519/100000\n",
+ " - 18s - loss: 0.3878 - acc: 0.9349 - val_loss: 0.3802 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 00519: val_acc did not improve from 0.94225\n",
+ "Epoch 520/100000\n",
+ " - 19s - loss: 0.3894 - acc: 0.9343 - val_loss: 0.3798 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 00520: val_acc did not improve from 0.94225\n",
+ "Epoch 521/100000\n",
+ " - 19s - loss: 0.3915 - acc: 0.9336 - val_loss: 0.3790 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 00521: val_acc did not improve from 0.94225\n",
+ "Epoch 522/100000\n",
+ " - 18s - loss: 0.3897 - acc: 0.9343 - val_loss: 0.3886 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 00522: val_acc did not improve from 0.94225\n",
+ "Epoch 523/100000\n",
+ " - 19s - loss: 0.3906 - acc: 0.9340 - val_loss: 0.4196 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 00523: val_acc did not improve from 0.94225\n",
+ "Epoch 524/100000\n",
+ " - 18s - loss: 0.3910 - acc: 0.9341 - val_loss: 0.3871 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 00524: val_acc did not improve from 0.94225\n",
+ "Epoch 525/100000\n",
+ " - 19s - loss: 0.3872 - acc: 0.9344 - val_loss: 0.3840 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 00525: val_acc did not improve from 0.94225\n",
+ "Epoch 526/100000\n",
+ " - 19s - loss: 0.3905 - acc: 0.9341 - val_loss: 0.4848 - val_acc: 0.8520\n",
+ "\n",
+ "Epoch 00526: val_acc did not improve from 0.94225\n",
+ "Epoch 527/100000\n",
+ " - 19s - loss: 0.3883 - acc: 0.9350 - val_loss: 0.3856 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 00527: val_acc did not improve from 0.94225\n",
+ "Epoch 528/100000\n",
+ " - 19s - loss: 0.3892 - acc: 0.9342 - val_loss: 0.3745 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 00528: val_acc did not improve from 0.94225\n",
+ "Epoch 529/100000\n",
+ " - 19s - loss: 0.3896 - acc: 0.9343 - val_loss: 0.4346 - val_acc: 0.9039\n",
+ "\n",
+ "Epoch 00529: val_acc did not improve from 0.94225\n",
+ "Epoch 530/100000\n",
+ " - 19s - loss: 0.3925 - acc: 0.9332 - val_loss: 0.4347 - val_acc: 0.9019\n",
+ "\n",
+ "Epoch 00530: val_acc did not improve from 0.94225\n",
+ "Epoch 531/100000\n",
+ " - 18s - loss: 0.3917 - acc: 0.9337 - val_loss: 0.3870 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 00531: val_acc did not improve from 0.94225\n",
+ "Epoch 532/100000\n",
+ " - 19s - loss: 0.3893 - acc: 0.9342 - val_loss: 0.4613 - val_acc: 0.8869\n",
+ "\n",
+ "Epoch 00532: val_acc did not improve from 0.94225\n",
+ "Epoch 533/100000\n",
+ " - 18s - loss: 0.3895 - acc: 0.9340 - val_loss: 0.3790 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 00533: val_acc did not improve from 0.94225\n",
+ "Epoch 534/100000\n",
+ " - 19s - loss: 0.3904 - acc: 0.9344 - val_loss: 0.4047 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 00534: val_acc did not improve from 0.94225\n",
+ "Epoch 535/100000\n",
+ " - 19s - loss: 0.3873 - acc: 0.9356 - val_loss: 0.3876 - val_acc: 0.9243\n",
+ "\n",
+ "Epoch 00535: val_acc did not improve from 0.94225\n",
+ "Epoch 536/100000\n",
+ " - 19s - loss: 0.3886 - acc: 0.9345 - val_loss: 0.3754 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 00536: val_acc did not improve from 0.94225\n",
+ "Epoch 537/100000\n",
+ " - 19s - loss: 0.3903 - acc: 0.9345 - val_loss: 0.3870 - val_acc: 0.9290\n",
+ "\n",
+ "Epoch 00537: val_acc did not improve from 0.94225\n",
+ "Epoch 538/100000\n",
+ " - 19s - loss: 0.3897 - acc: 0.9340 - val_loss: 0.3874 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 00538: val_acc did not improve from 0.94225\n",
+ "Epoch 539/100000\n",
+ " - 19s - loss: 0.3896 - acc: 0.9339 - val_loss: 0.3879 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00539: val_acc did not improve from 0.94225\n",
+ "Epoch 540/100000\n",
+ " - 19s - loss: 0.3866 - acc: 0.9344 - val_loss: 0.4102 - val_acc: 0.9175\n",
+ "\n",
+ "Epoch 00540: val_acc did not improve from 0.94225\n",
+ "Epoch 541/100000\n",
+ " - 19s - loss: 0.3909 - acc: 0.9338 - val_loss: 0.5077 - val_acc: 0.8792\n",
+ "\n",
+ "Epoch 00541: val_acc did not improve from 0.94225\n",
+ "Epoch 542/100000\n",
+ " - 18s - loss: 0.3932 - acc: 0.9332 - val_loss: 0.4151 - val_acc: 0.9098\n",
+ "\n",
+ "Epoch 00542: val_acc did not improve from 0.94225\n",
+ "Epoch 543/100000\n",
+ " - 19s - loss: 0.3863 - acc: 0.9349 - val_loss: 0.4328 - val_acc: 0.8954\n",
+ "\n",
+ "Epoch 00543: val_acc did not improve from 0.94225\n",
+ "Epoch 544/100000\n",
+ " - 19s - loss: 0.3888 - acc: 0.9342 - val_loss: 0.3837 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 00544: val_acc did not improve from 0.94225\n",
+ "Epoch 545/100000\n",
+ " - 18s - loss: 0.3895 - acc: 0.9344 - val_loss: 0.3835 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 00545: val_acc did not improve from 0.94225\n",
+ "Epoch 546/100000\n",
+ " - 19s - loss: 0.3916 - acc: 0.9337 - val_loss: 0.4144 - val_acc: 0.9085\n",
+ "\n",
+ "Epoch 00546: val_acc did not improve from 0.94225\n",
+ "Epoch 547/100000\n",
+ " - 19s - loss: 0.3896 - acc: 0.9343 - val_loss: 0.4783 - val_acc: 0.8717\n",
+ "\n",
+ "Epoch 00547: val_acc did not improve from 0.94225\n",
+ "Epoch 548/100000\n",
+ " - 19s - loss: 0.3905 - acc: 0.9342 - val_loss: 0.3820 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 00548: val_acc did not improve from 0.94225\n",
+ "Epoch 549/100000\n",
+ " - 19s - loss: 0.3896 - acc: 0.9345 - val_loss: 0.3798 - val_acc: 0.9286\n",
+ "\n",
+ "Epoch 00549: val_acc did not improve from 0.94225\n",
+ "Epoch 550/100000\n",
+ " - 19s - loss: 0.3896 - acc: 0.9342 - val_loss: 0.4307 - val_acc: 0.9131\n",
+ "\n",
+ "Epoch 00550: val_acc did not improve from 0.94225\n",
+ "Epoch 551/100000\n",
+ " - 18s - loss: 0.3885 - acc: 0.9342 - val_loss: 0.3731 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 00551: val_acc did not improve from 0.94225\n",
+ "Epoch 552/100000\n",
+ " - 19s - loss: 0.3883 - acc: 0.9349 - val_loss: 0.4225 - val_acc: 0.9234\n",
+ "\n",
+ "Epoch 00552: val_acc did not improve from 0.94225\n",
+ "Epoch 553/100000\n",
+ " - 18s - loss: 0.3888 - acc: 0.9343 - val_loss: 0.4251 - val_acc: 0.9084\n",
+ "\n",
+ "Epoch 00553: val_acc did not improve from 0.94225\n",
+ "Epoch 554/100000\n",
+ " - 19s - loss: 0.3901 - acc: 0.9339 - val_loss: 0.4113 - val_acc: 0.9138\n",
+ "\n",
+ "Epoch 00554: val_acc did not improve from 0.94225\n",
+ "Epoch 555/100000\n",
+ " - 19s - loss: 0.3866 - acc: 0.9342 - val_loss: 0.3780 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 00555: val_acc did not improve from 0.94225\n",
+ "Epoch 556/100000\n",
+ " - 18s - loss: 0.3869 - acc: 0.9343 - val_loss: 0.4453 - val_acc: 0.9016\n",
+ "\n",
+ "Epoch 00556: val_acc did not improve from 0.94225\n",
+ "Epoch 557/100000\n",
+ " - 19s - loss: 0.3896 - acc: 0.9344 - val_loss: 0.3972 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 00557: val_acc did not improve from 0.94225\n",
+ "Epoch 558/100000\n",
+ " - 19s - loss: 0.3883 - acc: 0.9344 - val_loss: 0.4326 - val_acc: 0.9168\n",
+ "\n",
+ "Epoch 00558: val_acc did not improve from 0.94225\n",
+ "Epoch 559/100000\n",
+ " - 19s - loss: 0.3909 - acc: 0.9337 - val_loss: 0.3951 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 00559: val_acc did not improve from 0.94225\n",
+ "Epoch 560/100000\n",
+ " - 19s - loss: 0.3896 - acc: 0.9341 - val_loss: 0.4256 - val_acc: 0.9271\n",
+ "\n",
+ "Epoch 00560: val_acc did not improve from 0.94225\n",
+ "Epoch 561/100000\n",
+ " - 19s - loss: 0.3913 - acc: 0.9334 - val_loss: 0.3812 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 00561: val_acc did not improve from 0.94225\n",
+ "Epoch 562/100000\n",
+ " - 18s - loss: 0.3881 - acc: 0.9344 - val_loss: 0.4252 - val_acc: 0.8997\n",
+ "\n",
+ "Epoch 00562: val_acc did not improve from 0.94225\n",
+ "Epoch 563/100000\n",
+ " - 19s - loss: 0.3892 - acc: 0.9341 - val_loss: 0.5081 - val_acc: 0.8627\n",
+ "\n",
+ "Epoch 00563: val_acc did not improve from 0.94225\n",
+ "Epoch 564/100000\n",
+ " - 19s - loss: 0.3879 - acc: 0.9343 - val_loss: 0.4423 - val_acc: 0.8970\n",
+ "\n",
+ "Epoch 00564: val_acc did not improve from 0.94225\n",
+ "Epoch 565/100000\n",
+ " - 19s - loss: 0.3898 - acc: 0.9335 - val_loss: 0.3721 - val_acc: 0.9377\n",
+ "\n",
+ "Epoch 00565: val_acc did not improve from 0.94225\n",
+ "Epoch 566/100000\n",
+ " - 19s - loss: 0.3875 - acc: 0.9347 - val_loss: 0.4689 - val_acc: 0.8928\n",
+ "\n",
+ "Epoch 00566: val_acc did not improve from 0.94225\n",
+ "Epoch 567/100000\n",
+ " - 19s - loss: 0.3887 - acc: 0.9346 - val_loss: 0.3980 - val_acc: 0.9271\n",
+ "\n",
+ "Epoch 00567: val_acc did not improve from 0.94225\n",
+ "Epoch 568/100000\n",
+ " - 19s - loss: 0.3889 - acc: 0.9344 - val_loss: 0.3769 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 00568: val_acc did not improve from 0.94225\n",
+ "Epoch 569/100000\n",
+ " - 19s - loss: 0.3871 - acc: 0.9344 - val_loss: 0.3833 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 00569: val_acc did not improve from 0.94225\n",
+ "Epoch 570/100000\n",
+ " - 19s - loss: 0.3891 - acc: 0.9336 - val_loss: 0.3943 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 00570: val_acc did not improve from 0.94225\n",
+ "Epoch 571/100000\n",
+ " - 19s - loss: 0.3913 - acc: 0.9336 - val_loss: 0.4071 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 00571: val_acc did not improve from 0.94225\n",
+ "Epoch 572/100000\n",
+ " - 19s - loss: 0.3891 - acc: 0.9340 - val_loss: 0.3988 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 00572: val_acc did not improve from 0.94225\n",
+ "Epoch 573/100000\n",
+ " - 19s - loss: 0.3924 - acc: 0.9335 - val_loss: 0.4232 - val_acc: 0.9095\n",
+ "\n",
+ "Epoch 00573: val_acc did not improve from 0.94225\n",
+ "Epoch 574/100000\n",
+ " - 18s - loss: 0.3889 - acc: 0.9337 - val_loss: 0.3809 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 00574: val_acc did not improve from 0.94225\n",
+ "Epoch 575/100000\n",
+ " - 18s - loss: 0.3882 - acc: 0.9345 - val_loss: 0.4112 - val_acc: 0.9159\n",
+ "\n",
+ "Epoch 00575: val_acc did not improve from 0.94225\n",
+ "Epoch 576/100000\n",
+ " - 19s - loss: 0.3882 - acc: 0.9338 - val_loss: 0.4809 - val_acc: 0.8983\n",
+ "\n",
+ "Epoch 00576: val_acc did not improve from 0.94225\n",
+ "Epoch 577/100000\n",
+ " - 18s - loss: 0.3882 - acc: 0.9341 - val_loss: 0.3886 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 00577: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 00577: ReduceLROnPlateau reducing learning rate to 0.0008573750033974647.\n",
+ "Epoch 578/100000\n",
+ " - 19s - loss: 0.3809 - acc: 0.9339 - val_loss: 0.3703 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 00578: val_acc did not improve from 0.94225\n",
+ "Epoch 579/100000\n",
+ " - 18s - loss: 0.3780 - acc: 0.9343 - val_loss: 0.3910 - val_acc: 0.9226\n",
+ "\n",
+ "Epoch 00579: val_acc did not improve from 0.94225\n",
+ "Epoch 580/100000\n",
+ " - 19s - loss: 0.3765 - acc: 0.9348 - val_loss: 0.3937 - val_acc: 0.9092\n",
+ "\n",
+ "Epoch 00580: val_acc did not improve from 0.94225\n",
+ "Epoch 581/100000\n",
+ " - 19s - loss: 0.3816 - acc: 0.9336 - val_loss: 0.3745 - val_acc: 0.9305\n",
+ "\n",
+ "Epoch 00581: val_acc did not improve from 0.94225\n",
+ "Epoch 582/100000\n",
+ " - 18s - loss: 0.3781 - acc: 0.9345 - val_loss: 0.3646 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 00582: val_acc did not improve from 0.94225\n",
+ "Epoch 583/100000\n",
+ " - 19s - loss: 0.3812 - acc: 0.9333 - val_loss: 0.5008 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00583: val_acc did not improve from 0.94225\n",
+ "Epoch 584/100000\n",
+ " - 19s - loss: 0.3791 - acc: 0.9345 - val_loss: 0.3701 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 00584: val_acc did not improve from 0.94225\n",
+ "Epoch 585/100000\n",
+ " - 19s - loss: 0.3755 - acc: 0.9352 - val_loss: 0.3850 - val_acc: 0.9211\n",
+ "\n",
+ "Epoch 00585: val_acc did not improve from 0.94225\n",
+ "Epoch 586/100000\n",
+ " - 19s - loss: 0.3790 - acc: 0.9338 - val_loss: 0.3616 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 00586: val_acc did not improve from 0.94225\n",
+ "Epoch 587/100000\n",
+ " - 18s - loss: 0.3754 - acc: 0.9355 - val_loss: 0.4552 - val_acc: 0.8937\n",
+ "\n",
+ "Epoch 00587: val_acc did not improve from 0.94225\n",
+ "Epoch 588/100000\n",
+ " - 19s - loss: 0.3775 - acc: 0.9339 - val_loss: 0.3803 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 00588: val_acc did not improve from 0.94225\n",
+ "Epoch 589/100000\n",
+ " - 19s - loss: 0.3807 - acc: 0.9342 - val_loss: 0.3644 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 00589: val_acc did not improve from 0.94225\n",
+ "Epoch 590/100000\n",
+ " - 18s - loss: 0.3784 - acc: 0.9348 - val_loss: 0.3641 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 00590: val_acc did not improve from 0.94225\n",
+ "Epoch 591/100000\n",
+ " - 19s - loss: 0.3788 - acc: 0.9343 - val_loss: 0.6189 - val_acc: 0.7987\n",
+ "\n",
+ "Epoch 00591: val_acc did not improve from 0.94225\n",
+ "Epoch 592/100000\n",
+ " - 19s - loss: 0.3766 - acc: 0.9348 - val_loss: 0.3623 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 00592: val_acc did not improve from 0.94225\n",
+ "Epoch 593/100000\n",
+ " - 18s - loss: 0.3801 - acc: 0.9345 - val_loss: 0.3660 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 00593: val_acc did not improve from 0.94225\n",
+ "Epoch 594/100000\n",
+ " - 19s - loss: 0.3796 - acc: 0.9339 - val_loss: 0.8390 - val_acc: 0.8398\n",
+ "\n",
+ "Epoch 00594: val_acc did not improve from 0.94225\n",
+ "Epoch 595/100000\n",
+ " - 18s - loss: 0.3793 - acc: 0.9342 - val_loss: 0.3632 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 00595: val_acc did not improve from 0.94225\n",
+ "Epoch 596/100000\n",
+ " - 19s - loss: 0.3811 - acc: 0.9339 - val_loss: 0.3732 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 00596: val_acc did not improve from 0.94225\n",
+ "Epoch 597/100000\n",
+ " - 18s - loss: 0.3763 - acc: 0.9347 - val_loss: 0.3957 - val_acc: 0.9145\n",
+ "\n",
+ "Epoch 00597: val_acc did not improve from 0.94225\n",
+ "Epoch 598/100000\n",
+ " - 19s - loss: 0.3770 - acc: 0.9349 - val_loss: 0.3944 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 00598: val_acc did not improve from 0.94225\n",
+ "Epoch 599/100000\n",
+ " - 19s - loss: 0.3790 - acc: 0.9343 - val_loss: 0.3731 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 00599: val_acc did not improve from 0.94225\n",
+ "Epoch 600/100000\n",
+ " - 18s - loss: 0.3783 - acc: 0.9345 - val_loss: 0.7100 - val_acc: 0.8460\n",
+ "\n",
+ "Epoch 00600: val_acc did not improve from 0.94225\n",
+ "Epoch 601/100000\n",
+ " - 19s - loss: 0.3777 - acc: 0.9348 - val_loss: 0.3732 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 00601: val_acc did not improve from 0.94225\n",
+ "Epoch 602/100000\n",
+ " - 18s - loss: 0.3771 - acc: 0.9350 - val_loss: 0.3764 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 00602: val_acc did not improve from 0.94225\n",
+ "Epoch 603/100000\n",
+ " - 19s - loss: 0.3766 - acc: 0.9354 - val_loss: 0.3885 - val_acc: 0.9259\n",
+ "\n",
+ "Epoch 00603: val_acc did not improve from 0.94225\n",
+ "Epoch 604/100000\n",
+ " - 18s - loss: 0.3793 - acc: 0.9341 - val_loss: 0.4155 - val_acc: 0.9179\n",
+ "\n",
+ "Epoch 00604: val_acc did not improve from 0.94225\n",
+ "Epoch 605/100000\n",
+ " - 19s - loss: 0.3784 - acc: 0.9343 - val_loss: 0.4017 - val_acc: 0.9163\n",
+ "\n",
+ "Epoch 00605: val_acc did not improve from 0.94225\n",
+ "Epoch 606/100000\n",
+ " - 18s - loss: 0.3770 - acc: 0.9354 - val_loss: 0.3908 - val_acc: 0.9168\n",
+ "\n",
+ "Epoch 00606: val_acc did not improve from 0.94225\n",
+ "Epoch 607/100000\n",
+ " - 19s - loss: 0.3778 - acc: 0.9348 - val_loss: 0.3622 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 00607: val_acc did not improve from 0.94225\n",
+ "Epoch 608/100000\n",
+ " - 19s - loss: 0.3781 - acc: 0.9343 - val_loss: 0.3821 - val_acc: 0.9274\n",
+ "\n",
+ "Epoch 00608: val_acc did not improve from 0.94225\n",
+ "Epoch 609/100000\n",
+ " - 19s - loss: 0.3781 - acc: 0.9343 - val_loss: 0.3940 - val_acc: 0.9141\n",
+ "\n",
+ "Epoch 00609: val_acc did not improve from 0.94225\n",
+ "Epoch 610/100000\n",
+ " - 19s - loss: 0.3791 - acc: 0.9346 - val_loss: 0.4071 - val_acc: 0.9129\n",
+ "\n",
+ "Epoch 00610: val_acc did not improve from 0.94225\n",
+ "Epoch 611/100000\n",
+ " - 19s - loss: 0.3786 - acc: 0.9346 - val_loss: 0.3791 - val_acc: 0.9264\n",
+ "\n",
+ "Epoch 00611: val_acc did not improve from 0.94225\n",
+ "Epoch 612/100000\n",
+ " - 19s - loss: 0.3804 - acc: 0.9343 - val_loss: 0.3829 - val_acc: 0.9204\n",
+ "\n",
+ "Epoch 00612: val_acc did not improve from 0.94225\n",
+ "Epoch 613/100000\n",
+ " - 19s - loss: 0.3789 - acc: 0.9345 - val_loss: 0.3635 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 00613: val_acc did not improve from 0.94225\n",
+ "Epoch 614/100000\n",
+ " - 19s - loss: 0.3774 - acc: 0.9348 - val_loss: 0.3549 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 00614: val_acc did not improve from 0.94225\n",
+ "Epoch 615/100000\n",
+ " - 19s - loss: 0.3770 - acc: 0.9351 - val_loss: 0.3810 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 00615: val_acc did not improve from 0.94225\n",
+ "Epoch 616/100000\n",
+ " - 18s - loss: 0.3794 - acc: 0.9341 - val_loss: 0.3658 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 00616: val_acc did not improve from 0.94225\n",
+ "Epoch 617/100000\n",
+ " - 19s - loss: 0.3810 - acc: 0.9341 - val_loss: 0.4580 - val_acc: 0.8960\n",
+ "\n",
+ "Epoch 00617: val_acc did not improve from 0.94225\n",
+ "Epoch 618/100000\n",
+ " - 19s - loss: 0.3796 - acc: 0.9345 - val_loss: 0.3745 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 00618: val_acc did not improve from 0.94225\n",
+ "Epoch 619/100000\n",
+ " - 18s - loss: 0.3794 - acc: 0.9347 - val_loss: 0.3897 - val_acc: 0.9227\n",
+ "\n",
+ "Epoch 00619: val_acc did not improve from 0.94225\n",
+ "Epoch 620/100000\n",
+ " - 19s - loss: 0.3794 - acc: 0.9349 - val_loss: 0.4037 - val_acc: 0.9246\n",
+ "\n",
+ "Epoch 00620: val_acc did not improve from 0.94225\n",
+ "Epoch 621/100000\n",
+ " - 18s - loss: 0.3792 - acc: 0.9347 - val_loss: 0.3791 - val_acc: 0.9254\n",
+ "\n",
+ "Epoch 00621: val_acc did not improve from 0.94225\n",
+ "Epoch 622/100000\n",
+ " - 18s - loss: 0.3835 - acc: 0.9338 - val_loss: 0.3653 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 00622: val_acc did not improve from 0.94225\n",
+ "Epoch 623/100000\n",
+ " - 18s - loss: 0.3840 - acc: 0.9334 - val_loss: 0.3737 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 00623: val_acc did not improve from 0.94225\n",
+ "Epoch 624/100000\n",
+ " - 19s - loss: 0.3805 - acc: 0.9342 - val_loss: 0.3861 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00624: val_acc did not improve from 0.94225\n",
+ "Epoch 625/100000\n",
+ " - 18s - loss: 0.3805 - acc: 0.9343 - val_loss: 0.3730 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 00625: val_acc did not improve from 0.94225\n",
+ "Epoch 626/100000\n",
+ " - 19s - loss: 0.3799 - acc: 0.9348 - val_loss: 0.4311 - val_acc: 0.9044\n",
+ "\n",
+ "Epoch 00626: val_acc did not improve from 0.94225\n",
+ "Epoch 627/100000\n",
+ " - 18s - loss: 0.3803 - acc: 0.9341 - val_loss: 0.3959 - val_acc: 0.9181\n",
+ "\n",
+ "Epoch 00627: val_acc did not improve from 0.94225\n",
+ "Epoch 628/100000\n",
+ " - 18s - loss: 0.3775 - acc: 0.9352 - val_loss: 0.3884 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 00628: val_acc did not improve from 0.94225\n",
+ "Epoch 629/100000\n",
+ " - 19s - loss: 0.3782 - acc: 0.9353 - val_loss: 0.3964 - val_acc: 0.9152\n",
+ "\n",
+ "Epoch 00629: val_acc did not improve from 0.94225\n",
+ "Epoch 630/100000\n",
+ " - 19s - loss: 0.3792 - acc: 0.9343 - val_loss: 0.4396 - val_acc: 0.9041\n",
+ "\n",
+ "Epoch 00630: val_acc did not improve from 0.94225\n",
+ "Epoch 631/100000\n",
+ " - 19s - loss: 0.3807 - acc: 0.9336 - val_loss: 0.3772 - val_acc: 0.9270\n",
+ "\n",
+ "Epoch 00631: val_acc did not improve from 0.94225\n",
+ "Epoch 632/100000\n",
+ " - 19s - loss: 0.3786 - acc: 0.9347 - val_loss: 0.4404 - val_acc: 0.9050\n",
+ "\n",
+ "Epoch 00632: val_acc did not improve from 0.94225\n",
+ "Epoch 633/100000\n",
+ " - 19s - loss: 0.3806 - acc: 0.9340 - val_loss: 0.3706 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 00633: val_acc did not improve from 0.94225\n",
+ "Epoch 634/100000\n",
+ " - 19s - loss: 0.3813 - acc: 0.9346 - val_loss: 0.3819 - val_acc: 0.9262\n",
+ "\n",
+ "Epoch 00634: val_acc did not improve from 0.94225\n",
+ "Epoch 635/100000\n",
+ " - 19s - loss: 0.3813 - acc: 0.9344 - val_loss: 0.3713 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 00635: val_acc did not improve from 0.94225\n",
+ "Epoch 636/100000\n",
+ " - 19s - loss: 0.3829 - acc: 0.9332 - val_loss: 0.4335 - val_acc: 0.8970\n",
+ "\n",
+ "Epoch 00636: val_acc did not improve from 0.94225\n",
+ "Epoch 637/100000\n",
+ " - 19s - loss: 0.3778 - acc: 0.9346 - val_loss: 0.3979 - val_acc: 0.9265\n",
+ "\n",
+ "Epoch 00637: val_acc did not improve from 0.94225\n",
+ "Epoch 638/100000\n",
+ " - 18s - loss: 0.3779 - acc: 0.9345 - val_loss: 0.3819 - val_acc: 0.9269\n",
+ "\n",
+ "Epoch 00638: val_acc did not improve from 0.94225\n",
+ "Epoch 639/100000\n",
+ " - 19s - loss: 0.3801 - acc: 0.9343 - val_loss: 0.3714 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 00639: val_acc did not improve from 0.94225\n",
+ "Epoch 640/100000\n",
+ " - 19s - loss: 0.3760 - acc: 0.9350 - val_loss: 0.4263 - val_acc: 0.9100\n",
+ "\n",
+ "Epoch 00640: val_acc did not improve from 0.94225\n",
+ "Epoch 641/100000\n",
+ " - 19s - loss: 0.3783 - acc: 0.9347 - val_loss: 0.3699 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 00641: val_acc did not improve from 0.94225\n",
+ "Epoch 642/100000\n",
+ " - 18s - loss: 0.3790 - acc: 0.9345 - val_loss: 0.3690 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 00642: val_acc did not improve from 0.94225\n",
+ "Epoch 643/100000\n",
+ " - 19s - loss: 0.3820 - acc: 0.9337 - val_loss: 0.3767 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 00643: val_acc did not improve from 0.94225\n",
+ "Epoch 644/100000\n",
+ " - 19s - loss: 0.3788 - acc: 0.9356 - val_loss: 0.3741 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 00644: val_acc did not improve from 0.94225\n",
+ "Epoch 645/100000\n",
+ " - 19s - loss: 0.3841 - acc: 0.9338 - val_loss: 0.3770 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 00645: val_acc did not improve from 0.94225\n",
+ "Epoch 646/100000\n",
+ " - 19s - loss: 0.3827 - acc: 0.9347 - val_loss: 0.3673 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 00646: val_acc did not improve from 0.94225\n",
+ "Epoch 647/100000\n",
+ " - 19s - loss: 0.3804 - acc: 0.9351 - val_loss: 0.3602 - val_acc: 0.9399\n",
+ "\n",
+ "Epoch 00647: val_acc did not improve from 0.94225\n",
+ "Epoch 648/100000\n",
+ " - 18s - loss: 0.3780 - acc: 0.9344 - val_loss: 0.4046 - val_acc: 0.9208\n",
+ "\n",
+ "Epoch 00648: val_acc did not improve from 0.94225\n",
+ "Epoch 649/100000\n",
+ " - 19s - loss: 0.3787 - acc: 0.9341 - val_loss: 0.3771 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00649: val_acc did not improve from 0.94225\n",
+ "Epoch 650/100000\n",
+ " - 18s - loss: 0.3788 - acc: 0.9347 - val_loss: 0.4698 - val_acc: 0.8820\n",
+ "\n",
+ "Epoch 00650: val_acc did not improve from 0.94225\n",
+ "Epoch 651/100000\n",
+ " - 19s - loss: 0.3798 - acc: 0.9342 - val_loss: 0.3757 - val_acc: 0.9280\n",
+ "\n",
+ "Epoch 00651: val_acc did not improve from 0.94225\n",
+ "Epoch 652/100000\n",
+ " - 18s - loss: 0.3789 - acc: 0.9349 - val_loss: 0.4087 - val_acc: 0.9172\n",
+ "\n",
+ "Epoch 00652: val_acc did not improve from 0.94225\n",
+ "Epoch 653/100000\n",
+ " - 19s - loss: 0.3786 - acc: 0.9351 - val_loss: 0.4727 - val_acc: 0.9054\n",
+ "\n",
+ "Epoch 00653: val_acc did not improve from 0.94225\n",
+ "Epoch 654/100000\n",
+ " - 19s - loss: 0.3786 - acc: 0.9346 - val_loss: 0.4045 - val_acc: 0.9207\n",
+ "\n",
+ "Epoch 00654: val_acc did not improve from 0.94225\n",
+ "Epoch 655/100000\n",
+ " - 19s - loss: 0.3807 - acc: 0.9338 - val_loss: 0.5151 - val_acc: 0.8732\n",
+ "\n",
+ "Epoch 00655: val_acc did not improve from 0.94225\n",
+ "Epoch 656/100000\n",
+ " - 18s - loss: 0.3773 - acc: 0.9346 - val_loss: 0.4580 - val_acc: 0.8963\n",
+ "\n",
+ "Epoch 00656: val_acc did not improve from 0.94225\n",
+ "Epoch 657/100000\n",
+ " - 19s - loss: 0.3773 - acc: 0.9351 - val_loss: 0.3740 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 00657: val_acc did not improve from 0.94225\n",
+ "Epoch 658/100000\n",
+ " - 18s - loss: 0.3808 - acc: 0.9339 - val_loss: 0.4696 - val_acc: 0.8698\n",
+ "\n",
+ "Epoch 00658: val_acc did not improve from 0.94225\n",
+ "Epoch 659/100000\n",
+ " - 19s - loss: 0.4057 - acc: 0.9326 - val_loss: 0.6827 - val_acc: 0.8110\n",
+ "\n",
+ "Epoch 00659: val_acc did not improve from 0.94225\n",
+ "Epoch 660/100000\n",
+ " - 18s - loss: 0.3829 - acc: 0.9345 - val_loss: 0.3905 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 00660: val_acc did not improve from 0.94225\n",
+ "Epoch 661/100000\n",
+ " - 18s - loss: 0.3809 - acc: 0.9351 - val_loss: 0.4302 - val_acc: 0.9010\n",
+ "\n",
+ "Epoch 00661: val_acc did not improve from 0.94225\n",
+ "Epoch 662/100000\n",
+ " - 18s - loss: 0.3830 - acc: 0.9341 - val_loss: 0.4423 - val_acc: 0.9229\n",
+ "\n",
+ "Epoch 00662: val_acc did not improve from 0.94225\n",
+ "Epoch 663/100000\n",
+ " - 18s - loss: 0.3843 - acc: 0.9337 - val_loss: 0.3998 - val_acc: 0.9192\n",
+ "\n",
+ "Epoch 00663: val_acc did not improve from 0.94225\n",
+ "Epoch 664/100000\n",
+ " - 19s - loss: 0.3801 - acc: 0.9341 - val_loss: 0.4312 - val_acc: 0.9040\n",
+ "\n",
+ "Epoch 00664: val_acc did not improve from 0.94225\n",
+ "Epoch 665/100000\n",
+ " - 18s - loss: 0.3826 - acc: 0.9334 - val_loss: 0.4027 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 00665: val_acc did not improve from 0.94225\n",
+ "Epoch 666/100000\n",
+ " - 19s - loss: 0.3770 - acc: 0.9350 - val_loss: 0.3937 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 00666: val_acc did not improve from 0.94225\n",
+ "Epoch 667/100000\n",
+ " - 18s - loss: 0.3772 - acc: 0.9351 - val_loss: 0.3842 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00667: val_acc did not improve from 0.94225\n",
+ "Epoch 668/100000\n",
+ " - 18s - loss: 0.3809 - acc: 0.9339 - val_loss: 0.3844 - val_acc: 0.9309\n",
+ "\n",
+ "Epoch 00668: val_acc did not improve from 0.94225\n",
+ "Epoch 669/100000\n",
+ " - 19s - loss: 0.3804 - acc: 0.9349 - val_loss: 0.3713 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 00669: val_acc did not improve from 0.94225\n",
+ "Epoch 670/100000\n",
+ " - 18s - loss: 0.3795 - acc: 0.9344 - val_loss: 0.3684 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 00670: val_acc did not improve from 0.94225\n",
+ "Epoch 671/100000\n",
+ " - 19s - loss: 0.3793 - acc: 0.9338 - val_loss: 0.3813 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 00671: val_acc did not improve from 0.94225\n",
+ "Epoch 672/100000\n",
+ " - 19s - loss: 0.3776 - acc: 0.9344 - val_loss: 0.4438 - val_acc: 0.8856\n",
+ "\n",
+ "Epoch 00672: val_acc did not improve from 0.94225\n",
+ "Epoch 673/100000\n",
+ " - 18s - loss: 0.3779 - acc: 0.9344 - val_loss: 0.3848 - val_acc: 0.9225\n",
+ "\n",
+ "Epoch 00673: val_acc did not improve from 0.94225\n",
+ "Epoch 674/100000\n",
+ " - 19s - loss: 0.3765 - acc: 0.9349 - val_loss: 0.4575 - val_acc: 0.8904\n",
+ "\n",
+ "Epoch 00674: val_acc did not improve from 0.94225\n",
+ "Epoch 675/100000\n",
+ " - 18s - loss: 0.3755 - acc: 0.9351 - val_loss: 0.4283 - val_acc: 0.9026\n",
+ "\n",
+ "Epoch 00675: val_acc did not improve from 0.94225\n",
+ "Epoch 676/100000\n",
+ " - 19s - loss: 0.3775 - acc: 0.9347 - val_loss: 0.3565 - val_acc: 0.9392\n",
+ "\n",
+ "Epoch 00676: val_acc did not improve from 0.94225\n",
+ "Epoch 677/100000\n",
+ " - 19s - loss: 0.3818 - acc: 0.9340 - val_loss: 0.3740 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 00677: val_acc did not improve from 0.94225\n",
+ "Epoch 678/100000\n",
+ " - 19s - loss: 0.3809 - acc: 0.9342 - val_loss: 0.3740 - val_acc: 0.9407\n",
+ "\n",
+ "Epoch 00678: val_acc did not improve from 0.94225\n",
+ "Epoch 679/100000\n",
+ " - 19s - loss: 0.3790 - acc: 0.9345 - val_loss: 0.3687 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 00679: val_acc did not improve from 0.94225\n",
+ "Epoch 680/100000\n",
+ " - 19s - loss: 0.3814 - acc: 0.9344 - val_loss: 0.4969 - val_acc: 0.8720\n",
+ "\n",
+ "Epoch 00680: val_acc did not improve from 0.94225\n",
+ "Epoch 681/100000\n",
+ " - 19s - loss: 0.3800 - acc: 0.9345 - val_loss: 0.3764 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 00681: val_acc did not improve from 0.94225\n",
+ "Epoch 682/100000\n",
+ " - 19s - loss: 0.3781 - acc: 0.9353 - val_loss: 0.3978 - val_acc: 0.9203\n",
+ "\n",
+ "Epoch 00682: val_acc did not improve from 0.94225\n",
+ "Epoch 683/100000\n",
+ " - 19s - loss: 0.3776 - acc: 0.9346 - val_loss: 0.3640 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 00683: val_acc did not improve from 0.94225\n",
+ "Epoch 684/100000\n",
+ " - 19s - loss: 0.3788 - acc: 0.9346 - val_loss: 0.3716 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 00684: val_acc did not improve from 0.94225\n",
+ "Epoch 685/100000\n",
+ " - 19s - loss: 0.3757 - acc: 0.9347 - val_loss: 0.4724 - val_acc: 0.8705\n",
+ "\n",
+ "Epoch 00685: val_acc did not improve from 0.94225\n",
+ "Epoch 686/100000\n",
+ " - 19s - loss: 0.3757 - acc: 0.9352 - val_loss: 0.4218 - val_acc: 0.9005\n",
+ "\n",
+ "Epoch 00686: val_acc did not improve from 0.94225\n",
+ "Epoch 687/100000\n",
+ " - 19s - loss: 0.3751 - acc: 0.9354 - val_loss: 0.3734 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 00687: val_acc did not improve from 0.94225\n",
+ "Epoch 688/100000\n",
+ " - 19s - loss: 0.3787 - acc: 0.9342 - val_loss: 0.4124 - val_acc: 0.9099\n",
+ "\n",
+ "Epoch 00688: val_acc did not improve from 0.94225\n",
+ "Epoch 689/100000\n",
+ " - 19s - loss: 0.3804 - acc: 0.9340 - val_loss: 0.4480 - val_acc: 0.8794\n",
+ "\n",
+ "Epoch 00689: val_acc did not improve from 0.94225\n",
+ "Epoch 690/100000\n",
+ " - 19s - loss: 0.3807 - acc: 0.9339 - val_loss: 0.3826 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 00690: val_acc did not improve from 0.94225\n",
+ "Epoch 691/100000\n",
+ " - 19s - loss: 0.3803 - acc: 0.9342 - val_loss: 0.3705 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 00691: val_acc did not improve from 0.94225\n",
+ "Epoch 692/100000\n",
+ " - 19s - loss: 0.3779 - acc: 0.9348 - val_loss: 0.3717 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 00692: val_acc did not improve from 0.94225\n",
+ "Epoch 693/100000\n",
+ " - 19s - loss: 0.3777 - acc: 0.9354 - val_loss: 0.4409 - val_acc: 0.8942\n",
+ "\n",
+ "Epoch 00693: val_acc did not improve from 0.94225\n",
+ "Epoch 694/100000\n",
+ " - 19s - loss: 0.3798 - acc: 0.9343 - val_loss: 0.3922 - val_acc: 0.9222\n",
+ "\n",
+ "Epoch 00694: val_acc did not improve from 0.94225\n",
+ "Epoch 695/100000\n",
+ " - 19s - loss: 0.3771 - acc: 0.9348 - val_loss: 0.3740 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 00695: val_acc did not improve from 0.94225\n",
+ "Epoch 696/100000\n",
+ " - 19s - loss: 0.3790 - acc: 0.9342 - val_loss: 0.3680 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 00696: val_acc did not improve from 0.94225\n",
+ "Epoch 697/100000\n",
+ " - 19s - loss: 0.3761 - acc: 0.9349 - val_loss: 0.7643 - val_acc: 0.7615\n",
+ "\n",
+ "Epoch 00697: val_acc did not improve from 0.94225\n",
+ "Epoch 698/100000\n",
+ " - 19s - loss: 0.3778 - acc: 0.9352 - val_loss: 0.4694 - val_acc: 0.8857\n",
+ "\n",
+ "Epoch 00698: val_acc did not improve from 0.94225\n",
+ "Epoch 699/100000\n",
+ " - 19s - loss: 0.3781 - acc: 0.9343 - val_loss: 0.3801 - val_acc: 0.9232\n",
+ "\n",
+ "Epoch 00699: val_acc did not improve from 0.94225\n",
+ "Epoch 700/100000\n",
+ " - 18s - loss: 0.3782 - acc: 0.9351 - val_loss: 0.3661 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 00700: val_acc did not improve from 0.94225\n",
+ "Epoch 701/100000\n",
+ " - 19s - loss: 0.3770 - acc: 0.9351 - val_loss: 0.3751 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 00701: val_acc did not improve from 0.94225\n",
+ "Epoch 702/100000\n",
+ " - 18s - loss: 0.3818 - acc: 0.9346 - val_loss: 0.3845 - val_acc: 0.9235\n",
+ "\n",
+ "Epoch 00702: val_acc did not improve from 0.94225\n",
+ "Epoch 703/100000\n",
+ " - 18s - loss: 0.3801 - acc: 0.9343 - val_loss: 0.3782 - val_acc: 0.9285\n",
+ "\n",
+ "Epoch 00703: val_acc did not improve from 0.94225\n",
+ "Epoch 704/100000\n",
+ " - 19s - loss: 0.3767 - acc: 0.9353 - val_loss: 0.3924 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 00704: val_acc did not improve from 0.94225\n",
+ "Epoch 705/100000\n",
+ " - 19s - loss: 0.3774 - acc: 0.9344 - val_loss: 0.3659 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 00705: val_acc did not improve from 0.94225\n",
+ "Epoch 706/100000\n",
+ " - 20s - loss: 0.3777 - acc: 0.9349 - val_loss: 0.4487 - val_acc: 0.8984\n",
+ "\n",
+ "Epoch 00706: val_acc did not improve from 0.94225\n",
+ "Epoch 707/100000\n",
+ " - 19s - loss: 0.3790 - acc: 0.9346 - val_loss: 0.3673 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00707: val_acc did not improve from 0.94225\n",
+ "Epoch 708/100000\n",
+ " - 19s - loss: 0.3776 - acc: 0.9347 - val_loss: 0.4078 - val_acc: 0.9167\n",
+ "\n",
+ "Epoch 00708: val_acc did not improve from 0.94225\n",
+ "Epoch 709/100000\n",
+ " - 19s - loss: 0.3796 - acc: 0.9343 - val_loss: 0.3727 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 00709: val_acc did not improve from 0.94225\n",
+ "Epoch 710/100000\n",
+ " - 19s - loss: 0.3769 - acc: 0.9351 - val_loss: 0.3802 - val_acc: 0.9302\n",
+ "\n",
+ "Epoch 00710: val_acc did not improve from 0.94225\n",
+ "Epoch 711/100000\n",
+ " - 19s - loss: 0.3801 - acc: 0.9348 - val_loss: 0.3875 - val_acc: 0.9267\n",
+ "\n",
+ "Epoch 00711: val_acc did not improve from 0.94225\n",
+ "Epoch 712/100000\n",
+ " - 19s - loss: 0.3786 - acc: 0.9345 - val_loss: 0.4152 - val_acc: 0.9036\n",
+ "\n",
+ "Epoch 00712: val_acc did not improve from 0.94225\n",
+ "Epoch 713/100000\n",
+ " - 19s - loss: 0.3796 - acc: 0.9340 - val_loss: 0.3615 - val_acc: 0.9399\n",
+ "\n",
+ "Epoch 00713: val_acc did not improve from 0.94225\n",
+ "Epoch 714/100000\n",
+ " - 19s - loss: 0.3792 - acc: 0.9351 - val_loss: 0.3711 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 00714: val_acc did not improve from 0.94225\n",
+ "Epoch 715/100000\n",
+ " - 18s - loss: 0.3822 - acc: 0.9342 - val_loss: 0.4784 - val_acc: 0.8695\n",
+ "\n",
+ "Epoch 00715: val_acc did not improve from 0.94225\n",
+ "Epoch 716/100000\n",
+ " - 19s - loss: 0.3792 - acc: 0.9354 - val_loss: 0.3992 - val_acc: 0.9137\n",
+ "\n",
+ "Epoch 00716: val_acc did not improve from 0.94225\n",
+ "Epoch 717/100000\n",
+ " - 18s - loss: 0.3787 - acc: 0.9343 - val_loss: 0.4005 - val_acc: 0.9248\n",
+ "\n",
+ "Epoch 00717: val_acc did not improve from 0.94225\n",
+ "Epoch 718/100000\n",
+ " - 18s - loss: 0.3802 - acc: 0.9337 - val_loss: 0.3626 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 00718: val_acc did not improve from 0.94225\n",
+ "Epoch 719/100000\n",
+ " - 19s - loss: 0.3782 - acc: 0.9345 - val_loss: 0.3770 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 00719: val_acc did not improve from 0.94225\n",
+ "Epoch 720/100000\n",
+ " - 18s - loss: 0.3778 - acc: 0.9354 - val_loss: 0.3678 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 00720: val_acc did not improve from 0.94225\n",
+ "Epoch 721/100000\n",
+ " - 18s - loss: 0.3776 - acc: 0.9348 - val_loss: 0.3646 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00721: val_acc did not improve from 0.94225\n",
+ "Epoch 722/100000\n",
+ " - 18s - loss: 0.3790 - acc: 0.9347 - val_loss: 0.3897 - val_acc: 0.9181\n",
+ "\n",
+ "Epoch 00722: val_acc did not improve from 0.94225\n",
+ "Epoch 723/100000\n",
+ " - 19s - loss: 0.3793 - acc: 0.9349 - val_loss: 0.3649 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 00723: val_acc did not improve from 0.94225\n",
+ "Epoch 724/100000\n",
+ " - 19s - loss: 0.3795 - acc: 0.9343 - val_loss: 0.3645 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 00724: val_acc did not improve from 0.94225\n",
+ "Epoch 725/100000\n",
+ " - 18s - loss: 0.3767 - acc: 0.9351 - val_loss: 0.3979 - val_acc: 0.9236\n",
+ "\n",
+ "Epoch 00725: val_acc did not improve from 0.94225\n",
+ "Epoch 726/100000\n",
+ " - 19s - loss: 0.3785 - acc: 0.9342 - val_loss: 0.4071 - val_acc: 0.9076\n",
+ "\n",
+ "Epoch 00726: val_acc did not improve from 0.94225\n",
+ "Epoch 727/100000\n",
+ " - 19s - loss: 0.3787 - acc: 0.9339 - val_loss: 0.4158 - val_acc: 0.9085\n",
+ "\n",
+ "Epoch 00727: val_acc did not improve from 0.94225\n",
+ "Epoch 728/100000\n",
+ " - 19s - loss: 0.3781 - acc: 0.9347 - val_loss: 0.4179 - val_acc: 0.9229\n",
+ "\n",
+ "Epoch 00728: val_acc did not improve from 0.94225\n",
+ "Epoch 729/100000\n",
+ " - 19s - loss: 0.3788 - acc: 0.9346 - val_loss: 0.3907 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 00729: val_acc did not improve from 0.94225\n",
+ "Epoch 730/100000\n",
+ " - 19s - loss: 0.3787 - acc: 0.9352 - val_loss: 0.3907 - val_acc: 0.9237\n",
+ "\n",
+ "Epoch 00730: val_acc did not improve from 0.94225\n",
+ "Epoch 731/100000\n",
+ " - 18s - loss: 0.3781 - acc: 0.9349 - val_loss: 0.3692 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 00731: val_acc did not improve from 0.94225\n",
+ "Epoch 732/100000\n",
+ " - 19s - loss: 0.3805 - acc: 0.9337 - val_loss: 0.4081 - val_acc: 0.9055\n",
+ "\n",
+ "Epoch 00732: val_acc did not improve from 0.94225\n",
+ "Epoch 733/100000\n",
+ " - 19s - loss: 0.3785 - acc: 0.9344 - val_loss: 0.3692 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00733: val_acc did not improve from 0.94225\n",
+ "Epoch 734/100000\n",
+ " - 19s - loss: 0.3797 - acc: 0.9350 - val_loss: 0.3772 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 00734: val_acc did not improve from 0.94225\n",
+ "Epoch 735/100000\n",
+ " - 18s - loss: 0.3807 - acc: 0.9345 - val_loss: 0.3739 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 00735: val_acc did not improve from 0.94225\n",
+ "Epoch 736/100000\n",
+ " - 19s - loss: 0.3767 - acc: 0.9350 - val_loss: 0.3630 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 00736: val_acc did not improve from 0.94225\n",
+ "Epoch 737/100000\n",
+ " - 18s - loss: 0.3769 - acc: 0.9347 - val_loss: 0.4213 - val_acc: 0.9074\n",
+ "\n",
+ "Epoch 00737: val_acc did not improve from 0.94225\n",
+ "Epoch 738/100000\n",
+ " - 19s - loss: 0.3807 - acc: 0.9336 - val_loss: 0.3637 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 00738: val_acc did not improve from 0.94225\n",
+ "Epoch 739/100000\n",
+ " - 19s - loss: 0.3765 - acc: 0.9351 - val_loss: 0.3808 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 00739: val_acc did not improve from 0.94225\n",
+ "Epoch 740/100000\n",
+ " - 18s - loss: 0.3774 - acc: 0.9346 - val_loss: 0.3783 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 00740: val_acc did not improve from 0.94225\n",
+ "Epoch 741/100000\n",
+ " - 19s - loss: 0.3792 - acc: 0.9350 - val_loss: 0.3681 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 00741: val_acc did not improve from 0.94225\n",
+ "Epoch 742/100000\n",
+ " - 18s - loss: 0.3800 - acc: 0.9342 - val_loss: 0.3957 - val_acc: 0.9172\n",
+ "\n",
+ "Epoch 00742: val_acc did not improve from 0.94225\n",
+ "Epoch 743/100000\n",
+ " - 19s - loss: 0.3777 - acc: 0.9348 - val_loss: 0.3887 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 00743: val_acc did not improve from 0.94225\n",
+ "Epoch 744/100000\n",
+ " - 18s - loss: 0.3780 - acc: 0.9340 - val_loss: 0.3671 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 00744: val_acc did not improve from 0.94225\n",
+ "Epoch 745/100000\n",
+ " - 19s - loss: 0.3772 - acc: 0.9347 - val_loss: 0.3989 - val_acc: 0.9130\n",
+ "\n",
+ "Epoch 00745: val_acc did not improve from 0.94225\n",
+ "Epoch 746/100000\n",
+ " - 19s - loss: 0.3803 - acc: 0.9341 - val_loss: 0.3751 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 00746: val_acc did not improve from 0.94225\n",
+ "Epoch 747/100000\n",
+ " - 18s - loss: 0.3765 - acc: 0.9351 - val_loss: 0.4008 - val_acc: 0.9113\n",
+ "\n",
+ "Epoch 00747: val_acc did not improve from 0.94225\n",
+ "Epoch 748/100000\n",
+ " - 19s - loss: 0.3793 - acc: 0.9350 - val_loss: 0.4131 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 00748: val_acc did not improve from 0.94225\n",
+ "Epoch 749/100000\n",
+ " - 18s - loss: 0.3781 - acc: 0.9348 - val_loss: 0.4171 - val_acc: 0.9108\n",
+ "\n",
+ "Epoch 00749: val_acc did not improve from 0.94225\n",
+ "Epoch 750/100000\n",
+ " - 19s - loss: 0.3776 - acc: 0.9348 - val_loss: 0.3574 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 00750: val_acc did not improve from 0.94225\n",
+ "Epoch 751/100000\n",
+ " - 19s - loss: 0.3760 - acc: 0.9349 - val_loss: 0.3613 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 00751: val_acc did not improve from 0.94225\n",
+ "Epoch 752/100000\n",
+ " - 19s - loss: 0.3798 - acc: 0.9343 - val_loss: 0.3728 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 00752: val_acc did not improve from 0.94225\n",
+ "Epoch 753/100000\n",
+ " - 18s - loss: 0.3758 - acc: 0.9352 - val_loss: 0.3673 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 00753: val_acc did not improve from 0.94225\n",
+ "Epoch 754/100000\n",
+ " - 19s - loss: 0.3793 - acc: 0.9345 - val_loss: 0.3728 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 00754: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 00754: ReduceLROnPlateau reducing learning rate to 0.0008145062311086804.\n",
+ "Epoch 755/100000\n",
+ " - 19s - loss: 0.3713 - acc: 0.9343 - val_loss: 0.3890 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 00755: val_acc did not improve from 0.94225\n",
+ "Epoch 756/100000\n",
+ " - 18s - loss: 0.3683 - acc: 0.9347 - val_loss: 0.3545 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 00756: val_acc did not improve from 0.94225\n",
+ "Epoch 757/100000\n",
+ " - 18s - loss: 0.3679 - acc: 0.9350 - val_loss: 0.4251 - val_acc: 0.9188\n",
+ "\n",
+ "Epoch 00757: val_acc did not improve from 0.94225\n",
+ "Epoch 758/100000\n",
+ " - 19s - loss: 0.3691 - acc: 0.9353 - val_loss: 0.3651 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 00758: val_acc did not improve from 0.94225\n",
+ "Epoch 759/100000\n",
+ " - 19s - loss: 0.3676 - acc: 0.9355 - val_loss: 0.3806 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 00759: val_acc did not improve from 0.94225\n",
+ "Epoch 760/100000\n",
+ " - 19s - loss: 0.3665 - acc: 0.9357 - val_loss: 0.3723 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 00760: val_acc did not improve from 0.94225\n",
+ "Epoch 761/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9347 - val_loss: 0.3615 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 00761: val_acc did not improve from 0.94225\n",
+ "Epoch 762/100000\n",
+ " - 18s - loss: 0.3696 - acc: 0.9352 - val_loss: 0.3999 - val_acc: 0.9171\n",
+ "\n",
+ "Epoch 00762: val_acc did not improve from 0.94225\n",
+ "Epoch 763/100000\n",
+ " - 19s - loss: 0.3677 - acc: 0.9357 - val_loss: 0.4208 - val_acc: 0.9109\n",
+ "\n",
+ "Epoch 00763: val_acc did not improve from 0.94225\n",
+ "Epoch 764/100000\n",
+ " - 18s - loss: 0.3675 - acc: 0.9352 - val_loss: 0.3548 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 00764: val_acc did not improve from 0.94225\n",
+ "Epoch 765/100000\n",
+ " - 19s - loss: 0.3658 - acc: 0.9356 - val_loss: 0.3548 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 00765: val_acc did not improve from 0.94225\n",
+ "Epoch 766/100000\n",
+ " - 19s - loss: 0.3701 - acc: 0.9341 - val_loss: 0.3799 - val_acc: 0.9205\n",
+ "\n",
+ "Epoch 00766: val_acc did not improve from 0.94225\n",
+ "Epoch 767/100000\n",
+ " - 18s - loss: 0.3678 - acc: 0.9352 - val_loss: 0.3727 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 00767: val_acc did not improve from 0.94225\n",
+ "Epoch 768/100000\n",
+ " - 18s - loss: 0.3695 - acc: 0.9346 - val_loss: 0.3752 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 00768: val_acc did not improve from 0.94225\n",
+ "Epoch 769/100000\n",
+ " - 19s - loss: 0.3677 - acc: 0.9353 - val_loss: 0.3650 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 00769: val_acc did not improve from 0.94225\n",
+ "Epoch 770/100000\n",
+ " - 18s - loss: 0.3683 - acc: 0.9355 - val_loss: 0.3740 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 00770: val_acc did not improve from 0.94225\n",
+ "Epoch 771/100000\n",
+ " - 19s - loss: 0.3668 - acc: 0.9355 - val_loss: 0.4004 - val_acc: 0.9144\n",
+ "\n",
+ "Epoch 00771: val_acc did not improve from 0.94225\n",
+ "Epoch 772/100000\n",
+ " - 18s - loss: 0.3708 - acc: 0.9345 - val_loss: 0.3804 - val_acc: 0.9221\n",
+ "\n",
+ "Epoch 00772: val_acc did not improve from 0.94225\n",
+ "Epoch 773/100000\n",
+ " - 18s - loss: 0.3683 - acc: 0.9355 - val_loss: 0.3922 - val_acc: 0.9177\n",
+ "\n",
+ "Epoch 00773: val_acc did not improve from 0.94225\n",
+ "Epoch 774/100000\n",
+ " - 19s - loss: 0.3687 - acc: 0.9353 - val_loss: 0.3916 - val_acc: 0.9178\n",
+ "\n",
+ "Epoch 00774: val_acc did not improve from 0.94225\n",
+ "Epoch 775/100000\n",
+ " - 19s - loss: 0.3706 - acc: 0.9349 - val_loss: 0.3752 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 00775: val_acc did not improve from 0.94225\n",
+ "Epoch 776/100000\n",
+ " - 18s - loss: 0.3695 - acc: 0.9352 - val_loss: 0.3545 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 00776: val_acc did not improve from 0.94225\n",
+ "Epoch 777/100000\n",
+ " - 19s - loss: 0.3671 - acc: 0.9352 - val_loss: 0.4058 - val_acc: 0.9146\n",
+ "\n",
+ "Epoch 00777: val_acc did not improve from 0.94225\n",
+ "Epoch 778/100000\n",
+ " - 19s - loss: 0.3674 - acc: 0.9352 - val_loss: 0.3854 - val_acc: 0.9218\n",
+ "\n",
+ "Epoch 00778: val_acc did not improve from 0.94225\n",
+ "Epoch 779/100000\n",
+ " - 18s - loss: 0.3666 - acc: 0.9359 - val_loss: 0.3708 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 00779: val_acc did not improve from 0.94225\n",
+ "Epoch 780/100000\n",
+ " - 19s - loss: 0.3717 - acc: 0.9346 - val_loss: 0.4261 - val_acc: 0.9064\n",
+ "\n",
+ "Epoch 00780: val_acc did not improve from 0.94225\n",
+ "Epoch 781/100000\n",
+ " - 18s - loss: 0.3703 - acc: 0.9350 - val_loss: 0.3795 - val_acc: 0.9210\n",
+ "\n",
+ "Epoch 00781: val_acc did not improve from 0.94225\n",
+ "Epoch 782/100000\n",
+ " - 18s - loss: 0.3649 - acc: 0.9363 - val_loss: 0.4089 - val_acc: 0.9240\n",
+ "\n",
+ "Epoch 00782: val_acc did not improve from 0.94225\n",
+ "Epoch 783/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9345 - val_loss: 0.5078 - val_acc: 0.8677\n",
+ "\n",
+ "Epoch 00783: val_acc did not improve from 0.94225\n",
+ "Epoch 784/100000\n",
+ " - 19s - loss: 0.3679 - acc: 0.9350 - val_loss: 0.3764 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 00784: val_acc did not improve from 0.94225\n",
+ "Epoch 785/100000\n",
+ " - 19s - loss: 0.3681 - acc: 0.9355 - val_loss: 0.3577 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 00785: val_acc did not improve from 0.94225\n",
+ "Epoch 786/100000\n",
+ " - 19s - loss: 0.3695 - acc: 0.9353 - val_loss: 0.3614 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 00786: val_acc did not improve from 0.94225\n",
+ "Epoch 787/100000\n",
+ " - 18s - loss: 0.3695 - acc: 0.9354 - val_loss: 0.3585 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 00787: val_acc did not improve from 0.94225\n",
+ "Epoch 788/100000\n",
+ " - 19s - loss: 0.3709 - acc: 0.9352 - val_loss: 0.3749 - val_acc: 0.9282\n",
+ "\n",
+ "Epoch 00788: val_acc did not improve from 0.94225\n",
+ "Epoch 789/100000\n",
+ " - 19s - loss: 0.3670 - acc: 0.9356 - val_loss: 0.3633 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 00789: val_acc did not improve from 0.94225\n",
+ "Epoch 790/100000\n",
+ " - 18s - loss: 0.3695 - acc: 0.9357 - val_loss: 0.3663 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 00790: val_acc did not improve from 0.94225\n",
+ "Epoch 791/100000\n",
+ " - 19s - loss: 0.3705 - acc: 0.9345 - val_loss: 0.3555 - val_acc: 0.9392\n",
+ "\n",
+ "Epoch 00791: val_acc did not improve from 0.94225\n",
+ "Epoch 792/100000\n",
+ " - 18s - loss: 0.3698 - acc: 0.9345 - val_loss: 0.4211 - val_acc: 0.9135\n",
+ "\n",
+ "Epoch 00792: val_acc did not improve from 0.94225\n",
+ "Epoch 793/100000\n",
+ " - 19s - loss: 0.3690 - acc: 0.9351 - val_loss: 0.3914 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 00793: val_acc did not improve from 0.94225\n",
+ "Epoch 794/100000\n",
+ " - 18s - loss: 0.3662 - acc: 0.9358 - val_loss: 0.3840 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 00794: val_acc did not improve from 0.94225\n",
+ "Epoch 795/100000\n",
+ " - 19s - loss: 0.3676 - acc: 0.9353 - val_loss: 0.3589 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 00795: val_acc did not improve from 0.94225\n",
+ "Epoch 796/100000\n",
+ " - 19s - loss: 0.3832 - acc: 0.9347 - val_loss: 0.3726 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 00796: val_acc did not improve from 0.94225\n",
+ "Epoch 797/100000\n",
+ " - 18s - loss: 0.3701 - acc: 0.9358 - val_loss: 0.3664 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 00797: val_acc did not improve from 0.94225\n",
+ "Epoch 798/100000\n",
+ " - 19s - loss: 0.3716 - acc: 0.9347 - val_loss: 0.3745 - val_acc: 0.9304\n",
+ "\n",
+ "Epoch 00798: val_acc did not improve from 0.94225\n",
+ "Epoch 799/100000\n",
+ " - 18s - loss: 0.3690 - acc: 0.9349 - val_loss: 0.3630 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 00799: val_acc did not improve from 0.94225\n",
+ "Epoch 800/100000\n",
+ " - 19s - loss: 0.3707 - acc: 0.9348 - val_loss: 0.3807 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 00800: val_acc did not improve from 0.94225\n",
+ "Epoch 801/100000\n",
+ " - 18s - loss: 0.3699 - acc: 0.9350 - val_loss: 0.3994 - val_acc: 0.9158\n",
+ "\n",
+ "Epoch 00801: val_acc did not improve from 0.94225\n",
+ "Epoch 802/100000\n",
+ " - 19s - loss: 0.3686 - acc: 0.9353 - val_loss: 0.3653 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 00802: val_acc did not improve from 0.94225\n",
+ "Epoch 803/100000\n",
+ " - 19s - loss: 0.3681 - acc: 0.9354 - val_loss: 0.3653 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 00803: val_acc did not improve from 0.94225\n",
+ "Epoch 804/100000\n",
+ " - 19s - loss: 0.3674 - acc: 0.9350 - val_loss: 0.3856 - val_acc: 0.9192\n",
+ "\n",
+ "Epoch 00804: val_acc did not improve from 0.94225\n",
+ "Epoch 805/100000\n",
+ " - 19s - loss: 0.3688 - acc: 0.9347 - val_loss: 0.3610 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 00805: val_acc did not improve from 0.94225\n",
+ "Epoch 806/100000\n",
+ " - 19s - loss: 0.3667 - acc: 0.9351 - val_loss: 0.4244 - val_acc: 0.9028\n",
+ "\n",
+ "Epoch 00806: val_acc did not improve from 0.94225\n",
+ "Epoch 807/100000\n",
+ " - 19s - loss: 0.3660 - acc: 0.9362 - val_loss: 0.3534 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 00807: val_acc did not improve from 0.94225\n",
+ "Epoch 808/100000\n",
+ " - 18s - loss: 0.3693 - acc: 0.9349 - val_loss: 0.4597 - val_acc: 0.8820\n",
+ "\n",
+ "Epoch 00808: val_acc did not improve from 0.94225\n",
+ "Epoch 809/100000\n",
+ " - 19s - loss: 0.3694 - acc: 0.9350 - val_loss: 0.3918 - val_acc: 0.9227\n",
+ "\n",
+ "Epoch 00809: val_acc did not improve from 0.94225\n",
+ "Epoch 810/100000\n",
+ " - 18s - loss: 0.3702 - acc: 0.9345 - val_loss: 0.3701 - val_acc: 0.9280\n",
+ "\n",
+ "Epoch 00810: val_acc did not improve from 0.94225\n",
+ "Epoch 811/100000\n",
+ " - 19s - loss: 0.3720 - acc: 0.9345 - val_loss: 0.3940 - val_acc: 0.9175\n",
+ "\n",
+ "Epoch 00811: val_acc did not improve from 0.94225\n",
+ "Epoch 812/100000\n",
+ " - 19s - loss: 0.3686 - acc: 0.9347 - val_loss: 0.4201 - val_acc: 0.9053\n",
+ "\n",
+ "Epoch 00812: val_acc did not improve from 0.94225\n",
+ "Epoch 813/100000\n",
+ " - 19s - loss: 0.3684 - acc: 0.9347 - val_loss: 0.4689 - val_acc: 0.9188\n",
+ "\n",
+ "Epoch 00813: val_acc did not improve from 0.94225\n",
+ "Epoch 814/100000\n",
+ " - 18s - loss: 0.3714 - acc: 0.9352 - val_loss: 0.3492 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 00814: val_acc did not improve from 0.94225\n",
+ "Epoch 815/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9348 - val_loss: 0.3726 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 00815: val_acc did not improve from 0.94225\n",
+ "Epoch 816/100000\n",
+ " - 18s - loss: 0.3685 - acc: 0.9349 - val_loss: 0.3745 - val_acc: 0.9278\n",
+ "\n",
+ "Epoch 00816: val_acc did not improve from 0.94225\n",
+ "Epoch 817/100000\n",
+ " - 18s - loss: 0.3696 - acc: 0.9348 - val_loss: 0.3696 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 00817: val_acc did not improve from 0.94225\n",
+ "Epoch 818/100000\n",
+ " - 18s - loss: 0.3682 - acc: 0.9360 - val_loss: 0.4135 - val_acc: 0.9065\n",
+ "\n",
+ "Epoch 00818: val_acc did not improve from 0.94225\n",
+ "Epoch 819/100000\n",
+ " - 19s - loss: 0.3686 - acc: 0.9358 - val_loss: 0.4224 - val_acc: 0.9000\n",
+ "\n",
+ "Epoch 00819: val_acc did not improve from 0.94225\n",
+ "Epoch 820/100000\n",
+ " - 18s - loss: 0.3702 - acc: 0.9351 - val_loss: 0.4221 - val_acc: 0.9104\n",
+ "\n",
+ "Epoch 00820: val_acc did not improve from 0.94225\n",
+ "Epoch 821/100000\n",
+ " - 19s - loss: 0.3701 - acc: 0.9348 - val_loss: 0.3642 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 00821: val_acc did not improve from 0.94225\n",
+ "Epoch 822/100000\n",
+ " - 19s - loss: 0.3716 - acc: 0.9336 - val_loss: 0.3812 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 00822: val_acc did not improve from 0.94225\n",
+ "Epoch 823/100000\n",
+ " - 19s - loss: 0.3678 - acc: 0.9352 - val_loss: 0.4046 - val_acc: 0.9226\n",
+ "\n",
+ "Epoch 00823: val_acc did not improve from 0.94225\n",
+ "Epoch 824/100000\n",
+ " - 18s - loss: 0.3693 - acc: 0.9348 - val_loss: 0.3854 - val_acc: 0.9211\n",
+ "\n",
+ "Epoch 00824: val_acc did not improve from 0.94225\n",
+ "Epoch 825/100000\n",
+ " - 19s - loss: 0.3699 - acc: 0.9350 - val_loss: 0.4669 - val_acc: 0.8761\n",
+ "\n",
+ "Epoch 00825: val_acc did not improve from 0.94225\n",
+ "Epoch 826/100000\n",
+ " - 19s - loss: 0.3718 - acc: 0.9346 - val_loss: 0.5650 - val_acc: 0.8329\n",
+ "\n",
+ "Epoch 00826: val_acc did not improve from 0.94225\n",
+ "Epoch 827/100000\n",
+ " - 18s - loss: 0.3696 - acc: 0.9358 - val_loss: 0.3839 - val_acc: 0.9282\n",
+ "\n",
+ "Epoch 00827: val_acc did not improve from 0.94225\n",
+ "Epoch 828/100000\n",
+ " - 19s - loss: 0.3731 - acc: 0.9352 - val_loss: 0.3533 - val_acc: 0.9397\n",
+ "\n",
+ "Epoch 00828: val_acc did not improve from 0.94225\n",
+ "Epoch 829/100000\n",
+ " - 18s - loss: 0.3722 - acc: 0.9349 - val_loss: 0.3716 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 00829: val_acc did not improve from 0.94225\n",
+ "Epoch 830/100000\n",
+ " - 19s - loss: 0.3673 - acc: 0.9359 - val_loss: 0.3555 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 00830: val_acc did not improve from 0.94225\n",
+ "Epoch 831/100000\n",
+ " - 19s - loss: 0.3688 - acc: 0.9345 - val_loss: 0.3586 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 00831: val_acc did not improve from 0.94225\n",
+ "Epoch 832/100000\n",
+ " - 19s - loss: 0.3683 - acc: 0.9353 - val_loss: 0.3840 - val_acc: 0.9207\n",
+ "\n",
+ "Epoch 00832: val_acc did not improve from 0.94225\n",
+ "Epoch 833/100000\n",
+ " - 19s - loss: 0.3674 - acc: 0.9352 - val_loss: 0.4149 - val_acc: 0.9095\n",
+ "\n",
+ "Epoch 00833: val_acc did not improve from 0.94225\n",
+ "Epoch 834/100000\n",
+ " - 19s - loss: 0.3661 - acc: 0.9360 - val_loss: 0.4385 - val_acc: 0.8936\n",
+ "\n",
+ "Epoch 00834: val_acc did not improve from 0.94225\n",
+ "Epoch 835/100000\n",
+ " - 19s - loss: 0.3715 - acc: 0.9341 - val_loss: 0.5857 - val_acc: 0.8354\n",
+ "\n",
+ "Epoch 00835: val_acc did not improve from 0.94225\n",
+ "Epoch 836/100000\n",
+ " - 19s - loss: 0.3706 - acc: 0.9348 - val_loss: 0.4281 - val_acc: 0.9051\n",
+ "\n",
+ "Epoch 00836: val_acc did not improve from 0.94225\n",
+ "Epoch 837/100000\n",
+ " - 19s - loss: 0.3688 - acc: 0.9355 - val_loss: 0.3857 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 00837: val_acc did not improve from 0.94225\n",
+ "Epoch 838/100000\n",
+ " - 19s - loss: 0.3699 - acc: 0.9350 - val_loss: 0.3686 - val_acc: 0.9396\n",
+ "\n",
+ "Epoch 00838: val_acc did not improve from 0.94225\n",
+ "Epoch 839/100000\n",
+ " - 19s - loss: 0.3702 - acc: 0.9346 - val_loss: 0.3547 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 00839: val_acc did not improve from 0.94225\n",
+ "Epoch 840/100000\n",
+ " - 18s - loss: 0.3677 - acc: 0.9356 - val_loss: 0.3726 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00840: val_acc did not improve from 0.94225\n",
+ "Epoch 841/100000\n",
+ " - 19s - loss: 0.3678 - acc: 0.9349 - val_loss: 0.3826 - val_acc: 0.9220\n",
+ "\n",
+ "Epoch 00841: val_acc did not improve from 0.94225\n",
+ "Epoch 842/100000\n",
+ " - 19s - loss: 0.3696 - acc: 0.9346 - val_loss: 0.3728 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00842: val_acc did not improve from 0.94225\n",
+ "Epoch 843/100000\n",
+ " - 19s - loss: 0.3719 - acc: 0.9346 - val_loss: 0.4048 - val_acc: 0.9100\n",
+ "\n",
+ "Epoch 00843: val_acc did not improve from 0.94225\n",
+ "Epoch 844/100000\n",
+ " - 19s - loss: 0.3709 - acc: 0.9351 - val_loss: 0.4000 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 00844: val_acc did not improve from 0.94225\n",
+ "Epoch 845/100000\n",
+ " - 19s - loss: 0.3708 - acc: 0.9351 - val_loss: 0.3479 - val_acc: 0.9405\n",
+ "\n",
+ "Epoch 00845: val_acc did not improve from 0.94225\n",
+ "Epoch 846/100000\n",
+ " - 18s - loss: 0.3708 - acc: 0.9354 - val_loss: 0.4133 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 00846: val_acc did not improve from 0.94225\n",
+ "Epoch 847/100000\n",
+ " - 19s - loss: 0.3733 - acc: 0.9349 - val_loss: 0.3578 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 00847: val_acc did not improve from 0.94225\n",
+ "Epoch 848/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9355 - val_loss: 0.4061 - val_acc: 0.9177\n",
+ "\n",
+ "Epoch 00848: val_acc did not improve from 0.94225\n",
+ "Epoch 849/100000\n",
+ " - 19s - loss: 0.3685 - acc: 0.9348 - val_loss: 0.4407 - val_acc: 0.9061\n",
+ "\n",
+ "Epoch 00849: val_acc did not improve from 0.94225\n",
+ "Epoch 850/100000\n",
+ " - 19s - loss: 0.3691 - acc: 0.9350 - val_loss: 0.3587 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 00850: val_acc did not improve from 0.94225\n",
+ "Epoch 851/100000\n",
+ " - 19s - loss: 0.3695 - acc: 0.9348 - val_loss: 0.4356 - val_acc: 0.8978\n",
+ "\n",
+ "Epoch 00851: val_acc did not improve from 0.94225\n",
+ "Epoch 852/100000\n",
+ " - 19s - loss: 0.3699 - acc: 0.9347 - val_loss: 0.3590 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 00852: val_acc did not improve from 0.94225\n",
+ "Epoch 853/100000\n",
+ " - 19s - loss: 0.3714 - acc: 0.9348 - val_loss: 0.3570 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00853: val_acc did not improve from 0.94225\n",
+ "Epoch 854/100000\n",
+ " - 19s - loss: 0.3685 - acc: 0.9358 - val_loss: 0.3602 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 00854: val_acc did not improve from 0.94225\n",
+ "Epoch 855/100000\n",
+ " - 18s - loss: 0.3713 - acc: 0.9346 - val_loss: 0.3806 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 00855: val_acc did not improve from 0.94225\n",
+ "Epoch 856/100000\n",
+ " - 19s - loss: 0.3712 - acc: 0.9352 - val_loss: 0.4086 - val_acc: 0.9274\n",
+ "\n",
+ "Epoch 00856: val_acc did not improve from 0.94225\n",
+ "Epoch 857/100000\n",
+ " - 18s - loss: 0.3693 - acc: 0.9352 - val_loss: 0.3585 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 00857: val_acc did not improve from 0.94225\n",
+ "Epoch 858/100000\n",
+ " - 19s - loss: 0.3677 - acc: 0.9351 - val_loss: 0.3653 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 00858: val_acc did not improve from 0.94225\n",
+ "Epoch 859/100000\n",
+ " - 19s - loss: 0.3697 - acc: 0.9349 - val_loss: 0.3805 - val_acc: 0.9272\n",
+ "\n",
+ "Epoch 00859: val_acc did not improve from 0.94225\n",
+ "Epoch 860/100000\n",
+ " - 19s - loss: 0.3703 - acc: 0.9356 - val_loss: 0.3670 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 00860: val_acc did not improve from 0.94225\n",
+ "Epoch 861/100000\n",
+ " - 19s - loss: 0.3698 - acc: 0.9354 - val_loss: 0.3690 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 00861: val_acc did not improve from 0.94225\n",
+ "Epoch 862/100000\n",
+ " - 18s - loss: 0.3721 - acc: 0.9357 - val_loss: 0.3699 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 00862: val_acc did not improve from 0.94225\n",
+ "Epoch 863/100000\n",
+ " - 19s - loss: 0.3787 - acc: 0.9351 - val_loss: 0.3664 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 00863: val_acc did not improve from 0.94225\n",
+ "Epoch 864/100000\n",
+ " - 18s - loss: 0.3719 - acc: 0.9348 - val_loss: 0.3663 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 00864: val_acc did not improve from 0.94225\n",
+ "Epoch 865/100000\n",
+ " - 18s - loss: 0.3698 - acc: 0.9355 - val_loss: 0.4007 - val_acc: 0.9042\n",
+ "\n",
+ "Epoch 00865: val_acc did not improve from 0.94225\n",
+ "Epoch 866/100000\n",
+ " - 18s - loss: 0.3682 - acc: 0.9358 - val_loss: 0.4400 - val_acc: 0.8985\n",
+ "\n",
+ "Epoch 00866: val_acc did not improve from 0.94225\n",
+ "Epoch 867/100000\n",
+ " - 19s - loss: 0.3717 - acc: 0.9342 - val_loss: 0.5341 - val_acc: 0.8577\n",
+ "\n",
+ "Epoch 00867: val_acc did not improve from 0.94225\n",
+ "Epoch 868/100000\n",
+ " - 19s - loss: 0.3695 - acc: 0.9348 - val_loss: 0.3563 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 00868: val_acc did not improve from 0.94225\n",
+ "Epoch 869/100000\n",
+ " - 18s - loss: 0.3690 - acc: 0.9349 - val_loss: 0.3616 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 00869: val_acc did not improve from 0.94225\n",
+ "Epoch 870/100000\n",
+ " - 19s - loss: 0.3677 - acc: 0.9357 - val_loss: 0.3499 - val_acc: 0.9406\n",
+ "\n",
+ "Epoch 00870: val_acc did not improve from 0.94225\n",
+ "Epoch 871/100000\n",
+ " - 19s - loss: 0.3688 - acc: 0.9352 - val_loss: 0.4537 - val_acc: 0.8939\n",
+ "\n",
+ "Epoch 00871: val_acc did not improve from 0.94225\n",
+ "Epoch 872/100000\n",
+ " - 18s - loss: 0.3685 - acc: 0.9349 - val_loss: 0.3643 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 00872: val_acc did not improve from 0.94225\n",
+ "Epoch 873/100000\n",
+ " - 19s - loss: 0.3702 - acc: 0.9351 - val_loss: 0.4030 - val_acc: 0.9173\n",
+ "\n",
+ "Epoch 00873: val_acc did not improve from 0.94225\n",
+ "Epoch 874/100000\n",
+ " - 19s - loss: 0.3694 - acc: 0.9351 - val_loss: 0.3518 - val_acc: 0.9393\n",
+ "\n",
+ "Epoch 00874: val_acc did not improve from 0.94225\n",
+ "Epoch 875/100000\n",
+ " - 19s - loss: 0.3690 - acc: 0.9348 - val_loss: 0.3889 - val_acc: 0.9244\n",
+ "\n",
+ "Epoch 00875: val_acc did not improve from 0.94225\n",
+ "Epoch 876/100000\n",
+ " - 19s - loss: 0.3704 - acc: 0.9348 - val_loss: 0.4140 - val_acc: 0.9079\n",
+ "\n",
+ "Epoch 00876: val_acc did not improve from 0.94225\n",
+ "Epoch 877/100000\n",
+ " - 18s - loss: 0.3655 - acc: 0.9362 - val_loss: 0.3707 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 00877: val_acc did not improve from 0.94225\n",
+ "Epoch 878/100000\n",
+ " - 19s - loss: 0.3677 - acc: 0.9356 - val_loss: 0.3727 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00878: val_acc did not improve from 0.94225\n",
+ "Epoch 879/100000\n",
+ " - 18s - loss: 0.3679 - acc: 0.9352 - val_loss: 0.3842 - val_acc: 0.9272\n",
+ "\n",
+ "Epoch 00879: val_acc did not improve from 0.94225\n",
+ "Epoch 880/100000\n",
+ " - 19s - loss: 0.3690 - acc: 0.9350 - val_loss: 0.3659 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 00880: val_acc did not improve from 0.94225\n",
+ "Epoch 881/100000\n",
+ " - 18s - loss: 0.3689 - acc: 0.9360 - val_loss: 0.3472 - val_acc: 0.9410\n",
+ "\n",
+ "Epoch 00881: val_acc did not improve from 0.94225\n",
+ "Epoch 882/100000\n",
+ " - 19s - loss: 0.3709 - acc: 0.9351 - val_loss: 0.3796 - val_acc: 0.9240\n",
+ "\n",
+ "Epoch 00882: val_acc did not improve from 0.94225\n",
+ "Epoch 883/100000\n",
+ " - 18s - loss: 0.3703 - acc: 0.9354 - val_loss: 0.3553 - val_acc: 0.9386\n",
+ "\n",
+ "Epoch 00883: val_acc did not improve from 0.94225\n",
+ "Epoch 884/100000\n",
+ " - 19s - loss: 0.3679 - acc: 0.9360 - val_loss: 0.3717 - val_acc: 0.9386\n",
+ "\n",
+ "Epoch 00884: val_acc did not improve from 0.94225\n",
+ "Epoch 885/100000\n",
+ " - 18s - loss: 0.3733 - acc: 0.9347 - val_loss: 0.3841 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 00885: val_acc did not improve from 0.94225\n",
+ "Epoch 886/100000\n",
+ " - 19s - loss: 0.3725 - acc: 0.9347 - val_loss: 0.3570 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 00886: val_acc did not improve from 0.94225\n",
+ "Epoch 887/100000\n",
+ " - 18s - loss: 0.3687 - acc: 0.9351 - val_loss: 0.3621 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 00887: val_acc did not improve from 0.94225\n",
+ "Epoch 888/100000\n",
+ " - 19s - loss: 0.3679 - acc: 0.9349 - val_loss: 0.3540 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 00888: val_acc did not improve from 0.94225\n",
+ "Epoch 889/100000\n",
+ " - 19s - loss: 0.3691 - acc: 0.9351 - val_loss: 0.3810 - val_acc: 0.9213\n",
+ "\n",
+ "Epoch 00889: val_acc did not improve from 0.94225\n",
+ "Epoch 890/100000\n",
+ " - 19s - loss: 0.3720 - acc: 0.9339 - val_loss: 0.4381 - val_acc: 0.9199\n",
+ "\n",
+ "Epoch 00890: val_acc did not improve from 0.94225\n",
+ "Epoch 891/100000\n",
+ " - 19s - loss: 0.3700 - acc: 0.9356 - val_loss: 0.4386 - val_acc: 0.8918\n",
+ "\n",
+ "Epoch 00891: val_acc did not improve from 0.94225\n",
+ "Epoch 892/100000\n",
+ " - 19s - loss: 0.3788 - acc: 0.9352 - val_loss: 0.3775 - val_acc: 0.9302\n",
+ "\n",
+ "Epoch 00892: val_acc did not improve from 0.94225\n",
+ "Epoch 893/100000\n",
+ " - 19s - loss: 0.3737 - acc: 0.9349 - val_loss: 0.3662 - val_acc: 0.9290\n",
+ "\n",
+ "Epoch 00893: val_acc did not improve from 0.94225\n",
+ "Epoch 894/100000\n",
+ " - 19s - loss: 0.3704 - acc: 0.9355 - val_loss: 0.5097 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00894: val_acc did not improve from 0.94225\n",
+ "Epoch 895/100000\n",
+ " - 19s - loss: 0.3683 - acc: 0.9352 - val_loss: 0.3650 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 00895: val_acc did not improve from 0.94225\n",
+ "Epoch 896/100000\n",
+ " - 19s - loss: 0.3705 - acc: 0.9345 - val_loss: 0.4798 - val_acc: 0.8715\n",
+ "\n",
+ "Epoch 00896: val_acc did not improve from 0.94225\n",
+ "Epoch 897/100000\n",
+ " - 19s - loss: 0.3685 - acc: 0.9353 - val_loss: 0.3714 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 00897: val_acc did not improve from 0.94225\n",
+ "Epoch 898/100000\n",
+ " - 19s - loss: 0.3696 - acc: 0.9353 - val_loss: 0.4371 - val_acc: 0.8925\n",
+ "\n",
+ "Epoch 00898: val_acc did not improve from 0.94225\n",
+ "Epoch 899/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9355 - val_loss: 0.3752 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 00899: val_acc did not improve from 0.94225\n",
+ "Epoch 900/100000\n",
+ " - 18s - loss: 0.3683 - acc: 0.9357 - val_loss: 0.3795 - val_acc: 0.9271\n",
+ "\n",
+ "Epoch 00900: val_acc did not improve from 0.94225\n",
+ "Epoch 901/100000\n",
+ " - 19s - loss: 0.3688 - acc: 0.9354 - val_loss: 0.3578 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 00901: val_acc did not improve from 0.94225\n",
+ "Epoch 902/100000\n",
+ " - 19s - loss: 0.3711 - acc: 0.9342 - val_loss: 0.3597 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 00902: val_acc did not improve from 0.94225\n",
+ "Epoch 903/100000\n",
+ " - 19s - loss: 0.3700 - acc: 0.9351 - val_loss: 0.3584 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 00903: val_acc did not improve from 0.94225\n",
+ "Epoch 904/100000\n",
+ " - 19s - loss: 0.3694 - acc: 0.9356 - val_loss: 0.3729 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 00904: val_acc did not improve from 0.94225\n",
+ "Epoch 905/100000\n",
+ " - 19s - loss: 0.3691 - acc: 0.9354 - val_loss: 0.3595 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 00905: val_acc did not improve from 0.94225\n",
+ "Epoch 906/100000\n",
+ " - 18s - loss: 0.3675 - acc: 0.9353 - val_loss: 0.3856 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 00906: val_acc did not improve from 0.94225\n",
+ "Epoch 907/100000\n",
+ " - 19s - loss: 0.3710 - acc: 0.9339 - val_loss: 0.3753 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 00907: val_acc did not improve from 0.94225\n",
+ "Epoch 908/100000\n",
+ " - 19s - loss: 0.3676 - acc: 0.9356 - val_loss: 0.3600 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 00908: val_acc did not improve from 0.94225\n",
+ "Epoch 909/100000\n",
+ " - 19s - loss: 0.3701 - acc: 0.9353 - val_loss: 0.3770 - val_acc: 0.9267\n",
+ "\n",
+ "Epoch 00909: val_acc did not improve from 0.94225\n",
+ "Epoch 910/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9347 - val_loss: 0.3586 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 00910: val_acc did not improve from 0.94225\n",
+ "Epoch 911/100000\n",
+ " - 19s - loss: 0.3777 - acc: 0.9345 - val_loss: 0.3757 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 00911: val_acc did not improve from 0.94225\n",
+ "Epoch 912/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9354 - val_loss: 0.3734 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 00912: val_acc did not improve from 0.94225\n",
+ "Epoch 913/100000\n",
+ " - 19s - loss: 0.3698 - acc: 0.9354 - val_loss: 0.3631 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 00913: val_acc did not improve from 0.94225\n",
+ "Epoch 914/100000\n",
+ " - 18s - loss: 0.3670 - acc: 0.9361 - val_loss: 0.3696 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 00914: val_acc did not improve from 0.94225\n",
+ "Epoch 915/100000\n",
+ " - 19s - loss: 0.3666 - acc: 0.9358 - val_loss: 0.3705 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 00915: val_acc did not improve from 0.94225\n",
+ "Epoch 916/100000\n",
+ " - 19s - loss: 0.3715 - acc: 0.9343 - val_loss: 0.3684 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 00916: val_acc did not improve from 0.94225\n",
+ "Epoch 917/100000\n",
+ " - 19s - loss: 0.3690 - acc: 0.9350 - val_loss: 0.4192 - val_acc: 0.9105\n",
+ "\n",
+ "Epoch 00917: val_acc did not improve from 0.94225\n",
+ "Epoch 918/100000\n",
+ " - 19s - loss: 0.3682 - acc: 0.9354 - val_loss: 0.3831 - val_acc: 0.9201\n",
+ "\n",
+ "Epoch 00918: val_acc did not improve from 0.94225\n",
+ "Epoch 919/100000\n",
+ " - 19s - loss: 0.3700 - acc: 0.9348 - val_loss: 0.3727 - val_acc: 0.9273\n",
+ "\n",
+ "Epoch 00919: val_acc did not improve from 0.94225\n",
+ "Epoch 920/100000\n",
+ " - 18s - loss: 0.3711 - acc: 0.9355 - val_loss: 0.3746 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00920: val_acc did not improve from 0.94225\n",
+ "Epoch 921/100000\n",
+ " - 18s - loss: 0.3710 - acc: 0.9345 - val_loss: 0.5080 - val_acc: 0.8611\n",
+ "\n",
+ "Epoch 00921: val_acc did not improve from 0.94225\n",
+ "Epoch 922/100000\n",
+ " - 18s - loss: 0.3692 - acc: 0.9350 - val_loss: 0.3991 - val_acc: 0.9195\n",
+ "\n",
+ "Epoch 00922: val_acc did not improve from 0.94225\n",
+ "Epoch 923/100000\n",
+ " - 19s - loss: 0.3711 - acc: 0.9355 - val_loss: 0.3512 - val_acc: 0.9410\n",
+ "\n",
+ "Epoch 00923: val_acc did not improve from 0.94225\n",
+ "Epoch 924/100000\n",
+ " - 19s - loss: 0.3702 - acc: 0.9351 - val_loss: 0.3697 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 00924: val_acc did not improve from 0.94225\n",
+ "Epoch 925/100000\n",
+ " - 18s - loss: 0.3703 - acc: 0.9348 - val_loss: 0.3726 - val_acc: 0.9257\n",
+ "\n",
+ "Epoch 00925: val_acc did not improve from 0.94225\n",
+ "Epoch 926/100000\n",
+ " - 19s - loss: 0.3722 - acc: 0.9347 - val_loss: 0.3712 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 00926: val_acc did not improve from 0.94225\n",
+ "Epoch 927/100000\n",
+ " - 18s - loss: 0.3684 - acc: 0.9355 - val_loss: 0.4057 - val_acc: 0.9200\n",
+ "\n",
+ "Epoch 00927: val_acc did not improve from 0.94225\n",
+ "Epoch 928/100000\n",
+ " - 19s - loss: 0.3702 - acc: 0.9346 - val_loss: 0.4334 - val_acc: 0.9022\n",
+ "\n",
+ "Epoch 00928: val_acc did not improve from 0.94225\n",
+ "Epoch 929/100000\n",
+ " - 18s - loss: 0.3699 - acc: 0.9349 - val_loss: 0.4640 - val_acc: 0.9127\n",
+ "\n",
+ "Epoch 00929: val_acc did not improve from 0.94225\n",
+ "Epoch 930/100000\n",
+ " - 19s - loss: 0.3703 - acc: 0.9351 - val_loss: 0.3636 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 00930: val_acc did not improve from 0.94225\n",
+ "Epoch 931/100000\n",
+ " - 18s - loss: 0.3701 - acc: 0.9346 - val_loss: 0.3531 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 00931: val_acc did not improve from 0.94225\n",
+ "Epoch 932/100000\n",
+ " - 19s - loss: 0.3676 - acc: 0.9357 - val_loss: 0.3653 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 00932: val_acc did not improve from 0.94225\n",
+ "Epoch 933/100000\n",
+ " - 18s - loss: 0.3756 - acc: 0.9345 - val_loss: 0.3529 - val_acc: 0.9394\n",
+ "\n",
+ "Epoch 00933: val_acc did not improve from 0.94225\n",
+ "Epoch 934/100000\n",
+ " - 19s - loss: 0.3711 - acc: 0.9356 - val_loss: 0.4170 - val_acc: 0.9113\n",
+ "\n",
+ "Epoch 00934: val_acc did not improve from 0.94225\n",
+ "Epoch 935/100000\n",
+ " - 19s - loss: 0.3702 - acc: 0.9355 - val_loss: 0.3608 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 00935: val_acc did not improve from 0.94225\n",
+ "Epoch 936/100000\n",
+ " - 18s - loss: 0.3688 - acc: 0.9356 - val_loss: 0.4597 - val_acc: 0.8720\n",
+ "\n",
+ "Epoch 00936: val_acc did not improve from 0.94225\n",
+ "Epoch 937/100000\n",
+ " - 19s - loss: 0.3703 - acc: 0.9348 - val_loss: 0.3580 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 00937: val_acc did not improve from 0.94225\n",
+ "Epoch 938/100000\n",
+ " - 19s - loss: 0.3664 - acc: 0.9359 - val_loss: 0.4087 - val_acc: 0.9040\n",
+ "\n",
+ "Epoch 00938: val_acc did not improve from 0.94225\n",
+ "Epoch 939/100000\n",
+ " - 19s - loss: 0.3741 - acc: 0.9349 - val_loss: 0.3821 - val_acc: 0.9242\n",
+ "\n",
+ "Epoch 00939: val_acc did not improve from 0.94225\n",
+ "Epoch 940/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9348 - val_loss: 0.3700 - val_acc: 0.9323\n",
+ "\n",
+ "Epoch 00940: val_acc did not improve from 0.94225\n",
+ "Epoch 941/100000\n",
+ " - 19s - loss: 0.3694 - acc: 0.9345 - val_loss: 0.3679 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 00941: val_acc did not improve from 0.94225\n",
+ "Epoch 942/100000\n",
+ " - 18s - loss: 0.3711 - acc: 0.9349 - val_loss: 0.3549 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 00942: val_acc did not improve from 0.94225\n",
+ "Epoch 943/100000\n",
+ " - 19s - loss: 0.3654 - acc: 0.9361 - val_loss: 0.3527 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 00943: val_acc did not improve from 0.94225\n",
+ "Epoch 944/100000\n",
+ " - 19s - loss: 0.3694 - acc: 0.9351 - val_loss: 0.3573 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 00944: val_acc did not improve from 0.94225\n",
+ "Epoch 945/100000\n",
+ " - 19s - loss: 0.3687 - acc: 0.9358 - val_loss: 0.3673 - val_acc: 0.9304\n",
+ "\n",
+ "Epoch 00950: val_acc did not improve from 0.94225\n",
+ "Epoch 951/100000\n",
+ " - 19s - loss: 0.3675 - acc: 0.9357 - val_loss: 0.3832 - val_acc: 0.9180\n",
+ "\n",
+ "Epoch 00951: val_acc did not improve from 0.94225\n",
+ "Epoch 952/100000\n",
+ " - 19s - loss: 0.3693 - acc: 0.9354 - val_loss: 0.4486 - val_acc: 0.8881\n",
+ "\n",
+ "Epoch 00952: val_acc did not improve from 0.94225\n",
+ "Epoch 953/100000\n",
+ " - 19s - loss: 0.3696 - acc: 0.9348 - val_loss: 0.4675 - val_acc: 0.8906\n",
+ "\n",
+ "Epoch 00953: val_acc did not improve from 0.94225\n",
+ "Epoch 954/100000\n",
+ " - 19s - loss: 0.3689 - acc: 0.9347 - val_loss: 0.3679 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 00954: val_acc did not improve from 0.94225\n",
+ "Epoch 955/100000\n",
+ " - 18s - loss: 0.3666 - acc: 0.9361 - val_loss: 0.3597 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 00955: val_acc did not improve from 0.94225\n",
+ "Epoch 956/100000\n",
+ " - 19s - loss: 0.3726 - acc: 0.9349 - val_loss: 0.3635 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 00956: val_acc did not improve from 0.94225\n",
+ "Epoch 957/100000\n",
+ " - 19s - loss: 0.3707 - acc: 0.9356 - val_loss: 0.4386 - val_acc: 0.9002\n",
+ "\n",
+ "Epoch 00957: val_acc did not improve from 0.94225\n",
+ "Epoch 958/100000\n",
+ " - 19s - loss: 0.3684 - acc: 0.9353 - val_loss: 0.3702 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 00958: val_acc did not improve from 0.94225\n",
+ "Epoch 959/100000\n",
+ " - 19s - loss: 0.3669 - acc: 0.9360 - val_loss: 0.3556 - val_acc: 0.9383\n",
+ "\n",
+ "Epoch 00959: val_acc did not improve from 0.94225\n",
+ "Epoch 960/100000\n",
+ " - 19s - loss: 0.3710 - acc: 0.9350 - val_loss: 0.3578 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 00960: val_acc did not improve from 0.94225\n",
+ "Epoch 961/100000\n",
+ " - 19s - loss: 0.3681 - acc: 0.9352 - val_loss: 0.3766 - val_acc: 0.9243\n",
+ "\n",
+ "Epoch 00961: val_acc did not improve from 0.94225\n",
+ "Epoch 962/100000\n",
+ " - 19s - loss: 0.3706 - acc: 0.9346 - val_loss: 0.3936 - val_acc: 0.9216\n",
+ "\n",
+ "Epoch 00962: val_acc did not improve from 0.94225\n",
+ "Epoch 963/100000\n",
+ " - 19s - loss: 0.3680 - acc: 0.9353 - val_loss: 0.3806 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 00963: val_acc did not improve from 0.94225\n",
+ "Epoch 964/100000\n",
+ " - 18s - loss: 0.3697 - acc: 0.9355 - val_loss: 0.3882 - val_acc: 0.9182\n",
+ "\n",
+ "Epoch 00964: val_acc did not improve from 0.94225\n",
+ "Epoch 965/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9355 - val_loss: 0.4790 - val_acc: 0.8930\n",
+ "\n",
+ "Epoch 00965: val_acc did not improve from 0.94225\n",
+ "Epoch 966/100000\n",
+ " - 18s - loss: 0.3680 - acc: 0.9356 - val_loss: 0.3589 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 00966: val_acc did not improve from 0.94225\n",
+ "Epoch 967/100000\n",
+ " - 19s - loss: 0.3679 - acc: 0.9358 - val_loss: 0.5199 - val_acc: 0.8695\n",
+ "\n",
+ "Epoch 00967: val_acc did not improve from 0.94225\n",
+ "Epoch 968/100000\n",
+ " - 19s - loss: 0.3671 - acc: 0.9356 - val_loss: 0.3737 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 00968: val_acc did not improve from 0.94225\n",
+ "Epoch 969/100000\n",
+ " - 18s - loss: 0.3689 - acc: 0.9351 - val_loss: 0.3668 - val_acc: 0.9390\n",
+ "\n",
+ "Epoch 00969: val_acc did not improve from 0.94225\n",
+ "Epoch 970/100000\n",
+ " - 19s - loss: 0.3769 - acc: 0.9355 - val_loss: 0.3705 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 00970: val_acc did not improve from 0.94225\n",
+ "Epoch 971/100000\n",
+ " - 18s - loss: 0.3741 - acc: 0.9342 - val_loss: 0.4023 - val_acc: 0.9199\n",
+ "\n",
+ "Epoch 00971: val_acc did not improve from 0.94225\n",
+ "Epoch 972/100000\n",
+ " - 19s - loss: 0.3709 - acc: 0.9355 - val_loss: 0.3594 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 00972: val_acc did not improve from 0.94225\n",
+ "Epoch 973/100000\n",
+ " - 18s - loss: 0.3688 - acc: 0.9348 - val_loss: 0.3753 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 00973: val_acc did not improve from 0.94225\n",
+ "Epoch 974/100000\n",
+ " - 18s - loss: 0.3688 - acc: 0.9351 - val_loss: 0.4787 - val_acc: 0.8756\n",
+ "\n",
+ "Epoch 00974: val_acc did not improve from 0.94225\n",
+ "Epoch 975/100000\n",
+ " - 19s - loss: 0.3675 - acc: 0.9353 - val_loss: 0.3528 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 00975: val_acc did not improve from 0.94225\n",
+ "Epoch 976/100000\n",
+ " - 18s - loss: 0.3698 - acc: 0.9355 - val_loss: 0.3602 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 00976: val_acc did not improve from 0.94225\n",
+ "Epoch 977/100000\n",
+ " - 19s - loss: 0.3671 - acc: 0.9355 - val_loss: 0.3692 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 00977: val_acc did not improve from 0.94225\n",
+ "Epoch 978/100000\n",
+ " - 18s - loss: 0.3673 - acc: 0.9353 - val_loss: 0.3905 - val_acc: 0.9179\n",
+ "\n",
+ "Epoch 00978: val_acc did not improve from 0.94225\n",
+ "Epoch 979/100000\n",
+ " - 19s - loss: 0.3662 - acc: 0.9355 - val_loss: 0.5235 - val_acc: 0.8545\n",
+ "\n",
+ "Epoch 00979: val_acc did not improve from 0.94225\n",
+ "Epoch 980/100000\n",
+ " - 19s - loss: 0.3711 - acc: 0.9344 - val_loss: 0.3732 - val_acc: 0.9262\n",
+ "\n",
+ "Epoch 00980: val_acc did not improve from 0.94225\n",
+ "Epoch 981/100000\n",
+ " - 19s - loss: 0.3689 - acc: 0.9350 - val_loss: 0.3575 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 00981: val_acc did not improve from 0.94225\n",
+ "Epoch 982/100000\n",
+ " - 19s - loss: 0.3659 - acc: 0.9362 - val_loss: 0.3526 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 00982: val_acc did not improve from 0.94225\n",
+ "Epoch 983/100000\n",
+ " - 19s - loss: 0.3649 - acc: 0.9355 - val_loss: 0.3654 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 00983: val_acc did not improve from 0.94225\n",
+ "Epoch 984/100000\n",
+ " - 18s - loss: 0.3696 - acc: 0.9349 - val_loss: 0.3543 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 00984: val_acc did not improve from 0.94225\n",
+ "Epoch 985/100000\n",
+ " - 19s - loss: 0.3678 - acc: 0.9355 - val_loss: 0.3865 - val_acc: 0.9215\n",
+ "\n",
+ "Epoch 00985: val_acc did not improve from 0.94225\n",
+ "Epoch 986/100000\n",
+ " - 18s - loss: 0.3683 - acc: 0.9355 - val_loss: 0.3663 - val_acc: 0.9261\n",
+ "\n",
+ "Epoch 00986: val_acc did not improve from 0.94225\n",
+ "Epoch 987/100000\n",
+ " - 19s - loss: 0.3692 - acc: 0.9351 - val_loss: 0.3641 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 00987: val_acc did not improve from 0.94225\n",
+ "Epoch 988/100000\n",
+ " - 18s - loss: 0.3711 - acc: 0.9350 - val_loss: 0.3741 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 00988: val_acc did not improve from 0.94225\n",
+ "Epoch 989/100000\n",
+ " - 19s - loss: 0.3714 - acc: 0.9352 - val_loss: 0.3934 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 00989: val_acc did not improve from 0.94225\n",
+ "Epoch 990/100000\n",
+ " - 18s - loss: 0.3706 - acc: 0.9352 - val_loss: 0.3845 - val_acc: 0.9182\n",
+ "\n",
+ "Epoch 00990: val_acc did not improve from 0.94225\n",
+ "Epoch 991/100000\n",
+ " - 19s - loss: 0.3686 - acc: 0.9357 - val_loss: 0.4132 - val_acc: 0.9096\n",
+ "\n",
+ "Epoch 00991: val_acc did not improve from 0.94225\n",
+ "Epoch 992/100000\n",
+ " - 18s - loss: 0.3685 - acc: 0.9353 - val_loss: 0.4026 - val_acc: 0.9117\n",
+ "\n",
+ "Epoch 00992: val_acc did not improve from 0.94225\n",
+ "Epoch 993/100000\n",
+ " - 19s - loss: 0.3671 - acc: 0.9361 - val_loss: 0.4523 - val_acc: 0.8918\n",
+ "\n",
+ "Epoch 00993: val_acc did not improve from 0.94225\n",
+ "Epoch 994/100000\n",
+ " - 19s - loss: 0.3655 - acc: 0.9353 - val_loss: 0.3633 - val_acc: 0.9392\n",
+ "\n",
+ "Epoch 00994: val_acc did not improve from 0.94225\n",
+ "Epoch 995/100000\n",
+ " - 18s - loss: 0.3707 - acc: 0.9350 - val_loss: 0.3904 - val_acc: 0.9223\n",
+ "\n",
+ "Epoch 00995: val_acc did not improve from 0.94225\n",
+ "Epoch 996/100000\n",
+ " - 19s - loss: 0.3713 - acc: 0.9350 - val_loss: 0.3561 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 00996: val_acc did not improve from 0.94225\n",
+ "Epoch 997/100000\n",
+ " - 18s - loss: 0.3665 - acc: 0.9359 - val_loss: 0.3723 - val_acc: 0.9282\n",
+ "\n",
+ "Epoch 00997: val_acc did not improve from 0.94225\n",
+ "Epoch 998/100000\n",
+ " - 19s - loss: 0.3659 - acc: 0.9362 - val_loss: 0.3898 - val_acc: 0.9191\n",
+ "\n",
+ "Epoch 00998: val_acc did not improve from 0.94225\n",
+ "Epoch 999/100000\n",
+ " - 19s - loss: 0.3689 - acc: 0.9345 - val_loss: 0.3863 - val_acc: 0.9168\n",
+ "\n",
+ "Epoch 00999: val_acc did not improve from 0.94225\n",
+ "Epoch 1000/100000\n",
+ " - 19s - loss: 0.3655 - acc: 0.9360 - val_loss: 0.3560 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01000: val_acc did not improve from 0.94225\n",
+ "Epoch 1001/100000\n",
+ " - 19s - loss: 0.3689 - acc: 0.9357 - val_loss: 0.4617 - val_acc: 0.8773\n",
+ "\n",
+ "Epoch 01001: val_acc did not improve from 0.94225\n",
+ "Epoch 1002/100000\n",
+ " - 19s - loss: 0.3702 - acc: 0.9354 - val_loss: 0.4845 - val_acc: 0.8792\n",
+ "\n",
+ "Epoch 01002: val_acc did not improve from 0.94225\n",
+ "Epoch 1003/100000\n",
+ " - 18s - loss: 0.3726 - acc: 0.9343 - val_loss: 0.4135 - val_acc: 0.9026\n",
+ "\n",
+ "Epoch 01003: val_acc did not improve from 0.94225\n",
+ "Epoch 1004/100000\n",
+ " - 19s - loss: 0.3658 - acc: 0.9359 - val_loss: 0.3567 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 01004: val_acc did not improve from 0.94225\n",
+ "Epoch 1005/100000\n",
+ " - 18s - loss: 0.3694 - acc: 0.9351 - val_loss: 0.3876 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 01005: val_acc did not improve from 0.94225\n",
+ "Epoch 1006/100000\n",
+ " - 20s - loss: 0.3688 - acc: 0.9342 - val_loss: 0.4080 - val_acc: 0.9221\n",
+ "\n",
+ "Epoch 01006: val_acc did not improve from 0.94225\n",
+ "Epoch 1007/100000\n",
+ " - 19s - loss: 0.3708 - acc: 0.9352 - val_loss: 0.3818 - val_acc: 0.9192\n",
+ "\n",
+ "Epoch 01007: val_acc did not improve from 0.94225\n",
+ "Epoch 1008/100000\n",
+ " - 19s - loss: 0.3699 - acc: 0.9350 - val_loss: 0.3908 - val_acc: 0.9166\n",
+ "\n",
+ "Epoch 01008: val_acc did not improve from 0.94225\n",
+ "Epoch 1009/100000\n",
+ " - 19s - loss: 0.3729 - acc: 0.9347 - val_loss: 0.4244 - val_acc: 0.9037\n",
+ "\n",
+ "Epoch 01009: val_acc did not improve from 0.94225\n",
+ "Epoch 1010/100000\n",
+ " - 18s - loss: 0.3700 - acc: 0.9356 - val_loss: 0.3676 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 01010: val_acc did not improve from 0.94225\n",
+ "Epoch 1011/100000\n",
+ " - 19s - loss: 0.3694 - acc: 0.9349 - val_loss: 0.3629 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 01011: val_acc did not improve from 0.94225\n",
+ "Epoch 1012/100000\n",
+ " - 18s - loss: 0.3699 - acc: 0.9352 - val_loss: 0.3848 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 01012: val_acc did not improve from 0.94225\n",
+ "Epoch 1013/100000\n",
+ " - 19s - loss: 0.3672 - acc: 0.9358 - val_loss: 0.4152 - val_acc: 0.9199\n",
+ "\n",
+ "Epoch 01013: val_acc did not improve from 0.94225\n",
+ "Epoch 1014/100000\n",
+ " - 18s - loss: 0.3681 - acc: 0.9350 - val_loss: 0.4354 - val_acc: 0.8935\n",
+ "\n",
+ "Epoch 01014: val_acc did not improve from 0.94225\n",
+ "Epoch 1015/100000\n",
+ " - 18s - loss: 0.3664 - acc: 0.9353 - val_loss: 0.3524 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 01015: val_acc did not improve from 0.94225\n",
+ "Epoch 1016/100000\n",
+ " - 19s - loss: 0.3688 - acc: 0.9359 - val_loss: 0.4288 - val_acc: 0.9082\n",
+ "\n",
+ "Epoch 01016: val_acc did not improve from 0.94225\n",
+ "Epoch 1017/100000\n",
+ " - 19s - loss: 0.3750 - acc: 0.9343 - val_loss: 0.3659 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 01017: val_acc did not improve from 0.94225\n",
+ "Epoch 1018/100000\n",
+ " - 19s - loss: 0.3671 - acc: 0.9364 - val_loss: 0.3569 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 01018: val_acc did not improve from 0.94225\n",
+ "Epoch 1019/100000\n",
+ " - 19s - loss: 0.3704 - acc: 0.9347 - val_loss: 0.3696 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01019: val_acc did not improve from 0.94225\n",
+ "Epoch 1020/100000\n",
+ " - 19s - loss: 0.3664 - acc: 0.9361 - val_loss: 0.3648 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 01020: val_acc did not improve from 0.94225\n",
+ "Epoch 1021/100000\n",
+ " - 19s - loss: 0.3674 - acc: 0.9356 - val_loss: 0.3922 - val_acc: 0.9088\n",
+ "\n",
+ "Epoch 01021: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 01021: ReduceLROnPlateau reducing learning rate to 0.0007737808919046074.\n",
+ "Epoch 1022/100000\n",
+ " - 19s - loss: 0.3610 - acc: 0.9354 - val_loss: 0.3905 - val_acc: 0.9103\n",
+ "\n",
+ "Epoch 01022: val_acc did not improve from 0.94225\n",
+ "Epoch 1023/100000\n",
+ " - 19s - loss: 0.3582 - acc: 0.9358 - val_loss: 0.3477 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 01023: val_acc did not improve from 0.94225\n",
+ "Epoch 1024/100000\n",
+ " - 19s - loss: 0.3586 - acc: 0.9362 - val_loss: 0.4202 - val_acc: 0.9004\n",
+ "\n",
+ "Epoch 01024: val_acc did not improve from 0.94225\n",
+ "Epoch 1025/100000\n",
+ " - 19s - loss: 0.3600 - acc: 0.9349 - val_loss: 0.3616 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 01025: val_acc did not improve from 0.94225\n",
+ "Epoch 1026/100000\n",
+ " - 18s - loss: 0.3586 - acc: 0.9364 - val_loss: 0.3522 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01026: val_acc did not improve from 0.94225\n",
+ "Epoch 1027/100000\n",
+ " - 19s - loss: 0.3590 - acc: 0.9356 - val_loss: 0.3971 - val_acc: 0.9078\n",
+ "\n",
+ "Epoch 01027: val_acc did not improve from 0.94225\n",
+ "Epoch 1028/100000\n",
+ " - 19s - loss: 0.3606 - acc: 0.9352 - val_loss: 0.3717 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 01028: val_acc did not improve from 0.94225\n",
+ "Epoch 1029/100000\n",
+ " - 18s - loss: 0.3629 - acc: 0.9355 - val_loss: 0.3563 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 01029: val_acc did not improve from 0.94225\n",
+ "Epoch 1030/100000\n",
+ " - 18s - loss: 0.3625 - acc: 0.9353 - val_loss: 0.3926 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 01030: val_acc did not improve from 0.94225\n",
+ "Epoch 1031/100000\n",
+ " - 19s - loss: 0.3589 - acc: 0.9354 - val_loss: 0.3511 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 01031: val_acc did not improve from 0.94225\n",
+ "Epoch 1032/100000\n",
+ " - 19s - loss: 0.3600 - acc: 0.9356 - val_loss: 0.3683 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 01032: val_acc did not improve from 0.94225\n",
+ "Epoch 1033/100000\n",
+ " - 18s - loss: 0.3598 - acc: 0.9362 - val_loss: 0.3531 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01033: val_acc did not improve from 0.94225\n",
+ "Epoch 1034/100000\n",
+ " - 19s - loss: 0.3602 - acc: 0.9353 - val_loss: 0.3663 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 01034: val_acc did not improve from 0.94225\n",
+ "Epoch 1035/100000\n",
+ " - 18s - loss: 0.3640 - acc: 0.9360 - val_loss: 0.3815 - val_acc: 0.9166\n",
+ "\n",
+ "Epoch 01035: val_acc did not improve from 0.94225\n",
+ "Epoch 1036/100000\n",
+ " - 19s - loss: 0.3620 - acc: 0.9357 - val_loss: 0.3550 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 01036: val_acc did not improve from 0.94225\n",
+ "Epoch 1037/100000\n",
+ " - 19s - loss: 0.3655 - acc: 0.9343 - val_loss: 0.4479 - val_acc: 0.8834\n",
+ "\n",
+ "Epoch 01037: val_acc did not improve from 0.94225\n",
+ "Epoch 1038/100000\n",
+ " - 19s - loss: 0.3594 - acc: 0.9357 - val_loss: 0.4071 - val_acc: 0.9060\n",
+ "\n",
+ "Epoch 01038: val_acc did not improve from 0.94225\n",
+ "Epoch 1039/100000\n",
+ " - 18s - loss: 0.3608 - acc: 0.9356 - val_loss: 0.3546 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01039: val_acc did not improve from 0.94225\n",
+ "Epoch 1040/100000\n",
+ " - 19s - loss: 0.3588 - acc: 0.9354 - val_loss: 0.3529 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 01040: val_acc did not improve from 0.94225\n",
+ "Epoch 1041/100000\n",
+ " - 19s - loss: 0.3620 - acc: 0.9354 - val_loss: 0.3611 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 01041: val_acc did not improve from 0.94225\n",
+ "Epoch 1042/100000\n",
+ " - 19s - loss: 0.3586 - acc: 0.9356 - val_loss: 0.3821 - val_acc: 0.9175\n",
+ "\n",
+ "Epoch 01042: val_acc did not improve from 0.94225\n",
+ "Epoch 1043/100000\n",
+ " - 19s - loss: 0.3590 - acc: 0.9360 - val_loss: 0.3599 - val_acc: 0.9237\n",
+ "\n",
+ "Epoch 01043: val_acc did not improve from 0.94225\n",
+ "Epoch 1044/100000\n",
+ " - 18s - loss: 0.3586 - acc: 0.9359 - val_loss: 0.4377 - val_acc: 0.8873\n",
+ "\n",
+ "Epoch 01044: val_acc did not improve from 0.94225\n",
+ "Epoch 1045/100000\n",
+ " - 19s - loss: 0.3599 - acc: 0.9367 - val_loss: 0.3597 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01045: val_acc did not improve from 0.94225\n",
+ "Epoch 1046/100000\n",
+ " - 19s - loss: 0.3620 - acc: 0.9354 - val_loss: 0.3557 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 01046: val_acc did not improve from 0.94225\n",
+ "Epoch 1047/100000\n",
+ " - 19s - loss: 0.3574 - acc: 0.9366 - val_loss: 0.4169 - val_acc: 0.9222\n",
+ "\n",
+ "Epoch 01047: val_acc did not improve from 0.94225\n",
+ "Epoch 1048/100000\n",
+ " - 19s - loss: 0.3611 - acc: 0.9353 - val_loss: 0.3805 - val_acc: 0.9238\n",
+ "\n",
+ "Epoch 01048: val_acc did not improve from 0.94225\n",
+ "Epoch 1049/100000\n",
+ " - 19s - loss: 0.3616 - acc: 0.9369 - val_loss: 0.3614 - val_acc: 0.9269\n",
+ "\n",
+ "Epoch 01049: val_acc did not improve from 0.94225\n",
+ "Epoch 1050/100000\n",
+ " - 19s - loss: 0.3596 - acc: 0.9362 - val_loss: 0.3883 - val_acc: 0.9196\n",
+ "\n",
+ "Epoch 01050: val_acc did not improve from 0.94225\n",
+ "Epoch 1051/100000\n",
+ " - 19s - loss: 0.3592 - acc: 0.9362 - val_loss: 0.3851 - val_acc: 0.9248\n",
+ "\n",
+ "Epoch 01051: val_acc did not improve from 0.94225\n",
+ "Epoch 1052/100000\n",
+ " - 19s - loss: 0.3614 - acc: 0.9350 - val_loss: 0.3693 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 01052: val_acc did not improve from 0.94225\n",
+ "Epoch 1053/100000\n",
+ " - 19s - loss: 0.3604 - acc: 0.9357 - val_loss: 0.3481 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 01053: val_acc did not improve from 0.94225\n",
+ "Epoch 1054/100000\n",
+ " - 19s - loss: 0.3616 - acc: 0.9358 - val_loss: 0.3653 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 01054: val_acc did not improve from 0.94225\n",
+ "Epoch 1055/100000\n",
+ " - 19s - loss: 0.3610 - acc: 0.9362 - val_loss: 0.3789 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 01055: val_acc did not improve from 0.94225\n",
+ "Epoch 1056/100000\n",
+ " - 18s - loss: 0.3603 - acc: 0.9357 - val_loss: 0.3585 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 01056: val_acc did not improve from 0.94225\n",
+ "Epoch 1057/100000\n",
+ " - 18s - loss: 0.3579 - acc: 0.9354 - val_loss: 0.3861 - val_acc: 0.9181\n",
+ "\n",
+ "Epoch 01057: val_acc did not improve from 0.94225\n",
+ "Epoch 1058/100000\n",
+ " - 19s - loss: 0.3603 - acc: 0.9352 - val_loss: 0.3684 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 01058: val_acc did not improve from 0.94225\n",
+ "Epoch 1059/100000\n",
+ " - 18s - loss: 0.3592 - acc: 0.9366 - val_loss: 0.3538 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 01059: val_acc did not improve from 0.94225\n",
+ "Epoch 1060/100000\n",
+ " - 19s - loss: 0.3617 - acc: 0.9355 - val_loss: 0.3526 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 01060: val_acc did not improve from 0.94225\n",
+ "Epoch 1061/100000\n",
+ " - 19s - loss: 0.3620 - acc: 0.9354 - val_loss: 0.4439 - val_acc: 0.9052\n",
+ "\n",
+ "Epoch 01061: val_acc did not improve from 0.94225\n",
+ "Epoch 1062/100000\n",
+ " - 19s - loss: 0.3597 - acc: 0.9361 - val_loss: 0.3466 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 01062: val_acc did not improve from 0.94225\n",
+ "Epoch 1063/100000\n",
+ " - 19s - loss: 0.3620 - acc: 0.9349 - val_loss: 0.3642 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 01063: val_acc did not improve from 0.94225\n",
+ "Epoch 1064/100000\n",
+ " - 19s - loss: 0.3596 - acc: 0.9355 - val_loss: 0.3436 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 01064: val_acc did not improve from 0.94225\n",
+ "Epoch 1065/100000\n",
+ " - 19s - loss: 0.3580 - acc: 0.9367 - val_loss: 0.3443 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 01065: val_acc did not improve from 0.94225\n",
+ "Epoch 1066/100000\n",
+ " - 19s - loss: 0.3603 - acc: 0.9358 - val_loss: 0.3527 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 01066: val_acc did not improve from 0.94225\n",
+ "Epoch 1067/100000\n",
+ " - 19s - loss: 0.3625 - acc: 0.9363 - val_loss: 0.3588 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 01067: val_acc did not improve from 0.94225\n",
+ "Epoch 1068/100000\n",
+ " - 19s - loss: 0.3609 - acc: 0.9352 - val_loss: 0.3745 - val_acc: 0.9270\n",
+ "\n",
+ "Epoch 01068: val_acc did not improve from 0.94225\n",
+ "Epoch 1069/100000\n",
+ " - 19s - loss: 0.3591 - acc: 0.9362 - val_loss: 0.3635 - val_acc: 0.9271\n",
+ "\n",
+ "Epoch 01069: val_acc did not improve from 0.94225\n",
+ "Epoch 1070/100000\n",
+ " - 19s - loss: 0.3607 - acc: 0.9359 - val_loss: 0.3800 - val_acc: 0.9260\n",
+ "\n",
+ "Epoch 01070: val_acc did not improve from 0.94225\n",
+ "Epoch 1071/100000\n",
+ " - 19s - loss: 0.3626 - acc: 0.9359 - val_loss: 0.3877 - val_acc: 0.9150\n",
+ "\n",
+ "Epoch 01071: val_acc did not improve from 0.94225\n",
+ "Epoch 1072/100000\n",
+ " - 19s - loss: 0.3612 - acc: 0.9355 - val_loss: 0.3566 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 01072: val_acc did not improve from 0.94225\n",
+ "Epoch 1073/100000\n",
+ " - 18s - loss: 0.3633 - acc: 0.9351 - val_loss: 0.3567 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 01073: val_acc did not improve from 0.94225\n",
+ "Epoch 1074/100000\n",
+ " - 19s - loss: 0.3589 - acc: 0.9356 - val_loss: 0.3495 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 01074: val_acc did not improve from 0.94225\n",
+ "Epoch 1075/100000\n",
+ " - 19s - loss: 0.3580 - acc: 0.9355 - val_loss: 0.3666 - val_acc: 0.9309\n",
+ "\n",
+ "Epoch 01075: val_acc did not improve from 0.94225\n",
+ "Epoch 1076/100000\n",
+ " - 19s - loss: 0.3577 - acc: 0.9362 - val_loss: 0.3483 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 01076: val_acc did not improve from 0.94225\n",
+ "Epoch 1077/100000\n",
+ " - 19s - loss: 0.3592 - acc: 0.9358 - val_loss: 0.3499 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 01077: val_acc did not improve from 0.94225\n",
+ "Epoch 1078/100000\n",
+ " - 18s - loss: 0.3560 - acc: 0.9368 - val_loss: 0.4120 - val_acc: 0.9058\n",
+ "\n",
+ "Epoch 01078: val_acc did not improve from 0.94225\n",
+ "Epoch 1079/100000\n",
+ " - 19s - loss: 0.3602 - acc: 0.9357 - val_loss: 0.3637 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01079: val_acc did not improve from 0.94225\n",
+ "Epoch 1080/100000\n",
+ " - 18s - loss: 0.3582 - acc: 0.9359 - val_loss: 0.3686 - val_acc: 0.9272\n",
+ "\n",
+ "Epoch 01080: val_acc did not improve from 0.94225\n",
+ "Epoch 1081/100000\n",
+ " - 19s - loss: 0.3609 - acc: 0.9353 - val_loss: 0.3763 - val_acc: 0.9190\n",
+ "\n",
+ "Epoch 01081: val_acc did not improve from 0.94225\n",
+ "Epoch 1082/100000\n",
+ " - 19s - loss: 0.3597 - acc: 0.9355 - val_loss: 0.3695 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 01082: val_acc did not improve from 0.94225\n",
+ "Epoch 1083/100000\n",
+ " - 19s - loss: 0.3629 - acc: 0.9355 - val_loss: 0.3737 - val_acc: 0.9273\n",
+ "\n",
+ "Epoch 01083: val_acc did not improve from 0.94225\n",
+ "Epoch 1084/100000\n",
+ " - 18s - loss: 0.3574 - acc: 0.9369 - val_loss: 0.3719 - val_acc: 0.9184\n",
+ "\n",
+ "Epoch 01084: val_acc did not improve from 0.94225\n",
+ "Epoch 1085/100000\n",
+ " - 19s - loss: 0.3601 - acc: 0.9356 - val_loss: 0.3503 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 01085: val_acc did not improve from 0.94225\n",
+ "Epoch 1086/100000\n",
+ " - 19s - loss: 0.3589 - acc: 0.9359 - val_loss: 0.3958 - val_acc: 0.9151\n",
+ "\n",
+ "Epoch 01086: val_acc did not improve from 0.94225\n",
+ "Epoch 1087/100000\n",
+ " - 19s - loss: 0.3596 - acc: 0.9356 - val_loss: 0.3657 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 01087: val_acc did not improve from 0.94225\n",
+ "Epoch 1088/100000\n",
+ " - 19s - loss: 0.3619 - acc: 0.9356 - val_loss: 0.3708 - val_acc: 0.9258\n",
+ "\n",
+ "Epoch 01088: val_acc did not improve from 0.94225\n",
+ "Epoch 1089/100000\n",
+ " - 19s - loss: 0.3614 - acc: 0.9355 - val_loss: 0.3594 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 01089: val_acc did not improve from 0.94225\n",
+ "Epoch 1090/100000\n",
+ " - 18s - loss: 0.3632 - acc: 0.9355 - val_loss: 0.3885 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 01090: val_acc did not improve from 0.94225\n",
+ "Epoch 1091/100000\n",
+ " - 19s - loss: 0.3611 - acc: 0.9352 - val_loss: 0.3528 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 01091: val_acc did not improve from 0.94225\n",
+ "Epoch 1092/100000\n",
+ " - 19s - loss: 0.3596 - acc: 0.9357 - val_loss: 0.4545 - val_acc: 0.8758\n",
+ "\n",
+ "Epoch 01092: val_acc did not improve from 0.94225\n",
+ "Epoch 1093/100000\n",
+ " - 18s - loss: 0.3593 - acc: 0.9356 - val_loss: 0.3640 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 01093: val_acc did not improve from 0.94225\n",
+ "Epoch 1094/100000\n",
+ " - 19s - loss: 0.3573 - acc: 0.9365 - val_loss: 0.3825 - val_acc: 0.9177\n",
+ "\n",
+ "Epoch 01094: val_acc did not improve from 0.94225\n",
+ "Epoch 1095/100000\n",
+ " - 18s - loss: 0.3582 - acc: 0.9358 - val_loss: 0.3581 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 01095: val_acc did not improve from 0.94225\n",
+ "Epoch 1096/100000\n",
+ " - 19s - loss: 0.3564 - acc: 0.9358 - val_loss: 0.3449 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 01096: val_acc did not improve from 0.94225\n",
+ "Epoch 1097/100000\n",
+ " - 18s - loss: 0.3602 - acc: 0.9354 - val_loss: 0.3632 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01097: val_acc did not improve from 0.94225\n",
+ "Epoch 1098/100000\n",
+ " - 19s - loss: 0.3576 - acc: 0.9361 - val_loss: 0.4003 - val_acc: 0.9083\n",
+ "\n",
+ "Epoch 01098: val_acc did not improve from 0.94225\n",
+ "Epoch 1099/100000\n",
+ " - 19s - loss: 0.3591 - acc: 0.9355 - val_loss: 0.3667 - val_acc: 0.9323\n",
+ "\n",
+ "Epoch 01099: val_acc did not improve from 0.94225\n",
+ "Epoch 1100/100000\n",
+ " - 19s - loss: 0.3607 - acc: 0.9351 - val_loss: 0.4439 - val_acc: 0.9078\n",
+ "\n",
+ "Epoch 01100: val_acc did not improve from 0.94225\n",
+ "Epoch 1101/100000\n",
+ " - 18s - loss: 0.3569 - acc: 0.9362 - val_loss: 0.3593 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 01101: val_acc did not improve from 0.94225\n",
+ "Epoch 1102/100000\n",
+ " - 18s - loss: 0.3618 - acc: 0.9352 - val_loss: 0.4063 - val_acc: 0.9079\n",
+ "\n",
+ "Epoch 01102: val_acc did not improve from 0.94225\n",
+ "Epoch 1103/100000\n",
+ " - 19s - loss: 0.3613 - acc: 0.9356 - val_loss: 0.3780 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 01103: val_acc did not improve from 0.94225\n",
+ "Epoch 1104/100000\n",
+ " - 18s - loss: 0.3572 - acc: 0.9363 - val_loss: 0.3644 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 01104: val_acc did not improve from 0.94225\n",
+ "Epoch 1105/100000\n",
+ " - 19s - loss: 0.3599 - acc: 0.9351 - val_loss: 0.3491 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 01105: val_acc did not improve from 0.94225\n",
+ "Epoch 1106/100000\n",
+ " - 19s - loss: 0.3601 - acc: 0.9358 - val_loss: 0.4023 - val_acc: 0.9279\n",
+ "\n",
+ "Epoch 01106: val_acc did not improve from 0.94225\n",
+ "Epoch 1107/100000\n",
+ " - 19s - loss: 0.3575 - acc: 0.9367 - val_loss: 0.3596 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01107: val_acc did not improve from 0.94225\n",
+ "Epoch 1108/100000\n",
+ " - 19s - loss: 0.3593 - acc: 0.9354 - val_loss: 0.3698 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01108: val_acc did not improve from 0.94225\n",
+ "Epoch 1109/100000\n",
+ " - 19s - loss: 0.3579 - acc: 0.9368 - val_loss: 0.3541 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 01109: val_acc did not improve from 0.94225\n",
+ "Epoch 1110/100000\n",
+ " - 19s - loss: 0.3609 - acc: 0.9358 - val_loss: 0.3623 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01110: val_acc did not improve from 0.94225\n",
+ "Epoch 1111/100000\n",
+ " - 19s - loss: 0.3579 - acc: 0.9358 - val_loss: 0.4410 - val_acc: 0.9088\n",
+ "\n",
+ "Epoch 01111: val_acc did not improve from 0.94225\n",
+ "Epoch 1112/100000\n",
+ " - 19s - loss: 0.3606 - acc: 0.9348 - val_loss: 0.3723 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01112: val_acc did not improve from 0.94225\n",
+ "Epoch 1113/100000\n",
+ " - 19s - loss: 0.3601 - acc: 0.9366 - val_loss: 0.3540 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01113: val_acc did not improve from 0.94225\n",
+ "Epoch 1114/100000\n",
+ " - 19s - loss: 0.3583 - acc: 0.9360 - val_loss: 0.3552 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 01114: val_acc did not improve from 0.94225\n",
+ "Epoch 1115/100000\n",
+ " - 18s - loss: 0.3563 - acc: 0.9367 - val_loss: 0.3485 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01115: val_acc did not improve from 0.94225\n",
+ "Epoch 1116/100000\n",
+ " - 19s - loss: 0.3575 - acc: 0.9362 - val_loss: 0.3540 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 01116: val_acc did not improve from 0.94225\n",
+ "Epoch 1117/100000\n",
+ " - 18s - loss: 0.3630 - acc: 0.9350 - val_loss: 0.4921 - val_acc: 0.8713\n",
+ "\n",
+ "Epoch 01117: val_acc did not improve from 0.94225\n",
+ "Epoch 1118/100000\n",
+ " - 18s - loss: 0.3612 - acc: 0.9363 - val_loss: 0.3579 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 01118: val_acc did not improve from 0.94225\n",
+ "Epoch 1119/100000\n",
+ " - 19s - loss: 0.3641 - acc: 0.9357 - val_loss: 0.3490 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 01119: val_acc did not improve from 0.94225\n",
+ "Epoch 1120/100000\n",
+ " - 19s - loss: 0.3560 - acc: 0.9368 - val_loss: 0.3551 - val_acc: 0.9309\n",
+ "\n",
+ "Epoch 01120: val_acc did not improve from 0.94225\n",
+ "Epoch 1121/100000\n",
+ " - 19s - loss: 0.3597 - acc: 0.9358 - val_loss: 0.3585 - val_acc: 0.9309\n",
+ "\n",
+ "Epoch 01121: val_acc did not improve from 0.94225\n",
+ "Epoch 1122/100000\n",
+ " - 19s - loss: 0.3572 - acc: 0.9366 - val_loss: 0.3454 - val_acc: 0.9396\n",
+ "\n",
+ "Epoch 01122: val_acc did not improve from 0.94225\n",
+ "Epoch 1123/100000\n",
+ " - 19s - loss: 0.3565 - acc: 0.9365 - val_loss: 0.3447 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 01123: val_acc did not improve from 0.94225\n",
+ "Epoch 1124/100000\n",
+ " - 19s - loss: 0.3571 - acc: 0.9358 - val_loss: 0.3658 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 01124: val_acc did not improve from 0.94225\n",
+ "Epoch 1125/100000\n",
+ " - 18s - loss: 0.3639 - acc: 0.9360 - val_loss: 0.3565 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 01125: val_acc did not improve from 0.94225\n",
+ "Epoch 1126/100000\n",
+ " - 19s - loss: 0.3652 - acc: 0.9359 - val_loss: 0.4050 - val_acc: 0.9264\n",
+ "\n",
+ "Epoch 01126: val_acc did not improve from 0.94225\n",
+ "Epoch 1127/100000\n",
+ " - 19s - loss: 0.3605 - acc: 0.9361 - val_loss: 0.3539 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 01127: val_acc did not improve from 0.94225\n",
+ "Epoch 1128/100000\n",
+ " - 18s - loss: 0.3621 - acc: 0.9366 - val_loss: 0.3543 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 01128: val_acc did not improve from 0.94225\n",
+ "Epoch 1129/100000\n",
+ " - 19s - loss: 0.3603 - acc: 0.9362 - val_loss: 0.3643 - val_acc: 0.9265\n",
+ "\n",
+ "Epoch 01129: val_acc did not improve from 0.94225\n",
+ "Epoch 1130/100000\n",
+ " - 19s - loss: 0.3581 - acc: 0.9364 - val_loss: 0.3483 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 01130: val_acc did not improve from 0.94225\n",
+ "Epoch 1131/100000\n",
+ " - 19s - loss: 0.3589 - acc: 0.9351 - val_loss: 0.4475 - val_acc: 0.8930\n",
+ "\n",
+ "Epoch 01131: val_acc did not improve from 0.94225\n",
+ "Epoch 1132/100000\n",
+ " - 18s - loss: 0.3563 - acc: 0.9364 - val_loss: 0.3564 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01132: val_acc did not improve from 0.94225\n",
+ "Epoch 1133/100000\n",
+ " - 19s - loss: 0.3603 - acc: 0.9357 - val_loss: 0.3730 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 01133: val_acc did not improve from 0.94225\n",
+ "Epoch 1134/100000\n",
+ " - 19s - loss: 0.3575 - acc: 0.9366 - val_loss: 0.4411 - val_acc: 0.8936\n",
+ "\n",
+ "Epoch 01134: val_acc did not improve from 0.94225\n",
+ "Epoch 1135/100000\n",
+ " - 18s - loss: 0.3608 - acc: 0.9361 - val_loss: 0.4857 - val_acc: 0.8861\n",
+ "\n",
+ "Epoch 01135: val_acc did not improve from 0.94225\n",
+ "Epoch 1136/100000\n",
+ " - 18s - loss: 0.3579 - acc: 0.9358 - val_loss: 0.4125 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 01136: val_acc did not improve from 0.94225\n",
+ "Epoch 1137/100000\n",
+ " - 18s - loss: 0.3558 - acc: 0.9361 - val_loss: 0.3608 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 01137: val_acc did not improve from 0.94225\n",
+ "Epoch 1138/100000\n",
+ " - 19s - loss: 0.3575 - acc: 0.9359 - val_loss: 0.3970 - val_acc: 0.9120\n",
+ "\n",
+ "Epoch 01138: val_acc did not improve from 0.94225\n",
+ "Epoch 1139/100000\n",
+ " - 18s - loss: 0.3571 - acc: 0.9358 - val_loss: 0.4211 - val_acc: 0.8972\n",
+ "\n",
+ "Epoch 01139: val_acc did not improve from 0.94225\n",
+ "Epoch 1140/100000\n",
+ " - 19s - loss: 0.3610 - acc: 0.9353 - val_loss: 0.3916 - val_acc: 0.9168\n",
+ "\n",
+ "Epoch 01140: val_acc did not improve from 0.94225\n",
+ "Epoch 1141/100000\n",
+ " - 18s - loss: 0.3594 - acc: 0.9355 - val_loss: 0.3521 - val_acc: 0.9401\n",
+ "\n",
+ "Epoch 01141: val_acc did not improve from 0.94225\n",
+ "Epoch 1142/100000\n",
+ " - 19s - loss: 0.3593 - acc: 0.9366 - val_loss: 0.3431 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 01142: val_acc did not improve from 0.94225\n",
+ "Epoch 1143/100000\n",
+ " - 18s - loss: 0.3612 - acc: 0.9358 - val_loss: 0.3640 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 01143: val_acc did not improve from 0.94225\n",
+ "Epoch 1144/100000\n",
+ " - 19s - loss: 0.3584 - acc: 0.9358 - val_loss: 0.3558 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 01144: val_acc did not improve from 0.94225\n",
+ "Epoch 1145/100000\n",
+ " - 18s - loss: 0.3606 - acc: 0.9360 - val_loss: 0.3875 - val_acc: 0.9244\n",
+ "\n",
+ "Epoch 01145: val_acc did not improve from 0.94225\n",
+ "Epoch 1146/100000\n",
+ " - 19s - loss: 0.3598 - acc: 0.9367 - val_loss: 0.3411 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 01146: val_acc did not improve from 0.94225\n",
+ "Epoch 1147/100000\n",
+ " - 18s - loss: 0.3593 - acc: 0.9358 - val_loss: 0.3730 - val_acc: 0.9281\n",
+ "\n",
+ "Epoch 01147: val_acc did not improve from 0.94225\n",
+ "Epoch 1148/100000\n",
+ " - 18s - loss: 0.3572 - acc: 0.9367 - val_loss: 0.3561 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 01148: val_acc did not improve from 0.94225\n",
+ "Epoch 1149/100000\n",
+ " - 19s - loss: 0.3627 - acc: 0.9357 - val_loss: 0.3520 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01149: val_acc did not improve from 0.94225\n",
+ "Epoch 1150/100000\n",
+ " - 19s - loss: 0.3573 - acc: 0.9361 - val_loss: 0.3520 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 01150: val_acc did not improve from 0.94225\n",
+ "Epoch 1151/100000\n",
+ " - 18s - loss: 0.3613 - acc: 0.9353 - val_loss: 0.3449 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 01151: val_acc did not improve from 0.94225\n",
+ "Epoch 1152/100000\n",
+ " - 19s - loss: 0.3597 - acc: 0.9361 - val_loss: 0.4182 - val_acc: 0.9023\n",
+ "\n",
+ "Epoch 01152: val_acc did not improve from 0.94225\n",
+ "Epoch 1153/100000\n",
+ " - 18s - loss: 0.3603 - acc: 0.9359 - val_loss: 0.3829 - val_acc: 0.9202\n",
+ "\n",
+ "Epoch 01153: val_acc did not improve from 0.94225\n",
+ "Epoch 1154/100000\n",
+ " - 19s - loss: 0.3611 - acc: 0.9352 - val_loss: 0.3667 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 01154: val_acc did not improve from 0.94225\n",
+ "Epoch 1155/100000\n",
+ " - 18s - loss: 0.3588 - acc: 0.9361 - val_loss: 0.3589 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01155: val_acc did not improve from 0.94225\n",
+ "Epoch 1156/100000\n",
+ " - 19s - loss: 0.3581 - acc: 0.9361 - val_loss: 0.4061 - val_acc: 0.9257\n",
+ "\n",
+ "Epoch 01156: val_acc did not improve from 0.94225\n",
+ "Epoch 1157/100000\n",
+ " - 19s - loss: 0.3615 - acc: 0.9355 - val_loss: 0.3540 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01157: val_acc did not improve from 0.94225\n",
+ "Epoch 1158/100000\n",
+ " - 19s - loss: 0.3584 - acc: 0.9359 - val_loss: 0.3480 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 01158: val_acc did not improve from 0.94225\n",
+ "Epoch 1159/100000\n",
+ " - 19s - loss: 0.3590 - acc: 0.9365 - val_loss: 0.6041 - val_acc: 0.8105\n",
+ "\n",
+ "Epoch 01159: val_acc did not improve from 0.94225\n",
+ "Epoch 1160/100000\n",
+ " - 19s - loss: 0.3589 - acc: 0.9364 - val_loss: 0.3692 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 01160: val_acc did not improve from 0.94225\n",
+ "Epoch 1161/100000\n",
+ " - 19s - loss: 0.3609 - acc: 0.9356 - val_loss: 0.3574 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01161: val_acc did not improve from 0.94225\n",
+ "Epoch 1162/100000\n",
+ " - 19s - loss: 0.3600 - acc: 0.9359 - val_loss: 0.3821 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 01162: val_acc did not improve from 0.94225\n",
+ "Epoch 1163/100000\n",
+ " - 19s - loss: 0.3597 - acc: 0.9360 - val_loss: 0.3619 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 01163: val_acc did not improve from 0.94225\n",
+ "Epoch 1164/100000\n",
+ " - 19s - loss: 0.3723 - acc: 0.9346 - val_loss: 0.3601 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 01164: val_acc did not improve from 0.94225\n",
+ "Epoch 1165/100000\n",
+ " - 18s - loss: 0.3625 - acc: 0.9358 - val_loss: 0.3693 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01165: val_acc did not improve from 0.94225\n",
+ "Epoch 1166/100000\n",
+ " - 19s - loss: 0.3606 - acc: 0.9363 - val_loss: 0.3890 - val_acc: 0.9250\n",
+ "\n",
+ "Epoch 01166: val_acc did not improve from 0.94225\n",
+ "Epoch 1167/100000\n",
+ " - 18s - loss: 0.3588 - acc: 0.9366 - val_loss: 0.3762 - val_acc: 0.9238\n",
+ "\n",
+ "Epoch 01167: val_acc did not improve from 0.94225\n",
+ "Epoch 1168/100000\n",
+ " - 19s - loss: 0.3590 - acc: 0.9358 - val_loss: 0.3602 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 01168: val_acc did not improve from 0.94225\n",
+ "Epoch 1169/100000\n",
+ " - 18s - loss: 0.3548 - acc: 0.9369 - val_loss: 0.3650 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 01169: val_acc did not improve from 0.94225\n",
+ "Epoch 1170/100000\n",
+ " - 19s - loss: 0.3600 - acc: 0.9356 - val_loss: 0.3917 - val_acc: 0.9190\n",
+ "\n",
+ "Epoch 01170: val_acc did not improve from 0.94225\n",
+ "Epoch 1171/100000\n",
+ " - 19s - loss: 0.3592 - acc: 0.9357 - val_loss: 0.5134 - val_acc: 0.8695\n",
+ "\n",
+ "Epoch 01171: val_acc did not improve from 0.94225\n",
+ "Epoch 1172/100000\n",
+ " - 18s - loss: 0.3580 - acc: 0.9363 - val_loss: 0.3624 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01172: val_acc did not improve from 0.94225\n",
+ "Epoch 1173/100000\n",
+ " - 19s - loss: 0.3577 - acc: 0.9356 - val_loss: 0.3523 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01173: val_acc did not improve from 0.94225\n",
+ "Epoch 1174/100000\n",
+ " - 19s - loss: 0.3579 - acc: 0.9360 - val_loss: 0.3914 - val_acc: 0.9199\n",
+ "\n",
+ "Epoch 01174: val_acc did not improve from 0.94225\n",
+ "Epoch 1175/100000\n",
+ " - 18s - loss: 0.3570 - acc: 0.9364 - val_loss: 0.4204 - val_acc: 0.9030\n",
+ "\n",
+ "Epoch 01175: val_acc did not improve from 0.94225\n",
+ "Epoch 1176/100000\n",
+ " - 19s - loss: 0.3591 - acc: 0.9358 - val_loss: 0.3784 - val_acc: 0.9281\n",
+ "\n",
+ "Epoch 01176: val_acc did not improve from 0.94225\n",
+ "Epoch 1177/100000\n",
+ " - 19s - loss: 0.3616 - acc: 0.9355 - val_loss: 0.3564 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 01177: val_acc did not improve from 0.94225\n",
+ "Epoch 1178/100000\n",
+ " - 18s - loss: 0.3605 - acc: 0.9352 - val_loss: 0.3502 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 01178: val_acc did not improve from 0.94225\n",
+ "Epoch 1179/100000\n",
+ " - 19s - loss: 0.3600 - acc: 0.9356 - val_loss: 0.4329 - val_acc: 0.9030\n",
+ "\n",
+ "Epoch 01179: val_acc did not improve from 0.94225\n",
+ "Epoch 1180/100000\n",
+ " - 18s - loss: 0.3597 - acc: 0.9359 - val_loss: 0.3773 - val_acc: 0.9279\n",
+ "\n",
+ "Epoch 01180: val_acc did not improve from 0.94225\n",
+ "Epoch 1181/100000\n",
+ " - 19s - loss: 0.3578 - acc: 0.9365 - val_loss: 0.3541 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 01181: val_acc did not improve from 0.94225\n",
+ "Epoch 1182/100000\n",
+ " - 19s - loss: 0.3647 - acc: 0.9351 - val_loss: 0.3621 - val_acc: 0.9267\n",
+ "\n",
+ "Epoch 01182: val_acc did not improve from 0.94225\n",
+ "Epoch 1183/100000\n",
+ " - 19s - loss: 0.3602 - acc: 0.9362 - val_loss: 0.3877 - val_acc: 0.9190\n",
+ "\n",
+ "Epoch 01183: val_acc did not improve from 0.94225\n",
+ "Epoch 1184/100000\n",
+ " - 19s - loss: 0.3580 - acc: 0.9358 - val_loss: 0.3559 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 01184: val_acc did not improve from 0.94225\n",
+ "Epoch 1185/100000\n",
+ " - 18s - loss: 0.3565 - acc: 0.9370 - val_loss: 0.4044 - val_acc: 0.9096\n",
+ "\n",
+ "Epoch 01185: val_acc did not improve from 0.94225\n",
+ "Epoch 1186/100000\n",
+ " - 19s - loss: 0.3580 - acc: 0.9352 - val_loss: 0.3478 - val_acc: 0.9392\n",
+ "\n",
+ "Epoch 01186: val_acc did not improve from 0.94225\n",
+ "Epoch 1187/100000\n",
+ " - 19s - loss: 0.3560 - acc: 0.9363 - val_loss: 0.3496 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01187: val_acc did not improve from 0.94225\n",
+ "Epoch 1188/100000\n",
+ " - 19s - loss: 0.3588 - acc: 0.9356 - val_loss: 0.3934 - val_acc: 0.9141\n",
+ "\n",
+ "Epoch 01188: val_acc did not improve from 0.94225\n",
+ "Epoch 1189/100000\n",
+ " - 19s - loss: 0.3583 - acc: 0.9361 - val_loss: 0.4334 - val_acc: 0.8921\n",
+ "\n",
+ "Epoch 01189: val_acc did not improve from 0.94225\n",
+ "Epoch 1190/100000\n",
+ " - 19s - loss: 0.3598 - acc: 0.9358 - val_loss: 0.3973 - val_acc: 0.9169\n",
+ "\n",
+ "Epoch 01190: val_acc did not improve from 0.94225\n",
+ "Epoch 1191/100000\n",
+ " - 19s - loss: 0.3587 - acc: 0.9362 - val_loss: 0.3464 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 01191: val_acc did not improve from 0.94225\n",
+ "Epoch 1192/100000\n",
+ " - 19s - loss: 0.3596 - acc: 0.9361 - val_loss: 0.3521 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 01192: val_acc did not improve from 0.94225\n",
+ "Epoch 1193/100000\n",
+ " - 19s - loss: 0.3621 - acc: 0.9354 - val_loss: 0.4276 - val_acc: 0.9015\n",
+ "\n",
+ "Epoch 01193: val_acc did not improve from 0.94225\n",
+ "Epoch 1194/100000\n",
+ " - 19s - loss: 0.3568 - acc: 0.9363 - val_loss: 0.3681 - val_acc: 0.9269\n",
+ "\n",
+ "Epoch 01194: val_acc did not improve from 0.94225\n",
+ "Epoch 1195/100000\n",
+ " - 19s - loss: 0.3581 - acc: 0.9353 - val_loss: 0.3727 - val_acc: 0.9246\n",
+ "\n",
+ "Epoch 01195: val_acc did not improve from 0.94225\n",
+ "Epoch 1196/100000\n",
+ " - 19s - loss: 0.3595 - acc: 0.9354 - val_loss: 0.3542 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 01196: val_acc did not improve from 0.94225\n",
+ "Epoch 1197/100000\n",
+ " - 19s - loss: 0.3567 - acc: 0.9366 - val_loss: 0.3694 - val_acc: 0.9229\n",
+ "\n",
+ "Epoch 01197: val_acc did not improve from 0.94225\n",
+ "Epoch 1198/100000\n",
+ " - 19s - loss: 0.3587 - acc: 0.9361 - val_loss: 0.3853 - val_acc: 0.9106\n",
+ "\n",
+ "Epoch 01198: val_acc did not improve from 0.94225\n",
+ "Epoch 1199/100000\n",
+ " - 19s - loss: 0.3594 - acc: 0.9362 - val_loss: 0.3656 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 01199: val_acc did not improve from 0.94225\n",
+ "Epoch 1200/100000\n",
+ " - 19s - loss: 0.3636 - acc: 0.9342 - val_loss: 0.3509 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 01200: val_acc did not improve from 0.94225\n",
+ "Epoch 1201/100000\n",
+ " - 18s - loss: 0.3594 - acc: 0.9356 - val_loss: 0.4513 - val_acc: 0.9011\n",
+ "\n",
+ "Epoch 01201: val_acc did not improve from 0.94225\n",
+ "Epoch 1202/100000\n",
+ " - 19s - loss: 0.3575 - acc: 0.9359 - val_loss: 0.3671 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 01202: val_acc did not improve from 0.94225\n",
+ "Epoch 1203/100000\n",
+ " - 18s - loss: 0.3581 - acc: 0.9360 - val_loss: 0.4738 - val_acc: 0.8876\n",
+ "\n",
+ "Epoch 01203: val_acc did not improve from 0.94225\n",
+ "Epoch 1204/100000\n",
+ " - 19s - loss: 0.3630 - acc: 0.9355 - val_loss: 0.3852 - val_acc: 0.9197\n",
+ "\n",
+ "Epoch 01204: val_acc did not improve from 0.94225\n",
+ "Epoch 1205/100000\n",
+ " - 18s - loss: 0.3604 - acc: 0.9362 - val_loss: 0.4052 - val_acc: 0.9134\n",
+ "\n",
+ "Epoch 01205: val_acc did not improve from 0.94225\n",
+ "Epoch 1206/100000\n",
+ " - 19s - loss: 0.3593 - acc: 0.9352 - val_loss: 0.4701 - val_acc: 0.8816\n",
+ "\n",
+ "Epoch 01206: val_acc did not improve from 0.94225\n",
+ "Epoch 1207/100000\n",
+ " - 19s - loss: 0.3571 - acc: 0.9364 - val_loss: 0.3683 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 01207: val_acc did not improve from 0.94225\n",
+ "Epoch 1208/100000\n",
+ " - 18s - loss: 0.3577 - acc: 0.9354 - val_loss: 0.4464 - val_acc: 0.8817\n",
+ "\n",
+ "Epoch 01208: val_acc did not improve from 0.94225\n",
+ "Epoch 1209/100000\n",
+ " - 19s - loss: 0.3583 - acc: 0.9357 - val_loss: 0.3472 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 01209: val_acc did not improve from 0.94225\n",
+ "Epoch 1210/100000\n",
+ " - 19s - loss: 0.3601 - acc: 0.9356 - val_loss: 0.3925 - val_acc: 0.9152\n",
+ "\n",
+ "Epoch 01210: val_acc did not improve from 0.94225\n",
+ "Epoch 1211/100000\n",
+ " - 19s - loss: 0.3618 - acc: 0.9353 - val_loss: 0.3874 - val_acc: 0.9238\n",
+ "\n",
+ "Epoch 01211: val_acc did not improve from 0.94225\n",
+ "Epoch 1212/100000\n",
+ " - 19s - loss: 0.3610 - acc: 0.9359 - val_loss: 0.3558 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 01212: val_acc did not improve from 0.94225\n",
+ "Epoch 1213/100000\n",
+ " - 19s - loss: 0.3574 - acc: 0.9365 - val_loss: 0.3701 - val_acc: 0.9213\n",
+ "\n",
+ "Epoch 01213: val_acc did not improve from 0.94225\n",
+ "Epoch 1214/100000\n",
+ " - 19s - loss: 0.3567 - acc: 0.9364 - val_loss: 0.3620 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01214: val_acc did not improve from 0.94225\n",
+ "Epoch 1215/100000\n",
+ " - 19s - loss: 0.3576 - acc: 0.9362 - val_loss: 0.3854 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 01215: val_acc did not improve from 0.94225\n",
+ "Epoch 1216/100000\n",
+ " - 19s - loss: 0.3570 - acc: 0.9363 - val_loss: 0.3704 - val_acc: 0.9280\n",
+ "\n",
+ "Epoch 01216: val_acc did not improve from 0.94225\n",
+ "Epoch 1217/100000\n",
+ " - 19s - loss: 0.3601 - acc: 0.9358 - val_loss: 0.3468 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 01217: val_acc did not improve from 0.94225\n",
+ "Epoch 1218/100000\n",
+ " - 19s - loss: 0.3587 - acc: 0.9353 - val_loss: 0.3458 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 01218: val_acc did not improve from 0.94225\n",
+ "Epoch 1219/100000\n",
+ " - 18s - loss: 0.3593 - acc: 0.9363 - val_loss: 0.3549 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 01219: val_acc did not improve from 0.94225\n",
+ "Epoch 1220/100000\n",
+ " - 19s - loss: 0.3592 - acc: 0.9364 - val_loss: 0.3557 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 01220: val_acc did not improve from 0.94225\n",
+ "Epoch 1221/100000\n",
+ " - 18s - loss: 0.3588 - acc: 0.9360 - val_loss: 0.3977 - val_acc: 0.9138\n",
+ "\n",
+ "Epoch 01221: val_acc did not improve from 0.94225\n",
+ "Epoch 1222/100000\n",
+ " - 18s - loss: 0.3586 - acc: 0.9362 - val_loss: 0.3557 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 01222: val_acc did not improve from 0.94225\n",
+ "Epoch 1223/100000\n",
+ " - 19s - loss: 0.3636 - acc: 0.9364 - val_loss: 0.3560 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01223: val_acc did not improve from 0.94225\n",
+ "Epoch 1224/100000\n",
+ " - 18s - loss: 0.3623 - acc: 0.9364 - val_loss: 0.3531 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 01224: val_acc did not improve from 0.94225\n",
+ "Epoch 1225/100000\n",
+ " - 19s - loss: 0.3585 - acc: 0.9361 - val_loss: 0.3497 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 01225: val_acc did not improve from 0.94225\n",
+ "Epoch 1226/100000\n",
+ " - 18s - loss: 0.3593 - acc: 0.9351 - val_loss: 0.3582 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 01226: val_acc did not improve from 0.94225\n",
+ "Epoch 1227/100000\n",
+ " - 18s - loss: 0.3575 - acc: 0.9360 - val_loss: 0.3502 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 01227: val_acc did not improve from 0.94225\n",
+ "Epoch 1228/100000\n",
+ " - 18s - loss: 0.3587 - acc: 0.9359 - val_loss: 0.3858 - val_acc: 0.9169\n",
+ "\n",
+ "Epoch 01228: val_acc did not improve from 0.94225\n",
+ "Epoch 1229/100000\n",
+ " - 18s - loss: 0.3568 - acc: 0.9363 - val_loss: 0.5091 - val_acc: 0.8639\n",
+ "\n",
+ "Epoch 01229: val_acc did not improve from 0.94225\n",
+ "Epoch 1230/100000\n",
+ " - 19s - loss: 0.3594 - acc: 0.9354 - val_loss: 0.3561 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 01230: val_acc did not improve from 0.94225\n",
+ "Epoch 1231/100000\n",
+ " - 19s - loss: 0.3598 - acc: 0.9356 - val_loss: 0.3674 - val_acc: 0.9269\n",
+ "\n",
+ "Epoch 01231: val_acc did not improve from 0.94225\n",
+ "Epoch 1232/100000\n",
+ " - 18s - loss: 0.3609 - acc: 0.9355 - val_loss: 0.3586 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 01232: val_acc did not improve from 0.94225\n",
+ "Epoch 1233/100000\n",
+ " - 18s - loss: 0.3581 - acc: 0.9361 - val_loss: 0.3548 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 01233: val_acc did not improve from 0.94225\n",
+ "Epoch 1234/100000\n",
+ " - 19s - loss: 0.3586 - acc: 0.9361 - val_loss: 0.4034 - val_acc: 0.9129\n",
+ "\n",
+ "Epoch 01234: val_acc did not improve from 0.94225\n",
+ "Epoch 1235/100000\n",
+ " - 18s - loss: 0.3548 - acc: 0.9369 - val_loss: 0.3567 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 01235: val_acc did not improve from 0.94225\n",
+ "Epoch 1236/100000\n",
+ " - 19s - loss: 0.3588 - acc: 0.9359 - val_loss: 0.3822 - val_acc: 0.9181\n",
+ "\n",
+ "Epoch 01236: val_acc did not improve from 0.94225\n",
+ "Epoch 1237/100000\n",
+ " - 19s - loss: 0.3576 - acc: 0.9356 - val_loss: 0.4058 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 01237: val_acc did not improve from 0.94225\n",
+ "Epoch 1238/100000\n",
+ " - 19s - loss: 0.3600 - acc: 0.9361 - val_loss: 0.3624 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 01238: val_acc did not improve from 0.94225\n",
+ "Epoch 1239/100000\n",
+ " - 19s - loss: 0.3645 - acc: 0.9360 - val_loss: 0.3573 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 01239: val_acc did not improve from 0.94225\n",
+ "Epoch 1240/100000\n",
+ " - 19s - loss: 0.3621 - acc: 0.9357 - val_loss: 0.4141 - val_acc: 0.9023\n",
+ "\n",
+ "Epoch 01240: val_acc did not improve from 0.94225\n",
+ "Epoch 1241/100000\n",
+ " - 19s - loss: 0.3652 - acc: 0.9360 - val_loss: 0.3802 - val_acc: 0.9290\n",
+ "\n",
+ "Epoch 01241: val_acc did not improve from 0.94225\n",
+ "Epoch 1242/100000\n",
+ " - 19s - loss: 0.3644 - acc: 0.9354 - val_loss: 0.3492 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 01242: val_acc did not improve from 0.94225\n",
+ "Epoch 1243/100000\n",
+ " - 19s - loss: 0.3598 - acc: 0.9357 - val_loss: 0.3558 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 01243: val_acc did not improve from 0.94225\n",
+ "Epoch 1244/100000\n",
+ " - 19s - loss: 0.3570 - acc: 0.9363 - val_loss: 0.3535 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 01244: val_acc did not improve from 0.94225\n",
+ "Epoch 1245/100000\n",
+ " - 19s - loss: 0.3564 - acc: 0.9358 - val_loss: 0.3835 - val_acc: 0.9206\n",
+ "\n",
+ "Epoch 01245: val_acc did not improve from 0.94225\n",
+ "Epoch 1246/100000\n",
+ " - 19s - loss: 0.3597 - acc: 0.9356 - val_loss: 0.4218 - val_acc: 0.8994\n",
+ "\n",
+ "Epoch 01246: val_acc did not improve from 0.94225\n",
+ "Epoch 1247/100000\n",
+ " - 19s - loss: 0.3579 - acc: 0.9356 - val_loss: 0.3473 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 01247: val_acc did not improve from 0.94225\n",
+ "Epoch 1248/100000\n",
+ " - 19s - loss: 0.3587 - acc: 0.9352 - val_loss: 0.3813 - val_acc: 0.9178\n",
+ "\n",
+ "Epoch 01248: val_acc did not improve from 0.94225\n",
+ "Epoch 1249/100000\n",
+ " - 19s - loss: 0.3558 - acc: 0.9365 - val_loss: 0.3492 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01249: val_acc did not improve from 0.94225\n",
+ "Epoch 1250/100000\n",
+ " - 19s - loss: 0.3608 - acc: 0.9362 - val_loss: 0.3969 - val_acc: 0.9146\n",
+ "\n",
+ "Epoch 01250: val_acc did not improve from 0.94225\n",
+ "Epoch 1251/100000\n",
+ " - 19s - loss: 0.3637 - acc: 0.9357 - val_loss: 0.3599 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 01251: val_acc did not improve from 0.94225\n",
+ "Epoch 1252/100000\n",
+ " - 18s - loss: 0.3621 - acc: 0.9354 - val_loss: 0.3641 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 01252: val_acc did not improve from 0.94225\n",
+ "Epoch 1253/100000\n",
+ " - 19s - loss: 0.3582 - acc: 0.9358 - val_loss: 0.3521 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 01253: val_acc did not improve from 0.94225\n",
+ "Epoch 1254/100000\n",
+ " - 18s - loss: 0.3621 - acc: 0.9345 - val_loss: 0.3537 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 01254: val_acc did not improve from 0.94225\n",
+ "Epoch 1255/100000\n",
+ " - 18s - loss: 0.3571 - acc: 0.9357 - val_loss: 0.3619 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01255: val_acc did not improve from 0.94225\n",
+ "Epoch 1256/100000\n",
+ " - 18s - loss: 0.3549 - acc: 0.9362 - val_loss: 0.3444 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 01256: val_acc did not improve from 0.94225\n",
+ "Epoch 1257/100000\n",
+ " - 18s - loss: 0.3567 - acc: 0.9360 - val_loss: 0.3557 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 01257: val_acc did not improve from 0.94225\n",
+ "Epoch 1258/100000\n",
+ " - 19s - loss: 0.3602 - acc: 0.9359 - val_loss: 0.3829 - val_acc: 0.9226\n",
+ "\n",
+ "Epoch 01258: val_acc did not improve from 0.94225\n",
+ "Epoch 1259/100000\n",
+ " - 19s - loss: 0.3605 - acc: 0.9357 - val_loss: 0.3874 - val_acc: 0.9197\n",
+ "\n",
+ "Epoch 01259: val_acc did not improve from 0.94225\n",
+ "Epoch 1260/100000\n",
+ " - 19s - loss: 0.3556 - acc: 0.9366 - val_loss: 0.4037 - val_acc: 0.9101\n",
+ "\n",
+ "Epoch 01260: val_acc did not improve from 0.94225\n",
+ "Epoch 1261/100000\n",
+ " - 19s - loss: 0.3573 - acc: 0.9354 - val_loss: 0.3705 - val_acc: 0.9196\n",
+ "\n",
+ "Epoch 01261: val_acc did not improve from 0.94225\n",
+ "Epoch 1262/100000\n",
+ " - 18s - loss: 0.3577 - acc: 0.9354 - val_loss: 0.3475 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 01262: val_acc did not improve from 0.94225\n",
+ "Epoch 1263/100000\n",
+ " - 19s - loss: 0.3581 - acc: 0.9355 - val_loss: 0.3562 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 01263: val_acc did not improve from 0.94225\n",
+ "Epoch 1264/100000\n",
+ " - 19s - loss: 0.3579 - acc: 0.9351 - val_loss: 0.3526 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 01264: val_acc did not improve from 0.94225\n",
+ "Epoch 1265/100000\n",
+ " - 19s - loss: 0.3563 - acc: 0.9352 - val_loss: 0.5401 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 01265: val_acc did not improve from 0.94225\n",
+ "Epoch 1266/100000\n",
+ " - 18s - loss: 0.3538 - acc: 0.9368 - val_loss: 0.3755 - val_acc: 0.9176\n",
+ "\n",
+ "Epoch 01266: val_acc did not improve from 0.94225\n",
+ "Epoch 1267/100000\n",
+ " - 19s - loss: 0.3590 - acc: 0.9358 - val_loss: 0.3439 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 01267: val_acc did not improve from 0.94225\n",
+ "Epoch 1268/100000\n",
+ " - 20s - loss: 0.3589 - acc: 0.9359 - val_loss: 0.3707 - val_acc: 0.9289\n",
+ "\n",
+ "Epoch 01268: val_acc did not improve from 0.94225\n",
+ "Epoch 1269/100000\n",
+ " - 19s - loss: 0.3557 - acc: 0.9364 - val_loss: 0.3529 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 01269: val_acc did not improve from 0.94225\n",
+ "Epoch 1270/100000\n",
+ " - 19s - loss: 0.3576 - acc: 0.9351 - val_loss: 0.3546 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 01270: val_acc did not improve from 0.94225\n",
+ "Epoch 1271/100000\n",
+ " - 18s - loss: 0.3602 - acc: 0.9360 - val_loss: 0.3715 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01271: val_acc did not improve from 0.94225\n",
+ "Epoch 1272/100000\n",
+ " - 19s - loss: 0.3581 - acc: 0.9358 - val_loss: 0.3762 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 01272: val_acc did not improve from 0.94225\n",
+ "Epoch 1273/100000\n",
+ " - 19s - loss: 0.3562 - acc: 0.9365 - val_loss: 0.3410 - val_acc: 0.9407\n",
+ "\n",
+ "Epoch 01273: val_acc did not improve from 0.94225\n",
+ "Epoch 1274/100000\n",
+ " - 18s - loss: 0.3565 - acc: 0.9356 - val_loss: 0.3539 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 01274: val_acc did not improve from 0.94225\n",
+ "Epoch 1275/100000\n",
+ " - 19s - loss: 0.3545 - acc: 0.9364 - val_loss: 0.3632 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 01275: val_acc did not improve from 0.94225\n",
+ "Epoch 1276/100000\n",
+ " - 19s - loss: 0.3568 - acc: 0.9360 - val_loss: 0.4181 - val_acc: 0.9032\n",
+ "\n",
+ "Epoch 01276: val_acc did not improve from 0.94225\n",
+ "Epoch 1277/100000\n",
+ " - 18s - loss: 0.3574 - acc: 0.9364 - val_loss: 0.4111 - val_acc: 0.9107\n",
+ "\n",
+ "Epoch 01277: val_acc did not improve from 0.94225\n",
+ "Epoch 1278/100000\n",
+ " - 18s - loss: 0.3598 - acc: 0.9349 - val_loss: 0.5373 - val_acc: 0.8367\n",
+ "\n",
+ "Epoch 01278: val_acc did not improve from 0.94225\n",
+ "Epoch 1279/100000\n",
+ " - 19s - loss: 0.3550 - acc: 0.9369 - val_loss: 0.5288 - val_acc: 0.8457\n",
+ "\n",
+ "Epoch 01279: val_acc did not improve from 0.94225\n",
+ "Epoch 1280/100000\n",
+ " - 18s - loss: 0.3568 - acc: 0.9362 - val_loss: 0.3934 - val_acc: 0.9126\n",
+ "\n",
+ "Epoch 01280: val_acc did not improve from 0.94225\n",
+ "Epoch 1281/100000\n",
+ " - 20s - loss: 0.3569 - acc: 0.9364 - val_loss: 0.3653 - val_acc: 0.9251\n",
+ "\n",
+ "Epoch 01281: val_acc did not improve from 0.94225\n",
+ "Epoch 1282/100000\n",
+ " - 19s - loss: 0.3610 - acc: 0.9351 - val_loss: 0.3441 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 01282: val_acc did not improve from 0.94225\n",
+ "Epoch 1283/100000\n",
+ " - 19s - loss: 0.3596 - acc: 0.9360 - val_loss: 0.4125 - val_acc: 0.9167\n",
+ "\n",
+ "Epoch 01283: val_acc did not improve from 0.94225\n",
+ "Epoch 1284/100000\n",
+ " - 19s - loss: 0.3595 - acc: 0.9356 - val_loss: 0.3550 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 01284: val_acc did not improve from 0.94225\n",
+ "Epoch 1285/100000\n",
+ " - 19s - loss: 0.3580 - acc: 0.9361 - val_loss: 0.3451 - val_acc: 0.9409\n",
+ "\n",
+ "Epoch 01285: val_acc did not improve from 0.94225\n",
+ "Epoch 1286/100000\n",
+ " - 19s - loss: 0.3580 - acc: 0.9358 - val_loss: 0.4535 - val_acc: 0.8897\n",
+ "\n",
+ "Epoch 01286: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 01286: ReduceLROnPlateau reducing learning rate to 0.000735091819660738.\n",
+ "Epoch 1287/100000\n",
+ " - 19s - loss: 0.3520 - acc: 0.9362 - val_loss: 0.3907 - val_acc: 0.9232\n",
+ "\n",
+ "Epoch 01287: val_acc did not improve from 0.94225\n",
+ "Epoch 1288/100000\n",
+ " - 19s - loss: 0.3477 - acc: 0.9366 - val_loss: 0.3336 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 01288: val_acc did not improve from 0.94225\n",
+ "Epoch 1289/100000\n",
+ " - 18s - loss: 0.3484 - acc: 0.9362 - val_loss: 0.3497 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 01289: val_acc did not improve from 0.94225\n",
+ "Epoch 1290/100000\n",
+ " - 19s - loss: 0.3471 - acc: 0.9368 - val_loss: 0.3646 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 01290: val_acc did not improve from 0.94225\n",
+ "Epoch 1291/100000\n",
+ " - 18s - loss: 0.3487 - acc: 0.9361 - val_loss: 0.3363 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 01291: val_acc did not improve from 0.94225\n",
+ "Epoch 1292/100000\n",
+ " - 19s - loss: 0.3510 - acc: 0.9361 - val_loss: 0.3603 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 01292: val_acc did not improve from 0.94225\n",
+ "Epoch 1293/100000\n",
+ " - 19s - loss: 0.3490 - acc: 0.9367 - val_loss: 0.4813 - val_acc: 0.8736\n",
+ "\n",
+ "Epoch 01293: val_acc did not improve from 0.94225\n",
+ "Epoch 1294/100000\n",
+ " - 19s - loss: 0.3482 - acc: 0.9363 - val_loss: 0.3447 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01294: val_acc did not improve from 0.94225\n",
+ "Epoch 1295/100000\n",
+ " - 19s - loss: 0.3506 - acc: 0.9364 - val_loss: 0.3877 - val_acc: 0.9054\n",
+ "\n",
+ "Epoch 01295: val_acc did not improve from 0.94225\n",
+ "Epoch 1296/100000\n",
+ " - 19s - loss: 0.3538 - acc: 0.9353 - val_loss: 0.4202 - val_acc: 0.8993\n",
+ "\n",
+ "Epoch 01296: val_acc did not improve from 0.94225\n",
+ "Epoch 1297/100000\n",
+ " - 19s - loss: 0.3494 - acc: 0.9369 - val_loss: 0.3981 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 01297: val_acc did not improve from 0.94225\n",
+ "Epoch 1298/100000\n",
+ " - 19s - loss: 0.3486 - acc: 0.9365 - val_loss: 0.3663 - val_acc: 0.9203\n",
+ "\n",
+ "Epoch 01298: val_acc did not improve from 0.94225\n",
+ "Epoch 1299/100000\n",
+ " - 19s - loss: 0.3506 - acc: 0.9359 - val_loss: 0.3596 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 01299: val_acc did not improve from 0.94225\n",
+ "Epoch 1300/100000\n",
+ " - 19s - loss: 0.3494 - acc: 0.9360 - val_loss: 0.3594 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 01300: val_acc did not improve from 0.94225\n",
+ "Epoch 1301/100000\n",
+ " - 19s - loss: 0.3512 - acc: 0.9353 - val_loss: 0.3475 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 01301: val_acc did not improve from 0.94225\n",
+ "Epoch 1302/100000\n",
+ " - 19s - loss: 0.3484 - acc: 0.9368 - val_loss: 0.4905 - val_acc: 0.8686\n",
+ "\n",
+ "Epoch 01302: val_acc did not improve from 0.94225\n",
+ "Epoch 1303/100000\n",
+ " - 19s - loss: 0.3524 - acc: 0.9360 - val_loss: 0.3535 - val_acc: 0.9271\n",
+ "\n",
+ "Epoch 01303: val_acc did not improve from 0.94225\n",
+ "Epoch 1304/100000\n",
+ " - 19s - loss: 0.3479 - acc: 0.9365 - val_loss: 0.4334 - val_acc: 0.8951\n",
+ "\n",
+ "Epoch 01304: val_acc did not improve from 0.94225\n",
+ "Epoch 1305/100000\n",
+ " - 19s - loss: 0.3511 - acc: 0.9361 - val_loss: 0.3498 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 01305: val_acc did not improve from 0.94225\n",
+ "Epoch 1306/100000\n",
+ " - 19s - loss: 0.3513 - acc: 0.9365 - val_loss: 0.3516 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 01306: val_acc did not improve from 0.94225\n",
+ "Epoch 1307/100000\n",
+ " - 19s - loss: 0.3503 - acc: 0.9365 - val_loss: 0.4595 - val_acc: 0.8725\n",
+ "\n",
+ "Epoch 01307: val_acc did not improve from 0.94225\n",
+ "Epoch 1308/100000\n",
+ " - 18s - loss: 0.3502 - acc: 0.9364 - val_loss: 0.3387 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 01308: val_acc did not improve from 0.94225\n",
+ "Epoch 1309/100000\n",
+ " - 19s - loss: 0.3493 - acc: 0.9358 - val_loss: 0.3703 - val_acc: 0.9181\n",
+ "\n",
+ "Epoch 01309: val_acc did not improve from 0.94225\n",
+ "Epoch 1310/100000\n",
+ " - 19s - loss: 0.3502 - acc: 0.9359 - val_loss: 0.3550 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 01310: val_acc did not improve from 0.94225\n",
+ "Epoch 1311/100000\n",
+ " - 18s - loss: 0.3490 - acc: 0.9361 - val_loss: 0.3437 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01311: val_acc did not improve from 0.94225\n",
+ "Epoch 1312/100000\n",
+ " - 19s - loss: 0.3495 - acc: 0.9362 - val_loss: 0.5847 - val_acc: 0.8269\n",
+ "\n",
+ "Epoch 01312: val_acc did not improve from 0.94225\n",
+ "Epoch 1313/100000\n",
+ " - 19s - loss: 0.3522 - acc: 0.9358 - val_loss: 0.4976 - val_acc: 0.8589\n",
+ "\n",
+ "Epoch 01313: val_acc did not improve from 0.94225\n",
+ "Epoch 1314/100000\n",
+ " - 19s - loss: 0.3546 - acc: 0.9355 - val_loss: 0.3692 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 01314: val_acc did not improve from 0.94225\n",
+ "Epoch 1315/100000\n",
+ " - 19s - loss: 0.3506 - acc: 0.9359 - val_loss: 0.3653 - val_acc: 0.9217\n",
+ "\n",
+ "Epoch 01315: val_acc did not improve from 0.94225\n",
+ "Epoch 1316/100000\n",
+ " - 19s - loss: 0.3601 - acc: 0.9355 - val_loss: 0.3492 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01316: val_acc did not improve from 0.94225\n",
+ "Epoch 1317/100000\n",
+ " - 19s - loss: 0.3491 - acc: 0.9371 - val_loss: 0.3431 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 01317: val_acc did not improve from 0.94225\n",
+ "Epoch 1318/100000\n",
+ " - 19s - loss: 0.3510 - acc: 0.9359 - val_loss: 0.3548 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 01318: val_acc did not improve from 0.94225\n",
+ "Epoch 1319/100000\n",
+ " - 19s - loss: 0.3505 - acc: 0.9360 - val_loss: 0.3685 - val_acc: 0.9247\n",
+ "\n",
+ "Epoch 01319: val_acc did not improve from 0.94225\n",
+ "Epoch 1320/100000\n",
+ " - 19s - loss: 0.3478 - acc: 0.9367 - val_loss: 0.4238 - val_acc: 0.9019\n",
+ "\n",
+ "Epoch 01320: val_acc did not improve from 0.94225\n",
+ "Epoch 1321/100000\n",
+ " - 19s - loss: 0.3495 - acc: 0.9361 - val_loss: 0.3664 - val_acc: 0.9227\n",
+ "\n",
+ "Epoch 01321: val_acc did not improve from 0.94225\n",
+ "Epoch 1322/100000\n",
+ " - 19s - loss: 0.3485 - acc: 0.9366 - val_loss: 0.3918 - val_acc: 0.9128\n",
+ "\n",
+ "Epoch 01322: val_acc did not improve from 0.94225\n",
+ "Epoch 1323/100000\n",
+ " - 19s - loss: 0.3471 - acc: 0.9362 - val_loss: 0.3484 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01323: val_acc did not improve from 0.94225\n",
+ "Epoch 1324/100000\n",
+ " - 19s - loss: 0.3509 - acc: 0.9363 - val_loss: 0.3639 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 01324: val_acc did not improve from 0.94225\n",
+ "Epoch 1325/100000\n",
+ " - 18s - loss: 0.3497 - acc: 0.9361 - val_loss: 0.3365 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 01325: val_acc did not improve from 0.94225\n",
+ "Epoch 1326/100000\n",
+ " - 19s - loss: 0.3508 - acc: 0.9354 - val_loss: 0.3898 - val_acc: 0.9152\n",
+ "\n",
+ "Epoch 01326: val_acc did not improve from 0.94225\n",
+ "Epoch 1327/100000\n",
+ " - 19s - loss: 0.3467 - acc: 0.9368 - val_loss: 0.3324 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 01327: val_acc did not improve from 0.94225\n",
+ "Epoch 1328/100000\n",
+ " - 19s - loss: 0.3472 - acc: 0.9366 - val_loss: 0.4140 - val_acc: 0.9076\n",
+ "\n",
+ "Epoch 01328: val_acc did not improve from 0.94225\n",
+ "Epoch 1329/100000\n",
+ " - 18s - loss: 0.3501 - acc: 0.9362 - val_loss: 0.3408 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 01329: val_acc did not improve from 0.94225\n",
+ "Epoch 1330/100000\n",
+ " - 18s - loss: 0.3471 - acc: 0.9369 - val_loss: 0.3520 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 01330: val_acc did not improve from 0.94225\n",
+ "Epoch 1331/100000\n",
+ " - 19s - loss: 0.3495 - acc: 0.9358 - val_loss: 0.3488 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01331: val_acc did not improve from 0.94225\n",
+ "Epoch 1332/100000\n",
+ " - 19s - loss: 0.3477 - acc: 0.9366 - val_loss: 0.3378 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 01332: val_acc did not improve from 0.94225\n",
+ "Epoch 1333/100000\n",
+ " - 19s - loss: 0.3461 - acc: 0.9375 - val_loss: 0.3621 - val_acc: 0.9226\n",
+ "\n",
+ "Epoch 01333: val_acc did not improve from 0.94225\n",
+ "Epoch 1334/100000\n",
+ " - 19s - loss: 0.3496 - acc: 0.9362 - val_loss: 0.3540 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 01334: val_acc did not improve from 0.94225\n",
+ "Epoch 1335/100000\n",
+ " - 18s - loss: 0.3498 - acc: 0.9358 - val_loss: 0.3483 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 01335: val_acc did not improve from 0.94225\n",
+ "Epoch 1336/100000\n",
+ " - 19s - loss: 0.3528 - acc: 0.9359 - val_loss: 0.3635 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 01336: val_acc did not improve from 0.94225\n",
+ "Epoch 1337/100000\n",
+ " - 19s - loss: 0.3522 - acc: 0.9358 - val_loss: 0.3385 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 01337: val_acc did not improve from 0.94225\n",
+ "Epoch 1338/100000\n",
+ " - 19s - loss: 0.3488 - acc: 0.9373 - val_loss: 0.3531 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 01338: val_acc did not improve from 0.94225\n",
+ "Epoch 1339/100000\n",
+ " - 18s - loss: 0.3497 - acc: 0.9363 - val_loss: 0.3799 - val_acc: 0.9136\n",
+ "\n",
+ "Epoch 01339: val_acc did not improve from 0.94225\n",
+ "Epoch 1340/100000\n",
+ " - 19s - loss: 0.3504 - acc: 0.9361 - val_loss: 0.3649 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 01340: val_acc did not improve from 0.94225\n",
+ "Epoch 1341/100000\n",
+ " - 19s - loss: 0.3517 - acc: 0.9361 - val_loss: 0.3956 - val_acc: 0.9161\n",
+ "\n",
+ "Epoch 01341: val_acc did not improve from 0.94225\n",
+ "Epoch 1342/100000\n",
+ " - 18s - loss: 0.3510 - acc: 0.9367 - val_loss: 0.3512 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 01342: val_acc did not improve from 0.94225\n",
+ "Epoch 1343/100000\n",
+ " - 18s - loss: 0.3492 - acc: 0.9368 - val_loss: 0.3510 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 01343: val_acc did not improve from 0.94225\n",
+ "Epoch 1344/100000\n",
+ " - 19s - loss: 0.3507 - acc: 0.9363 - val_loss: 0.3877 - val_acc: 0.9195\n",
+ "\n",
+ "Epoch 01344: val_acc did not improve from 0.94225\n",
+ "Epoch 1345/100000\n",
+ " - 18s - loss: 0.3487 - acc: 0.9362 - val_loss: 0.3785 - val_acc: 0.9118\n",
+ "\n",
+ "Epoch 01345: val_acc did not improve from 0.94225\n",
+ "Epoch 1346/100000\n",
+ " - 19s - loss: 0.3520 - acc: 0.9361 - val_loss: 0.3456 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 01346: val_acc did not improve from 0.94225\n",
+ "Epoch 1347/100000\n",
+ " - 18s - loss: 0.3491 - acc: 0.9365 - val_loss: 0.3323 - val_acc: 0.9415\n",
+ "\n",
+ "Epoch 01347: val_acc did not improve from 0.94225\n",
+ "Epoch 1348/100000\n",
+ " - 19s - loss: 0.3467 - acc: 0.9373 - val_loss: 0.3378 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 01348: val_acc did not improve from 0.94225\n",
+ "Epoch 1349/100000\n",
+ " - 19s - loss: 0.3503 - acc: 0.9358 - val_loss: 0.3460 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 01349: val_acc did not improve from 0.94225\n",
+ "Epoch 1350/100000\n",
+ " - 19s - loss: 0.3487 - acc: 0.9357 - val_loss: 0.3574 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01350: val_acc did not improve from 0.94225\n",
+ "Epoch 1351/100000\n",
+ " - 19s - loss: 0.3495 - acc: 0.9361 - val_loss: 0.3418 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 01351: val_acc did not improve from 0.94225\n",
+ "Epoch 1352/100000\n",
+ " - 19s - loss: 0.3471 - acc: 0.9365 - val_loss: 0.3556 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 01352: val_acc did not improve from 0.94225\n",
+ "Epoch 1353/100000\n",
+ " - 18s - loss: 0.3534 - acc: 0.9354 - val_loss: 0.4185 - val_acc: 0.9064\n",
+ "\n",
+ "Epoch 01353: val_acc did not improve from 0.94225\n",
+ "Epoch 1354/100000\n",
+ " - 19s - loss: 0.3492 - acc: 0.9369 - val_loss: 0.3780 - val_acc: 0.9179\n",
+ "\n",
+ "Epoch 01354: val_acc did not improve from 0.94225\n",
+ "Epoch 1355/100000\n",
+ " - 19s - loss: 0.3530 - acc: 0.9364 - val_loss: 0.3556 - val_acc: 0.9267\n",
+ "\n",
+ "Epoch 01355: val_acc did not improve from 0.94225\n",
+ "Epoch 1356/100000\n",
+ " - 18s - loss: 0.3495 - acc: 0.9364 - val_loss: 0.3535 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 01356: val_acc did not improve from 0.94225\n",
+ "Epoch 1357/100000\n",
+ " - 20s - loss: 0.3484 - acc: 0.9370 - val_loss: 0.4011 - val_acc: 0.9021\n",
+ "\n",
+ "Epoch 01357: val_acc did not improve from 0.94225\n",
+ "Epoch 1358/100000\n",
+ " - 19s - loss: 0.3522 - acc: 0.9360 - val_loss: 0.3770 - val_acc: 0.9220\n",
+ "\n",
+ "Epoch 01358: val_acc did not improve from 0.94225\n",
+ "Epoch 1359/100000\n",
+ " - 19s - loss: 0.3494 - acc: 0.9363 - val_loss: 0.3472 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 01359: val_acc did not improve from 0.94225\n",
+ "Epoch 1360/100000\n",
+ " - 19s - loss: 0.3486 - acc: 0.9367 - val_loss: 0.3615 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 01360: val_acc did not improve from 0.94225\n",
+ "Epoch 1361/100000\n",
+ " - 19s - loss: 0.3492 - acc: 0.9362 - val_loss: 0.4447 - val_acc: 0.8936\n",
+ "\n",
+ "Epoch 01361: val_acc did not improve from 0.94225\n",
+ "Epoch 1362/100000\n",
+ " - 19s - loss: 0.3480 - acc: 0.9365 - val_loss: 0.3556 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 01362: val_acc did not improve from 0.94225\n",
+ "Epoch 1363/100000\n",
+ " - 19s - loss: 0.3500 - acc: 0.9361 - val_loss: 0.3450 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 01363: val_acc did not improve from 0.94225\n",
+ "Epoch 1364/100000\n",
+ " - 18s - loss: 0.3499 - acc: 0.9366 - val_loss: 0.3697 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 01364: val_acc did not improve from 0.94225\n",
+ "Epoch 1365/100000\n",
+ " - 19s - loss: 0.3481 - acc: 0.9362 - val_loss: 0.3614 - val_acc: 0.9202\n",
+ "\n",
+ "Epoch 01365: val_acc did not improve from 0.94225\n",
+ "Epoch 1366/100000\n",
+ " - 18s - loss: 0.3520 - acc: 0.9356 - val_loss: 0.3464 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 01366: val_acc did not improve from 0.94225\n",
+ "Epoch 1367/100000\n",
+ " - 19s - loss: 0.3573 - acc: 0.9357 - val_loss: 0.4283 - val_acc: 0.8999\n",
+ "\n",
+ "Epoch 01367: val_acc did not improve from 0.94225\n",
+ "Epoch 1368/100000\n",
+ " - 19s - loss: 0.3488 - acc: 0.9369 - val_loss: 0.3501 - val_acc: 0.9258\n",
+ "\n",
+ "Epoch 01368: val_acc did not improve from 0.94225\n",
+ "Epoch 1369/100000\n",
+ " - 18s - loss: 0.3482 - acc: 0.9367 - val_loss: 0.4369 - val_acc: 0.8915\n",
+ "\n",
+ "Epoch 01369: val_acc did not improve from 0.94225\n",
+ "Epoch 1370/100000\n",
+ " - 19s - loss: 0.3479 - acc: 0.9363 - val_loss: 0.4671 - val_acc: 0.8727\n",
+ "\n",
+ "Epoch 01370: val_acc did not improve from 0.94225\n",
+ "Epoch 1371/100000\n",
+ " - 18s - loss: 0.3489 - acc: 0.9359 - val_loss: 0.3383 - val_acc: 0.9377\n",
+ "\n",
+ "Epoch 01371: val_acc did not improve from 0.94225\n",
+ "Epoch 1372/100000\n",
+ " - 19s - loss: 0.3481 - acc: 0.9365 - val_loss: 0.3334 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 01372: val_acc did not improve from 0.94225\n",
+ "Epoch 1373/100000\n",
+ " - 18s - loss: 0.3484 - acc: 0.9359 - val_loss: 0.3782 - val_acc: 0.9281\n",
+ "\n",
+ "Epoch 01373: val_acc did not improve from 0.94225\n",
+ "Epoch 1374/100000\n",
+ " - 19s - loss: 0.3489 - acc: 0.9372 - val_loss: 0.3432 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 01374: val_acc did not improve from 0.94225\n",
+ "Epoch 1375/100000\n",
+ " - 18s - loss: 0.3507 - acc: 0.9366 - val_loss: 0.4488 - val_acc: 0.8877\n",
+ "\n",
+ "Epoch 01375: val_acc did not improve from 0.94225\n",
+ "Epoch 1376/100000\n",
+ " - 19s - loss: 0.3525 - acc: 0.9355 - val_loss: 0.3381 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 01376: val_acc did not improve from 0.94225\n",
+ "Epoch 1377/100000\n",
+ " - 18s - loss: 0.3502 - acc: 0.9362 - val_loss: 0.3431 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 01377: val_acc did not improve from 0.94225\n",
+ "Epoch 1378/100000\n",
+ " - 19s - loss: 0.3481 - acc: 0.9369 - val_loss: 0.4166 - val_acc: 0.9187\n",
+ "\n",
+ "Epoch 01378: val_acc did not improve from 0.94225\n",
+ "Epoch 1379/100000\n",
+ " - 18s - loss: 0.3491 - acc: 0.9363 - val_loss: 0.3777 - val_acc: 0.9246\n",
+ "\n",
+ "Epoch 01379: val_acc did not improve from 0.94225\n",
+ "Epoch 1380/100000\n",
+ " - 19s - loss: 0.3493 - acc: 0.9365 - val_loss: 0.3459 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 01380: val_acc did not improve from 0.94225\n",
+ "Epoch 1381/100000\n",
+ " - 19s - loss: 0.3553 - acc: 0.9357 - val_loss: 0.3466 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01381: val_acc did not improve from 0.94225\n",
+ "Epoch 1382/100000\n",
+ " - 18s - loss: 0.3523 - acc: 0.9364 - val_loss: 0.3564 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 01382: val_acc did not improve from 0.94225\n",
+ "Epoch 1383/100000\n",
+ " - 19s - loss: 0.3486 - acc: 0.9366 - val_loss: 0.4778 - val_acc: 0.8678\n",
+ "\n",
+ "Epoch 01383: val_acc did not improve from 0.94225\n",
+ "Epoch 1384/100000\n",
+ " - 18s - loss: 0.3506 - acc: 0.9362 - val_loss: 0.4096 - val_acc: 0.9019\n",
+ "\n",
+ "Epoch 01384: val_acc did not improve from 0.94225\n",
+ "Epoch 1385/100000\n",
+ " - 18s - loss: 0.3499 - acc: 0.9352 - val_loss: 0.3587 - val_acc: 0.9236\n",
+ "\n",
+ "Epoch 01385: val_acc did not improve from 0.94225\n",
+ "Epoch 1386/100000\n",
+ " - 19s - loss: 0.3494 - acc: 0.9369 - val_loss: 0.3566 - val_acc: 0.9256\n",
+ "\n",
+ "Epoch 01386: val_acc did not improve from 0.94225\n",
+ "Epoch 1387/100000\n",
+ " - 18s - loss: 0.3481 - acc: 0.9369 - val_loss: 0.4287 - val_acc: 0.9017\n",
+ "\n",
+ "Epoch 01387: val_acc did not improve from 0.94225\n",
+ "Epoch 1388/100000\n",
+ " - 19s - loss: 0.3513 - acc: 0.9354 - val_loss: 0.3424 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 01388: val_acc did not improve from 0.94225\n",
+ "Epoch 1389/100000\n",
+ " - 19s - loss: 0.3523 - acc: 0.9361 - val_loss: 0.3537 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 01389: val_acc did not improve from 0.94225\n",
+ "Epoch 1390/100000\n",
+ " - 19s - loss: 0.3468 - acc: 0.9367 - val_loss: 0.3287 - val_acc: 0.9418\n",
+ "\n",
+ "Epoch 01390: val_acc did not improve from 0.94225\n",
+ "Epoch 1391/100000\n",
+ " - 19s - loss: 0.3535 - acc: 0.9348 - val_loss: 0.3419 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 01391: val_acc did not improve from 0.94225\n",
+ "Epoch 1392/100000\n",
+ " - 19s - loss: 0.3498 - acc: 0.9371 - val_loss: 0.3676 - val_acc: 0.9271\n",
+ "\n",
+ "Epoch 01392: val_acc did not improve from 0.94225\n",
+ "Epoch 1393/100000\n",
+ " - 19s - loss: 0.3536 - acc: 0.9356 - val_loss: 0.3657 - val_acc: 0.9239\n",
+ "\n",
+ "Epoch 01393: val_acc did not improve from 0.94225\n",
+ "Epoch 1394/100000\n",
+ " - 18s - loss: 0.3476 - acc: 0.9364 - val_loss: 0.3571 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 01394: val_acc did not improve from 0.94225\n",
+ "Epoch 1395/100000\n",
+ " - 18s - loss: 0.3513 - acc: 0.9362 - val_loss: 0.4587 - val_acc: 0.8906\n",
+ "\n",
+ "Epoch 01395: val_acc did not improve from 0.94225\n",
+ "Epoch 1396/100000\n",
+ " - 18s - loss: 0.3477 - acc: 0.9367 - val_loss: 0.3422 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 01396: val_acc did not improve from 0.94225\n",
+ "Epoch 1397/100000\n",
+ " - 18s - loss: 0.3479 - acc: 0.9364 - val_loss: 0.3876 - val_acc: 0.9124\n",
+ "\n",
+ "Epoch 01397: val_acc did not improve from 0.94225\n",
+ "Epoch 1398/100000\n",
+ " - 18s - loss: 0.3481 - acc: 0.9367 - val_loss: 0.3598 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 01398: val_acc did not improve from 0.94225\n",
+ "Epoch 1399/100000\n",
+ " - 18s - loss: 0.3494 - acc: 0.9362 - val_loss: 0.3834 - val_acc: 0.9194\n",
+ "\n",
+ "Epoch 01399: val_acc did not improve from 0.94225\n",
+ "Epoch 1400/100000\n",
+ " - 18s - loss: 0.3488 - acc: 0.9366 - val_loss: 0.3538 - val_acc: 0.9265\n",
+ "\n",
+ "Epoch 01400: val_acc did not improve from 0.94225\n",
+ "Epoch 1401/100000\n",
+ " - 19s - loss: 0.3506 - acc: 0.9352 - val_loss: 0.3365 - val_acc: 0.9393\n",
+ "\n",
+ "Epoch 01401: val_acc did not improve from 0.94225\n",
+ "Epoch 1402/100000\n",
+ " - 19s - loss: 0.3538 - acc: 0.9358 - val_loss: 0.3517 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 01402: val_acc did not improve from 0.94225\n",
+ "Epoch 1403/100000\n",
+ " - 19s - loss: 0.3511 - acc: 0.9363 - val_loss: 0.3398 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 01403: val_acc did not improve from 0.94225\n",
+ "Epoch 1404/100000\n",
+ " - 19s - loss: 0.3514 - acc: 0.9360 - val_loss: 0.3674 - val_acc: 0.9240\n",
+ "\n",
+ "Epoch 01404: val_acc did not improve from 0.94225\n",
+ "Epoch 1405/100000\n",
+ " - 18s - loss: 0.3513 - acc: 0.9357 - val_loss: 0.3635 - val_acc: 0.9268\n",
+ "\n",
+ "Epoch 01405: val_acc did not improve from 0.94225\n",
+ "Epoch 1406/100000\n",
+ " - 19s - loss: 0.3516 - acc: 0.9358 - val_loss: 0.5827 - val_acc: 0.8352\n",
+ "\n",
+ "Epoch 01406: val_acc did not improve from 0.94225\n",
+ "Epoch 1407/100000\n",
+ " - 18s - loss: 0.3533 - acc: 0.9349 - val_loss: 0.3791 - val_acc: 0.9178\n",
+ "\n",
+ "Epoch 01407: val_acc did not improve from 0.94225\n",
+ "Epoch 1408/100000\n",
+ " - 19s - loss: 0.3493 - acc: 0.9364 - val_loss: 0.3378 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 01408: val_acc did not improve from 0.94225\n",
+ "Epoch 1409/100000\n",
+ " - 19s - loss: 0.3515 - acc: 0.9359 - val_loss: 0.4120 - val_acc: 0.9102\n",
+ "\n",
+ "Epoch 01409: val_acc did not improve from 0.94225\n",
+ "Epoch 1410/100000\n",
+ " - 18s - loss: 0.3529 - acc: 0.9359 - val_loss: 0.3996 - val_acc: 0.9085\n",
+ "\n",
+ "Epoch 01410: val_acc did not improve from 0.94225\n",
+ "Epoch 1411/100000\n",
+ " - 19s - loss: 0.3528 - acc: 0.9361 - val_loss: 0.3415 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 01411: val_acc did not improve from 0.94225\n",
+ "Epoch 1412/100000\n",
+ " - 19s - loss: 0.3492 - acc: 0.9364 - val_loss: 0.3477 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 01412: val_acc did not improve from 0.94225\n",
+ "Epoch 1413/100000\n",
+ " - 19s - loss: 0.3496 - acc: 0.9363 - val_loss: 0.4005 - val_acc: 0.9243\n",
+ "\n",
+ "Epoch 01413: val_acc did not improve from 0.94225\n",
+ "Epoch 1414/100000\n",
+ " - 18s - loss: 0.3503 - acc: 0.9366 - val_loss: 0.3504 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 01414: val_acc did not improve from 0.94225\n",
+ "Epoch 1415/100000\n",
+ " - 19s - loss: 0.3462 - acc: 0.9375 - val_loss: 0.3837 - val_acc: 0.9134\n",
+ "\n",
+ "Epoch 01415: val_acc did not improve from 0.94225\n",
+ "Epoch 1416/100000\n",
+ " - 19s - loss: 0.3509 - acc: 0.9360 - val_loss: 0.4320 - val_acc: 0.8960\n",
+ "\n",
+ "Epoch 01416: val_acc did not improve from 0.94225\n",
+ "Epoch 1417/100000\n",
+ " - 19s - loss: 0.3505 - acc: 0.9356 - val_loss: 0.3397 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 01417: val_acc did not improve from 0.94225\n",
+ "Epoch 1418/100000\n",
+ " - 19s - loss: 0.3493 - acc: 0.9361 - val_loss: 0.4234 - val_acc: 0.9027\n",
+ "\n",
+ "Epoch 01418: val_acc did not improve from 0.94225\n",
+ "Epoch 1419/100000\n",
+ " - 19s - loss: 0.3484 - acc: 0.9362 - val_loss: 0.3763 - val_acc: 0.9196\n",
+ "\n",
+ "Epoch 01419: val_acc did not improve from 0.94225\n",
+ "Epoch 1420/100000\n",
+ " - 19s - loss: 0.3476 - acc: 0.9363 - val_loss: 0.3391 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 01420: val_acc did not improve from 0.94225\n",
+ "Epoch 1421/100000\n",
+ " - 19s - loss: 0.3495 - acc: 0.9367 - val_loss: 0.3594 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 01421: val_acc did not improve from 0.94225\n",
+ "Epoch 1422/100000\n",
+ " - 19s - loss: 0.3533 - acc: 0.9362 - val_loss: 0.3419 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 01422: val_acc did not improve from 0.94225\n",
+ "Epoch 1423/100000\n",
+ " - 19s - loss: 0.3535 - acc: 0.9368 - val_loss: 0.3701 - val_acc: 0.9259\n",
+ "\n",
+ "Epoch 01423: val_acc did not improve from 0.94225\n",
+ "Epoch 1424/100000\n",
+ " - 18s - loss: 0.3520 - acc: 0.9358 - val_loss: 0.3560 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01424: val_acc did not improve from 0.94225\n",
+ "Epoch 1425/100000\n",
+ " - 19s - loss: 0.3510 - acc: 0.9367 - val_loss: 0.3446 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 01425: val_acc did not improve from 0.94225\n",
+ "Epoch 1426/100000\n",
+ " - 18s - loss: 0.3493 - acc: 0.9359 - val_loss: 0.3683 - val_acc: 0.9305\n",
+ "\n",
+ "Epoch 01426: val_acc did not improve from 0.94225\n",
+ "Epoch 1427/100000\n",
+ " - 19s - loss: 0.3475 - acc: 0.9370 - val_loss: 0.3341 - val_acc: 0.9406\n",
+ "\n",
+ "Epoch 01427: val_acc did not improve from 0.94225\n",
+ "Epoch 1428/100000\n",
+ " - 18s - loss: 0.3489 - acc: 0.9359 - val_loss: 0.3567 - val_acc: 0.9302\n",
+ "\n",
+ "Epoch 01428: val_acc did not improve from 0.94225\n",
+ "Epoch 1429/100000\n",
+ " - 19s - loss: 0.3495 - acc: 0.9361 - val_loss: 0.3644 - val_acc: 0.9285\n",
+ "\n",
+ "Epoch 01429: val_acc did not improve from 0.94225\n",
+ "Epoch 1430/100000\n",
+ " - 18s - loss: 0.3497 - acc: 0.9361 - val_loss: 0.3904 - val_acc: 0.9063\n",
+ "\n",
+ "Epoch 01430: val_acc did not improve from 0.94225\n",
+ "Epoch 1431/100000\n",
+ " - 19s - loss: 0.3455 - acc: 0.9368 - val_loss: 0.3572 - val_acc: 0.9268\n",
+ "\n",
+ "Epoch 01431: val_acc did not improve from 0.94225\n",
+ "Epoch 1432/100000\n",
+ " - 18s - loss: 0.3555 - acc: 0.9355 - val_loss: 0.3418 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 01432: val_acc did not improve from 0.94225\n",
+ "Epoch 1433/100000\n",
+ " - 19s - loss: 0.3512 - acc: 0.9365 - val_loss: 0.3448 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 01433: val_acc did not improve from 0.94225\n",
+ "Epoch 1434/100000\n",
+ " - 19s - loss: 0.3500 - acc: 0.9359 - val_loss: 0.4012 - val_acc: 0.9196\n",
+ "\n",
+ "Epoch 01434: val_acc did not improve from 0.94225\n",
+ "Epoch 1435/100000\n",
+ " - 19s - loss: 0.3531 - acc: 0.9357 - val_loss: 0.3553 - val_acc: 0.9280\n",
+ "\n",
+ "Epoch 01435: val_acc did not improve from 0.94225\n",
+ "Epoch 1436/100000\n",
+ " - 19s - loss: 0.3511 - acc: 0.9357 - val_loss: 0.3551 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01436: val_acc did not improve from 0.94225\n",
+ "Epoch 1437/100000\n",
+ " - 19s - loss: 0.3511 - acc: 0.9358 - val_loss: 0.3416 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01437: val_acc did not improve from 0.94225\n",
+ "Epoch 1438/100000\n",
+ " - 19s - loss: 0.3495 - acc: 0.9365 - val_loss: 0.3846 - val_acc: 0.9108\n",
+ "\n",
+ "Epoch 01438: val_acc did not improve from 0.94225\n",
+ "Epoch 1439/100000\n",
+ " - 19s - loss: 0.3509 - acc: 0.9361 - val_loss: 0.3707 - val_acc: 0.9206\n",
+ "\n",
+ "Epoch 01439: val_acc did not improve from 0.94225\n",
+ "Epoch 1440/100000\n",
+ " - 19s - loss: 0.3494 - acc: 0.9361 - val_loss: 0.3389 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 01440: val_acc did not improve from 0.94225\n",
+ "Epoch 1441/100000\n",
+ " - 19s - loss: 0.3493 - acc: 0.9361 - val_loss: 0.3400 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 01441: val_acc did not improve from 0.94225\n",
+ "Epoch 1442/100000\n",
+ " - 18s - loss: 0.3519 - acc: 0.9353 - val_loss: 0.3477 - val_acc: 0.9272\n",
+ "\n",
+ "Epoch 01442: val_acc did not improve from 0.94225\n",
+ "Epoch 1443/100000\n",
+ " - 19s - loss: 0.3501 - acc: 0.9366 - val_loss: 0.4107 - val_acc: 0.9003\n",
+ "\n",
+ "Epoch 01443: val_acc did not improve from 0.94225\n",
+ "Epoch 1444/100000\n",
+ " - 18s - loss: 0.3545 - acc: 0.9353 - val_loss: 0.3454 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 01444: val_acc did not improve from 0.94225\n",
+ "Epoch 1445/100000\n",
+ " - 19s - loss: 0.3500 - acc: 0.9368 - val_loss: 0.4156 - val_acc: 0.9098\n",
+ "\n",
+ "Epoch 01445: val_acc did not improve from 0.94225\n",
+ "Epoch 1446/100000\n",
+ " - 19s - loss: 0.3478 - acc: 0.9366 - val_loss: 0.4297 - val_acc: 0.8980\n",
+ "\n",
+ "Epoch 01446: val_acc did not improve from 0.94225\n",
+ "Epoch 1447/100000\n",
+ " - 18s - loss: 0.3505 - acc: 0.9360 - val_loss: 0.3440 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 01447: val_acc did not improve from 0.94225\n",
+ "Epoch 1448/100000\n",
+ " - 19s - loss: 0.3486 - acc: 0.9366 - val_loss: 0.3783 - val_acc: 0.9264\n",
+ "\n",
+ "Epoch 01448: val_acc did not improve from 0.94225\n",
+ "Epoch 1449/100000\n",
+ " - 18s - loss: 0.3545 - acc: 0.9357 - val_loss: 0.3403 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 01449: val_acc did not improve from 0.94225\n",
+ "Epoch 1450/100000\n",
+ " - 19s - loss: 0.3494 - acc: 0.9364 - val_loss: 0.3599 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 01450: val_acc did not improve from 0.94225\n",
+ "Epoch 1451/100000\n",
+ " - 18s - loss: 0.3495 - acc: 0.9360 - val_loss: 0.3365 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01451: val_acc did not improve from 0.94225\n",
+ "Epoch 1452/100000\n",
+ " - 19s - loss: 0.3513 - acc: 0.9361 - val_loss: 0.3477 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 01452: val_acc did not improve from 0.94225\n",
+ "Epoch 1453/100000\n",
+ " - 18s - loss: 0.3509 - acc: 0.9364 - val_loss: 0.3647 - val_acc: 0.9243\n",
+ "\n",
+ "Epoch 01453: val_acc did not improve from 0.94225\n",
+ "Epoch 1454/100000\n",
+ " - 19s - loss: 0.3491 - acc: 0.9362 - val_loss: 0.3393 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 01454: val_acc did not improve from 0.94225\n",
+ "Epoch 1455/100000\n",
+ " - 18s - loss: 0.3517 - acc: 0.9364 - val_loss: 0.3953 - val_acc: 0.9145\n",
+ "\n",
+ "Epoch 01455: val_acc did not improve from 0.94225\n",
+ "Epoch 1456/100000\n",
+ " - 18s - loss: 0.3523 - acc: 0.9358 - val_loss: 0.3341 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 01456: val_acc did not improve from 0.94225\n",
+ "Epoch 1457/100000\n",
+ " - 19s - loss: 0.3508 - acc: 0.9365 - val_loss: 0.3426 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01457: val_acc did not improve from 0.94225\n",
+ "Epoch 1458/100000\n",
+ " - 18s - loss: 0.3485 - acc: 0.9364 - val_loss: 0.3435 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 01458: val_acc did not improve from 0.94225\n",
+ "Epoch 1459/100000\n",
+ " - 19s - loss: 0.3508 - acc: 0.9364 - val_loss: 0.3481 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01459: val_acc did not improve from 0.94225\n",
+ "Epoch 1460/100000\n",
+ " - 18s - loss: 0.3535 - acc: 0.9361 - val_loss: 0.3494 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 01460: val_acc did not improve from 0.94225\n",
+ "Epoch 1461/100000\n",
+ " - 19s - loss: 0.3509 - acc: 0.9364 - val_loss: 0.3750 - val_acc: 0.9236\n",
+ "\n",
+ "Epoch 01461: val_acc did not improve from 0.94225\n",
+ "Epoch 1462/100000\n",
+ " - 19s - loss: 0.3516 - acc: 0.9369 - val_loss: 0.3636 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 01462: val_acc did not improve from 0.94225\n",
+ "Epoch 1463/100000\n",
+ " - 18s - loss: 0.3530 - acc: 0.9357 - val_loss: 0.3581 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 01463: val_acc did not improve from 0.94225\n",
+ "Epoch 1464/100000\n",
+ " - 18s - loss: 0.3494 - acc: 0.9363 - val_loss: 0.6312 - val_acc: 0.8087\n",
+ "\n",
+ "Epoch 01464: val_acc did not improve from 0.94225\n",
+ "Epoch 1465/100000\n",
+ " - 19s - loss: 0.3493 - acc: 0.9358 - val_loss: 0.4659 - val_acc: 0.8841\n",
+ "\n",
+ "Epoch 01465: val_acc did not improve from 0.94225\n",
+ "Epoch 1466/100000\n",
+ " - 18s - loss: 0.3462 - acc: 0.9370 - val_loss: 0.3509 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 01466: val_acc did not improve from 0.94225\n",
+ "Epoch 1467/100000\n",
+ " - 19s - loss: 0.3486 - acc: 0.9369 - val_loss: 0.3432 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 01467: val_acc did not improve from 0.94225\n",
+ "Epoch 1468/100000\n",
+ " - 19s - loss: 0.3515 - acc: 0.9360 - val_loss: 0.3659 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 01468: val_acc did not improve from 0.94225\n",
+ "Epoch 1469/100000\n",
+ " - 19s - loss: 0.3500 - acc: 0.9361 - val_loss: 0.3613 - val_acc: 0.9203\n",
+ "\n",
+ "Epoch 01469: val_acc did not improve from 0.94225\n",
+ "Epoch 1470/100000\n",
+ " - 19s - loss: 0.3490 - acc: 0.9365 - val_loss: 0.3663 - val_acc: 0.9218\n",
+ "\n",
+ "Epoch 01470: val_acc did not improve from 0.94225\n",
+ "Epoch 1471/100000\n",
+ " - 19s - loss: 0.3492 - acc: 0.9367 - val_loss: 0.3404 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 01471: val_acc did not improve from 0.94225\n",
+ "Epoch 1472/100000\n",
+ " - 19s - loss: 0.3519 - acc: 0.9363 - val_loss: 0.3744 - val_acc: 0.9211\n",
+ "\n",
+ "Epoch 01472: val_acc did not improve from 0.94225\n",
+ "Epoch 1473/100000\n",
+ " - 19s - loss: 0.3515 - acc: 0.9362 - val_loss: 0.3351 - val_acc: 0.9409\n",
+ "\n",
+ "Epoch 01473: val_acc did not improve from 0.94225\n",
+ "Epoch 1474/100000\n",
+ " - 19s - loss: 0.3569 - acc: 0.9359 - val_loss: 0.3683 - val_acc: 0.9215\n",
+ "\n",
+ "Epoch 01474: val_acc did not improve from 0.94225\n",
+ "Epoch 1475/100000\n",
+ " - 19s - loss: 0.3504 - acc: 0.9362 - val_loss: 0.4214 - val_acc: 0.8992\n",
+ "\n",
+ "Epoch 01475: val_acc did not improve from 0.94225\n",
+ "Epoch 1476/100000\n",
+ " - 19s - loss: 0.3469 - acc: 0.9367 - val_loss: 0.3385 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 01476: val_acc did not improve from 0.94225\n",
+ "Epoch 1477/100000\n",
+ " - 19s - loss: 0.3515 - acc: 0.9360 - val_loss: 0.3513 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 01477: val_acc did not improve from 0.94225\n",
+ "Epoch 1478/100000\n",
+ " - 19s - loss: 0.3521 - acc: 0.9364 - val_loss: 0.3562 - val_acc: 0.9304\n",
+ "\n",
+ "Epoch 01478: val_acc did not improve from 0.94225\n",
+ "Epoch 1479/100000\n",
+ " - 19s - loss: 0.3509 - acc: 0.9364 - val_loss: 0.3768 - val_acc: 0.9202\n",
+ "\n",
+ "Epoch 01479: val_acc did not improve from 0.94225\n",
+ "Epoch 1480/100000\n",
+ " - 19s - loss: 0.3501 - acc: 0.9364 - val_loss: 0.3593 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 01480: val_acc did not improve from 0.94225\n",
+ "Epoch 1481/100000\n",
+ " - 19s - loss: 0.3502 - acc: 0.9361 - val_loss: 0.3468 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 01481: val_acc did not improve from 0.94225\n",
+ "Epoch 1482/100000\n",
+ " - 18s - loss: 0.3479 - acc: 0.9367 - val_loss: 0.3384 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 01482: val_acc did not improve from 0.94225\n",
+ "Epoch 1483/100000\n",
+ " - 19s - loss: 0.3488 - acc: 0.9365 - val_loss: 0.4288 - val_acc: 0.8928\n",
+ "\n",
+ "Epoch 01483: val_acc did not improve from 0.94225\n",
+ "Epoch 1484/100000\n",
+ " - 19s - loss: 0.3481 - acc: 0.9369 - val_loss: 0.4086 - val_acc: 0.8979\n",
+ "\n",
+ "Epoch 01484: val_acc did not improve from 0.94225\n",
+ "Epoch 1485/100000\n",
+ " - 19s - loss: 0.3510 - acc: 0.9358 - val_loss: 0.3734 - val_acc: 0.9304\n",
+ "\n",
+ "Epoch 01485: val_acc did not improve from 0.94225\n",
+ "Epoch 1486/100000\n",
+ " - 19s - loss: 0.3470 - acc: 0.9365 - val_loss: 0.3493 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 01486: val_acc did not improve from 0.94225\n",
+ "Epoch 1487/100000\n",
+ " - 19s - loss: 0.3515 - acc: 0.9356 - val_loss: 0.3544 - val_acc: 0.9286\n",
+ "\n",
+ "Epoch 01487: val_acc did not improve from 0.94225\n",
+ "Epoch 1488/100000\n",
+ " - 19s - loss: 0.3498 - acc: 0.9361 - val_loss: 0.4364 - val_acc: 0.8996\n",
+ "\n",
+ "Epoch 01488: val_acc did not improve from 0.94225\n",
+ "Epoch 1489/100000\n",
+ " - 19s - loss: 0.3591 - acc: 0.9352 - val_loss: 0.4778 - val_acc: 0.8883\n",
+ "\n",
+ "Epoch 01489: val_acc did not improve from 0.94225\n",
+ "Epoch 1490/100000\n",
+ " - 19s - loss: 0.3585 - acc: 0.9358 - val_loss: 0.3359 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 01490: val_acc did not improve from 0.94225\n",
+ "Epoch 1491/100000\n",
+ " - 19s - loss: 0.3500 - acc: 0.9367 - val_loss: 0.3578 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 01491: val_acc did not improve from 0.94225\n",
+ "Epoch 1492/100000\n",
+ " - 19s - loss: 0.3529 - acc: 0.9355 - val_loss: 0.3451 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01492: val_acc did not improve from 0.94225\n",
+ "Epoch 1493/100000\n",
+ " - 18s - loss: 0.3507 - acc: 0.9362 - val_loss: 0.3988 - val_acc: 0.9051\n",
+ "\n",
+ "Epoch 01493: val_acc did not improve from 0.94225\n",
+ "Epoch 1494/100000\n",
+ " - 19s - loss: 0.3505 - acc: 0.9363 - val_loss: 0.5826 - val_acc: 0.8449\n",
+ "\n",
+ "Epoch 01494: val_acc did not improve from 0.94225\n",
+ "Epoch 1495/100000\n",
+ " - 18s - loss: 0.3493 - acc: 0.9362 - val_loss: 0.3989 - val_acc: 0.9055\n",
+ "\n",
+ "Epoch 01495: val_acc did not improve from 0.94225\n",
+ "Epoch 1496/100000\n",
+ " - 19s - loss: 0.3509 - acc: 0.9356 - val_loss: 0.3461 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 01496: val_acc did not improve from 0.94225\n",
+ "Epoch 1497/100000\n",
+ " - 19s - loss: 0.3501 - acc: 0.9360 - val_loss: 0.3541 - val_acc: 0.9257\n",
+ "\n",
+ "Epoch 01497: val_acc did not improve from 0.94225\n",
+ "Epoch 1498/100000\n",
+ " - 19s - loss: 0.3477 - acc: 0.9372 - val_loss: 0.4306 - val_acc: 0.8910\n",
+ "\n",
+ "Epoch 01498: val_acc did not improve from 0.94225\n",
+ "Epoch 1499/100000\n",
+ " - 19s - loss: 0.3475 - acc: 0.9372 - val_loss: 0.3435 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 01499: val_acc did not improve from 0.94225\n",
+ "Epoch 1500/100000\n",
+ " - 19s - loss: 0.3486 - acc: 0.9362 - val_loss: 0.3405 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 01500: val_acc did not improve from 0.94225\n",
+ "Epoch 1501/100000\n",
+ " - 19s - loss: 0.3475 - acc: 0.9361 - val_loss: 0.4006 - val_acc: 0.9228\n",
+ "\n",
+ "Epoch 01501: val_acc did not improve from 0.94225\n",
+ "Epoch 1502/100000\n",
+ " - 19s - loss: 0.3476 - acc: 0.9373 - val_loss: 0.3332 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 01502: val_acc did not improve from 0.94225\n",
+ "Epoch 1503/100000\n",
+ " - 19s - loss: 0.3776 - acc: 0.9333 - val_loss: 0.5306 - val_acc: 0.9109\n",
+ "\n",
+ "Epoch 01503: val_acc did not improve from 0.94225\n",
+ "Epoch 1504/100000\n",
+ " - 19s - loss: 0.3992 - acc: 0.9342 - val_loss: 0.3567 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 01504: val_acc did not improve from 0.94225\n",
+ "Epoch 1505/100000\n",
+ " - 18s - loss: 0.3694 - acc: 0.9332 - val_loss: 0.3941 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 01505: val_acc did not improve from 0.94225\n",
+ "Epoch 1506/100000\n",
+ " - 19s - loss: 0.3810 - acc: 0.9341 - val_loss: 0.4813 - val_acc: 0.8712\n",
+ "\n",
+ "Epoch 01506: val_acc did not improve from 0.94225\n",
+ "Epoch 1507/100000\n",
+ " - 18s - loss: 0.3643 - acc: 0.9347 - val_loss: 0.3607 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 01507: val_acc did not improve from 0.94225\n",
+ "Epoch 1508/100000\n",
+ " - 19s - loss: 0.3639 - acc: 0.9346 - val_loss: 0.3479 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 01508: val_acc did not improve from 0.94225\n",
+ "Epoch 1509/100000\n",
+ " - 18s - loss: 0.3628 - acc: 0.9343 - val_loss: 0.4007 - val_acc: 0.9044\n",
+ "\n",
+ "Epoch 01509: val_acc did not improve from 0.94225\n",
+ "Epoch 1510/100000\n",
+ " - 18s - loss: 0.3621 - acc: 0.9346 - val_loss: 0.3396 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 01510: val_acc did not improve from 0.94225\n",
+ "Epoch 1511/100000\n",
+ " - 19s - loss: 0.3599 - acc: 0.9352 - val_loss: 0.3535 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 01511: val_acc did not improve from 0.94225\n",
+ "Epoch 1512/100000\n",
+ " - 19s - loss: 0.3616 - acc: 0.9350 - val_loss: 0.4083 - val_acc: 0.8999\n",
+ "\n",
+ "Epoch 01512: val_acc did not improve from 0.94225\n",
+ "Epoch 1513/100000\n",
+ " - 19s - loss: 0.3603 - acc: 0.9345 - val_loss: 0.3352 - val_acc: 0.9394\n",
+ "\n",
+ "Epoch 01513: val_acc did not improve from 0.94225\n",
+ "Epoch 1514/100000\n",
+ " - 18s - loss: 0.3585 - acc: 0.9350 - val_loss: 0.3534 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 01514: val_acc did not improve from 0.94225\n",
+ "Epoch 1515/100000\n",
+ " - 19s - loss: 0.3614 - acc: 0.9337 - val_loss: 0.3539 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 01515: val_acc did not improve from 0.94225\n",
+ "Epoch 1516/100000\n",
+ " - 19s - loss: 0.3582 - acc: 0.9345 - val_loss: 0.4412 - val_acc: 0.8963\n",
+ "\n",
+ "Epoch 01516: val_acc did not improve from 0.94225\n",
+ "Epoch 1517/100000\n",
+ " - 19s - loss: 0.3604 - acc: 0.9345 - val_loss: 0.3486 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 01517: val_acc did not improve from 0.94225\n",
+ "Epoch 1518/100000\n",
+ " - 19s - loss: 0.3601 - acc: 0.9341 - val_loss: 0.3562 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 01518: val_acc did not improve from 0.94225\n",
+ "Epoch 1519/100000\n",
+ " - 18s - loss: 0.3559 - acc: 0.9347 - val_loss: 0.3548 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 01519: val_acc did not improve from 0.94225\n",
+ "Epoch 1520/100000\n",
+ " - 19s - loss: 0.3577 - acc: 0.9343 - val_loss: 0.3877 - val_acc: 0.9121\n",
+ "\n",
+ "Epoch 01520: val_acc did not improve from 0.94225\n",
+ "Epoch 1521/100000\n",
+ " - 19s - loss: 0.3589 - acc: 0.9342 - val_loss: 0.3806 - val_acc: 0.9134\n",
+ "\n",
+ "Epoch 01521: val_acc did not improve from 0.94225\n",
+ "Epoch 1522/100000\n",
+ " - 19s - loss: 0.3570 - acc: 0.9346 - val_loss: 0.4517 - val_acc: 0.8753\n",
+ "\n",
+ "Epoch 01522: val_acc did not improve from 0.94225\n",
+ "Epoch 1523/100000\n",
+ " - 19s - loss: 0.3562 - acc: 0.9345 - val_loss: 0.3592 - val_acc: 0.9226\n",
+ "\n",
+ "Epoch 01523: val_acc did not improve from 0.94225\n",
+ "Epoch 1524/100000\n",
+ " - 19s - loss: 0.3584 - acc: 0.9343 - val_loss: 0.3479 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01524: val_acc did not improve from 0.94225\n",
+ "Epoch 1525/100000\n",
+ " - 19s - loss: 0.3581 - acc: 0.9340 - val_loss: 0.3395 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 01525: val_acc did not improve from 0.94225\n",
+ "Epoch 1526/100000\n",
+ " - 19s - loss: 0.3541 - acc: 0.9350 - val_loss: 0.3697 - val_acc: 0.9290\n",
+ "\n",
+ "Epoch 01526: val_acc did not improve from 0.94225\n",
+ "Epoch 1527/100000\n",
+ " - 18s - loss: 0.3566 - acc: 0.9343 - val_loss: 0.5285 - val_acc: 0.8809\n",
+ "\n",
+ "Epoch 01527: val_acc did not improve from 0.94225\n",
+ "Epoch 1528/100000\n",
+ " - 19s - loss: 0.3588 - acc: 0.9340 - val_loss: 0.3725 - val_acc: 0.9172\n",
+ "\n",
+ "Epoch 01528: val_acc did not improve from 0.94225\n",
+ "Epoch 1529/100000\n",
+ " - 19s - loss: 0.3604 - acc: 0.9343 - val_loss: 0.3467 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01529: val_acc did not improve from 0.94225\n",
+ "Epoch 1530/100000\n",
+ " - 19s - loss: 0.3602 - acc: 0.9336 - val_loss: 0.3526 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 01530: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 01530: ReduceLROnPlateau reducing learning rate to 0.0006983372120885178.\n",
+ "Epoch 1531/100000\n",
+ " - 19s - loss: 0.3486 - acc: 0.9353 - val_loss: 0.3665 - val_acc: 0.9218\n",
+ "\n",
+ "Epoch 01531: val_acc did not improve from 0.94225\n",
+ "Epoch 1532/100000\n",
+ " - 18s - loss: 0.3486 - acc: 0.9353 - val_loss: 0.3354 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 01532: val_acc did not improve from 0.94225\n",
+ "Epoch 1533/100000\n",
+ " - 19s - loss: 0.3492 - acc: 0.9350 - val_loss: 0.3638 - val_acc: 0.9227\n",
+ "\n",
+ "Epoch 01533: val_acc did not improve from 0.94225\n",
+ "Epoch 1534/100000\n",
+ " - 18s - loss: 0.3462 - acc: 0.9355 - val_loss: 0.3396 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01534: val_acc did not improve from 0.94225\n",
+ "Epoch 1535/100000\n",
+ " - 18s - loss: 0.3471 - acc: 0.9351 - val_loss: 0.3609 - val_acc: 0.9216\n",
+ "\n",
+ "Epoch 01535: val_acc did not improve from 0.94225\n",
+ "Epoch 1536/100000\n",
+ " - 19s - loss: 0.3483 - acc: 0.9350 - val_loss: 0.3394 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01536: val_acc did not improve from 0.94225\n",
+ "Epoch 1537/100000\n",
+ " - 18s - loss: 0.3518 - acc: 0.9342 - val_loss: 0.3750 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 01537: val_acc did not improve from 0.94225\n",
+ "Epoch 1538/100000\n",
+ " - 19s - loss: 0.3488 - acc: 0.9351 - val_loss: 0.3339 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 01538: val_acc did not improve from 0.94225\n",
+ "Epoch 1539/100000\n",
+ " - 18s - loss: 0.3491 - acc: 0.9345 - val_loss: 0.3946 - val_acc: 0.9078\n",
+ "\n",
+ "Epoch 01539: val_acc did not improve from 0.94225\n",
+ "Epoch 1540/100000\n",
+ " - 19s - loss: 0.3539 - acc: 0.9330 - val_loss: 0.3448 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 01540: val_acc did not improve from 0.94225\n",
+ "Epoch 1541/100000\n",
+ " - 18s - loss: 0.3482 - acc: 0.9349 - val_loss: 0.3439 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01541: val_acc did not improve from 0.94225\n",
+ "Epoch 1542/100000\n",
+ " - 19s - loss: 0.3475 - acc: 0.9355 - val_loss: 0.4212 - val_acc: 0.8824\n",
+ "\n",
+ "Epoch 01542: val_acc did not improve from 0.94225\n",
+ "Epoch 1543/100000\n",
+ " - 19s - loss: 0.3481 - acc: 0.9351 - val_loss: 0.3338 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 01543: val_acc did not improve from 0.94225\n",
+ "Epoch 1544/100000\n",
+ " - 19s - loss: 0.3493 - acc: 0.9351 - val_loss: 0.4214 - val_acc: 0.9045\n",
+ "\n",
+ "Epoch 01544: val_acc did not improve from 0.94225\n",
+ "Epoch 1545/100000\n",
+ " - 18s - loss: 0.3503 - acc: 0.9352 - val_loss: 0.4191 - val_acc: 0.8949\n",
+ "\n",
+ "Epoch 01545: val_acc did not improve from 0.94225\n",
+ "Epoch 1546/100000\n",
+ " - 19s - loss: 0.3474 - acc: 0.9351 - val_loss: 0.3665 - val_acc: 0.9228\n",
+ "\n",
+ "Epoch 01546: val_acc did not improve from 0.94225\n",
+ "Epoch 1547/100000\n",
+ " - 19s - loss: 0.3488 - acc: 0.9345 - val_loss: 0.3359 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 01547: val_acc did not improve from 0.94225\n",
+ "Epoch 1548/100000\n",
+ " - 18s - loss: 0.3481 - acc: 0.9353 - val_loss: 0.3314 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 01548: val_acc did not improve from 0.94225\n",
+ "Epoch 1549/100000\n",
+ " - 19s - loss: 0.3481 - acc: 0.9343 - val_loss: 0.3400 - val_acc: 0.9323\n",
+ "\n",
+ "Epoch 01549: val_acc did not improve from 0.94225\n",
+ "Epoch 1550/100000\n",
+ " - 19s - loss: 0.3479 - acc: 0.9347 - val_loss: 0.3540 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01550: val_acc did not improve from 0.94225\n",
+ "Epoch 1551/100000\n",
+ " - 18s - loss: 0.3486 - acc: 0.9345 - val_loss: 0.3953 - val_acc: 0.9038\n",
+ "\n",
+ "Epoch 01551: val_acc did not improve from 0.94225\n",
+ "Epoch 1552/100000\n",
+ " - 18s - loss: 0.3487 - acc: 0.9353 - val_loss: 0.3591 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 01552: val_acc did not improve from 0.94225\n",
+ "Epoch 1553/100000\n",
+ " - 18s - loss: 0.3469 - acc: 0.9354 - val_loss: 0.4223 - val_acc: 0.8981\n",
+ "\n",
+ "Epoch 01553: val_acc did not improve from 0.94225\n",
+ "Epoch 1554/100000\n",
+ " - 18s - loss: 0.3462 - acc: 0.9353 - val_loss: 0.3439 - val_acc: 0.9255\n",
+ "\n",
+ "Epoch 01554: val_acc did not improve from 0.94225\n",
+ "Epoch 1555/100000\n",
+ " - 19s - loss: 0.3490 - acc: 0.9344 - val_loss: 0.3527 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 01555: val_acc did not improve from 0.94225\n",
+ "Epoch 1556/100000\n",
+ " - 19s - loss: 0.3501 - acc: 0.9345 - val_loss: 0.3484 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 01556: val_acc did not improve from 0.94225\n",
+ "Epoch 1557/100000\n",
+ " - 19s - loss: 0.3490 - acc: 0.9348 - val_loss: 0.3360 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 01557: val_acc did not improve from 0.94225\n",
+ "Epoch 1558/100000\n",
+ " - 18s - loss: 0.3495 - acc: 0.9351 - val_loss: 0.3474 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 01558: val_acc did not improve from 0.94225\n",
+ "Epoch 1559/100000\n",
+ " - 19s - loss: 0.3465 - acc: 0.9358 - val_loss: 0.3691 - val_acc: 0.9205\n",
+ "\n",
+ "Epoch 01559: val_acc did not improve from 0.94225\n",
+ "Epoch 1560/100000\n",
+ " - 18s - loss: 0.3494 - acc: 0.9343 - val_loss: 0.3717 - val_acc: 0.9190\n",
+ "\n",
+ "Epoch 01560: val_acc did not improve from 0.94225\n",
+ "Epoch 1561/100000\n",
+ " - 19s - loss: 0.3487 - acc: 0.9361 - val_loss: 0.3438 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01561: val_acc did not improve from 0.94225\n",
+ "Epoch 1562/100000\n",
+ " - 19s - loss: 0.3483 - acc: 0.9345 - val_loss: 0.3556 - val_acc: 0.9269\n",
+ "\n",
+ "Epoch 01562: val_acc did not improve from 0.94225\n",
+ "Epoch 1563/100000\n",
+ " - 18s - loss: 0.3465 - acc: 0.9356 - val_loss: 0.3443 - val_acc: 0.9305\n",
+ "\n",
+ "Epoch 01563: val_acc did not improve from 0.94225\n",
+ "Epoch 1564/100000\n",
+ " - 19s - loss: 0.3486 - acc: 0.9347 - val_loss: 0.3411 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01564: val_acc did not improve from 0.94225\n",
+ "Epoch 1565/100000\n",
+ " - 18s - loss: 0.3485 - acc: 0.9349 - val_loss: 0.3609 - val_acc: 0.9210\n",
+ "\n",
+ "Epoch 01565: val_acc did not improve from 0.94225\n",
+ "Epoch 1566/100000\n",
+ " - 19s - loss: 0.3501 - acc: 0.9346 - val_loss: 0.3521 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 01566: val_acc did not improve from 0.94225\n",
+ "Epoch 1567/100000\n",
+ " - 19s - loss: 0.3483 - acc: 0.9349 - val_loss: 0.3436 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 01567: val_acc did not improve from 0.94225\n",
+ "Epoch 1568/100000\n",
+ " - 19s - loss: 0.3475 - acc: 0.9351 - val_loss: 0.3788 - val_acc: 0.9105\n",
+ "\n",
+ "Epoch 01568: val_acc did not improve from 0.94225\n",
+ "Epoch 1569/100000\n",
+ " - 19s - loss: 0.3482 - acc: 0.9347 - val_loss: 0.5227 - val_acc: 0.8554\n",
+ "\n",
+ "Epoch 01569: val_acc did not improve from 0.94225\n",
+ "Epoch 1570/100000\n",
+ " - 19s - loss: 0.3483 - acc: 0.9351 - val_loss: 0.3496 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 01570: val_acc did not improve from 0.94225\n",
+ "Epoch 1571/100000\n",
+ " - 19s - loss: 0.3493 - acc: 0.9350 - val_loss: 0.3829 - val_acc: 0.9273\n",
+ "\n",
+ "Epoch 01571: val_acc did not improve from 0.94225\n",
+ "Epoch 1572/100000\n",
+ " - 18s - loss: 0.3498 - acc: 0.9344 - val_loss: 0.3900 - val_acc: 0.9012\n",
+ "\n",
+ "Epoch 01572: val_acc did not improve from 0.94225\n",
+ "Epoch 1573/100000\n",
+ " - 18s - loss: 0.3479 - acc: 0.9351 - val_loss: 0.3395 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 01573: val_acc did not improve from 0.94225\n",
+ "Epoch 1574/100000\n",
+ " - 19s - loss: 0.3490 - acc: 0.9352 - val_loss: 0.3400 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 01574: val_acc did not improve from 0.94225\n",
+ "Epoch 1575/100000\n",
+ " - 18s - loss: 0.3497 - acc: 0.9352 - val_loss: 0.3423 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 01575: val_acc did not improve from 0.94225\n",
+ "Epoch 1576/100000\n",
+ " - 18s - loss: 0.3481 - acc: 0.9352 - val_loss: 0.3398 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01576: val_acc did not improve from 0.94225\n",
+ "Epoch 1577/100000\n",
+ " - 19s - loss: 0.3498 - acc: 0.9345 - val_loss: 0.3477 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 01577: val_acc did not improve from 0.94225\n",
+ "Epoch 1578/100000\n",
+ " - 19s - loss: 0.3491 - acc: 0.9352 - val_loss: 0.3490 - val_acc: 0.9248\n",
+ "\n",
+ "Epoch 01578: val_acc did not improve from 0.94225\n",
+ "Epoch 1579/100000\n",
+ " - 19s - loss: 0.3465 - acc: 0.9351 - val_loss: 0.3394 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 01579: val_acc did not improve from 0.94225\n",
+ "Epoch 1580/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9359 - val_loss: 0.3401 - val_acc: 0.9304\n",
+ "\n",
+ "Epoch 01580: val_acc did not improve from 0.94225\n",
+ "Epoch 1581/100000\n",
+ " - 19s - loss: 0.3468 - acc: 0.9353 - val_loss: 0.3471 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 01581: val_acc did not improve from 0.94225\n",
+ "Epoch 1582/100000\n",
+ " - 18s - loss: 0.3449 - acc: 0.9359 - val_loss: 0.3564 - val_acc: 0.9222\n",
+ "\n",
+ "Epoch 01582: val_acc did not improve from 0.94225\n",
+ "Epoch 1583/100000\n",
+ " - 19s - loss: 0.3483 - acc: 0.9348 - val_loss: 0.3328 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 01583: val_acc did not improve from 0.94225\n",
+ "Epoch 1584/100000\n",
+ " - 18s - loss: 0.3464 - acc: 0.9360 - val_loss: 0.3413 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 01584: val_acc did not improve from 0.94225\n",
+ "Epoch 1585/100000\n",
+ " - 19s - loss: 0.3462 - acc: 0.9362 - val_loss: 0.3646 - val_acc: 0.9200\n",
+ "\n",
+ "Epoch 01585: val_acc did not improve from 0.94225\n",
+ "Epoch 1586/100000\n",
+ " - 18s - loss: 0.3485 - acc: 0.9352 - val_loss: 0.3585 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 01586: val_acc did not improve from 0.94225\n",
+ "Epoch 1587/100000\n",
+ " - 19s - loss: 0.3474 - acc: 0.9353 - val_loss: 0.3410 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 01587: val_acc did not improve from 0.94225\n",
+ "Epoch 1588/100000\n",
+ " - 18s - loss: 0.3488 - acc: 0.9350 - val_loss: 0.4182 - val_acc: 0.9034\n",
+ "\n",
+ "Epoch 01588: val_acc did not improve from 0.94225\n",
+ "Epoch 1589/100000\n",
+ " - 19s - loss: 0.3477 - acc: 0.9351 - val_loss: 0.3368 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 01589: val_acc did not improve from 0.94225\n",
+ "Epoch 1590/100000\n",
+ " - 18s - loss: 0.3435 - acc: 0.9362 - val_loss: 0.3360 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01590: val_acc did not improve from 0.94225\n",
+ "Epoch 1591/100000\n",
+ " - 18s - loss: 0.3445 - acc: 0.9356 - val_loss: 0.3583 - val_acc: 0.9234\n",
+ "\n",
+ "Epoch 01591: val_acc did not improve from 0.94225\n",
+ "Epoch 1592/100000\n",
+ " - 18s - loss: 0.3474 - acc: 0.9354 - val_loss: 0.3424 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01592: val_acc did not improve from 0.94225\n",
+ "Epoch 1593/100000\n",
+ " - 19s - loss: 0.3489 - acc: 0.9349 - val_loss: 0.3690 - val_acc: 0.9242\n",
+ "\n",
+ "Epoch 01593: val_acc did not improve from 0.94225\n",
+ "Epoch 1594/100000\n",
+ " - 18s - loss: 0.3464 - acc: 0.9350 - val_loss: 0.3367 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 01594: val_acc did not improve from 0.94225\n",
+ "Epoch 1595/100000\n",
+ " - 19s - loss: 0.3434 - acc: 0.9362 - val_loss: 0.3363 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 01595: val_acc did not improve from 0.94225\n",
+ "Epoch 1596/100000\n",
+ " - 19s - loss: 0.3446 - acc: 0.9355 - val_loss: 0.3364 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 01596: val_acc did not improve from 0.94225\n",
+ "Epoch 1597/100000\n",
+ " - 19s - loss: 0.3443 - acc: 0.9358 - val_loss: 0.3494 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01597: val_acc did not improve from 0.94225\n",
+ "Epoch 1598/100000\n",
+ " - 19s - loss: 0.3447 - acc: 0.9358 - val_loss: 0.3393 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 01598: val_acc did not improve from 0.94225\n",
+ "Epoch 1599/100000\n",
+ " - 18s - loss: 0.3464 - acc: 0.9351 - val_loss: 0.3433 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 01599: val_acc did not improve from 0.94225\n",
+ "Epoch 1600/100000\n",
+ " - 18s - loss: 0.3488 - acc: 0.9344 - val_loss: 0.3429 - val_acc: 0.9394\n",
+ "\n",
+ "Epoch 01600: val_acc did not improve from 0.94225\n",
+ "Epoch 1601/100000\n",
+ " - 19s - loss: 0.3443 - acc: 0.9362 - val_loss: 0.4774 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 01601: val_acc did not improve from 0.94225\n",
+ "Epoch 1602/100000\n",
+ " - 18s - loss: 0.3483 - acc: 0.9346 - val_loss: 0.3416 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01602: val_acc did not improve from 0.94225\n",
+ "Epoch 1603/100000\n",
+ " - 19s - loss: 0.3445 - acc: 0.9357 - val_loss: 0.3289 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 01603: val_acc did not improve from 0.94225\n",
+ "Epoch 1604/100000\n",
+ " - 18s - loss: 0.3447 - acc: 0.9366 - val_loss: 0.3392 - val_acc: 0.9394\n",
+ "\n",
+ "Epoch 01604: val_acc did not improve from 0.94225\n",
+ "Epoch 1605/100000\n",
+ " - 19s - loss: 0.3473 - acc: 0.9351 - val_loss: 0.3371 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 01605: val_acc did not improve from 0.94225\n",
+ "Epoch 1606/100000\n",
+ " - 18s - loss: 0.3465 - acc: 0.9350 - val_loss: 0.3333 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 01606: val_acc did not improve from 0.94225\n",
+ "Epoch 1607/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9355 - val_loss: 0.3370 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01607: val_acc did not improve from 0.94225\n",
+ "Epoch 1608/100000\n",
+ " - 19s - loss: 0.3440 - acc: 0.9354 - val_loss: 0.3343 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 01608: val_acc did not improve from 0.94225\n",
+ "Epoch 1609/100000\n",
+ " - 19s - loss: 0.3466 - acc: 0.9356 - val_loss: 0.3300 - val_acc: 0.9390\n",
+ "\n",
+ "Epoch 01609: val_acc did not improve from 0.94225\n",
+ "Epoch 1610/100000\n",
+ " - 19s - loss: 0.3487 - acc: 0.9358 - val_loss: 0.3603 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 01610: val_acc did not improve from 0.94225\n",
+ "Epoch 1611/100000\n",
+ " - 18s - loss: 0.3464 - acc: 0.9362 - val_loss: 0.3817 - val_acc: 0.9193\n",
+ "\n",
+ "Epoch 01611: val_acc did not improve from 0.94225\n",
+ "Epoch 1612/100000\n",
+ " - 19s - loss: 0.3451 - acc: 0.9362 - val_loss: 0.3258 - val_acc: 0.9417\n",
+ "\n",
+ "Epoch 01612: val_acc did not improve from 0.94225\n",
+ "Epoch 1613/100000\n",
+ " - 19s - loss: 0.3459 - acc: 0.9360 - val_loss: 0.3432 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 01613: val_acc did not improve from 0.94225\n",
+ "Epoch 1614/100000\n",
+ " - 19s - loss: 0.3455 - acc: 0.9356 - val_loss: 0.3327 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 01614: val_acc did not improve from 0.94225\n",
+ "Epoch 1615/100000\n",
+ " - 18s - loss: 0.3446 - acc: 0.9359 - val_loss: 0.3321 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 01615: val_acc did not improve from 0.94225\n",
+ "Epoch 1616/100000\n",
+ " - 19s - loss: 0.3469 - acc: 0.9355 - val_loss: 0.3439 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 01616: val_acc did not improve from 0.94225\n",
+ "Epoch 1617/100000\n",
+ " - 19s - loss: 0.3447 - acc: 0.9355 - val_loss: 0.3449 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 01617: val_acc did not improve from 0.94225\n",
+ "Epoch 1618/100000\n",
+ " - 19s - loss: 0.3466 - acc: 0.9346 - val_loss: 0.3747 - val_acc: 0.9158\n",
+ "\n",
+ "Epoch 01618: val_acc did not improve from 0.94225\n",
+ "Epoch 1619/100000\n",
+ " - 18s - loss: 0.3439 - acc: 0.9360 - val_loss: 0.3319 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 01619: val_acc did not improve from 0.94225\n",
+ "Epoch 1620/100000\n",
+ " - 19s - loss: 0.3460 - acc: 0.9355 - val_loss: 0.3611 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 01620: val_acc did not improve from 0.94225\n",
+ "Epoch 1621/100000\n",
+ " - 18s - loss: 0.3443 - acc: 0.9361 - val_loss: 0.3344 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 01621: val_acc did not improve from 0.94225\n",
+ "Epoch 1622/100000\n",
+ " - 19s - loss: 0.3442 - acc: 0.9357 - val_loss: 0.3352 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 01622: val_acc did not improve from 0.94225\n",
+ "Epoch 1623/100000\n",
+ " - 19s - loss: 0.3438 - acc: 0.9354 - val_loss: 0.3325 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 01623: val_acc did not improve from 0.94225\n",
+ "Epoch 1624/100000\n",
+ " - 19s - loss: 0.3481 - acc: 0.9346 - val_loss: 0.3577 - val_acc: 0.9231\n",
+ "\n",
+ "Epoch 01624: val_acc did not improve from 0.94225\n",
+ "Epoch 1625/100000\n",
+ " - 19s - loss: 0.3495 - acc: 0.9352 - val_loss: 0.3412 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 01625: val_acc did not improve from 0.94225\n",
+ "Epoch 1626/100000\n",
+ " - 19s - loss: 0.3481 - acc: 0.9362 - val_loss: 0.3484 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 01626: val_acc did not improve from 0.94225\n",
+ "Epoch 1627/100000\n",
+ " - 18s - loss: 0.3444 - acc: 0.9361 - val_loss: 0.3314 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01627: val_acc did not improve from 0.94225\n",
+ "Epoch 1628/100000\n",
+ " - 19s - loss: 0.3439 - acc: 0.9360 - val_loss: 0.3709 - val_acc: 0.9179\n",
+ "\n",
+ "Epoch 01628: val_acc did not improve from 0.94225\n",
+ "Epoch 1629/100000\n",
+ " - 18s - loss: 0.3443 - acc: 0.9358 - val_loss: 0.3754 - val_acc: 0.9152\n",
+ "\n",
+ "Epoch 01629: val_acc did not improve from 0.94225\n",
+ "Epoch 1630/100000\n",
+ " - 19s - loss: 0.3447 - acc: 0.9360 - val_loss: 0.3456 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 01630: val_acc did not improve from 0.94225\n",
+ "Epoch 1631/100000\n",
+ " - 19s - loss: 0.3471 - acc: 0.9352 - val_loss: 0.3426 - val_acc: 0.9323\n",
+ "\n",
+ "Epoch 01631: val_acc did not improve from 0.94225\n",
+ "Epoch 1632/100000\n",
+ " - 18s - loss: 0.3438 - acc: 0.9361 - val_loss: 0.3754 - val_acc: 0.9179\n",
+ "\n",
+ "Epoch 01632: val_acc did not improve from 0.94225\n",
+ "Epoch 1633/100000\n",
+ " - 19s - loss: 0.3450 - acc: 0.9358 - val_loss: 0.3397 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01633: val_acc did not improve from 0.94225\n",
+ "Epoch 1634/100000\n",
+ " - 19s - loss: 0.3453 - acc: 0.9359 - val_loss: 0.4063 - val_acc: 0.8991\n",
+ "\n",
+ "Epoch 01634: val_acc did not improve from 0.94225\n",
+ "Epoch 1635/100000\n",
+ " - 19s - loss: 0.3429 - acc: 0.9365 - val_loss: 0.3493 - val_acc: 0.9257\n",
+ "\n",
+ "Epoch 01635: val_acc did not improve from 0.94225\n",
+ "Epoch 1636/100000\n",
+ " - 19s - loss: 0.3440 - acc: 0.9354 - val_loss: 0.3527 - val_acc: 0.9253\n",
+ "\n",
+ "Epoch 01636: val_acc did not improve from 0.94225\n",
+ "Epoch 1637/100000\n",
+ " - 19s - loss: 0.3425 - acc: 0.9363 - val_loss: 0.3715 - val_acc: 0.9200\n",
+ "\n",
+ "Epoch 01637: val_acc did not improve from 0.94225\n",
+ "Epoch 1638/100000\n",
+ " - 18s - loss: 0.3465 - acc: 0.9350 - val_loss: 0.3477 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 01638: val_acc did not improve from 0.94225\n",
+ "Epoch 1639/100000\n",
+ " - 19s - loss: 0.3435 - acc: 0.9357 - val_loss: 0.3380 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 01639: val_acc did not improve from 0.94225\n",
+ "Epoch 1640/100000\n",
+ " - 19s - loss: 0.3440 - acc: 0.9361 - val_loss: 0.3594 - val_acc: 0.9247\n",
+ "\n",
+ "Epoch 01640: val_acc did not improve from 0.94225\n",
+ "Epoch 1641/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9358 - val_loss: 0.3258 - val_acc: 0.9397\n",
+ "\n",
+ "Epoch 01641: val_acc did not improve from 0.94225\n",
+ "Epoch 1642/100000\n",
+ " - 18s - loss: 0.3408 - acc: 0.9363 - val_loss: 0.3678 - val_acc: 0.9222\n",
+ "\n",
+ "Epoch 01642: val_acc did not improve from 0.94225\n",
+ "Epoch 1643/100000\n",
+ " - 18s - loss: 0.3461 - acc: 0.9348 - val_loss: 0.3350 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 01643: val_acc did not improve from 0.94225\n",
+ "Epoch 1644/100000\n",
+ " - 19s - loss: 0.3451 - acc: 0.9355 - val_loss: 0.3369 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 01644: val_acc did not improve from 0.94225\n",
+ "Epoch 1645/100000\n",
+ " - 19s - loss: 0.3438 - acc: 0.9364 - val_loss: 0.3309 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 01645: val_acc did not improve from 0.94225\n",
+ "Epoch 1646/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9361 - val_loss: 0.3353 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 01646: val_acc did not improve from 0.94225\n",
+ "Epoch 1647/100000\n",
+ " - 19s - loss: 0.3414 - acc: 0.9366 - val_loss: 0.3882 - val_acc: 0.9058\n",
+ "\n",
+ "Epoch 01647: val_acc did not improve from 0.94225\n",
+ "Epoch 1648/100000\n",
+ " - 19s - loss: 0.3422 - acc: 0.9358 - val_loss: 0.3841 - val_acc: 0.9138\n",
+ "\n",
+ "Epoch 01648: val_acc did not improve from 0.94225\n",
+ "Epoch 1649/100000\n",
+ " - 18s - loss: 0.3428 - acc: 0.9357 - val_loss: 0.3569 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01649: val_acc did not improve from 0.94225\n",
+ "Epoch 1650/100000\n",
+ " - 19s - loss: 0.3447 - acc: 0.9355 - val_loss: 0.4562 - val_acc: 0.8792\n",
+ "\n",
+ "Epoch 01650: val_acc did not improve from 0.94225\n",
+ "Epoch 1651/100000\n",
+ " - 19s - loss: 0.3428 - acc: 0.9360 - val_loss: 0.3273 - val_acc: 0.9394\n",
+ "\n",
+ "Epoch 01651: val_acc did not improve from 0.94225\n",
+ "Epoch 1652/100000\n",
+ " - 19s - loss: 0.3444 - acc: 0.9355 - val_loss: 0.3486 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01652: val_acc did not improve from 0.94225\n",
+ "Epoch 1653/100000\n",
+ " - 19s - loss: 0.3439 - acc: 0.9355 - val_loss: 0.3413 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 01653: val_acc did not improve from 0.94225\n",
+ "Epoch 1654/100000\n",
+ " - 19s - loss: 0.3462 - acc: 0.9356 - val_loss: 0.3870 - val_acc: 0.9082\n",
+ "\n",
+ "Epoch 01654: val_acc did not improve from 0.94225\n",
+ "Epoch 1655/100000\n",
+ " - 19s - loss: 0.3412 - acc: 0.9367 - val_loss: 0.4030 - val_acc: 0.9109\n",
+ "\n",
+ "Epoch 01655: val_acc did not improve from 0.94225\n",
+ "Epoch 1656/100000\n",
+ " - 19s - loss: 0.3430 - acc: 0.9360 - val_loss: 0.3620 - val_acc: 0.9208\n",
+ "\n",
+ "Epoch 01656: val_acc did not improve from 0.94225\n",
+ "Epoch 1657/100000\n",
+ " - 18s - loss: 0.3476 - acc: 0.9351 - val_loss: 0.3447 - val_acc: 0.9286\n",
+ "\n",
+ "Epoch 01657: val_acc did not improve from 0.94225\n",
+ "Epoch 1658/100000\n",
+ " - 18s - loss: 0.3434 - acc: 0.9354 - val_loss: 0.3915 - val_acc: 0.9119\n",
+ "\n",
+ "Epoch 01658: val_acc did not improve from 0.94225\n",
+ "Epoch 1659/100000\n",
+ " - 19s - loss: 0.3440 - acc: 0.9361 - val_loss: 0.4365 - val_acc: 0.9025\n",
+ "\n",
+ "Epoch 01659: val_acc did not improve from 0.94225\n",
+ "Epoch 1660/100000\n",
+ " - 19s - loss: 0.3437 - acc: 0.9366 - val_loss: 0.3494 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 01660: val_acc did not improve from 0.94225\n",
+ "Epoch 1661/100000\n",
+ " - 18s - loss: 0.3466 - acc: 0.9356 - val_loss: 0.3381 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 01661: val_acc did not improve from 0.94225\n",
+ "Epoch 1662/100000\n",
+ " - 19s - loss: 0.3439 - acc: 0.9357 - val_loss: 0.3647 - val_acc: 0.9226\n",
+ "\n",
+ "Epoch 01662: val_acc did not improve from 0.94225\n",
+ "Epoch 1663/100000\n",
+ " - 18s - loss: 0.3427 - acc: 0.9363 - val_loss: 0.3479 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 01663: val_acc did not improve from 0.94225\n",
+ "Epoch 1664/100000\n",
+ " - 19s - loss: 0.3469 - acc: 0.9354 - val_loss: 0.3681 - val_acc: 0.9180\n",
+ "\n",
+ "Epoch 01664: val_acc did not improve from 0.94225\n",
+ "Epoch 1665/100000\n",
+ " - 19s - loss: 0.3438 - acc: 0.9361 - val_loss: 0.3655 - val_acc: 0.9196\n",
+ "\n",
+ "Epoch 01665: val_acc did not improve from 0.94225\n",
+ "Epoch 1666/100000\n",
+ " - 18s - loss: 0.3465 - acc: 0.9355 - val_loss: 0.3244 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 01666: val_acc did not improve from 0.94225\n",
+ "Epoch 1667/100000\n",
+ " - 18s - loss: 0.3452 - acc: 0.9353 - val_loss: 0.3734 - val_acc: 0.9197\n",
+ "\n",
+ "Epoch 01667: val_acc did not improve from 0.94225\n",
+ "Epoch 1668/100000\n",
+ " - 19s - loss: 0.3427 - acc: 0.9359 - val_loss: 0.3355 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 01668: val_acc did not improve from 0.94225\n",
+ "Epoch 1669/100000\n",
+ " - 18s - loss: 0.3443 - acc: 0.9362 - val_loss: 0.3602 - val_acc: 0.9214\n",
+ "\n",
+ "Epoch 01669: val_acc did not improve from 0.94225\n",
+ "Epoch 1670/100000\n",
+ " - 18s - loss: 0.3449 - acc: 0.9359 - val_loss: 0.3741 - val_acc: 0.9289\n",
+ "\n",
+ "Epoch 01670: val_acc did not improve from 0.94225\n",
+ "Epoch 1671/100000\n",
+ " - 18s - loss: 0.3432 - acc: 0.9362 - val_loss: 0.3407 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 01671: val_acc did not improve from 0.94225\n",
+ "Epoch 1672/100000\n",
+ " - 19s - loss: 0.3441 - acc: 0.9361 - val_loss: 0.4221 - val_acc: 0.8875\n",
+ "\n",
+ "Epoch 01672: val_acc did not improve from 0.94225\n",
+ "Epoch 1673/100000\n",
+ " - 18s - loss: 0.3485 - acc: 0.9346 - val_loss: 0.3327 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 01673: val_acc did not improve from 0.94225\n",
+ "Epoch 1674/100000\n",
+ " - 19s - loss: 0.3444 - acc: 0.9360 - val_loss: 0.3337 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 01674: val_acc did not improve from 0.94225\n",
+ "Epoch 1675/100000\n",
+ " - 18s - loss: 0.3420 - acc: 0.9367 - val_loss: 0.3317 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 01675: val_acc did not improve from 0.94225\n",
+ "Epoch 1676/100000\n",
+ " - 19s - loss: 0.3458 - acc: 0.9359 - val_loss: 0.3403 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 01676: val_acc did not improve from 0.94225\n",
+ "Epoch 1677/100000\n",
+ " - 19s - loss: 0.3442 - acc: 0.9363 - val_loss: 0.3326 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01677: val_acc did not improve from 0.94225\n",
+ "Epoch 1678/100000\n",
+ " - 19s - loss: 0.3440 - acc: 0.9359 - val_loss: 0.4178 - val_acc: 0.8981\n",
+ "\n",
+ "Epoch 01678: val_acc did not improve from 0.94225\n",
+ "Epoch 1679/100000\n",
+ " - 18s - loss: 0.3452 - acc: 0.9360 - val_loss: 0.3279 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 01679: val_acc did not improve from 0.94225\n",
+ "Epoch 1680/100000\n",
+ " - 19s - loss: 0.3480 - acc: 0.9356 - val_loss: 0.3342 - val_acc: 0.9407\n",
+ "\n",
+ "Epoch 01680: val_acc did not improve from 0.94225\n",
+ "Epoch 1681/100000\n",
+ " - 19s - loss: 0.3467 - acc: 0.9359 - val_loss: 0.3473 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 01681: val_acc did not improve from 0.94225\n",
+ "Epoch 1682/100000\n",
+ " - 19s - loss: 0.3464 - acc: 0.9359 - val_loss: 0.3478 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 01682: val_acc did not improve from 0.94225\n",
+ "Epoch 1683/100000\n",
+ " - 18s - loss: 0.3445 - acc: 0.9359 - val_loss: 0.3420 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 01683: val_acc did not improve from 0.94225\n",
+ "Epoch 1684/100000\n",
+ " - 19s - loss: 0.3421 - acc: 0.9362 - val_loss: 0.3478 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 01684: val_acc did not improve from 0.94225\n",
+ "Epoch 1685/100000\n",
+ " - 19s - loss: 0.3470 - acc: 0.9355 - val_loss: 0.3705 - val_acc: 0.9248\n",
+ "\n",
+ "Epoch 01685: val_acc did not improve from 0.94225\n",
+ "Epoch 1686/100000\n",
+ " - 19s - loss: 0.3464 - acc: 0.9354 - val_loss: 0.3491 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 01686: val_acc did not improve from 0.94225\n",
+ "Epoch 1687/100000\n",
+ " - 19s - loss: 0.3437 - acc: 0.9358 - val_loss: 0.3414 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 01687: val_acc did not improve from 0.94225\n",
+ "Epoch 1688/100000\n",
+ " - 19s - loss: 0.3431 - acc: 0.9353 - val_loss: 0.3396 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01688: val_acc did not improve from 0.94225\n",
+ "Epoch 1689/100000\n",
+ " - 19s - loss: 0.3455 - acc: 0.9359 - val_loss: 0.3668 - val_acc: 0.9242\n",
+ "\n",
+ "Epoch 01689: val_acc did not improve from 0.94225\n",
+ "Epoch 1690/100000\n",
+ " - 18s - loss: 0.3441 - acc: 0.9361 - val_loss: 0.3416 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 01690: val_acc did not improve from 0.94225\n",
+ "Epoch 1691/100000\n",
+ " - 19s - loss: 0.3458 - acc: 0.9357 - val_loss: 0.3389 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01691: val_acc did not improve from 0.94225\n",
+ "Epoch 1692/100000\n",
+ " - 19s - loss: 0.3459 - acc: 0.9351 - val_loss: 0.3362 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 01692: val_acc did not improve from 0.94225\n",
+ "Epoch 1693/100000\n",
+ " - 18s - loss: 0.3433 - acc: 0.9363 - val_loss: 0.3345 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01693: val_acc did not improve from 0.94225\n",
+ "Epoch 1694/100000\n",
+ " - 19s - loss: 0.3441 - acc: 0.9359 - val_loss: 0.4655 - val_acc: 0.8590\n",
+ "\n",
+ "Epoch 01694: val_acc did not improve from 0.94225\n",
+ "Epoch 1695/100000\n",
+ " - 18s - loss: 0.3440 - acc: 0.9354 - val_loss: 0.3398 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01695: val_acc did not improve from 0.94225\n",
+ "Epoch 1696/100000\n",
+ " - 19s - loss: 0.3441 - acc: 0.9355 - val_loss: 0.3628 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 01696: val_acc did not improve from 0.94225\n",
+ "Epoch 1697/100000\n",
+ " - 19s - loss: 0.3441 - acc: 0.9363 - val_loss: 0.3391 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 01697: val_acc did not improve from 0.94225\n",
+ "Epoch 1698/100000\n",
+ " - 18s - loss: 0.3475 - acc: 0.9351 - val_loss: 0.3350 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01698: val_acc did not improve from 0.94225\n",
+ "Epoch 1699/100000\n",
+ " - 19s - loss: 0.3451 - acc: 0.9350 - val_loss: 0.3376 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 01699: val_acc did not improve from 0.94225\n",
+ "Epoch 1700/100000\n",
+ " - 18s - loss: 0.3490 - acc: 0.9349 - val_loss: 0.3493 - val_acc: 0.9280\n",
+ "\n",
+ "Epoch 01700: val_acc did not improve from 0.94225\n",
+ "Epoch 1701/100000\n",
+ " - 19s - loss: 0.3430 - acc: 0.9363 - val_loss: 0.3804 - val_acc: 0.9165\n",
+ "\n",
+ "Epoch 01701: val_acc did not improve from 0.94225\n",
+ "Epoch 1702/100000\n",
+ " - 19s - loss: 0.3431 - acc: 0.9354 - val_loss: 0.3645 - val_acc: 0.9149\n",
+ "\n",
+ "Epoch 01702: val_acc did not improve from 0.94225\n",
+ "Epoch 1703/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9354 - val_loss: 0.3316 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 01703: val_acc did not improve from 0.94225\n",
+ "Epoch 1704/100000\n",
+ " - 19s - loss: 0.3448 - acc: 0.9358 - val_loss: 0.3767 - val_acc: 0.9126\n",
+ "\n",
+ "Epoch 01704: val_acc did not improve from 0.94225\n",
+ "Epoch 1705/100000\n",
+ " - 19s - loss: 0.3428 - acc: 0.9358 - val_loss: 0.3345 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 01705: val_acc did not improve from 0.94225\n",
+ "Epoch 1706/100000\n",
+ " - 19s - loss: 0.3441 - acc: 0.9359 - val_loss: 0.3294 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 01706: val_acc did not improve from 0.94225\n",
+ "Epoch 1707/100000\n",
+ " - 19s - loss: 0.3434 - acc: 0.9363 - val_loss: 0.5831 - val_acc: 0.8240\n",
+ "\n",
+ "Epoch 01707: val_acc did not improve from 0.94225\n",
+ "Epoch 1708/100000\n",
+ " - 19s - loss: 0.3443 - acc: 0.9357 - val_loss: 0.3494 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 01708: val_acc did not improve from 0.94225\n",
+ "Epoch 1709/100000\n",
+ " - 18s - loss: 0.3461 - acc: 0.9349 - val_loss: 0.3415 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 01709: val_acc did not improve from 0.94225\n",
+ "Epoch 1710/100000\n",
+ " - 19s - loss: 0.3421 - acc: 0.9365 - val_loss: 0.3345 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 01710: val_acc did not improve from 0.94225\n",
+ "Epoch 1711/100000\n",
+ " - 19s - loss: 0.3448 - acc: 0.9355 - val_loss: 0.3742 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 01711: val_acc did not improve from 0.94225\n",
+ "Epoch 1712/100000\n",
+ " - 18s - loss: 0.3457 - acc: 0.9356 - val_loss: 0.3552 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 01712: val_acc did not improve from 0.94225\n",
+ "Epoch 1713/100000\n",
+ " - 19s - loss: 0.3476 - acc: 0.9357 - val_loss: 0.3498 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 01713: val_acc did not improve from 0.94225\n",
+ "Epoch 1714/100000\n",
+ " - 19s - loss: 0.3438 - acc: 0.9357 - val_loss: 0.3344 - val_acc: 0.9392\n",
+ "\n",
+ "Epoch 01714: val_acc did not improve from 0.94225\n",
+ "Epoch 1715/100000\n",
+ " - 19s - loss: 0.3450 - acc: 0.9355 - val_loss: 0.3500 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 01715: val_acc did not improve from 0.94225\n",
+ "Epoch 1716/100000\n",
+ " - 18s - loss: 0.3433 - acc: 0.9358 - val_loss: 0.3420 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 01716: val_acc did not improve from 0.94225\n",
+ "Epoch 1717/100000\n",
+ " - 19s - loss: 0.3440 - acc: 0.9361 - val_loss: 0.3423 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 01717: val_acc did not improve from 0.94225\n",
+ "Epoch 1718/100000\n",
+ " - 19s - loss: 0.3441 - acc: 0.9359 - val_loss: 0.3352 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 01718: val_acc did not improve from 0.94225\n",
+ "Epoch 1719/100000\n",
+ " - 18s - loss: 0.3438 - acc: 0.9365 - val_loss: 0.3395 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01719: val_acc did not improve from 0.94225\n",
+ "Epoch 1720/100000\n",
+ " - 19s - loss: 0.3472 - acc: 0.9348 - val_loss: 0.3764 - val_acc: 0.9224\n",
+ "\n",
+ "Epoch 01720: val_acc did not improve from 0.94225\n",
+ "Epoch 1721/100000\n",
+ " - 19s - loss: 0.3417 - acc: 0.9370 - val_loss: 0.3564 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 01721: val_acc did not improve from 0.94225\n",
+ "Epoch 1722/100000\n",
+ " - 18s - loss: 0.3439 - acc: 0.9359 - val_loss: 0.3345 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01722: val_acc did not improve from 0.94225\n",
+ "Epoch 1723/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9353 - val_loss: 0.3339 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01723: val_acc did not improve from 0.94225\n",
+ "Epoch 1724/100000\n",
+ " - 19s - loss: 0.3426 - acc: 0.9361 - val_loss: 0.3722 - val_acc: 0.9200\n",
+ "\n",
+ "Epoch 01724: val_acc did not improve from 0.94225\n",
+ "Epoch 1725/100000\n",
+ " - 19s - loss: 0.3446 - acc: 0.9360 - val_loss: 0.3671 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 01725: val_acc did not improve from 0.94225\n",
+ "Epoch 1726/100000\n",
+ " - 19s - loss: 0.3447 - acc: 0.9360 - val_loss: 0.3396 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 01726: val_acc did not improve from 0.94225\n",
+ "Epoch 1727/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9357 - val_loss: 0.5618 - val_acc: 0.8532\n",
+ "\n",
+ "Epoch 01727: val_acc did not improve from 0.94225\n",
+ "Epoch 1728/100000\n",
+ " - 19s - loss: 0.3446 - acc: 0.9367 - val_loss: 0.3351 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 01728: val_acc did not improve from 0.94225\n",
+ "Epoch 1729/100000\n",
+ " - 19s - loss: 0.3472 - acc: 0.9349 - val_loss: 0.3545 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 01729: val_acc did not improve from 0.94225\n",
+ "Epoch 1730/100000\n",
+ " - 18s - loss: 0.3458 - acc: 0.9359 - val_loss: 0.3827 - val_acc: 0.9148\n",
+ "\n",
+ "Epoch 01730: val_acc did not improve from 0.94225\n",
+ "Epoch 1731/100000\n",
+ " - 19s - loss: 0.3466 - acc: 0.9350 - val_loss: 0.4218 - val_acc: 0.8952\n",
+ "\n",
+ "Epoch 01731: val_acc did not improve from 0.94225\n",
+ "Epoch 1732/100000\n",
+ " - 19s - loss: 0.3463 - acc: 0.9361 - val_loss: 0.3861 - val_acc: 0.9171\n",
+ "\n",
+ "Epoch 01732: val_acc did not improve from 0.94225\n",
+ "Epoch 1733/100000\n",
+ " - 19s - loss: 0.3462 - acc: 0.9349 - val_loss: 0.3724 - val_acc: 0.9179\n",
+ "\n",
+ "Epoch 01733: val_acc did not improve from 0.94225\n",
+ "Epoch 1734/100000\n",
+ " - 19s - loss: 0.3442 - acc: 0.9362 - val_loss: 0.3649 - val_acc: 0.9167\n",
+ "\n",
+ "Epoch 01734: val_acc did not improve from 0.94225\n",
+ "Epoch 1735/100000\n",
+ " - 19s - loss: 0.3457 - acc: 0.9361 - val_loss: 0.3591 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 01735: val_acc did not improve from 0.94225\n",
+ "Epoch 1736/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9363 - val_loss: 0.3331 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 01736: val_acc did not improve from 0.94225\n",
+ "Epoch 1737/100000\n",
+ " - 18s - loss: 0.3444 - acc: 0.9356 - val_loss: 0.3535 - val_acc: 0.9275\n",
+ "\n",
+ "Epoch 01737: val_acc did not improve from 0.94225\n",
+ "Epoch 1738/100000\n",
+ " - 18s - loss: 0.3451 - acc: 0.9352 - val_loss: 0.3405 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 01738: val_acc did not improve from 0.94225\n",
+ "Epoch 1739/100000\n",
+ " - 18s - loss: 0.3450 - acc: 0.9360 - val_loss: 0.3644 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 01739: val_acc did not improve from 0.94225\n",
+ "Epoch 1740/100000\n",
+ " - 19s - loss: 0.3475 - acc: 0.9354 - val_loss: 0.3492 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 01740: val_acc did not improve from 0.94225\n",
+ "Epoch 1741/100000\n",
+ " - 19s - loss: 0.3438 - acc: 0.9364 - val_loss: 0.3353 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 01741: val_acc did not improve from 0.94225\n",
+ "Epoch 1742/100000\n",
+ " - 19s - loss: 0.3432 - acc: 0.9358 - val_loss: 0.3533 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 01742: val_acc did not improve from 0.94225\n",
+ "Epoch 1743/100000\n",
+ " - 19s - loss: 0.3495 - acc: 0.9349 - val_loss: 0.3384 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 01743: val_acc did not improve from 0.94225\n",
+ "Epoch 1744/100000\n",
+ " - 19s - loss: 0.3461 - acc: 0.9351 - val_loss: 0.3420 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 01744: val_acc did not improve from 0.94225\n",
+ "Epoch 1745/100000\n",
+ " - 19s - loss: 0.3434 - acc: 0.9356 - val_loss: 0.5406 - val_acc: 0.8353\n",
+ "\n",
+ "Epoch 01745: val_acc did not improve from 0.94225\n",
+ "Epoch 1746/100000\n",
+ " - 19s - loss: 0.3448 - acc: 0.9356 - val_loss: 0.3458 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 01746: val_acc did not improve from 0.94225\n",
+ "Epoch 1747/100000\n",
+ " - 19s - loss: 0.3433 - acc: 0.9362 - val_loss: 0.3567 - val_acc: 0.9282\n",
+ "\n",
+ "Epoch 01747: val_acc did not improve from 0.94225\n",
+ "Epoch 1748/100000\n",
+ " - 18s - loss: 0.3457 - acc: 0.9356 - val_loss: 0.3527 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 01748: val_acc did not improve from 0.94225\n",
+ "Epoch 1749/100000\n",
+ " - 19s - loss: 0.3443 - acc: 0.9360 - val_loss: 0.3767 - val_acc: 0.9055\n",
+ "\n",
+ "Epoch 01749: val_acc did not improve from 0.94225\n",
+ "Epoch 1750/100000\n",
+ " - 19s - loss: 0.3460 - acc: 0.9357 - val_loss: 0.3426 - val_acc: 0.9266\n",
+ "\n",
+ "Epoch 01750: val_acc did not improve from 0.94225\n",
+ "Epoch 1751/100000\n",
+ " - 18s - loss: 0.3447 - acc: 0.9355 - val_loss: 0.3402 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 01751: val_acc did not improve from 0.94225\n",
+ "Epoch 1752/100000\n",
+ " - 19s - loss: 0.3440 - acc: 0.9366 - val_loss: 0.3238 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 01752: val_acc did not improve from 0.94225\n",
+ "Epoch 1753/100000\n",
+ " - 18s - loss: 0.3451 - acc: 0.9357 - val_loss: 0.4722 - val_acc: 0.8796\n",
+ "\n",
+ "Epoch 01753: val_acc did not improve from 0.94225\n",
+ "Epoch 1754/100000\n",
+ " - 19s - loss: 0.3444 - acc: 0.9354 - val_loss: 0.3667 - val_acc: 0.9242\n",
+ "\n",
+ "Epoch 01754: val_acc did not improve from 0.94225\n",
+ "Epoch 1755/100000\n",
+ " - 19s - loss: 0.3422 - acc: 0.9365 - val_loss: 0.3503 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 01755: val_acc did not improve from 0.94225\n",
+ "Epoch 1756/100000\n",
+ " - 18s - loss: 0.3440 - acc: 0.9365 - val_loss: 0.3486 - val_acc: 0.9304\n",
+ "\n",
+ "Epoch 01756: val_acc did not improve from 0.94225\n",
+ "Epoch 1757/100000\n",
+ " - 19s - loss: 0.3459 - acc: 0.9360 - val_loss: 0.3721 - val_acc: 0.9223\n",
+ "\n",
+ "Epoch 01757: val_acc did not improve from 0.94225\n",
+ "Epoch 1758/100000\n",
+ " - 19s - loss: 0.3422 - acc: 0.9370 - val_loss: 0.3434 - val_acc: 0.9302\n",
+ "\n",
+ "Epoch 01758: val_acc did not improve from 0.94225\n",
+ "Epoch 1759/100000\n",
+ " - 18s - loss: 0.3469 - acc: 0.9354 - val_loss: 0.3970 - val_acc: 0.9048\n",
+ "\n",
+ "Epoch 01759: val_acc did not improve from 0.94225\n",
+ "Epoch 1760/100000\n",
+ " - 18s - loss: 0.3462 - acc: 0.9358 - val_loss: 0.4063 - val_acc: 0.9033\n",
+ "\n",
+ "Epoch 01760: val_acc did not improve from 0.94225\n",
+ "Epoch 1761/100000\n",
+ " - 19s - loss: 0.3471 - acc: 0.9356 - val_loss: 0.3360 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 01761: val_acc did not improve from 0.94225\n",
+ "Epoch 1762/100000\n",
+ " - 18s - loss: 0.3458 - acc: 0.9357 - val_loss: 0.3582 - val_acc: 0.9275\n",
+ "\n",
+ "Epoch 01762: val_acc did not improve from 0.94225\n",
+ "Epoch 1763/100000\n",
+ " - 19s - loss: 0.3464 - acc: 0.9352 - val_loss: 0.3693 - val_acc: 0.9172\n",
+ "\n",
+ "Epoch 01763: val_acc did not improve from 0.94225\n",
+ "Epoch 1764/100000\n",
+ " - 19s - loss: 0.3433 - acc: 0.9360 - val_loss: 0.3368 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 01764: val_acc did not improve from 0.94225\n",
+ "Epoch 1765/100000\n",
+ " - 19s - loss: 0.3435 - acc: 0.9361 - val_loss: 0.3365 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 01765: val_acc did not improve from 0.94225\n",
+ "Epoch 1766/100000\n",
+ " - 19s - loss: 0.3482 - acc: 0.9354 - val_loss: 0.3367 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 01766: val_acc did not improve from 0.94225\n",
+ "Epoch 1767/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9360 - val_loss: 0.3322 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 01767: val_acc did not improve from 0.94225\n",
+ "Epoch 1768/100000\n",
+ " - 19s - loss: 0.3461 - acc: 0.9358 - val_loss: 0.5011 - val_acc: 0.8784\n",
+ "\n",
+ "Epoch 01768: val_acc did not improve from 0.94225\n",
+ "Epoch 1769/100000\n",
+ " - 18s - loss: 0.3445 - acc: 0.9361 - val_loss: 0.3607 - val_acc: 0.9216\n",
+ "\n",
+ "Epoch 01769: val_acc did not improve from 0.94225\n",
+ "Epoch 1770/100000\n",
+ " - 19s - loss: 0.3439 - acc: 0.9361 - val_loss: 0.3617 - val_acc: 0.9244\n",
+ "\n",
+ "Epoch 01770: val_acc did not improve from 0.94225\n",
+ "Epoch 1771/100000\n",
+ " - 19s - loss: 0.3460 - acc: 0.9355 - val_loss: 0.3482 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 01771: val_acc did not improve from 0.94225\n",
+ "Epoch 1772/100000\n",
+ " - 19s - loss: 0.3451 - acc: 0.9363 - val_loss: 0.3517 - val_acc: 0.9241\n",
+ "\n",
+ "Epoch 01772: val_acc did not improve from 0.94225\n",
+ "Epoch 1773/100000\n",
+ " - 19s - loss: 0.3476 - acc: 0.9357 - val_loss: 0.3437 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 01773: val_acc did not improve from 0.94225\n",
+ "Epoch 1774/100000\n",
+ " - 18s - loss: 0.3475 - acc: 0.9351 - val_loss: 0.3953 - val_acc: 0.9081\n",
+ "\n",
+ "Epoch 01774: val_acc did not improve from 0.94225\n",
+ "Epoch 1775/100000\n",
+ " - 19s - loss: 0.3455 - acc: 0.9353 - val_loss: 0.3874 - val_acc: 0.9141\n",
+ "\n",
+ "Epoch 01775: val_acc did not improve from 0.94225\n",
+ "Epoch 1776/100000\n",
+ " - 19s - loss: 0.3438 - acc: 0.9358 - val_loss: 0.3388 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 01776: val_acc did not improve from 0.94225\n",
+ "Epoch 1777/100000\n",
+ " - 19s - loss: 0.3433 - acc: 0.9360 - val_loss: 0.3924 - val_acc: 0.9108\n",
+ "\n",
+ "Epoch 01777: val_acc did not improve from 0.94225\n",
+ "Epoch 1778/100000\n",
+ " - 19s - loss: 0.3443 - acc: 0.9354 - val_loss: 0.3360 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 01778: val_acc did not improve from 0.94225\n",
+ "Epoch 1779/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9362 - val_loss: 0.4493 - val_acc: 0.9014\n",
+ "\n",
+ "Epoch 01779: val_acc did not improve from 0.94225\n",
+ "Epoch 1780/100000\n",
+ " - 19s - loss: 0.3450 - acc: 0.9362 - val_loss: 0.3414 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 01780: val_acc did not improve from 0.94225\n",
+ "Epoch 1781/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9357 - val_loss: 0.3748 - val_acc: 0.9180\n",
+ "\n",
+ "Epoch 01781: val_acc did not improve from 0.94225\n",
+ "Epoch 1782/100000\n",
+ " - 18s - loss: 0.3441 - acc: 0.9363 - val_loss: 0.3718 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 01782: val_acc did not improve from 0.94225\n",
+ "Epoch 1783/100000\n",
+ " - 19s - loss: 0.3457 - acc: 0.9358 - val_loss: 0.3416 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 01783: val_acc did not improve from 0.94225\n",
+ "Epoch 1784/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9360 - val_loss: 0.4069 - val_acc: 0.9051\n",
+ "\n",
+ "Epoch 01784: val_acc did not improve from 0.94225\n",
+ "Epoch 1785/100000\n",
+ " - 19s - loss: 0.3436 - acc: 0.9367 - val_loss: 0.3453 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 01785: val_acc did not improve from 0.94225\n",
+ "Epoch 1786/100000\n",
+ " - 18s - loss: 0.3436 - acc: 0.9363 - val_loss: 0.3952 - val_acc: 0.9058\n",
+ "\n",
+ "Epoch 01786: val_acc did not improve from 0.94225\n",
+ "Epoch 1787/100000\n",
+ " - 19s - loss: 0.3453 - acc: 0.9362 - val_loss: 0.3863 - val_acc: 0.9085\n",
+ "\n",
+ "Epoch 01787: val_acc did not improve from 0.94225\n",
+ "Epoch 1788/100000\n",
+ " - 18s - loss: 0.3433 - acc: 0.9366 - val_loss: 0.3388 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 01788: val_acc did not improve from 0.94225\n",
+ "Epoch 1789/100000\n",
+ " - 19s - loss: 0.3472 - acc: 0.9351 - val_loss: 0.3632 - val_acc: 0.9232\n",
+ "\n",
+ "Epoch 01789: val_acc did not improve from 0.94225\n",
+ "Epoch 1790/100000\n",
+ " - 18s - loss: 0.3439 - acc: 0.9360 - val_loss: 0.3549 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 01790: val_acc did not improve from 0.94225\n",
+ "Epoch 1791/100000\n",
+ " - 19s - loss: 0.3456 - acc: 0.9351 - val_loss: 0.3600 - val_acc: 0.9195\n",
+ "\n",
+ "Epoch 01791: val_acc did not improve from 0.94225\n",
+ "Epoch 1792/100000\n",
+ " - 19s - loss: 0.3457 - acc: 0.9356 - val_loss: 0.3565 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 01792: val_acc did not improve from 0.94225\n",
+ "Epoch 1793/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9359 - val_loss: 0.3614 - val_acc: 0.9238\n",
+ "\n",
+ "Epoch 01793: val_acc did not improve from 0.94225\n",
+ "Epoch 1794/100000\n",
+ " - 19s - loss: 0.3473 - acc: 0.9358 - val_loss: 0.3562 - val_acc: 0.9309\n",
+ "\n",
+ "Epoch 01794: val_acc did not improve from 0.94225\n",
+ "Epoch 1795/100000\n",
+ " - 19s - loss: 0.3457 - acc: 0.9357 - val_loss: 0.3765 - val_acc: 0.9223\n",
+ "\n",
+ "Epoch 01795: val_acc did not improve from 0.94225\n",
+ "Epoch 1796/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9355 - val_loss: 0.3620 - val_acc: 0.9217\n",
+ "\n",
+ "Epoch 01796: val_acc did not improve from 0.94225\n",
+ "Epoch 1797/100000\n",
+ " - 19s - loss: 0.3472 - acc: 0.9349 - val_loss: 0.3366 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 01797: val_acc did not improve from 0.94225\n",
+ "Epoch 1798/100000\n",
+ " - 19s - loss: 0.3437 - acc: 0.9369 - val_loss: 0.3318 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 01798: val_acc did not improve from 0.94225\n",
+ "Epoch 1799/100000\n",
+ " - 19s - loss: 0.3499 - acc: 0.9347 - val_loss: 0.3550 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 01799: val_acc did not improve from 0.94225\n",
+ "Epoch 1800/100000\n",
+ " - 19s - loss: 0.3434 - acc: 0.9360 - val_loss: 0.3579 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 01800: val_acc did not improve from 0.94225\n",
+ "Epoch 1801/100000\n",
+ " - 18s - loss: 0.3461 - acc: 0.9351 - val_loss: 0.3316 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 01801: val_acc did not improve from 0.94225\n",
+ "Epoch 1802/100000\n",
+ " - 19s - loss: 0.3431 - acc: 0.9370 - val_loss: 0.3450 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 01802: val_acc did not improve from 0.94225\n",
+ "Epoch 1803/100000\n",
+ " - 19s - loss: 0.3478 - acc: 0.9351 - val_loss: 0.3380 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 01803: val_acc did not improve from 0.94225\n",
+ "Epoch 1804/100000\n",
+ " - 19s - loss: 0.3470 - acc: 0.9360 - val_loss: 0.3379 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 01804: val_acc did not improve from 0.94225\n",
+ "Epoch 1805/100000\n",
+ " - 18s - loss: 0.3482 - acc: 0.9349 - val_loss: 0.3516 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01805: val_acc did not improve from 0.94225\n",
+ "Epoch 1806/100000\n",
+ " - 19s - loss: 0.3464 - acc: 0.9354 - val_loss: 0.3631 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 01806: val_acc did not improve from 0.94225\n",
+ "Epoch 1807/100000\n",
+ " - 19s - loss: 0.3455 - acc: 0.9356 - val_loss: 0.3397 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 01807: val_acc did not improve from 0.94225\n",
+ "Epoch 1808/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9356 - val_loss: 0.4644 - val_acc: 0.8650\n",
+ "\n",
+ "Epoch 01808: val_acc did not improve from 0.94225\n",
+ "Epoch 1809/100000\n",
+ " - 19s - loss: 0.3421 - acc: 0.9365 - val_loss: 0.4945 - val_acc: 0.8542\n",
+ "\n",
+ "Epoch 01809: val_acc did not improve from 0.94225\n",
+ "Epoch 1810/100000\n",
+ " - 19s - loss: 0.3462 - acc: 0.9360 - val_loss: 0.3433 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 01810: val_acc did not improve from 0.94225\n",
+ "Epoch 1811/100000\n",
+ " - 18s - loss: 0.3458 - acc: 0.9351 - val_loss: 0.3511 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 01811: val_acc did not improve from 0.94225\n",
+ "Epoch 1812/100000\n",
+ " - 19s - loss: 0.3450 - acc: 0.9354 - val_loss: 0.3510 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 01812: val_acc did not improve from 0.94225\n",
+ "Epoch 1813/100000\n",
+ " - 19s - loss: 0.3432 - acc: 0.9359 - val_loss: 0.3347 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01813: val_acc did not improve from 0.94225\n",
+ "Epoch 1814/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9358 - val_loss: 0.3679 - val_acc: 0.9173\n",
+ "\n",
+ "Epoch 01814: val_acc did not improve from 0.94225\n",
+ "Epoch 1815/100000\n",
+ " - 18s - loss: 0.3439 - acc: 0.9360 - val_loss: 0.3394 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01815: val_acc did not improve from 0.94225\n",
+ "Epoch 1816/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9356 - val_loss: 0.4141 - val_acc: 0.8937\n",
+ "\n",
+ "Epoch 01816: val_acc did not improve from 0.94225\n",
+ "Epoch 1817/100000\n",
+ " - 19s - loss: 0.3465 - acc: 0.9359 - val_loss: 0.3329 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 01817: val_acc did not improve from 0.94225\n",
+ "Epoch 1818/100000\n",
+ " - 18s - loss: 0.3493 - acc: 0.9352 - val_loss: 0.3462 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 01818: val_acc did not improve from 0.94225\n",
+ "Epoch 1819/100000\n",
+ " - 19s - loss: 0.3462 - acc: 0.9366 - val_loss: 0.3872 - val_acc: 0.9099\n",
+ "\n",
+ "Epoch 01819: val_acc did not improve from 0.94225\n",
+ "Epoch 1820/100000\n",
+ " - 18s - loss: 0.3465 - acc: 0.9351 - val_loss: 0.4943 - val_acc: 0.8578\n",
+ "\n",
+ "Epoch 01820: val_acc did not improve from 0.94225\n",
+ "Epoch 1821/100000\n",
+ " - 19s - loss: 0.3467 - acc: 0.9355 - val_loss: 0.3535 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 01821: val_acc did not improve from 0.94225\n",
+ "Epoch 1822/100000\n",
+ " - 18s - loss: 0.3455 - acc: 0.9355 - val_loss: 0.3351 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01822: val_acc did not improve from 0.94225\n",
+ "Epoch 1823/100000\n",
+ " - 19s - loss: 0.3469 - acc: 0.9359 - val_loss: 0.3380 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 01823: val_acc did not improve from 0.94225\n",
+ "Epoch 1824/100000\n",
+ " - 18s - loss: 0.3442 - acc: 0.9356 - val_loss: 0.3514 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 01824: val_acc did not improve from 0.94225\n",
+ "Epoch 1825/100000\n",
+ " - 19s - loss: 0.3450 - acc: 0.9359 - val_loss: 0.4265 - val_acc: 0.8963\n",
+ "\n",
+ "Epoch 01825: val_acc did not improve from 0.94225\n",
+ "Epoch 1826/100000\n",
+ " - 19s - loss: 0.3453 - acc: 0.9362 - val_loss: 0.3394 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01826: val_acc did not improve from 0.94225\n",
+ "Epoch 1827/100000\n",
+ " - 18s - loss: 0.3455 - acc: 0.9353 - val_loss: 0.3601 - val_acc: 0.9244\n",
+ "\n",
+ "Epoch 01827: val_acc did not improve from 0.94225\n",
+ "Epoch 1828/100000\n",
+ " - 19s - loss: 0.3453 - acc: 0.9359 - val_loss: 0.3357 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 01828: val_acc did not improve from 0.94225\n",
+ "Epoch 1829/100000\n",
+ " - 19s - loss: 0.3459 - acc: 0.9356 - val_loss: 0.3413 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 01829: val_acc did not improve from 0.94225\n",
+ "Epoch 1830/100000\n",
+ " - 19s - loss: 0.3445 - acc: 0.9353 - val_loss: 0.3349 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 01830: val_acc did not improve from 0.94225\n",
+ "Epoch 1831/100000\n",
+ " - 18s - loss: 0.3448 - acc: 0.9364 - val_loss: 0.3533 - val_acc: 0.9257\n",
+ "\n",
+ "Epoch 01831: val_acc did not improve from 0.94225\n",
+ "Epoch 1832/100000\n",
+ " - 19s - loss: 0.3459 - acc: 0.9356 - val_loss: 0.3687 - val_acc: 0.9172\n",
+ "\n",
+ "Epoch 01832: val_acc did not improve from 0.94225\n",
+ "Epoch 1833/100000\n",
+ " - 19s - loss: 0.3490 - acc: 0.9345 - val_loss: 0.3467 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 01833: val_acc did not improve from 0.94225\n",
+ "Epoch 1834/100000\n",
+ " - 19s - loss: 0.3482 - acc: 0.9358 - val_loss: 0.3371 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 01834: val_acc did not improve from 0.94225\n",
+ "Epoch 1835/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9363 - val_loss: 0.3371 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 01835: val_acc did not improve from 0.94225\n",
+ "Epoch 1836/100000\n",
+ " - 19s - loss: 0.3487 - acc: 0.9353 - val_loss: 0.4199 - val_acc: 0.8958\n",
+ "\n",
+ "Epoch 01836: val_acc did not improve from 0.94225\n",
+ "Epoch 1837/100000\n",
+ " - 19s - loss: 0.3425 - acc: 0.9361 - val_loss: 0.4083 - val_acc: 0.9120\n",
+ "\n",
+ "Epoch 01837: val_acc did not improve from 0.94225\n",
+ "Epoch 1838/100000\n",
+ " - 19s - loss: 0.3433 - acc: 0.9363 - val_loss: 0.3328 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01838: val_acc did not improve from 0.94225\n",
+ "Epoch 1839/100000\n",
+ " - 19s - loss: 0.3430 - acc: 0.9363 - val_loss: 0.3676 - val_acc: 0.9196\n",
+ "\n",
+ "Epoch 01839: val_acc did not improve from 0.94225\n",
+ "Epoch 1840/100000\n",
+ " - 19s - loss: 0.3451 - acc: 0.9357 - val_loss: 0.3266 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 01840: val_acc did not improve from 0.94225\n",
+ "Epoch 1841/100000\n",
+ " - 19s - loss: 0.3429 - acc: 0.9358 - val_loss: 0.4048 - val_acc: 0.8994\n",
+ "\n",
+ "Epoch 01841: val_acc did not improve from 0.94225\n",
+ "Epoch 1842/100000\n",
+ " - 19s - loss: 0.3487 - acc: 0.9354 - val_loss: 0.3389 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 01842: val_acc did not improve from 0.94225\n",
+ "Epoch 1843/100000\n",
+ " - 19s - loss: 0.3438 - acc: 0.9361 - val_loss: 0.3264 - val_acc: 0.9400\n",
+ "\n",
+ "Epoch 01843: val_acc did not improve from 0.94225\n",
+ "Epoch 1844/100000\n",
+ " - 19s - loss: 0.3454 - acc: 0.9353 - val_loss: 0.3441 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 01844: val_acc did not improve from 0.94225\n",
+ "Epoch 1845/100000\n",
+ " - 19s - loss: 0.3437 - acc: 0.9360 - val_loss: 0.3494 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 01845: val_acc did not improve from 0.94225\n",
+ "Epoch 1846/100000\n",
+ " - 19s - loss: 0.3463 - acc: 0.9351 - val_loss: 0.3424 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 01846: val_acc did not improve from 0.94225\n",
+ "Epoch 1847/100000\n",
+ " - 19s - loss: 0.3446 - acc: 0.9356 - val_loss: 0.3768 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 01847: val_acc did not improve from 0.94225\n",
+ "Epoch 1848/100000\n",
+ " - 19s - loss: 0.3474 - acc: 0.9363 - val_loss: 0.3597 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 01848: val_acc did not improve from 0.94225\n",
+ "Epoch 1849/100000\n",
+ " - 19s - loss: 0.3456 - acc: 0.9359 - val_loss: 0.3336 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 01849: val_acc did not improve from 0.94225\n",
+ "Epoch 1850/100000\n",
+ " - 19s - loss: 0.3447 - acc: 0.9362 - val_loss: 0.3817 - val_acc: 0.9098\n",
+ "\n",
+ "Epoch 01850: val_acc did not improve from 0.94225\n",
+ "Epoch 1851/100000\n",
+ " - 19s - loss: 0.3429 - acc: 0.9366 - val_loss: 0.3690 - val_acc: 0.9146\n",
+ "\n",
+ "Epoch 01851: val_acc did not improve from 0.94225\n",
+ "Epoch 1852/100000\n",
+ " - 19s - loss: 0.3431 - acc: 0.9359 - val_loss: 0.3440 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 01852: val_acc did not improve from 0.94225\n",
+ "Epoch 1853/100000\n",
+ " - 19s - loss: 0.3442 - acc: 0.9356 - val_loss: 0.3455 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 01853: val_acc did not improve from 0.94225\n",
+ "Epoch 1854/100000\n",
+ " - 18s - loss: 0.3458 - acc: 0.9353 - val_loss: 0.3345 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01854: val_acc did not improve from 0.94225\n",
+ "Epoch 1855/100000\n",
+ " - 19s - loss: 0.3426 - acc: 0.9366 - val_loss: 0.3273 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 01855: val_acc did not improve from 0.94225\n",
+ "Epoch 1856/100000\n",
+ " - 19s - loss: 0.3475 - acc: 0.9348 - val_loss: 0.3643 - val_acc: 0.9217\n",
+ "\n",
+ "Epoch 01856: val_acc did not improve from 0.94225\n",
+ "Epoch 1857/100000\n",
+ " - 18s - loss: 0.3485 - acc: 0.9351 - val_loss: 0.3886 - val_acc: 0.9213\n",
+ "\n",
+ "Epoch 01857: val_acc did not improve from 0.94225\n",
+ "Epoch 1858/100000\n",
+ " - 19s - loss: 0.3481 - acc: 0.9356 - val_loss: 0.3429 - val_acc: 0.9258\n",
+ "\n",
+ "Epoch 01858: val_acc did not improve from 0.94225\n",
+ "Epoch 1859/100000\n",
+ " - 19s - loss: 0.3457 - acc: 0.9355 - val_loss: 0.3502 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 01859: val_acc did not improve from 0.94225\n",
+ "Epoch 1860/100000\n",
+ " - 19s - loss: 0.3444 - acc: 0.9357 - val_loss: 0.3297 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 01860: val_acc did not improve from 0.94225\n",
+ "Epoch 1861/100000\n",
+ " - 18s - loss: 0.3473 - acc: 0.9359 - val_loss: 0.3376 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01861: val_acc did not improve from 0.94225\n",
+ "Epoch 1862/100000\n",
+ " - 19s - loss: 0.3442 - acc: 0.9366 - val_loss: 0.3787 - val_acc: 0.9207\n",
+ "\n",
+ "Epoch 01862: val_acc did not improve from 0.94225\n",
+ "Epoch 1863/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9356 - val_loss: 0.3275 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 01863: val_acc did not improve from 0.94225\n",
+ "Epoch 1864/100000\n",
+ " - 19s - loss: 0.3443 - acc: 0.9361 - val_loss: 0.3474 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 01864: val_acc did not improve from 0.94225\n",
+ "Epoch 1865/100000\n",
+ " - 19s - loss: 0.3452 - acc: 0.9356 - val_loss: 0.4720 - val_acc: 0.8775\n",
+ "\n",
+ "Epoch 01865: val_acc did not improve from 0.94225\n",
+ "Epoch 1866/100000\n",
+ " - 19s - loss: 0.3463 - acc: 0.9364 - val_loss: 0.3485 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 01866: val_acc did not improve from 0.94225\n",
+ "Epoch 1867/100000\n",
+ " - 19s - loss: 0.3443 - acc: 0.9360 - val_loss: 0.3654 - val_acc: 0.9213\n",
+ "\n",
+ "Epoch 01867: val_acc did not improve from 0.94225\n",
+ "Epoch 1868/100000\n",
+ " - 19s - loss: 0.3439 - acc: 0.9363 - val_loss: 0.4282 - val_acc: 0.8903\n",
+ "\n",
+ "Epoch 01868: val_acc did not improve from 0.94225\n",
+ "Epoch 1869/100000\n",
+ " - 19s - loss: 0.3466 - acc: 0.9356 - val_loss: 0.3756 - val_acc: 0.9195\n",
+ "\n",
+ "Epoch 01869: val_acc did not improve from 0.94225\n",
+ "Epoch 1870/100000\n",
+ " - 19s - loss: 0.3430 - acc: 0.9366 - val_loss: 0.3423 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 01870: val_acc did not improve from 0.94225\n",
+ "Epoch 1871/100000\n",
+ " - 19s - loss: 0.3438 - acc: 0.9356 - val_loss: 0.3364 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 01871: val_acc did not improve from 0.94225\n",
+ "Epoch 1872/100000\n",
+ " - 19s - loss: 0.3428 - acc: 0.9361 - val_loss: 0.3392 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 01872: val_acc did not improve from 0.94225\n",
+ "Epoch 1873/100000\n",
+ " - 19s - loss: 0.3440 - acc: 0.9363 - val_loss: 0.3449 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01873: val_acc did not improve from 0.94225\n",
+ "Epoch 1874/100000\n",
+ " - 19s - loss: 0.3462 - acc: 0.9351 - val_loss: 0.3953 - val_acc: 0.9092\n",
+ "\n",
+ "Epoch 01874: val_acc did not improve from 0.94225\n",
+ "Epoch 1875/100000\n",
+ " - 19s - loss: 0.3439 - acc: 0.9367 - val_loss: 0.3386 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 01875: val_acc did not improve from 0.94225\n",
+ "Epoch 1876/100000\n",
+ " - 19s - loss: 0.3451 - acc: 0.9361 - val_loss: 0.3408 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01876: val_acc did not improve from 0.94225\n",
+ "Epoch 1877/100000\n",
+ " - 19s - loss: 0.3465 - acc: 0.9359 - val_loss: 0.3338 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 01877: val_acc did not improve from 0.94225\n",
+ "Epoch 1878/100000\n",
+ " - 19s - loss: 0.3448 - acc: 0.9360 - val_loss: 0.3281 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 01878: val_acc did not improve from 0.94225\n",
+ "Epoch 1879/100000\n",
+ " - 19s - loss: 0.3501 - acc: 0.9347 - val_loss: 0.3600 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 01879: val_acc did not improve from 0.94225\n",
+ "Epoch 1880/100000\n",
+ " - 19s - loss: 0.3444 - acc: 0.9362 - val_loss: 0.3468 - val_acc: 0.9281\n",
+ "\n",
+ "Epoch 01880: val_acc did not improve from 0.94225\n",
+ "Epoch 1881/100000\n",
+ " - 19s - loss: 0.3463 - acc: 0.9356 - val_loss: 0.3463 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 01881: val_acc did not improve from 0.94225\n",
+ "Epoch 1882/100000\n",
+ " - 19s - loss: 0.3456 - acc: 0.9362 - val_loss: 0.3378 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 01882: val_acc did not improve from 0.94225\n",
+ "Epoch 1883/100000\n",
+ " - 19s - loss: 0.3486 - acc: 0.9348 - val_loss: 0.4030 - val_acc: 0.9203\n",
+ "\n",
+ "Epoch 01883: val_acc did not improve from 0.94225\n",
+ "Epoch 1884/100000\n",
+ " - 18s - loss: 0.3457 - acc: 0.9350 - val_loss: 0.3446 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 01884: val_acc did not improve from 0.94225\n",
+ "Epoch 1885/100000\n",
+ " - 19s - loss: 0.3457 - acc: 0.9359 - val_loss: 0.3403 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 01885: val_acc did not improve from 0.94225\n",
+ "Epoch 1886/100000\n",
+ " - 18s - loss: 0.3436 - acc: 0.9359 - val_loss: 0.3296 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 01886: val_acc did not improve from 0.94225\n",
+ "Epoch 1887/100000\n",
+ " - 19s - loss: 0.3444 - acc: 0.9356 - val_loss: 0.3730 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 01887: val_acc did not improve from 0.94225\n",
+ "Epoch 1888/100000\n",
+ " - 18s - loss: 0.3432 - acc: 0.9364 - val_loss: 0.3392 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 01888: val_acc did not improve from 0.94225\n",
+ "Epoch 1889/100000\n",
+ " - 19s - loss: 0.3470 - acc: 0.9346 - val_loss: 0.3721 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 01889: val_acc did not improve from 0.94225\n",
+ "Epoch 1890/100000\n",
+ " - 18s - loss: 0.3470 - acc: 0.9361 - val_loss: 0.4126 - val_acc: 0.9019\n",
+ "\n",
+ "Epoch 01890: val_acc did not improve from 0.94225\n",
+ "Epoch 1891/100000\n",
+ " - 19s - loss: 0.3465 - acc: 0.9352 - val_loss: 0.3599 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 01891: val_acc did not improve from 0.94225\n",
+ "Epoch 1892/100000\n",
+ " - 18s - loss: 0.3444 - acc: 0.9359 - val_loss: 0.3766 - val_acc: 0.9184\n",
+ "\n",
+ "Epoch 01892: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 01892: ReduceLROnPlateau reducing learning rate to 0.0006634203542489559.\n",
+ "Epoch 1893/100000\n",
+ " - 18s - loss: 0.3396 - acc: 0.9357 - val_loss: 0.3297 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01893: val_acc did not improve from 0.94225\n",
+ "Epoch 1894/100000\n",
+ " - 18s - loss: 0.3397 - acc: 0.9355 - val_loss: 0.3516 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 01894: val_acc did not improve from 0.94225\n",
+ "Epoch 1895/100000\n",
+ " - 18s - loss: 0.3364 - acc: 0.9359 - val_loss: 0.3380 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 01895: val_acc did not improve from 0.94225\n",
+ "Epoch 1896/100000\n",
+ " - 19s - loss: 0.3376 - acc: 0.9362 - val_loss: 0.3287 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01896: val_acc did not improve from 0.94225\n",
+ "Epoch 1897/100000\n",
+ " - 18s - loss: 0.3398 - acc: 0.9353 - val_loss: 0.5274 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 01897: val_acc did not improve from 0.94225\n",
+ "Epoch 1898/100000\n",
+ " - 19s - loss: 0.3387 - acc: 0.9359 - val_loss: 0.6311 - val_acc: 0.8186\n",
+ "\n",
+ "Epoch 01898: val_acc did not improve from 0.94225\n",
+ "Epoch 1899/100000\n",
+ " - 19s - loss: 0.3411 - acc: 0.9353 - val_loss: 0.3286 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 01899: val_acc did not improve from 0.94225\n",
+ "Epoch 1900/100000\n",
+ " - 19s - loss: 0.3371 - acc: 0.9370 - val_loss: 0.3265 - val_acc: 0.9394\n",
+ "\n",
+ "Epoch 01900: val_acc did not improve from 0.94225\n",
+ "Epoch 1901/100000\n",
+ " - 19s - loss: 0.3371 - acc: 0.9364 - val_loss: 0.3699 - val_acc: 0.9204\n",
+ "\n",
+ "Epoch 01901: val_acc did not improve from 0.94225\n",
+ "Epoch 1902/100000\n",
+ " - 19s - loss: 0.3402 - acc: 0.9352 - val_loss: 0.3211 - val_acc: 0.9392\n",
+ "\n",
+ "Epoch 01902: val_acc did not improve from 0.94225\n",
+ "Epoch 1903/100000\n",
+ " - 19s - loss: 0.3367 - acc: 0.9364 - val_loss: 0.3658 - val_acc: 0.9219\n",
+ "\n",
+ "Epoch 01903: val_acc did not improve from 0.94225\n",
+ "Epoch 1904/100000\n",
+ " - 18s - loss: 0.3389 - acc: 0.9361 - val_loss: 0.3663 - val_acc: 0.9204\n",
+ "\n",
+ "Epoch 01904: val_acc did not improve from 0.94225\n",
+ "Epoch 1905/100000\n",
+ " - 19s - loss: 0.3407 - acc: 0.9355 - val_loss: 0.3689 - val_acc: 0.9154\n",
+ "\n",
+ "Epoch 01905: val_acc did not improve from 0.94225\n",
+ "Epoch 1906/100000\n",
+ " - 18s - loss: 0.3377 - acc: 0.9362 - val_loss: 0.3263 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 01906: val_acc did not improve from 0.94225\n",
+ "Epoch 1907/100000\n",
+ " - 18s - loss: 0.3376 - acc: 0.9362 - val_loss: 0.4354 - val_acc: 0.8946\n",
+ "\n",
+ "Epoch 01907: val_acc did not improve from 0.94225\n",
+ "Epoch 1908/100000\n",
+ " - 19s - loss: 0.3376 - acc: 0.9363 - val_loss: 0.3607 - val_acc: 0.9279\n",
+ "\n",
+ "Epoch 01908: val_acc did not improve from 0.94225\n",
+ "Epoch 1909/100000\n",
+ " - 19s - loss: 0.3379 - acc: 0.9358 - val_loss: 0.3790 - val_acc: 0.9100\n",
+ "\n",
+ "Epoch 01909: val_acc did not improve from 0.94225\n",
+ "Epoch 1910/100000\n",
+ " - 18s - loss: 0.3392 - acc: 0.9357 - val_loss: 0.3260 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 01910: val_acc did not improve from 0.94225\n",
+ "Epoch 1911/100000\n",
+ " - 19s - loss: 0.3397 - acc: 0.9354 - val_loss: 0.3240 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 01911: val_acc did not improve from 0.94225\n",
+ "Epoch 1912/100000\n",
+ " - 19s - loss: 0.3392 - acc: 0.9358 - val_loss: 0.3316 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 01912: val_acc did not improve from 0.94225\n",
+ "Epoch 1913/100000\n",
+ " - 19s - loss: 0.3367 - acc: 0.9365 - val_loss: 0.3337 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 01913: val_acc did not improve from 0.94225\n",
+ "Epoch 1914/100000\n",
+ " - 19s - loss: 0.3390 - acc: 0.9362 - val_loss: 0.3576 - val_acc: 0.9208\n",
+ "\n",
+ "Epoch 01914: val_acc did not improve from 0.94225\n",
+ "Epoch 1915/100000\n",
+ " - 19s - loss: 0.3398 - acc: 0.9358 - val_loss: 0.3367 - val_acc: 0.9393\n",
+ "\n",
+ "Epoch 01915: val_acc did not improve from 0.94225\n",
+ "Epoch 1916/100000\n",
+ " - 18s - loss: 0.3396 - acc: 0.9354 - val_loss: 0.3309 - val_acc: 0.9377\n",
+ "\n",
+ "Epoch 01916: val_acc did not improve from 0.94225\n",
+ "Epoch 1917/100000\n",
+ " - 19s - loss: 0.3388 - acc: 0.9361 - val_loss: 0.3215 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 01917: val_acc did not improve from 0.94225\n",
+ "Epoch 1918/100000\n",
+ " - 18s - loss: 0.3384 - acc: 0.9360 - val_loss: 0.3281 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 01918: val_acc did not improve from 0.94225\n",
+ "Epoch 1919/100000\n",
+ " - 19s - loss: 0.3367 - acc: 0.9371 - val_loss: 0.3318 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01919: val_acc did not improve from 0.94225\n",
+ "Epoch 1920/100000\n",
+ " - 18s - loss: 0.3412 - acc: 0.9350 - val_loss: 0.3378 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 01920: val_acc did not improve from 0.94225\n",
+ "Epoch 1921/100000\n",
+ " - 19s - loss: 0.3380 - acc: 0.9365 - val_loss: 0.3630 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 01921: val_acc did not improve from 0.94225\n",
+ "Epoch 1922/100000\n",
+ " - 18s - loss: 0.3374 - acc: 0.9363 - val_loss: 0.3540 - val_acc: 0.9206\n",
+ "\n",
+ "Epoch 01922: val_acc did not improve from 0.94225\n",
+ "Epoch 1923/100000\n",
+ " - 19s - loss: 0.3382 - acc: 0.9356 - val_loss: 0.3264 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 01923: val_acc did not improve from 0.94225\n",
+ "Epoch 1924/100000\n",
+ " - 19s - loss: 0.3390 - acc: 0.9351 - val_loss: 0.3745 - val_acc: 0.9175\n",
+ "\n",
+ "Epoch 01924: val_acc did not improve from 0.94225\n",
+ "Epoch 1925/100000\n",
+ " - 18s - loss: 0.3367 - acc: 0.9365 - val_loss: 0.4982 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 01925: val_acc did not improve from 0.94225\n",
+ "Epoch 1926/100000\n",
+ " - 19s - loss: 0.3383 - acc: 0.9354 - val_loss: 0.3614 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 01926: val_acc did not improve from 0.94225\n",
+ "Epoch 1927/100000\n",
+ " - 19s - loss: 0.3390 - acc: 0.9359 - val_loss: 0.4017 - val_acc: 0.9062\n",
+ "\n",
+ "Epoch 01927: val_acc did not improve from 0.94225\n",
+ "Epoch 1928/100000\n",
+ " - 19s - loss: 0.3574 - acc: 0.9344 - val_loss: 0.3459 - val_acc: 0.9274\n",
+ "\n",
+ "Epoch 01928: val_acc did not improve from 0.94225\n",
+ "Epoch 1929/100000\n",
+ " - 19s - loss: 0.3433 - acc: 0.9359 - val_loss: 0.3590 - val_acc: 0.9246\n",
+ "\n",
+ "Epoch 01929: val_acc did not improve from 0.94225\n",
+ "Epoch 1930/100000\n",
+ " - 19s - loss: 0.3396 - acc: 0.9363 - val_loss: 0.3248 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 01930: val_acc did not improve from 0.94225\n",
+ "Epoch 1931/100000\n",
+ " - 18s - loss: 0.3386 - acc: 0.9363 - val_loss: 0.3324 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 01931: val_acc did not improve from 0.94225\n",
+ "Epoch 1932/100000\n",
+ " - 19s - loss: 0.3385 - acc: 0.9357 - val_loss: 0.3623 - val_acc: 0.9247\n",
+ "\n",
+ "Epoch 01932: val_acc did not improve from 0.94225\n",
+ "Epoch 1933/100000\n",
+ " - 19s - loss: 0.3369 - acc: 0.9367 - val_loss: 0.3405 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 01933: val_acc did not improve from 0.94225\n",
+ "Epoch 1934/100000\n",
+ " - 19s - loss: 0.3373 - acc: 0.9365 - val_loss: 0.3684 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 01934: val_acc did not improve from 0.94225\n",
+ "Epoch 1935/100000\n",
+ " - 19s - loss: 0.3375 - acc: 0.9365 - val_loss: 0.3464 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 01935: val_acc did not improve from 0.94225\n",
+ "Epoch 1936/100000\n",
+ " - 19s - loss: 0.3357 - acc: 0.9368 - val_loss: 0.4412 - val_acc: 0.8824\n",
+ "\n",
+ "Epoch 01936: val_acc did not improve from 0.94225\n",
+ "Epoch 1937/100000\n",
+ " - 19s - loss: 0.3405 - acc: 0.9347 - val_loss: 0.3646 - val_acc: 0.9215\n",
+ "\n",
+ "Epoch 01937: val_acc did not improve from 0.94225\n",
+ "Epoch 1938/100000\n",
+ " - 19s - loss: 0.3385 - acc: 0.9350 - val_loss: 0.3396 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 01938: val_acc did not improve from 0.94225\n",
+ "Epoch 1939/100000\n",
+ " - 19s - loss: 0.3375 - acc: 0.9359 - val_loss: 0.3470 - val_acc: 0.9251\n",
+ "\n",
+ "Epoch 01939: val_acc did not improve from 0.94225\n",
+ "Epoch 1940/100000\n",
+ " - 19s - loss: 0.3379 - acc: 0.9358 - val_loss: 0.3424 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 01940: val_acc did not improve from 0.94225\n",
+ "Epoch 1941/100000\n",
+ " - 18s - loss: 0.3372 - acc: 0.9357 - val_loss: 0.3254 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 01941: val_acc did not improve from 0.94225\n",
+ "Epoch 1942/100000\n",
+ " - 19s - loss: 0.3348 - acc: 0.9366 - val_loss: 0.4281 - val_acc: 0.8955\n",
+ "\n",
+ "Epoch 01942: val_acc did not improve from 0.94225\n",
+ "Epoch 1943/100000\n",
+ " - 19s - loss: 0.3380 - acc: 0.9364 - val_loss: 0.4161 - val_acc: 0.8874\n",
+ "\n",
+ "Epoch 01943: val_acc did not improve from 0.94225\n",
+ "Epoch 1944/100000\n",
+ " - 18s - loss: 0.3364 - acc: 0.9367 - val_loss: 0.3362 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 01944: val_acc did not improve from 0.94225\n",
+ "Epoch 1945/100000\n",
+ " - 19s - loss: 0.3383 - acc: 0.9357 - val_loss: 0.3365 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 01945: val_acc did not improve from 0.94225\n",
+ "Epoch 1946/100000\n",
+ " - 18s - loss: 0.3394 - acc: 0.9355 - val_loss: 0.3554 - val_acc: 0.9262\n",
+ "\n",
+ "Epoch 01946: val_acc did not improve from 0.94225\n",
+ "Epoch 1947/100000\n",
+ " - 18s - loss: 0.3407 - acc: 0.9352 - val_loss: 0.3323 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 01947: val_acc did not improve from 0.94225\n",
+ "Epoch 1948/100000\n",
+ " - 18s - loss: 0.3358 - acc: 0.9365 - val_loss: 0.3352 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 01948: val_acc did not improve from 0.94225\n",
+ "Epoch 1949/100000\n",
+ " - 18s - loss: 0.3373 - acc: 0.9364 - val_loss: 0.3615 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 01949: val_acc did not improve from 0.94225\n",
+ "Epoch 1950/100000\n",
+ " - 18s - loss: 0.3383 - acc: 0.9360 - val_loss: 0.4481 - val_acc: 0.8908\n",
+ "\n",
+ "Epoch 01950: val_acc did not improve from 0.94225\n",
+ "Epoch 1951/100000\n",
+ " - 18s - loss: 0.3353 - acc: 0.9367 - val_loss: 0.3681 - val_acc: 0.9222\n",
+ "\n",
+ "Epoch 01951: val_acc did not improve from 0.94225\n",
+ "Epoch 1952/100000\n",
+ " - 19s - loss: 0.3383 - acc: 0.9358 - val_loss: 0.3576 - val_acc: 0.9182\n",
+ "\n",
+ "Epoch 01952: val_acc did not improve from 0.94225\n",
+ "Epoch 1953/100000\n",
+ " - 19s - loss: 0.3391 - acc: 0.9357 - val_loss: 0.3268 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 01953: val_acc did not improve from 0.94225\n",
+ "Epoch 1954/100000\n",
+ " - 19s - loss: 0.3412 - acc: 0.9361 - val_loss: 0.3746 - val_acc: 0.9137\n",
+ "\n",
+ "Epoch 01954: val_acc did not improve from 0.94225\n",
+ "Epoch 1955/100000\n",
+ " - 18s - loss: 0.3406 - acc: 0.9357 - val_loss: 0.3281 - val_acc: 0.9412\n",
+ "\n",
+ "Epoch 01955: val_acc did not improve from 0.94225\n",
+ "Epoch 1956/100000\n",
+ " - 19s - loss: 0.3355 - acc: 0.9367 - val_loss: 0.3335 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 01956: val_acc did not improve from 0.94225\n",
+ "Epoch 1957/100000\n",
+ " - 19s - loss: 0.3379 - acc: 0.9359 - val_loss: 0.3946 - val_acc: 0.8980\n",
+ "\n",
+ "Epoch 01957: val_acc did not improve from 0.94225\n",
+ "Epoch 1958/100000\n",
+ " - 19s - loss: 0.3383 - acc: 0.9357 - val_loss: 0.3344 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 01958: val_acc did not improve from 0.94225\n",
+ "Epoch 1959/100000\n",
+ " - 19s - loss: 0.3366 - acc: 0.9359 - val_loss: 0.3317 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 01959: val_acc did not improve from 0.94225\n",
+ "Epoch 1960/100000\n",
+ " - 19s - loss: 0.3396 - acc: 0.9348 - val_loss: 0.3438 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 01960: val_acc did not improve from 0.94225\n",
+ "Epoch 1961/100000\n",
+ " - 18s - loss: 0.3386 - acc: 0.9361 - val_loss: 0.3336 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 01961: val_acc did not improve from 0.94225\n",
+ "Epoch 1962/100000\n",
+ " - 19s - loss: 0.3856 - acc: 0.9325 - val_loss: 0.3397 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 01962: val_acc did not improve from 0.94225\n",
+ "Epoch 1963/100000\n",
+ " - 19s - loss: 0.3484 - acc: 0.9358 - val_loss: 0.3832 - val_acc: 0.9013\n",
+ "\n",
+ "Epoch 01963: val_acc did not improve from 0.94225\n",
+ "Epoch 1964/100000\n",
+ " - 18s - loss: 0.3457 - acc: 0.9352 - val_loss: 0.3273 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 01964: val_acc did not improve from 0.94225\n",
+ "Epoch 1965/100000\n",
+ " - 19s - loss: 0.3445 - acc: 0.9350 - val_loss: 0.3479 - val_acc: 0.9305\n",
+ "\n",
+ "Epoch 01965: val_acc did not improve from 0.94225\n",
+ "Epoch 1966/100000\n",
+ " - 19s - loss: 0.3433 - acc: 0.9355 - val_loss: 0.3806 - val_acc: 0.9073\n",
+ "\n",
+ "Epoch 01966: val_acc did not improve from 0.94225\n",
+ "Epoch 1967/100000\n",
+ " - 19s - loss: 0.3463 - acc: 0.9341 - val_loss: 0.3320 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 01967: val_acc did not improve from 0.94225\n",
+ "Epoch 1968/100000\n",
+ " - 19s - loss: 0.3429 - acc: 0.9354 - val_loss: 0.3834 - val_acc: 0.9122\n",
+ "\n",
+ "Epoch 01968: val_acc did not improve from 0.94225\n",
+ "Epoch 1969/100000\n",
+ " - 19s - loss: 0.3416 - acc: 0.9362 - val_loss: 0.6219 - val_acc: 0.7903\n",
+ "\n",
+ "Epoch 01969: val_acc did not improve from 0.94225\n",
+ "Epoch 1970/100000\n",
+ " - 18s - loss: 0.3480 - acc: 0.9336 - val_loss: 0.3623 - val_acc: 0.9226\n",
+ "\n",
+ "Epoch 01970: val_acc did not improve from 0.94225\n",
+ "Epoch 1971/100000\n",
+ " - 19s - loss: 0.3420 - acc: 0.9356 - val_loss: 0.3406 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01971: val_acc did not improve from 0.94225\n",
+ "Epoch 1972/100000\n",
+ " - 18s - loss: 0.3414 - acc: 0.9354 - val_loss: 0.4540 - val_acc: 0.8788\n",
+ "\n",
+ "Epoch 01972: val_acc did not improve from 0.94225\n",
+ "Epoch 1973/100000\n",
+ " - 18s - loss: 0.3437 - acc: 0.9353 - val_loss: 0.4030 - val_acc: 0.8900\n",
+ "\n",
+ "Epoch 01973: val_acc did not improve from 0.94225\n",
+ "Epoch 1974/100000\n",
+ " - 18s - loss: 0.3437 - acc: 0.9354 - val_loss: 0.3295 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 01974: val_acc did not improve from 0.94225\n",
+ "Epoch 1975/100000\n",
+ " - 18s - loss: 0.3416 - acc: 0.9357 - val_loss: 0.3305 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 01975: val_acc did not improve from 0.94225\n",
+ "Epoch 1976/100000\n",
+ " - 19s - loss: 0.3437 - acc: 0.9355 - val_loss: 0.3444 - val_acc: 0.9259\n",
+ "\n",
+ "Epoch 01976: val_acc did not improve from 0.94225\n",
+ "Epoch 1977/100000\n",
+ " - 19s - loss: 0.3402 - acc: 0.9362 - val_loss: 0.4115 - val_acc: 0.8915\n",
+ "\n",
+ "Epoch 01977: val_acc did not improve from 0.94225\n",
+ "Epoch 1978/100000\n",
+ " - 19s - loss: 0.3410 - acc: 0.9358 - val_loss: 0.3272 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 01978: val_acc did not improve from 0.94225\n",
+ "Epoch 1979/100000\n",
+ " - 19s - loss: 0.3505 - acc: 0.9347 - val_loss: 0.3568 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 01979: val_acc did not improve from 0.94225\n",
+ "Epoch 1980/100000\n",
+ " - 19s - loss: 0.3437 - acc: 0.9361 - val_loss: 0.3264 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 01980: val_acc did not improve from 0.94225\n",
+ "Epoch 1981/100000\n",
+ " - 18s - loss: 0.3440 - acc: 0.9354 - val_loss: 0.3464 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 01981: val_acc did not improve from 0.94225\n",
+ "Epoch 1982/100000\n",
+ " - 19s - loss: 0.3444 - acc: 0.9356 - val_loss: 0.4236 - val_acc: 0.8851\n",
+ "\n",
+ "Epoch 01982: val_acc did not improve from 0.94225\n",
+ "Epoch 1983/100000\n",
+ " - 19s - loss: 0.3433 - acc: 0.9356 - val_loss: 0.3553 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 01983: val_acc did not improve from 0.94225\n",
+ "Epoch 1984/100000\n",
+ " - 19s - loss: 0.3427 - acc: 0.9357 - val_loss: 0.3376 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01984: val_acc did not improve from 0.94225\n",
+ "Epoch 1985/100000\n",
+ " - 19s - loss: 0.3416 - acc: 0.9357 - val_loss: 0.3321 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 01985: val_acc did not improve from 0.94225\n",
+ "Epoch 1986/100000\n",
+ " - 18s - loss: 0.3424 - acc: 0.9353 - val_loss: 0.3612 - val_acc: 0.9177\n",
+ "\n",
+ "Epoch 01986: val_acc did not improve from 0.94225\n",
+ "Epoch 1987/100000\n",
+ " - 18s - loss: 0.3426 - acc: 0.9347 - val_loss: 0.4087 - val_acc: 0.8853\n",
+ "\n",
+ "Epoch 01987: val_acc did not improve from 0.94225\n",
+ "Epoch 1988/100000\n",
+ " - 19s - loss: 0.3441 - acc: 0.9350 - val_loss: 0.5892 - val_acc: 0.8060\n",
+ "\n",
+ "Epoch 01988: val_acc did not improve from 0.94225\n",
+ "Epoch 1989/100000\n",
+ " - 18s - loss: 0.3434 - acc: 0.9350 - val_loss: 0.3648 - val_acc: 0.9098\n",
+ "\n",
+ "Epoch 01989: val_acc did not improve from 0.94225\n",
+ "Epoch 1990/100000\n",
+ " - 19s - loss: 0.3408 - acc: 0.9360 - val_loss: 0.4497 - val_acc: 0.8646\n",
+ "\n",
+ "Epoch 01990: val_acc did not improve from 0.94225\n",
+ "Epoch 1991/100000\n",
+ " - 18s - loss: 0.3425 - acc: 0.9353 - val_loss: 0.3556 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 01991: val_acc did not improve from 0.94225\n",
+ "Epoch 1992/100000\n",
+ " - 19s - loss: 0.3443 - acc: 0.9344 - val_loss: 0.3330 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01992: val_acc did not improve from 0.94225\n",
+ "Epoch 1993/100000\n",
+ " - 19s - loss: 0.3420 - acc: 0.9350 - val_loss: 0.3585 - val_acc: 0.9165\n",
+ "\n",
+ "Epoch 01993: val_acc did not improve from 0.94225\n",
+ "Epoch 1994/100000\n",
+ " - 19s - loss: 0.3430 - acc: 0.9345 - val_loss: 0.3227 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 01994: val_acc did not improve from 0.94225\n",
+ "Epoch 1995/100000\n",
+ " - 19s - loss: 0.3397 - acc: 0.9355 - val_loss: 0.3269 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 01995: val_acc did not improve from 0.94225\n",
+ "Epoch 1996/100000\n",
+ " - 19s - loss: 0.3406 - acc: 0.9354 - val_loss: 0.3438 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 01996: val_acc did not improve from 0.94225\n",
+ "Epoch 1997/100000\n",
+ " - 19s - loss: 0.3442 - acc: 0.9349 - val_loss: 0.3937 - val_acc: 0.8923\n",
+ "\n",
+ "Epoch 01997: val_acc did not improve from 0.94225\n",
+ "Epoch 1998/100000\n",
+ " - 19s - loss: 0.3396 - acc: 0.9359 - val_loss: 0.4087 - val_acc: 0.8817\n",
+ "\n",
+ "Epoch 01998: val_acc did not improve from 0.94225\n",
+ "Epoch 1999/100000\n",
+ " - 19s - loss: 0.3449 - acc: 0.9340 - val_loss: 0.3427 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 01999: val_acc did not improve from 0.94225\n",
+ "Epoch 2000/100000\n",
+ " - 19s - loss: 0.3418 - acc: 0.9354 - val_loss: 0.3387 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 02000: val_acc did not improve from 0.94225\n",
+ "Epoch 2001/100000\n",
+ " - 18s - loss: 0.3411 - acc: 0.9349 - val_loss: 0.3382 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 02001: val_acc did not improve from 0.94225\n",
+ "Epoch 2002/100000\n",
+ " - 19s - loss: 0.3417 - acc: 0.9355 - val_loss: 0.3714 - val_acc: 0.9135\n",
+ "\n",
+ "Epoch 02002: val_acc did not improve from 0.94225\n",
+ "Epoch 2003/100000\n",
+ " - 19s - loss: 0.3412 - acc: 0.9353 - val_loss: 0.3347 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 02003: val_acc did not improve from 0.94225\n",
+ "Epoch 2004/100000\n",
+ " - 18s - loss: 0.3393 - acc: 0.9359 - val_loss: 0.3619 - val_acc: 0.9132\n",
+ "\n",
+ "Epoch 02004: val_acc did not improve from 0.94225\n",
+ "Epoch 2005/100000\n",
+ " - 19s - loss: 0.3427 - acc: 0.9346 - val_loss: 0.3287 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02005: val_acc did not improve from 0.94225\n",
+ "Epoch 2006/100000\n",
+ " - 18s - loss: 0.3422 - acc: 0.9347 - val_loss: 0.3251 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 02006: val_acc did not improve from 0.94225\n",
+ "Epoch 2007/100000\n",
+ " - 19s - loss: 0.3412 - acc: 0.9352 - val_loss: 0.3373 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 02007: val_acc did not improve from 0.94225\n",
+ "Epoch 2008/100000\n",
+ " - 19s - loss: 0.3386 - acc: 0.9358 - val_loss: 0.3450 - val_acc: 0.9257\n",
+ "\n",
+ "Epoch 02008: val_acc did not improve from 0.94225\n",
+ "Epoch 2009/100000\n",
+ " - 19s - loss: 0.3408 - acc: 0.9353 - val_loss: 0.4146 - val_acc: 0.9035\n",
+ "\n",
+ "Epoch 02009: val_acc did not improve from 0.94225\n",
+ "Epoch 2010/100000\n",
+ " - 19s - loss: 0.3421 - acc: 0.9343 - val_loss: 0.3258 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 02010: val_acc did not improve from 0.94225\n",
+ "Epoch 2011/100000\n",
+ " - 19s - loss: 0.3419 - acc: 0.9351 - val_loss: 0.3459 - val_acc: 0.9268\n",
+ "\n",
+ "Epoch 02011: val_acc did not improve from 0.94225\n",
+ "Epoch 2012/100000\n",
+ " - 19s - loss: 0.3410 - acc: 0.9352 - val_loss: 0.3782 - val_acc: 0.9057\n",
+ "\n",
+ "Epoch 02012: val_acc did not improve from 0.94225\n",
+ "Epoch 2013/100000\n",
+ " - 19s - loss: 0.3426 - acc: 0.9344 - val_loss: 0.4218 - val_acc: 0.8894\n",
+ "\n",
+ "Epoch 02013: val_acc did not improve from 0.94225\n",
+ "Epoch 2014/100000\n",
+ " - 19s - loss: 0.3398 - acc: 0.9348 - val_loss: 0.4696 - val_acc: 0.8771\n",
+ "\n",
+ "Epoch 02014: val_acc did not improve from 0.94225\n",
+ "Epoch 2015/100000\n",
+ " - 18s - loss: 0.3421 - acc: 0.9348 - val_loss: 0.3225 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 02015: val_acc did not improve from 0.94225\n",
+ "Epoch 2016/100000\n",
+ " - 19s - loss: 0.3427 - acc: 0.9345 - val_loss: 0.5538 - val_acc: 0.8742\n",
+ "\n",
+ "Epoch 02016: val_acc did not improve from 0.94225\n",
+ "Epoch 2017/100000\n",
+ " - 18s - loss: 0.3411 - acc: 0.9353 - val_loss: 0.3358 - val_acc: 0.9273\n",
+ "\n",
+ "Epoch 02017: val_acc did not improve from 0.94225\n",
+ "Epoch 2018/100000\n",
+ " - 19s - loss: 0.3414 - acc: 0.9354 - val_loss: 0.3666 - val_acc: 0.9091\n",
+ "\n",
+ "Epoch 02018: val_acc did not improve from 0.94225\n",
+ "Epoch 2019/100000\n",
+ " - 18s - loss: 0.3423 - acc: 0.9341 - val_loss: 0.3280 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 02019: val_acc did not improve from 0.94225\n",
+ "Epoch 2020/100000\n",
+ " - 19s - loss: 0.3418 - acc: 0.9349 - val_loss: 0.3397 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 02020: val_acc did not improve from 0.94225\n",
+ "Epoch 2021/100000\n",
+ " - 19s - loss: 0.3422 - acc: 0.9355 - val_loss: 0.3634 - val_acc: 0.9163\n",
+ "\n",
+ "Epoch 02021: val_acc did not improve from 0.94225\n",
+ "Epoch 2022/100000\n",
+ " - 19s - loss: 0.3421 - acc: 0.9351 - val_loss: 0.3620 - val_acc: 0.9134\n",
+ "\n",
+ "Epoch 02022: val_acc did not improve from 0.94225\n",
+ "Epoch 2023/100000\n",
+ " - 18s - loss: 0.3420 - acc: 0.9350 - val_loss: 0.3409 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 02023: val_acc did not improve from 0.94225\n",
+ "Epoch 2024/100000\n",
+ " - 19s - loss: 0.3410 - acc: 0.9350 - val_loss: 0.3271 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02024: val_acc did not improve from 0.94225\n",
+ "Epoch 2025/100000\n",
+ " - 19s - loss: 0.3422 - acc: 0.9347 - val_loss: 0.3480 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 02025: val_acc did not improve from 0.94225\n",
+ "Epoch 2026/100000\n",
+ " - 19s - loss: 0.3420 - acc: 0.9351 - val_loss: 0.3223 - val_acc: 0.9410\n",
+ "\n",
+ "Epoch 02026: val_acc did not improve from 0.94225\n",
+ "Epoch 2027/100000\n",
+ " - 19s - loss: 0.3437 - acc: 0.9338 - val_loss: 0.3394 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 02027: val_acc did not improve from 0.94225\n",
+ "Epoch 2028/100000\n",
+ " - 19s - loss: 0.3388 - acc: 0.9358 - val_loss: 0.3579 - val_acc: 0.9178\n",
+ "\n",
+ "Epoch 02028: val_acc did not improve from 0.94225\n",
+ "Epoch 2029/100000\n",
+ " - 19s - loss: 0.3411 - acc: 0.9353 - val_loss: 0.3750 - val_acc: 0.9064\n",
+ "\n",
+ "Epoch 02029: val_acc did not improve from 0.94225\n",
+ "Epoch 2030/100000\n",
+ " - 19s - loss: 0.3413 - acc: 0.9354 - val_loss: 0.3302 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 02030: val_acc did not improve from 0.94225\n",
+ "Epoch 2031/100000\n",
+ " - 18s - loss: 0.3403 - acc: 0.9355 - val_loss: 0.4689 - val_acc: 0.8633\n",
+ "\n",
+ "Epoch 02031: val_acc did not improve from 0.94225\n",
+ "Epoch 2032/100000\n",
+ " - 18s - loss: 0.3428 - acc: 0.9344 - val_loss: 0.3881 - val_acc: 0.9258\n",
+ "\n",
+ "Epoch 02032: val_acc did not improve from 0.94225\n",
+ "Epoch 2033/100000\n",
+ " - 19s - loss: 0.3427 - acc: 0.9350 - val_loss: 0.3290 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02033: val_acc did not improve from 0.94225\n",
+ "Epoch 2034/100000\n",
+ " - 18s - loss: 0.3427 - acc: 0.9347 - val_loss: 0.3256 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 02034: val_acc did not improve from 0.94225\n",
+ "Epoch 2035/100000\n",
+ " - 19s - loss: 0.3438 - acc: 0.9341 - val_loss: 0.3846 - val_acc: 0.9193\n",
+ "\n",
+ "Epoch 02035: val_acc did not improve from 0.94225\n",
+ "Epoch 2036/100000\n",
+ " - 19s - loss: 0.3393 - acc: 0.9358 - val_loss: 0.3229 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 02036: val_acc did not improve from 0.94225\n",
+ "Epoch 2037/100000\n",
+ " - 19s - loss: 0.3443 - acc: 0.9348 - val_loss: 0.3326 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 02037: val_acc did not improve from 0.94225\n",
+ "Epoch 2038/100000\n",
+ " - 19s - loss: 0.3406 - acc: 0.9351 - val_loss: 0.3303 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 02038: val_acc did not improve from 0.94225\n",
+ "Epoch 2039/100000\n",
+ " - 19s - loss: 0.3396 - acc: 0.9357 - val_loss: 0.3359 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 02039: val_acc did not improve from 0.94225\n",
+ "Epoch 2040/100000\n",
+ " - 19s - loss: 0.3399 - acc: 0.9355 - val_loss: 0.3266 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02040: val_acc did not improve from 0.94225\n",
+ "Epoch 2041/100000\n",
+ " - 18s - loss: 0.3422 - acc: 0.9348 - val_loss: 0.3240 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 02041: val_acc did not improve from 0.94225\n",
+ "Epoch 2042/100000\n",
+ " - 19s - loss: 0.3404 - acc: 0.9354 - val_loss: 0.3493 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 02042: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 02042: ReduceLROnPlateau reducing learning rate to 0.0006302493420662358.\n",
+ "Epoch 2043/100000\n",
+ " - 19s - loss: 0.3349 - acc: 0.9356 - val_loss: 0.3276 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 02043: val_acc did not improve from 0.94225\n",
+ "Epoch 2044/100000\n",
+ " - 19s - loss: 0.3355 - acc: 0.9348 - val_loss: 0.3959 - val_acc: 0.8978\n",
+ "\n",
+ "Epoch 02044: val_acc did not improve from 0.94225\n",
+ "Epoch 2045/100000\n",
+ " - 19s - loss: 0.3337 - acc: 0.9356 - val_loss: 0.3602 - val_acc: 0.9113\n",
+ "\n",
+ "Epoch 02045: val_acc did not improve from 0.94225\n",
+ "Epoch 2046/100000\n",
+ " - 19s - loss: 0.3352 - acc: 0.9351 - val_loss: 0.3152 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02046: val_acc did not improve from 0.94225\n",
+ "Epoch 2047/100000\n",
+ " - 19s - loss: 0.3325 - acc: 0.9355 - val_loss: 0.3465 - val_acc: 0.9179\n",
+ "\n",
+ "Epoch 02047: val_acc did not improve from 0.94225\n",
+ "Epoch 2048/100000\n",
+ " - 19s - loss: 0.3347 - acc: 0.9350 - val_loss: 0.3155 - val_acc: 0.9377\n",
+ "\n",
+ "Epoch 02048: val_acc did not improve from 0.94225\n",
+ "Epoch 2049/100000\n",
+ " - 19s - loss: 0.3343 - acc: 0.9358 - val_loss: 0.3084 - val_acc: 0.9406\n",
+ "\n",
+ "Epoch 02049: val_acc did not improve from 0.94225\n",
+ "Epoch 2050/100000\n",
+ " - 18s - loss: 0.3321 - acc: 0.9359 - val_loss: 0.3933 - val_acc: 0.9154\n",
+ "\n",
+ "Epoch 02050: val_acc did not improve from 0.94225\n",
+ "Epoch 2051/100000\n",
+ " - 19s - loss: 0.3356 - acc: 0.9350 - val_loss: 0.3647 - val_acc: 0.9146\n",
+ "\n",
+ "Epoch 02051: val_acc did not improve from 0.94225\n",
+ "Epoch 2052/100000\n",
+ " - 18s - loss: 0.3357 - acc: 0.9350 - val_loss: 0.3320 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 02052: val_acc did not improve from 0.94225\n",
+ "Epoch 2053/100000\n",
+ " - 19s - loss: 0.3345 - acc: 0.9350 - val_loss: 0.3236 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 02053: val_acc did not improve from 0.94225\n",
+ "Epoch 2054/100000\n",
+ " - 19s - loss: 0.3340 - acc: 0.9352 - val_loss: 0.3183 - val_acc: 0.9390\n",
+ "\n",
+ "Epoch 02054: val_acc did not improve from 0.94225\n",
+ "Epoch 2055/100000\n",
+ " - 18s - loss: 0.3337 - acc: 0.9354 - val_loss: 0.3158 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 02055: val_acc did not improve from 0.94225\n",
+ "Epoch 2056/100000\n",
+ " - 19s - loss: 0.3347 - acc: 0.9352 - val_loss: 0.3342 - val_acc: 0.9255\n",
+ "\n",
+ "Epoch 02056: val_acc did not improve from 0.94225\n",
+ "Epoch 2057/100000\n",
+ " - 18s - loss: 0.3335 - acc: 0.9357 - val_loss: 0.3153 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 02057: val_acc did not improve from 0.94225\n",
+ "Epoch 2058/100000\n",
+ " - 19s - loss: 0.3341 - acc: 0.9350 - val_loss: 0.3229 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 02058: val_acc did not improve from 0.94225\n",
+ "Epoch 2059/100000\n",
+ " - 19s - loss: 0.3336 - acc: 0.9355 - val_loss: 0.3492 - val_acc: 0.9208\n",
+ "\n",
+ "Epoch 02059: val_acc did not improve from 0.94225\n",
+ "Epoch 2060/100000\n",
+ " - 19s - loss: 0.3361 - acc: 0.9350 - val_loss: 0.3216 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 02060: val_acc did not improve from 0.94225\n",
+ "Epoch 2061/100000\n",
+ " - 19s - loss: 0.3322 - acc: 0.9365 - val_loss: 0.3327 - val_acc: 0.9239\n",
+ "\n",
+ "Epoch 02061: val_acc did not improve from 0.94225\n",
+ "Epoch 2062/100000\n",
+ " - 18s - loss: 0.3359 - acc: 0.9346 - val_loss: 0.3320 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02062: val_acc did not improve from 0.94225\n",
+ "Epoch 2063/100000\n",
+ " - 19s - loss: 0.3367 - acc: 0.9352 - val_loss: 0.3330 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02063: val_acc did not improve from 0.94225\n",
+ "Epoch 2064/100000\n",
+ " - 19s - loss: 0.3350 - acc: 0.9345 - val_loss: 0.4152 - val_acc: 0.8842\n",
+ "\n",
+ "Epoch 02064: val_acc did not improve from 0.94225\n",
+ "Epoch 2065/100000\n",
+ " - 18s - loss: 0.3340 - acc: 0.9350 - val_loss: 0.3929 - val_acc: 0.9132\n",
+ "\n",
+ "Epoch 02065: val_acc did not improve from 0.94225\n",
+ "Epoch 2066/100000\n",
+ " - 19s - loss: 0.3336 - acc: 0.9348 - val_loss: 0.3201 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02066: val_acc did not improve from 0.94225\n",
+ "Epoch 2067/100000\n",
+ " - 19s - loss: 0.3312 - acc: 0.9359 - val_loss: 0.3360 - val_acc: 0.9258\n",
+ "\n",
+ "Epoch 02067: val_acc did not improve from 0.94225\n",
+ "Epoch 2068/100000\n",
+ " - 19s - loss: 0.3324 - acc: 0.9355 - val_loss: 0.3958 - val_acc: 0.8971\n",
+ "\n",
+ "Epoch 02068: val_acc did not improve from 0.94225\n",
+ "Epoch 2069/100000\n",
+ " - 19s - loss: 0.3340 - acc: 0.9351 - val_loss: 0.3209 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02069: val_acc did not improve from 0.94225\n",
+ "Epoch 2070/100000\n",
+ " - 19s - loss: 0.3334 - acc: 0.9355 - val_loss: 0.3228 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 02070: val_acc did not improve from 0.94225\n",
+ "Epoch 2071/100000\n",
+ " - 19s - loss: 0.3325 - acc: 0.9360 - val_loss: 0.3383 - val_acc: 0.9236\n",
+ "\n",
+ "Epoch 02071: val_acc did not improve from 0.94225\n",
+ "Epoch 2072/100000\n",
+ " - 19s - loss: 0.3358 - acc: 0.9345 - val_loss: 0.3357 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 02072: val_acc did not improve from 0.94225\n",
+ "Epoch 2073/100000\n",
+ " - 19s - loss: 0.3335 - acc: 0.9358 - val_loss: 0.3274 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 02073: val_acc did not improve from 0.94225\n",
+ "Epoch 2074/100000\n",
+ " - 19s - loss: 0.3382 - acc: 0.9341 - val_loss: 0.3464 - val_acc: 0.9255\n",
+ "\n",
+ "Epoch 02074: val_acc did not improve from 0.94225\n",
+ "Epoch 2075/100000\n",
+ " - 19s - loss: 0.3333 - acc: 0.9356 - val_loss: 0.3558 - val_acc: 0.9148\n",
+ "\n",
+ "Epoch 02075: val_acc did not improve from 0.94225\n",
+ "Epoch 2076/100000\n",
+ " - 19s - loss: 0.3348 - acc: 0.9354 - val_loss: 0.3221 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 02076: val_acc did not improve from 0.94225\n",
+ "Epoch 2077/100000\n",
+ " - 19s - loss: 0.3346 - acc: 0.9353 - val_loss: 0.4634 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02077: val_acc did not improve from 0.94225\n",
+ "Epoch 2078/100000\n",
+ " - 19s - loss: 0.3338 - acc: 0.9352 - val_loss: 0.3210 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02078: val_acc did not improve from 0.94225\n",
+ "Epoch 2079/100000\n",
+ " - 19s - loss: 0.3304 - acc: 0.9362 - val_loss: 0.3238 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 02079: val_acc did not improve from 0.94225\n",
+ "Epoch 2080/100000\n",
+ " - 19s - loss: 0.3341 - acc: 0.9348 - val_loss: 0.3165 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 02080: val_acc did not improve from 0.94225\n",
+ "Epoch 2081/100000\n",
+ " - 19s - loss: 0.3335 - acc: 0.9348 - val_loss: 0.3676 - val_acc: 0.9168\n",
+ "\n",
+ "Epoch 02081: val_acc did not improve from 0.94225\n",
+ "Epoch 2082/100000\n",
+ " - 19s - loss: 0.3326 - acc: 0.9357 - val_loss: 0.3365 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02082: val_acc did not improve from 0.94225\n",
+ "Epoch 2083/100000\n",
+ " - 18s - loss: 0.3384 - acc: 0.9333 - val_loss: 0.3670 - val_acc: 0.9261\n",
+ "\n",
+ "Epoch 02083: val_acc did not improve from 0.94225\n",
+ "Epoch 2084/100000\n",
+ " - 19s - loss: 0.3318 - acc: 0.9358 - val_loss: 0.3263 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 02084: val_acc did not improve from 0.94225\n",
+ "Epoch 2085/100000\n",
+ " - 18s - loss: 0.3329 - acc: 0.9358 - val_loss: 0.3370 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 02085: val_acc did not improve from 0.94225\n",
+ "Epoch 2086/100000\n",
+ " - 19s - loss: 0.3322 - acc: 0.9357 - val_loss: 0.3138 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 02086: val_acc did not improve from 0.94225\n",
+ "Epoch 2087/100000\n",
+ " - 19s - loss: 0.3337 - acc: 0.9353 - val_loss: 0.3397 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 02087: val_acc did not improve from 0.94225\n",
+ "Epoch 2088/100000\n",
+ " - 19s - loss: 0.3302 - acc: 0.9364 - val_loss: 0.3245 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 02088: val_acc did not improve from 0.94225\n",
+ "Epoch 2089/100000\n",
+ " - 19s - loss: 0.3316 - acc: 0.9352 - val_loss: 0.3207 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 02089: val_acc did not improve from 0.94225\n",
+ "Epoch 2090/100000\n",
+ " - 19s - loss: 0.3321 - acc: 0.9350 - val_loss: 0.3192 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 02090: val_acc did not improve from 0.94225\n",
+ "Epoch 2091/100000\n",
+ " - 19s - loss: 0.3344 - acc: 0.9348 - val_loss: 0.3396 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 02091: val_acc did not improve from 0.94225\n",
+ "Epoch 2092/100000\n",
+ " - 19s - loss: 0.3313 - acc: 0.9360 - val_loss: 0.3140 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 02092: val_acc did not improve from 0.94225\n",
+ "Epoch 2093/100000\n",
+ " - 19s - loss: 0.3338 - acc: 0.9348 - val_loss: 0.3243 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 02093: val_acc did not improve from 0.94225\n",
+ "Epoch 2094/100000\n",
+ " - 19s - loss: 0.3327 - acc: 0.9353 - val_loss: 0.3118 - val_acc: 0.9399\n",
+ "\n",
+ "Epoch 02094: val_acc did not improve from 0.94225\n",
+ "Epoch 2095/100000\n",
+ " - 18s - loss: 0.3335 - acc: 0.9358 - val_loss: 0.3522 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 02095: val_acc did not improve from 0.94225\n",
+ "Epoch 2096/100000\n",
+ " - 19s - loss: 0.3348 - acc: 0.9350 - val_loss: 0.3496 - val_acc: 0.9165\n",
+ "\n",
+ "Epoch 02096: val_acc did not improve from 0.94225\n",
+ "Epoch 2097/100000\n",
+ " - 19s - loss: 0.3309 - acc: 0.9363 - val_loss: 0.3159 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 02097: val_acc did not improve from 0.94225\n",
+ "Epoch 2098/100000\n",
+ " - 18s - loss: 0.3341 - acc: 0.9352 - val_loss: 0.3640 - val_acc: 0.9134\n",
+ "\n",
+ "Epoch 02098: val_acc did not improve from 0.94225\n",
+ "Epoch 2099/100000\n",
+ " - 19s - loss: 0.3348 - acc: 0.9352 - val_loss: 0.3843 - val_acc: 0.9035\n",
+ "\n",
+ "Epoch 02099: val_acc did not improve from 0.94225\n",
+ "Epoch 2100/100000\n",
+ " - 18s - loss: 0.3299 - acc: 0.9361 - val_loss: 0.3456 - val_acc: 0.9207\n",
+ "\n",
+ "Epoch 02100: val_acc did not improve from 0.94225\n",
+ "Epoch 2101/100000\n",
+ " - 19s - loss: 0.3311 - acc: 0.9357 - val_loss: 0.3218 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 02101: val_acc did not improve from 0.94225\n",
+ "Epoch 2102/100000\n",
+ " - 19s - loss: 0.3331 - acc: 0.9357 - val_loss: 0.3317 - val_acc: 0.9346\n",
+ "\n",
+ "Epoch 02102: val_acc did not improve from 0.94225\n",
+ "Epoch 2103/100000\n",
+ " - 19s - loss: 0.3341 - acc: 0.9353 - val_loss: 0.3162 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02103: val_acc did not improve from 0.94225\n",
+ "Epoch 2104/100000\n",
+ " - 19s - loss: 0.3323 - acc: 0.9356 - val_loss: 0.3803 - val_acc: 0.9099\n",
+ "\n",
+ "Epoch 02104: val_acc did not improve from 0.94225\n",
+ "Epoch 2105/100000\n",
+ " - 18s - loss: 0.3338 - acc: 0.9351 - val_loss: 0.3132 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02105: val_acc did not improve from 0.94225\n",
+ "Epoch 2106/100000\n",
+ " - 19s - loss: 0.3333 - acc: 0.9353 - val_loss: 0.3960 - val_acc: 0.9018\n",
+ "\n",
+ "Epoch 02106: val_acc did not improve from 0.94225\n",
+ "Epoch 2107/100000\n",
+ " - 19s - loss: 0.3334 - acc: 0.9353 - val_loss: 0.3250 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 02107: val_acc did not improve from 0.94225\n",
+ "Epoch 2108/100000\n",
+ " - 19s - loss: 0.3332 - acc: 0.9356 - val_loss: 0.3377 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 02108: val_acc did not improve from 0.94225\n",
+ "Epoch 2109/100000\n",
+ " - 19s - loss: 0.3351 - acc: 0.9353 - val_loss: 0.3141 - val_acc: 0.9390\n",
+ "\n",
+ "Epoch 02109: val_acc did not improve from 0.94225\n",
+ "Epoch 2110/100000\n",
+ " - 19s - loss: 0.3337 - acc: 0.9350 - val_loss: 0.3303 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 02110: val_acc did not improve from 0.94225\n",
+ "Epoch 2111/100000\n",
+ " - 19s - loss: 0.3336 - acc: 0.9349 - val_loss: 0.3149 - val_acc: 0.9404\n",
+ "\n",
+ "Epoch 02111: val_acc did not improve from 0.94225\n",
+ "Epoch 2112/100000\n",
+ " - 19s - loss: 0.3338 - acc: 0.9358 - val_loss: 0.3876 - val_acc: 0.9032\n",
+ "\n",
+ "Epoch 02112: val_acc did not improve from 0.94225\n",
+ "Epoch 2113/100000\n",
+ " - 20s - loss: 0.3332 - acc: 0.9353 - val_loss: 0.3361 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 02113: val_acc did not improve from 0.94225\n",
+ "Epoch 2114/100000\n",
+ " - 19s - loss: 0.3330 - acc: 0.9353 - val_loss: 0.3229 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 02114: val_acc did not improve from 0.94225\n",
+ "Epoch 2115/100000\n",
+ " - 19s - loss: 0.3331 - acc: 0.9355 - val_loss: 0.3429 - val_acc: 0.9243\n",
+ "\n",
+ "Epoch 02115: val_acc did not improve from 0.94225\n",
+ "Epoch 2116/100000\n",
+ " - 19s - loss: 0.3329 - acc: 0.9353 - val_loss: 0.3966 - val_acc: 0.9144\n",
+ "\n",
+ "Epoch 02116: val_acc did not improve from 0.94225\n",
+ "Epoch 2117/100000\n",
+ " - 19s - loss: 0.3345 - acc: 0.9349 - val_loss: 0.3314 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 02117: val_acc did not improve from 0.94225\n",
+ "Epoch 2118/100000\n",
+ " - 19s - loss: 0.3329 - acc: 0.9353 - val_loss: 0.3228 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 02118: val_acc did not improve from 0.94225\n",
+ "Epoch 2119/100000\n",
+ " - 19s - loss: 0.3315 - acc: 0.9360 - val_loss: 0.3308 - val_acc: 0.9290\n",
+ "\n",
+ "Epoch 02119: val_acc did not improve from 0.94225\n",
+ "Epoch 2120/100000\n",
+ " - 19s - loss: 0.3343 - acc: 0.9349 - val_loss: 0.3178 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 02120: val_acc did not improve from 0.94225\n",
+ "Epoch 2121/100000\n",
+ " - 19s - loss: 0.3323 - acc: 0.9357 - val_loss: 0.3650 - val_acc: 0.9106\n",
+ "\n",
+ "Epoch 02121: val_acc did not improve from 0.94225\n",
+ "Epoch 2122/100000\n",
+ " - 19s - loss: 0.3313 - acc: 0.9357 - val_loss: 0.3353 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 02122: val_acc did not improve from 0.94225\n",
+ "Epoch 2123/100000\n",
+ " - 19s - loss: 0.3340 - acc: 0.9348 - val_loss: 0.3137 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 02123: val_acc did not improve from 0.94225\n",
+ "Epoch 2124/100000\n",
+ " - 19s - loss: 0.3331 - acc: 0.9353 - val_loss: 0.3365 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 02124: val_acc did not improve from 0.94225\n",
+ "Epoch 2125/100000\n",
+ " - 19s - loss: 0.3340 - acc: 0.9352 - val_loss: 0.3208 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02125: val_acc did not improve from 0.94225\n",
+ "Epoch 2126/100000\n",
+ " - 19s - loss: 0.3335 - acc: 0.9356 - val_loss: 0.3218 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 02126: val_acc did not improve from 0.94225\n",
+ "Epoch 2127/100000\n",
+ " - 19s - loss: 0.3366 - acc: 0.9343 - val_loss: 0.3370 - val_acc: 0.9265\n",
+ "\n",
+ "Epoch 02127: val_acc did not improve from 0.94225\n",
+ "Epoch 2128/100000\n",
+ " - 18s - loss: 0.3328 - acc: 0.9355 - val_loss: 0.3392 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 02128: val_acc did not improve from 0.94225\n",
+ "Epoch 2129/100000\n",
+ " - 19s - loss: 0.3349 - acc: 0.9344 - val_loss: 0.3334 - val_acc: 0.9267\n",
+ "\n",
+ "Epoch 02129: val_acc did not improve from 0.94225\n",
+ "Epoch 2130/100000\n",
+ " - 19s - loss: 0.3356 - acc: 0.9343 - val_loss: 0.3244 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02130: val_acc did not improve from 0.94225\n",
+ "Epoch 2131/100000\n",
+ " - 19s - loss: 0.3363 - acc: 0.9344 - val_loss: 0.3163 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 02131: val_acc did not improve from 0.94225\n",
+ "Epoch 2132/100000\n",
+ " - 19s - loss: 0.3312 - acc: 0.9360 - val_loss: 0.3229 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02132: val_acc did not improve from 0.94225\n",
+ "Epoch 2133/100000\n",
+ " - 19s - loss: 0.3326 - acc: 0.9351 - val_loss: 0.3677 - val_acc: 0.9062\n",
+ "\n",
+ "Epoch 02133: val_acc did not improve from 0.94225\n",
+ "Epoch 2134/100000\n",
+ " - 19s - loss: 0.3345 - acc: 0.9343 - val_loss: 0.3764 - val_acc: 0.9234\n",
+ "\n",
+ "Epoch 02134: val_acc did not improve from 0.94225\n",
+ "Epoch 2135/100000\n",
+ " - 19s - loss: 0.3361 - acc: 0.9348 - val_loss: 0.3705 - val_acc: 0.9189\n",
+ "\n",
+ "Epoch 02135: val_acc did not improve from 0.94225\n",
+ "Epoch 2136/100000\n",
+ " - 18s - loss: 0.3336 - acc: 0.9351 - val_loss: 0.3457 - val_acc: 0.9196\n",
+ "\n",
+ "Epoch 02136: val_acc did not improve from 0.94225\n",
+ "Epoch 2137/100000\n",
+ " - 19s - loss: 0.3343 - acc: 0.9345 - val_loss: 0.3550 - val_acc: 0.9164\n",
+ "\n",
+ "Epoch 02137: val_acc did not improve from 0.94225\n",
+ "Epoch 2138/100000\n",
+ " - 19s - loss: 0.3326 - acc: 0.9353 - val_loss: 0.3245 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 02138: val_acc did not improve from 0.94225\n",
+ "Epoch 2139/100000\n",
+ " - 18s - loss: 0.3331 - acc: 0.9356 - val_loss: 0.3215 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 02139: val_acc did not improve from 0.94225\n",
+ "Epoch 2140/100000\n",
+ " - 19s - loss: 0.3338 - acc: 0.9353 - val_loss: 0.4185 - val_acc: 0.8827\n",
+ "\n",
+ "Epoch 02140: val_acc did not improve from 0.94225\n",
+ "Epoch 2141/100000\n",
+ " - 19s - loss: 0.3352 - acc: 0.9347 - val_loss: 0.3284 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 02141: val_acc did not improve from 0.94225\n",
+ "Epoch 2142/100000\n",
+ " - 18s - loss: 0.3326 - acc: 0.9354 - val_loss: 0.4411 - val_acc: 0.8780\n",
+ "\n",
+ "Epoch 02142: val_acc did not improve from 0.94225\n",
+ "Epoch 2143/100000\n",
+ " - 19s - loss: 0.3345 - acc: 0.9349 - val_loss: 0.3459 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 02143: val_acc did not improve from 0.94225\n",
+ "Epoch 2144/100000\n",
+ " - 18s - loss: 0.3340 - acc: 0.9352 - val_loss: 0.3508 - val_acc: 0.9195\n",
+ "\n",
+ "Epoch 02144: val_acc did not improve from 0.94225\n",
+ "Epoch 2145/100000\n",
+ " - 19s - loss: 0.3319 - acc: 0.9361 - val_loss: 0.4259 - val_acc: 0.8833\n",
+ "\n",
+ "Epoch 02145: val_acc did not improve from 0.94225\n",
+ "Epoch 2146/100000\n",
+ " - 18s - loss: 0.3373 - acc: 0.9346 - val_loss: 0.3129 - val_acc: 0.9396\n",
+ "\n",
+ "Epoch 02146: val_acc did not improve from 0.94225\n",
+ "Epoch 2147/100000\n",
+ " - 19s - loss: 0.3336 - acc: 0.9355 - val_loss: 0.3498 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 02147: val_acc did not improve from 0.94225\n",
+ "Epoch 2148/100000\n",
+ " - 18s - loss: 0.3338 - acc: 0.9354 - val_loss: 0.3130 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 02148: val_acc did not improve from 0.94225\n",
+ "Epoch 2149/100000\n",
+ " - 19s - loss: 0.3344 - acc: 0.9354 - val_loss: 0.3293 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 02149: val_acc did not improve from 0.94225\n",
+ "Epoch 2150/100000\n",
+ " - 19s - loss: 0.3339 - acc: 0.9351 - val_loss: 0.3223 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 02150: val_acc did not improve from 0.94225\n",
+ "Epoch 2151/100000\n",
+ " - 19s - loss: 0.3337 - acc: 0.9357 - val_loss: 0.3223 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 02151: val_acc did not improve from 0.94225\n",
+ "Epoch 2152/100000\n",
+ " - 19s - loss: 0.3345 - acc: 0.9352 - val_loss: 0.3645 - val_acc: 0.9019\n",
+ "\n",
+ "Epoch 02152: val_acc did not improve from 0.94225\n",
+ "Epoch 2153/100000\n",
+ " - 19s - loss: 0.3333 - acc: 0.9362 - val_loss: 0.3544 - val_acc: 0.9199\n",
+ "\n",
+ "Epoch 02153: val_acc did not improve from 0.94225\n",
+ "Epoch 2154/100000\n",
+ " - 19s - loss: 0.3363 - acc: 0.9344 - val_loss: 0.4285 - val_acc: 0.8861\n",
+ "\n",
+ "Epoch 02154: val_acc did not improve from 0.94225\n",
+ "Epoch 2155/100000\n",
+ " - 18s - loss: 0.3359 - acc: 0.9345 - val_loss: 0.3354 - val_acc: 0.9269\n",
+ "\n",
+ "Epoch 02155: val_acc did not improve from 0.94225\n",
+ "Epoch 2156/100000\n",
+ " - 19s - loss: 0.3338 - acc: 0.9353 - val_loss: 0.3603 - val_acc: 0.9184\n",
+ "\n",
+ "Epoch 02156: val_acc did not improve from 0.94225\n",
+ "Epoch 2157/100000\n",
+ " - 19s - loss: 0.3315 - acc: 0.9361 - val_loss: 0.3314 - val_acc: 0.9289\n",
+ "\n",
+ "Epoch 02157: val_acc did not improve from 0.94225\n",
+ "Epoch 2158/100000\n",
+ " - 19s - loss: 0.3335 - acc: 0.9357 - val_loss: 0.3251 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 02158: val_acc did not improve from 0.94225\n",
+ "Epoch 2159/100000\n",
+ " - 19s - loss: 0.3326 - acc: 0.9355 - val_loss: 0.3649 - val_acc: 0.9255\n",
+ "\n",
+ "Epoch 02159: val_acc did not improve from 0.94225\n",
+ "Epoch 2160/100000\n",
+ " - 19s - loss: 0.3340 - acc: 0.9345 - val_loss: 0.3337 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 02160: val_acc did not improve from 0.94225\n",
+ "Epoch 2161/100000\n",
+ " - 19s - loss: 0.3348 - acc: 0.9353 - val_loss: 0.3137 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 02161: val_acc did not improve from 0.94225\n",
+ "Epoch 2162/100000\n",
+ " - 19s - loss: 0.3328 - acc: 0.9355 - val_loss: 0.3173 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 02162: val_acc did not improve from 0.94225\n",
+ "Epoch 2163/100000\n",
+ " - 19s - loss: 0.3330 - acc: 0.9356 - val_loss: 0.3783 - val_acc: 0.8888\n",
+ "\n",
+ "Epoch 02163: val_acc did not improve from 0.94225\n",
+ "Epoch 2164/100000\n",
+ " - 19s - loss: 0.3317 - acc: 0.9361 - val_loss: 0.3583 - val_acc: 0.9126\n",
+ "\n",
+ "Epoch 02164: val_acc did not improve from 0.94225\n",
+ "Epoch 2165/100000\n",
+ " - 18s - loss: 0.3353 - acc: 0.9346 - val_loss: 0.3442 - val_acc: 0.9234\n",
+ "\n",
+ "Epoch 02165: val_acc did not improve from 0.94225\n",
+ "Epoch 2166/100000\n",
+ " - 19s - loss: 0.3340 - acc: 0.9351 - val_loss: 0.3311 - val_acc: 0.9279\n",
+ "\n",
+ "Epoch 02166: val_acc did not improve from 0.94225\n",
+ "Epoch 2167/100000\n",
+ " - 19s - loss: 0.3344 - acc: 0.9354 - val_loss: 0.3444 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 02167: val_acc did not improve from 0.94225\n",
+ "Epoch 2168/100000\n",
+ " - 19s - loss: 0.3349 - acc: 0.9351 - val_loss: 0.3745 - val_acc: 0.9083\n",
+ "\n",
+ "Epoch 02168: val_acc did not improve from 0.94225\n",
+ "Epoch 2169/100000\n",
+ " - 19s - loss: 0.3332 - acc: 0.9354 - val_loss: 0.3176 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 02169: val_acc did not improve from 0.94225\n",
+ "Epoch 2170/100000\n",
+ " - 19s - loss: 0.3341 - acc: 0.9354 - val_loss: 0.3231 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 02170: val_acc did not improve from 0.94225\n",
+ "Epoch 2171/100000\n",
+ " - 19s - loss: 0.3356 - acc: 0.9346 - val_loss: 0.3619 - val_acc: 0.9135\n",
+ "\n",
+ "Epoch 02171: val_acc did not improve from 0.94225\n",
+ "Epoch 2172/100000\n",
+ " - 19s - loss: 0.3368 - acc: 0.9346 - val_loss: 0.3787 - val_acc: 0.9063\n",
+ "\n",
+ "Epoch 02172: val_acc did not improve from 0.94225\n",
+ "Epoch 2173/100000\n",
+ " - 19s - loss: 0.3337 - acc: 0.9358 - val_loss: 0.3144 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 02173: val_acc did not improve from 0.94225\n",
+ "Epoch 2174/100000\n",
+ " - 19s - loss: 0.3376 - acc: 0.9341 - val_loss: 0.3377 - val_acc: 0.9227\n",
+ "\n",
+ "Epoch 02174: val_acc did not improve from 0.94225\n",
+ "Epoch 2175/100000\n",
+ " - 19s - loss: 0.3340 - acc: 0.9352 - val_loss: 0.3946 - val_acc: 0.8952\n",
+ "\n",
+ "Epoch 02175: val_acc did not improve from 0.94225\n",
+ "Epoch 2176/100000\n",
+ " - 19s - loss: 0.3347 - acc: 0.9352 - val_loss: 0.3259 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 02176: val_acc did not improve from 0.94225\n",
+ "Epoch 2177/100000\n",
+ " - 19s - loss: 0.3369 - acc: 0.9346 - val_loss: 0.3830 - val_acc: 0.9172\n",
+ "\n",
+ "Epoch 02177: val_acc did not improve from 0.94225\n",
+ "Epoch 2178/100000\n",
+ " - 19s - loss: 0.3362 - acc: 0.9347 - val_loss: 0.3291 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 02178: val_acc did not improve from 0.94225\n",
+ "Epoch 2179/100000\n",
+ " - 19s - loss: 0.3322 - acc: 0.9353 - val_loss: 0.3336 - val_acc: 0.9265\n",
+ "\n",
+ "Epoch 02179: val_acc did not improve from 0.94225\n",
+ "Epoch 2180/100000\n",
+ " - 19s - loss: 0.3343 - acc: 0.9353 - val_loss: 0.3778 - val_acc: 0.9091\n",
+ "\n",
+ "Epoch 02180: val_acc did not improve from 0.94225\n",
+ "Epoch 2181/100000\n",
+ " - 19s - loss: 0.3366 - acc: 0.9343 - val_loss: 0.3327 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 02181: val_acc did not improve from 0.94225\n",
+ "Epoch 2182/100000\n",
+ " - 19s - loss: 0.3346 - acc: 0.9363 - val_loss: 0.3593 - val_acc: 0.9164\n",
+ "\n",
+ "Epoch 02182: val_acc did not improve from 0.94225\n",
+ "Epoch 2183/100000\n",
+ " - 19s - loss: 0.3353 - acc: 0.9342 - val_loss: 0.3701 - val_acc: 0.9267\n",
+ "\n",
+ "Epoch 02183: val_acc did not improve from 0.94225\n",
+ "Epoch 2184/100000\n",
+ " - 19s - loss: 0.3355 - acc: 0.9351 - val_loss: 0.3306 - val_acc: 0.9264\n",
+ "\n",
+ "Epoch 02184: val_acc did not improve from 0.94225\n",
+ "Epoch 2185/100000\n",
+ " - 18s - loss: 0.3349 - acc: 0.9352 - val_loss: 0.3295 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 02185: val_acc did not improve from 0.94225\n",
+ "Epoch 2186/100000\n",
+ " - 19s - loss: 0.3340 - acc: 0.9357 - val_loss: 0.3553 - val_acc: 0.9258\n",
+ "\n",
+ "Epoch 02186: val_acc did not improve from 0.94225\n",
+ "Epoch 2187/100000\n",
+ " - 18s - loss: 0.3374 - acc: 0.9347 - val_loss: 0.3646 - val_acc: 0.9104\n",
+ "\n",
+ "Epoch 02187: val_acc did not improve from 0.94225\n",
+ "Epoch 2188/100000\n",
+ " - 19s - loss: 0.3342 - acc: 0.9354 - val_loss: 0.3216 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 02188: val_acc did not improve from 0.94225\n",
+ "Epoch 2189/100000\n",
+ " - 18s - loss: 0.3359 - acc: 0.9350 - val_loss: 0.3492 - val_acc: 0.9187\n",
+ "\n",
+ "Epoch 02189: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 02189: ReduceLROnPlateau reducing learning rate to 0.0005987368611386045.\n",
+ "Epoch 2190/100000\n",
+ " - 18s - loss: 0.3301 - acc: 0.9345 - val_loss: 0.3098 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 02190: val_acc did not improve from 0.94225\n",
+ "Epoch 2191/100000\n",
+ " - 18s - loss: 0.3264 - acc: 0.9355 - val_loss: 0.3180 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 02191: val_acc did not improve from 0.94225\n",
+ "Epoch 2192/100000\n",
+ " - 19s - loss: 0.3246 - acc: 0.9365 - val_loss: 0.4054 - val_acc: 0.8870\n",
+ "\n",
+ "Epoch 02192: val_acc did not improve from 0.94225\n",
+ "Epoch 2193/100000\n",
+ " - 19s - loss: 0.3266 - acc: 0.9360 - val_loss: 0.3161 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 02193: val_acc did not improve from 0.94225\n",
+ "Epoch 2194/100000\n",
+ " - 19s - loss: 0.3276 - acc: 0.9360 - val_loss: 0.6201 - val_acc: 0.7837\n",
+ "\n",
+ "Epoch 02194: val_acc did not improve from 0.94225\n",
+ "Epoch 2195/100000\n",
+ " - 19s - loss: 0.3267 - acc: 0.9361 - val_loss: 0.3205 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 02195: val_acc did not improve from 0.94225\n",
+ "Epoch 2196/100000\n",
+ " - 19s - loss: 0.3260 - acc: 0.9360 - val_loss: 0.3317 - val_acc: 0.9274\n",
+ "\n",
+ "Epoch 02196: val_acc did not improve from 0.94225\n",
+ "Epoch 2197/100000\n",
+ " - 19s - loss: 0.3260 - acc: 0.9361 - val_loss: 0.3308 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 02197: val_acc did not improve from 0.94225\n",
+ "Epoch 2198/100000\n",
+ " - 18s - loss: 0.3301 - acc: 0.9348 - val_loss: 0.3132 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 02198: val_acc did not improve from 0.94225\n",
+ "Epoch 2199/100000\n",
+ " - 18s - loss: 0.3252 - acc: 0.9362 - val_loss: 0.3166 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 02199: val_acc did not improve from 0.94225\n",
+ "Epoch 2200/100000\n",
+ " - 19s - loss: 0.3265 - acc: 0.9358 - val_loss: 0.3651 - val_acc: 0.9142\n",
+ "\n",
+ "Epoch 02200: val_acc did not improve from 0.94225\n",
+ "Epoch 2201/100000\n",
+ " - 19s - loss: 0.3280 - acc: 0.9360 - val_loss: 0.3169 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 02201: val_acc did not improve from 0.94225\n",
+ "Epoch 2202/100000\n",
+ " - 19s - loss: 0.3272 - acc: 0.9358 - val_loss: 0.3498 - val_acc: 0.9144\n",
+ "\n",
+ "Epoch 02202: val_acc did not improve from 0.94225\n",
+ "Epoch 2203/100000\n",
+ " - 19s - loss: 0.3267 - acc: 0.9364 - val_loss: 0.3128 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 02203: val_acc did not improve from 0.94225\n",
+ "Epoch 2204/100000\n",
+ " - 18s - loss: 0.3266 - acc: 0.9358 - val_loss: 0.3199 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02204: val_acc did not improve from 0.94225\n",
+ "Epoch 2205/100000\n",
+ " - 19s - loss: 0.3291 - acc: 0.9351 - val_loss: 0.3252 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02205: val_acc did not improve from 0.94225\n",
+ "Epoch 2206/100000\n",
+ " - 18s - loss: 0.3269 - acc: 0.9360 - val_loss: 0.3469 - val_acc: 0.9120\n",
+ "\n",
+ "Epoch 02206: val_acc did not improve from 0.94225\n",
+ "Epoch 2207/100000\n",
+ " - 19s - loss: 0.3250 - acc: 0.9365 - val_loss: 0.3283 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 02207: val_acc did not improve from 0.94225\n",
+ "Epoch 2208/100000\n",
+ " - 18s - loss: 0.3274 - acc: 0.9353 - val_loss: 0.3119 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 02208: val_acc did not improve from 0.94225\n",
+ "Epoch 2209/100000\n",
+ " - 19s - loss: 0.3260 - acc: 0.9363 - val_loss: 0.3488 - val_acc: 0.9109\n",
+ "\n",
+ "Epoch 02209: val_acc did not improve from 0.94225\n",
+ "Epoch 2210/100000\n",
+ " - 18s - loss: 0.3271 - acc: 0.9357 - val_loss: 0.3175 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 02210: val_acc did not improve from 0.94225\n",
+ "Epoch 2211/100000\n",
+ " - 19s - loss: 0.3286 - acc: 0.9359 - val_loss: 0.3201 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 02211: val_acc did not improve from 0.94225\n",
+ "Epoch 2212/100000\n",
+ " - 18s - loss: 0.3238 - acc: 0.9369 - val_loss: 0.3060 - val_acc: 0.9383\n",
+ "\n",
+ "Epoch 02212: val_acc did not improve from 0.94225\n",
+ "Epoch 2213/100000\n",
+ " - 19s - loss: 0.3243 - acc: 0.9368 - val_loss: 0.3233 - val_acc: 0.9236\n",
+ "\n",
+ "Epoch 02213: val_acc did not improve from 0.94225\n",
+ "Epoch 2214/100000\n",
+ " - 19s - loss: 0.3254 - acc: 0.9358 - val_loss: 0.3237 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 02214: val_acc did not improve from 0.94225\n",
+ "Epoch 2215/100000\n",
+ " - 19s - loss: 0.3264 - acc: 0.9360 - val_loss: 0.3171 - val_acc: 0.9377\n",
+ "\n",
+ "Epoch 02215: val_acc did not improve from 0.94225\n",
+ "Epoch 2216/100000\n",
+ " - 18s - loss: 0.3257 - acc: 0.9364 - val_loss: 0.3694 - val_acc: 0.9097\n",
+ "\n",
+ "Epoch 02216: val_acc did not improve from 0.94225\n",
+ "Epoch 2217/100000\n",
+ " - 19s - loss: 0.3265 - acc: 0.9360 - val_loss: 0.3070 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 02217: val_acc did not improve from 0.94225\n",
+ "Epoch 2218/100000\n",
+ " - 18s - loss: 0.3261 - acc: 0.9361 - val_loss: 0.3175 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02218: val_acc did not improve from 0.94225\n",
+ "Epoch 2219/100000\n",
+ " - 19s - loss: 0.3253 - acc: 0.9365 - val_loss: 0.3220 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 02219: val_acc did not improve from 0.94225\n",
+ "Epoch 2220/100000\n",
+ " - 18s - loss: 0.3263 - acc: 0.9360 - val_loss: 0.3304 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 02220: val_acc did not improve from 0.94225\n",
+ "Epoch 2221/100000\n",
+ " - 19s - loss: 0.3271 - acc: 0.9354 - val_loss: 0.3224 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 02221: val_acc did not improve from 0.94225\n",
+ "Epoch 2222/100000\n",
+ " - 19s - loss: 0.3256 - acc: 0.9362 - val_loss: 0.3245 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 02222: val_acc did not improve from 0.94225\n",
+ "Epoch 2223/100000\n",
+ " - 18s - loss: 0.3256 - acc: 0.9364 - val_loss: 0.3130 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 02223: val_acc did not improve from 0.94225\n",
+ "Epoch 2224/100000\n",
+ " - 19s - loss: 0.3252 - acc: 0.9366 - val_loss: 0.3106 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 02224: val_acc did not improve from 0.94225\n",
+ "Epoch 2225/100000\n",
+ " - 19s - loss: 0.3244 - acc: 0.9365 - val_loss: 0.3203 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 02225: val_acc did not improve from 0.94225\n",
+ "Epoch 2226/100000\n",
+ " - 19s - loss: 0.3255 - acc: 0.9365 - val_loss: 0.3763 - val_acc: 0.9112\n",
+ "\n",
+ "Epoch 02226: val_acc did not improve from 0.94225\n",
+ "Epoch 2227/100000\n",
+ " - 18s - loss: 0.3283 - acc: 0.9358 - val_loss: 0.5042 - val_acc: 0.8577\n",
+ "\n",
+ "Epoch 02227: val_acc did not improve from 0.94225\n",
+ "Epoch 2228/100000\n",
+ " - 19s - loss: 0.3245 - acc: 0.9363 - val_loss: 0.3263 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 02228: val_acc did not improve from 0.94225\n",
+ "Epoch 2229/100000\n",
+ " - 19s - loss: 0.3252 - acc: 0.9362 - val_loss: 0.5468 - val_acc: 0.8286\n",
+ "\n",
+ "Epoch 02229: val_acc did not improve from 0.94225\n",
+ "Epoch 2230/100000\n",
+ " - 19s - loss: 0.3283 - acc: 0.9355 - val_loss: 0.3467 - val_acc: 0.9214\n",
+ "\n",
+ "Epoch 02230: val_acc did not improve from 0.94225\n",
+ "Epoch 2231/100000\n",
+ " - 19s - loss: 0.3251 - acc: 0.9364 - val_loss: 0.4078 - val_acc: 0.8927\n",
+ "\n",
+ "Epoch 02231: val_acc did not improve from 0.94225\n",
+ "Epoch 2232/100000\n",
+ " - 19s - loss: 0.3257 - acc: 0.9366 - val_loss: 0.3183 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 02232: val_acc did not improve from 0.94225\n",
+ "Epoch 2233/100000\n",
+ " - 19s - loss: 0.3280 - acc: 0.9355 - val_loss: 0.3069 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 02233: val_acc did not improve from 0.94225\n",
+ "Epoch 2234/100000\n",
+ " - 19s - loss: 0.3249 - acc: 0.9363 - val_loss: 0.3451 - val_acc: 0.9168\n",
+ "\n",
+ "Epoch 02234: val_acc did not improve from 0.94225\n",
+ "Epoch 2235/100000\n",
+ " - 19s - loss: 0.3280 - acc: 0.9354 - val_loss: 0.3080 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 02235: val_acc did not improve from 0.94225\n",
+ "Epoch 2236/100000\n",
+ " - 19s - loss: 0.3246 - acc: 0.9365 - val_loss: 0.3322 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 02236: val_acc did not improve from 0.94225\n",
+ "Epoch 2237/100000\n",
+ " - 19s - loss: 0.3262 - acc: 0.9361 - val_loss: 0.3407 - val_acc: 0.9208\n",
+ "\n",
+ "Epoch 02237: val_acc did not improve from 0.94225\n",
+ "Epoch 2238/100000\n",
+ " - 19s - loss: 0.3246 - acc: 0.9370 - val_loss: 0.5896 - val_acc: 0.8032\n",
+ "\n",
+ "Epoch 02238: val_acc did not improve from 0.94225\n",
+ "Epoch 2239/100000\n",
+ " - 18s - loss: 0.3257 - acc: 0.9364 - val_loss: 0.3138 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 02239: val_acc did not improve from 0.94225\n",
+ "Epoch 2240/100000\n",
+ " - 19s - loss: 0.3262 - acc: 0.9363 - val_loss: 0.3226 - val_acc: 0.9275\n",
+ "\n",
+ "Epoch 02240: val_acc did not improve from 0.94225\n",
+ "Epoch 2241/100000\n",
+ " - 18s - loss: 0.3257 - acc: 0.9362 - val_loss: 0.3218 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02241: val_acc did not improve from 0.94225\n",
+ "Epoch 2242/100000\n",
+ " - 19s - loss: 0.3248 - acc: 0.9368 - val_loss: 0.3115 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 02242: val_acc did not improve from 0.94225\n",
+ "Epoch 2243/100000\n",
+ " - 18s - loss: 0.3263 - acc: 0.9357 - val_loss: 0.3314 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 02243: val_acc did not improve from 0.94225\n",
+ "Epoch 2244/100000\n",
+ " - 18s - loss: 0.3253 - acc: 0.9362 - val_loss: 0.3391 - val_acc: 0.9203\n",
+ "\n",
+ "Epoch 02244: val_acc did not improve from 0.94225\n",
+ "Epoch 2245/100000\n",
+ " - 18s - loss: 0.3257 - acc: 0.9363 - val_loss: 0.3027 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 02245: val_acc did not improve from 0.94225\n",
+ "Epoch 2246/100000\n",
+ " - 18s - loss: 0.3241 - acc: 0.9366 - val_loss: 0.3634 - val_acc: 0.9248\n",
+ "\n",
+ "Epoch 02246: val_acc did not improve from 0.94225\n",
+ "Epoch 2247/100000\n",
+ " - 19s - loss: 0.3254 - acc: 0.9365 - val_loss: 0.3143 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02247: val_acc did not improve from 0.94225\n",
+ "Epoch 2248/100000\n",
+ " - 18s - loss: 0.3246 - acc: 0.9364 - val_loss: 0.3112 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 02248: val_acc did not improve from 0.94225\n",
+ "Epoch 2249/100000\n",
+ " - 18s - loss: 0.3267 - acc: 0.9356 - val_loss: 0.3113 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 02249: val_acc did not improve from 0.94225\n",
+ "Epoch 2250/100000\n",
+ " - 18s - loss: 0.3253 - acc: 0.9366 - val_loss: 0.3163 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 02250: val_acc did not improve from 0.94225\n",
+ "Epoch 2251/100000\n",
+ " - 19s - loss: 0.3284 - acc: 0.9353 - val_loss: 0.3277 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 02251: val_acc did not improve from 0.94225\n",
+ "Epoch 2252/100000\n",
+ " - 18s - loss: 0.3253 - acc: 0.9360 - val_loss: 0.3213 - val_acc: 0.9290\n",
+ "\n",
+ "Epoch 02252: val_acc did not improve from 0.94225\n",
+ "Epoch 2253/100000\n",
+ " - 19s - loss: 0.3253 - acc: 0.9361 - val_loss: 0.4819 - val_acc: 0.8655\n",
+ "\n",
+ "Epoch 02253: val_acc did not improve from 0.94225\n",
+ "Epoch 2254/100000\n",
+ " - 18s - loss: 0.3273 - acc: 0.9364 - val_loss: 0.3747 - val_acc: 0.9248\n",
+ "\n",
+ "Epoch 02254: val_acc did not improve from 0.94225\n",
+ "Epoch 2255/100000\n",
+ " - 18s - loss: 0.3272 - acc: 0.9360 - val_loss: 0.3189 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02255: val_acc did not improve from 0.94225\n",
+ "Epoch 2256/100000\n",
+ " - 18s - loss: 0.3273 - acc: 0.9357 - val_loss: 0.3492 - val_acc: 0.9180\n",
+ "\n",
+ "Epoch 02256: val_acc did not improve from 0.94225\n",
+ "Epoch 2257/100000\n",
+ " - 18s - loss: 0.3248 - acc: 0.9366 - val_loss: 0.3142 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02257: val_acc did not improve from 0.94225\n",
+ "Epoch 2258/100000\n",
+ " - 18s - loss: 0.3250 - acc: 0.9365 - val_loss: 0.3086 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 02258: val_acc did not improve from 0.94225\n",
+ "Epoch 2259/100000\n",
+ " - 19s - loss: 0.3236 - acc: 0.9364 - val_loss: 0.3166 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02259: val_acc did not improve from 0.94225\n",
+ "Epoch 2260/100000\n",
+ " - 18s - loss: 0.3239 - acc: 0.9363 - val_loss: 0.3211 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 02260: val_acc did not improve from 0.94225\n",
+ "Epoch 2261/100000\n",
+ " - 19s - loss: 0.3245 - acc: 0.9361 - val_loss: 0.3495 - val_acc: 0.9156\n",
+ "\n",
+ "Epoch 02261: val_acc did not improve from 0.94225\n",
+ "Epoch 2262/100000\n",
+ " - 18s - loss: 0.3262 - acc: 0.9357 - val_loss: 0.3503 - val_acc: 0.9152\n",
+ "\n",
+ "Epoch 02262: val_acc did not improve from 0.94225\n",
+ "Epoch 2263/100000\n",
+ " - 19s - loss: 0.3250 - acc: 0.9361 - val_loss: 0.3151 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 02263: val_acc did not improve from 0.94225\n",
+ "Epoch 2264/100000\n",
+ " - 19s - loss: 0.3240 - acc: 0.9365 - val_loss: 0.3147 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 02264: val_acc did not improve from 0.94225\n",
+ "Epoch 2265/100000\n",
+ " - 19s - loss: 0.3277 - acc: 0.9354 - val_loss: 0.3428 - val_acc: 0.9234\n",
+ "\n",
+ "Epoch 02265: val_acc did not improve from 0.94225\n",
+ "Epoch 2266/100000\n",
+ " - 19s - loss: 0.3243 - acc: 0.9365 - val_loss: 0.4584 - val_acc: 0.8730\n",
+ "\n",
+ "Epoch 02266: val_acc did not improve from 0.94225\n",
+ "Epoch 2267/100000\n",
+ " - 19s - loss: 0.3261 - acc: 0.9353 - val_loss: 0.3240 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 02267: val_acc did not improve from 0.94225\n",
+ "Epoch 2268/100000\n",
+ " - 19s - loss: 0.3260 - acc: 0.9363 - val_loss: 0.3135 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02268: val_acc did not improve from 0.94225\n",
+ "Epoch 2269/100000\n",
+ " - 18s - loss: 0.3240 - acc: 0.9362 - val_loss: 0.3363 - val_acc: 0.9245\n",
+ "\n",
+ "Epoch 02269: val_acc did not improve from 0.94225\n",
+ "Epoch 2270/100000\n",
+ " - 19s - loss: 0.3262 - acc: 0.9362 - val_loss: 0.3340 - val_acc: 0.9264\n",
+ "\n",
+ "Epoch 02270: val_acc did not improve from 0.94225\n",
+ "Epoch 2271/100000\n",
+ " - 18s - loss: 0.3245 - acc: 0.9363 - val_loss: 0.3231 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 02271: val_acc did not improve from 0.94225\n",
+ "Epoch 2272/100000\n",
+ " - 18s - loss: 0.3251 - acc: 0.9366 - val_loss: 0.3156 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02272: val_acc did not improve from 0.94225\n",
+ "Epoch 2273/100000\n",
+ " - 19s - loss: 0.3243 - acc: 0.9365 - val_loss: 0.3711 - val_acc: 0.9091\n",
+ "\n",
+ "Epoch 02273: val_acc did not improve from 0.94225\n",
+ "Epoch 2274/100000\n",
+ " - 18s - loss: 0.3255 - acc: 0.9361 - val_loss: 0.3116 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02274: val_acc did not improve from 0.94225\n",
+ "Epoch 2275/100000\n",
+ " - 18s - loss: 0.3261 - acc: 0.9360 - val_loss: 0.3327 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 02275: val_acc did not improve from 0.94225\n",
+ "Epoch 2276/100000\n",
+ " - 18s - loss: 0.3263 - acc: 0.9357 - val_loss: 0.6150 - val_acc: 0.7694\n",
+ "\n",
+ "Epoch 02276: val_acc did not improve from 0.94225\n",
+ "Epoch 2277/100000\n",
+ " - 18s - loss: 0.3228 - acc: 0.9369 - val_loss: 0.3367 - val_acc: 0.9247\n",
+ "\n",
+ "Epoch 02277: val_acc did not improve from 0.94225\n",
+ "Epoch 2278/100000\n",
+ " - 19s - loss: 0.3251 - acc: 0.9364 - val_loss: 0.3398 - val_acc: 0.9247\n",
+ "\n",
+ "Epoch 02278: val_acc did not improve from 0.94225\n",
+ "Epoch 2279/100000\n",
+ " - 18s - loss: 0.3250 - acc: 0.9363 - val_loss: 0.3728 - val_acc: 0.9084\n",
+ "\n",
+ "Epoch 02279: val_acc did not improve from 0.94225\n",
+ "Epoch 2280/100000\n",
+ " - 19s - loss: 0.3272 - acc: 0.9358 - val_loss: 0.4430 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 02280: val_acc did not improve from 0.94225\n",
+ "Epoch 2281/100000\n",
+ " - 18s - loss: 0.3249 - acc: 0.9362 - val_loss: 0.3073 - val_acc: 0.9383\n",
+ "\n",
+ "Epoch 02281: val_acc did not improve from 0.94225\n",
+ "Epoch 2282/100000\n",
+ " - 19s - loss: 0.3243 - acc: 0.9362 - val_loss: 0.3218 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 02282: val_acc did not improve from 0.94225\n",
+ "Epoch 2283/100000\n",
+ " - 18s - loss: 0.3241 - acc: 0.9367 - val_loss: 0.3333 - val_acc: 0.9244\n",
+ "\n",
+ "Epoch 02283: val_acc did not improve from 0.94225\n",
+ "Epoch 2284/100000\n",
+ " - 19s - loss: 0.3290 - acc: 0.9350 - val_loss: 0.3161 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 02284: val_acc did not improve from 0.94225\n",
+ "Epoch 2285/100000\n",
+ " - 18s - loss: 0.3266 - acc: 0.9359 - val_loss: 0.3252 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 02285: val_acc did not improve from 0.94225\n",
+ "Epoch 2286/100000\n",
+ " - 19s - loss: 0.3233 - acc: 0.9367 - val_loss: 0.3043 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 02286: val_acc did not improve from 0.94225\n",
+ "Epoch 2287/100000\n",
+ " - 18s - loss: 0.3261 - acc: 0.9361 - val_loss: 0.3320 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 02287: val_acc did not improve from 0.94225\n",
+ "Epoch 2288/100000\n",
+ " - 19s - loss: 0.3236 - acc: 0.9368 - val_loss: 0.3539 - val_acc: 0.9150\n",
+ "\n",
+ "Epoch 02288: val_acc did not improve from 0.94225\n",
+ "Epoch 2289/100000\n",
+ " - 18s - loss: 0.3229 - acc: 0.9367 - val_loss: 0.3393 - val_acc: 0.9224\n",
+ "\n",
+ "Epoch 02289: val_acc did not improve from 0.94225\n",
+ "Epoch 2290/100000\n",
+ " - 19s - loss: 0.3246 - acc: 0.9358 - val_loss: 0.3263 - val_acc: 0.9266\n",
+ "\n",
+ "Epoch 02290: val_acc did not improve from 0.94225\n",
+ "Epoch 2291/100000\n",
+ " - 18s - loss: 0.3262 - acc: 0.9358 - val_loss: 0.3188 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 02291: val_acc did not improve from 0.94225\n",
+ "Epoch 2292/100000\n",
+ " - 18s - loss: 0.3229 - acc: 0.9372 - val_loss: 0.3220 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 02292: val_acc did not improve from 0.94225\n",
+ "Epoch 2293/100000\n",
+ " - 19s - loss: 0.3246 - acc: 0.9367 - val_loss: 0.3219 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 02293: val_acc did not improve from 0.94225\n",
+ "Epoch 2294/100000\n",
+ " - 18s - loss: 0.3245 - acc: 0.9363 - val_loss: 0.3592 - val_acc: 0.9103\n",
+ "\n",
+ "Epoch 02294: val_acc did not improve from 0.94225\n",
+ "Epoch 2295/100000\n",
+ " - 19s - loss: 0.3267 - acc: 0.9357 - val_loss: 0.3187 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 02295: val_acc did not improve from 0.94225\n",
+ "Epoch 2296/100000\n",
+ " - 18s - loss: 0.3265 - acc: 0.9361 - val_loss: 0.3323 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02296: val_acc did not improve from 0.94225\n",
+ "Epoch 2297/100000\n",
+ " - 19s - loss: 0.3239 - acc: 0.9367 - val_loss: 0.3172 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 02297: val_acc did not improve from 0.94225\n",
+ "Epoch 2298/100000\n",
+ " - 19s - loss: 0.3233 - acc: 0.9372 - val_loss: 0.3212 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 02298: val_acc did not improve from 0.94225\n",
+ "Epoch 2299/100000\n",
+ " - 19s - loss: 0.3235 - acc: 0.9369 - val_loss: 0.3246 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 02299: val_acc did not improve from 0.94225\n",
+ "Epoch 2300/100000\n",
+ " - 18s - loss: 0.3239 - acc: 0.9364 - val_loss: 0.3143 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02300: val_acc did not improve from 0.94225\n",
+ "Epoch 2301/100000\n",
+ " - 19s - loss: 0.3275 - acc: 0.9354 - val_loss: 0.3141 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 02301: val_acc did not improve from 0.94225\n",
+ "Epoch 2302/100000\n",
+ " - 18s - loss: 0.3221 - acc: 0.9374 - val_loss: 0.3295 - val_acc: 0.9305\n",
+ "\n",
+ "Epoch 02302: val_acc did not improve from 0.94225\n",
+ "Epoch 2303/100000\n",
+ " - 19s - loss: 0.3243 - acc: 0.9363 - val_loss: 0.3076 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 02303: val_acc did not improve from 0.94225\n",
+ "Epoch 2304/100000\n",
+ " - 19s - loss: 0.3261 - acc: 0.9357 - val_loss: 0.3140 - val_acc: 0.9400\n",
+ "\n",
+ "Epoch 02304: val_acc did not improve from 0.94225\n",
+ "Epoch 2305/100000\n",
+ " - 18s - loss: 0.3247 - acc: 0.9362 - val_loss: 0.3223 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 02305: val_acc did not improve from 0.94225\n",
+ "Epoch 2306/100000\n",
+ " - 19s - loss: 0.3234 - acc: 0.9367 - val_loss: 0.3293 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 02306: val_acc did not improve from 0.94225\n",
+ "Epoch 2307/100000\n",
+ " - 18s - loss: 0.3234 - acc: 0.9367 - val_loss: 0.3402 - val_acc: 0.9196\n",
+ "\n",
+ "Epoch 02307: val_acc did not improve from 0.94225\n",
+ "Epoch 2308/100000\n",
+ " - 19s - loss: 0.3262 - acc: 0.9363 - val_loss: 0.3314 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 02308: val_acc did not improve from 0.94225\n",
+ "Epoch 2309/100000\n",
+ " - 19s - loss: 0.3245 - acc: 0.9369 - val_loss: 0.3466 - val_acc: 0.9214\n",
+ "\n",
+ "Epoch 02309: val_acc did not improve from 0.94225\n",
+ "Epoch 2310/100000\n",
+ " - 19s - loss: 0.3229 - acc: 0.9369 - val_loss: 0.3236 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 02310: val_acc did not improve from 0.94225\n",
+ "Epoch 2311/100000\n",
+ " - 19s - loss: 0.3258 - acc: 0.9362 - val_loss: 0.3756 - val_acc: 0.9017\n",
+ "\n",
+ "Epoch 02311: val_acc did not improve from 0.94225\n",
+ "Epoch 2312/100000\n",
+ " - 19s - loss: 0.3257 - acc: 0.9359 - val_loss: 0.3144 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02312: val_acc did not improve from 0.94225\n",
+ "Epoch 2313/100000\n",
+ " - 19s - loss: 0.3256 - acc: 0.9365 - val_loss: 0.3100 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 02313: val_acc did not improve from 0.94225\n",
+ "Epoch 2314/100000\n",
+ " - 18s - loss: 0.3224 - acc: 0.9373 - val_loss: 0.3446 - val_acc: 0.9193\n",
+ "\n",
+ "Epoch 02314: val_acc did not improve from 0.94225\n",
+ "Epoch 2315/100000\n",
+ " - 19s - loss: 0.3252 - acc: 0.9364 - val_loss: 0.3087 - val_acc: 0.9397\n",
+ "\n",
+ "Epoch 02315: val_acc did not improve from 0.94225\n",
+ "Epoch 2316/100000\n",
+ " - 18s - loss: 0.3267 - acc: 0.9363 - val_loss: 0.4227 - val_acc: 0.8733\n",
+ "\n",
+ "Epoch 02316: val_acc did not improve from 0.94225\n",
+ "Epoch 2317/100000\n",
+ " - 19s - loss: 0.3268 - acc: 0.9362 - val_loss: 0.3213 - val_acc: 0.9323\n",
+ "\n",
+ "Epoch 02317: val_acc did not improve from 0.94225\n",
+ "Epoch 2318/100000\n",
+ " - 18s - loss: 0.3254 - acc: 0.9362 - val_loss: 0.3268 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 02318: val_acc did not improve from 0.94225\n",
+ "Epoch 2319/100000\n",
+ " - 19s - loss: 0.3248 - acc: 0.9362 - val_loss: 0.3260 - val_acc: 0.9279\n",
+ "\n",
+ "Epoch 02319: val_acc did not improve from 0.94225\n",
+ "Epoch 2320/100000\n",
+ " - 18s - loss: 0.3230 - acc: 0.9368 - val_loss: 0.3252 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 02320: val_acc did not improve from 0.94225\n",
+ "Epoch 2321/100000\n",
+ " - 19s - loss: 0.3257 - acc: 0.9357 - val_loss: 0.3084 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 02321: val_acc did not improve from 0.94225\n",
+ "Epoch 2322/100000\n",
+ " - 19s - loss: 0.3262 - acc: 0.9355 - val_loss: 0.3238 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 02322: val_acc did not improve from 0.94225\n",
+ "Epoch 2323/100000\n",
+ " - 19s - loss: 0.3236 - acc: 0.9371 - val_loss: 0.3112 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 02323: val_acc did not improve from 0.94225\n",
+ "Epoch 2324/100000\n",
+ " - 18s - loss: 0.3230 - acc: 0.9371 - val_loss: 0.3043 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 02324: val_acc did not improve from 0.94225\n",
+ "Epoch 2325/100000\n",
+ " - 19s - loss: 0.3250 - acc: 0.9359 - val_loss: 0.3364 - val_acc: 0.9232\n",
+ "\n",
+ "Epoch 02325: val_acc did not improve from 0.94225\n",
+ "Epoch 2326/100000\n",
+ " - 19s - loss: 0.3265 - acc: 0.9364 - val_loss: 0.3259 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02326: val_acc did not improve from 0.94225\n",
+ "Epoch 2327/100000\n",
+ " - 18s - loss: 0.3257 - acc: 0.9362 - val_loss: 0.3227 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 02327: val_acc did not improve from 0.94225\n",
+ "Epoch 2328/100000\n",
+ " - 19s - loss: 0.3260 - acc: 0.9357 - val_loss: 0.3348 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 02328: val_acc did not improve from 0.94225\n",
+ "Epoch 2329/100000\n",
+ " - 18s - loss: 0.3231 - acc: 0.9369 - val_loss: 0.3246 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 02329: val_acc did not improve from 0.94225\n",
+ "Epoch 2330/100000\n",
+ " - 19s - loss: 0.3257 - acc: 0.9357 - val_loss: 0.3276 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 02330: val_acc did not improve from 0.94225\n",
+ "Epoch 2331/100000\n",
+ " - 19s - loss: 0.3261 - acc: 0.9360 - val_loss: 0.3266 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 02331: val_acc did not improve from 0.94225\n",
+ "Epoch 2332/100000\n",
+ " - 18s - loss: 0.3264 - acc: 0.9364 - val_loss: 0.3534 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 02332: val_acc did not improve from 0.94225\n",
+ "Epoch 2333/100000\n",
+ " - 19s - loss: 0.3251 - acc: 0.9368 - val_loss: 0.3533 - val_acc: 0.9096\n",
+ "\n",
+ "Epoch 02333: val_acc did not improve from 0.94225\n",
+ "Epoch 2334/100000\n",
+ " - 18s - loss: 0.3258 - acc: 0.9359 - val_loss: 0.3143 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 02334: val_acc did not improve from 0.94225\n",
+ "Epoch 2335/100000\n",
+ " - 18s - loss: 0.3267 - acc: 0.9359 - val_loss: 0.3557 - val_acc: 0.9145\n",
+ "\n",
+ "Epoch 02335: val_acc did not improve from 0.94225\n",
+ "Epoch 2336/100000\n",
+ " - 19s - loss: 0.3214 - acc: 0.9375 - val_loss: 0.3133 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02336: val_acc did not improve from 0.94225\n",
+ "Epoch 2337/100000\n",
+ " - 19s - loss: 0.3257 - acc: 0.9362 - val_loss: 0.3200 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 02337: val_acc did not improve from 0.94225\n",
+ "Epoch 2338/100000\n",
+ " - 18s - loss: 0.3242 - acc: 0.9362 - val_loss: 0.3723 - val_acc: 0.9087\n",
+ "\n",
+ "Epoch 02338: val_acc did not improve from 0.94225\n",
+ "Epoch 2339/100000\n",
+ " - 19s - loss: 0.3249 - acc: 0.9361 - val_loss: 0.3482 - val_acc: 0.9206\n",
+ "\n",
+ "Epoch 02339: val_acc did not improve from 0.94225\n",
+ "Epoch 2340/100000\n",
+ " - 19s - loss: 0.3254 - acc: 0.9363 - val_loss: 0.3215 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 02340: val_acc did not improve from 0.94225\n",
+ "Epoch 2341/100000\n",
+ " - 18s - loss: 0.3265 - acc: 0.9360 - val_loss: 0.3111 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 02341: val_acc did not improve from 0.94225\n",
+ "Epoch 2342/100000\n",
+ " - 19s - loss: 0.3239 - acc: 0.9366 - val_loss: 0.3118 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 02342: val_acc did not improve from 0.94225\n",
+ "Epoch 2343/100000\n",
+ " - 20s - loss: 0.3243 - acc: 0.9367 - val_loss: 0.3158 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 02343: val_acc did not improve from 0.94225\n",
+ "Epoch 2344/100000\n",
+ " - 19s - loss: 0.3267 - acc: 0.9359 - val_loss: 0.3128 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 02344: val_acc did not improve from 0.94225\n",
+ "Epoch 2345/100000\n",
+ " - 18s - loss: 0.3246 - acc: 0.9370 - val_loss: 0.3161 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 02345: val_acc did not improve from 0.94225\n",
+ "Epoch 2346/100000\n",
+ " - 19s - loss: 0.3221 - acc: 0.9374 - val_loss: 0.3173 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02346: val_acc did not improve from 0.94225\n",
+ "Epoch 2347/100000\n",
+ " - 19s - loss: 0.3257 - acc: 0.9362 - val_loss: 0.3353 - val_acc: 0.9237\n",
+ "\n",
+ "Epoch 02347: val_acc did not improve from 0.94225\n",
+ "Epoch 2348/100000\n",
+ " - 19s - loss: 0.3236 - acc: 0.9370 - val_loss: 0.3107 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 02348: val_acc did not improve from 0.94225\n",
+ "Epoch 2349/100000\n",
+ " - 19s - loss: 0.3250 - acc: 0.9357 - val_loss: 0.3488 - val_acc: 0.9271\n",
+ "\n",
+ "Epoch 02349: val_acc did not improve from 0.94225\n",
+ "Epoch 2350/100000\n",
+ " - 18s - loss: 0.3247 - acc: 0.9365 - val_loss: 0.3054 - val_acc: 0.9396\n",
+ "\n",
+ "Epoch 02350: val_acc did not improve from 0.94225\n",
+ "Epoch 2351/100000\n",
+ " - 19s - loss: 0.3237 - acc: 0.9369 - val_loss: 0.3227 - val_acc: 0.9279\n",
+ "\n",
+ "Epoch 02351: val_acc did not improve from 0.94225\n",
+ "Epoch 2352/100000\n",
+ " - 18s - loss: 0.3249 - acc: 0.9360 - val_loss: 0.3290 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02352: val_acc did not improve from 0.94225\n",
+ "Epoch 2353/100000\n",
+ " - 18s - loss: 0.3254 - acc: 0.9365 - val_loss: 0.3071 - val_acc: 0.9409\n",
+ "\n",
+ "Epoch 02353: val_acc did not improve from 0.94225\n",
+ "Epoch 2354/100000\n",
+ " - 18s - loss: 0.3245 - acc: 0.9365 - val_loss: 0.3834 - val_acc: 0.9012\n",
+ "\n",
+ "Epoch 02354: val_acc did not improve from 0.94225\n",
+ "Epoch 2355/100000\n",
+ " - 18s - loss: 0.3241 - acc: 0.9366 - val_loss: 0.3500 - val_acc: 0.9158\n",
+ "\n",
+ "Epoch 02355: val_acc did not improve from 0.94225\n",
+ "Epoch 2356/100000\n",
+ " - 18s - loss: 0.3258 - acc: 0.9366 - val_loss: 0.3230 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 02356: val_acc did not improve from 0.94225\n",
+ "Epoch 2357/100000\n",
+ " - 18s - loss: 0.3265 - acc: 0.9366 - val_loss: 0.3845 - val_acc: 0.9014\n",
+ "\n",
+ "Epoch 02357: val_acc did not improve from 0.94225\n",
+ "Epoch 2358/100000\n",
+ " - 19s - loss: 0.3254 - acc: 0.9368 - val_loss: 0.3185 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 02358: val_acc did not improve from 0.94225\n",
+ "Epoch 2359/100000\n",
+ " - 19s - loss: 0.3279 - acc: 0.9357 - val_loss: 0.3255 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 02359: val_acc did not improve from 0.94225\n",
+ "Epoch 2360/100000\n",
+ " - 19s - loss: 0.3230 - acc: 0.9366 - val_loss: 0.3089 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 02360: val_acc did not improve from 0.94225\n",
+ "Epoch 2361/100000\n",
+ " - 19s - loss: 0.3274 - acc: 0.9355 - val_loss: 0.3252 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 02361: val_acc did not improve from 0.94225\n",
+ "Epoch 2362/100000\n",
+ " - 18s - loss: 0.3240 - acc: 0.9369 - val_loss: 0.3307 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02362: val_acc did not improve from 0.94225\n",
+ "Epoch 2363/100000\n",
+ " - 19s - loss: 0.3254 - acc: 0.9358 - val_loss: 0.3076 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 02363: val_acc did not improve from 0.94225\n",
+ "Epoch 2364/100000\n",
+ " - 18s - loss: 0.3226 - acc: 0.9369 - val_loss: 0.3125 - val_acc: 0.9383\n",
+ "\n",
+ "Epoch 02364: val_acc did not improve from 0.94225\n",
+ "Epoch 2365/100000\n",
+ " - 19s - loss: 0.3251 - acc: 0.9369 - val_loss: 0.3119 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 02365: val_acc did not improve from 0.94225\n",
+ "Epoch 2366/100000\n",
+ " - 18s - loss: 0.3257 - acc: 0.9361 - val_loss: 0.3053 - val_acc: 0.9398\n",
+ "\n",
+ "Epoch 02366: val_acc did not improve from 0.94225\n",
+ "Epoch 2367/100000\n",
+ " - 19s - loss: 0.3239 - acc: 0.9366 - val_loss: 0.3149 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02367: val_acc did not improve from 0.94225\n",
+ "Epoch 2368/100000\n",
+ " - 18s - loss: 0.3247 - acc: 0.9362 - val_loss: 0.3220 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 02368: val_acc did not improve from 0.94225\n",
+ "Epoch 2369/100000\n",
+ " - 18s - loss: 0.3241 - acc: 0.9360 - val_loss: 0.4030 - val_acc: 0.8970\n",
+ "\n",
+ "Epoch 02369: val_acc did not improve from 0.94225\n",
+ "Epoch 2370/100000\n",
+ " - 19s - loss: 0.3237 - acc: 0.9370 - val_loss: 0.3140 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 02370: val_acc did not improve from 0.94225\n",
+ "Epoch 2371/100000\n",
+ " - 18s - loss: 0.3258 - acc: 0.9368 - val_loss: 0.3783 - val_acc: 0.8999\n",
+ "\n",
+ "Epoch 02371: val_acc did not improve from 0.94225\n",
+ "Epoch 2372/100000\n",
+ " - 18s - loss: 0.3260 - acc: 0.9358 - val_loss: 0.3245 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 02372: val_acc did not improve from 0.94225\n",
+ "Epoch 2373/100000\n",
+ " - 18s - loss: 0.3240 - acc: 0.9367 - val_loss: 0.3442 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 02373: val_acc did not improve from 0.94225\n",
+ "Epoch 2374/100000\n",
+ " - 18s - loss: 0.3256 - acc: 0.9365 - val_loss: 0.3210 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 02374: val_acc did not improve from 0.94225\n",
+ "Epoch 2375/100000\n",
+ " - 19s - loss: 0.3242 - acc: 0.9371 - val_loss: 0.4090 - val_acc: 0.8915\n",
+ "\n",
+ "Epoch 02375: val_acc did not improve from 0.94225\n",
+ "Epoch 2376/100000\n",
+ " - 19s - loss: 0.3238 - acc: 0.9366 - val_loss: 0.3131 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 02376: val_acc did not improve from 0.94225\n",
+ "Epoch 2377/100000\n",
+ " - 19s - loss: 0.3254 - acc: 0.9362 - val_loss: 0.3064 - val_acc: 0.9395\n",
+ "\n",
+ "Epoch 02377: val_acc did not improve from 0.94225\n",
+ "Epoch 2378/100000\n",
+ " - 19s - loss: 0.3244 - acc: 0.9360 - val_loss: 0.3132 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 02378: val_acc did not improve from 0.94225\n",
+ "Epoch 2379/100000\n",
+ " - 18s - loss: 0.3265 - acc: 0.9359 - val_loss: 0.3118 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 02379: val_acc did not improve from 0.94225\n",
+ "Epoch 2380/100000\n",
+ " - 19s - loss: 0.3250 - acc: 0.9368 - val_loss: 0.4455 - val_acc: 0.8694\n",
+ "\n",
+ "Epoch 02380: val_acc did not improve from 0.94225\n",
+ "Epoch 2381/100000\n",
+ " - 19s - loss: 0.3244 - acc: 0.9367 - val_loss: 0.3198 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 02381: val_acc did not improve from 0.94225\n",
+ "Epoch 2382/100000\n",
+ " - 19s - loss: 0.3260 - acc: 0.9361 - val_loss: 0.3162 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 02382: val_acc did not improve from 0.94225\n",
+ "Epoch 2383/100000\n",
+ " - 19s - loss: 0.3260 - acc: 0.9361 - val_loss: 0.3094 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 02383: val_acc did not improve from 0.94225\n",
+ "Epoch 2384/100000\n",
+ " - 19s - loss: 0.3239 - acc: 0.9370 - val_loss: 0.4408 - val_acc: 0.8690\n",
+ "\n",
+ "Epoch 02384: val_acc did not improve from 0.94225\n",
+ "Epoch 2385/100000\n",
+ " - 19s - loss: 0.3251 - acc: 0.9367 - val_loss: 0.3213 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 02385: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 02385: ReduceLROnPlateau reducing learning rate to 0.0005688000208465382.\n",
+ "Epoch 2386/100000\n",
+ " - 19s - loss: 0.3189 - acc: 0.9370 - val_loss: 0.3104 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 02386: val_acc did not improve from 0.94225\n",
+ "Epoch 2387/100000\n",
+ " - 19s - loss: 0.3187 - acc: 0.9371 - val_loss: 0.3102 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02387: val_acc did not improve from 0.94225\n",
+ "Epoch 2388/100000\n",
+ " - 18s - loss: 0.3234 - acc: 0.9357 - val_loss: 0.3174 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 02388: val_acc did not improve from 0.94225\n",
+ "Epoch 2389/100000\n",
+ " - 19s - loss: 0.3186 - acc: 0.9369 - val_loss: 0.3173 - val_acc: 0.9302\n",
+ "\n",
+ "Epoch 02389: val_acc did not improve from 0.94225\n",
+ "Epoch 2390/100000\n",
+ " - 19s - loss: 0.3207 - acc: 0.9365 - val_loss: 0.3082 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 02390: val_acc did not improve from 0.94225\n",
+ "Epoch 2391/100000\n",
+ " - 19s - loss: 0.3176 - acc: 0.9369 - val_loss: 0.3425 - val_acc: 0.9195\n",
+ "\n",
+ "Epoch 02391: val_acc did not improve from 0.94225\n",
+ "Epoch 2392/100000\n",
+ " - 19s - loss: 0.3192 - acc: 0.9362 - val_loss: 0.3515 - val_acc: 0.9172\n",
+ "\n",
+ "Epoch 02392: val_acc did not improve from 0.94225\n",
+ "Epoch 2393/100000\n",
+ " - 19s - loss: 0.3163 - acc: 0.9377 - val_loss: 0.3416 - val_acc: 0.9173\n",
+ "\n",
+ "Epoch 02393: val_acc did not improve from 0.94225\n",
+ "Epoch 2394/100000\n",
+ " - 19s - loss: 0.3180 - acc: 0.9365 - val_loss: 0.3120 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 02394: val_acc did not improve from 0.94225\n",
+ "Epoch 2395/100000\n",
+ " - 19s - loss: 0.3167 - acc: 0.9370 - val_loss: 0.3163 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 02395: val_acc did not improve from 0.94225\n",
+ "Epoch 2396/100000\n",
+ " - 19s - loss: 0.3163 - acc: 0.9374 - val_loss: 0.3053 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 02396: val_acc did not improve from 0.94225\n",
+ "Epoch 2397/100000\n",
+ " - 19s - loss: 0.3163 - acc: 0.9368 - val_loss: 0.3204 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 02397: val_acc did not improve from 0.94225\n",
+ "Epoch 2398/100000\n",
+ " - 19s - loss: 0.3176 - acc: 0.9365 - val_loss: 0.3200 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 02398: val_acc did not improve from 0.94225\n",
+ "Epoch 2399/100000\n",
+ " - 19s - loss: 0.3192 - acc: 0.9374 - val_loss: 0.3495 - val_acc: 0.9135\n",
+ "\n",
+ "Epoch 02399: val_acc did not improve from 0.94225\n",
+ "Epoch 2400/100000\n",
+ " - 18s - loss: 0.3227 - acc: 0.9357 - val_loss: 0.3385 - val_acc: 0.9238\n",
+ "\n",
+ "Epoch 02400: val_acc did not improve from 0.94225\n",
+ "Epoch 2401/100000\n",
+ " - 19s - loss: 0.3177 - acc: 0.9372 - val_loss: 0.5859 - val_acc: 0.7594\n",
+ "\n",
+ "Epoch 02401: val_acc did not improve from 0.94225\n",
+ "Epoch 2402/100000\n",
+ " - 18s - loss: 0.3167 - acc: 0.9375 - val_loss: 0.3520 - val_acc: 0.9121\n",
+ "\n",
+ "Epoch 02402: val_acc did not improve from 0.94225\n",
+ "Epoch 2403/100000\n",
+ " - 19s - loss: 0.3191 - acc: 0.9365 - val_loss: 0.3167 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 02403: val_acc did not improve from 0.94225\n",
+ "Epoch 2404/100000\n",
+ " - 19s - loss: 0.3185 - acc: 0.9367 - val_loss: 0.3530 - val_acc: 0.9152\n",
+ "\n",
+ "Epoch 02404: val_acc did not improve from 0.94225\n",
+ "Epoch 2405/100000\n",
+ " - 19s - loss: 0.3190 - acc: 0.9366 - val_loss: 0.5352 - val_acc: 0.8307\n",
+ "\n",
+ "Epoch 02405: val_acc did not improve from 0.94225\n",
+ "Epoch 2406/100000\n",
+ " - 19s - loss: 0.3207 - acc: 0.9368 - val_loss: 0.3196 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 02406: val_acc did not improve from 0.94225\n",
+ "Epoch 2407/100000\n",
+ " - 19s - loss: 0.3210 - acc: 0.9362 - val_loss: 0.3283 - val_acc: 0.9199\n",
+ "\n",
+ "Epoch 02407: val_acc did not improve from 0.94225\n",
+ "Epoch 2408/100000\n",
+ " - 19s - loss: 0.3196 - acc: 0.9366 - val_loss: 0.3149 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 02408: val_acc did not improve from 0.94225\n",
+ "Epoch 2409/100000\n",
+ " - 19s - loss: 0.3188 - acc: 0.9366 - val_loss: 0.3287 - val_acc: 0.9233\n",
+ "\n",
+ "Epoch 02409: val_acc did not improve from 0.94225\n",
+ "Epoch 2410/100000\n",
+ " - 20s - loss: 0.3207 - acc: 0.9361 - val_loss: 0.3108 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02410: val_acc did not improve from 0.94225\n",
+ "Epoch 2411/100000\n",
+ " - 19s - loss: 0.3188 - acc: 0.9369 - val_loss: 0.3259 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 02411: val_acc did not improve from 0.94225\n",
+ "Epoch 2412/100000\n",
+ " - 18s - loss: 0.3179 - acc: 0.9373 - val_loss: 0.3297 - val_acc: 0.9210\n",
+ "\n",
+ "Epoch 02412: val_acc did not improve from 0.94225\n",
+ "Epoch 2413/100000\n",
+ " - 19s - loss: 0.3207 - acc: 0.9360 - val_loss: 0.3523 - val_acc: 0.9188\n",
+ "\n",
+ "Epoch 02413: val_acc did not improve from 0.94225\n",
+ "Epoch 2414/100000\n",
+ " - 19s - loss: 0.3189 - acc: 0.9370 - val_loss: 0.3439 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 02414: val_acc did not improve from 0.94225\n",
+ "Epoch 2415/100000\n",
+ " - 18s - loss: 0.3197 - acc: 0.9366 - val_loss: 0.3244 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 02415: val_acc did not improve from 0.94225\n",
+ "Epoch 2416/100000\n",
+ " - 19s - loss: 0.3214 - acc: 0.9362 - val_loss: 0.3253 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 02416: val_acc did not improve from 0.94225\n",
+ "Epoch 2417/100000\n",
+ " - 18s - loss: 0.3189 - acc: 0.9369 - val_loss: 0.3104 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02417: val_acc did not improve from 0.94225\n",
+ "Epoch 2418/100000\n",
+ " - 19s - loss: 0.3179 - acc: 0.9370 - val_loss: 0.3061 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 02418: val_acc did not improve from 0.94225\n",
+ "Epoch 2419/100000\n",
+ " - 18s - loss: 0.3197 - acc: 0.9362 - val_loss: 0.3178 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 02419: val_acc did not improve from 0.94225\n",
+ "Epoch 2420/100000\n",
+ " - 19s - loss: 0.3219 - acc: 0.9356 - val_loss: 0.3272 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 02420: val_acc did not improve from 0.94225\n",
+ "Epoch 2421/100000\n",
+ " - 18s - loss: 0.3170 - acc: 0.9371 - val_loss: 0.5105 - val_acc: 0.8481\n",
+ "\n",
+ "Epoch 02421: val_acc did not improve from 0.94225\n",
+ "Epoch 2422/100000\n",
+ " - 19s - loss: 0.3178 - acc: 0.9370 - val_loss: 0.3109 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 02422: val_acc did not improve from 0.94225\n",
+ "Epoch 2423/100000\n",
+ " - 18s - loss: 0.3189 - acc: 0.9363 - val_loss: 0.3007 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 02423: val_acc did not improve from 0.94225\n",
+ "Epoch 2424/100000\n",
+ " - 19s - loss: 0.3183 - acc: 0.9369 - val_loss: 0.3033 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 02424: val_acc did not improve from 0.94225\n",
+ "Epoch 2425/100000\n",
+ " - 19s - loss: 0.3178 - acc: 0.9371 - val_loss: 0.3358 - val_acc: 0.9195\n",
+ "\n",
+ "Epoch 02425: val_acc did not improve from 0.94225\n",
+ "Epoch 2426/100000\n",
+ " - 19s - loss: 0.3187 - acc: 0.9367 - val_loss: 0.3137 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 02426: val_acc did not improve from 0.94225\n",
+ "Epoch 2427/100000\n",
+ " - 18s - loss: 0.3193 - acc: 0.9362 - val_loss: 0.3361 - val_acc: 0.9217\n",
+ "\n",
+ "Epoch 02427: val_acc did not improve from 0.94225\n",
+ "Epoch 2428/100000\n",
+ " - 19s - loss: 0.3202 - acc: 0.9360 - val_loss: 0.3120 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 02428: val_acc did not improve from 0.94225\n",
+ "Epoch 2429/100000\n",
+ " - 19s - loss: 0.3164 - acc: 0.9376 - val_loss: 0.3783 - val_acc: 0.8925\n",
+ "\n",
+ "Epoch 02429: val_acc did not improve from 0.94225\n",
+ "Epoch 2430/100000\n",
+ " - 18s - loss: 0.3187 - acc: 0.9373 - val_loss: 0.3086 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 02430: val_acc did not improve from 0.94225\n",
+ "Epoch 2431/100000\n",
+ " - 19s - loss: 0.3205 - acc: 0.9363 - val_loss: 0.3177 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 02431: val_acc did not improve from 0.94225\n",
+ "Epoch 2432/100000\n",
+ " - 19s - loss: 0.3164 - acc: 0.9372 - val_loss: 0.3266 - val_acc: 0.9232\n",
+ "\n",
+ "Epoch 02432: val_acc did not improve from 0.94225\n",
+ "Epoch 2433/100000\n",
+ " - 18s - loss: 0.3175 - acc: 0.9369 - val_loss: 0.3477 - val_acc: 0.9154\n",
+ "\n",
+ "Epoch 02433: val_acc did not improve from 0.94225\n",
+ "Epoch 2434/100000\n",
+ " - 18s - loss: 0.3196 - acc: 0.9361 - val_loss: 0.3883 - val_acc: 0.9255\n",
+ "\n",
+ "Epoch 02434: val_acc did not improve from 0.94225\n",
+ "Epoch 2435/100000\n",
+ " - 19s - loss: 0.3192 - acc: 0.9370 - val_loss: 0.3047 - val_acc: 0.9383\n",
+ "\n",
+ "Epoch 02435: val_acc did not improve from 0.94225\n",
+ "Epoch 2436/100000\n",
+ " - 18s - loss: 0.3188 - acc: 0.9367 - val_loss: 0.3071 - val_acc: 0.9395\n",
+ "\n",
+ "Epoch 02436: val_acc did not improve from 0.94225\n",
+ "Epoch 2437/100000\n",
+ " - 19s - loss: 0.3176 - acc: 0.9374 - val_loss: 0.3289 - val_acc: 0.9244\n",
+ "\n",
+ "Epoch 02437: val_acc did not improve from 0.94225\n",
+ "Epoch 2438/100000\n",
+ " - 19s - loss: 0.3193 - acc: 0.9367 - val_loss: 0.3056 - val_acc: 0.9398\n",
+ "\n",
+ "Epoch 02438: val_acc did not improve from 0.94225\n",
+ "Epoch 2439/100000\n",
+ " - 19s - loss: 0.3214 - acc: 0.9364 - val_loss: 0.3042 - val_acc: 0.9395\n",
+ "\n",
+ "Epoch 02439: val_acc did not improve from 0.94225\n",
+ "Epoch 2440/100000\n",
+ " - 19s - loss: 0.3187 - acc: 0.9366 - val_loss: 0.3043 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 02440: val_acc did not improve from 0.94225\n",
+ "Epoch 2441/100000\n",
+ " - 19s - loss: 0.3181 - acc: 0.9368 - val_loss: 0.3128 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 02441: val_acc did not improve from 0.94225\n",
+ "Epoch 2442/100000\n",
+ " - 19s - loss: 0.3218 - acc: 0.9355 - val_loss: 0.3077 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 02442: val_acc did not improve from 0.94225\n",
+ "Epoch 2443/100000\n",
+ " - 19s - loss: 0.3180 - acc: 0.9376 - val_loss: 0.3283 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 02450: val_acc did not improve from 0.94225\n",
+ "Epoch 2451/100000\n",
+ " - 19s - loss: 0.3184 - acc: 0.9372 - val_loss: 0.3024 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 02451: val_acc did not improve from 0.94225\n",
+ "Epoch 2452/100000\n",
+ " - 19s - loss: 0.3215 - acc: 0.9368 - val_loss: 0.3043 - val_acc: 0.9392\n",
+ "\n",
+ "Epoch 02452: val_acc did not improve from 0.94225\n",
+ "Epoch 2453/100000\n",
+ " - 18s - loss: 0.3176 - acc: 0.9372 - val_loss: 0.3059 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 02453: val_acc did not improve from 0.94225\n",
+ "Epoch 2454/100000\n",
+ " - 19s - loss: 0.3205 - acc: 0.9363 - val_loss: 0.3231 - val_acc: 0.9250\n",
+ "\n",
+ "Epoch 02454: val_acc did not improve from 0.94225\n",
+ "Epoch 2455/100000\n",
+ " - 18s - loss: 0.3179 - acc: 0.9373 - val_loss: 0.3436 - val_acc: 0.9173\n",
+ "\n",
+ "Epoch 02455: val_acc did not improve from 0.94225\n",
+ "Epoch 2456/100000\n",
+ " - 19s - loss: 0.3193 - acc: 0.9363 - val_loss: 0.3148 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 02456: val_acc did not improve from 0.94225\n",
+ "Epoch 2457/100000\n",
+ " - 18s - loss: 0.3187 - acc: 0.9369 - val_loss: 0.4035 - val_acc: 0.8955\n",
+ "\n",
+ "Epoch 02457: val_acc did not improve from 0.94225\n",
+ "Epoch 2458/100000\n",
+ " - 18s - loss: 0.3191 - acc: 0.9360 - val_loss: 0.3115 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02458: val_acc did not improve from 0.94225\n",
+ "Epoch 2459/100000\n",
+ " - 19s - loss: 0.3176 - acc: 0.9376 - val_loss: 0.3182 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 02459: val_acc did not improve from 0.94225\n",
+ "Epoch 2460/100000\n",
+ " - 19s - loss: 0.3194 - acc: 0.9360 - val_loss: 0.3146 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 02460: val_acc did not improve from 0.94225\n",
+ "Epoch 2461/100000\n",
+ " - 19s - loss: 0.3196 - acc: 0.9366 - val_loss: 0.3061 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 02461: val_acc did not improve from 0.94225\n",
+ "Epoch 2462/100000\n",
+ " - 19s - loss: 0.3198 - acc: 0.9363 - val_loss: 0.3128 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02462: val_acc did not improve from 0.94225\n",
+ "Epoch 2463/100000\n",
+ " - 19s - loss: 0.3173 - acc: 0.9372 - val_loss: 0.3250 - val_acc: 0.9272\n",
+ "\n",
+ "Epoch 02463: val_acc did not improve from 0.94225\n",
+ "Epoch 2464/100000\n",
+ " - 18s - loss: 0.3182 - acc: 0.9367 - val_loss: 0.3645 - val_acc: 0.9084\n",
+ "\n",
+ "Epoch 02464: val_acc did not improve from 0.94225\n",
+ "Epoch 2465/100000\n",
+ " - 19s - loss: 0.3159 - acc: 0.9376 - val_loss: 0.4317 - val_acc: 0.8795\n",
+ "\n",
+ "Epoch 02465: val_acc did not improve from 0.94225\n",
+ "Epoch 2466/100000\n",
+ " - 19s - loss: 0.3197 - acc: 0.9361 - val_loss: 0.3043 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 02466: val_acc did not improve from 0.94225\n",
+ "Epoch 2467/100000\n",
+ " - 18s - loss: 0.3171 - acc: 0.9376 - val_loss: 0.3035 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 02467: val_acc did not improve from 0.94225\n",
+ "Epoch 2468/100000\n",
+ " - 19s - loss: 0.3195 - acc: 0.9362 - val_loss: 0.3121 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 02468: val_acc did not improve from 0.94225\n",
+ "Epoch 2469/100000\n",
+ " - 19s - loss: 0.3179 - acc: 0.9373 - val_loss: 0.3193 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 02469: val_acc did not improve from 0.94225\n",
+ "Epoch 2470/100000\n",
+ " - 19s - loss: 0.3196 - acc: 0.9370 - val_loss: 0.3103 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 02470: val_acc did not improve from 0.94225\n",
+ "Epoch 2471/100000\n",
+ " - 19s - loss: 0.3191 - acc: 0.9361 - val_loss: 0.3274 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 02471: val_acc did not improve from 0.94225\n",
+ "Epoch 2472/100000\n",
+ " - 19s - loss: 0.3205 - acc: 0.9358 - val_loss: 0.3385 - val_acc: 0.9209\n",
+ "\n",
+ "Epoch 02472: val_acc did not improve from 0.94225\n",
+ "Epoch 2473/100000\n",
+ " - 18s - loss: 0.3213 - acc: 0.9366 - val_loss: 0.3099 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 02473: val_acc did not improve from 0.94225\n",
+ "Epoch 2474/100000\n",
+ " - 19s - loss: 0.3199 - acc: 0.9362 - val_loss: 0.3182 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 02474: val_acc did not improve from 0.94225\n",
+ "Epoch 2475/100000\n",
+ " - 19s - loss: 0.3200 - acc: 0.9373 - val_loss: 0.3374 - val_acc: 0.9257\n",
+ "\n",
+ "Epoch 02475: val_acc did not improve from 0.94225\n",
+ "Epoch 2476/100000\n",
+ " - 18s - loss: 0.3177 - acc: 0.9371 - val_loss: 0.3270 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 02476: val_acc did not improve from 0.94225\n",
+ "Epoch 2477/100000\n",
+ " - 19s - loss: 0.3177 - acc: 0.9367 - val_loss: 0.3843 - val_acc: 0.8978\n",
+ "\n",
+ "Epoch 02477: val_acc did not improve from 0.94225\n",
+ "Epoch 2478/100000\n",
+ " - 18s - loss: 0.3201 - acc: 0.9362 - val_loss: 0.3399 - val_acc: 0.9242\n",
+ "\n",
+ "Epoch 02478: val_acc did not improve from 0.94225\n",
+ "Epoch 2479/100000\n",
+ " - 19s - loss: 0.3190 - acc: 0.9363 - val_loss: 0.3342 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 02479: val_acc did not improve from 0.94225\n",
+ "Epoch 2480/100000\n",
+ " - 18s - loss: 0.3194 - acc: 0.9367 - val_loss: 0.3335 - val_acc: 0.9231\n",
+ "\n",
+ "Epoch 02480: val_acc did not improve from 0.94225\n",
+ "Epoch 2481/100000\n",
+ " - 19s - loss: 0.3179 - acc: 0.9373 - val_loss: 0.3182 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 02481: val_acc did not improve from 0.94225\n",
+ "Epoch 2482/100000\n",
+ " - 19s - loss: 0.3190 - acc: 0.9369 - val_loss: 0.3161 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02482: val_acc did not improve from 0.94225\n",
+ "Epoch 2483/100000\n",
+ " - 19s - loss: 0.3188 - acc: 0.9377 - val_loss: 0.3051 - val_acc: 0.9393\n",
+ "\n",
+ "Epoch 02483: val_acc did not improve from 0.94225\n",
+ "Epoch 2484/100000\n",
+ " - 19s - loss: 0.3180 - acc: 0.9378 - val_loss: 0.4424 - val_acc: 0.8918\n",
+ "\n",
+ "Epoch 02484: val_acc did not improve from 0.94225\n",
+ "Epoch 2485/100000\n",
+ " - 19s - loss: 0.3190 - acc: 0.9364 - val_loss: 0.3101 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 02485: val_acc did not improve from 0.94225\n",
+ "Epoch 2486/100000\n",
+ " - 19s - loss: 0.3201 - acc: 0.9355 - val_loss: 0.3231 - val_acc: 0.9279\n",
+ "\n",
+ "Epoch 02486: val_acc did not improve from 0.94225\n",
+ "Epoch 2487/100000\n",
+ " - 19s - loss: 0.3183 - acc: 0.9365 - val_loss: 0.3167 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 02487: val_acc did not improve from 0.94225\n",
+ "Epoch 2488/100000\n",
+ " - 19s - loss: 0.3175 - acc: 0.9372 - val_loss: 0.3213 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 02488: val_acc did not improve from 0.94225\n",
+ "Epoch 2489/100000\n",
+ " - 19s - loss: 0.3182 - acc: 0.9364 - val_loss: 0.3194 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 02489: val_acc did not improve from 0.94225\n",
+ "Epoch 2490/100000\n",
+ " - 19s - loss: 0.3189 - acc: 0.9366 - val_loss: 0.3181 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 02490: val_acc did not improve from 0.94225\n",
+ "Epoch 2491/100000\n",
+ " - 18s - loss: 0.3194 - acc: 0.9365 - val_loss: 0.3814 - val_acc: 0.8985\n",
+ "\n",
+ "Epoch 02491: val_acc did not improve from 0.94225\n",
+ "Epoch 2492/100000\n",
+ " - 19s - loss: 0.3226 - acc: 0.9359 - val_loss: 0.3085 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 02492: val_acc did not improve from 0.94225\n",
+ "Epoch 2493/100000\n",
+ " - 18s - loss: 0.3196 - acc: 0.9365 - val_loss: 0.3090 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 02493: val_acc did not improve from 0.94225\n",
+ "Epoch 2494/100000\n",
+ " - 19s - loss: 0.3193 - acc: 0.9369 - val_loss: 0.3331 - val_acc: 0.9209\n",
+ "\n",
+ "Epoch 02494: val_acc did not improve from 0.94225\n",
+ "Epoch 2495/100000\n",
+ " - 18s - loss: 0.3178 - acc: 0.9368 - val_loss: 0.3256 - val_acc: 0.9243\n",
+ "\n",
+ "Epoch 02495: val_acc did not improve from 0.94225\n",
+ "Epoch 2496/100000\n",
+ " - 19s - loss: 0.3161 - acc: 0.9371 - val_loss: 0.3214 - val_acc: 0.9286\n",
+ "\n",
+ "Epoch 02496: val_acc did not improve from 0.94225\n",
+ "Epoch 2497/100000\n",
+ " - 19s - loss: 0.3173 - acc: 0.9378 - val_loss: 0.3406 - val_acc: 0.9238\n",
+ "\n",
+ "Epoch 02497: val_acc did not improve from 0.94225\n",
+ "Epoch 2498/100000\n",
+ " - 19s - loss: 0.3183 - acc: 0.9369 - val_loss: 0.3650 - val_acc: 0.9019\n",
+ "\n",
+ "Epoch 02498: val_acc did not improve from 0.94225\n",
+ "Epoch 2499/100000\n",
+ " - 18s - loss: 0.3168 - acc: 0.9371 - val_loss: 0.3015 - val_acc: 0.9396\n",
+ "\n",
+ "Epoch 02499: val_acc did not improve from 0.94225\n",
+ "Epoch 2500/100000\n",
+ " - 19s - loss: 0.3189 - acc: 0.9367 - val_loss: 0.4135 - val_acc: 0.8846\n",
+ "\n",
+ "Epoch 02500: val_acc did not improve from 0.94225\n",
+ "Epoch 2501/100000\n",
+ " - 18s - loss: 0.3196 - acc: 0.9364 - val_loss: 0.3237 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 02501: val_acc did not improve from 0.94225\n",
+ "Epoch 2502/100000\n",
+ " - 19s - loss: 0.3202 - acc: 0.9360 - val_loss: 0.3122 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 02502: val_acc did not improve from 0.94225\n",
+ "Epoch 2503/100000\n",
+ " - 18s - loss: 0.3184 - acc: 0.9364 - val_loss: 0.3195 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 02503: val_acc did not improve from 0.94225\n",
+ "Epoch 2504/100000\n",
+ " - 19s - loss: 0.3182 - acc: 0.9367 - val_loss: 0.3176 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02504: val_acc did not improve from 0.94225\n",
+ "Epoch 2505/100000\n",
+ " - 18s - loss: 0.3213 - acc: 0.9364 - val_loss: 0.3554 - val_acc: 0.9137\n",
+ "\n",
+ "Epoch 02505: val_acc did not improve from 0.94225\n",
+ "Epoch 2506/100000\n",
+ " - 18s - loss: 0.3197 - acc: 0.9363 - val_loss: 0.3732 - val_acc: 0.8995\n",
+ "\n",
+ "Epoch 02506: val_acc did not improve from 0.94225\n",
+ "Epoch 2507/100000\n",
+ " - 18s - loss: 0.3192 - acc: 0.9366 - val_loss: 0.3578 - val_acc: 0.9034\n",
+ "\n",
+ "Epoch 02507: val_acc did not improve from 0.94225\n",
+ "Epoch 2508/100000\n",
+ " - 18s - loss: 0.3177 - acc: 0.9370 - val_loss: 0.3114 - val_acc: 0.9323\n",
+ "\n",
+ "Epoch 02508: val_acc did not improve from 0.94225\n",
+ "Epoch 2509/100000\n",
+ " - 19s - loss: 0.3219 - acc: 0.9358 - val_loss: 0.3391 - val_acc: 0.9202\n",
+ "\n",
+ "Epoch 02509: val_acc did not improve from 0.94225\n",
+ "Epoch 2510/100000\n",
+ " - 18s - loss: 0.3176 - acc: 0.9373 - val_loss: 0.3204 - val_acc: 0.9280\n",
+ "\n",
+ "Epoch 02510: val_acc did not improve from 0.94225\n",
+ "Epoch 2511/100000\n",
+ " - 19s - loss: 0.3175 - acc: 0.9369 - val_loss: 0.3183 - val_acc: 0.9280\n",
+ "\n",
+ "Epoch 02511: val_acc did not improve from 0.94225\n",
+ "Epoch 2512/100000\n",
+ " - 18s - loss: 0.3191 - acc: 0.9365 - val_loss: 0.3042 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02512: val_acc did not improve from 0.94225\n",
+ "Epoch 2513/100000\n",
+ " - 19s - loss: 0.3193 - acc: 0.9363 - val_loss: 0.3259 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 02513: val_acc did not improve from 0.94225\n",
+ "Epoch 2514/100000\n",
+ " - 18s - loss: 0.3186 - acc: 0.9366 - val_loss: 0.3555 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 02514: val_acc did not improve from 0.94225\n",
+ "Epoch 2515/100000\n",
+ " - 19s - loss: 0.3166 - acc: 0.9374 - val_loss: 0.3891 - val_acc: 0.8972\n",
+ "\n",
+ "Epoch 02515: val_acc did not improve from 0.94225\n",
+ "Epoch 2516/100000\n",
+ " - 19s - loss: 0.3209 - acc: 0.9362 - val_loss: 0.3199 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 02516: val_acc did not improve from 0.94225\n",
+ "Epoch 2517/100000\n",
+ " - 19s - loss: 0.3201 - acc: 0.9366 - val_loss: 0.3776 - val_acc: 0.9176\n",
+ "\n",
+ "Epoch 02517: val_acc did not improve from 0.94225\n",
+ "Epoch 2518/100000\n",
+ " - 19s - loss: 0.3172 - acc: 0.9372 - val_loss: 0.3152 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02518: val_acc did not improve from 0.94225\n",
+ "Epoch 2519/100000\n",
+ " - 18s - loss: 0.3169 - acc: 0.9375 - val_loss: 0.3149 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 02519: val_acc did not improve from 0.94225\n",
+ "Epoch 2520/100000\n",
+ " - 19s - loss: 0.3182 - acc: 0.9369 - val_loss: 0.3119 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 02520: val_acc did not improve from 0.94225\n",
+ "Epoch 2521/100000\n",
+ " - 19s - loss: 0.3189 - acc: 0.9366 - val_loss: 0.3394 - val_acc: 0.9187\n",
+ "\n",
+ "Epoch 02521: val_acc did not improve from 0.94225\n",
+ "Epoch 2522/100000\n",
+ " - 19s - loss: 0.3215 - acc: 0.9351 - val_loss: 0.3518 - val_acc: 0.9119\n",
+ "\n",
+ "Epoch 02522: val_acc did not improve from 0.94225\n",
+ "Epoch 2523/100000\n",
+ " - 19s - loss: 0.3206 - acc: 0.9362 - val_loss: 0.3932 - val_acc: 0.8890\n",
+ "\n",
+ "Epoch 02523: val_acc did not improve from 0.94225\n",
+ "Epoch 2524/100000\n",
+ " - 18s - loss: 0.3184 - acc: 0.9367 - val_loss: 0.3784 - val_acc: 0.8944\n",
+ "\n",
+ "Epoch 02524: val_acc did not improve from 0.94225\n",
+ "Epoch 2525/100000\n",
+ " - 19s - loss: 0.3189 - acc: 0.9367 - val_loss: 0.3058 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 02525: val_acc did not improve from 0.94225\n",
+ "Epoch 2526/100000\n",
+ " - 18s - loss: 0.3210 - acc: 0.9360 - val_loss: 0.3156 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 02526: val_acc did not improve from 0.94225\n",
+ "Epoch 2527/100000\n",
+ " - 19s - loss: 0.3173 - acc: 0.9369 - val_loss: 0.3069 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02527: val_acc did not improve from 0.94225\n",
+ "Epoch 2528/100000\n",
+ " - 19s - loss: 0.3178 - acc: 0.9369 - val_loss: 0.3450 - val_acc: 0.9140\n",
+ "\n",
+ "Epoch 02528: val_acc did not improve from 0.94225\n",
+ "Epoch 2529/100000\n",
+ " - 19s - loss: 0.3171 - acc: 0.9371 - val_loss: 0.3430 - val_acc: 0.9154\n",
+ "\n",
+ "Epoch 02529: val_acc did not improve from 0.94225\n",
+ "Epoch 2530/100000\n",
+ " - 18s - loss: 0.3191 - acc: 0.9364 - val_loss: 0.3154 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 02530: val_acc did not improve from 0.94225\n",
+ "Epoch 2531/100000\n",
+ " - 19s - loss: 0.3204 - acc: 0.9368 - val_loss: 0.3671 - val_acc: 0.9059\n",
+ "\n",
+ "Epoch 02531: val_acc did not improve from 0.94225\n",
+ "Epoch 2532/100000\n",
+ " - 18s - loss: 0.3197 - acc: 0.9362 - val_loss: 0.3365 - val_acc: 0.9186\n",
+ "\n",
+ "Epoch 02532: val_acc did not improve from 0.94225\n",
+ "Epoch 2533/100000\n",
+ " - 19s - loss: 0.3220 - acc: 0.9359 - val_loss: 0.3199 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 02533: val_acc did not improve from 0.94225\n",
+ "Epoch 2534/100000\n",
+ " - 18s - loss: 0.3180 - acc: 0.9367 - val_loss: 0.4454 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 02534: val_acc did not improve from 0.94225\n",
+ "Epoch 2535/100000\n",
+ " - 19s - loss: 0.3197 - acc: 0.9366 - val_loss: 0.3290 - val_acc: 0.9254\n",
+ "\n",
+ "Epoch 02535: val_acc did not improve from 0.94225\n",
+ "Epoch 2536/100000\n",
+ " - 19s - loss: 0.3204 - acc: 0.9364 - val_loss: 0.3182 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 02536: val_acc did not improve from 0.94225\n",
+ "Epoch 2537/100000\n",
+ " - 18s - loss: 0.3194 - acc: 0.9367 - val_loss: 0.3100 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02537: val_acc did not improve from 0.94225\n",
+ "Epoch 2538/100000\n",
+ " - 19s - loss: 0.3177 - acc: 0.9369 - val_loss: 0.3170 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 02538: val_acc did not improve from 0.94225\n",
+ "Epoch 2539/100000\n",
+ " - 18s - loss: 0.3212 - acc: 0.9361 - val_loss: 0.3176 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 02539: val_acc did not improve from 0.94225\n",
+ "Epoch 2540/100000\n",
+ " - 19s - loss: 0.3172 - acc: 0.9371 - val_loss: 0.3248 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 02540: val_acc did not improve from 0.94225\n",
+ "Epoch 2541/100000\n",
+ " - 18s - loss: 0.3196 - acc: 0.9362 - val_loss: 0.3491 - val_acc: 0.9134\n",
+ "\n",
+ "Epoch 02541: val_acc did not improve from 0.94225\n",
+ "Epoch 2542/100000\n",
+ " - 19s - loss: 0.3179 - acc: 0.9366 - val_loss: 0.3156 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 02542: val_acc did not improve from 0.94225\n",
+ "Epoch 2543/100000\n",
+ " - 19s - loss: 0.3209 - acc: 0.9359 - val_loss: 0.3072 - val_acc: 0.9377\n",
+ "\n",
+ "Epoch 02543: val_acc did not improve from 0.94225\n",
+ "Epoch 2544/100000\n",
+ " - 18s - loss: 0.3196 - acc: 0.9372 - val_loss: 0.3204 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 02544: val_acc did not improve from 0.94225\n",
+ "Epoch 2545/100000\n",
+ " - 19s - loss: 0.3189 - acc: 0.9372 - val_loss: 0.3655 - val_acc: 0.9094\n",
+ "\n",
+ "Epoch 02545: val_acc did not improve from 0.94225\n",
+ "Epoch 2546/100000\n",
+ " - 18s - loss: 0.3166 - acc: 0.9370 - val_loss: 0.3452 - val_acc: 0.9208\n",
+ "\n",
+ "Epoch 02546: val_acc did not improve from 0.94225\n",
+ "Epoch 2547/100000\n",
+ " - 19s - loss: 0.3183 - acc: 0.9368 - val_loss: 0.3289 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 02547: val_acc did not improve from 0.94225\n",
+ "Epoch 2548/100000\n",
+ " - 18s - loss: 0.3196 - acc: 0.9361 - val_loss: 0.3794 - val_acc: 0.9026\n",
+ "\n",
+ "Epoch 02548: val_acc did not improve from 0.94225\n",
+ "Epoch 2549/100000\n",
+ " - 19s - loss: 0.3189 - acc: 0.9367 - val_loss: 0.3261 - val_acc: 0.9262\n",
+ "\n",
+ "Epoch 02549: val_acc did not improve from 0.94225\n",
+ "Epoch 2550/100000\n",
+ " - 18s - loss: 0.3185 - acc: 0.9363 - val_loss: 0.3037 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02550: val_acc did not improve from 0.94225\n",
+ "Epoch 2551/100000\n",
+ " - 19s - loss: 0.3196 - acc: 0.9367 - val_loss: 0.3156 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 02551: val_acc did not improve from 0.94225\n",
+ "Epoch 2552/100000\n",
+ " - 19s - loss: 0.3171 - acc: 0.9372 - val_loss: 0.3165 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 02552: val_acc did not improve from 0.94225\n",
+ "Epoch 2553/100000\n",
+ " - 19s - loss: 0.3215 - acc: 0.9362 - val_loss: 0.3076 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 02553: val_acc did not improve from 0.94225\n",
+ "Epoch 2554/100000\n",
+ " - 19s - loss: 0.3209 - acc: 0.9361 - val_loss: 0.3035 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02554: val_acc did not improve from 0.94225\n",
+ "Epoch 2555/100000\n",
+ " - 19s - loss: 0.3207 - acc: 0.9368 - val_loss: 0.3473 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 02555: val_acc did not improve from 0.94225\n",
+ "Epoch 2556/100000\n",
+ " - 19s - loss: 0.3190 - acc: 0.9367 - val_loss: 0.3064 - val_acc: 0.9399\n",
+ "\n",
+ "Epoch 02556: val_acc did not improve from 0.94225\n",
+ "Epoch 2557/100000\n",
+ " - 19s - loss: 0.3168 - acc: 0.9375 - val_loss: 0.3202 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 02557: val_acc did not improve from 0.94225\n",
+ "Epoch 2558/100000\n",
+ " - 19s - loss: 0.3183 - acc: 0.9366 - val_loss: 0.3630 - val_acc: 0.9061\n",
+ "\n",
+ "Epoch 02558: val_acc did not improve from 0.94225\n",
+ "Epoch 2559/100000\n",
+ " - 18s - loss: 0.3177 - acc: 0.9366 - val_loss: 0.3669 - val_acc: 0.8989\n",
+ "\n",
+ "Epoch 02559: val_acc did not improve from 0.94225\n",
+ "Epoch 2560/100000\n",
+ " - 19s - loss: 0.3197 - acc: 0.9366 - val_loss: 0.3121 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 02560: val_acc did not improve from 0.94225\n",
+ "Epoch 2561/100000\n",
+ " - 18s - loss: 0.3185 - acc: 0.9368 - val_loss: 0.3179 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 02561: val_acc did not improve from 0.94225\n",
+ "Epoch 2562/100000\n",
+ " - 19s - loss: 0.3210 - acc: 0.9360 - val_loss: 0.4296 - val_acc: 0.8792\n",
+ "\n",
+ "Epoch 02562: val_acc did not improve from 0.94225\n",
+ "Epoch 2563/100000\n",
+ " - 18s - loss: 0.3189 - acc: 0.9368 - val_loss: 0.3085 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 02563: val_acc did not improve from 0.94225\n",
+ "\n",
+ "Epoch 02563: ReduceLROnPlateau reducing learning rate to 0.0005403600225690752.\n",
+ "Epoch 2564/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9368 - val_loss: 0.3077 - val_acc: 0.9390\n",
+ "\n",
+ "Epoch 02564: val_acc did not improve from 0.94225\n",
+ "Epoch 2565/100000\n",
+ " - 19s - loss: 0.3121 - acc: 0.9376 - val_loss: 0.3070 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02565: val_acc did not improve from 0.94225\n",
+ "Epoch 2566/100000\n",
+ " - 18s - loss: 0.3140 - acc: 0.9365 - val_loss: 0.3828 - val_acc: 0.8967\n",
+ "\n",
+ "Epoch 02566: val_acc did not improve from 0.94225\n",
+ "Epoch 2567/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9370 - val_loss: 0.3054 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 02567: val_acc did not improve from 0.94225\n",
+ "Epoch 2568/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9374 - val_loss: 0.3100 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 02568: val_acc did not improve from 0.94225\n",
+ "Epoch 2569/100000\n",
+ " - 19s - loss: 0.3123 - acc: 0.9371 - val_loss: 0.2991 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02569: val_acc did not improve from 0.94225\n",
+ "Epoch 2570/100000\n",
+ " - 19s - loss: 0.3111 - acc: 0.9373 - val_loss: 0.3394 - val_acc: 0.9198\n",
+ "\n",
+ "Epoch 02570: val_acc did not improve from 0.94225\n",
+ "Epoch 2571/100000\n",
+ " - 19s - loss: 0.3141 - acc: 0.9364 - val_loss: 0.3048 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 02571: val_acc did not improve from 0.94225\n",
+ "Epoch 2572/100000\n",
+ " - 18s - loss: 0.3109 - acc: 0.9376 - val_loss: 0.5494 - val_acc: 0.8317\n",
+ "\n",
+ "Epoch 02572: val_acc did not improve from 0.94225\n",
+ "Epoch 2573/100000\n",
+ " - 19s - loss: 0.3132 - acc: 0.9365 - val_loss: 0.3078 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 02573: val_acc did not improve from 0.94225\n",
+ "Epoch 2574/100000\n",
+ " - 19s - loss: 0.3165 - acc: 0.9360 - val_loss: 0.2965 - val_acc: 0.9391\n",
+ "\n",
+ "Epoch 02574: val_acc did not improve from 0.94225\n",
+ "Epoch 2575/100000\n",
+ " - 18s - loss: 0.3119 - acc: 0.9374 - val_loss: 0.3079 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 02575: val_acc did not improve from 0.94225\n",
+ "Epoch 2576/100000\n",
+ " - 19s - loss: 0.3110 - acc: 0.9376 - val_loss: 0.3965 - val_acc: 0.8843\n",
+ "\n",
+ "Epoch 02576: val_acc did not improve from 0.94225\n",
+ "Epoch 2577/100000\n",
+ " - 19s - loss: 0.3140 - acc: 0.9368 - val_loss: 0.3188 - val_acc: 0.9256\n",
+ "\n",
+ "Epoch 02577: val_acc did not improve from 0.94225\n",
+ "Epoch 2578/100000\n",
+ " - 19s - loss: 0.3134 - acc: 0.9368 - val_loss: 0.3076 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 02578: val_acc did not improve from 0.94225\n",
+ "Epoch 2579/100000\n",
+ " - 19s - loss: 0.3133 - acc: 0.9365 - val_loss: 0.3092 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02579: val_acc did not improve from 0.94225\n",
+ "Epoch 2580/100000\n",
+ " - 19s - loss: 0.3134 - acc: 0.9371 - val_loss: 0.3713 - val_acc: 0.8988\n",
+ "\n",
+ "Epoch 02580: val_acc did not improve from 0.94225\n",
+ "Epoch 2581/100000\n",
+ " - 19s - loss: 0.3121 - acc: 0.9370 - val_loss: 0.3088 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 02581: val_acc did not improve from 0.94225\n",
+ "Epoch 2582/100000\n",
+ " - 19s - loss: 0.3156 - acc: 0.9365 - val_loss: 0.3302 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 02582: val_acc did not improve from 0.94225\n",
+ "Epoch 2583/100000\n",
+ " - 19s - loss: 0.3133 - acc: 0.9378 - val_loss: 0.3162 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 02583: val_acc did not improve from 0.94225\n",
+ "Epoch 2584/100000\n",
+ " - 19s - loss: 0.3135 - acc: 0.9373 - val_loss: 0.3061 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 02584: val_acc did not improve from 0.94225\n",
+ "Epoch 2585/100000\n",
+ " - 19s - loss: 0.3131 - acc: 0.9372 - val_loss: 0.3279 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 02585: val_acc did not improve from 0.94225\n",
+ "Epoch 2586/100000\n",
+ " - 19s - loss: 0.3137 - acc: 0.9363 - val_loss: 0.3726 - val_acc: 0.9076\n",
+ "\n",
+ "Epoch 02586: val_acc did not improve from 0.94225\n",
+ "Epoch 2587/100000\n",
+ " - 19s - loss: 0.3115 - acc: 0.9371 - val_loss: 0.3067 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 02587: val_acc did not improve from 0.94225\n",
+ "Epoch 2588/100000\n",
+ " - 19s - loss: 0.3136 - acc: 0.9365 - val_loss: 0.3073 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02588: val_acc did not improve from 0.94225\n",
+ "Epoch 2589/100000\n",
+ " - 18s - loss: 0.3128 - acc: 0.9370 - val_loss: 0.3091 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 02589: val_acc did not improve from 0.94225\n",
+ "Epoch 2590/100000\n",
+ " - 19s - loss: 0.3132 - acc: 0.9368 - val_loss: 0.5209 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 02590: val_acc did not improve from 0.94225\n",
+ "Epoch 2591/100000\n",
+ " - 19s - loss: 0.3120 - acc: 0.9366 - val_loss: 0.6249 - val_acc: 0.7502\n",
+ "\n",
+ "Epoch 02591: val_acc did not improve from 0.94225\n",
+ "Epoch 2592/100000\n",
+ " - 18s - loss: 0.3107 - acc: 0.9376 - val_loss: 0.3094 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 02592: val_acc did not improve from 0.94225\n",
+ "Epoch 2593/100000\n",
+ " - 18s - loss: 0.3150 - acc: 0.9362 - val_loss: 0.3023 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02593: val_acc did not improve from 0.94225\n",
+ "Epoch 2594/100000\n",
+ " - 18s - loss: 0.3139 - acc: 0.9366 - val_loss: 0.3176 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 02594: val_acc did not improve from 0.94225\n",
+ "Epoch 2595/100000\n",
+ " - 19s - loss: 0.3120 - acc: 0.9364 - val_loss: 0.3192 - val_acc: 0.9260\n",
+ "\n",
+ "Epoch 02595: val_acc did not improve from 0.94225\n",
+ "Epoch 2596/100000\n",
+ " - 19s - loss: 0.3132 - acc: 0.9368 - val_loss: 0.2980 - val_acc: 0.9382\n",
+ "\n",
+ "Epoch 02596: val_acc did not improve from 0.94225\n",
+ "Epoch 2597/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9370 - val_loss: 0.2959 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 02597: val_acc did not improve from 0.94225\n",
+ "Epoch 2598/100000\n",
+ " - 19s - loss: 0.3108 - acc: 0.9373 - val_loss: 0.3049 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 02598: val_acc did not improve from 0.94225\n",
+ "Epoch 2599/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9372 - val_loss: 0.2989 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 02599: val_acc did not improve from 0.94225\n",
+ "Epoch 2600/100000\n",
+ " - 18s - loss: 0.3112 - acc: 0.9376 - val_loss: 0.2944 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 02600: val_acc did not improve from 0.94225\n",
+ "Epoch 2601/100000\n",
+ " - 19s - loss: 0.3116 - acc: 0.9372 - val_loss: 0.3588 - val_acc: 0.9061\n",
+ "\n",
+ "Epoch 02601: val_acc did not improve from 0.94225\n",
+ "Epoch 2602/100000\n",
+ " - 19s - loss: 0.3130 - acc: 0.9370 - val_loss: 0.2978 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 02602: val_acc did not improve from 0.94225\n",
+ "Epoch 2603/100000\n",
+ " - 18s - loss: 0.3159 - acc: 0.9373 - val_loss: 0.3515 - val_acc: 0.9122\n",
+ "\n",
+ "Epoch 02603: val_acc did not improve from 0.94225\n",
+ "Epoch 2604/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9372 - val_loss: 0.3017 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02604: val_acc did not improve from 0.94225\n",
+ "Epoch 2605/100000\n",
+ " - 18s - loss: 0.3120 - acc: 0.9372 - val_loss: 0.3148 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 02605: val_acc did not improve from 0.94225\n",
+ "Epoch 2606/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9374 - val_loss: 0.3068 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 02606: val_acc did not improve from 0.94225\n",
+ "Epoch 2607/100000\n",
+ " - 19s - loss: 0.3121 - acc: 0.9372 - val_loss: 0.2954 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 02607: val_acc did not improve from 0.94225\n",
+ "Epoch 2608/100000\n",
+ " - 18s - loss: 0.3105 - acc: 0.9374 - val_loss: 0.3119 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 02608: val_acc did not improve from 0.94225\n",
+ "Epoch 2609/100000\n",
+ " - 19s - loss: 0.3158 - acc: 0.9353 - val_loss: 0.3030 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 02609: val_acc did not improve from 0.94225\n",
+ "Epoch 2610/100000\n",
+ " - 18s - loss: 0.3121 - acc: 0.9368 - val_loss: 0.3086 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02610: val_acc did not improve from 0.94225\n",
+ "Epoch 2611/100000\n",
+ " - 18s - loss: 0.3125 - acc: 0.9369 - val_loss: 0.3218 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02611: val_acc did not improve from 0.94225\n",
+ "Epoch 2612/100000\n",
+ " - 19s - loss: 0.3131 - acc: 0.9369 - val_loss: 0.3096 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 02612: val_acc did not improve from 0.94225\n",
+ "Epoch 2613/100000\n",
+ " - 18s - loss: 0.3118 - acc: 0.9372 - val_loss: 0.2978 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 02613: val_acc did not improve from 0.94225\n",
+ "Epoch 2614/100000\n",
+ " - 19s - loss: 0.3108 - acc: 0.9369 - val_loss: 0.3082 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 02614: val_acc did not improve from 0.94225\n",
+ "Epoch 2615/100000\n",
+ " - 18s - loss: 0.3126 - acc: 0.9369 - val_loss: 0.2999 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 02615: val_acc did not improve from 0.94225\n",
+ "Epoch 2616/100000\n",
+ " - 19s - loss: 0.3115 - acc: 0.9371 - val_loss: 0.3155 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 02616: val_acc did not improve from 0.94225\n",
+ "Epoch 2617/100000\n",
+ " - 18s - loss: 0.3120 - acc: 0.9371 - val_loss: 0.4312 - val_acc: 0.8804\n",
+ "\n",
+ "Epoch 02617: val_acc did not improve from 0.94225\n",
+ "Epoch 2618/100000\n",
+ " - 19s - loss: 0.3120 - acc: 0.9372 - val_loss: 0.3158 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 02618: val_acc did not improve from 0.94225\n",
+ "Epoch 2619/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9365 - val_loss: 0.3003 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 02619: val_acc did not improve from 0.94225\n",
+ "Epoch 2620/100000\n",
+ " - 19s - loss: 0.3121 - acc: 0.9371 - val_loss: 0.3935 - val_acc: 0.9166\n",
+ "\n",
+ "Epoch 02620: val_acc did not improve from 0.94225\n",
+ "Epoch 2621/100000\n",
+ " - 18s - loss: 0.3142 - acc: 0.9368 - val_loss: 0.3503 - val_acc: 0.9100\n",
+ "\n",
+ "Epoch 02621: val_acc did not improve from 0.94225\n",
+ "Epoch 2622/100000\n",
+ " - 19s - loss: 0.3137 - acc: 0.9369 - val_loss: 0.3128 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 02622: val_acc did not improve from 0.94225\n",
+ "Epoch 2623/100000\n",
+ " - 19s - loss: 0.3098 - acc: 0.9375 - val_loss: 0.3708 - val_acc: 0.9012\n",
+ "\n",
+ "Epoch 02623: val_acc did not improve from 0.94225\n",
+ "Epoch 2624/100000\n",
+ " - 19s - loss: 0.3141 - acc: 0.9364 - val_loss: 0.3036 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02624: val_acc did not improve from 0.94225\n",
+ "Epoch 2625/100000\n",
+ " - 18s - loss: 0.3127 - acc: 0.9367 - val_loss: 0.3130 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 02625: val_acc did not improve from 0.94225\n",
+ "Epoch 2626/100000\n",
+ " - 19s - loss: 0.3141 - acc: 0.9367 - val_loss: 0.3643 - val_acc: 0.9112\n",
+ "\n",
+ "Epoch 02626: val_acc did not improve from 0.94225\n",
+ "Epoch 2627/100000\n",
+ " - 19s - loss: 0.3121 - acc: 0.9370 - val_loss: 0.3113 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 02627: val_acc did not improve from 0.94225\n",
+ "Epoch 2628/100000\n",
+ " - 19s - loss: 0.3121 - acc: 0.9364 - val_loss: 0.4512 - val_acc: 0.8644\n",
+ "\n",
+ "Epoch 02628: val_acc did not improve from 0.94225\n",
+ "Epoch 2629/100000\n",
+ " - 18s - loss: 0.3162 - acc: 0.9366 - val_loss: 0.3039 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 02629: val_acc did not improve from 0.94225\n",
+ "Epoch 2630/100000\n",
+ " - 19s - loss: 0.3122 - acc: 0.9373 - val_loss: 0.3103 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 02630: val_acc did not improve from 0.94225\n",
+ "Epoch 2631/100000\n",
+ " - 18s - loss: 0.3133 - acc: 0.9367 - val_loss: 0.3025 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 02631: val_acc did not improve from 0.94225\n",
+ "Epoch 2632/100000\n",
+ " - 19s - loss: 0.3113 - acc: 0.9368 - val_loss: 0.3606 - val_acc: 0.9054\n",
+ "\n",
+ "Epoch 02632: val_acc did not improve from 0.94225\n",
+ "Epoch 2633/100000\n",
+ " - 18s - loss: 0.3122 - acc: 0.9368 - val_loss: 0.3255 - val_acc: 0.9247\n",
+ "\n",
+ "Epoch 02633: val_acc did not improve from 0.94225\n",
+ "Epoch 2634/100000\n",
+ " - 19s - loss: 0.3135 - acc: 0.9365 - val_loss: 0.3147 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 02634: val_acc did not improve from 0.94225\n",
+ "Epoch 2635/100000\n",
+ " - 18s - loss: 0.3130 - acc: 0.9368 - val_loss: 0.3008 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 02635: val_acc did not improve from 0.94225\n",
+ "Epoch 2636/100000\n",
+ " - 19s - loss: 0.3129 - acc: 0.9374 - val_loss: 0.3185 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 02636: val_acc did not improve from 0.94225\n",
+ "Epoch 2637/100000\n",
+ " - 19s - loss: 0.3120 - acc: 0.9372 - val_loss: 0.3793 - val_acc: 0.9158\n",
+ "\n",
+ "Epoch 02637: val_acc did not improve from 0.94225\n",
+ "Epoch 2638/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9375 - val_loss: 0.3137 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 02638: val_acc did not improve from 0.94225\n",
+ "Epoch 2639/100000\n",
+ " - 20s - loss: 0.3122 - acc: 0.9370 - val_loss: 0.3182 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 02639: val_acc did not improve from 0.94225\n",
+ "Epoch 2640/100000\n",
+ " - 19s - loss: 0.3124 - acc: 0.9374 - val_loss: 0.3005 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 02640: val_acc did not improve from 0.94225\n",
+ "Epoch 2641/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9362 - val_loss: 0.3311 - val_acc: 0.9202\n",
+ "\n",
+ "Epoch 02641: val_acc did not improve from 0.94225\n",
+ "Epoch 2642/100000\n",
+ " - 19s - loss: 0.3111 - acc: 0.9379 - val_loss: 0.3676 - val_acc: 0.9032\n",
+ "\n",
+ "Epoch 02642: val_acc did not improve from 0.94225\n",
+ "Epoch 2643/100000\n",
+ " - 19s - loss: 0.3137 - acc: 0.9372 - val_loss: 0.7344 - val_acc: 0.6719\n",
+ "\n",
+ "Epoch 02643: val_acc did not improve from 0.94225\n",
+ "Epoch 2644/100000\n",
+ " - 19s - loss: 0.3141 - acc: 0.9370 - val_loss: 0.3049 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 02644: val_acc did not improve from 0.94225\n",
+ "Epoch 2645/100000\n",
+ " - 18s - loss: 0.3141 - acc: 0.9368 - val_loss: 0.3411 - val_acc: 0.9197\n",
+ "\n",
+ "Epoch 02645: val_acc did not improve from 0.94225\n",
+ "Epoch 2646/100000\n",
+ " - 19s - loss: 0.3138 - acc: 0.9371 - val_loss: 0.3001 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 02646: val_acc did not improve from 0.94225\n",
+ "Epoch 2647/100000\n",
+ " - 18s - loss: 0.3143 - acc: 0.9366 - val_loss: 0.3471 - val_acc: 0.9083\n",
+ "\n",
+ "Epoch 02647: val_acc did not improve from 0.94225\n",
+ "Epoch 2648/100000\n",
+ " - 19s - loss: 0.3142 - acc: 0.9372 - val_loss: 0.3309 - val_acc: 0.9200\n",
+ "\n",
+ "Epoch 02648: val_acc did not improve from 0.94225\n",
+ "Epoch 2649/100000\n",
+ " - 19s - loss: 0.3145 - acc: 0.9364 - val_loss: 0.3716 - val_acc: 0.8990\n",
+ "\n",
+ "Epoch 02649: val_acc did not improve from 0.94225\n",
+ "Epoch 2650/100000\n",
+ " - 18s - loss: 0.3123 - acc: 0.9371 - val_loss: 0.2940 - val_acc: 0.9398\n",
+ "\n",
+ "Epoch 02650: val_acc did not improve from 0.94225\n",
+ "Epoch 2651/100000\n",
+ " - 19s - loss: 0.3115 - acc: 0.9373 - val_loss: 0.3113 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 02651: val_acc did not improve from 0.94225\n",
+ "Epoch 2652/100000\n",
+ " - 18s - loss: 0.3140 - acc: 0.9367 - val_loss: 0.3311 - val_acc: 0.9203\n",
+ "\n",
+ "Epoch 02652: val_acc did not improve from 0.94225\n",
+ "Epoch 2653/100000\n",
+ " - 19s - loss: 0.3143 - acc: 0.9365 - val_loss: 0.3049 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02653: val_acc did not improve from 0.94225\n",
+ "Epoch 2654/100000\n",
+ " - 19s - loss: 0.3147 - acc: 0.9370 - val_loss: 0.3075 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 02654: val_acc did not improve from 0.94225\n",
+ "Epoch 2655/100000\n",
+ " - 18s - loss: 0.3122 - acc: 0.9377 - val_loss: 0.3035 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 02655: val_acc did not improve from 0.94225\n",
+ "Epoch 2656/100000\n",
+ " - 19s - loss: 0.3131 - acc: 0.9370 - val_loss: 0.3209 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 02656: val_acc did not improve from 0.94225\n",
+ "Epoch 2657/100000\n",
+ " - 19s - loss: 0.3113 - acc: 0.9373 - val_loss: 0.3175 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 02657: val_acc did not improve from 0.94225\n",
+ "Epoch 2658/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9367 - val_loss: 0.3135 - val_acc: 0.9248\n",
+ "\n",
+ "Epoch 02658: val_acc did not improve from 0.94225\n",
+ "Epoch 2659/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9369 - val_loss: 0.3038 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02659: val_acc did not improve from 0.94225\n",
+ "Epoch 2660/100000\n",
+ " - 19s - loss: 0.3110 - acc: 0.9373 - val_loss: 0.3099 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 02660: val_acc did not improve from 0.94225\n",
+ "Epoch 2661/100000\n",
+ " - 19s - loss: 0.3146 - acc: 0.9365 - val_loss: 0.3639 - val_acc: 0.9112\n",
+ "\n",
+ "Epoch 02661: val_acc did not improve from 0.94225\n",
+ "Epoch 2662/100000\n",
+ " - 19s - loss: 0.3133 - acc: 0.9371 - val_loss: 0.3746 - val_acc: 0.8950\n",
+ "\n",
+ "Epoch 02662: val_acc did not improve from 0.94225\n",
+ "Epoch 2663/100000\n",
+ " - 18s - loss: 0.3132 - acc: 0.9365 - val_loss: 0.3088 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 02663: val_acc did not improve from 0.94225\n",
+ "Epoch 2664/100000\n",
+ " - 18s - loss: 0.3127 - acc: 0.9367 - val_loss: 0.3138 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 02664: val_acc did not improve from 0.94225\n",
+ "Epoch 2665/100000\n",
+ " - 18s - loss: 0.3160 - acc: 0.9357 - val_loss: 0.3482 - val_acc: 0.9118\n",
+ "\n",
+ "Epoch 02665: val_acc did not improve from 0.94225\n",
+ "Epoch 2666/100000\n",
+ " - 18s - loss: 0.3130 - acc: 0.9370 - val_loss: 0.3096 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 02666: val_acc did not improve from 0.94225\n",
+ "Epoch 2667/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9370 - val_loss: 0.2990 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 02667: val_acc did not improve from 0.94225\n",
+ "Epoch 2668/100000\n",
+ " - 19s - loss: 0.3101 - acc: 0.9377 - val_loss: 0.3125 - val_acc: 0.9316\n",
+ "\n",
+ "Epoch 02668: val_acc did not improve from 0.94225\n",
+ "Epoch 2669/100000\n",
+ " - 19s - loss: 0.3144 - acc: 0.9373 - val_loss: 0.3016 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 02669: val_acc did not improve from 0.94225\n",
+ "Epoch 2670/100000\n",
+ " - 18s - loss: 0.3123 - acc: 0.9369 - val_loss: 0.2944 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 02670: val_acc did not improve from 0.94225\n",
+ "Epoch 2671/100000\n",
+ " - 19s - loss: 0.3124 - acc: 0.9373 - val_loss: 0.3081 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 02671: val_acc did not improve from 0.94225\n",
+ "Epoch 2672/100000\n",
+ " - 19s - loss: 0.3141 - acc: 0.9366 - val_loss: 0.3307 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 02672: val_acc did not improve from 0.94225\n",
+ "Epoch 2673/100000\n",
+ " - 18s - loss: 0.3127 - acc: 0.9365 - val_loss: 0.3490 - val_acc: 0.9096\n",
+ "\n",
+ "Epoch 02673: val_acc did not improve from 0.94225\n",
+ "Epoch 2674/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9373 - val_loss: 0.3054 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 02674: val_acc did not improve from 0.94225\n",
+ "Epoch 2675/100000\n",
+ " - 18s - loss: 0.3116 - acc: 0.9374 - val_loss: 0.3452 - val_acc: 0.9199\n",
+ "\n",
+ "Epoch 02675: val_acc did not improve from 0.94225\n",
+ "Epoch 2676/100000\n",
+ " - 19s - loss: 0.3150 - acc: 0.9362 - val_loss: 0.3278 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 02676: val_acc did not improve from 0.94225\n",
+ "Epoch 2677/100000\n",
+ " - 18s - loss: 0.3121 - acc: 0.9374 - val_loss: 0.3131 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 02677: val_acc did not improve from 0.94225\n",
+ "Epoch 2678/100000\n",
+ " - 19s - loss: 0.3154 - acc: 0.9355 - val_loss: 0.3480 - val_acc: 0.9079\n",
+ "\n",
+ "Epoch 02678: val_acc did not improve from 0.94225\n",
+ "Epoch 2679/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9367 - val_loss: 0.3817 - val_acc: 0.8891\n",
+ "\n",
+ "Epoch 02679: val_acc did not improve from 0.94225\n",
+ "Epoch 2680/100000\n",
+ " - 18s - loss: 0.3127 - acc: 0.9369 - val_loss: 0.3378 - val_acc: 0.9313\n",
+ "\n",
+ "Epoch 02680: val_acc did not improve from 0.94225\n",
+ "Epoch 2681/100000\n",
+ " - 18s - loss: 0.3127 - acc: 0.9365 - val_loss: 0.3057 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 02681: val_acc did not improve from 0.94225\n",
+ "Epoch 2682/100000\n",
+ " - 18s - loss: 0.3143 - acc: 0.9360 - val_loss: 0.3170 - val_acc: 0.9314\n",
+ "\n",
+ "Epoch 02682: val_acc did not improve from 0.94225\n",
+ "Epoch 2683/100000\n",
+ " - 18s - loss: 0.3161 - acc: 0.9363 - val_loss: 0.2972 - val_acc: 0.9402\n",
+ "\n",
+ "Epoch 02683: val_acc did not improve from 0.94225\n",
+ "Epoch 2684/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9374 - val_loss: 0.3190 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 02684: val_acc did not improve from 0.94225\n",
+ "Epoch 2685/100000\n",
+ " - 18s - loss: 0.3135 - acc: 0.9370 - val_loss: 0.3055 - val_acc: 0.9409\n",
+ "\n",
+ "Epoch 02685: val_acc did not improve from 0.94225\n",
+ "Epoch 2686/100000\n",
+ " - 19s - loss: 0.3142 - acc: 0.9377 - val_loss: 0.3093 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 02686: val_acc did not improve from 0.94225\n",
+ "Epoch 2687/100000\n",
+ " - 18s - loss: 0.3141 - acc: 0.9369 - val_loss: 0.3203 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 02687: val_acc did not improve from 0.94225\n",
+ "Epoch 2688/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9364 - val_loss: 0.3156 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 02688: val_acc did not improve from 0.94225\n",
+ "Epoch 2689/100000\n",
+ " - 18s - loss: 0.3125 - acc: 0.9372 - val_loss: 0.3189 - val_acc: 0.9266\n",
+ "\n",
+ "Epoch 02689: val_acc did not improve from 0.94225\n",
+ "Epoch 2690/100000\n",
+ " - 19s - loss: 0.3124 - acc: 0.9372 - val_loss: 0.2932 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 02690: val_acc did not improve from 0.94225\n",
+ "Epoch 2691/100000\n",
+ " - 19s - loss: 0.3129 - acc: 0.9367 - val_loss: 0.3119 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 02691: val_acc did not improve from 0.94225\n",
+ "Epoch 2692/100000\n",
+ " - 18s - loss: 0.3148 - acc: 0.9366 - val_loss: 0.3048 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02692: val_acc did not improve from 0.94225\n",
+ "Epoch 2693/100000\n",
+ " - 19s - loss: 0.3117 - acc: 0.9373 - val_loss: 0.3069 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 02693: val_acc did not improve from 0.94225\n",
+ "Epoch 2694/100000\n",
+ " - 18s - loss: 0.3115 - acc: 0.9375 - val_loss: 0.3023 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02694: val_acc did not improve from 0.94225\n",
+ "Epoch 2695/100000\n",
+ " - 18s - loss: 0.3120 - acc: 0.9373 - val_loss: 0.3479 - val_acc: 0.9083\n",
+ "\n",
+ "Epoch 02695: val_acc did not improve from 0.94225\n",
+ "Epoch 2696/100000\n",
+ " - 19s - loss: 0.3158 - acc: 0.9364 - val_loss: 0.3216 - val_acc: 0.9239\n",
+ "\n",
+ "Epoch 02696: val_acc did not improve from 0.94225\n",
+ "Epoch 2697/100000\n",
+ " - 18s - loss: 0.3125 - acc: 0.9369 - val_loss: 0.3159 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 02697: val_acc did not improve from 0.94225\n",
+ "Epoch 2698/100000\n",
+ " - 19s - loss: 0.3117 - acc: 0.9376 - val_loss: 0.3066 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 02698: val_acc did not improve from 0.94225\n",
+ "Epoch 2699/100000\n",
+ " - 19s - loss: 0.3129 - acc: 0.9367 - val_loss: 0.2983 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 02699: val_acc did not improve from 0.94225\n",
+ "Epoch 2700/100000\n",
+ " - 18s - loss: 0.3118 - acc: 0.9372 - val_loss: 0.3095 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 02700: val_acc did not improve from 0.94225\n",
+ "Epoch 2701/100000\n",
+ " - 19s - loss: 0.3114 - acc: 0.9372 - val_loss: 0.3352 - val_acc: 0.9201\n",
+ "\n",
+ "Epoch 02701: val_acc did not improve from 0.94225\n",
+ "Epoch 2702/100000\n",
+ " - 19s - loss: 0.3135 - acc: 0.9366 - val_loss: 0.3016 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 02702: val_acc did not improve from 0.94225\n",
+ "Epoch 2703/100000\n",
+ " - 19s - loss: 0.3130 - acc: 0.9369 - val_loss: 0.3087 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 02703: val_acc did not improve from 0.94225\n",
+ "Epoch 2704/100000\n",
+ " - 19s - loss: 0.3134 - acc: 0.9368 - val_loss: 0.3540 - val_acc: 0.9023\n",
+ "\n",
+ "Epoch 02704: val_acc did not improve from 0.94225\n",
+ "Epoch 2705/100000\n",
+ " - 19s - loss: 0.3130 - acc: 0.9367 - val_loss: 0.3097 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 02705: val_acc did not improve from 0.94225\n",
+ "Epoch 2706/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9371 - val_loss: 0.3196 - val_acc: 0.9262\n",
+ "\n",
+ "Epoch 02706: val_acc did not improve from 0.94225\n",
+ "Epoch 2707/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9375 - val_loss: 0.3009 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 02707: val_acc did not improve from 0.94225\n",
+ "Epoch 2708/100000\n",
+ " - 18s - loss: 0.3111 - acc: 0.9375 - val_loss: 0.2966 - val_acc: 0.9406\n",
+ "\n",
+ "Epoch 02708: val_acc did not improve from 0.94225\n",
+ "Epoch 2709/100000\n",
+ " - 19s - loss: 0.3114 - acc: 0.9373 - val_loss: 0.3487 - val_acc: 0.9159\n",
+ "\n",
+ "Epoch 02709: val_acc did not improve from 0.94225\n",
+ "Epoch 2710/100000\n",
+ " - 18s - loss: 0.3149 - acc: 0.9361 - val_loss: 0.3101 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02710: val_acc did not improve from 0.94225\n",
+ "Epoch 2711/100000\n",
+ " - 19s - loss: 0.3138 - acc: 0.9370 - val_loss: 0.3126 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 02711: val_acc did not improve from 0.94225\n",
+ "Epoch 2712/100000\n",
+ " - 18s - loss: 0.3115 - acc: 0.9372 - val_loss: 0.3130 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02712: val_acc did not improve from 0.94225\n",
+ "Epoch 2713/100000\n",
+ " - 18s - loss: 0.3128 - acc: 0.9367 - val_loss: 0.3274 - val_acc: 0.9228\n",
+ "\n",
+ "Epoch 02713: val_acc did not improve from 0.94225\n",
+ "Epoch 2714/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9373 - val_loss: 0.3132 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 02714: val_acc did not improve from 0.94225\n",
+ "Epoch 2715/100000\n",
+ " - 19s - loss: 0.3175 - acc: 0.9369 - val_loss: 0.3029 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 02715: val_acc did not improve from 0.94225\n",
+ "Epoch 2716/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9375 - val_loss: 0.2987 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 02716: val_acc did not improve from 0.94225\n",
+ "Epoch 2717/100000\n",
+ " - 18s - loss: 0.3134 - acc: 0.9369 - val_loss: 0.3246 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 02717: val_acc did not improve from 0.94225\n",
+ "Epoch 2718/100000\n",
+ " - 19s - loss: 0.3141 - acc: 0.9370 - val_loss: 0.3520 - val_acc: 0.9012\n",
+ "\n",
+ "Epoch 02718: val_acc did not improve from 0.94225\n",
+ "Epoch 2719/100000\n",
+ " - 19s - loss: 0.3112 - acc: 0.9374 - val_loss: 0.2955 - val_acc: 0.9410\n",
+ "\n",
+ "Epoch 02719: val_acc did not improve from 0.94225\n",
+ "Epoch 2720/100000\n",
+ " - 18s - loss: 0.3142 - acc: 0.9366 - val_loss: 0.3084 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02720: val_acc did not improve from 0.94225\n",
+ "Epoch 2721/100000\n",
+ " - 18s - loss: 0.3109 - acc: 0.9373 - val_loss: 0.2971 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 02721: val_acc did not improve from 0.94225\n",
+ "Epoch 2722/100000\n",
+ " - 19s - loss: 0.3145 - acc: 0.9366 - val_loss: 0.3159 - val_acc: 0.9256\n",
+ "\n",
+ "Epoch 02722: val_acc did not improve from 0.94225\n",
+ "Epoch 2723/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9366 - val_loss: 0.3291 - val_acc: 0.9223\n",
+ "\n",
+ "Epoch 02723: val_acc did not improve from 0.94225\n",
+ "Epoch 2724/100000\n",
+ " - 19s - loss: 0.3142 - acc: 0.9374 - val_loss: 0.3722 - val_acc: 0.8916\n",
+ "\n",
+ "Epoch 02724: val_acc did not improve from 0.94225\n",
+ "Epoch 2725/100000\n",
+ " - 18s - loss: 0.3135 - acc: 0.9368 - val_loss: 0.3330 - val_acc: 0.9251\n",
+ "\n",
+ "Epoch 02725: val_acc did not improve from 0.94225\n",
+ "Epoch 2726/100000\n",
+ " - 19s - loss: 0.3120 - acc: 0.9376 - val_loss: 0.3212 - val_acc: 0.9274\n",
+ "\n",
+ "Epoch 02726: val_acc did not improve from 0.94225\n",
+ "Epoch 2727/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9370 - val_loss: 0.3103 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 02727: val_acc did not improve from 0.94225\n",
+ "Epoch 2728/100000\n",
+ " - 19s - loss: 0.3126 - acc: 0.9370 - val_loss: 0.3468 - val_acc: 0.9089\n",
+ "\n",
+ "Epoch 02728: val_acc did not improve from 0.94225\n",
+ "Epoch 2729/100000\n",
+ " - 18s - loss: 0.3100 - acc: 0.9375 - val_loss: 0.3210 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 02729: val_acc did not improve from 0.94225\n",
+ "Epoch 2730/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9370 - val_loss: 0.3311 - val_acc: 0.9192\n",
+ "\n",
+ "Epoch 02730: val_acc did not improve from 0.94225\n",
+ "Epoch 2731/100000\n",
+ " - 19s - loss: 0.3143 - acc: 0.9360 - val_loss: 0.3299 - val_acc: 0.9284\n",
+ "\n",
+ "Epoch 02731: val_acc did not improve from 0.94225\n",
+ "Epoch 2732/100000\n",
+ " - 19s - loss: 0.3117 - acc: 0.9375 - val_loss: 0.3606 - val_acc: 0.9019\n",
+ "\n",
+ "Epoch 02732: val_acc did not improve from 0.94225\n",
+ "Epoch 2733/100000\n",
+ " - 19s - loss: 0.3111 - acc: 0.9373 - val_loss: 0.3030 - val_acc: 0.9407\n",
+ "\n",
+ "Epoch 02733: val_acc did not improve from 0.94225\n",
+ "Epoch 2734/100000\n",
+ " - 19s - loss: 0.3186 - acc: 0.9367 - val_loss: 0.3074 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 02734: val_acc did not improve from 0.94225\n",
+ "Epoch 2735/100000\n",
+ " - 18s - loss: 0.3114 - acc: 0.9375 - val_loss: 0.3233 - val_acc: 0.9237\n",
+ "\n",
+ "Epoch 02735: val_acc did not improve from 0.94225\n",
+ "Epoch 2736/100000\n",
+ " - 19s - loss: 0.3100 - acc: 0.9377 - val_loss: 0.3067 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 02736: val_acc did not improve from 0.94225\n",
+ "Epoch 2737/100000\n",
+ " - 18s - loss: 0.3108 - acc: 0.9376 - val_loss: 0.3074 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 02737: val_acc did not improve from 0.94225\n",
+ "Epoch 2738/100000\n",
+ " - 19s - loss: 0.3115 - acc: 0.9374 - val_loss: 0.3018 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02738: val_acc did not improve from 0.94225\n",
+ "Epoch 2739/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9366 - val_loss: 0.3676 - val_acc: 0.9115\n",
+ "\n",
+ "Epoch 02739: val_acc did not improve from 0.94225\n",
+ "Epoch 2740/100000\n",
+ " - 19s - loss: 0.3154 - acc: 0.9361 - val_loss: 0.3080 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 02740: val_acc did not improve from 0.94225\n",
+ "Epoch 2741/100000\n",
+ " - 19s - loss: 0.3101 - acc: 0.9371 - val_loss: 0.3192 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 02741: val_acc did not improve from 0.94225\n",
+ "Epoch 2742/100000\n",
+ " - 19s - loss: 0.3126 - acc: 0.9375 - val_loss: 0.3039 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 02742: val_acc did not improve from 0.94225\n",
+ "Epoch 2743/100000\n",
+ " - 18s - loss: 0.3133 - acc: 0.9376 - val_loss: 0.3223 - val_acc: 0.9266\n",
+ "\n",
+ "Epoch 02743: val_acc did not improve from 0.94225\n",
+ "Epoch 2744/100000\n",
+ " - 19s - loss: 0.3132 - acc: 0.9372 - val_loss: 0.3175 - val_acc: 0.9303\n",
+ "\n",
+ "Epoch 02744: val_acc did not improve from 0.94225\n",
+ "Epoch 2745/100000\n",
+ " - 19s - loss: 0.3143 - acc: 0.9365 - val_loss: 0.2947 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 02745: val_acc did not improve from 0.94225\n",
+ "Epoch 2746/100000\n",
+ " - 18s - loss: 0.3120 - acc: 0.9373 - val_loss: 0.3162 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 02746: val_acc did not improve from 0.94225\n",
+ "Epoch 2747/100000\n",
+ " - 18s - loss: 0.3135 - acc: 0.9369 - val_loss: 0.3559 - val_acc: 0.9045\n",
+ "\n",
+ "Epoch 02747: val_acc did not improve from 0.94225\n",
+ "Epoch 2748/100000\n",
+ " - 19s - loss: 0.3124 - acc: 0.9372 - val_loss: 0.3374 - val_acc: 0.9304\n",
+ "\n",
+ "Epoch 02748: val_acc did not improve from 0.94225\n",
+ "Epoch 2749/100000\n",
+ " - 19s - loss: 0.3117 - acc: 0.9374 - val_loss: 0.3082 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 02749: val_acc did not improve from 0.94225\n",
+ "Epoch 2750/100000\n",
+ " - 18s - loss: 0.3128 - acc: 0.9370 - val_loss: 0.3491 - val_acc: 0.9120\n",
+ "\n",
+ "Epoch 02750: val_acc did not improve from 0.94225\n",
+ "Epoch 2751/100000\n",
+ " - 18s - loss: 0.3124 - acc: 0.9369 - val_loss: 0.2974 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 02751: val_acc did not improve from 0.94225\n",
+ "Epoch 2752/100000\n",
+ " - 19s - loss: 0.3142 - acc: 0.9366 - val_loss: 0.3915 - val_acc: 0.8857\n",
+ "\n",
+ "Epoch 02752: val_acc did not improve from 0.94225\n",
+ "Epoch 2753/100000\n",
+ " - 19s - loss: 0.3126 - acc: 0.9370 - val_loss: 0.3198 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 02753: val_acc did not improve from 0.94225\n",
+ "Epoch 2754/100000\n",
+ " - 18s - loss: 0.3136 - acc: 0.9369 - val_loss: 0.3051 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 02754: val_acc did not improve from 0.94225\n",
+ "Epoch 2755/100000\n",
+ " - 19s - loss: 0.3104 - acc: 0.9379 - val_loss: 0.3706 - val_acc: 0.9018\n",
+ "\n",
+ "Epoch 02755: val_acc did not improve from 0.94225\n",
+ "Epoch 2756/100000\n",
+ " - 18s - loss: 0.3122 - acc: 0.9370 - val_loss: 0.3073 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 02756: val_acc did not improve from 0.94225\n",
+ "Epoch 2757/100000\n",
+ " - 19s - loss: 0.3135 - acc: 0.9373 - val_loss: 0.3080 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 02757: val_acc did not improve from 0.94225\n",
+ "Epoch 2758/100000\n",
+ " - 19s - loss: 0.3156 - acc: 0.9361 - val_loss: 0.3138 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 02758: val_acc did not improve from 0.94225\n",
+ "Epoch 2759/100000\n",
+ " - 19s - loss: 0.3121 - acc: 0.9371 - val_loss: 0.3083 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 02759: val_acc did not improve from 0.94225\n",
+ "Epoch 2760/100000\n",
+ " - 19s - loss: 0.3148 - acc: 0.9370 - val_loss: 0.3048 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 02760: val_acc did not improve from 0.94225\n",
+ "Epoch 2761/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9372 - val_loss: 0.3277 - val_acc: 0.9223\n",
+ "\n",
+ "Epoch 02761: val_acc did not improve from 0.94225\n",
+ "Epoch 2762/100000\n",
+ " - 19s - loss: 0.3136 - acc: 0.9367 - val_loss: 0.3034 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 02762: val_acc did not improve from 0.94225\n",
+ "Epoch 2763/100000\n",
+ " - 19s - loss: 0.3118 - acc: 0.9369 - val_loss: 0.3643 - val_acc: 0.9089\n",
+ "\n",
+ "Epoch 02763: val_acc did not improve from 0.94225\n",
+ "Epoch 2764/100000\n",
+ " - 19s - loss: 0.3147 - acc: 0.9367 - val_loss: 0.3204 - val_acc: 0.9331\n",
+ "\n",
+ "Epoch 02764: val_acc did not improve from 0.94225\n",
+ "Epoch 2765/100000\n",
+ " - 19s - loss: 0.3118 - acc: 0.9373 - val_loss: 0.3002 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 02765: val_acc did not improve from 0.94225\n",
+ "Epoch 2766/100000\n",
+ " - 19s - loss: 0.3115 - acc: 0.9376 - val_loss: 0.4164 - val_acc: 0.8802\n",
+ "\n",
+ "Epoch 02766: val_acc did not improve from 0.94225\n",
+ "Epoch 2767/100000\n",
+ " - 19s - loss: 0.3134 - acc: 0.9369 - val_loss: 0.6117 - val_acc: 0.7844\n",
+ "\n",
+ "Epoch 02767: val_acc did not improve from 0.94225\n",
+ "Epoch 2768/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9376 - val_loss: 0.3364 - val_acc: 0.9204\n",
+ "\n",
+ "Epoch 02768: val_acc did not improve from 0.94225\n",
+ "Epoch 2769/100000\n",
+ " - 19s - loss: 0.3132 - acc: 0.9374 - val_loss: 0.2932 - val_acc: 0.9409\n",
+ "\n",
+ "Epoch 02769: val_acc did not improve from 0.94225\n",
+ "Epoch 2770/100000\n",
+ " - 19s - loss: 0.3136 - acc: 0.9366 - val_loss: 0.3611 - val_acc: 0.9098\n",
+ "\n",
+ "Epoch 02770: val_acc did not improve from 0.94225\n",
+ "Epoch 2771/100000\n",
+ " - 19s - loss: 0.3120 - acc: 0.9367 - val_loss: 0.3611 - val_acc: 0.9289\n",
+ "\n",
+ "Epoch 02771: val_acc did not improve from 0.94225\n",
+ "Epoch 2772/100000\n",
+ " - 19s - loss: 0.3126 - acc: 0.9367 - val_loss: 0.3179 - val_acc: 0.9277\n",
+ "\n",
+ "Epoch 02772: val_acc did not improve from 0.94225\n",
+ "Epoch 2773/100000\n",
+ " - 18s - loss: 0.3097 - acc: 0.9378 - val_loss: 0.3161 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 02773: val_acc did not improve from 0.94225\n",
+ "Epoch 2774/100000\n",
+ " - 19s - loss: 0.3132 - acc: 0.9367 - val_loss: 0.3253 - val_acc: 0.9248\n",
+ "\n",
+ "Epoch 02774: val_acc did not improve from 0.94225\n",
+ "Epoch 2775/100000\n",
+ " - 19s - loss: 0.3151 - acc: 0.9362 - val_loss: 0.3015 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 02775: val_acc did not improve from 0.94225\n",
+ "Epoch 2776/100000\n",
+ " - 19s - loss: 0.3110 - acc: 0.9375 - val_loss: 0.3552 - val_acc: 0.9126\n",
+ "\n",
+ "Epoch 02776: val_acc did not improve from 0.94225\n",
+ "Epoch 2777/100000\n",
+ " - 19s - loss: 0.3122 - acc: 0.9373 - val_loss: 0.3383 - val_acc: 0.9210\n",
+ "\n",
+ "Epoch 02777: val_acc did not improve from 0.94225\n",
+ "Epoch 2778/100000\n",
+ " - 19s - loss: 0.3154 - acc: 0.9363 - val_loss: 0.3102 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02778: val_acc did not improve from 0.94225\n",
+ "Epoch 2779/100000\n",
+ " - 19s - loss: 0.3114 - acc: 0.9375 - val_loss: 0.2964 - val_acc: 0.9375\n",
+ "\n",
+ "Epoch 02779: val_acc did not improve from 0.94225\n",
+ "Epoch 2780/100000\n",
+ " - 19s - loss: 0.3153 - acc: 0.9370 - val_loss: 0.5286 - val_acc: 0.8322\n",
+ "\n",
+ "Epoch 02780: val_acc did not improve from 0.94225\n",
+ "Epoch 2781/100000\n",
+ " - 19s - loss: 0.3129 - acc: 0.9373 - val_loss: 0.3296 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 02781: val_acc did not improve from 0.94225\n",
+ "Epoch 2782/100000\n",
+ " - 19s - loss: 0.3136 - acc: 0.9367 - val_loss: 0.3391 - val_acc: 0.9211\n",
+ "\n",
+ "Epoch 02782: val_acc did not improve from 0.94225\n",
+ "Epoch 2783/100000\n",
+ " - 19s - loss: 0.3124 - acc: 0.9368 - val_loss: 0.3073 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02783: val_acc did not improve from 0.94225\n",
+ "Epoch 2784/100000\n",
+ " - 18s - loss: 0.3137 - acc: 0.9370 - val_loss: 0.3107 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 02784: val_acc did not improve from 0.94225\n",
+ "Epoch 2785/100000\n",
+ " - 19s - loss: 0.3150 - acc: 0.9370 - val_loss: 0.3174 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 02785: val_acc did not improve from 0.94225\n",
+ "Epoch 2786/100000\n",
+ " - 18s - loss: 0.3125 - acc: 0.9373 - val_loss: 0.3578 - val_acc: 0.9090\n",
+ "\n",
+ "Epoch 02786: val_acc did not improve from 0.94225\n",
+ "Epoch 2787/100000\n",
+ " - 19s - loss: 0.3126 - acc: 0.9368 - val_loss: 0.3105 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02787: val_acc did not improve from 0.94225\n",
+ "Epoch 2788/100000\n",
+ " - 18s - loss: 0.3130 - acc: 0.9367 - val_loss: 0.3224 - val_acc: 0.9256\n",
+ "\n",
+ "Epoch 02788: val_acc did not improve from 0.94225\n",
+ "Epoch 2789/100000\n",
+ " - 19s - loss: 0.3108 - acc: 0.9373 - val_loss: 0.3091 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 02789: val_acc did not improve from 0.94225\n",
+ "Epoch 2790/100000\n",
+ " - 19s - loss: 0.3150 - acc: 0.9362 - val_loss: 0.3340 - val_acc: 0.9225\n",
+ "\n",
+ "Epoch 02790: val_acc did not improve from 0.94225\n",
+ "Epoch 2791/100000\n",
+ " - 19s - loss: 0.3135 - acc: 0.9368 - val_loss: 0.2963 - val_acc: 0.9399\n",
+ "\n",
+ "Epoch 02791: val_acc did not improve from 0.94225\n",
+ "Epoch 2792/100000\n",
+ " - 19s - loss: 0.3108 - acc: 0.9379 - val_loss: 0.3790 - val_acc: 0.9011\n",
+ "\n",
+ "Epoch 02792: val_acc did not improve from 0.94225\n",
+ "Epoch 2793/100000\n",
+ " - 19s - loss: 0.3117 - acc: 0.9372 - val_loss: 0.2950 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 02793: val_acc did not improve from 0.94225\n",
+ "Epoch 2794/100000\n",
+ " - 19s - loss: 0.3145 - acc: 0.9360 - val_loss: 0.3244 - val_acc: 0.9291\n",
+ "\n",
+ "Epoch 02794: val_acc did not improve from 0.94225\n",
+ "Epoch 2795/100000\n",
+ " - 19s - loss: 0.3136 - acc: 0.9371 - val_loss: 0.3247 - val_acc: 0.9285\n",
+ "\n",
+ "Epoch 02795: val_acc did not improve from 0.94225\n",
+ "Epoch 2796/100000\n",
+ " - 19s - loss: 0.3143 - acc: 0.9367 - val_loss: 0.2972 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 02796: val_acc did not improve from 0.94225\n",
+ "Epoch 2797/100000\n",
+ " - 19s - loss: 0.3113 - acc: 0.9377 - val_loss: 0.3032 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02797: val_acc did not improve from 0.94225\n",
+ "Epoch 2798/100000\n",
+ " - 18s - loss: 0.3119 - acc: 0.9370 - val_loss: 0.3214 - val_acc: 0.9230\n",
+ "\n",
+ "Epoch 02798: val_acc did not improve from 0.94225\n",
+ "Epoch 2799/100000\n",
+ " - 18s - loss: 0.3130 - acc: 0.9367 - val_loss: 0.3097 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 02799: val_acc did not improve from 0.94225\n",
+ "Epoch 2800/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9370 - val_loss: 0.4605 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 02800: val_acc did not improve from 0.94225\n",
+ "Epoch 2801/100000\n",
+ " - 18s - loss: 0.3123 - acc: 0.9377 - val_loss: 0.3075 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 02801: val_acc did not improve from 0.94225\n",
+ "Epoch 2802/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9372 - val_loss: 0.3261 - val_acc: 0.9231\n",
+ "\n",
+ "Epoch 02802: val_acc did not improve from 0.94225\n",
+ "Epoch 2803/100000\n",
+ " - 18s - loss: 0.3134 - acc: 0.9369 - val_loss: 0.3102 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 02803: val_acc did not improve from 0.94225\n",
+ "Epoch 2804/100000\n",
+ " - 19s - loss: 0.3116 - acc: 0.9368 - val_loss: 0.2997 - val_acc: 0.9389\n",
+ "\n",
+ "Epoch 02804: val_acc did not improve from 0.94225\n",
+ "Epoch 2805/100000\n",
+ " - 19s - loss: 0.3156 - acc: 0.9356 - val_loss: 0.3062 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 02805: val_acc did not improve from 0.94225\n",
+ "Epoch 2806/100000\n",
+ " - 18s - loss: 0.3122 - acc: 0.9374 - val_loss: 0.3786 - val_acc: 0.8955\n",
+ "\n",
+ "Epoch 02806: val_acc did not improve from 0.94225\n",
+ "Epoch 2807/100000\n",
+ " - 19s - loss: 0.3155 - acc: 0.9367 - val_loss: 0.3332 - val_acc: 0.9204\n",
+ "\n",
+ "Epoch 02807: val_acc did not improve from 0.94225\n",
+ "Epoch 2808/100000\n",
+ " - 20s - loss: 0.3144 - acc: 0.9371 - val_loss: 0.3085 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 02808: val_acc did not improve from 0.94225\n",
+ "Epoch 2809/100000\n",
+ " - 19s - loss: 0.3149 - acc: 0.9369 - val_loss: 0.3033 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 02809: val_acc did not improve from 0.94225\n",
+ "Epoch 2810/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9375 - val_loss: 0.3368 - val_acc: 0.9137\n",
+ "\n",
+ "Epoch 02810: val_acc did not improve from 0.94225\n",
+ "Epoch 2811/100000\n",
+ " - 18s - loss: 0.3108 - acc: 0.9377 - val_loss: 0.3336 - val_acc: 0.9145\n",
+ "\n",
+ "Epoch 02811: val_acc did not improve from 0.94225\n",
+ "Epoch 2812/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9365 - val_loss: 0.3543 - val_acc: 0.9056\n",
+ "\n",
+ "Epoch 02812: val_acc did not improve from 0.94225\n",
+ "Epoch 2813/100000\n",
+ " - 18s - loss: 0.3119 - acc: 0.9377 - val_loss: 0.3112 - val_acc: 0.9270\n",
+ "\n",
+ "Epoch 02813: val_acc did not improve from 0.94225\n",
+ "Epoch 2814/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9373 - val_loss: 0.3110 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 02814: val_acc did not improve from 0.94225\n",
+ "Epoch 2815/100000\n",
+ " - 19s - loss: 0.3120 - acc: 0.9375 - val_loss: 0.3335 - val_acc: 0.9194\n",
+ "\n",
+ "Epoch 02815: val_acc did not improve from 0.94225\n",
+ "Epoch 2816/100000\n",
+ " - 19s - loss: 0.3108 - acc: 0.9378 - val_loss: 0.2977 - val_acc: 0.9383\n",
+ "\n",
+ "Epoch 02816: val_acc did not improve from 0.94225\n",
+ "Epoch 2817/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9368 - val_loss: 0.3001 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02817: val_acc did not improve from 0.94225\n",
+ "Epoch 2818/100000\n",
+ " - 19s - loss: 0.3146 - acc: 0.9366 - val_loss: 0.3084 - val_acc: 0.9311\n",
+ "\n",
+ "Epoch 02818: val_acc did not improve from 0.94225\n",
+ "Epoch 2819/100000\n",
+ " - 19s - loss: 0.3135 - acc: 0.9371 - val_loss: 0.3125 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 02819: val_acc did not improve from 0.94225\n",
+ "Epoch 2820/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9372 - val_loss: 0.3064 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 02820: val_acc did not improve from 0.94225\n",
+ "Epoch 2821/100000\n",
+ " - 19s - loss: 0.3133 - acc: 0.9366 - val_loss: 0.3101 - val_acc: 0.9322\n",
+ "\n",
+ "Epoch 02821: val_acc did not improve from 0.94225\n",
+ "Epoch 2822/100000\n",
+ " - 19s - loss: 0.3129 - acc: 0.9371 - val_loss: 0.3382 - val_acc: 0.9122\n",
+ "\n",
+ "Epoch 02822: val_acc did not improve from 0.94225\n",
+ "Epoch 2823/100000\n",
+ " - 19s - loss: 0.3132 - acc: 0.9369 - val_loss: 0.4093 - val_acc: 0.8929\n",
+ "\n",
+ "Epoch 02823: val_acc did not improve from 0.94225\n",
+ "Epoch 2824/100000\n",
+ " - 18s - loss: 0.3121 - acc: 0.9373 - val_loss: 0.3273 - val_acc: 0.9239\n",
+ "\n",
+ "Epoch 02824: val_acc did not improve from 0.94225\n",
+ "Epoch 2825/100000\n",
+ " - 19s - loss: 0.3131 - acc: 0.9362 - val_loss: 0.3321 - val_acc: 0.9243\n",
+ "\n",
+ "Epoch 02825: val_acc did not improve from 0.94225\n",
+ "Epoch 2826/100000\n",
+ " - 19s - loss: 0.3147 - acc: 0.9367 - val_loss: 0.2936 - val_acc: 0.9402\n",
+ "\n",
+ "Epoch 02826: val_acc did not improve from 0.94225\n",
+ "Epoch 2827/100000\n",
+ " - 18s - loss: 0.3128 - acc: 0.9369 - val_loss: 0.3520 - val_acc: 0.9089\n",
+ "\n",
+ "Epoch 02827: val_acc did not improve from 0.94225\n",
+ "Epoch 2828/100000\n",
+ " - 19s - loss: 0.3113 - acc: 0.9370 - val_loss: 0.3042 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02828: val_acc did not improve from 0.94225\n",
+ "Epoch 2829/100000\n",
+ " - 19s - loss: 0.3149 - acc: 0.9365 - val_loss: 0.3041 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 02829: val_acc did not improve from 0.94225\n",
+ "Epoch 2830/100000\n",
+ " - 18s - loss: 0.3122 - acc: 0.9372 - val_loss: 0.2916 - val_acc: 0.9414\n",
+ "\n",
+ "Epoch 02830: val_acc did not improve from 0.94225\n",
+ "Epoch 2831/100000\n",
+ " - 19s - loss: 0.3130 - acc: 0.9372 - val_loss: 0.3211 - val_acc: 0.9231\n",
+ "\n",
+ "Epoch 02831: val_acc did not improve from 0.94225\n",
+ "Epoch 2832/100000\n",
+ " - 19s - loss: 0.3122 - acc: 0.9371 - val_loss: 0.3010 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 02832: val_acc did not improve from 0.94225\n",
+ "Epoch 2833/100000\n",
+ " - 19s - loss: 0.3130 - acc: 0.9368 - val_loss: 0.3204 - val_acc: 0.9307\n",
+ "\n",
+ "Epoch 02833: val_acc did not improve from 0.94225\n",
+ "Epoch 2834/100000\n",
+ " - 18s - loss: 0.3121 - acc: 0.9369 - val_loss: 0.3064 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 02834: val_acc did not improve from 0.94225\n",
+ "Epoch 2835/100000\n",
+ " - 19s - loss: 0.3153 - acc: 0.9358 - val_loss: 0.3347 - val_acc: 0.9227\n",
+ "\n",
+ "Epoch 02835: val_acc did not improve from 0.94225\n",
+ "Epoch 2836/100000\n",
+ " - 18s - loss: 0.3123 - acc: 0.9372 - val_loss: 0.3473 - val_acc: 0.9115\n",
+ "\n",
+ "Epoch 02836: val_acc did not improve from 0.94225\n",
+ "Epoch 2837/100000\n",
+ " - 19s - loss: 0.3128 - acc: 0.9368 - val_loss: 0.2993 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02837: val_acc did not improve from 0.94225\n",
+ "Epoch 2838/100000\n",
+ " - 18s - loss: 0.3121 - acc: 0.9367 - val_loss: 0.3023 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 02838: val_acc did not improve from 0.94225\n",
+ "Epoch 2839/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9366 - val_loss: 0.3072 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 02839: val_acc did not improve from 0.94225\n",
+ "Epoch 2840/100000\n",
+ " - 18s - loss: 0.3131 - acc: 0.9370 - val_loss: 0.3097 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 02840: val_acc did not improve from 0.94225\n",
+ "Epoch 2841/100000\n",
+ " - 19s - loss: 0.3134 - acc: 0.9366 - val_loss: 0.3110 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 02841: val_acc did not improve from 0.94225\n",
+ "Epoch 2842/100000\n",
+ " - 19s - loss: 0.3131 - acc: 0.9364 - val_loss: 0.3994 - val_acc: 0.8881\n",
+ "\n",
+ "Epoch 02842: val_acc did not improve from 0.94225\n",
+ "Epoch 2843/100000\n",
+ " - 19s - loss: 0.3116 - acc: 0.9373 - val_loss: 0.3065 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 02843: val_acc did not improve from 0.94225\n",
+ "Epoch 2844/100000\n",
+ " - 19s - loss: 0.3135 - acc: 0.9365 - val_loss: 0.3907 - val_acc: 0.8980\n",
+ "\n",
+ "Epoch 02844: val_acc did not improve from 0.94225\n",
+ "Epoch 2845/100000\n",
+ " - 18s - loss: 0.3127 - acc: 0.9374 - val_loss: 0.3015 - val_acc: 0.9396\n",
+ "\n",
+ "Epoch 02845: val_acc did not improve from 0.94225\n",
+ "Epoch 2846/100000\n",
+ " - 18s - loss: 0.3157 - acc: 0.9364 - val_loss: 0.3120 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 02846: val_acc did not improve from 0.94225\n",
+ "Epoch 2847/100000\n",
+ " - 19s - loss: 0.3143 - acc: 0.9367 - val_loss: 0.3006 - val_acc: 0.9379\n",
+ "\n",
+ "Epoch 02847: val_acc did not improve from 0.94225\n",
+ "Epoch 2848/100000\n",
+ " - 18s - loss: 0.3125 - acc: 0.9372 - val_loss: 0.3038 - val_acc: 0.9390\n",
+ "\n",
+ "Epoch 02848: val_acc did not improve from 0.94225\n",
+ "Epoch 2849/100000\n",
+ " - 18s - loss: 0.3138 - acc: 0.9369 - val_loss: 0.3080 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 02849: val_acc did not improve from 0.94225\n",
+ "Epoch 2850/100000\n",
+ " - 19s - loss: 0.3124 - acc: 0.9371 - val_loss: 0.4164 - val_acc: 0.8890\n",
+ "\n",
+ "Epoch 02850: val_acc did not improve from 0.94225\n",
+ "Epoch 2851/100000\n",
+ " - 18s - loss: 0.3161 - acc: 0.9357 - val_loss: 0.3023 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 02851: val_acc did not improve from 0.94225\n",
+ "Epoch 2852/100000\n",
+ " - 19s - loss: 0.3149 - acc: 0.9364 - val_loss: 0.3037 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 02852: val_acc did not improve from 0.94225\n",
+ "Epoch 2853/100000\n",
+ " - 18s - loss: 0.3121 - acc: 0.9375 - val_loss: 0.3645 - val_acc: 0.9028\n",
+ "\n",
+ "Epoch 02853: val_acc did not improve from 0.94225\n",
+ "Epoch 2854/100000\n",
+ " - 19s - loss: 0.3130 - acc: 0.9367 - val_loss: 0.3121 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 02854: val_acc did not improve from 0.94225\n",
+ "Epoch 2855/100000\n",
+ " - 18s - loss: 0.3134 - acc: 0.9372 - val_loss: 0.3006 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 02855: val_acc did not improve from 0.94225\n",
+ "Epoch 2856/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9372 - val_loss: 0.3768 - val_acc: 0.9169\n",
+ "\n",
+ "Epoch 02856: val_acc did not improve from 0.94225\n",
+ "Epoch 2857/100000\n",
+ " - 19s - loss: 0.3134 - acc: 0.9367 - val_loss: 0.3153 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 02857: val_acc did not improve from 0.94225\n",
+ "Epoch 2858/100000\n",
+ " - 18s - loss: 0.3130 - acc: 0.9370 - val_loss: 0.4579 - val_acc: 0.8811\n",
+ "\n",
+ "Epoch 02858: val_acc did not improve from 0.94225\n",
+ "Epoch 2859/100000\n",
+ " - 19s - loss: 0.3116 - acc: 0.9377 - val_loss: 0.3072 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 02859: val_acc did not improve from 0.94225\n",
+ "Epoch 2860/100000\n",
+ " - 19s - loss: 0.3148 - acc: 0.9361 - val_loss: 0.3073 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02860: val_acc did not improve from 0.94225\n",
+ "Epoch 2861/100000\n",
+ " - 19s - loss: 0.3147 - acc: 0.9365 - val_loss: 0.3461 - val_acc: 0.9225\n",
+ "\n",
+ "Epoch 02861: val_acc did not improve from 0.94225\n",
+ "Epoch 2862/100000\n",
+ " - 19s - loss: 0.3151 - acc: 0.9364 - val_loss: 0.3352 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02862: val_acc did not improve from 0.94225\n",
+ "Epoch 2863/100000\n",
+ " - 19s - loss: 0.3134 - acc: 0.9365 - val_loss: 0.6935 - val_acc: 0.7499\n",
+ "\n",
+ "Epoch 02863: val_acc did not improve from 0.94225\n",
+ "Epoch 2864/100000\n",
+ " - 19s - loss: 0.3131 - acc: 0.9374 - val_loss: 0.3021 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 02864: val_acc did not improve from 0.94225\n",
+ "Epoch 2865/100000\n",
+ " - 18s - loss: 0.3135 - acc: 0.9366 - val_loss: 0.3292 - val_acc: 0.9390\n",
+ "\n",
+ "Epoch 02865: val_acc did not improve from 0.94225\n",
+ "Epoch 2866/100000\n",
+ " - 19s - loss: 0.3137 - acc: 0.9362 - val_loss: 0.3847 - val_acc: 0.9198\n",
+ "\n",
+ "Epoch 02866: val_acc did not improve from 0.94225\n",
+ "Epoch 2867/100000\n",
+ " - 19s - loss: 0.3135 - acc: 0.9371 - val_loss: 0.3084 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02867: val_acc did not improve from 0.94225\n",
+ "Epoch 2868/100000\n",
+ " - 19s - loss: 0.3153 - acc: 0.9373 - val_loss: 0.3014 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 02868: val_acc did not improve from 0.94225\n",
+ "Epoch 2869/100000\n",
+ " - 18s - loss: 0.3141 - acc: 0.9366 - val_loss: 0.3253 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 02869: val_acc did not improve from 0.94225\n",
+ "Epoch 2870/100000\n",
+ " - 19s - loss: 0.3117 - acc: 0.9374 - val_loss: 0.4106 - val_acc: 0.8753\n",
+ "\n",
+ "Epoch 02870: val_acc did not improve from 0.94225\n",
+ "Epoch 2871/100000\n",
+ " - 18s - loss: 0.3137 - acc: 0.9369 - val_loss: 0.3037 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 02871: val_acc did not improve from 0.94225\n",
+ "Epoch 2872/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9369 - val_loss: 0.3165 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 02872: val_acc did not improve from 0.94225\n",
+ "Epoch 2873/100000\n",
+ " - 19s - loss: 0.3118 - acc: 0.9372 - val_loss: 0.3171 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 02873: val_acc did not improve from 0.94225\n",
+ "Epoch 2874/100000\n",
+ " - 18s - loss: 0.3113 - acc: 0.9371 - val_loss: 0.3059 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 02874: val_acc did not improve from 0.94225\n",
+ "Epoch 2875/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9366 - val_loss: 0.3017 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 02875: val_acc did not improve from 0.94225\n",
+ "Epoch 2876/100000\n",
+ " - 19s - loss: 0.3113 - acc: 0.9375 - val_loss: 0.3260 - val_acc: 0.9281\n",
+ "\n",
+ "Epoch 02876: val_acc did not improve from 0.94225\n",
+ "Epoch 2877/100000\n",
+ " - 18s - loss: 0.3143 - acc: 0.9365 - val_loss: 0.3033 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 02877: val_acc did not improve from 0.94225\n",
+ "Epoch 2878/100000\n",
+ " - 19s - loss: 0.3121 - acc: 0.9376 - val_loss: 0.3161 - val_acc: 0.9296\n",
+ "\n",
+ "Epoch 02878: val_acc did not improve from 0.94225\n",
+ "Epoch 2879/100000\n",
+ " - 18s - loss: 0.3151 - acc: 0.9360 - val_loss: 0.3143 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 02879: val_acc did not improve from 0.94225\n",
+ "Epoch 2880/100000\n",
+ " - 19s - loss: 0.3131 - acc: 0.9370 - val_loss: 0.3285 - val_acc: 0.9208\n",
+ "\n",
+ "Epoch 02880: val_acc did not improve from 0.94225\n",
+ "Epoch 2881/100000\n",
+ " - 19s - loss: 0.3108 - acc: 0.9378 - val_loss: 0.3252 - val_acc: 0.9239\n",
+ "\n",
+ "Epoch 02881: val_acc did not improve from 0.94225\n",
+ "Epoch 2882/100000\n",
+ " - 18s - loss: 0.3122 - acc: 0.9367 - val_loss: 0.3303 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 02882: val_acc did not improve from 0.94225\n",
+ "Epoch 2883/100000\n",
+ " - 19s - loss: 0.3140 - acc: 0.9368 - val_loss: 0.4265 - val_acc: 0.8835\n",
+ "\n",
+ "Epoch 02883: val_acc did not improve from 0.94225\n",
+ "Epoch 2884/100000\n",
+ " - 18s - loss: 0.3112 - acc: 0.9377 - val_loss: 0.8281 - val_acc: 0.6407\n",
+ "\n",
+ "Epoch 02884: val_acc did not improve from 0.94225\n",
+ "Epoch 2885/100000\n",
+ " - 19s - loss: 0.3140 - acc: 0.9370 - val_loss: 0.3159 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 02885: val_acc did not improve from 0.94225\n",
+ "Epoch 2886/100000\n",
+ " - 18s - loss: 0.3127 - acc: 0.9370 - val_loss: 0.4271 - val_acc: 0.8706\n",
+ "\n",
+ "Epoch 02886: val_acc did not improve from 0.94225\n",
+ "Epoch 2887/100000\n",
+ " - 19s - loss: 0.3135 - acc: 0.9364 - val_loss: 0.3160 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 02887: val_acc did not improve from 0.94225\n",
+ "Epoch 2888/100000\n",
+ " - 19s - loss: 0.3137 - acc: 0.9363 - val_loss: 0.3293 - val_acc: 0.9208\n",
+ "\n",
+ "Epoch 02888: val_acc did not improve from 0.94225\n",
+ "Epoch 2889/100000\n",
+ " - 18s - loss: 0.3120 - acc: 0.9371 - val_loss: 0.3144 - val_acc: 0.9286\n",
+ "\n",
+ "Epoch 02889: val_acc did not improve from 0.94225\n",
+ "Epoch 2890/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9372 - val_loss: 0.3241 - val_acc: 0.9219\n",
+ "\n",
+ "Epoch 02890: val_acc did not improve from 0.94225\n",
+ "Epoch 2891/100000\n",
+ " - 18s - loss: 0.3156 - acc: 0.9358 - val_loss: 0.2988 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 02891: val_acc did not improve from 0.94225\n",
+ "Epoch 2892/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9370 - val_loss: 0.3057 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 02892: val_acc did not improve from 0.94225\n",
+ "Epoch 2893/100000\n",
+ " - 18s - loss: 0.3127 - acc: 0.9366 - val_loss: 0.3199 - val_acc: 0.9301\n",
+ "\n",
+ "Epoch 02893: val_acc did not improve from 0.94225\n",
+ "Epoch 2894/100000\n",
+ " - 19s - loss: 0.3132 - acc: 0.9370 - val_loss: 0.3424 - val_acc: 0.9216\n",
+ "\n",
+ "Epoch 02894: val_acc did not improve from 0.94225\n",
+ "Epoch 2895/100000\n",
+ " - 18s - loss: 0.3125 - acc: 0.9372 - val_loss: 0.3280 - val_acc: 0.9246\n",
+ "\n",
+ "Epoch 02895: val_acc did not improve from 0.94225\n",
+ "Epoch 2896/100000\n",
+ " - 19s - loss: 0.3123 - acc: 0.9371 - val_loss: 0.3998 - val_acc: 0.8942\n",
+ "\n",
+ "Epoch 02896: val_acc did not improve from 0.94225\n",
+ "Epoch 2897/100000\n",
+ " - 18s - loss: 0.3138 - acc: 0.9372 - val_loss: 0.3077 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02897: val_acc did not improve from 0.94225\n",
+ "Epoch 2898/100000\n",
+ " - 19s - loss: 0.3111 - acc: 0.9377 - val_loss: 0.3309 - val_acc: 0.9258\n",
+ "\n",
+ "Epoch 02898: val_acc did not improve from 0.94225\n",
+ "Epoch 2899/100000\n",
+ " - 19s - loss: 0.3118 - acc: 0.9372 - val_loss: 0.3060 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02899: val_acc did not improve from 0.94225\n",
+ "Epoch 2900/100000\n",
+ " - 18s - loss: 0.3139 - acc: 0.9368 - val_loss: 0.3003 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 02900: val_acc did not improve from 0.94225\n",
+ "Epoch 2901/100000\n",
+ " - 18s - loss: 0.3110 - acc: 0.9372 - val_loss: 0.3054 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 02901: val_acc did not improve from 0.94225\n",
+ "Epoch 2902/100000\n",
+ " - 19s - loss: 0.3131 - acc: 0.9370 - val_loss: 0.3478 - val_acc: 0.9119\n",
+ "\n",
+ "Epoch 02902: val_acc did not improve from 0.94225\n",
+ "Epoch 2903/100000\n",
+ " - 18s - loss: 0.3130 - acc: 0.9368 - val_loss: 0.3032 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 02903: val_acc did not improve from 0.94225\n",
+ "Epoch 2904/100000\n",
+ " - 19s - loss: 0.3117 - acc: 0.9370 - val_loss: 0.3152 - val_acc: 0.9271\n",
+ "\n",
+ "Epoch 02904: val_acc did not improve from 0.94225\n",
+ "Epoch 2905/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9377 - val_loss: 0.3137 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02905: val_acc did not improve from 0.94225\n",
+ "Epoch 2906/100000\n",
+ " - 18s - loss: 0.3180 - acc: 0.9360 - val_loss: 0.3161 - val_acc: 0.9305\n",
+ "\n",
+ "Epoch 02906: val_acc did not improve from 0.94225\n",
+ "Epoch 2907/100000\n",
+ " - 19s - loss: 0.3152 - acc: 0.9366 - val_loss: 0.3007 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 02907: val_acc did not improve from 0.94225\n",
+ "Epoch 2908/100000\n",
+ " - 18s - loss: 0.3129 - acc: 0.9366 - val_loss: 0.3099 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 02908: val_acc did not improve from 0.94225\n",
+ "Epoch 2909/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9363 - val_loss: 0.3114 - val_acc: 0.9292\n",
+ "\n",
+ "Epoch 02909: val_acc did not improve from 0.94225\n",
+ "Epoch 2910/100000\n",
+ " - 19s - loss: 0.3130 - acc: 0.9363 - val_loss: 0.3232 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 02910: val_acc did not improve from 0.94225\n",
+ "Epoch 2911/100000\n",
+ " - 18s - loss: 0.3140 - acc: 0.9371 - val_loss: 0.3096 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 02911: val_acc did not improve from 0.94225\n",
+ "Epoch 2912/100000\n",
+ " - 19s - loss: 0.3161 - acc: 0.9359 - val_loss: 0.3148 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 02912: val_acc did not improve from 0.94225\n",
+ "Epoch 2913/100000\n",
+ " - 19s - loss: 0.3136 - acc: 0.9372 - val_loss: 0.3352 - val_acc: 0.9215\n",
+ "\n",
+ "Epoch 02913: val_acc did not improve from 0.94225\n",
+ "Epoch 2914/100000\n",
+ " - 19s - loss: 0.3137 - acc: 0.9371 - val_loss: 0.3907 - val_acc: 0.9185\n",
+ "\n",
+ "Epoch 02914: val_acc did not improve from 0.94225\n",
+ "Epoch 2915/100000\n",
+ " - 19s - loss: 0.3116 - acc: 0.9375 - val_loss: 0.3166 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 02915: val_acc did not improve from 0.94225\n",
+ "Epoch 2916/100000\n",
+ " - 19s - loss: 0.3114 - acc: 0.9374 - val_loss: 0.3225 - val_acc: 0.9258\n",
+ "\n",
+ "Epoch 02916: val_acc did not improve from 0.94225\n",
+ "Epoch 2917/100000\n",
+ " - 19s - loss: 0.3123 - acc: 0.9371 - val_loss: 0.3106 - val_acc: 0.9340\n",
+ "\n",
+ "Epoch 02917: val_acc did not improve from 0.94225\n",
+ "Epoch 2918/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9370 - val_loss: 0.3104 - val_acc: 0.9345\n",
+ "\n",
+ "Epoch 02918: val_acc did not improve from 0.94225\n",
+ "Epoch 2919/100000\n",
+ " - 18s - loss: 0.3133 - acc: 0.9368 - val_loss: 0.3168 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 02919: val_acc did not improve from 0.94225\n",
+ "Epoch 2920/100000\n",
+ " - 19s - loss: 0.3147 - acc: 0.9364 - val_loss: 0.3254 - val_acc: 0.9326\n",
+ "\n",
+ "Epoch 02920: val_acc did not improve from 0.94225\n",
+ "Epoch 2921/100000\n",
+ " - 19s - loss: 0.3168 - acc: 0.9360 - val_loss: 0.3195 - val_acc: 0.9244\n",
+ "\n",
+ "Epoch 02921: val_acc did not improve from 0.94225\n",
+ "Epoch 2922/100000\n",
+ " - 19s - loss: 0.3137 - acc: 0.9370 - val_loss: 0.3001 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02922: val_acc did not improve from 0.94225\n",
+ "Epoch 2923/100000\n",
+ " - 19s - loss: 0.3133 - acc: 0.9369 - val_loss: 0.3131 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 02923: val_acc did not improve from 0.94225\n",
+ "Epoch 2924/100000\n",
+ " - 19s - loss: 0.3142 - acc: 0.9373 - val_loss: 0.3146 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02924: val_acc did not improve from 0.94225\n",
+ "Epoch 2925/100000\n",
+ " - 19s - loss: 0.3147 - acc: 0.9363 - val_loss: 0.3907 - val_acc: 0.8911\n",
+ "\n",
+ "Epoch 02925: val_acc did not improve from 0.94225\n",
+ "Epoch 2926/100000\n",
+ " - 19s - loss: 0.3132 - acc: 0.9368 - val_loss: 0.3157 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 02926: val_acc did not improve from 0.94225\n",
+ "Epoch 2927/100000\n",
+ " - 19s - loss: 0.3146 - acc: 0.9363 - val_loss: 0.4041 - val_acc: 0.8941\n",
+ "\n",
+ "Epoch 02927: val_acc did not improve from 0.94225\n",
+ "Epoch 2928/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9371 - val_loss: 0.3108 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 02928: val_acc did not improve from 0.94225\n",
+ "Epoch 2929/100000\n",
+ " - 19s - loss: 0.3118 - acc: 0.9372 - val_loss: 0.4674 - val_acc: 0.8730\n",
+ "\n",
+ "Epoch 02929: val_acc did not improve from 0.94225\n",
+ "Epoch 2930/100000\n",
+ " - 19s - loss: 0.3142 - acc: 0.9359 - val_loss: 0.3118 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 02930: val_acc did not improve from 0.94225\n",
+ "Epoch 2931/100000\n",
+ " - 18s - loss: 0.3142 - acc: 0.9363 - val_loss: 0.2941 - val_acc: 0.9430\n",
+ "\n",
+ "Epoch 02931: val_acc improved from 0.94225 to 0.94304, saving model to ./ModelSnapshots/CNN-2931.h5\n",
+ "Epoch 2932/100000\n",
+ " - 19s - loss: 0.3134 - acc: 0.9366 - val_loss: 0.3210 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 02932: val_acc did not improve from 0.94304\n",
+ "Epoch 2933/100000\n",
+ " - 19s - loss: 0.3116 - acc: 0.9375 - val_loss: 0.3085 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 02933: val_acc did not improve from 0.94304\n",
+ "Epoch 2934/100000\n",
+ " - 18s - loss: 0.3141 - acc: 0.9362 - val_loss: 0.3120 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 02934: val_acc did not improve from 0.94304\n",
+ "Epoch 2935/100000\n",
+ " - 19s - loss: 0.3115 - acc: 0.9378 - val_loss: 0.3214 - val_acc: 0.9285\n",
+ "\n",
+ "Epoch 02935: val_acc did not improve from 0.94304\n",
+ "Epoch 2936/100000\n",
+ " - 19s - loss: 0.3139 - acc: 0.9367 - val_loss: 0.3097 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 02936: val_acc did not improve from 0.94304\n",
+ "Epoch 2937/100000\n",
+ " - 19s - loss: 0.3161 - acc: 0.9357 - val_loss: 0.3381 - val_acc: 0.9159\n",
+ "\n",
+ "Epoch 02937: val_acc did not improve from 0.94304\n",
+ "Epoch 2938/100000\n",
+ " - 18s - loss: 0.3125 - acc: 0.9367 - val_loss: 0.3073 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 02938: val_acc did not improve from 0.94304\n",
+ "Epoch 2939/100000\n",
+ " - 19s - loss: 0.3127 - acc: 0.9367 - val_loss: 0.2982 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02939: val_acc did not improve from 0.94304\n",
+ "Epoch 2940/100000\n",
+ " - 18s - loss: 0.3131 - acc: 0.9368 - val_loss: 0.4198 - val_acc: 0.8701\n",
+ "\n",
+ "Epoch 02940: val_acc did not improve from 0.94304\n",
+ "Epoch 2941/100000\n",
+ " - 19s - loss: 0.3172 - acc: 0.9360 - val_loss: 0.3651 - val_acc: 0.9016\n",
+ "\n",
+ "Epoch 02941: val_acc did not improve from 0.94304\n",
+ "Epoch 2942/100000\n",
+ " - 18s - loss: 0.3136 - acc: 0.9361 - val_loss: 0.3114 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 02942: val_acc did not improve from 0.94304\n",
+ "Epoch 2943/100000\n",
+ " - 19s - loss: 0.3108 - acc: 0.9374 - val_loss: 0.3058 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 02943: val_acc did not improve from 0.94304\n",
+ "Epoch 2944/100000\n",
+ " - 19s - loss: 0.3133 - acc: 0.9372 - val_loss: 0.3065 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 02944: val_acc did not improve from 0.94304\n",
+ "Epoch 2945/100000\n",
+ " - 18s - loss: 0.3134 - acc: 0.9371 - val_loss: 0.3043 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 02945: val_acc did not improve from 0.94304\n",
+ "Epoch 2946/100000\n",
+ " - 19s - loss: 0.3138 - acc: 0.9371 - val_loss: 0.3824 - val_acc: 0.8934\n",
+ "\n",
+ "Epoch 02946: val_acc did not improve from 0.94304\n",
+ "Epoch 2947/100000\n",
+ " - 18s - loss: 0.3126 - acc: 0.9372 - val_loss: 0.3157 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 02947: val_acc did not improve from 0.94304\n",
+ "Epoch 2948/100000\n",
+ " - 18s - loss: 0.3129 - acc: 0.9371 - val_loss: 0.3048 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 02948: val_acc did not improve from 0.94304\n",
+ "Epoch 2949/100000\n",
+ " - 19s - loss: 0.3116 - acc: 0.9368 - val_loss: 0.3442 - val_acc: 0.9177\n",
+ "\n",
+ "Epoch 02949: val_acc did not improve from 0.94304\n",
+ "Epoch 2950/100000\n",
+ " - 19s - loss: 0.3115 - acc: 0.9376 - val_loss: 0.3514 - val_acc: 0.9119\n",
+ "\n",
+ "Epoch 02950: val_acc did not improve from 0.94304\n",
+ "Epoch 2951/100000\n",
+ " - 19s - loss: 0.3119 - acc: 0.9373 - val_loss: 0.3128 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 02951: val_acc did not improve from 0.94304\n",
+ "Epoch 2952/100000\n",
+ " - 19s - loss: 0.3129 - acc: 0.9370 - val_loss: 0.2992 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02952: val_acc did not improve from 0.94304\n",
+ "Epoch 2953/100000\n",
+ " - 19s - loss: 0.3150 - acc: 0.9360 - val_loss: 0.3061 - val_acc: 0.9349\n",
+ "\n",
+ "Epoch 02953: val_acc did not improve from 0.94304\n",
+ "Epoch 2954/100000\n",
+ " - 18s - loss: 0.3142 - acc: 0.9368 - val_loss: 0.3075 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02954: val_acc did not improve from 0.94304\n",
+ "Epoch 2955/100000\n",
+ " - 19s - loss: 0.3117 - acc: 0.9374 - val_loss: 0.3287 - val_acc: 0.9247\n",
+ "\n",
+ "Epoch 02955: val_acc did not improve from 0.94304\n",
+ "Epoch 2956/100000\n",
+ " - 19s - loss: 0.3111 - acc: 0.9373 - val_loss: 0.3157 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 02956: val_acc did not improve from 0.94304\n",
+ "Epoch 2957/100000\n",
+ " - 19s - loss: 0.3136 - acc: 0.9371 - val_loss: 0.3593 - val_acc: 0.9078\n",
+ "\n",
+ "Epoch 02957: val_acc did not improve from 0.94304\n",
+ "Epoch 2958/100000\n",
+ " - 18s - loss: 0.3108 - acc: 0.9375 - val_loss: 0.3423 - val_acc: 0.9338\n",
+ "\n",
+ "Epoch 02958: val_acc did not improve from 0.94304\n",
+ "Epoch 2959/100000\n",
+ " - 19s - loss: 0.3138 - acc: 0.9374 - val_loss: 0.3018 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 02959: val_acc did not improve from 0.94304\n",
+ "Epoch 2960/100000\n",
+ " - 19s - loss: 0.3124 - acc: 0.9369 - val_loss: 0.3666 - val_acc: 0.9145\n",
+ "\n",
+ "Epoch 02960: val_acc did not improve from 0.94304\n",
+ "Epoch 2961/100000\n",
+ " - 19s - loss: 0.3129 - acc: 0.9369 - val_loss: 0.3044 - val_acc: 0.9328\n",
+ "\n",
+ "Epoch 02961: val_acc did not improve from 0.94304\n",
+ "Epoch 2962/100000\n",
+ " - 19s - loss: 0.3122 - acc: 0.9369 - val_loss: 0.3599 - val_acc: 0.9058\n",
+ "\n",
+ "Epoch 02962: val_acc did not improve from 0.94304\n",
+ "Epoch 2963/100000\n",
+ " - 19s - loss: 0.3126 - acc: 0.9366 - val_loss: 0.3026 - val_acc: 0.9400\n",
+ "\n",
+ "Epoch 02963: val_acc did not improve from 0.94304\n",
+ "Epoch 2964/100000\n",
+ " - 19s - loss: 0.3155 - acc: 0.9361 - val_loss: 0.3283 - val_acc: 0.9199\n",
+ "\n",
+ "Epoch 02964: val_acc did not improve from 0.94304\n",
+ "Epoch 2965/100000\n",
+ " - 18s - loss: 0.3112 - acc: 0.9377 - val_loss: 0.3065 - val_acc: 0.9356\n",
+ "\n",
+ "Epoch 02965: val_acc did not improve from 0.94304\n",
+ "Epoch 2966/100000\n",
+ " - 18s - loss: 0.3152 - acc: 0.9370 - val_loss: 0.3454 - val_acc: 0.9164\n",
+ "\n",
+ "Epoch 02966: val_acc did not improve from 0.94304\n",
+ "Epoch 2967/100000\n",
+ " - 19s - loss: 0.3128 - acc: 0.9374 - val_loss: 0.3013 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 02967: val_acc did not improve from 0.94304\n",
+ "Epoch 2968/100000\n",
+ " - 18s - loss: 0.3126 - acc: 0.9375 - val_loss: 0.3361 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 02968: val_acc did not improve from 0.94304\n",
+ "Epoch 2969/100000\n",
+ " - 18s - loss: 0.3110 - acc: 0.9372 - val_loss: 0.3153 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 02969: val_acc did not improve from 0.94304\n",
+ "Epoch 2970/100000\n",
+ " - 19s - loss: 0.3134 - acc: 0.9378 - val_loss: 0.3074 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 02970: val_acc did not improve from 0.94304\n",
+ "\n",
+ "Epoch 02970: ReduceLROnPlateau reducing learning rate to 0.0005133419937919825.\n",
+ "Epoch 2971/100000\n",
+ " - 18s - loss: 0.3113 - acc: 0.9367 - val_loss: 0.3777 - val_acc: 0.8963\n",
+ "\n",
+ "Epoch 02971: val_acc did not improve from 0.94304\n",
+ "Epoch 2972/100000\n",
+ " - 19s - loss: 0.3085 - acc: 0.9370 - val_loss: 0.2987 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 02972: val_acc did not improve from 0.94304\n",
+ "Epoch 2973/100000\n",
+ " - 18s - loss: 0.3064 - acc: 0.9372 - val_loss: 0.3204 - val_acc: 0.9200\n",
+ "\n",
+ "Epoch 02973: val_acc did not improve from 0.94304\n",
+ "Epoch 2974/100000\n",
+ " - 18s - loss: 0.3037 - acc: 0.9383 - val_loss: 0.2977 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 02974: val_acc did not improve from 0.94304\n",
+ "Epoch 2975/100000\n",
+ " - 19s - loss: 0.3049 - acc: 0.9378 - val_loss: 0.3012 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 02975: val_acc did not improve from 0.94304\n",
+ "Epoch 2976/100000\n",
+ " - 18s - loss: 0.3056 - acc: 0.9374 - val_loss: 0.3031 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 02976: val_acc did not improve from 0.94304\n",
+ "Epoch 2977/100000\n",
+ " - 19s - loss: 0.3065 - acc: 0.9373 - val_loss: 0.2935 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 02977: val_acc did not improve from 0.94304\n",
+ "Epoch 2978/100000\n",
+ " - 18s - loss: 0.3061 - acc: 0.9373 - val_loss: 0.3027 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 02978: val_acc did not improve from 0.94304\n",
+ "Epoch 2979/100000\n",
+ " - 18s - loss: 0.3059 - acc: 0.9370 - val_loss: 0.3450 - val_acc: 0.9113\n",
+ "\n",
+ "Epoch 02979: val_acc did not improve from 0.94304\n",
+ "Epoch 2980/100000\n",
+ " - 19s - loss: 0.3073 - acc: 0.9371 - val_loss: 0.3051 - val_acc: 0.9336\n",
+ "\n",
+ "Epoch 02980: val_acc did not improve from 0.94304\n",
+ "Epoch 2981/100000\n",
+ " - 19s - loss: 0.3057 - acc: 0.9373 - val_loss: 0.2952 - val_acc: 0.9351\n",
+ "\n",
+ "Epoch 02981: val_acc did not improve from 0.94304\n",
+ "Epoch 2982/100000\n",
+ " - 19s - loss: 0.3049 - acc: 0.9377 - val_loss: 0.2973 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 02982: val_acc did not improve from 0.94304\n",
+ "Epoch 2983/100000\n",
+ " - 18s - loss: 0.3050 - acc: 0.9380 - val_loss: 0.2944 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 02983: val_acc did not improve from 0.94304\n",
+ "Epoch 2984/100000\n",
+ " - 19s - loss: 0.3058 - acc: 0.9374 - val_loss: 0.3177 - val_acc: 0.9289\n",
+ "\n",
+ "Epoch 02984: val_acc did not improve from 0.94304\n",
+ "Epoch 2985/100000\n",
+ " - 19s - loss: 0.3069 - acc: 0.9378 - val_loss: 0.3013 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 02985: val_acc did not improve from 0.94304\n",
+ "Epoch 2986/100000\n",
+ " - 19s - loss: 0.3069 - acc: 0.9377 - val_loss: 0.3304 - val_acc: 0.9155\n",
+ "\n",
+ "Epoch 02986: val_acc did not improve from 0.94304\n",
+ "Epoch 2987/100000\n",
+ " - 18s - loss: 0.3078 - acc: 0.9372 - val_loss: 0.3098 - val_acc: 0.9362\n",
+ "\n",
+ "Epoch 02987: val_acc did not improve from 0.94304\n",
+ "Epoch 2988/100000\n",
+ " - 19s - loss: 0.3073 - acc: 0.9374 - val_loss: 0.3190 - val_acc: 0.9261\n",
+ "\n",
+ "Epoch 02988: val_acc did not improve from 0.94304\n",
+ "Epoch 2989/100000\n",
+ " - 19s - loss: 0.3071 - acc: 0.9369 - val_loss: 0.2924 - val_acc: 0.9402\n",
+ "\n",
+ "Epoch 02989: val_acc did not improve from 0.94304\n",
+ "Epoch 2990/100000\n",
+ " - 19s - loss: 0.3059 - acc: 0.9375 - val_loss: 0.2919 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 02990: val_acc did not improve from 0.94304\n",
+ "Epoch 2991/100000\n",
+ " - 19s - loss: 0.3061 - acc: 0.9370 - val_loss: 0.2994 - val_acc: 0.9337\n",
+ "\n",
+ "Epoch 02991: val_acc did not improve from 0.94304\n",
+ "Epoch 2992/100000\n",
+ " - 19s - loss: 0.3024 - acc: 0.9380 - val_loss: 0.2994 - val_acc: 0.9342\n",
+ "\n",
+ "Epoch 02992: val_acc did not improve from 0.94304\n",
+ "Epoch 2993/100000\n",
+ " - 19s - loss: 0.3048 - acc: 0.9375 - val_loss: 0.3206 - val_acc: 0.9239\n",
+ "\n",
+ "Epoch 02993: val_acc did not improve from 0.94304\n",
+ "Epoch 2994/100000\n",
+ " - 18s - loss: 0.3070 - acc: 0.9371 - val_loss: 0.3311 - val_acc: 0.9157\n",
+ "\n",
+ "Epoch 02994: val_acc did not improve from 0.94304\n",
+ "Epoch 2995/100000\n",
+ " - 19s - loss: 0.3031 - acc: 0.9387 - val_loss: 0.3056 - val_acc: 0.9285\n",
+ "\n",
+ "Epoch 02995: val_acc did not improve from 0.94304\n",
+ "Epoch 2996/100000\n",
+ " - 18s - loss: 0.3071 - acc: 0.9371 - val_loss: 0.2975 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 02996: val_acc did not improve from 0.94304\n",
+ "Epoch 2997/100000\n",
+ " - 19s - loss: 0.3092 - acc: 0.9370 - val_loss: 0.3102 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 02997: val_acc did not improve from 0.94304\n",
+ "Epoch 2998/100000\n",
+ " - 18s - loss: 0.3052 - acc: 0.9377 - val_loss: 0.3259 - val_acc: 0.9198\n",
+ "\n",
+ "Epoch 02998: val_acc did not improve from 0.94304\n",
+ "Epoch 2999/100000\n",
+ " - 19s - loss: 0.3077 - acc: 0.9369 - val_loss: 0.3729 - val_acc: 0.8995\n",
+ "\n",
+ "Epoch 02999: val_acc did not improve from 0.94304\n",
+ "Epoch 3000/100000\n",
+ " - 18s - loss: 0.3085 - acc: 0.9363 - val_loss: 0.3352 - val_acc: 0.9266\n",
+ "\n",
+ "Epoch 03000: val_acc did not improve from 0.94304\n",
+ "Epoch 3001/100000\n",
+ " - 19s - loss: 0.3077 - acc: 0.9369 - val_loss: 0.2941 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 03001: val_acc did not improve from 0.94304\n",
+ "Epoch 3002/100000\n",
+ " - 19s - loss: 0.3066 - acc: 0.9369 - val_loss: 0.3770 - val_acc: 0.8919\n",
+ "\n",
+ "Epoch 03002: val_acc did not improve from 0.94304\n",
+ "Epoch 3003/100000\n",
+ " - 18s - loss: 0.3057 - acc: 0.9375 - val_loss: 0.3034 - val_acc: 0.9305\n",
+ "\n",
+ "Epoch 03003: val_acc did not improve from 0.94304\n",
+ "Epoch 3004/100000\n",
+ " - 19s - loss: 0.3080 - acc: 0.9371 - val_loss: 0.3244 - val_acc: 0.9227\n",
+ "\n",
+ "Epoch 03004: val_acc did not improve from 0.94304\n",
+ "Epoch 3005/100000\n",
+ " - 19s - loss: 0.3039 - acc: 0.9379 - val_loss: 0.3105 - val_acc: 0.9275\n",
+ "\n",
+ "Epoch 03005: val_acc did not improve from 0.94304\n",
+ "Epoch 3006/100000\n",
+ " - 19s - loss: 0.3070 - acc: 0.9371 - val_loss: 0.3640 - val_acc: 0.9075\n",
+ "\n",
+ "Epoch 03006: val_acc did not improve from 0.94304\n",
+ "Epoch 3007/100000\n",
+ " - 19s - loss: 0.3078 - acc: 0.9371 - val_loss: 0.3673 - val_acc: 0.9123\n",
+ "\n",
+ "Epoch 03007: val_acc did not improve from 0.94304\n",
+ "Epoch 3008/100000\n",
+ " - 19s - loss: 0.3095 - acc: 0.9370 - val_loss: 0.2987 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 03008: val_acc did not improve from 0.94304\n",
+ "Epoch 3009/100000\n",
+ " - 19s - loss: 0.3059 - acc: 0.9374 - val_loss: 0.2992 - val_acc: 0.9384\n",
+ "\n",
+ "Epoch 03009: val_acc did not improve from 0.94304\n",
+ "Epoch 3010/100000\n",
+ " - 19s - loss: 0.3102 - acc: 0.9372 - val_loss: 0.3152 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 03010: val_acc did not improve from 0.94304\n",
+ "Epoch 3011/100000\n",
+ " - 19s - loss: 0.3062 - acc: 0.9378 - val_loss: 0.3093 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 03011: val_acc did not improve from 0.94304\n",
+ "Epoch 3012/100000\n",
+ " - 18s - loss: 0.3075 - acc: 0.9370 - val_loss: 0.3008 - val_acc: 0.9358\n",
+ "\n",
+ "Epoch 03012: val_acc did not improve from 0.94304\n",
+ "Epoch 3013/100000\n",
+ " - 19s - loss: 0.3056 - acc: 0.9376 - val_loss: 0.3104 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 03013: val_acc did not improve from 0.94304\n",
+ "Epoch 3014/100000\n",
+ " - 18s - loss: 0.3068 - acc: 0.9376 - val_loss: 0.3239 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 03014: val_acc did not improve from 0.94304\n",
+ "Epoch 3015/100000\n",
+ " - 19s - loss: 0.3088 - acc: 0.9369 - val_loss: 1.3347 - val_acc: 0.3410\n",
+ "\n",
+ "Epoch 03015: val_acc did not improve from 0.94304\n",
+ "Epoch 3016/100000\n",
+ " - 19s - loss: 0.3054 - acc: 0.9375 - val_loss: 0.4498 - val_acc: 0.8558\n",
+ "\n",
+ "Epoch 03016: val_acc did not improve from 0.94304\n",
+ "Epoch 3017/100000\n",
+ " - 19s - loss: 0.3081 - acc: 0.9364 - val_loss: 0.3004 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 03017: val_acc did not improve from 0.94304\n",
+ "Epoch 3018/100000\n",
+ " - 19s - loss: 0.3074 - acc: 0.9375 - val_loss: 0.3651 - val_acc: 0.9028\n",
+ "\n",
+ "Epoch 03018: val_acc did not improve from 0.94304\n",
+ "Epoch 3019/100000\n",
+ " - 18s - loss: 0.3064 - acc: 0.9379 - val_loss: 0.3062 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 03019: val_acc did not improve from 0.94304\n",
+ "Epoch 3020/100000\n",
+ " - 19s - loss: 0.3095 - acc: 0.9363 - val_loss: 0.3107 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 03020: val_acc did not improve from 0.94304\n",
+ "Epoch 3021/100000\n",
+ " - 18s - loss: 0.3061 - acc: 0.9375 - val_loss: 0.3072 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 03021: val_acc did not improve from 0.94304\n",
+ "Epoch 3022/100000\n",
+ " - 19s - loss: 0.3081 - acc: 0.9368 - val_loss: 0.3269 - val_acc: 0.9200\n",
+ "\n",
+ "Epoch 03022: val_acc did not improve from 0.94304\n",
+ "Epoch 3023/100000\n",
+ " - 18s - loss: 0.3061 - acc: 0.9381 - val_loss: 0.2915 - val_acc: 0.9410\n",
+ "\n",
+ "Epoch 03023: val_acc did not improve from 0.94304\n",
+ "Epoch 3024/100000\n",
+ " - 18s - loss: 0.3058 - acc: 0.9376 - val_loss: 0.3054 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 03024: val_acc did not improve from 0.94304\n",
+ "Epoch 3025/100000\n",
+ " - 19s - loss: 0.3063 - acc: 0.9369 - val_loss: 0.3510 - val_acc: 0.9051\n",
+ "\n",
+ "Epoch 03025: val_acc did not improve from 0.94304\n",
+ "Epoch 3026/100000\n",
+ " - 18s - loss: 0.3069 - acc: 0.9369 - val_loss: 0.3174 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 03026: val_acc did not improve from 0.94304\n",
+ "Epoch 3027/100000\n",
+ " - 19s - loss: 0.3063 - acc: 0.9372 - val_loss: 0.3686 - val_acc: 0.8980\n",
+ "\n",
+ "Epoch 03027: val_acc did not improve from 0.94304\n",
+ "Epoch 3028/100000\n",
+ " - 18s - loss: 0.3064 - acc: 0.9371 - val_loss: 0.3006 - val_acc: 0.9327\n",
+ "\n",
+ "Epoch 03028: val_acc did not improve from 0.94304\n",
+ "Epoch 3029/100000\n",
+ " - 19s - loss: 0.3046 - acc: 0.9382 - val_loss: 0.3270 - val_acc: 0.9240\n",
+ "\n",
+ "Epoch 03029: val_acc did not improve from 0.94304\n",
+ "Epoch 3030/100000\n",
+ " - 18s - loss: 0.3085 - acc: 0.9367 - val_loss: 0.3243 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 03030: val_acc did not improve from 0.94304\n",
+ "Epoch 3031/100000\n",
+ " - 19s - loss: 0.3057 - acc: 0.9378 - val_loss: 0.3836 - val_acc: 0.8936\n",
+ "\n",
+ "Epoch 03031: val_acc did not improve from 0.94304\n",
+ "Epoch 3032/100000\n",
+ " - 19s - loss: 0.3057 - acc: 0.9377 - val_loss: 0.8802 - val_acc: 0.5064\n",
+ "\n",
+ "Epoch 03032: val_acc did not improve from 0.94304\n",
+ "Epoch 3033/100000\n",
+ " - 18s - loss: 0.3067 - acc: 0.9371 - val_loss: 0.2938 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 03033: val_acc did not improve from 0.94304\n",
+ "Epoch 3034/100000\n",
+ " - 19s - loss: 0.3067 - acc: 0.9371 - val_loss: 0.2986 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 03034: val_acc did not improve from 0.94304\n",
+ "Epoch 3035/100000\n",
+ " - 18s - loss: 0.3055 - acc: 0.9373 - val_loss: 0.3246 - val_acc: 0.9285\n",
+ "\n",
+ "Epoch 03035: val_acc did not improve from 0.94304\n",
+ "Epoch 3036/100000\n",
+ " - 19s - loss: 0.3052 - acc: 0.9377 - val_loss: 0.3329 - val_acc: 0.9187\n",
+ "\n",
+ "Epoch 03036: val_acc did not improve from 0.94304\n",
+ "Epoch 3037/100000\n",
+ " - 18s - loss: 0.3057 - acc: 0.9373 - val_loss: 0.2892 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 03037: val_acc did not improve from 0.94304\n",
+ "Epoch 3038/100000\n",
+ " - 19s - loss: 0.3050 - acc: 0.9377 - val_loss: 0.2884 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 03038: val_acc did not improve from 0.94304\n",
+ "Epoch 3039/100000\n",
+ " - 18s - loss: 0.3076 - acc: 0.9370 - val_loss: 0.3069 - val_acc: 0.9293\n",
+ "\n",
+ "Epoch 03039: val_acc did not improve from 0.94304\n",
+ "Epoch 3040/100000\n",
+ " - 18s - loss: 0.3066 - acc: 0.9372 - val_loss: 0.3303 - val_acc: 0.9228\n",
+ "\n",
+ "Epoch 03040: val_acc did not improve from 0.94304\n",
+ "Epoch 3041/100000\n",
+ " - 19s - loss: 0.3060 - acc: 0.9373 - val_loss: 0.2938 - val_acc: 0.9393\n",
+ "\n",
+ "Epoch 03041: val_acc did not improve from 0.94304\n",
+ "Epoch 3042/100000\n",
+ " - 18s - loss: 0.3054 - acc: 0.9375 - val_loss: 0.3025 - val_acc: 0.9332\n",
+ "\n",
+ "Epoch 03042: val_acc did not improve from 0.94304\n",
+ "Epoch 3043/100000\n",
+ " - 19s - loss: 0.3075 - acc: 0.9371 - val_loss: 0.3080 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 03043: val_acc did not improve from 0.94304\n",
+ "Epoch 3044/100000\n",
+ " - 18s - loss: 0.3065 - acc: 0.9374 - val_loss: 0.2991 - val_acc: 0.9359\n",
+ "\n",
+ "Epoch 03044: val_acc did not improve from 0.94304\n",
+ "Epoch 3045/100000\n",
+ " - 19s - loss: 0.3087 - acc: 0.9372 - val_loss: 0.3193 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 03045: val_acc did not improve from 0.94304\n",
+ "Epoch 3046/100000\n",
+ " - 18s - loss: 0.3096 - acc: 0.9367 - val_loss: 0.3144 - val_acc: 0.9372\n",
+ "\n",
+ "Epoch 03046: val_acc did not improve from 0.94304\n",
+ "Epoch 3047/100000\n",
+ " - 19s - loss: 0.3083 - acc: 0.9372 - val_loss: 0.4617 - val_acc: 0.8495\n",
+ "\n",
+ "Epoch 03047: val_acc did not improve from 0.94304\n",
+ "Epoch 3048/100000\n",
+ " - 19s - loss: 0.3065 - acc: 0.9380 - val_loss: 0.3201 - val_acc: 0.9283\n",
+ "\n",
+ "Epoch 03048: val_acc did not improve from 0.94304\n",
+ "Epoch 3049/100000\n",
+ " - 18s - loss: 0.3076 - acc: 0.9368 - val_loss: 0.2988 - val_acc: 0.9373\n",
+ "\n",
+ "Epoch 03049: val_acc did not improve from 0.94304\n",
+ "Epoch 3050/100000\n",
+ " - 19s - loss: 0.3043 - acc: 0.9380 - val_loss: 0.3050 - val_acc: 0.9369\n",
+ "\n",
+ "Epoch 03050: val_acc did not improve from 0.94304\n",
+ "Epoch 3051/100000\n",
+ " - 19s - loss: 0.3079 - acc: 0.9368 - val_loss: 0.3312 - val_acc: 0.9333\n",
+ "\n",
+ "Epoch 03051: val_acc did not improve from 0.94304\n",
+ "Epoch 3052/100000\n",
+ " - 19s - loss: 0.3086 - acc: 0.9370 - val_loss: 0.3069 - val_acc: 0.9370\n",
+ "\n",
+ "Epoch 03052: val_acc did not improve from 0.94304\n",
+ "Epoch 3053/100000\n",
+ " - 18s - loss: 0.3113 - acc: 0.9363 - val_loss: 0.3058 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 03053: val_acc did not improve from 0.94304\n",
+ "Epoch 3054/100000\n",
+ " - 19s - loss: 0.3047 - acc: 0.9379 - val_loss: 0.2929 - val_acc: 0.9357\n",
+ "\n",
+ "Epoch 03054: val_acc did not improve from 0.94304\n",
+ "Epoch 3055/100000\n",
+ " - 18s - loss: 0.3065 - acc: 0.9373 - val_loss: 0.3973 - val_acc: 0.9227\n",
+ "\n",
+ "Epoch 03055: val_acc did not improve from 0.94304\n",
+ "Epoch 3056/100000\n",
+ " - 19s - loss: 0.3067 - acc: 0.9371 - val_loss: 0.3329 - val_acc: 0.9187\n",
+ "\n",
+ "Epoch 03056: val_acc did not improve from 0.94304\n",
+ "Epoch 3057/100000\n",
+ " - 19s - loss: 0.3044 - acc: 0.9380 - val_loss: 0.3077 - val_acc: 0.9297\n",
+ "\n",
+ "Epoch 03063: val_acc did not improve from 0.94304\n",
+ "Epoch 3064/100000\n",
+ " - 19s - loss: 0.3072 - acc: 0.9376 - val_loss: 0.3255 - val_acc: 0.9198\n",
+ "\n",
+ "Epoch 03064: val_acc did not improve from 0.94304\n",
+ "Epoch 3065/100000\n",
+ " - 19s - loss: 0.3072 - acc: 0.9371 - val_loss: 0.2982 - val_acc: 0.9329\n",
+ "\n",
+ "Epoch 03065: val_acc did not improve from 0.94304\n",
+ "Epoch 3066/100000\n",
+ " - 19s - loss: 0.3075 - acc: 0.9367 - val_loss: 0.3134 - val_acc: 0.9232\n",
+ "\n",
+ "Epoch 03066: val_acc did not improve from 0.94304\n",
+ "Epoch 3067/100000\n",
+ " - 18s - loss: 0.3041 - acc: 0.9374 - val_loss: 0.3434 - val_acc: 0.9165\n",
+ "\n",
+ "Epoch 03067: val_acc did not improve from 0.94304\n",
+ "Epoch 3068/100000\n",
+ " - 19s - loss: 0.3061 - acc: 0.9373 - val_loss: 0.3057 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 03068: val_acc did not improve from 0.94304\n",
+ "Epoch 3069/100000\n",
+ " - 18s - loss: 0.3058 - acc: 0.9371 - val_loss: 0.2997 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 03069: val_acc did not improve from 0.94304\n",
+ "Epoch 3070/100000\n",
+ " - 19s - loss: 0.3059 - acc: 0.9374 - val_loss: 0.3157 - val_acc: 0.9257\n",
+ "\n",
+ "Epoch 03070: val_acc did not improve from 0.94304\n",
+ "Epoch 3071/100000\n",
+ " - 18s - loss: 0.3052 - acc: 0.9375 - val_loss: 0.3393 - val_acc: 0.9217\n",
+ "\n",
+ "Epoch 03071: val_acc did not improve from 0.94304\n",
+ "Epoch 3072/100000\n",
+ " - 18s - loss: 0.3093 - acc: 0.9363 - val_loss: 0.3119 - val_acc: 0.9298\n",
+ "\n",
+ "Epoch 03072: val_acc did not improve from 0.94304\n",
+ "Epoch 3073/100000\n",
+ " - 19s - loss: 0.3074 - acc: 0.9375 - val_loss: 0.3112 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 03073: val_acc did not improve from 0.94304\n",
+ "Epoch 3074/100000\n",
+ " - 18s - loss: 0.3074 - acc: 0.9371 - val_loss: 0.2980 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 03074: val_acc did not improve from 0.94304\n",
+ "Epoch 3075/100000\n",
+ " - 19s - loss: 0.3068 - acc: 0.9380 - val_loss: 0.4343 - val_acc: 0.8745\n",
+ "\n",
+ "Epoch 03075: val_acc did not improve from 0.94304\n",
+ "Epoch 3076/100000\n",
+ " - 18s - loss: 0.3077 - acc: 0.9369 - val_loss: 0.3055 - val_acc: 0.9323\n",
+ "\n",
+ "Epoch 03076: val_acc did not improve from 0.94304\n",
+ "Epoch 3077/100000\n",
+ " - 19s - loss: 0.3071 - acc: 0.9377 - val_loss: 0.3081 - val_acc: 0.9295\n",
+ "\n",
+ "Epoch 03077: val_acc did not improve from 0.94304\n",
+ "Epoch 3078/100000\n",
+ " - 19s - loss: 0.3089 - acc: 0.9374 - val_loss: 0.3407 - val_acc: 0.9102\n",
+ "\n",
+ "Epoch 03078: val_acc did not improve from 0.94304\n",
+ "Epoch 3079/100000\n",
+ " - 18s - loss: 0.3052 - acc: 0.9379 - val_loss: 0.3000 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 03079: val_acc did not improve from 0.94304\n",
+ "Epoch 3080/100000\n",
+ " - 18s - loss: 0.3062 - acc: 0.9373 - val_loss: 0.3151 - val_acc: 0.9320\n",
+ "\n",
+ "Epoch 03080: val_acc did not improve from 0.94304\n",
+ "Epoch 3081/100000\n",
+ " - 19s - loss: 0.3049 - acc: 0.9372 - val_loss: 0.2984 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 03081: val_acc did not improve from 0.94304\n",
+ "Epoch 3082/100000\n",
+ " - 18s - loss: 0.3062 - acc: 0.9376 - val_loss: 0.2963 - val_acc: 0.9393\n",
+ "\n",
+ "Epoch 03082: val_acc did not improve from 0.94304\n",
+ "Epoch 3083/100000\n",
+ " - 18s - loss: 0.3088 - acc: 0.9367 - val_loss: 0.3228 - val_acc: 0.9247\n",
+ "\n",
+ "Epoch 03083: val_acc did not improve from 0.94304\n",
+ "Epoch 3084/100000\n",
+ " - 19s - loss: 0.3047 - acc: 0.9378 - val_loss: 0.3067 - val_acc: 0.9271\n",
+ "\n",
+ "Epoch 03084: val_acc did not improve from 0.94304\n",
+ "Epoch 3085/100000\n",
+ " - 19s - loss: 0.3058 - acc: 0.9373 - val_loss: 0.3029 - val_acc: 0.9318\n",
+ "\n",
+ "Epoch 03085: val_acc did not improve from 0.94304\n",
+ "Epoch 3086/100000\n",
+ " - 19s - loss: 0.3056 - acc: 0.9371 - val_loss: 0.3204 - val_acc: 0.9290\n",
+ "\n",
+ "Epoch 03086: val_acc did not improve from 0.94304\n",
+ "Epoch 3087/100000\n",
+ " - 18s - loss: 0.3068 - acc: 0.9377 - val_loss: 0.3117 - val_acc: 0.9306\n",
+ "\n",
+ "Epoch 03087: val_acc did not improve from 0.94304\n",
+ "Epoch 3088/100000\n",
+ " - 19s - loss: 0.3050 - acc: 0.9378 - val_loss: 0.3289 - val_acc: 0.9155\n",
+ "\n",
+ "Epoch 03088: val_acc did not improve from 0.94304\n",
+ "Epoch 3089/100000\n",
+ " - 18s - loss: 0.3059 - acc: 0.9371 - val_loss: 0.7666 - val_acc: 0.6464\n",
+ "\n",
+ "Epoch 03089: val_acc did not improve from 0.94304\n",
+ "Epoch 3090/100000\n",
+ " - 19s - loss: 0.3060 - acc: 0.9374 - val_loss: 0.3049 - val_acc: 0.9341\n",
+ "\n",
+ "Epoch 03090: val_acc did not improve from 0.94304\n",
+ "Epoch 3091/100000\n",
+ " - 18s - loss: 0.3083 - acc: 0.9364 - val_loss: 0.2953 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 03091: val_acc did not improve from 0.94304\n",
+ "Epoch 3092/100000\n",
+ " - 19s - loss: 0.3125 - acc: 0.9369 - val_loss: 0.2957 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 03092: val_acc did not improve from 0.94304\n",
+ "Epoch 3093/100000\n",
+ " - 18s - loss: 0.3091 - acc: 0.9371 - val_loss: 0.2991 - val_acc: 0.9367\n",
+ "\n",
+ "Epoch 03093: val_acc did not improve from 0.94304\n",
+ "Epoch 3094/100000\n",
+ " - 19s - loss: 0.3057 - acc: 0.9377 - val_loss: 0.2948 - val_acc: 0.9368\n",
+ "\n",
+ "Epoch 03094: val_acc did not improve from 0.94304\n",
+ "Epoch 3095/100000\n",
+ " - 19s - loss: 0.3073 - acc: 0.9374 - val_loss: 0.3284 - val_acc: 0.9251\n",
+ "\n",
+ "Epoch 03095: val_acc did not improve from 0.94304\n",
+ "Epoch 3096/100000\n",
+ " - 19s - loss: 0.3073 - acc: 0.9373 - val_loss: 0.3160 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 03096: val_acc did not improve from 0.94304\n",
+ "Epoch 3097/100000\n",
+ " - 19s - loss: 0.3045 - acc: 0.9378 - val_loss: 0.4257 - val_acc: 0.8796\n",
+ "\n",
+ "Epoch 03097: val_acc did not improve from 0.94304\n",
+ "Epoch 3098/100000\n",
+ " - 19s - loss: 0.3081 - acc: 0.9371 - val_loss: 0.3316 - val_acc: 0.9187\n",
+ "\n",
+ "Epoch 03098: val_acc did not improve from 0.94304\n",
+ "Epoch 3099/100000\n",
+ " - 18s - loss: 0.3067 - acc: 0.9374 - val_loss: 0.3505 - val_acc: 0.9097\n",
+ "\n",
+ "Epoch 03099: val_acc did not improve from 0.94304\n",
+ "Epoch 3100/100000\n",
+ " - 19s - loss: 0.3081 - acc: 0.9368 - val_loss: 0.3152 - val_acc: 0.9299\n",
+ "\n",
+ "Epoch 03100: val_acc did not improve from 0.94304\n",
+ "Epoch 3101/100000\n",
+ " - 18s - loss: 0.3080 - acc: 0.9369 - val_loss: 0.3300 - val_acc: 0.9335\n",
+ "\n",
+ "Epoch 03101: val_acc did not improve from 0.94304\n",
+ "Epoch 3102/100000\n",
+ " - 19s - loss: 0.3056 - acc: 0.9378 - val_loss: 0.2967 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 03102: val_acc did not improve from 0.94304\n",
+ "Epoch 3103/100000\n",
+ " - 18s - loss: 0.3045 - acc: 0.9378 - val_loss: 0.3180 - val_acc: 0.9256\n",
+ "\n",
+ "Epoch 03103: val_acc did not improve from 0.94304\n",
+ "Epoch 3104/100000\n",
+ " - 18s - loss: 0.3060 - acc: 0.9373 - val_loss: 0.3063 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 03104: val_acc did not improve from 0.94304\n",
+ "Epoch 3105/100000\n",
+ " - 18s - loss: 0.3069 - acc: 0.9375 - val_loss: 0.2912 - val_acc: 0.9386\n",
+ "\n",
+ "Epoch 03105: val_acc did not improve from 0.94304\n",
+ "Epoch 3106/100000\n",
+ " - 18s - loss: 0.3085 - acc: 0.9370 - val_loss: 0.4813 - val_acc: 0.8491\n",
+ "\n",
+ "Epoch 03106: val_acc did not improve from 0.94304\n",
+ "Epoch 3107/100000\n",
+ " - 19s - loss: 0.3068 - acc: 0.9377 - val_loss: 0.2939 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 03107: val_acc did not improve from 0.94304\n",
+ "Epoch 3108/100000\n",
+ " - 18s - loss: 0.3056 - acc: 0.9374 - val_loss: 0.2990 - val_acc: 0.9343\n",
+ "\n",
+ "Epoch 03108: val_acc did not improve from 0.94304\n",
+ "Epoch 3109/100000\n",
+ " - 19s - loss: 0.3064 - acc: 0.9373 - val_loss: 0.2968 - val_acc: 0.9364\n",
+ "\n",
+ "Epoch 03109: val_acc did not improve from 0.94304\n",
+ "Epoch 3110/100000\n",
+ " - 18s - loss: 0.3060 - acc: 0.9376 - val_loss: 0.3127 - val_acc: 0.9226\n",
+ "\n",
+ "Epoch 03110: val_acc did not improve from 0.94304\n",
+ "Epoch 3111/100000\n",
+ " - 19s - loss: 0.3056 - acc: 0.9375 - val_loss: 0.3570 - val_acc: 0.9255\n",
+ "\n",
+ "Epoch 03111: val_acc did not improve from 0.94304\n",
+ "Epoch 3112/100000\n",
+ " - 18s - loss: 0.3037 - acc: 0.9382 - val_loss: 0.3017 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 03112: val_acc did not improve from 0.94304\n",
+ "Epoch 3113/100000\n",
+ " - 19s - loss: 0.3087 - acc: 0.9370 - val_loss: 0.3007 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 03113: val_acc did not improve from 0.94304\n",
+ "Epoch 3114/100000\n",
+ " - 18s - loss: 0.3070 - acc: 0.9378 - val_loss: 0.2997 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 03114: val_acc did not improve from 0.94304\n",
+ "Epoch 3115/100000\n",
+ " - 19s - loss: 0.3056 - acc: 0.9375 - val_loss: 0.3026 - val_acc: 0.9282\n",
+ "\n",
+ "Epoch 03115: val_acc did not improve from 0.94304\n",
+ "Epoch 3116/100000\n",
+ " - 18s - loss: 0.3077 - acc: 0.9367 - val_loss: 0.2950 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 03116: val_acc did not improve from 0.94304\n",
+ "Epoch 3117/100000\n",
+ " - 18s - loss: 0.3062 - acc: 0.9375 - val_loss: 0.2959 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 03117: val_acc did not improve from 0.94304\n",
+ "Epoch 3118/100000\n",
+ " - 18s - loss: 0.3072 - acc: 0.9366 - val_loss: 0.3058 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 03118: val_acc did not improve from 0.94304\n",
+ "Epoch 3119/100000\n",
+ " - 18s - loss: 0.3080 - acc: 0.9367 - val_loss: 0.4545 - val_acc: 0.8612\n",
+ "\n",
+ "Epoch 03119: val_acc did not improve from 0.94304\n",
+ "Epoch 3120/100000\n",
+ " - 19s - loss: 0.3050 - acc: 0.9372 - val_loss: 0.3711 - val_acc: 0.8987\n",
+ "\n",
+ "Epoch 03120: val_acc did not improve from 0.94304\n",
+ "Epoch 3121/100000\n",
+ " - 18s - loss: 0.3059 - acc: 0.9373 - val_loss: 0.2965 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 03121: val_acc did not improve from 0.94304\n",
+ "Epoch 3122/100000\n",
+ " - 19s - loss: 0.3084 - acc: 0.9365 - val_loss: 0.3181 - val_acc: 0.9354\n",
+ "\n",
+ "Epoch 03122: val_acc did not improve from 0.94304\n",
+ "Epoch 3123/100000\n",
+ " - 18s - loss: 0.3084 - acc: 0.9366 - val_loss: 0.3016 - val_acc: 0.9317\n",
+ "\n",
+ "Epoch 03123: val_acc did not improve from 0.94304\n",
+ "Epoch 3124/100000\n",
+ " - 18s - loss: 0.3071 - acc: 0.9370 - val_loss: 0.3064 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 03124: val_acc did not improve from 0.94304\n",
+ "Epoch 3125/100000\n",
+ " - 18s - loss: 0.3048 - acc: 0.9376 - val_loss: 0.4543 - val_acc: 0.8567\n",
+ "\n",
+ "Epoch 03125: val_acc did not improve from 0.94304\n",
+ "Epoch 3126/100000\n",
+ " - 19s - loss: 0.3079 - acc: 0.9378 - val_loss: 0.3155 - val_acc: 0.9366\n",
+ "\n",
+ "Epoch 03126: val_acc did not improve from 0.94304\n",
+ "Epoch 3127/100000\n",
+ " - 18s - loss: 0.3077 - acc: 0.9378 - val_loss: 0.2989 - val_acc: 0.9344\n",
+ "\n",
+ "Epoch 03127: val_acc did not improve from 0.94304\n",
+ "Epoch 3128/100000\n",
+ " - 19s - loss: 0.3077 - acc: 0.9371 - val_loss: 0.3066 - val_acc: 0.9310\n",
+ "\n",
+ "Epoch 03128: val_acc did not improve from 0.94304\n",
+ "Epoch 3129/100000\n",
+ " - 18s - loss: 0.3078 - acc: 0.9371 - val_loss: 0.3697 - val_acc: 0.8963\n",
+ "\n",
+ "Epoch 03129: val_acc did not improve from 0.94304\n",
+ "Epoch 3130/100000\n",
+ " - 18s - loss: 0.3069 - acc: 0.9369 - val_loss: 0.3060 - val_acc: 0.9353\n",
+ "\n",
+ "Epoch 03130: val_acc did not improve from 0.94304\n",
+ "Epoch 3131/100000\n",
+ " - 18s - loss: 0.3058 - acc: 0.9378 - val_loss: 0.3096 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 03131: val_acc did not improve from 0.94304\n",
+ "Epoch 3132/100000\n",
+ " - 19s - loss: 0.3081 - acc: 0.9371 - val_loss: 0.4269 - val_acc: 0.8692\n",
+ "\n",
+ "Epoch 03132: val_acc did not improve from 0.94304\n",
+ "Epoch 3133/100000\n",
+ " - 18s - loss: 0.3066 - acc: 0.9370 - val_loss: 0.2918 - val_acc: 0.9380\n",
+ "\n",
+ "Epoch 03133: val_acc did not improve from 0.94304\n",
+ "Epoch 3134/100000\n",
+ " - 19s - loss: 0.3078 - acc: 0.9370 - val_loss: 0.4157 - val_acc: 0.8707\n",
+ "\n",
+ "Epoch 03134: val_acc did not improve from 0.94304\n",
+ "Epoch 3135/100000\n",
+ " - 19s - loss: 0.3111 - acc: 0.9363 - val_loss: 0.3089 - val_acc: 0.9347\n",
+ "\n",
+ "Epoch 03135: val_acc did not improve from 0.94304\n",
+ "Epoch 3136/100000\n",
+ " - 19s - loss: 0.3059 - acc: 0.9374 - val_loss: 0.3001 - val_acc: 0.9348\n",
+ "\n",
+ "Epoch 03136: val_acc did not improve from 0.94304\n",
+ "Epoch 3137/100000\n",
+ " - 19s - loss: 0.3077 - acc: 0.9373 - val_loss: 0.2996 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 03137: val_acc did not improve from 0.94304\n",
+ "Epoch 3138/100000\n",
+ " - 18s - loss: 0.3072 - acc: 0.9366 - val_loss: 0.2920 - val_acc: 0.9365\n",
+ "\n",
+ "Epoch 03138: val_acc did not improve from 0.94304\n",
+ "Epoch 3139/100000\n",
+ " - 19s - loss: 0.3078 - acc: 0.9371 - val_loss: 0.3048 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 03139: val_acc did not improve from 0.94304\n",
+ "Epoch 3140/100000\n",
+ " - 18s - loss: 0.3037 - acc: 0.9382 - val_loss: 0.3267 - val_acc: 0.9173\n",
+ "\n",
+ "Epoch 03140: val_acc did not improve from 0.94304\n",
+ "Epoch 3141/100000\n",
+ " - 19s - loss: 0.3050 - acc: 0.9379 - val_loss: 0.3101 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 03141: val_acc did not improve from 0.94304\n",
+ "Epoch 3142/100000\n",
+ " - 18s - loss: 0.3087 - acc: 0.9370 - val_loss: 0.2946 - val_acc: 0.9376\n",
+ "\n",
+ "Epoch 03142: val_acc did not improve from 0.94304\n",
+ "Epoch 3143/100000\n",
+ " - 18s - loss: 0.3066 - acc: 0.9382 - val_loss: 0.8759 - val_acc: 0.5239\n",
+ "\n",
+ "Epoch 03143: val_acc did not improve from 0.94304\n",
+ "Epoch 3144/100000\n",
+ " - 18s - loss: 0.3051 - acc: 0.9381 - val_loss: 0.2953 - val_acc: 0.9378\n",
+ "\n",
+ "Epoch 03144: val_acc did not improve from 0.94304\n",
+ "Epoch 3145/100000\n",
+ " - 18s - loss: 0.3057 - acc: 0.9374 - val_loss: 0.3194 - val_acc: 0.9267\n",
+ "\n",
+ "Epoch 03145: val_acc did not improve from 0.94304\n",
+ "Epoch 3146/100000\n",
+ " - 18s - loss: 0.3058 - acc: 0.9371 - val_loss: 0.3219 - val_acc: 0.9226\n",
+ "\n",
+ "Epoch 03146: val_acc did not improve from 0.94304\n",
+ "Epoch 3147/100000\n",
+ " - 18s - loss: 0.3073 - acc: 0.9366 - val_loss: 0.2973 - val_acc: 0.9363\n",
+ "\n",
+ "Epoch 03147: val_acc did not improve from 0.94304\n",
+ "Epoch 3148/100000\n",
+ " - 19s - loss: 0.3059 - acc: 0.9375 - val_loss: 0.3064 - val_acc: 0.9272\n",
+ "\n",
+ "Epoch 03148: val_acc did not improve from 0.94304\n",
+ "Epoch 3149/100000\n",
+ " - 19s - loss: 0.3058 - acc: 0.9382 - val_loss: 0.3211 - val_acc: 0.9252\n",
+ "\n",
+ "Epoch 03149: val_acc did not improve from 0.94304\n",
+ "Epoch 3150/100000\n",
+ " - 18s - loss: 0.3055 - acc: 0.9377 - val_loss: 0.3082 - val_acc: 0.9287\n",
+ "\n",
+ "Epoch 03150: val_acc did not improve from 0.94304\n",
+ "Epoch 3151/100000\n",
+ " - 19s - loss: 0.3061 - acc: 0.9371 - val_loss: 0.3066 - val_acc: 0.9324\n",
+ "\n",
+ "Epoch 03151: val_acc did not improve from 0.94304\n",
+ "Epoch 3152/100000\n",
+ " - 18s - loss: 0.3087 - acc: 0.9365 - val_loss: 0.3044 - val_acc: 0.9321\n",
+ "\n",
+ "Epoch 03152: val_acc did not improve from 0.94304\n",
+ "Epoch 3153/100000\n",
+ " - 19s - loss: 0.3069 - acc: 0.9379 - val_loss: 0.3525 - val_acc: 0.9100\n",
+ "\n",
+ "Epoch 03153: val_acc did not improve from 0.94304\n",
+ "Epoch 3154/100000\n",
+ " - 19s - loss: 0.3067 - acc: 0.9372 - val_loss: 0.3370 - val_acc: 0.9159\n",
+ "\n",
+ "Epoch 03154: val_acc did not improve from 0.94304\n",
+ "Epoch 3155/100000\n",
+ " - 18s - loss: 0.3061 - acc: 0.9370 - val_loss: 0.2943 - val_acc: 0.9360\n",
+ "\n",
+ "Epoch 03155: val_acc did not improve from 0.94304\n",
+ "Epoch 3156/100000\n",
+ " - 18s - loss: 0.3087 - acc: 0.9367 - val_loss: 0.3152 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 03156: val_acc did not improve from 0.94304\n",
+ "Epoch 3157/100000\n",
+ " - 19s - loss: 0.3063 - acc: 0.9374 - val_loss: 0.4472 - val_acc: 0.8742\n",
+ "\n",
+ "Epoch 03157: val_acc did not improve from 0.94304\n",
+ "Epoch 3158/100000\n",
+ " - 18s - loss: 0.3062 - acc: 0.9373 - val_loss: 0.2993 - val_acc: 0.9361\n",
+ "\n",
+ "Epoch 03158: val_acc did not improve from 0.94304\n",
+ "Epoch 3159/100000\n",
+ " - 19s - loss: 0.3080 - acc: 0.9367 - val_loss: 0.3168 - val_acc: 0.9266\n",
+ "\n",
+ "Epoch 03159: val_acc did not improve from 0.94304\n",
+ "Epoch 3160/100000\n",
+ " - 18s - loss: 0.3072 - acc: 0.9369 - val_loss: 0.3015 - val_acc: 0.9350\n",
+ "\n",
+ "Epoch 03160: val_acc did not improve from 0.94304\n",
+ "Epoch 3161/100000\n",
+ " - 19s - loss: 0.3059 - acc: 0.9378 - val_loss: 0.3216 - val_acc: 0.9251\n",
+ "\n",
+ "Epoch 03161: val_acc did not improve from 0.94304\n",
+ "Epoch 3162/100000\n",
+ " - 19s - loss: 0.3071 - acc: 0.9368 - val_loss: 0.3350 - val_acc: 0.9212\n",
+ "\n",
+ "Epoch 03162: val_acc did not improve from 0.94304\n",
+ "Epoch 3163/100000\n",
+ " - 19s - loss: 0.3111 - acc: 0.9364 - val_loss: 0.3439 - val_acc: 0.9148\n",
+ "\n",
+ "Epoch 03163: val_acc did not improve from 0.94304\n",
+ "Epoch 3164/100000\n",
+ " - 19s - loss: 0.3048 - acc: 0.9387 - val_loss: 0.3586 - val_acc: 0.8973\n",
+ "\n",
+ "Epoch 03164: val_acc did not improve from 0.94304\n",
+ "Epoch 3165/100000\n",
+ " - 19s - loss: 0.3113 - acc: 0.9364 - val_loss: 0.3691 - val_acc: 0.8984\n",
+ "\n",
+ "Epoch 03165: val_acc did not improve from 0.94304\n",
+ "Epoch 3166/100000\n",
+ " - 19s - loss: 0.3076 - acc: 0.9376 - val_loss: 0.3802 - val_acc: 0.8953\n",
+ "\n",
+ "Epoch 03166: val_acc did not improve from 0.94304\n",
+ "Epoch 3167/100000\n",
+ " - 19s - loss: 0.3057 - acc: 0.9377 - val_loss: 0.3180 - val_acc: 0.9266\n",
+ "\n",
+ "Epoch 03167: val_acc did not improve from 0.94304\n",
+ "Epoch 3168/100000\n",
+ " - 18s - loss: 0.3055 - acc: 0.9374 - val_loss: 0.3024 - val_acc: 0.9330\n",
+ "\n",
+ "Epoch 03168: val_acc did not improve from 0.94304\n",
+ "Epoch 3169/100000\n",
+ " - 19s - loss: 0.3065 - acc: 0.9373 - val_loss: 0.2976 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 03169: val_acc did not improve from 0.94304\n",
+ "Epoch 3170/100000\n",
+ " - 19s - loss: 0.3057 - acc: 0.9373 - val_loss: 0.5487 - val_acc: 0.8129\n",
+ "\n",
+ "Epoch 03170: val_acc did not improve from 0.94304\n",
+ "Epoch 3171/100000\n",
+ " - 19s - loss: 0.3075 - acc: 0.9376 - val_loss: 0.3715 - val_acc: 0.9001\n",
+ "\n",
+ "Epoch 03171: val_acc did not improve from 0.94304\n",
+ "Epoch 3172/100000\n",
+ " - 18s - loss: 0.3046 - acc: 0.9384 - val_loss: 0.3271 - val_acc: 0.9189\n",
+ "\n",
+ "Epoch 03172: val_acc did not improve from 0.94304\n",
+ "Epoch 3173/100000\n",
+ " - 19s - loss: 0.3070 - acc: 0.9371 - val_loss: 0.3109 - val_acc: 0.9263\n",
+ "\n",
+ "Epoch 03173: val_acc did not improve from 0.94304\n",
+ "Epoch 3174/100000\n",
+ " - 18s - loss: 0.3081 - acc: 0.9366 - val_loss: 0.2895 - val_acc: 0.9395\n",
+ "\n",
+ "Epoch 03174: val_acc did not improve from 0.94304\n",
+ "Epoch 3175/100000\n",
+ " - 19s - loss: 0.3065 - acc: 0.9369 - val_loss: 0.2914 - val_acc: 0.9387\n",
+ "\n",
+ "Epoch 03175: val_acc did not improve from 0.94304\n",
+ "Epoch 3176/100000\n",
+ " - 18s - loss: 0.3041 - acc: 0.9382 - val_loss: 0.3353 - val_acc: 0.9207\n",
+ "\n",
+ "Epoch 03176: val_acc did not improve from 0.94304\n",
+ "Epoch 3177/100000\n",
+ " - 19s - loss: 0.3062 - acc: 0.9377 - val_loss: 0.3048 - val_acc: 0.9394\n",
+ "\n",
+ "Epoch 03177: val_acc did not improve from 0.94304\n",
+ "Epoch 3178/100000\n",
+ " - 18s - loss: 0.3069 - acc: 0.9372 - val_loss: 0.3574 - val_acc: 0.9108\n",
+ "\n",
+ "Epoch 03178: val_acc did not improve from 0.94304\n",
+ "\n",
+ "Epoch 03178: ReduceLROnPlateau reducing learning rate to 0.0004876748775132.\n",
+ "Epoch 3179/100000\n",
+ " - 19s - loss: 0.3044 - acc: 0.9365 - val_loss: 0.3126 - val_acc: 0.9288\n",
+ "\n",
+ "Epoch 03179: val_acc did not improve from 0.94304\n",
+ "Epoch 3180/100000\n",
+ " - 18s - loss: 0.3016 - acc: 0.9375 - val_loss: 0.2866 - val_acc: 0.9388\n",
+ "\n",
+ "Epoch 03180: val_acc did not improve from 0.94304\n",
+ "Epoch 3181/100000\n",
+ " - 19s - loss: 0.3009 - acc: 0.9380 - val_loss: 0.3235 - val_acc: 0.9150\n",
+ "\n",
+ "Epoch 03181: val_acc did not improve from 0.94304\n",
+ "Epoch 3182/100000\n",
+ " - 19s - loss: 0.3026 - acc: 0.9369 - val_loss: 0.2864 - val_acc: 0.9381\n",
+ "\n",
+ "Epoch 03182: val_acc did not improve from 0.94304\n",
+ "Epoch 3183/100000\n",
+ " - 19s - loss: 0.3015 - acc: 0.9375 - val_loss: 0.3091 - val_acc: 0.9281\n",
+ "\n",
+ "Epoch 03183: val_acc did not improve from 0.94304\n",
+ "Epoch 3184/100000\n",
+ " - 18s - loss: 0.2995 - acc: 0.9382 - val_loss: 0.2989 - val_acc: 0.9339\n",
+ "\n",
+ "Epoch 03184: val_acc did not improve from 0.94304\n",
+ "Epoch 3185/100000\n",
+ " - 19s - loss: 0.3010 - acc: 0.9378 - val_loss: 0.2989 - val_acc: 0.9319\n",
+ "\n",
+ "Epoch 03185: val_acc did not improve from 0.94304\n",
+ "Epoch 3186/100000\n",
+ " - 19s - loss: 0.3016 - acc: 0.9372 - val_loss: 0.3112 - val_acc: 0.9300\n",
+ "\n",
+ "Epoch 03186: val_acc did not improve from 0.94304\n",
+ "Epoch 3187/100000\n",
+ " - 18s - loss: 0.3011 - acc: 0.9375 - val_loss: 0.3042 - val_acc: 0.9325\n",
+ "\n",
+ "Epoch 03187: val_acc did not improve from 0.94304\n",
+ "Epoch 3188/100000\n",
+ " - 18s - loss: 0.3028 - acc: 0.9368 - val_loss: 0.3545 - val_acc: 0.9214\n",
+ "\n",
+ "Epoch 03188: val_acc did not improve from 0.94304\n",
+ "Epoch 3189/100000\n",
+ " - 18s - loss: 0.3024 - acc: 0.9376 - val_loss: 0.2885 - val_acc: 0.9403\n",
+ "\n",
+ "Epoch 03189: val_acc did not improve from 0.94304\n",
+ "Epoch 3190/100000\n",
+ " - 18s - loss: 0.2998 - acc: 0.9378 - val_loss: 0.3087 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 03190: val_acc did not improve from 0.94304\n",
+ "Epoch 3191/100000\n",
+ " - 19s - loss: 0.3004 - acc: 0.9385 - val_loss: 0.6119 - val_acc: 0.7614\n",
+ "\n",
+ "Epoch 03191: val_acc did not improve from 0.94304\n",
+ "Epoch 3192/100000\n",
+ " - 18s - loss: 0.3000 - acc: 0.9382 - val_loss: 0.3348 - val_acc: 0.9104\n",
+ "\n",
+ "Epoch 03192: val_acc did not improve from 0.94304\n",
+ "Epoch 3193/100000\n",
+ " - 19s - loss: 0.3029 - acc: 0.9369 - val_loss: 0.2857 - val_acc: 0.9385\n",
+ "\n",
+ "Epoch 03193: val_acc did not improve from 0.94304\n",
+ "Epoch 3194/100000\n",
+ " - 18s - loss: 0.2995 - acc: 0.9383 - val_loss: 0.3050 - val_acc: 0.9334\n",
+ "\n",
+ "Epoch 03194: val_acc did not improve from 0.94304\n",
+ "Epoch 3195/100000\n",
+ " - 19s - loss: 0.3001 - acc: 0.9378 - val_loss: 0.3074 - val_acc: 0.9253\n",
+ "\n",
+ "Epoch 03195: val_acc did not improve from 0.94304\n",
+ "Epoch 3196/100000\n",
+ " - 19s - loss: 0.2999 - acc: 0.9385 - val_loss: 0.2931 - val_acc: 0.9355\n",
+ "\n",
+ "Epoch 03196: val_acc did not improve from 0.94304\n",
+ "Epoch 3197/100000\n",
+ " - 18s - loss: 0.3038 - acc: 0.9368 - val_loss: 0.3030 - val_acc: 0.9294\n",
+ "\n",
+ "Epoch 03197: val_acc did not improve from 0.94304\n",
+ "Epoch 3198/100000\n",
+ " - 19s - loss: 0.3007 - acc: 0.9377 - val_loss: 0.3000 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 03198: val_acc did not improve from 0.94304\n",
+ "Epoch 3199/100000\n",
+ " - 19s - loss: 0.3071 - acc: 0.9371 - val_loss: 0.3467 - val_acc: 0.9136\n",
+ "\n",
+ "Epoch 03199: val_acc did not improve from 0.94304\n",
+ "Epoch 3200/100000\n",
+ " - 18s - loss: 0.3027 - acc: 0.9379 - val_loss: 0.4056 - val_acc: 0.8857\n",
+ "\n",
+ "Epoch 03200: val_acc did not improve from 0.94304\n",
+ "Epoch 3201/100000\n",
+ " - 18s - loss: 0.3031 - acc: 0.9371 - val_loss: 0.2881 - val_acc: 0.9383\n",
+ "\n",
+ "Epoch 03201: val_acc did not improve from 0.94304\n",
+ "Epoch 3202/100000\n",
+ " - 18s - loss: 0.3026 - acc: 0.9376 - val_loss: 0.2900 - val_acc: 0.9371\n",
+ "\n",
+ "Epoch 03202: val_acc did not improve from 0.94304\n",
+ "Epoch 3203/100000\n",
+ " - 18s - loss: 0.3038 - acc: 0.9374 - val_loss: 0.3219 - val_acc: 0.9183\n",
+ "\n",
+ "Epoch 03203: val_acc did not improve from 0.94304\n",
+ "Epoch 3204/100000\n",
+ " - 19s - loss: 0.3014 - acc: 0.9374 - val_loss: 0.4039 - val_acc: 0.8728\n",
+ "\n",
+ "Epoch 03204: val_acc did not improve from 0.94304\n",
+ "Epoch 3205/100000\n",
+ " - 18s - loss: 0.3001 - acc: 0.9376 - val_loss: 0.3022 - val_acc: 0.9269\n",
+ "\n",
+ "Epoch 03205: val_acc did not improve from 0.94304\n",
+ "Epoch 3206/100000\n",
+ " - 19s - loss: 0.2998 - acc: 0.9378 - val_loss: 0.3115 - val_acc: 0.9308\n",
+ "\n",
+ "Epoch 03206: val_acc did not improve from 0.94304\n",
+ "Epoch 3207/100000\n",
+ " - 19s - loss: 0.3015 - acc: 0.9374 - val_loss: 0.2904 - val_acc: 0.9352\n",
+ "\n",
+ "Epoch 03207: val_acc did not improve from 0.94304\n",
+ "Epoch 3208/100000\n",
+ " - 19s - loss: 0.2991 - acc: 0.9383 - val_loss: 0.2872 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 03208: val_acc did not improve from 0.94304\n",
+ "Epoch 3209/100000\n",
+ " - 19s - loss: 0.2998 - acc: 0.9377 - val_loss: 0.3062 - val_acc: 0.9251\n",
+ "\n",
+ "Epoch 03209: val_acc did not improve from 0.94304\n",
+ "Epoch 3210/100000\n",
+ " - 18s - loss: 0.3017 - acc: 0.9371 - val_loss: 0.3043 - val_acc: 0.9276\n",
+ "\n",
+ "Epoch 03210: val_acc did not improve from 0.94304\n",
+ "Epoch 3211/100000\n",
+ " - 19s - loss: 0.3034 - acc: 0.9367 - val_loss: 0.2863 - val_acc: 0.9374\n",
+ "\n",
+ "Epoch 03211: val_acc did not improve from 0.94304\n",
+ "Epoch 3212/100000\n",
+ " - 18s - loss: 0.3013 - acc: 0.9379 - val_loss: 0.2973 - val_acc: 0.9312\n",
+ "\n",
+ "Epoch 03212: val_acc did not improve from 0.94304\n",
+ "Epoch 3213/100000\n",
+ " - 18s - loss: 0.3009 - acc: 0.9378 - val_loss: 0.2939 - val_acc: 0.9315\n",
+ "\n",
+ "Epoch 03213: val_acc did not improve from 0.94304\n",
+ "Epoch 3214/100000\n",
+ " - 18s - loss: 0.2992 - acc: 0.9384 - val_loss: 0.3207 - val_acc: 0.9211\n",
+ "\n",
+ "Epoch 03214: val_acc did not improve from 0.94304\n",
+ "Epoch 3215/100000\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Define and train the CNN classifier on 27x15 single-channel input patches.\n",
+ "# NOTE(review): the return value of tf.get_default_graph() is discarded, so this\n",
+ "# call is a no-op here and could likely be removed.\n",
+ "tf.get_default_graph()\n",
+ "########## HYPER PARAMETERS\n",
+ "\n",
+ "batch_size = 2000\n",
+ "# Very high epoch cap; best-val_acc snapshots are persisted by the\n",
+ "# ModelCheckpoint callback below, so training can be cut off manually.\n",
+ "epochs = 100000\n",
+ "# 'lr' is the legacy Keras argument name (renamed to 'learning_rate' later).\n",
+ "optimizer = tf.keras.optimizers.Adam(lr=0.001)\n",
+ "\n",
+ "# L1/L2 regularization strengths shared by every Conv2D and Dense layer below.\n",
+ "l1v = 0.005\n",
+ "l2v = 0.015\n",
+ "#optimizer = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.1)\n",
+ "#init=tf.global_variables_initializer()\n",
+ "\n",
+ "########## HYPER PARAMETERS\n",
+ "########## MODEL ARCHITECTURE\n",
+ "# Two conv stages (128->64 filters, then 64->32), each conv followed by batch\n",
+ "# norm; each stage ends with 2x2 max pooling and dropout, then two dense\n",
+ "# layers and a softmax head over num_classes outputs.\n",
+ "model = tf.keras.models.Sequential()\n",
+ "model.add(tf.keras.layers.Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same', input_shape=(27,15,1), \n",
+ "                 kernel_regularizer=tf.keras.regularizers.l1_l2(l1v,l2v)))\n",
+ "model.add(tf.keras.layers.BatchNormalization(axis=-1))\n",
+ "model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same', \n",
+ "                 kernel_regularizer=tf.keras.regularizers.l1_l2(l1v,l2v)))\n",
+ "model.add(tf.keras.layers.BatchNormalization(axis=-1))\n",
+ "model.add(tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=None, padding='same', data_format='channels_last'))\n",
+ "model.add(tf.keras.layers.Dropout(0.45))\n",
+ "\n",
+ "model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same',\n",
+ "                 kernel_regularizer=tf.keras.regularizers.l1_l2(l1v,l2v)))\n",
+ "model.add(tf.keras.layers.BatchNormalization(axis=-1))\n",
+ "model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation='relu', padding='same',\n",
+ "                 kernel_regularizer=tf.keras.regularizers.l1_l2(l1v,l2v)))\n",
+ "model.add(tf.keras.layers.BatchNormalization(axis=-1))\n",
+ "model.add(tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=None, padding='same', data_format='channels_last'))\n",
+ "model.add(tf.keras.layers.Dropout(0.45))\n",
+ "\n",
+ "model.add(tf.keras.layers.Flatten())\n",
+ "model.add(tf.keras.layers.Dense(140, activation='relu',\n",
+ "                kernel_regularizer=tf.keras.regularizers.l1_l2(l1v,l2v), use_bias=True))\n",
+ "model.add(tf.keras.layers.Dropout(0.5))\n",
+ "model.add(tf.keras.layers.Dense(70, activation='relu',\n",
+ "                kernel_regularizer=tf.keras.regularizers.l1_l2(l1v,l2v), use_bias=True))\n",
+ "model.add(tf.keras.layers.Dropout(0.5))\n",
+ "# num_classes is defined in an earlier cell (not visible here).\n",
+ "model.add(tf.keras.layers.Dense(num_classes, activation='softmax'))\n",
+ "########## MODEL ARCHITECTURE\n",
+ "####TENSORBOARD\n",
+ "# Build a human-readable summary of layer names/output shapes plus the\n",
+ "# hyperparameters, to be logged alongside the TensorBoard run.\n",
+ "config = \"\"\n",
+ "for layer in model.layers:\n",
+ "    config += str(layer.output).split('\\\"')[1].split(\"/\")[0] + str(layer.output_shape) + \"\\n\\n\"\n",
+ "#### END TENSORBOARD\n",
+ "config += \"batchsize: \" + str(batch_size) + \"\\n\\n\" + \"epochs: \" + str(epochs) + \"\\n\\n\"\n",
+ "\n",
+ "# Print summary\n",
+ "current_name = \"CNN\"\n",
+ "readable_timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S')\n",
+ "# NOTE(review): hardcoded absolute path -- breaks on machines without /srv/share.\n",
+ "tensorflowfolder = \"/srv/share/tensorboardfiles/\" + current_name + readable_timestamp\n",
+ "print(current_name + readable_timestamp)\n",
+ "\n",
+ "model.summary()\n",
+ "# LoggingTensorBoard is a project-defined TensorBoard subclass (defined in an\n",
+ "# earlier cell) that also records the config string above.\n",
+ "logger = LoggingTensorBoard(settings_str_to_log = config, log_dir=tensorflowfolder, histogram_freq=0,\n",
+ "                    write_graph=True, write_images=True, update_freq = 'epoch')\n",
+ "# Snapshot the full model to ./ModelSnapshots whenever val_acc improves\n",
+ "# (matches the 'val_acc did not improve from ...' messages in the output).\n",
+ "storer = ModelCheckpoint(\"./ModelSnapshots/\" + current_name + readable_timestamp + '-{epoch:03d}.h5',\n",
+ "                         monitor='val_acc', verbose=1,\n",
+ "                         save_best_only=True, save_weights_only=False,\n",
+ "                         mode='auto', period=1)\n",
+ "# Multiply the learning rate by 0.95 after 140 epochs without val_loss\n",
+ "# improvement, down to a floor of 1e-5 (see ReduceLROnPlateau lines in output).\n",
+ "learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss', \n",
+ "                                            patience=140, \n",
+ "                                            verbose=1, \n",
+ "                                            factor=0.95, \n",
+ "                                            min_lr=0.00001)\n",
+ "# compile model for training\n",
+ "model.compile(loss='categorical_crossentropy',\n",
+ "              optimizer=optimizer,\n",
+ "              metrics=['accuracy'])\n",
+ "\n",
+ "# x_train/x_test and the one-hot label arrays come from earlier cells.\n",
+ "history = model.fit(x_train, y_train_one_hot,\n",
+ "                    batch_size=batch_size,\n",
+ "                    epochs=epochs,\n",
+ "                    verbose=2,\n",
+ "                    validation_data=(x_test, y_test_one_hot),\n",
+ "                    callbacks=[logger, storer, learning_rate_reduction])\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Persist the final model state after training ends (in addition to the\n",
+ "# best-val_acc snapshots written by the ModelCheckpoint callback).\n",
+ "model.save(\"./ModelSnapshots/\" + current_name + \"_DONE.h5\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Plot training curves from the History object returned by model.fit.\n",
+ "# NOTE(review): the second curve in each plot is the validation split, so the\n",
+ "# legend label 'test' would be more accurate as 'validation'.\n",
+ "# 'acc'/'val_acc' are the TF1.x-era Keras history keys (later: 'accuracy').\n",
+ "# summarize history for accuracy\n",
+ "plt.plot(history.history['acc'])\n",
+ "plt.plot(history.history['val_acc'])\n",
+ "plt.title('model accuracy')\n",
+ "plt.ylabel('accuracy')\n",
+ "plt.xlabel('epoch')\n",
+ "plt.legend(['train', 'test'], loc='upper left')\n",
+ "plt.show()\n",
+ "# summarize history for loss\n",
+ "plt.plot(history.history['loss'])\n",
+ "plt.plot(history.history['val_loss'])\n",
+ "plt.title('model loss')\n",
+ "plt.ylabel('loss')\n",
+ "plt.xlabel('epoch')\n",
+ "plt.legend(['train', 'test'], loc='upper left')\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_08_CNN-Report.ipynb b/python/Step_08_CNN-Report.ipynb
new file mode 100644
index 0000000..c17b718
--- /dev/null
+++ b/python/Step_08_CNN-Report.ipynb
@@ -0,0 +1,430 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Using TensorFlow backend.\n",
+ "/usr/local/lib/python3.6/dist-packages/requests/__init__.py:91: RequestsDependencyWarning: urllib3 (1.25.2) or chardet (3.0.4) doesn't match a supported version!\n",
+ " RequestsDependencyWarning)\n"
+ ]
+ }
+ ],
+ "source": [
+ "import keras\n",
+ "from keras.models import load_model\n",
+ "from keras import utils\n",
+ "\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "import pandas as pd\n",
+ "import math\n",
+ "\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "# Importing matplotlib to plot images.\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "%matplotlib inline\n",
+ "\n",
+ "# Importing SK-learn to calculate precision and recall\n",
+ "import sklearn\n",
+ "from sklearn import metrics\n",
+ "from sklearn.model_selection import train_test_split, cross_val_score, LeaveOneGroupOut\n",
+ "from sklearn.utils import shuffle \n",
+ "\n",
+ "# Used for graph export\n",
+ "from tensorflow.python.framework import graph_util\n",
+ "from tensorflow.python.framework import graph_io\n",
+ "from keras import backend as K\n",
+ "from keras import regularizers\n",
+ "\n",
+ "import pickle as pkl\n",
+ "import h5py\n",
+ "\n",
+ "from pathlib import Path\n",
+ "import os.path\n",
+ "import sys\n",
+ "import datetime\n",
+ "import time\n",
+ "\n",
+ "target_names = [\"Knuckle\", \"Finger\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[ 1 2 9 6 4 14 17 16 12 3 10 18 5] [13 8 11 15 7]\n",
+ "13 : 5\n",
+ "0.7222222222222222 : 0.2777777777777778\n",
+ "503886\n"
+ ]
+ }
+ ],
+ "source": [
+ "# the data, split between train and test sets\n",
+ "df = pd.read_pickle(\"DataStudyCollection/df_blobs_area.pkl\")\n",
+ "\n",
+ "lst = df.userID.unique()\n",
+ "np.random.seed(42)\n",
+ "np.random.shuffle(lst)\n",
+ "test_ids = lst[-5:]\n",
+ "train_ids = lst[:-5]\n",
+ "print(train_ids, test_ids)\n",
+ "print(len(train_ids), \":\", len(test_ids))\n",
+ "print(len(train_ids) / len(lst), \":\", len(test_ids)/ len(lst))\n",
+ "\n",
+ "df = df[df.userID.isin(train_ids) | df.userID.isin(test_ids) & (df.Version == \"Normal\")]\n",
+ "print(len(df))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x = np.vstack(df.Blobs)\n",
+ "x = x.reshape(-1, 27, 15, 1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# convert class vectors to binary class matrices (one-hot notation)\n",
+ "num_classes = 2\n",
+ "y = utils.to_categorical(df.InputMethod, num_classes)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "Text(0.5, 1.0, 'Label for image 1 is: [1. 0.]')"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAALEAAAEICAYAAAAQmxXMAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAD1tJREFUeJzt3X2wXHV9x/H35yaBQIiFAGYgPIRmUp2UGeIMgm3BQhEE+hD8oymM0GhpY1XG2vpQdNoGqVXGKVUZLaNAIEWBQShD1BQIGRjaKRUCA5oUNBASSBoSIASChIck3/5xflc2l717d++evXu/8HnN3Nlz9jx9d+9nf3vO2bP7U0RgltlAvwsw65ZDbOk5xJaeQ2zpOcSWnkNs6fUsxJLulvTndS+rytWSnpd0X3dVgqQjJL0kaUK36xov6npMkq6R9JqkdTWV1sm2f6M8hl0j5WjEEEtaJ+kD9ZXXtROAU4HDIuK4blcWEU9GxH4Rsav70npH0smS7pL0wkihqvkxfS0iZjbUMV/Sf0t6WdLdna5M0l9LelrSi5IWS9q72XwR8YuI2A/4z5HWmXF34khgXUT8stMFJU3sQT1j5ZfAYuBzfa5jK/AN4JJOF5T0QeBC4BSq/+OvA1/qtqBRh1jSAZJ+JOmZ8tb+I0mHDZltlqT7yqvuVknTGpZ/X3lFb5P0sKST2tjm+cCVwG+Vt5ovlfv/QtJjkrZKWirp0IZlQtInJa0B1jRZ58wyz8QyfrekL5faXpL0Q0kHSvp+eRz3S5rZsPw3JT1Vpj0g6cSGaftIWlKen0ckfV7Shobph0q6uTyHT0j61HCPPSLui4hrgbVtPE9DH9NHJK2VtL1s58MjraNFHXdGxI3A/41i8QXAVRGxOiKeB/4R+MhoaxnUTUs8AFxN9Yo6AtgBfGvIPH8K/BlwCLATuAxA0gzgx8CXgWnAZ4GbJR3caoMRcRXwl8C95e1ykaTfA74KzC/bWQ/cMGTRs4DjgTltPrazgfOAGcAs4N7yWKcBjwCLGua9H5hbpl0H/EDS5DJtETCTqsU5FTh3cCFJA8APgYfLdk4BPl1aq9pImkL1vJ8REVOB3wYeKtOOKI3IEXVus4XfpHq8gx4Gpks6sJuVjjrEEfFcRNwcES9HxHbgn4DfHTLbtRGxqrz1/z0wvxxsnAssi4hlEbE7IpYDK4EzR1HKh4HFEfFgRLwKfIGqpZ7ZMM9XI2JrROxoc51XR8TjEfEC8B/A46UF2gn8AHjP4IwR8b3yXOyMiEuBvYF3lcnzga9ExPMRsYHyIi7eCxwcERdHxGsRsRa4guoFVLfdwNGS9omITRGxutT+ZETsHxFP9mCbzewHvNAwPjg8tZuVdrM7sa+k70haL+lF4B5g/yFHxE81DK8HJgEHUbXef1xagW2StlEdsB0yilIOLesGICJeAp6jat2a1dGOzQ3DO5qM7zc4IumzZVfhhfI4fo3qMQ7W1rjtxuEjgUOHPAdfBKZ3WGtLpQH5E6p3sE2Sfizp3XVuowMvAe9oGB8c3t7NSrvZnfgMVYtzfES8A3h/uV8N8xzeMHwE8DrwLNU/89rSCgz+TYmIjg8WqPbNjhwcKW+fBwIbG+bpyaV6Zf/381Qt7gERsT9V6zL4HGwCGo8TGp+Pp4AnhjwHUyNiNO9GLUXE7RFxKlUj8ShVi98Pq4FjGsaPATZHxHPdrLTdEE+SNLnhbyLVW8AOYFs5YFvUZLlzJc2RtC9wMXBTOe3zPeAPJX1Q0oSyzpOaHBi243rgo5LmltM1XwF+EhHrRrGuTk2l2td/Bpgo6R/Ys6W5EfhCOQieAVzQMO0+YLukvy0HgBMkHS3pvc02JGmg7GtPqkY1WdJeIxUoabqkeeXF/SpVa7h7NA+2rG9CqWMiMFDqmNTm4v8GnF8ysT/wd8A1o61lULshXkYV2MG/i6hOs+xD1bL+D3Bbk+WuLUU+DUwG
PgUQEU8B86jePp+hapU+10E9vxIRd1Ltb99M1fLNojf7lc3cTvW4f0G1S/MKe+4yXAxsAJ4A7gRuogoS5cX8B1QHhU9QPY9XUu2ONPN+qud+GW8cSN/RRo0DwN9QvWNtpTpu+Tjs8aFIJwd255VtXw6cWIZ/1bKX9Z3YbMGIuA34GnAX8CTVc7aoYdnVozlzIl8UP3YkfRw4OyKGHgCPS5KuAM6hesufNcbbnk115mcv4BMRcc2w8zrEvSPpEKrTa/cCs6lOK34rIr7R18LeYjJ/gpXBXsB3gKOAbVTnr/+1rxW9BbkltvQyXjthtocx353YS3vHZKYMP4M0/DRAE0a6urD1O0vsHNcXq70lbOf5ZyOi5SUEdaolxJJOB74JTACubPWhxWSmcPyE04Zf16TWJQ3sP9wZqGJX65Du2rqt9fK7HfJu3Rk3rR95rvp0vTtRPmb+NnAG1QU250hq90Ibs67VsU98HPBYRKyNiNeojsDn1bBes7bUEeIZ7Pkp1Qb2vPgGSQslrZS08vXqAyuz2ozJ2YmI+G5EHBsRx06i6bdRzEatjhBvZM+rsw5jzyvIzHqqjhDfD8yWdFS5qupsYGkN6zVrS9en2CJip6QLqK7omkD1LYvVrRca/krAgb1H2N2Y2uIcM8D2Eb4/6lNobzm1nCeOiGVUlwiajTl/7GzpOcSWnkNs6TnElp5DbOk5xJbemF9PrIEBBvbZZ/gZZrT+7ZDHzz2o5fRJ21tfj3z4ZS+1nL775ZdbTrfxxy2xpecQW3oOsaXnEFt6DrGl5xBbeg6xpTfufsYqRvhdiZ9/9PKu1v/7N/xRy+m7143Vj6ZbXdwSW3oOsaXnEFt6DrGl5xBbeg6xpecQW3rj7jzxwPMvtpx+9GWfaDn95UNa92717h0jdo1sybgltvQcYkvPIbb0HGJLzyG29BxiS88htvTG/Dxx7N7N7leG77dDr7zScvkjb9jQ3fZ3tF6/5VNXP3brgO3ALmBnRBxbx3rN2lFnS3xyRDxb4/rM2uJ9YkuvrhAHcIekByQtrGmdZm2pa3fihIjYKOmdwHJJj0bEPYMTS7AXAkxm35o2aVappSWOiI3ldgtwC1VXuY3T3Rmj9UwdHZRPkTR1cBg4DVjV7XrN2lXH7sR04BZJg+u7LiJua7lEi77kdj23tfXWRppubzt1dMa4FjimhlrMRsWn2Cw9h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSc4gtPYfY0nOILT2H2NJziC09h9jSazvEkhZL2iJpVcN90yQtl7Sm3B7QmzLNhtdJS3wNcPqQ+y4EVkTEbGBFGTcbU22HuHTpNbTDjHnAkjK8BDirprrM2tZtnx3TI2JTGX6aqhOaN3E/dtZLtR3YRURQ9SzabJr7sbOe6TbEmyUdAlBut3Rfkllnug3xUmBBGV4A3Nrl+sw61skptuuBe4F3Sdog6XzgEuBUSWuAD5RxszHV9oFdRJwzzKRTaqrFbFT8iZ2l5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl5xBbeg6xpecQW3oOsaXnEFt6DrGl120/dhdJ2ijpofJ3Zm/KNBtet/3YAXw9IuaWv2X1lGXWvm77sTPruzr2iS+Q9NOyu9G0W1xJCyWtlLTydV6tYZNmb+g2xJcDs4C5wCbg0mYzuR8766WuQhwRmyNiV0TsBq4AjqunLLP2dRXiwY4Yiw8Bq4ab16xX2u4CrPRjdxJwkKQNwCLgJElzqbrDXQd8rAc1mrXUbT92V9VYi9mo+BM7S88htvQcYkvPIbb0HGJLzyG29BxiS88htvQcYkvPIbb0HGJLzyG29BxiS88h
tvQcYkvPIbb0HGJLzyG29BxiS88htvQcYkvPIbb0HGJLzyG29BxiS88htvQcYkvPIbb0HGJLzyG29BxiS6+TfuwOl3SXpP+VtFrSX5X7p0laLmlNuW3a+YxZr3TSEu8EPhMRc4D3AZ+UNAe4EFgREbOBFWXcbMx00o/dpoh4sAxvBx4BZgDzgCVltiXAWXUXadZK290dNJI0E3gP8BNgekRsKpOeBqY3mX8hsBBgMvuOZpNmw+r4wE7SfsDNwKcj4sXGaRERVJ3QMOR+92NnPdNRiCVNogrw9yPi38vdmwe7Aiu3W+ot0ay1Ts5OiKq3pEci4l8aJi0FFpThBcCt9ZVnNrJO9ol/BzgP+Jmkh8p9XwQuAW6UdD6wHphfb4lmrXXSj91/ARpm8in1lGPWOX9iZ+k5xJaeQ2zpOcSWnkNs6TnElp5DbOk5xJaeQ2zpOcSWnkNs6TnElp5DbOk5xJaeQ2zpOcSWnkNs6TnElp5DbOk5xJaeQ2zpOcSWnkNs6TnElp5DbOk5xJaeQ2zpOcSWnkNs6TnElp5DbOk5xJZeWyFu0RHjRZI2Snqo/J3Z23LN3qzdX4of7IjxQUlTgQckLS/Tvh4R/9yb8sxG1laISz91m8rwdkmDHTGa9d1o+rGbyRsdMQJcIOmnkhYP16+zpIWSVkpa+TqvjrpYs2Y67cduaEeMlwOzgLlULfWlzZZzZ4zWS530Y/emjhgjYnNE7IqI3cAVwHG9KdNseO2enWjaEeNgT6LFh4BV9ZZnNrJ2z04M1xHjOZLmUvXnvA74WO0Vmo2g3bMTw3XEuKzecsw650/sLD2H2NJziC09h9jSc4gtPYfY0lNEjO0GpWeA9UPuPgh4dkwL6Yzr68yREXHwWG1szEPctAhpZUQc2+86huP6xjfvTlh6DrGlN15C/N1+FzAC1zeOjYt9YrNujJeW2GzUHGJLr68hlnS6pJ9LekzShf2spRlJ6yT9rPwcwcp+1wNQvsu4RdKqhvumSVouaU25bfpdx7eqvoVY0gTg28AZwByqC+zn9KueFk6OiLnj6DzsNcDpQ+67EFgREbOBFWX8baOfLfFxwGMRsTYiXgNuAOb1sZ4UIuIeYOuQu+cBS8rwEuCsMS2qz/oZ4hnAUw3jGxh/v2URwB2SHpC0sN/FtDC9/DYIwNPA9H4WM9ba/Y7d29UJEbFR0juB5ZIeLS3huBURIeltdd60ny3xRuDwhvHDyn3jRkRsLLdbgFsYvz9JsHnwm+fldkuf6xlT/Qzx/cBsSUdJ2gs4G1jax3r2IGlK+d05JE0BTmP8/iTBUmBBGV4A3NrHWsZc33YnImKnpAuA24EJwOKIWN2vepqYDtxS/eQGE4HrIuK2/pYEkq4HTgIOkrQBWARcAtwo6Xyqy1zn96/CseePnS09f2Jn6TnElp5DbOk5xJaeQ2zpOcSWnkNs6f0/kZhtd/D2o3sAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "i = 1\n",
+ "plt.imshow(x[i].reshape(27, 15))  # capacitive image is 27x15 pixels\n",
+ "plt.title(\"Label for image %i is: %s\" % (i, y[i]))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# If GPU is not available: \n",
+ "# GPU_USE = '/cpu:0'\n",
+ "#config = tf.ConfigProto(device_count = {\"GPU\": 1})\n",
+ "\n",
+ "\n",
+ "# If GPU is available: \n",
+ "config = tf.ConfigProto()\n",
+ "config.log_device_placement = True\n",
+ "config.allow_soft_placement = True\n",
+ "config.gpu_options.allow_growth=True\n",
+ "config.gpu_options.allocator_type = 'BFC'\n",
+ "\n",
+ "# Limit the maximum memory used\n",
+ "config.gpu_options.per_process_gpu_memory_fraction = 0.4\n",
+ "\n",
+ "# set session config\n",
+ "tf.keras.backend.set_session(tf.Session(config=config))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "loadpath = \"./ModelSnapshots/CNN-33767.h5\"\n",
+ "model = load_model(loadpath)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 1min 28s, sys: 9.52 s, total: 1min 37s\n",
+ "Wall time: 1min\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "lst = []\n",
+ "batch = 100\n",
+ "for i in range(0, len(x), batch):\n",
+ " _x = x[i: i+batch]\n",
+ " lst.extend(model.predict(_x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df[\"InputMethodPred\"] = lst\n",
+ "df.InputMethodPred = df.InputMethodPred.apply(lambda x: np.argmax(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_train = df[df.userID.isin(train_ids)]\n",
+ "df_test = df[df.userID.isin(test_ids) & (df.Version == \"Normal\")]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[124207 12765]\n",
+ " [ 6596 322276]]\n",
+ "[[0.90680577 0.09319423]\n",
+ " [0.02005644 0.97994356]]\n",
+ "Accuray: 0.958\n",
+ "Recall: 0.943\n",
+ "Precision: 0.957\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " Knuckle 0.95 0.91 0.93 136972\n",
+ " Finger 0.96 0.98 0.97 328872\n",
+ "\n",
+ " micro avg 0.96 0.96 0.96 465844\n",
+ " macro avg 0.96 0.94 0.95 465844\n",
+ "weighted avg 0.96 0.96 0.96 465844\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(sklearn.metrics.confusion_matrix(df_train.InputMethod.values, df_train.InputMethodPred.values, labels=[0, 1]))\n",
+ "cm = sklearn.metrics.confusion_matrix(df_train.InputMethod.values, df_train.InputMethodPred.values, labels=[0, 1])\n",
+ "cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n",
+ "print(cm)\n",
+ "print(\"Accuray: %.3f\" % sklearn.metrics.accuracy_score(df_train.InputMethod.values, df_train.InputMethodPred.values))\n",
+ "print(\"Recall: %.3f\" % metrics.recall_score(df_train.InputMethod.values, df_train.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"Precision: %.3f\" % metrics.precision_score(df_train.InputMethod.values, df_train.InputMethodPred.values, average=\"macro\"))\n",
+ "#print(\"F1-Score: %.3f\" % metrics.f1_score(df_train.InputMethod.values, df_train.InputMethodPred.values, average=\"macro\"))\n",
+ "print(sklearn.metrics.classification_report(df_train.InputMethod.values, df_train.InputMethodPred.values, target_names=target_names))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[ 8384 1037]\n",
+ " [ 1028 27593]]\n",
+ "[[0.88992676 0.11007324]\n",
+ " [0.03591768 0.96408232]]\n",
+ "Accuray: 0.946\n",
+ "Recall: 0.927\n",
+ "Precision: 0.956\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " Knuckle 0.89 0.89 0.89 9421\n",
+ " Finger 0.96 0.96 0.96 28621\n",
+ "\n",
+ " micro avg 0.95 0.95 0.95 38042\n",
+ " macro avg 0.93 0.93 0.93 38042\n",
+ "weighted avg 0.95 0.95 0.95 38042\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(sklearn.metrics.confusion_matrix(df_test.InputMethod.values, df_test.InputMethodPred.values, labels=[0, 1]))\n",
+ "cm = sklearn.metrics.confusion_matrix(df_test.InputMethod.values, df_test.InputMethodPred.values, labels=[0, 1])\n",
+ "cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n",
+ "print(cm)\n",
+ "print(\"Accuray: %.3f\" % sklearn.metrics.accuracy_score(df_test.InputMethod.values, df_test.InputMethodPred.values))\n",
+ "print(\"Recall: %.3f\" % metrics.recall_score(df_test.InputMethod.values, df_test.InputMethodPred.values, average=\"macro\"))\n",
+ "print(\"Precision: %.3f\" % metrics.precision_score(df_test.InputMethod.values, df_test.InputMethodPred.values, average=\"macro\"))\n",
+ "#print(\"F1-Score: %.3f\" % metrics.f1_score(df_test.InputMethod.values, df_test.InputMethodPred.values, average=\"macro\"))\n",
+ "print(sklearn.metrics.classification_report(df_test.InputMethod.values, df_test.InputMethodPred.values, target_names=target_names))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Export"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "output nodes names are: ['output_node0']\n"
+ ]
+ }
+ ],
+ "source": [
+ "output_node_prefix = \"output_node\"\n",
+ "num_output = 1\n",
+ "pred = [None]*num_output\n",
+ "pred_node_names = [None]*num_output\n",
+ "for i in range(num_output):\n",
+ " pred_node_names[i] = output_node_prefix+str(i)\n",
+ " pred[i] = tf.identity(model.outputs[i], name=pred_node_names[i])\n",
+ "print('output nodes names are: ', pred_node_names)\n",
+ "output_node_prefix = pred_node_names[0]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[]"
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "model.inputs"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "sess = K.get_session()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "output_path = \"./Models/\"\n",
+ "output_file = \"CNN.pb\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "INFO:tensorflow:Froze 30 variables.\n",
+ "INFO:tensorflow:Converted 30 variables to const ops.\n",
+ "Saved the freezed graph at: ./Models/CNN.pb\n"
+ ]
+ }
+ ],
+ "source": [
+ "from tensorflow.python.framework import graph_util\n",
+ "from tensorflow.python.framework import graph_io\n",
+ "constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), pred_node_names)\n",
+ "\n",
+ "graph_io.write_graph(constant_graph, output_path, output_file, as_text=False)\n",
+ "\n",
+ "print('Saved the freezed graph at: ', (output_path + output_file))"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_09_LSTM_ReadData.ipynb b/python/Step_09_LSTM_ReadData.ipynb
new file mode 100644
index 0000000..e5be1fa
--- /dev/null
+++ b/python/Step_09_LSTM_ReadData.ipynb
@@ -0,0 +1,152 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Filtering the data for the LSTM: removes all rows where the revert button was used because the participant performed a wrong gesture\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "\n",
+ "from scipy.odr import *\n",
+ "from scipy.stats import *\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import os\n",
+ "import time\n",
+ "import matplotlib.pyplot as plt\n",
+ "import ast\n",
+ "from multiprocessing import Pool, cpu_count\n",
+ "\n",
+ "import scipy\n",
+ "\n",
+ "from IPython import display\n",
+ "from matplotlib.patches import Rectangle\n",
+ "\n",
+ "from sklearn.metrics import mean_squared_error\n",
+ "import json\n",
+ "\n",
+ "import scipy.stats as st\n",
+ "from sklearn.metrics import r2_score\n",
+ "\n",
+ "\n",
+ "from matplotlib import cm\n",
+ "from mpl_toolkits.mplot3d import axes3d\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "import copy\n",
+ "\n",
+ "from sklearn.model_selection import LeaveOneOut, LeavePOut\n",
+ "\n",
+ "from multiprocessing import Pool\n",
+ "import cv2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dfAll = pd.read_pickle(\"DataStudyCollection/AllData.pkl\")\n",
+ "dfAll.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_actual = dfAll[(dfAll.Actual_Data == True) & (dfAll.Is_Pause == False)]\n",
+ "df_actual.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(\"all: %s, actual data: %s\" % (len(dfAll), len(df_actual)))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%%time\n",
+ "# filter out all gestures where the revert button was pressed during the study and the gesture was repeated\n",
+ "def is_max(df):\n",
+ " df_temp = df.copy(deep=True)\n",
+ " max_version = df_temp.RepetitionID.max()\n",
+ " df_temp[\"IsMax\"] = np.where(df_temp.RepetitionID == max_version, True, False)\n",
+ " df_temp[\"MaxRepetition\"] = [max_version] * len(df_temp)\n",
+ " return df_temp\n",
+ "\n",
+ "df_filtered = df_actual.copy(deep=True)\n",
+ "df_grp = df_filtered.groupby([df_filtered.userID, df_filtered.TaskID, df_filtered.VersionID])\n",
+ "pool = Pool(cpu_count() - 1)\n",
+ "result_lst = pool.map(is_max, [grp for name, grp in df_grp])\n",
+ "df_filtered = pd.concat(result_lst)\n",
+ "df_filtered = df_filtered[df_filtered.IsMax == True]\n",
+ "pool.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_filtered.to_pickle(\"DataStudyCollection/df_lstm.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(\"actual: %s, filtered data: %s\" % (len(df_actual), len(df_filtered)))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_10_LSTM_Preprocessing.ipynb b/python/Step_10_LSTM_Preprocessing.ipynb
new file mode 100644
index 0000000..46a4064
--- /dev/null
+++ b/python/Step_10_LSTM_Preprocessing.ipynb
@@ -0,0 +1,1286 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Preprocessing for LSTM: Blob Detection and Cutting"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "\n",
+ "from scipy.odr import *\n",
+ "from scipy.stats import *\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import os\n",
+ "import time\n",
+ "import matplotlib.pyplot as plt\n",
+ "import ast\n",
+ "from multiprocessing import Pool, cpu_count\n",
+ "\n",
+ "import scipy\n",
+ "\n",
+ "from IPython import display\n",
+ "from matplotlib.patches import Rectangle\n",
+ "\n",
+ "from sklearn.metrics import mean_squared_error\n",
+ "import json\n",
+ "\n",
+ "import scipy.stats as st\n",
+ "from sklearn.metrics import r2_score\n",
+ "\n",
+ "\n",
+ "from matplotlib import cm\n",
+ "from mpl_toolkits.mplot3d import axes3d\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "import copy\n",
+ "\n",
+ "from sklearn.model_selection import LeaveOneOut, LeavePOut\n",
+ "\n",
+ "from multiprocessing import Pool\n",
+ "import cv2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_filtered = pd.read_pickle(\"DataStudyCollection/df_lstm.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " Timestamp | \n",
+ " Current_Task | \n",
+ " Task_amount | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " RepetitionID | \n",
+ " Actual_Data | \n",
+ " Is_Pause | \n",
+ " Image | \n",
+ " IsMax | \n",
+ " MaxRepetition | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 291980 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... | \n",
+ " True | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 291981 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... | \n",
+ " True | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 291982 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... | \n",
+ " True | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 291983 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... | \n",
+ " True | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 291984 | \n",
+ " 1 | \n",
+ " 1,54515E+12 | \n",
+ " 33 | \n",
+ " 680 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " True | \n",
+ " False | \n",
+ " [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... | \n",
+ " True | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID Timestamp Current_Task Task_amount TaskID VersionID \\\n",
+ "291980 1 1,54515E+12 33 680 0 2 \n",
+ "291981 1 1,54515E+12 33 680 0 2 \n",
+ "291982 1 1,54515E+12 33 680 0 2 \n",
+ "291983 1 1,54515E+12 33 680 0 2 \n",
+ "291984 1 1,54515E+12 33 680 0 2 \n",
+ "\n",
+ " RepetitionID Actual_Data Is_Pause \\\n",
+ "291980 0 True False \n",
+ "291981 0 True False \n",
+ "291982 0 True False \n",
+ "291983 0 True False \n",
+ "291984 0 True False \n",
+ "\n",
+ " Image IsMax \\\n",
+ "291980 [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... True \n",
+ "291981 [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... True \n",
+ "291982 [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... True \n",
+ "291983 [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... True \n",
+ "291984 [0, 2, 0, 0, 0, 0, 1, 2, 2, 3, 2, 1, 1, 1, 0, ... True \n",
+ "\n",
+ " MaxRepetition \n",
+ "291980 0 \n",
+ "291981 0 \n",
+ "291982 0 \n",
+ "291983 0 \n",
+ "291984 0 "
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_filtered.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_filtered.Image = df_filtered.Image.apply(lambda x: x.reshape(27, 15))\n",
+ "df_filtered.Image = df_filtered.Image.apply(lambda x: x.clip(min=0, max=255))\n",
+ "df_filtered.Image = df_filtered.Image.apply(lambda x: x.astype(np.uint8))\n",
+ "df_filtered[\"ImageSum\"] = df_filtered.Image.apply(lambda x: np.sum(x))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#LSTMs new Blob detection (only detect, if there are blobs)\n",
+ "def detect_blobs(image):\n",
+ " #image = image.reshape(27, 15)\n",
+ " large = np.ones((29,17), dtype=np.uint8)\n",
+ " large[1:28,1:16] = image\n",
+ " temp, thresh = cv2.threshold(cv2.bitwise_not(large), 200, 255, cv2.THRESH_BINARY)\n",
+ " contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n",
+ " contours = [a for a in contours if cv2.contourArea(a) > 8 and cv2.contourArea(a) < 255]\n",
+ " lstBlob = []\n",
+ " lstMin = []\n",
+ " lstMax = []\n",
+ " count = 0\n",
+ " return len(contours) > 0"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 3.42 s, sys: 1.14 s, total: 4.57 s\n",
+ "Wall time: 4.94 s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "pool = Pool(cpu_count() - 1)\n",
+ "temp_blobs = pool.map(detect_blobs, df_filtered.Image)\n",
+ "pool.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_filtered[\"ContainsBlobs\"] = temp_blobs"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Label if knuckle or finger\n",
+ "def f(row):\n",
+ " if row['TaskID'] < 17:\n",
+ " #val = \"Knuckle\"\n",
+ " val = 0\n",
+ " elif row['TaskID'] >= 17:\n",
+ " #val = \"Finger\"\n",
+ " val = 1\n",
+ " return val\n",
+ "df_filtered['InputMethod'] = df_filtered.apply(f, axis=1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_filtered.index = range(len(df_filtered))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "1\n",
+ "2\n",
+ "3\n",
+ "4\n",
+ "5\n",
+ "6\n",
+ "7\n",
+ "8\n",
+ "9\n",
+ "10\n",
+ "11\n",
+ "12\n",
+ "13\n",
+ "14\n",
+ "15\n",
+ "16\n",
+ "17\n",
+ "18\n",
+ "CPU times: user 4min 7s, sys: 424 ms, total: 4min 8s\n",
+ "Wall time: 4min 8s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "# trim image sequences down to only between first and last detected blob\n",
+ "UserIDs = []\n",
+ "TaskIDs = []\n",
+ "VersionIDs = []\n",
+ "Blobs = []\n",
+ "for userID in df_filtered.userID.unique():\n",
+ " print(userID)\n",
+ " for TaskID in df_filtered[df_filtered.userID == userID].TaskID.unique():\n",
+ " for VersionID in df_filtered[(df_filtered.userID == userID) & (df_filtered.TaskID == TaskID)].VersionID.unique():\n",
+ " first_blob = -1\n",
+ " last_blob = -1\n",
+ " for index, row in df_filtered[(df_filtered.userID == userID) & (df_filtered.TaskID == TaskID) & (df_filtered.VersionID == VersionID)].iterrows():\n",
+ " if row.ContainsBlobs:\n",
+ " last_blob = index\n",
+ " if first_blob == -1:\n",
+ " first_blob = index\n",
+ " if first_blob >= 0 and last_blob >= 0:\n",
+ " UserIDs.append(userID)\n",
+ " TaskIDs.append(TaskID)\n",
+ " VersionIDs.append(VersionID)\n",
+ " Blobs.append(df_filtered[(df_filtered.userID == userID) & (df_filtered.TaskID == TaskID) & (df_filtered.VersionID == VersionID) & (df_filtered.index >= first_blob) & (df_filtered.index <= last_blob)].Image.tolist())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "UserIDs = np.array(UserIDs, dtype=np.int64)\n",
+ "TaskIDs = np.array(TaskIDs, dtype=np.int64)\n",
+ "VersionIDs = np.array(VersionIDs, dtype=np.int64)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " Blobs | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 3 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 5 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 6 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 239,... | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 7 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 8 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0... | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID TaskID VersionID \\\n",
+ "0 1 0 3 \n",
+ "1 1 0 5 \n",
+ "2 1 0 6 \n",
+ "3 1 0 7 \n",
+ "4 1 0 8 \n",
+ "\n",
+ " Blobs \n",
+ "0 [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... \n",
+ "1 [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... \n",
+ "2 [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 239,... \n",
+ "3 [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... \n",
+ "4 [[[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0... "
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_all = pd.DataFrame()\n",
+ "df_lstm_all[\"userID\"] = UserIDs\n",
+ "df_lstm_all[\"TaskID\"] = TaskIDs\n",
+ "df_lstm_all[\"VersionID\"] = VersionIDs\n",
+ "df_lstm_all[\"Blobs\"] = Blobs\n",
+ "df_lstm_all.Blobs = df_lstm_all.Blobs.map(np.array)\n",
+ "df_lstm_all.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 33,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " Blobs | \n",
+ " BlobCount | \n",
+ " GestureOnly | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 3 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... | \n",
+ " 38 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 5 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... | \n",
+ " 57 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 6 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 239,... | \n",
+ " 41 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 7 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... | \n",
+ " 20 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 8 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0... | \n",
+ " 41 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID TaskID VersionID \\\n",
+ "0 1 0 3 \n",
+ "1 1 0 5 \n",
+ "2 1 0 6 \n",
+ "3 1 0 7 \n",
+ "4 1 0 8 \n",
+ "\n",
+ " Blobs BlobCount GestureOnly \n",
+ "0 [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... 38 0 \n",
+ "1 [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... 57 0 \n",
+ "2 [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 239,... 41 0 \n",
+ "3 [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... 20 0 \n",
+ "4 [[[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0... 41 0 "
+ ]
+ },
+ "execution_count": 33,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_all.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_lstm_all[\"Length\"] = df_lstm_all.Blobs.apply(lambda x: x.shape[0])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 43,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 43,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYAAAAD8CAYAAAB+UHOxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAEP1JREFUeJzt3XuMXOV5x/HvE5ubSBpDiFaWbXVNsRQ5oiVoRYgSRVtQwEBVU4lEjlBxIkuWWqImElVrGqnkhgSVCE2iXOQGK06EApQkMgpU1AVGUf8AAuFiDCJsEkd45WAlBidLFNqlT/+Yd+l0s+ud2Z3dubzfj7TyOe9558zz7Fn7t+fMmXFkJpKk+ryp1wVIknrDAJCkShkAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVanWvCziRs846K0dHRxf9+FdffZXTTz+9ewX1yLD0AfbSj4alD7CXGY8//vgvM/PtC83r6wAYHR3lscceW/TjG40G4+Pj3SuoR4alD7CXfjQsfYC9zIiIn7czz0tAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVygCQpEoZAJJUqb5+J/BKGd11b1vzDt10xTJXIkkrxzMASaqUASBJlTIAJKlSBoAkVcoAkKRKGQCSVCkDQJIqZQBIUqUMAEmqlAEgSZUyACSpUgaAJFXKAJCkShkAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVygCQpEoZAJJUKQNAkirVdgBExKqIeCIivl/WN0bEIxExERF3RsTJZfyUsj5Rto+27OP6Mv58RFza7WYkSe3r5Azg48BzLes3A7dm5jnAy8COMr4DeLmM31rmERGbgW3AO4EtwFciYtXSypckLVZbARAR64ErgK+X9QAuAu4uU/YCV5blrWWdsv3iMn8rcEdmvpaZPwMmgAu60YQkqXOr25z3z8DfAW8p628DXsnM6bJ+GFhXltcBLwJk5nREHC/z1wEPt+yz9TFviIidwE6AkZERGo1Gu738nqmpqbYef9250wvOAZZUy1K028cgsJf+Myx9gL10asEAiIg/A45m5uMRMb6s1QCZuRvYDTA2Npbj44t/ykajQTuP/8iue9va36GrF1/LUrTbxyCwl/4zLH2AvXSqnTOA9wJ/HhGXA6cCfwB8AVgTEavLWcB6YLLMnwQ2AIcjYjXwVuBXLeMzWh8jSVphC74GkJnXZ+b6zByl+SLug5l5NfAQcFWZth3YV5bvKeuU7Q9mZpbxbeUuoY3AJuDRrnUiSepIu68BzOXvgTsi4nPAE8BtZfw24FsRMQEcoxkaZObBiLgLeBaYBq7NzNeX8PySpCXoKAAyswE0yvJPmeMunsz8HfDBeR5/I3Bjp0VKkrrPdwJLUqUMAEmqlAEgSZUyACSpUgaAJFXKAJCkShkAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVygCQpEoZAJJUKQNAkiplAEhSpQwASaqUASBJlTIAJKlSBoAkVcoAkKRKGQCSVCkDQJIqtbrXBSynA5PH+ciue3tdhiT1Jc8AJKlSBoAkVcoAkKRKGQCSVCkDQJIqZQBIUqUMAEmqlAEgSZUyACSpUgaAJFXKAJCkSi0YABFxakQ8GhFPRcTBiPh0Gd8YEY9ExERE3BkRJ5fxU8r6RNk+2rKv68v48xFx6XI1JUlaWDtnAK8BF2XmnwDnAVsi4kLgZuDWzDwHeBnYUebvAF4u47eWeUTEZmAb8E5gC/CViFjVzWYkSe1bMACyaaqsnlS+ErgIuLuM7wWuLMtbyzpl+8UREWX8jsx8LTN/BkwAF3SlC0lSx9r6OOjym/rjwDnAl4GfAK9k5nSZchhYV5bXAS8CZOZ0RBwH3lbGH27ZbetjWp9rJ7ATYGRkhEaj0VlHLUZOg+vOnV54YpuWUstSTE1N9ey5u81e+s+w9AH2
0qm2AiAzXwfOi4g1wPeAdyxXQZm5G9gNMDY2luPj44ve15du38ctB7r3Xx4cunrxtSxFo9FgKd+HfmIv/WdY+gB76VRHdwFl5ivAQ8B7gDURMfOv63pgsixPAhsAyva3Ar9qHZ/jMZKkFdbOXUBvL7/5ExGnAR8AnqMZBFeVaduBfWX5nrJO2f5gZmYZ31buEtoIbAIe7VYjkqTOtHN9ZC2wt7wO8Cbgrsz8fkQ8C9wREZ8DngBuK/NvA74VERPAMZp3/pCZByPiLuBZYBq4tlxakiT1wIIBkJlPA++aY/ynzHEXT2b+DvjgPPu6Ebix8zIlSd3mO4ElqVIGgCRVygCQpEoZAJJUKQNAkiplAEhSpQwASaqUASBJlTIAJKlSBoAkVcoAkKRKGQCSVCkDQJIqZQBIUqUMAEmqlAEgSZUyACSpUgaAJFXKAJCkShkAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVygCQpEoZAJJUKQNAkiplAEhSpQwASaqUASBJlTIAJKlSBoAkVcoAkKRKLRgAEbEhIh6KiGcj4mBEfLyMnxkR+yPihfLnGWU8IuKLETEREU9HxPkt+9pe5r8QEduXry1J0kLaOQOYBq7LzM3AhcC1EbEZ2AU8kJmbgAfKOsBlwKbytRP4KjQDA7gBeDdwAXDDTGhIklbeggGQmUcy80dl+TfAc8A6YCuwt0zbC1xZlrcC38ymh4E1EbEWuBTYn5nHMvNlYD+wpavdSJLa1tFrABExCrwLeAQYycwjZdMvgJGyvA54seVhh8vYfOOSpB5Y3e7EiHgz8B3gE5n564h4Y1tmZkRkNwqKiJ00Lx0xMjJCo9FY9L5GToPrzp3uRlkAS6plKaampnr23N1mL/1nWPoAe+lUWwEQESfR/Mf/9sz8bhl+KSLWZuaRconnaBmfBDa0PHx9GZsExmeNN2Y/V2buBnYDjI2N5fj4+OwpbfvS7fu45UDbGbegQ1cvvpalaDQaLOX70E/spf8MSx9gL51q5y6gAG4DnsvMz7dsugeYuZNnO7CvZfyacjfQhcDxcqnofuCSiDijvPh7SRmTJPVAO78evxf4S+BARDxZxv4BuAm4KyJ2AD8HPlS23QdcDkwAvwU+CpCZxyLis8APy7zPZOaxrnQhSerYggGQmf8JxDybL55jfgLXzrOvPcCeTgqUJC0P3wksSZUyACSpUgaAJFXKAJCkShkAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVygCQpEoZAJJUKQNAkiplAEhSpbr3/yVWYHTXvW3NO3TTFctciSQtnWcAklQpA0CSKmUASFKlDABJqpQBIEmVMgAkqVIGgCRVygCQpEoZAJJUKQNAkiplAEhSpQwASaqUASBJlTIAJKlSBoAkVcoAkKRKGQCSVCkDQJIqZQBIUqUMAEmq1IIBEBF7IuJoRDzTMnZmROyPiBfKn2eU8YiIL0bEREQ8HRHntzxme5n/QkRsX552JEntaucM4BvAlllju4AHMnMT8EBZB7gM2FS+dgJfhWZgADcA7wYuAG6YCQ1JUm8sGACZ+QPg2KzhrcDesrwXuLJl/JvZ9DCwJiLWApcC+zPzWGa+DOzn90NFkrSCFvsawEhmHinLvwBGyvI64MWWeYfL2HzjkqQeWb3UHWRmRkR2oxiAiNhJ8/IRIyMjNBqNRe9r5DS47tzpLlXWvqXUPJepqamu77NX7KX/DEsfYC+dWmwAvBQRazPzSLnEc7SMTwIbWuatL2OTwPis8cZcO87M3cBugLGxsRwfH59rWlu+dPs+bjmw5Izr2KGrx7u6v0ajwVK+D/3EXvrPsPQB9tKpxV4CugeYuZNnO7CvZfyacjfQhcDxcqnofuCSiDijvPh7SRmTJPXIgr8eR8S3af72flZEHKZ5N89NwF0RsQP4OfChMv0+4HJgAvgt8FGAzDwWEZ8FfljmfSYzZ7+wLElaQQsGQGZ+eJ5NF88xN4Fr
59nPHmBPR9VJkpaN7wSWpEoZAJJUKQNAkiplAEhSpQwASaqUASBJlTIAJKlSBoAkVcoAkKRKGQCSVCkDQJIqZQBIUqUMAEmqlAEgSZUyACSpUgaAJFXKAJCkShkAklQpA0CSKrXg/wmszo3uureteYduumKZK5Gk+XkGIEmVMgAkqVIGgCRVygCQpEoZAJJUKQNAkiplAEhSpQwASaqUbwTroXbfMPaNLacvcyWSauQZgCRVygCQpEoZAJJUKQNAkirli8AD4MDkcT7SxgvGfrqopE54BiBJlTIAJKlSK34JKCK2AF8AVgFfz8ybVrqGYdXu+wrAy0WSVvgMICJWAV8GLgM2Ax+OiM0rWYMkqWmlzwAuACYy86cAEXEHsBV4doXrqJ7/baWklQ6AdcCLLeuHgXevcA3qQCeXldrhx1pI/aPvbgONiJ3AzrI6FRHPL2F3ZwG/XHpVvfU3Q9IHwJ/ePDy9MDzHZVj6AHuZ8YftTFrpAJgENrSsry9jb8jM3cDubjxZRDyWmWPd2FcvDUsfYC/9aFj6AHvp1ErfBvpDYFNEbIyIk4FtwD0rXIMkiRU+A8jM6Yj4GHA/zdtA92TmwZWsQZLUtOKvAWTmfcB9K/R0XbmU1AeGpQ+wl340LH2AvXQkMnO5n0OS1If8KAhJqtRQBkBEbImI5yNiIiJ29bqeTkXEoYg4EBFPRsRjZezMiNgfES+UP8/odZ1ziYg9EXE0Ip5pGZuz9mj6YjlOT0fE+b2r/P+bp49PRcRkOS5PRsTlLduuL308HxGX9qbquUXEhoh4KCKejYiDEfHxMj5Qx+UEfQzccYmIUyPi0Yh4qvTy6TK+MSIeKTXfWW6WISJOKesTZftoVwrJzKH6ovni8k+As4GTgaeAzb2uq8MeDgFnzRr7J2BXWd4F3NzrOuep/f3A+cAzC9UOXA78GxDAhcAjva5/gT4+BfztHHM3l5+zU4CN5edvVa97aKlvLXB+WX4L8ONS80AdlxP0MXDHpXxv31yWTwIeKd/ru4BtZfxrwF+V5b8GvlaWtwF3dqOOYTwDeOPjJjLzv4CZj5sYdFuBvWV5L3BlD2uZV2b+ADg2a3i+2rcC38ymh4E1EbF2ZSo9sXn6mM9W4I7MfC0zfwZM0Pw57AuZeSQzf1SWfwM8R/Nd+QN1XE7Qx3z69riU7+1UWT2pfCVwEXB3GZ99TGaO1d3AxRERS61jGANgro+bONEPST9K4N8j4vHyzmiAkcw8UpZ/AYz0prRFma/2QTxWHyuXRfa0XIYbmD7KpYN30fyNc2CPy6w+YACPS0SsiogngaPAfppnKK9k5nSZ0lrvG72U7ceBty21hmEMgGHwvsw8n+anpl4bEe9v3ZjN88CBvH1rkGsHvgr8EXAecAS4pbfldCYi3gx8B/hEZv66ddsgHZc5+hjI45KZr2fmeTQ/EeEC4B0rXcMwBsCCHzfR7zJzsvx5FPgezR+Ol2ZOw8ufR3tXYcfmq32gjlVmvlT+0v4P8C/83+WEvu8jIk6i+Y/m7Zn53TI8cMdlrj4G+bgAZOYrwEPAe2hebpt5f1ZrvW/0Ura/FfjVUp97GANgoD9uIiJOj4i3zCwDlwDP0Oxhe5m2HdjXmwoXZb7a7wGuKXedXAgcb7kk0XdmXQf/C5rHBZp9bCt3amwENgGPrnR98ynXim8DnsvMz7dsGqjjMl8fg3hcIuLtEbGmLJ8GfIDmaxoPAVeVabOPycyxugp4sJy1LU2vXw1fji+adzH8mOY1tU/2up4Oaz+b5p0LTwEHZ+qneb3vAeAF4D+AM3td6zz1f5vmafh/07yGuWO+2mneCfHlcpwOAGO9rn+BPr5V6ny6/IVc2zL/k6WP54HLel3/rF7eR/PyztPAk+Xr8kE7LifoY+COC/DHwBOl5meAfyzjZ9MMqQngX4FTyvipZX2ibD+7G3X4TmBJqtQwXgKSJLXBAJCkShkAklQpA0CSKmUASFKlDABJqpQBIEmVMgAk
qVL/C2+FhSKKT6n/AAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "df_lstm_all.Length.hist(range=(0,300), bins=30)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 46,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.02870949403069926"
+ ]
+ },
+ "execution_count": 46,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(df_lstm_all[df_lstm_all.Length > 50]) / len(df_lstm_all)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 52,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "count 10554.0\n",
+ "mean 15.9\n",
+ "std 13.6\n",
+ "min 1.0\n",
+ "25% 8.0\n",
+ "50% 13.0\n",
+ "75% 19.0\n",
+ "max 301.0\n",
+ "Name: Length, dtype: float64"
+ ]
+ },
+ "execution_count": 52,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_all.Length.describe().round(1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "lengths = []\n",
+ "for index, row in df_lstm_all.iterrows():\n",
+ " lengths.append(row.Blobs.shape[0])\n",
+ "df_lstm_all[\"BlobCount\"] = lengths\n",
+ "# add a column for pure gesture recognition without finger/knuckle\n",
+ "df_lstm_all[\"GestureOnly\"] = df_lstm_all.TaskID % 17"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "count 10554.000000\n",
+ "mean 15.906576\n",
+ "std 13.605214\n",
+ "min 1.000000\n",
+ "25% 8.000000\n",
+ "50% 13.000000\n",
+ "75% 19.000000\n",
+ "max 301.000000\n",
+ "Name: BlobCount, dtype: float64"
+ ]
+ },
+ "execution_count": 15,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_all.BlobCount.describe()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " mean | \n",
+ " std | \n",
+ "
\n",
+ " \n",
+ " GestureOnly | \n",
+ " | \n",
+ " | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 11.421429 | \n",
+ " 8.940925 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 13.618683 | \n",
+ " 13.864708 | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 8.852596 | \n",
+ " 6.315931 | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 8.672913 | \n",
+ " 5.580500 | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 9.828767 | \n",
+ " 6.793559 | \n",
+ "
\n",
+ " \n",
+ " 5 | \n",
+ " 9.211221 | \n",
+ " 6.861675 | \n",
+ "
\n",
+ " \n",
+ " 6 | \n",
+ " 14.622496 | \n",
+ " 8.338379 | \n",
+ "
\n",
+ " \n",
+ " 7 | \n",
+ " 13.684524 | \n",
+ " 13.263753 | \n",
+ "
\n",
+ " \n",
+ " 8 | \n",
+ " 20.397129 | \n",
+ " 12.916920 | \n",
+ "
\n",
+ " \n",
+ " 9 | \n",
+ " 14.468599 | \n",
+ " 10.042060 | \n",
+ "
\n",
+ " \n",
+ " 10 | \n",
+ " 14.921440 | \n",
+ " 8.909217 | \n",
+ "
\n",
+ " \n",
+ " 11 | \n",
+ " 13.695578 | \n",
+ " 7.661549 | \n",
+ "
\n",
+ " \n",
+ " 12 | \n",
+ " 17.070853 | \n",
+ " 11.755087 | \n",
+ "
\n",
+ " \n",
+ " 13 | \n",
+ " 15.712219 | \n",
+ " 10.545010 | \n",
+ "
\n",
+ " \n",
+ " 14 | \n",
+ " 16.468354 | \n",
+ " 9.826818 | \n",
+ "
\n",
+ " \n",
+ " 15 | \n",
+ " 19.840836 | \n",
+ " 11.239255 | \n",
+ "
\n",
+ " \n",
+ " 16 | \n",
+ " 42.931624 | \n",
+ " 21.024635 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " mean std\n",
+ "GestureOnly \n",
+ "0 11.421429 8.940925\n",
+ "1 13.618683 13.864708\n",
+ "2 8.852596 6.315931\n",
+ "3 8.672913 5.580500\n",
+ "4 9.828767 6.793559\n",
+ "5 9.211221 6.861675\n",
+ "6 14.622496 8.338379\n",
+ "7 13.684524 13.263753\n",
+ "8 20.397129 12.916920\n",
+ "9 14.468599 10.042060\n",
+ "10 14.921440 8.909217\n",
+ "11 13.695578 7.661549\n",
+ "12 17.070853 11.755087\n",
+ "13 15.712219 10.545010\n",
+ "14 16.468354 9.826818\n",
+ "15 19.840836 11.239255\n",
+ "16 42.931624 21.024635"
+ ]
+ },
+ "execution_count": 16,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_all.groupby(df_lstm_all.GestureOnly)[\"BlobCount\"].agg([\"mean\", \"std\"])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "before: 10554\n",
+ "after: 9193\n",
+ "ratio: 12.895584612469206\n"
+ ]
+ }
+ ],
+ "source": [
+ "# filter on gesture lengths\n",
+ "print(\"before: %s\" % len(df_lstm_all))\n",
+ "df_lstm = df_lstm_all[(df_lstm_all.BlobCount <= 100) & (df_lstm_all.BlobCount >= 5)]\n",
+ "print(\"after: %s\" % len(df_lstm))\n",
+ "print(\"ratio: %s\" % ((len(df_lstm_all) - len(df_lstm)) / len(df_lstm_all) * 100))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "count 9193.000000\n",
+ "mean 17.678995\n",
+ "std 12.059369\n",
+ "min 5.000000\n",
+ "25% 10.000000\n",
+ "50% 15.000000\n",
+ "75% 20.000000\n",
+ "max 97.000000\n",
+ "Name: BlobCount, dtype: float64"
+ ]
+ },
+ "execution_count": 18,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm.BlobCount.describe()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "lengths = []\n",
+ "for index, row in df_lstm.iterrows():\n",
+ " lengths.append(row.Blobs.shape[0])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \n",
+ "A value is trying to be set on a copy of a slice from a DataFrame.\n",
+ "Try using .loc[row_indexer,col_indexer] = value instead\n",
+ "\n",
+ "See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n",
+ " \"\"\"Entry point for launching an IPython kernel.\n"
+ ]
+ }
+ ],
+ "source": [
+ "# df_lstm is a boolean-mask slice of df_lstm_all; assigning into it raises\n",
+ "# SettingWithCopyWarning (see stderr above). Work on an explicit copy instead.\n",
+ "df_lstm = df_lstm.copy()\n",
+ "df_lstm[\"BlobCount\"] = lengths"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "count 9193.000000\n",
+ "mean 17.678995\n",
+ "std 12.059369\n",
+ "min 5.000000\n",
+ "25% 10.000000\n",
+ "50% 15.000000\n",
+ "75% 20.000000\n",
+ "max 97.000000\n",
+ "Name: BlobCount, dtype: float64"
+ ]
+ },
+ "execution_count": 22,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm.BlobCount.describe()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def lerp(a, b, c=0.5):\n",
+ "    \"\"\"Linear interpolation between a and b; c is the blend factor (0 -> a, 1 -> b).\"\"\"\n",
+ "    return c * b + (1.0 - c) * a\n",
+ "\n",
+ "# Sven's new blob detection\n",
+ "def detect_blobs_return_old(image, task):\n",
+ "    \"\"\"Detect up to two touch blobs in a 27x15 capacitive image.\n",
+ "\n",
+ "    image: 27x15 uint8 array (one sensor frame)\n",
+ "    task:  task id; tasks in [1, 6, 7, 18, 23, 24] may yield two blobs\n",
+ "           (two-finger / knuckle gestures)\n",
+ "\n",
+ "    Returns (count, list_of_blob_crops, list_of_(cY, cX)_centers).\n",
+ "    \"\"\"\n",
+ "    # pad the frame with a 1-px border so contours touching the edge stay closed\n",
+ "    large = np.ones((29,17), dtype=np.uint8)\n",
+ "    large[1:28,1:16] = np.copy(image)\n",
+ "    _, thresh = cv2.threshold(cv2.bitwise_not(large), 205, 255, cv2.THRESH_BINARY)\n",
+ "    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n",
+ "    # keep only plausibly finger-sized contours (area filter also guarantees m00 > 0 below)\n",
+ "    contours = [a for a in contours if 8 < cv2.contourArea(a) < 255]\n",
+ "    lstBlob = []\n",
+ "    lstCenter = []\n",
+ "    count = 0\n",
+ "    # ascending by area, so the largest contours are at the end\n",
+ "    contours.sort(key=cv2.contourArea)\n",
+ "    if len(contours) > 0:\n",
+ "        # if two finger or knuckle task, take the two largest contours\n",
+ "        cont_count = 2 if task in [1, 6, 7, 18, 23, 24] and len(contours) > 1 else 1\n",
+ "        for i in range(1, cont_count + 1):\n",
+ "            max_contour = contours[-1 * i]\n",
+ "            xmax, ymax = np.max(max_contour.reshape(len(max_contour),2), axis=0)\n",
+ "            xmin, ymin = np.min(max_contour.reshape(len(max_contour),2), axis=0)\n",
+ "            M = cv2.moments(max_contour)\n",
+ "            # centroid, shifted by -1 to undo the 1-px padding offset\n",
+ "            cX = int(M[\"m10\"] / M[\"m00\"]) - 1\n",
+ "            cY = int(M[\"m01\"] / M[\"m00\"]) - 1\n",
+ "            # crop the blob with a 1-px margin, clamped to the padded image bounds\n",
+ "            blob = large[max(ymin - 1, 0):min(ymax + 1, large.shape[0]),max(xmin - 1, 0):min(xmax + 1, large.shape[1])]\n",
+ "            lstBlob.append(blob)\n",
+ "            lstCenter.append((cY, cX))\n",
+ "            count = count + 1\n",
+ "        return (count, lstBlob, lstCenter)\n",
+ "    else:\n",
+ "        # bugfix: was a 4-tuple (0, [...], 0, 0) while the success path returns a\n",
+ "        # 3-tuple, and used shape (29, 19) although the padded image is (29, 17)\n",
+ "        return (0, [np.zeros((29, 17))], [(0, 0)])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# decides whether a length normalization is necessary\n",
+ "# and truncates or zero-pads the blob sequence to exactly new_len frames\n",
+ "def normalize_blobs(blobs, new_len=50):\n",
+ "    # number of frames to add (positive) or drop (negative)\n",
+ "    new_count = new_len - blobs.shape[0]\n",
+ "    if new_count == 0:\n",
+ "        return blobs\n",
+ "    elif new_count > 0:\n",
+ "        # too short: append all-zero 27x15 frames at the end\n",
+ "        temp = np.array([np.zeros((27, 15))] * new_count)\n",
+ "        return np.append(blobs, temp, axis=0)\n",
+ "    else:\n",
+ "        # too long: keep only the first new_len frames\n",
+ "        return blobs[0:new_len]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU times: user 3.24 s, sys: 556 ms, total: 3.8 s\n",
+ "Wall time: 3.8 s\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%time\n",
+ "# normalizes all image sequences\n",
+ "df_lstm_norm = df_lstm.copy(deep=True)\n",
+ "new_blobs = []\n",
+ "for index, row in df_lstm.iterrows():\n",
+ " new_blobs.append(normalize_blobs(row.Blobs, 50))\n",
+ "\n",
+ "df_lstm_norm.Blobs = new_blobs\n",
+ "\n",
+ "lengths = []\n",
+ "for index, row in df_lstm_norm.iterrows():\n",
+ " lengths.append(row.Blobs.shape[0])\n",
+ "df_lstm_norm[\"BlobCount\"] = lengths"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "count 9193.0\n",
+ "mean 50.0\n",
+ "std 0.0\n",
+ "min 50.0\n",
+ "25% 50.0\n",
+ "50% 50.0\n",
+ "75% 50.0\n",
+ "max 50.0\n",
+ "Name: BlobCount, dtype: float64"
+ ]
+ },
+ "execution_count": 26,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_norm.BlobCount.describe()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_lstm_norm.to_pickle(\"DataStudyCollection/df_lstm_norm50.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 28,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " userID | \n",
+ " TaskID | \n",
+ " VersionID | \n",
+ " Blobs | \n",
+ " BlobCount | \n",
+ " GestureOnly | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 3 | \n",
+ " [[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0... | \n",
+ " 50 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 5 | \n",
+ " [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... | \n",
+ " 50 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 6 | \n",
+ " [[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0... | \n",
+ " 50 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 7 | \n",
+ " [[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0... | \n",
+ " 50 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 8 | \n",
+ " [[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0... | \n",
+ " 50 | \n",
+ " 0 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " userID TaskID VersionID \\\n",
+ "0 1 0 3 \n",
+ "1 1 0 5 \n",
+ "2 1 0 6 \n",
+ "3 1 0 7 \n",
+ "4 1 0 8 \n",
+ "\n",
+ " Blobs BlobCount GestureOnly \n",
+ "0 [[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0... 50 0 \n",
+ "1 [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0... 50 0 \n",
+ "2 [[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0... 50 0 \n",
+ "3 [[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0... 50 0 \n",
+ "4 [[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0... 50 0 "
+ ]
+ },
+ "execution_count": 28,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_lstm_norm.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "finished\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"finished\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_11_LSTM.ipynb b/python/Step_11_LSTM.ipynb
new file mode 100644
index 0000000..60efca3
--- /dev/null
+++ b/python/Step_11_LSTM.ipynb
@@ -0,0 +1,2550 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Using TensorFlow backend.\n"
+ ]
+ }
+ ],
+ "source": [
+ "## USE for Multi GPU Systems\n",
+ "#import os\n",
+ "#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+ "\n",
+ "from keras.models import Sequential, load_model\n",
+ "from keras.layers import *\n",
+ "from keras import optimizers\n",
+ "from keras import utils\n",
+ "from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau\n",
+ "import keras\n",
+ "\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "import pandas as pd\n",
+ "import math\n",
+ "\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "# Importing matplotlib to plot images.\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "%matplotlib inline\n",
+ "\n",
+ "# Importing SK-learn to calculate precision and recall\n",
+ "import sklearn\n",
+ "from sklearn import metrics\n",
+ "from sklearn.model_selection import train_test_split, cross_val_score, LeaveOneGroupOut\n",
+ "from sklearn.utils import shuffle \n",
+ "\n",
+ "# Used for graph export\n",
+ "from tensorflow.python.framework import graph_util\n",
+ "from tensorflow.python.framework import graph_io\n",
+ "from keras import backend as K\n",
+ "\n",
+ "import pickle as pkl\n",
+ "import h5py\n",
+ "\n",
+ "from pathlib import Path\n",
+ "import os.path\n",
+ "import sys\n",
+ "import datetime\n",
+ "import time\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class LoggingTensorBoard(TensorBoard): \n",
+ "    \"\"\"TensorBoard callback that additionally records a free-form settings\n",
+ "    string as a 'Run_Settings' text summary when training begins.\"\"\"\n",
+ "\n",
+ "    def __init__(self, log_dir, settings_str_to_log, **kwargs):\n",
+ "        # settings_str_to_log: human-readable run/hyper-parameter description\n",
+ "        super(LoggingTensorBoard, self).__init__(log_dir, **kwargs)\n",
+ "\n",
+ "        self.settings_str = settings_str_to_log\n",
+ "\n",
+ "    def on_train_begin(self, logs=None):\n",
+ "        # run the base-class setup first so self.writer exists\n",
+ "        TensorBoard.on_train_begin(self, logs=logs)\n",
+ "\n",
+ "        tensor = tf.convert_to_tensor(self.settings_str)\n",
+ "        summary = tf.summary.text (\"Run_Settings\", tensor)\n",
+ "\n",
+ "        # TF1 graph mode: evaluate the text-summary op in a fresh session and\n",
+ "        # write it to the event file (NOTE(review): opens a second Session\n",
+ "        # alongside the Keras one — appears intentional for a one-off summary)\n",
+ "        with tf.Session() as sess:\n",
+ "            s = sess.run(summary)\n",
+ "            self.writer.add_summary(s)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[ 1 2 9 6 4 14 17 16 12 3 10 18 5] [13 8 11 15 7]\n"
+ ]
+ }
+ ],
+ "source": [
+ "dfAll = pd.read_pickle(\"DataStudyCollection/df_lstm_norm50.pkl\")\n",
+ "\n",
+ "lst = dfAll.userID.unique()\n",
+ "np.random.seed(42)\n",
+ "np.random.shuffle(lst)\n",
+ "test_ids = lst[-5:]\n",
+ "train_ids = lst[:-5]\n",
+ "print(train_ids, test_ids)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,\n",
+ " 18])"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dfAll.userID.unique()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dfAll.TaskID = dfAll.TaskID % 17"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_train = dfAll[dfAll.userID.isin(train_ids)][['Blobs', 'TaskID']]\n",
+ "df_test = dfAll[dfAll.userID.isin(test_ids)][['Blobs', 'TaskID']]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x_train = np.concatenate(df_train.Blobs.values).reshape(-1,50,27,15,1)\n",
+ "x_test = np.concatenate(df_test.Blobs.values).reshape(-1,50,27,15,1)\n",
+ "\n",
+ "y_train = df_train.TaskID.values\n",
+ "y_test = df_test.TaskID.values\n",
+ "\n",
+ "x_train = x_train / 255.0\n",
+ "x_test = x_test / 255.0\n",
+ "\n",
+ "# convert class vectors to binary class matrices (one-hot notation)\n",
+ "num_classes = len(dfAll.TaskID.unique())\n",
+ "y_train_one_hot = utils.to_categorical(df_train.TaskID, num_classes)\n",
+ "y_test_one_hot = utils.to_categorical(df_test.TaskID, num_classes)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# If GPU is not available: \n",
+ "# GPU_USE = '/cpu:0'\n",
+ "#config = tf.ConfigProto(device_count = {\"GPU\": 1})\n",
+ "\n",
+ "\n",
+ "# If GPU is available: \n",
+ "config = tf.ConfigProto()\n",
+ "config.log_device_placement = True\n",
+ "config.allow_soft_placement = True\n",
+ "config.gpu_options.allow_growth=True\n",
+ "config.gpu_options.allocator_type = 'BFC'\n",
+ "\n",
+ "# Limit the maximum memory used\n",
+ "config.gpu_options.per_process_gpu_memory_fraction = 0.3\n",
+ "\n",
+ "# set session config\n",
+ "tf.keras.backend.set_session(tf.Session(config=config))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "_________________________________________________________________\n",
+ "Layer (type) Output Shape Param # \n",
+ "=================================================================\n",
+ "time_distributed_10 (TimeDis (None, 50, 27, 15, 64) 640 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_11 (TimeDis (None, 50, 27, 15, 32) 18464 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_12 (TimeDis (None, 50, 14, 8, 32) 0 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_13 (TimeDis (None, 50, 14, 8, 32) 0 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_14 (TimeDis (None, 50, 14, 8, 32) 9248 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_15 (TimeDis (None, 50, 14, 8, 16) 4624 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_16 (TimeDis (None, 50, 7, 4, 16) 0 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_17 (TimeDis (None, 50, 7, 4, 16) 0 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_18 (TimeDis (None, 50, 448) 0 \n",
+ "_________________________________________________________________\n",
+ "cu_dnnlstm_3 (CuDNNLSTM) (None, 50, 80) 169600 \n",
+ "_________________________________________________________________\n",
+ "dropout_7 (Dropout) (None, 50, 80) 0 \n",
+ "_________________________________________________________________\n",
+ "cu_dnnlstm_4 (CuDNNLSTM) (None, 50) 26400 \n",
+ "_________________________________________________________________\n",
+ "dropout_8 (Dropout) (None, 50) 0 \n",
+ "_________________________________________________________________\n",
+ "dense_2 (Dense) (None, 17) 867 \n",
+ "=================================================================\n",
+ "Total params: 229,843\n",
+ "Trainable params: 229,843\n",
+ "Non-trainable params: 0\n",
+ "_________________________________________________________________\n",
+ "LSTM-v1-\n",
+ "Train on 6624 samples, validate on 2569 samples\n",
+ "Epoch 1/3000\n",
+ " - 25s - loss: 49.2683 - acc: 0.0694 - val_loss: 41.9912 - val_acc: 0.0689\n",
+ "\n",
+ "Epoch 00001: val_acc improved from -inf to 0.06890, saving model to ./ModelSnapshots/LSTM-v1-001.h5\n",
+ "Epoch 2/3000\n",
+ " - 27s - loss: 35.8620 - acc: 0.0836 - val_loss: 30.0710 - val_acc: 0.1117\n",
+ "\n",
+ "Epoch 00002: val_acc improved from 0.06890 to 0.11172, saving model to ./ModelSnapshots/LSTM-v1-002.h5\n",
+ "Epoch 3/3000\n",
+ " - 31s - loss: 25.3320 - acc: 0.1129 - val_loss: 20.9569 - val_acc: 0.1179\n",
+ "\n",
+ "Epoch 00003: val_acc improved from 0.11172 to 0.11794, saving model to ./ModelSnapshots/LSTM-v1-003.h5\n",
+ "Epoch 4/3000\n",
+ " - 31s - loss: 17.4001 - acc: 0.1131 - val_loss: 14.1625 - val_acc: 0.1125\n",
+ "\n",
+ "Epoch 00004: val_acc did not improve from 0.11794\n",
+ "Epoch 5/3000\n",
+ " - 32s - loss: 11.5850 - acc: 0.1144 - val_loss: 9.3017 - val_acc: 0.1238\n",
+ "\n",
+ "Epoch 00005: val_acc improved from 0.11794 to 0.12378, saving model to ./ModelSnapshots/LSTM-v1-005.h5\n",
+ "Epoch 6/3000\n",
+ " - 32s - loss: 7.5852 - acc: 0.1152 - val_loss: 6.1837 - val_acc: 0.1269\n",
+ "\n",
+ "Epoch 00006: val_acc improved from 0.12378 to 0.12690, saving model to ./ModelSnapshots/LSTM-v1-006.h5\n",
+ "Epoch 7/3000\n",
+ " - 32s - loss: 5.1798 - acc: 0.1193 - val_loss: 4.4709 - val_acc: 0.1390\n",
+ "\n",
+ "Epoch 00007: val_acc improved from 0.12690 to 0.13896, saving model to ./ModelSnapshots/LSTM-v1-007.h5\n",
+ "Epoch 8/3000\n",
+ " - 33s - loss: 4.0865 - acc: 0.1162 - val_loss: 3.8336 - val_acc: 0.1296\n",
+ "\n",
+ "Epoch 00008: val_acc did not improve from 0.13896\n",
+ "Epoch 9/3000\n",
+ " - 33s - loss: 3.6080 - acc: 0.1200 - val_loss: 3.5308 - val_acc: 0.1059\n",
+ "\n",
+ "Epoch 00009: val_acc did not improve from 0.13896\n",
+ "Epoch 10/3000\n",
+ " - 34s - loss: 3.2663 - acc: 0.1255 - val_loss: 3.2663 - val_acc: 0.1070\n",
+ "\n",
+ "Epoch 00010: val_acc did not improve from 0.13896\n",
+ "Epoch 11/3000\n",
+ " - 32s - loss: 3.0279 - acc: 0.1294 - val_loss: 3.0289 - val_acc: 0.1191\n",
+ "\n",
+ "Epoch 00011: val_acc did not improve from 0.13896\n",
+ "Epoch 12/3000\n",
+ " - 32s - loss: 2.8994 - acc: 0.1335 - val_loss: 2.8769 - val_acc: 0.1549\n",
+ "\n",
+ "Epoch 00012: val_acc improved from 0.13896 to 0.15492, saving model to ./ModelSnapshots/LSTM-v1-012.h5\n",
+ "Epoch 13/3000\n",
+ " - 33s - loss: 2.8275 - acc: 0.1332 - val_loss: 2.8374 - val_acc: 0.1584\n",
+ "\n",
+ "Epoch 00013: val_acc improved from 0.15492 to 0.15843, saving model to ./ModelSnapshots/LSTM-v1-013.h5\n",
+ "Epoch 14/3000\n",
+ " - 33s - loss: 2.7877 - acc: 0.1437 - val_loss: 2.8330 - val_acc: 0.1323\n",
+ "\n",
+ "Epoch 00014: val_acc did not improve from 0.15843\n",
+ "Epoch 15/3000\n",
+ " - 34s - loss: 2.7728 - acc: 0.1386 - val_loss: 2.7907 - val_acc: 0.1541\n",
+ "\n",
+ "Epoch 00015: val_acc did not improve from 0.15843\n",
+ "Epoch 16/3000\n",
+ " - 32s - loss: 2.7565 - acc: 0.1409 - val_loss: 2.8496 - val_acc: 0.1164\n",
+ "\n",
+ "Epoch 00016: val_acc did not improve from 0.15843\n",
+ "Epoch 17/3000\n",
+ " - 32s - loss: 2.7440 - acc: 0.1449 - val_loss: 2.7934 - val_acc: 0.1436\n",
+ "\n",
+ "Epoch 00017: val_acc did not improve from 0.15843\n",
+ "Epoch 18/3000\n",
+ " - 33s - loss: 2.7210 - acc: 0.1458 - val_loss: 2.7578 - val_acc: 0.1374\n",
+ "\n",
+ "Epoch 00018: val_acc did not improve from 0.15843\n",
+ "Epoch 19/3000\n",
+ " - 33s - loss: 2.7060 - acc: 0.1487 - val_loss: 2.7522 - val_acc: 0.1296\n",
+ "\n",
+ "Epoch 00019: val_acc did not improve from 0.15843\n",
+ "Epoch 20/3000\n",
+ " - 32s - loss: 2.7049 - acc: 0.1507 - val_loss: 2.7762 - val_acc: 0.1265\n",
+ "\n",
+ "Epoch 00020: val_acc did not improve from 0.15843\n",
+ "Epoch 21/3000\n",
+ " - 32s - loss: 2.6780 - acc: 0.1514 - val_loss: 2.8229 - val_acc: 0.1222\n",
+ "\n",
+ "Epoch 00021: val_acc did not improve from 0.15843\n",
+ "Epoch 22/3000\n",
+ " - 31s - loss: 2.6713 - acc: 0.1508 - val_loss: 2.7640 - val_acc: 0.1277\n",
+ "\n",
+ "Epoch 00022: val_acc did not improve from 0.15843\n",
+ "Epoch 23/3000\n",
+ " - 32s - loss: 2.6633 - acc: 0.1534 - val_loss: 2.7439 - val_acc: 0.1386\n",
+ "\n",
+ "Epoch 00023: val_acc did not improve from 0.15843\n",
+ "Epoch 24/3000\n",
+ " - 32s - loss: 2.6539 - acc: 0.1511 - val_loss: 2.7243 - val_acc: 0.1339\n",
+ "\n",
+ "Epoch 00024: val_acc did not improve from 0.15843\n",
+ "Epoch 25/3000\n",
+ " - 33s - loss: 2.6404 - acc: 0.1552 - val_loss: 2.7407 - val_acc: 0.1335\n",
+ "\n",
+ "Epoch 00025: val_acc did not improve from 0.15843\n",
+ "Epoch 26/3000\n",
+ " - 29s - loss: 2.6376 - acc: 0.1559 - val_loss: 2.7438 - val_acc: 0.1362\n",
+ "\n",
+ "Epoch 00026: val_acc did not improve from 0.15843\n",
+ "Epoch 27/3000\n",
+ " - 32s - loss: 2.6272 - acc: 0.1538 - val_loss: 2.7144 - val_acc: 0.1444\n",
+ "\n",
+ "Epoch 00027: val_acc did not improve from 0.15843\n",
+ "Epoch 28/3000\n",
+ " - 33s - loss: 2.6152 - acc: 0.1677 - val_loss: 2.7139 - val_acc: 0.1495\n",
+ "\n",
+ "Epoch 00028: val_acc did not improve from 0.15843\n",
+ "Epoch 29/3000\n",
+ " - 31s - loss: 2.6027 - acc: 0.1694 - val_loss: 2.7126 - val_acc: 0.1475\n",
+ "\n",
+ "Epoch 00029: val_acc did not improve from 0.15843\n",
+ "Epoch 30/3000\n",
+ " - 31s - loss: 2.6002 - acc: 0.1639 - val_loss: 2.7428 - val_acc: 0.1347\n",
+ "\n",
+ "Epoch 00030: val_acc did not improve from 0.15843\n",
+ "Epoch 31/3000\n",
+ " - 32s - loss: 2.5983 - acc: 0.1683 - val_loss: 2.6875 - val_acc: 0.1565\n",
+ "\n",
+ "Epoch 00031: val_acc did not improve from 0.15843\n",
+ "Epoch 32/3000\n",
+ " - 31s - loss: 2.5796 - acc: 0.1781 - val_loss: 2.6927 - val_acc: 0.1627\n",
+ "\n",
+ "Epoch 00032: val_acc improved from 0.15843 to 0.16271, saving model to ./ModelSnapshots/LSTM-v1-032.h5\n",
+ "Epoch 33/3000\n",
+ " - 32s - loss: 2.5626 - acc: 0.1792 - val_loss: 2.6635 - val_acc: 0.1682\n",
+ "\n",
+ "Epoch 00033: val_acc improved from 0.16271 to 0.16816, saving model to ./ModelSnapshots/LSTM-v1-033.h5\n",
+ "Epoch 34/3000\n",
+ " - 31s - loss: 2.5736 - acc: 0.1762 - val_loss: 2.6519 - val_acc: 0.1724\n",
+ "\n",
+ "Epoch 00034: val_acc improved from 0.16816 to 0.17244, saving model to ./ModelSnapshots/LSTM-v1-034.h5\n",
+ "Epoch 35/3000\n",
+ " - 31s - loss: 2.5582 - acc: 0.1789 - val_loss: 2.6432 - val_acc: 0.1791\n",
+ "\n",
+ "Epoch 00035: val_acc improved from 0.17244 to 0.17906, saving model to ./ModelSnapshots/LSTM-v1-035.h5\n",
+ "Epoch 36/3000\n",
+ " - 31s - loss: 2.5423 - acc: 0.1784 - val_loss: 2.6365 - val_acc: 0.1771\n",
+ "\n",
+ "Epoch 00036: val_acc did not improve from 0.17906\n",
+ "Epoch 37/3000\n",
+ " - 33s - loss: 2.5256 - acc: 0.1877 - val_loss: 2.6288 - val_acc: 0.1837\n",
+ "\n",
+ "Epoch 00037: val_acc improved from 0.17906 to 0.18373, saving model to ./ModelSnapshots/LSTM-v1-037.h5\n",
+ "Epoch 38/3000\n",
+ " - 31s - loss: 2.5277 - acc: 0.1864 - val_loss: 2.6043 - val_acc: 0.1907\n",
+ "\n",
+ "Epoch 00038: val_acc improved from 0.18373 to 0.19074, saving model to ./ModelSnapshots/LSTM-v1-038.h5\n",
+ "Epoch 39/3000\n",
+ " - 30s - loss: 2.5188 - acc: 0.1928 - val_loss: 2.5917 - val_acc: 0.1958\n",
+ "\n",
+ "Epoch 00039: val_acc improved from 0.19074 to 0.19580, saving model to ./ModelSnapshots/LSTM-v1-039.h5\n",
+ "Epoch 40/3000\n",
+ " - 30s - loss: 2.5115 - acc: 0.1996 - val_loss: 2.5736 - val_acc: 0.2009\n",
+ "\n",
+ "Epoch 00040: val_acc improved from 0.19580 to 0.20086, saving model to ./ModelSnapshots/LSTM-v1-040.h5\n",
+ "Epoch 41/3000\n",
+ " - 33s - loss: 2.4997 - acc: 0.1981 - val_loss: 2.5785 - val_acc: 0.2079\n",
+ "\n",
+ "Epoch 00041: val_acc improved from 0.20086 to 0.20786, saving model to ./ModelSnapshots/LSTM-v1-041.h5\n",
+ "Epoch 42/3000\n",
+ " - 31s - loss: 2.4816 - acc: 0.2097 - val_loss: 2.5575 - val_acc: 0.2141\n",
+ "\n",
+ "Epoch 00042: val_acc improved from 0.20786 to 0.21409, saving model to ./ModelSnapshots/LSTM-v1-042.h5\n",
+ "Epoch 43/3000\n",
+ " - 31s - loss: 2.4817 - acc: 0.2008 - val_loss: 2.5963 - val_acc: 0.1911\n",
+ "\n",
+ "Epoch 00043: val_acc did not improve from 0.21409\n",
+ "Epoch 44/3000\n",
+ " - 31s - loss: 2.4594 - acc: 0.2091 - val_loss: 2.6390 - val_acc: 0.1526\n",
+ "\n",
+ "Epoch 00044: val_acc did not improve from 0.21409\n",
+ "Epoch 45/3000\n",
+ " - 32s - loss: 2.4536 - acc: 0.2120 - val_loss: 2.5017 - val_acc: 0.2382\n",
+ "\n",
+ "Epoch 00045: val_acc improved from 0.21409 to 0.23822, saving model to ./ModelSnapshots/LSTM-v1-045.h5\n",
+ "Epoch 46/3000\n",
+ " - 31s - loss: 2.4471 - acc: 0.2126 - val_loss: 2.6241 - val_acc: 0.1654\n",
+ "\n",
+ "Epoch 00046: val_acc did not improve from 0.23822\n",
+ "Epoch 47/3000\n",
+ " - 32s - loss: 2.4455 - acc: 0.2165 - val_loss: 2.5086 - val_acc: 0.2351\n",
+ "\n",
+ "Epoch 00047: val_acc did not improve from 0.23822\n",
+ "Epoch 48/3000\n",
+ " - 31s - loss: 2.4194 - acc: 0.2236 - val_loss: 2.5044 - val_acc: 0.2413\n",
+ "\n",
+ "Epoch 00048: val_acc improved from 0.23822 to 0.24134, saving model to ./ModelSnapshots/LSTM-v1-048.h5\n",
+ "Epoch 49/3000\n",
+ " - 33s - loss: 2.4320 - acc: 0.2210 - val_loss: 2.5420 - val_acc: 0.1938\n",
+ "\n",
+ "Epoch 00049: val_acc did not improve from 0.24134\n",
+ "Epoch 50/3000\n",
+ " - 30s - loss: 2.4093 - acc: 0.2382 - val_loss: 2.5476 - val_acc: 0.2145\n",
+ "\n",
+ "Epoch 00050: val_acc did not improve from 0.24134\n",
+ "Epoch 51/3000\n",
+ " - 31s - loss: 2.4086 - acc: 0.2403 - val_loss: 2.4596 - val_acc: 0.2853\n",
+ "\n",
+ "Epoch 00051: val_acc improved from 0.24134 to 0.28533, saving model to ./ModelSnapshots/LSTM-v1-051.h5\n",
+ "Epoch 52/3000\n",
+ " - 32s - loss: 2.3899 - acc: 0.2452 - val_loss: 2.4842 - val_acc: 0.2589\n",
+ "\n",
+ "Epoch 00052: val_acc did not improve from 0.28533\n",
+ "Epoch 53/3000\n",
+ " - 32s - loss: 2.3794 - acc: 0.2509 - val_loss: 2.4321 - val_acc: 0.2896\n",
+ "\n",
+ "Epoch 00053: val_acc improved from 0.28533 to 0.28961, saving model to ./ModelSnapshots/LSTM-v1-053.h5\n",
+ "Epoch 54/3000\n",
+ " - 30s - loss: 2.3714 - acc: 0.2548 - val_loss: 2.4887 - val_acc: 0.2740\n",
+ "\n",
+ "Epoch 00054: val_acc did not improve from 0.28961\n",
+ "Epoch 55/3000\n",
+ " - 31s - loss: 2.3547 - acc: 0.2649 - val_loss: 2.3934 - val_acc: 0.3091\n",
+ "\n",
+ "Epoch 00055: val_acc improved from 0.28961 to 0.30907, saving model to ./ModelSnapshots/LSTM-v1-055.h5\n",
+ "Epoch 56/3000\n",
+ " - 30s - loss: 2.3493 - acc: 0.2672 - val_loss: 2.4147 - val_acc: 0.2958\n",
+ "\n",
+ "Epoch 00056: val_acc did not improve from 0.30907\n",
+ "Epoch 57/3000\n",
+ " - 33s - loss: 2.3285 - acc: 0.2704 - val_loss: 2.4170 - val_acc: 0.2919\n",
+ "\n",
+ "Epoch 00057: val_acc did not improve from 0.30907\n",
+ "Epoch 58/3000\n",
+ " - 32s - loss: 2.3374 - acc: 0.2640 - val_loss: 2.3739 - val_acc: 0.3149\n",
+ "\n",
+ "Epoch 00058: val_acc improved from 0.30907 to 0.31491, saving model to ./ModelSnapshots/LSTM-v1-058.h5\n",
+ "Epoch 59/3000\n",
+ " - 32s - loss: 2.3138 - acc: 0.2742 - val_loss: 2.3773 - val_acc: 0.2919\n",
+ "\n",
+ "Epoch 00059: val_acc did not improve from 0.31491\n",
+ "Epoch 60/3000\n",
+ " - 32s - loss: 2.3025 - acc: 0.2838 - val_loss: 2.3299 - val_acc: 0.3130\n",
+ "\n",
+ "Epoch 00060: val_acc did not improve from 0.31491\n",
+ "Epoch 61/3000\n",
+ " - 32s - loss: 2.2968 - acc: 0.2849 - val_loss: 2.3850 - val_acc: 0.2744\n",
+ "\n",
+ "Epoch 00061: val_acc did not improve from 0.31491\n",
+ "Epoch 62/3000\n",
+ " - 30s - loss: 2.2868 - acc: 0.2841 - val_loss: 2.2854 - val_acc: 0.3106\n",
+ "\n",
+ "Epoch 00062: val_acc did not improve from 0.31491\n",
+ "Epoch 63/3000\n",
+ " - 32s - loss: 2.2707 - acc: 0.2951 - val_loss: 2.3034 - val_acc: 0.3266\n",
+ "\n",
+ "Epoch 00063: val_acc improved from 0.31491 to 0.32659, saving model to ./ModelSnapshots/LSTM-v1-063.h5\n",
+ "Epoch 64/3000\n",
+ " - 31s - loss: 2.2708 - acc: 0.2915 - val_loss: 2.2739 - val_acc: 0.3106\n",
+ "\n",
+ "Epoch 00064: val_acc did not improve from 0.32659\n",
+ "Epoch 65/3000\n",
+ " - 31s - loss: 2.2486 - acc: 0.3016 - val_loss: 2.2597 - val_acc: 0.3258\n",
+ "\n",
+ "Epoch 00065: val_acc did not improve from 0.32659\n",
+ "Epoch 66/3000\n",
+ " - 32s - loss: 2.2553 - acc: 0.2968 - val_loss: 2.3376 - val_acc: 0.2970\n",
+ "\n",
+ "Epoch 00066: val_acc did not improve from 0.32659\n",
+ "Epoch 67/3000\n",
+ " - 31s - loss: 2.2348 - acc: 0.3053 - val_loss: 2.3033 - val_acc: 0.3036\n",
+ "\n",
+ "Epoch 00067: val_acc did not improve from 0.32659\n",
+ "Epoch 68/3000\n",
+ " - 32s - loss: 2.2185 - acc: 0.3098 - val_loss: 2.2912 - val_acc: 0.3079\n",
+ "\n",
+ "Epoch 00068: val_acc did not improve from 0.32659\n",
+ "Epoch 69/3000\n",
+ " - 31s - loss: 2.2371 - acc: 0.3043 - val_loss: 2.2778 - val_acc: 0.3098\n",
+ "\n",
+ "Epoch 00069: val_acc did not improve from 0.32659\n",
+ "Epoch 70/3000\n",
+ " - 32s - loss: 2.2254 - acc: 0.3021 - val_loss: 2.2988 - val_acc: 0.3063\n",
+ "\n",
+ "Epoch 00070: val_acc did not improve from 0.32659\n",
+ "Epoch 71/3000\n",
+ " - 30s - loss: 2.2071 - acc: 0.3151 - val_loss: 2.2761 - val_acc: 0.3106\n",
+ "\n",
+ "Epoch 00071: val_acc did not improve from 0.32659\n",
+ "Epoch 72/3000\n",
+ " - 31s - loss: 2.2169 - acc: 0.3133 - val_loss: 2.2711 - val_acc: 0.3192\n",
+ "\n",
+ "Epoch 00072: val_acc did not improve from 0.32659\n",
+ "Epoch 73/3000\n",
+ " - 31s - loss: 2.2030 - acc: 0.3190 - val_loss: 2.2297 - val_acc: 0.3297\n",
+ "\n",
+ "Epoch 00073: val_acc improved from 0.32659 to 0.32970, saving model to ./ModelSnapshots/LSTM-v1-073.h5\n",
+ "Epoch 74/3000\n",
+ " - 31s - loss: 2.1944 - acc: 0.3182 - val_loss: 2.2618 - val_acc: 0.3180\n",
+ "\n",
+ "Epoch 00074: val_acc did not improve from 0.32970\n",
+ "Epoch 75/3000\n",
+ " - 31s - loss: 2.1754 - acc: 0.3243 - val_loss: 2.2761 - val_acc: 0.3017\n",
+ "\n",
+ "Epoch 00078: val_acc did not improve from 0.34916\n",
+ "Epoch 79/3000\n",
+ " - 31s - loss: 2.1787 - acc: 0.3123 - val_loss: 2.2157 - val_acc: 0.3320\n",
+ "\n",
+ "Epoch 00079: val_acc did not improve from 0.34916\n",
+ "Epoch 80/3000\n",
+ " - 31s - loss: 2.1571 - acc: 0.3318 - val_loss: 2.2067 - val_acc: 0.3348\n",
+ "\n",
+ "Epoch 00080: val_acc did not improve from 0.34916\n",
+ "Epoch 81/3000\n",
+ " - 30s - loss: 2.1638 - acc: 0.3287 - val_loss: 2.1681 - val_acc: 0.3340\n",
+ "\n",
+ "Epoch 00081: val_acc did not improve from 0.34916\n",
+ "Epoch 82/3000\n",
+ " - 32s - loss: 2.1578 - acc: 0.3309 - val_loss: 2.2740 - val_acc: 0.2993\n",
+ "\n",
+ "Epoch 00082: val_acc did not improve from 0.34916\n",
+ "Epoch 83/3000\n",
+ " - 30s - loss: 2.1539 - acc: 0.3296 - val_loss: 2.1840 - val_acc: 0.3414\n",
+ "\n",
+ "Epoch 00083: val_acc did not improve from 0.34916\n",
+ "Epoch 84/3000\n",
+ " - 31s - loss: 2.1431 - acc: 0.3389 - val_loss: 2.2319 - val_acc: 0.3274\n",
+ "\n",
+ "Epoch 00084: val_acc did not improve from 0.34916\n",
+ "Epoch 85/3000\n",
+ " - 31s - loss: 2.1423 - acc: 0.3315 - val_loss: 2.1704 - val_acc: 0.3340\n",
+ "\n",
+ "Epoch 00085: val_acc did not improve from 0.34916\n",
+ "Epoch 86/3000\n",
+ " - 32s - loss: 2.1380 - acc: 0.3392 - val_loss: 2.1629 - val_acc: 0.3511\n",
+ "\n",
+ "Epoch 00086: val_acc improved from 0.34916 to 0.35111, saving model to ./ModelSnapshots/LSTM-v1-086.h5\n",
+ "Epoch 87/3000\n",
+ " - 31s - loss: 2.1325 - acc: 0.3471 - val_loss: 2.1480 - val_acc: 0.3499\n",
+ "\n",
+ "Epoch 00087: val_acc did not improve from 0.35111\n",
+ "Epoch 88/3000\n",
+ " - 33s - loss: 2.1316 - acc: 0.3465 - val_loss: 2.2243 - val_acc: 0.3297\n",
+ "\n",
+ "Epoch 00088: val_acc did not improve from 0.35111\n",
+ "Epoch 89/3000\n",
+ " - 32s - loss: 2.1370 - acc: 0.3357 - val_loss: 2.1913 - val_acc: 0.3379\n",
+ "\n",
+ "Epoch 00089: val_acc did not improve from 0.35111\n",
+ "Epoch 90/3000\n",
+ " - 30s - loss: 2.1212 - acc: 0.3444 - val_loss: 2.1825 - val_acc: 0.3293\n",
+ "\n",
+ "Epoch 00090: val_acc did not improve from 0.35111\n",
+ "Epoch 91/3000\n",
+ " - 31s - loss: 2.1139 - acc: 0.3501 - val_loss: 2.1661 - val_acc: 0.3437\n",
+ "\n",
+ "Epoch 00091: val_acc did not improve from 0.35111\n",
+ "Epoch 92/3000\n",
+ " - 31s - loss: 2.1255 - acc: 0.3392 - val_loss: 2.1815 - val_acc: 0.3581\n",
+ "\n",
+ "Epoch 00092: val_acc improved from 0.35111 to 0.35812, saving model to ./ModelSnapshots/LSTM-v1-092.h5\n",
+ "Epoch 93/3000\n",
+ " - 32s - loss: 2.1239 - acc: 0.3484 - val_loss: 2.1803 - val_acc: 0.3507\n",
+ "\n",
+ "Epoch 00093: val_acc did not improve from 0.35812\n",
+ "Epoch 94/3000\n",
+ " - 32s - loss: 2.1060 - acc: 0.3588 - val_loss: 2.1368 - val_acc: 0.3612\n",
+ "\n",
+ "Epoch 00094: val_acc improved from 0.35812 to 0.36123, saving model to ./ModelSnapshots/LSTM-v1-094.h5\n",
+ "Epoch 95/3000\n",
+ " - 30s - loss: 2.1124 - acc: 0.3510 - val_loss: 2.2286 - val_acc: 0.3324\n",
+ "\n",
+ "Epoch 00095: val_acc did not improve from 0.36123\n",
+ "Epoch 96/3000\n",
+ " - 33s - loss: 2.0886 - acc: 0.3552 - val_loss: 2.1486 - val_acc: 0.3453\n",
+ "\n",
+ "Epoch 00096: val_acc did not improve from 0.36123\n",
+ "Epoch 97/3000\n",
+ " - 31s - loss: 2.1004 - acc: 0.3564 - val_loss: 2.1067 - val_acc: 0.3678\n",
+ "\n",
+ "Epoch 00097: val_acc improved from 0.36123 to 0.36785, saving model to ./ModelSnapshots/LSTM-v1-097.h5\n",
+ "Epoch 98/3000\n",
+ " - 32s - loss: 2.0886 - acc: 0.3573 - val_loss: 2.1157 - val_acc: 0.3725\n",
+ "\n",
+ "Epoch 00098: val_acc improved from 0.36785 to 0.37252, saving model to ./ModelSnapshots/LSTM-v1-098.h5\n",
+ "Epoch 99/3000\n",
+ " - 31s - loss: 2.0741 - acc: 0.3667 - val_loss: 2.1037 - val_acc: 0.3550\n",
+ "\n",
+ "Epoch 00099: val_acc did not improve from 0.37252\n",
+ "Epoch 100/3000\n",
+ " - 32s - loss: 2.0715 - acc: 0.3656 - val_loss: 2.1305 - val_acc: 0.3566\n",
+ "\n",
+ "Epoch 00100: val_acc did not improve from 0.37252\n",
+ "Epoch 101/3000\n",
+ " - 32s - loss: 2.0641 - acc: 0.3638 - val_loss: 2.1131 - val_acc: 0.3690\n",
+ "\n",
+ "Epoch 00101: val_acc did not improve from 0.37252\n",
+ "Epoch 102/3000\n",
+ " - 31s - loss: 2.0814 - acc: 0.3629 - val_loss: 2.1053 - val_acc: 0.3717\n",
+ "\n",
+ "Epoch 00102: val_acc did not improve from 0.37252\n",
+ "Epoch 103/3000\n",
+ " - 31s - loss: 2.0689 - acc: 0.3675 - val_loss: 2.1272 - val_acc: 0.3636\n",
+ "\n",
+ "Epoch 00103: val_acc did not improve from 0.37252\n",
+ "Epoch 104/3000\n",
+ " - 32s - loss: 2.0602 - acc: 0.3759 - val_loss: 2.0840 - val_acc: 0.3776\n",
+ "\n",
+ "Epoch 00104: val_acc improved from 0.37252 to 0.37758, saving model to ./ModelSnapshots/LSTM-v1-104.h5\n",
+ "Epoch 105/3000\n",
+ " - 31s - loss: 2.0519 - acc: 0.3715 - val_loss: 2.1287 - val_acc: 0.3605\n",
+ "\n",
+ "Epoch 00105: val_acc did not improve from 0.37758\n",
+ "Epoch 106/3000\n",
+ " - 32s - loss: 2.0573 - acc: 0.3759 - val_loss: 2.1473 - val_acc: 0.3706\n",
+ "\n",
+ "Epoch 00106: val_acc did not improve from 0.37758\n",
+ "Epoch 107/3000\n",
+ " - 32s - loss: 2.0489 - acc: 0.3735 - val_loss: 2.1317 - val_acc: 0.3745\n",
+ "\n",
+ "Epoch 00107: val_acc did not improve from 0.37758\n",
+ "Epoch 108/3000\n",
+ " - 30s - loss: 2.0403 - acc: 0.3819 - val_loss: 2.1252 - val_acc: 0.3931\n",
+ "\n",
+ "Epoch 00108: val_acc improved from 0.37758 to 0.39315, saving model to ./ModelSnapshots/LSTM-v1-108.h5\n",
+ "Epoch 109/3000\n",
+ " - 31s - loss: 2.0376 - acc: 0.3853 - val_loss: 2.1251 - val_acc: 0.3881\n",
+ "\n",
+ "Epoch 00109: val_acc did not improve from 0.39315\n",
+ "Epoch 110/3000\n",
+ " - 31s - loss: 2.0264 - acc: 0.3895 - val_loss: 2.0557 - val_acc: 0.3978\n",
+ "\n",
+ "Epoch 00110: val_acc improved from 0.39315 to 0.39782, saving model to ./ModelSnapshots/LSTM-v1-110.h5\n",
+ "Epoch 111/3000\n",
+ " - 31s - loss: 2.0173 - acc: 0.3859 - val_loss: 2.0634 - val_acc: 0.4188\n",
+ "\n",
+ "Epoch 00111: val_acc improved from 0.39782 to 0.41884, saving model to ./ModelSnapshots/LSTM-v1-111.h5\n",
+ "Epoch 112/3000\n",
+ " - 32s - loss: 2.0159 - acc: 0.3992 - val_loss: 2.1026 - val_acc: 0.4025\n",
+ "\n",
+ "Epoch 00112: val_acc did not improve from 0.41884\n",
+ "Epoch 113/3000\n",
+ " - 31s - loss: 2.0291 - acc: 0.3848 - val_loss: 2.0706 - val_acc: 0.4177\n",
+ "\n",
+ "Epoch 00113: val_acc did not improve from 0.41884\n",
+ "Epoch 114/3000\n",
+ " - 31s - loss: 2.0226 - acc: 0.3966 - val_loss: 2.0505 - val_acc: 0.4200\n",
+ "\n",
+ "Epoch 00114: val_acc improved from 0.41884 to 0.42001, saving model to ./ModelSnapshots/LSTM-v1-114.h5\n",
+ "Epoch 115/3000\n",
+ " - 32s - loss: 2.0020 - acc: 0.3954 - val_loss: 2.0881 - val_acc: 0.4079\n",
+ "\n",
+ "Epoch 00115: val_acc did not improve from 0.42001\n",
+ "Epoch 116/3000\n",
+ " - 30s - loss: 1.9986 - acc: 0.4007 - val_loss: 2.0770 - val_acc: 0.4177\n",
+ "\n",
+ "Epoch 00116: val_acc did not improve from 0.42001\n",
+ "Epoch 117/3000\n",
+ " - 32s - loss: 2.0018 - acc: 0.3958 - val_loss: 2.0692 - val_acc: 0.4111\n",
+ "\n",
+ "Epoch 00117: val_acc did not improve from 0.42001\n",
+ "Epoch 118/3000\n",
+ " - 33s - loss: 2.0095 - acc: 0.3973 - val_loss: 2.0942 - val_acc: 0.4103\n",
+ "\n",
+ "Epoch 00118: val_acc did not improve from 0.42001\n",
+ "Epoch 119/3000\n",
+ " - 31s - loss: 1.9914 - acc: 0.4087 - val_loss: 2.0766 - val_acc: 0.4138\n",
+ "\n",
+ "Epoch 00119: val_acc did not improve from 0.42001\n",
+ "Epoch 120/3000\n",
+ " - 31s - loss: 1.9960 - acc: 0.4066 - val_loss: 2.0496 - val_acc: 0.4290\n",
+ "\n",
+ "Epoch 00120: val_acc improved from 0.42001 to 0.42896, saving model to ./ModelSnapshots/LSTM-v1-120.h5\n",
+ "Epoch 121/3000\n",
+ " - 30s - loss: 1.9872 - acc: 0.4062 - val_loss: 2.0904 - val_acc: 0.3986\n",
+ "\n",
+ "Epoch 00121: val_acc did not improve from 0.42896\n",
+ "Epoch 122/3000\n",
+ " - 32s - loss: 1.9837 - acc: 0.4046 - val_loss: 2.0859 - val_acc: 0.4068\n",
+ "\n",
+ "Epoch 00122: val_acc did not improve from 0.42896\n",
+ "Epoch 123/3000\n",
+ " - 31s - loss: 1.9728 - acc: 0.4132 - val_loss: 2.0615 - val_acc: 0.4243\n",
+ "\n",
+ "Epoch 00123: val_acc did not improve from 0.42896\n",
+ "Epoch 124/3000\n",
+ " - 33s - loss: 1.9660 - acc: 0.4161 - val_loss: 2.0382 - val_acc: 0.4414\n",
+ "\n",
+ "Epoch 00124: val_acc improved from 0.42896 to 0.44142, saving model to ./ModelSnapshots/LSTM-v1-124.h5\n",
+ "Epoch 125/3000\n",
+ " - 29s - loss: 1.9553 - acc: 0.4141 - val_loss: 2.0318 - val_acc: 0.4395\n",
+ "\n",
+ "Epoch 00125: val_acc did not improve from 0.44142\n",
+ "Epoch 126/3000\n",
+ " - 31s - loss: 1.9489 - acc: 0.4239 - val_loss: 2.0009 - val_acc: 0.4519\n",
+ "\n",
+ "Epoch 00126: val_acc improved from 0.44142 to 0.45193, saving model to ./ModelSnapshots/LSTM-v1-126.h5\n",
+ "Epoch 127/3000\n",
+ " - 32s - loss: 1.9371 - acc: 0.4274 - val_loss: 2.0688 - val_acc: 0.4165\n",
+ "\n",
+ "Epoch 00127: val_acc did not improve from 0.45193\n",
+ "Epoch 128/3000\n",
+ " - 30s - loss: 1.9494 - acc: 0.4210 - val_loss: 2.0101 - val_acc: 0.4430\n",
+ "\n",
+ "Epoch 00128: val_acc did not improve from 0.45193\n",
+ "Epoch 129/3000\n",
+ " - 32s - loss: 1.9251 - acc: 0.4309 - val_loss: 1.9964 - val_acc: 0.4329\n",
+ "\n",
+ "Epoch 00129: val_acc did not improve from 0.45193\n",
+ "Epoch 130/3000\n",
+ " - 31s - loss: 1.9454 - acc: 0.4266 - val_loss: 2.0455 - val_acc: 0.4270\n",
+ "\n",
+ "Epoch 00130: val_acc did not improve from 0.45193\n",
+ "Epoch 131/3000\n",
+ " - 32s - loss: 1.9343 - acc: 0.4209 - val_loss: 2.0057 - val_acc: 0.4652\n",
+ "\n",
+ "Epoch 00131: val_acc improved from 0.45193 to 0.46516, saving model to ./ModelSnapshots/LSTM-v1-131.h5\n",
+ "Epoch 132/3000\n",
+ " - 31s - loss: 1.9280 - acc: 0.4309 - val_loss: 2.0194 - val_acc: 0.4562\n",
+ "\n",
+ "Epoch 00132: val_acc did not improve from 0.46516\n",
+ "Epoch 133/3000\n",
+ " - 32s - loss: 1.8961 - acc: 0.4429 - val_loss: 1.9824 - val_acc: 0.4601\n",
+ "\n",
+ "Epoch 00133: val_acc did not improve from 0.46516\n",
+ "Epoch 134/3000\n",
+ " - 31s - loss: 1.9103 - acc: 0.4377 - val_loss: 2.0832 - val_acc: 0.4336\n",
+ "\n",
+ "Epoch 00134: val_acc did not improve from 0.46516\n",
+ "Epoch 135/3000\n",
+ " - 31s - loss: 1.9186 - acc: 0.4408 - val_loss: 1.9925 - val_acc: 0.4675\n",
+ "\n",
+ "Epoch 00135: val_acc improved from 0.46516 to 0.46750, saving model to ./ModelSnapshots/LSTM-v1-135.h5\n",
+ "Epoch 136/3000\n",
+ " - 33s - loss: 1.8893 - acc: 0.4435 - val_loss: 2.0137 - val_acc: 0.4539\n",
+ "\n",
+ "Epoch 00136: val_acc did not improve from 0.46750\n",
+ "Epoch 137/3000\n",
+ " - 33s - loss: 1.8867 - acc: 0.4512 - val_loss: 1.9967 - val_acc: 0.4624\n",
+ "\n",
+ "Epoch 00137: val_acc did not improve from 0.46750\n",
+ "Epoch 138/3000\n",
+ " - 31s - loss: 1.8654 - acc: 0.4515 - val_loss: 1.9488 - val_acc: 0.4936\n",
+ "\n",
+ "Epoch 00138: val_acc improved from 0.46750 to 0.49358, saving model to ./ModelSnapshots/LSTM-v1-138.h5\n",
+ "Epoch 139/3000\n",
+ " - 30s - loss: 1.8678 - acc: 0.4567 - val_loss: 1.9670 - val_acc: 0.4710\n",
+ "\n",
+ "Epoch 00139: val_acc did not improve from 0.49358\n",
+ "Epoch 140/3000\n",
+ " - 30s - loss: 1.8754 - acc: 0.4543 - val_loss: 1.9655 - val_acc: 0.4909\n",
+ "\n",
+ "Epoch 00140: val_acc did not improve from 0.49358\n",
+ "Epoch 141/3000\n",
+ " - 31s - loss: 1.8421 - acc: 0.4589 - val_loss: 1.9581 - val_acc: 0.4889\n",
+ "\n",
+ "Epoch 00141: val_acc did not improve from 0.49358\n",
+ "Epoch 142/3000\n",
+ " - 32s - loss: 1.8475 - acc: 0.4614 - val_loss: 1.9425 - val_acc: 0.4944\n",
+ "\n",
+ "Epoch 00142: val_acc improved from 0.49358 to 0.49436, saving model to ./ModelSnapshots/LSTM-v1-142.h5\n",
+ "Epoch 143/3000\n",
+ " - 31s - loss: 1.8568 - acc: 0.4626 - val_loss: 2.0092 - val_acc: 0.4597\n",
+ "\n",
+ "Epoch 00143: val_acc did not improve from 0.49436\n",
+ "Epoch 144/3000\n",
+ " - 31s - loss: 1.8311 - acc: 0.4716 - val_loss: 1.9390 - val_acc: 0.4858\n",
+ "\n",
+ "Epoch 00144: val_acc did not improve from 0.49436\n",
+ "Epoch 145/3000\n",
+ " - 31s - loss: 1.8298 - acc: 0.4694 - val_loss: 1.9286 - val_acc: 0.4792\n",
+ "\n",
+ "Epoch 00145: val_acc did not improve from 0.49436\n",
+ "Epoch 146/3000\n",
+ " - 31s - loss: 1.8223 - acc: 0.4698 - val_loss: 1.9748 - val_acc: 0.4523\n",
+ "\n",
+ "Epoch 00146: val_acc did not improve from 0.49436\n",
+ "Epoch 147/3000\n",
+ " - 31s - loss: 1.8183 - acc: 0.4781 - val_loss: 1.9187 - val_acc: 0.5119\n",
+ "\n",
+ "Epoch 00147: val_acc improved from 0.49436 to 0.51187, saving model to ./ModelSnapshots/LSTM-v1-147.h5\n",
+ "Epoch 148/3000\n",
+ " - 31s - loss: 1.8252 - acc: 0.4710 - val_loss: 1.9349 - val_acc: 0.4815\n",
+ "\n",
+ "Epoch 00148: val_acc did not improve from 0.51187\n",
+ "Epoch 149/3000\n",
+ " - 31s - loss: 1.7967 - acc: 0.4855 - val_loss: 1.9059 - val_acc: 0.5056\n",
+ "\n",
+ "Epoch 00149: val_acc did not improve from 0.51187\n",
+ "Epoch 150/3000\n",
+ " - 32s - loss: 1.7899 - acc: 0.4887 - val_loss: 1.9118 - val_acc: 0.4971\n",
+ "\n",
+ "Epoch 00150: val_acc did not improve from 0.51187\n",
+ "Epoch 151/3000\n",
+ " - 30s - loss: 1.7822 - acc: 0.4956 - val_loss: 1.8954 - val_acc: 0.4877\n",
+ "\n",
+ "Epoch 00151: val_acc did not improve from 0.51187\n",
+ "Epoch 152/3000\n",
+ " - 32s - loss: 1.7850 - acc: 0.4961 - val_loss: 1.8768 - val_acc: 0.5002\n",
+ "\n",
+ "Epoch 00152: val_acc did not improve from 0.51187\n",
+ "Epoch 153/3000\n",
+ " - 32s - loss: 1.7858 - acc: 0.4949 - val_loss: 1.8764 - val_acc: 0.5165\n",
+ "\n",
+ "Epoch 00153: val_acc improved from 0.51187 to 0.51654, saving model to ./ModelSnapshots/LSTM-v1-153.h5\n",
+ "Epoch 154/3000\n",
+ " - 31s - loss: 1.7986 - acc: 0.4902 - val_loss: 1.9126 - val_acc: 0.4504\n",
+ "\n",
+ "Epoch 00154: val_acc did not improve from 0.51654\n",
+ "Epoch 155/3000\n",
+ " - 31s - loss: 1.7665 - acc: 0.5023 - val_loss: 1.8434 - val_acc: 0.5364\n",
+ "\n",
+ "Epoch 00155: val_acc improved from 0.51654 to 0.53640, saving model to ./ModelSnapshots/LSTM-v1-155.h5\n",
+ "Epoch 156/3000\n",
+ " - 32s - loss: 1.7740 - acc: 0.4988 - val_loss: 1.8442 - val_acc: 0.5360\n",
+ "\n",
+ "Epoch 00156: val_acc did not improve from 0.53640\n",
+ "Epoch 157/3000\n",
+ " - 30s - loss: 1.7498 - acc: 0.5103 - val_loss: 1.8750 - val_acc: 0.5243\n",
+ "\n",
+ "Epoch 00157: val_acc did not improve from 0.53640\n",
+ "Epoch 158/3000\n",
+ " - 31s - loss: 1.7603 - acc: 0.5050 - val_loss: 1.9048 - val_acc: 0.4897\n",
+ "\n",
+ "Epoch 00158: val_acc did not improve from 0.53640\n",
+ "Epoch 159/3000\n",
+ " - 31s - loss: 1.7320 - acc: 0.5198 - val_loss: 1.7988 - val_acc: 0.5586\n",
+ "\n",
+ "Epoch 00159: val_acc improved from 0.53640 to 0.55858, saving model to ./ModelSnapshots/LSTM-v1-159.h5\n",
+ "Epoch 160/3000\n",
+ " - 31s - loss: 1.7254 - acc: 0.5246 - val_loss: 1.8168 - val_acc: 0.5469\n",
+ "\n",
+ "Epoch 00160: val_acc did not improve from 0.55858\n",
+ "Epoch 161/3000\n",
+ " - 31s - loss: 1.7381 - acc: 0.5205 - val_loss: 1.8779 - val_acc: 0.5154\n",
+ "\n",
+ "Epoch 00161: val_acc did not improve from 0.55858\n",
+ "Epoch 162/3000\n",
+ " - 31s - loss: 1.7354 - acc: 0.5210 - val_loss: 1.7865 - val_acc: 0.5656\n",
+ "\n",
+ "Epoch 00162: val_acc improved from 0.55858 to 0.56559, saving model to ./ModelSnapshots/LSTM-v1-162.h5\n",
+ "Epoch 163/3000\n",
+ " - 32s - loss: 1.7142 - acc: 0.5282 - val_loss: 1.8238 - val_acc: 0.5566\n",
+ "\n",
+ "Epoch 00163: val_acc did not improve from 0.56559\n",
+ "Epoch 164/3000\n",
+ " - 32s - loss: 1.7238 - acc: 0.5282 - val_loss: 1.8009 - val_acc: 0.5415\n",
+ "\n",
+ "Epoch 00164: val_acc did not improve from 0.56559\n",
+ "Epoch 165/3000\n",
+ " - 29s - loss: 1.7092 - acc: 0.5334 - val_loss: 1.7934 - val_acc: 0.5699\n",
+ "\n",
+ "Epoch 00165: val_acc improved from 0.56559 to 0.56987, saving model to ./ModelSnapshots/LSTM-v1-165.h5\n",
+ "Epoch 166/3000\n",
+ " - 32s - loss: 1.7107 - acc: 0.5374 - val_loss: 1.8209 - val_acc: 0.5481\n",
+ "\n",
+ "Epoch 00166: val_acc did not improve from 0.56987\n",
+ "Epoch 167/3000\n",
+ " - 30s - loss: 1.6829 - acc: 0.5492 - val_loss: 1.7870 - val_acc: 0.5477\n",
+ "\n",
+ "Epoch 00167: val_acc did not improve from 0.56987\n",
+ "Epoch 168/3000\n",
+ " - 31s - loss: 1.6791 - acc: 0.5402 - val_loss: 1.7036 - val_acc: 0.5847\n",
+ "\n",
+ "Epoch 00168: val_acc improved from 0.56987 to 0.58466, saving model to ./ModelSnapshots/LSTM-v1-168.h5\n",
+ "Epoch 169/3000\n",
+ " - 32s - loss: 1.6686 - acc: 0.5515 - val_loss: 1.7252 - val_acc: 0.5777\n",
+ "\n",
+ "Epoch 00169: val_acc did not improve from 0.58466\n",
+ "Epoch 170/3000\n",
+ " - 30s - loss: 1.6653 - acc: 0.5537 - val_loss: 1.7381 - val_acc: 0.5839\n",
+ "\n",
+ "Epoch 00170: val_acc did not improve from 0.58466\n",
+ "Epoch 171/3000\n",
+ " - 32s - loss: 1.6565 - acc: 0.5634 - val_loss: 1.7307 - val_acc: 0.5780\n",
+ "\n",
+ "Epoch 00171: val_acc did not improve from 0.58466\n",
+ "Epoch 172/3000\n",
+ " - 30s - loss: 1.6546 - acc: 0.5556 - val_loss: 1.6916 - val_acc: 0.6057\n",
+ "\n",
+ "Epoch 00172: val_acc improved from 0.58466 to 0.60568, saving model to ./ModelSnapshots/LSTM-v1-172.h5\n",
+ "Epoch 173/3000\n",
+ " - 31s - loss: 1.6538 - acc: 0.5550 - val_loss: 1.7286 - val_acc: 0.5718\n",
+ "\n",
+ "Epoch 00173: val_acc did not improve from 0.60568\n",
+ "Epoch 174/3000\n",
+ " - 31s - loss: 1.6238 - acc: 0.5690 - val_loss: 1.7000 - val_acc: 0.6030\n",
+ "\n",
+ "Epoch 00174: val_acc did not improve from 0.60568\n",
+ "Epoch 175/3000\n",
+ " - 30s - loss: 1.6366 - acc: 0.5630 - val_loss: 1.7058 - val_acc: 0.5987\n",
+ "\n",
+ "Epoch 00175: val_acc did not improve from 0.60568\n",
+ "Epoch 176/3000\n",
+ " - 31s - loss: 1.6284 - acc: 0.5693 - val_loss: 1.6839 - val_acc: 0.6002\n",
+ "\n",
+ "Epoch 00176: val_acc did not improve from 0.60568\n",
+ "Epoch 177/3000\n",
+ " - 31s - loss: 1.6234 - acc: 0.5655 - val_loss: 1.7064 - val_acc: 0.5870\n",
+ "\n",
+ "Epoch 00177: val_acc did not improve from 0.60568\n",
+ "Epoch 178/3000\n",
+ " - 31s - loss: 1.6118 - acc: 0.5768 - val_loss: 1.6805 - val_acc: 0.6088\n",
+ "\n",
+ "Epoch 00178: val_acc improved from 0.60568 to 0.60880, saving model to ./ModelSnapshots/LSTM-v1-178.h5\n",
+ "Epoch 179/3000\n",
+ " - 31s - loss: 1.5972 - acc: 0.5726 - val_loss: 1.6665 - val_acc: 0.5998\n",
+ "\n",
+ "Epoch 00179: val_acc did not improve from 0.60880\n",
+ "Epoch 180/3000\n",
+ " - 32s - loss: 1.6037 - acc: 0.5764 - val_loss: 1.6573 - val_acc: 0.6065\n",
+ "\n",
+ "Epoch 00180: val_acc did not improve from 0.60880\n",
+ "Epoch 181/3000\n",
+ " - 29s - loss: 1.6060 - acc: 0.5814 - val_loss: 1.6807 - val_acc: 0.6010\n",
+ "\n",
+ "Epoch 00181: val_acc did not improve from 0.60880\n",
+ "Epoch 182/3000\n",
+ " - 31s - loss: 1.5979 - acc: 0.5782 - val_loss: 1.6651 - val_acc: 0.6061\n",
+ "\n",
+ "Epoch 00182: val_acc did not improve from 0.60880\n",
+ "Epoch 183/3000\n",
+ " - 32s - loss: 1.5730 - acc: 0.5877 - val_loss: 1.7102 - val_acc: 0.5905\n",
+ "\n",
+ "Epoch 00183: val_acc did not improve from 0.60880\n",
+ "Epoch 184/3000\n",
+ " - 31s - loss: 1.5758 - acc: 0.5882 - val_loss: 1.6400 - val_acc: 0.6345\n",
+ "\n",
+ "Epoch 00184: val_acc improved from 0.60880 to 0.63449, saving model to ./ModelSnapshots/LSTM-v1-184.h5\n",
+ "Epoch 185/3000\n",
+ " - 30s - loss: 1.5683 - acc: 0.5978 - val_loss: 1.7124 - val_acc: 0.5858\n",
+ "\n",
+ "Epoch 00185: val_acc did not improve from 0.63449\n",
+ "Epoch 186/3000\n",
+ " - 32s - loss: 1.5617 - acc: 0.5957 - val_loss: 1.5891 - val_acc: 0.6322\n",
+ "\n",
+ "Epoch 00186: val_acc did not improve from 0.63449\n",
+ "Epoch 187/3000\n",
+ " - 31s - loss: 1.5567 - acc: 0.5992 - val_loss: 1.5721 - val_acc: 0.6454\n",
+ "\n",
+ "Epoch 00187: val_acc improved from 0.63449 to 0.64539, saving model to ./ModelSnapshots/LSTM-v1-187.h5\n",
+ "Epoch 188/3000\n",
+ " - 30s - loss: 1.5432 - acc: 0.5954 - val_loss: 1.6278 - val_acc: 0.6271\n",
+ "\n",
+ "Epoch 00188: val_acc did not improve from 0.64539\n",
+ "Epoch 189/3000\n",
+ " - 31s - loss: 1.5315 - acc: 0.6088 - val_loss: 1.5630 - val_acc: 0.6477\n",
+ "\n",
+ "Epoch 00189: val_acc improved from 0.64539 to 0.64772, saving model to ./ModelSnapshots/LSTM-v1-189.h5\n",
+ "Epoch 190/3000\n",
+ " - 30s - loss: 1.5608 - acc: 0.6040 - val_loss: 1.6215 - val_acc: 0.6279\n",
+ "\n",
+ "Epoch 00190: val_acc did not improve from 0.64772\n",
+ "Epoch 191/3000\n",
+ " - 32s - loss: 1.5158 - acc: 0.6188 - val_loss: 1.6497 - val_acc: 0.6290\n",
+ "\n",
+ "Epoch 00191: val_acc did not improve from 0.64772\n",
+ "Epoch 192/3000\n",
+ " - 29s - loss: 1.5071 - acc: 0.6093 - val_loss: 1.5323 - val_acc: 0.6532\n",
+ "\n",
+ "Epoch 00192: val_acc improved from 0.64772 to 0.65317, saving model to ./ModelSnapshots/LSTM-v1-192.h5\n",
+ "Epoch 193/3000\n",
+ " - 32s - loss: 1.5063 - acc: 0.6128 - val_loss: 1.5500 - val_acc: 0.6407\n",
+ "\n",
+ "Epoch 00193: val_acc did not improve from 0.65317\n",
+ "Epoch 194/3000\n",
+ " - 29s - loss: 1.4953 - acc: 0.6147 - val_loss: 1.5682 - val_acc: 0.6462\n",
+ "\n",
+ "Epoch 00194: val_acc did not improve from 0.65317\n",
+ "Epoch 195/3000\n",
+ " - 32s - loss: 1.4770 - acc: 0.6262 - val_loss: 1.5359 - val_acc: 0.6567\n",
+ "\n",
+ "Epoch 00195: val_acc improved from 0.65317 to 0.65668, saving model to ./ModelSnapshots/LSTM-v1-195.h5\n",
+ "Epoch 196/3000\n",
+ " - 30s - loss: 1.4822 - acc: 0.6330 - val_loss: 1.5659 - val_acc: 0.6613\n",
+ "\n",
+ "Epoch 00196: val_acc improved from 0.65668 to 0.66135, saving model to ./ModelSnapshots/LSTM-v1-196.h5\n",
+ "Epoch 197/3000\n",
+ " - 32s - loss: 1.4783 - acc: 0.6256 - val_loss: 1.5052 - val_acc: 0.6610\n",
+ "\n",
+ "Epoch 00197: val_acc did not improve from 0.66135\n",
+ "Epoch 198/3000\n",
+ " - 30s - loss: 1.4966 - acc: 0.6256 - val_loss: 1.5852 - val_acc: 0.6220\n",
+ "\n",
+ "Epoch 00198: val_acc did not improve from 0.66135\n",
+ "Epoch 199/3000\n",
+ " - 33s - loss: 1.4907 - acc: 0.6270 - val_loss: 1.5417 - val_acc: 0.6641\n",
+ "\n",
+ "Epoch 00199: val_acc improved from 0.66135 to 0.66407, saving model to ./ModelSnapshots/LSTM-v1-199.h5\n",
+ "Epoch 200/3000\n",
+ " - 31s - loss: 1.4683 - acc: 0.6324 - val_loss: 1.5307 - val_acc: 0.6528\n",
+ "\n",
+ "Epoch 00200: val_acc did not improve from 0.66407\n",
+ "Epoch 201/3000\n",
+ " - 30s - loss: 1.4606 - acc: 0.6365 - val_loss: 1.5474 - val_acc: 0.6458\n",
+ "\n",
+ "Epoch 00201: val_acc did not improve from 0.66407\n",
+ "Epoch 202/3000\n",
+ " - 32s - loss: 1.4426 - acc: 0.6439 - val_loss: 1.4965 - val_acc: 0.6676\n",
+ "\n",
+ "Epoch 00202: val_acc improved from 0.66407 to 0.66757, saving model to ./ModelSnapshots/LSTM-v1-202.h5\n",
+ "Epoch 203/3000\n",
+ " - 32s - loss: 1.4257 - acc: 0.6476 - val_loss: 1.5003 - val_acc: 0.6769\n",
+ "\n",
+ "Epoch 00203: val_acc improved from 0.66757 to 0.67692, saving model to ./ModelSnapshots/LSTM-v1-203.h5\n",
+ "Epoch 204/3000\n",
+ " - 29s - loss: 1.4217 - acc: 0.6487 - val_loss: 1.5430 - val_acc: 0.6606\n",
+ "\n",
+ "Epoch 00204: val_acc did not improve from 0.67692\n",
+ "Epoch 205/3000\n",
+ " - 31s - loss: 1.4226 - acc: 0.6493 - val_loss: 1.4830 - val_acc: 0.6789\n",
+ "\n",
+ "Epoch 00205: val_acc improved from 0.67692 to 0.67886, saving model to ./ModelSnapshots/LSTM-v1-205.h5\n",
+ "Epoch 206/3000\n",
+ " - 32s - loss: 1.3975 - acc: 0.6550 - val_loss: 1.4585 - val_acc: 0.6816\n",
+ "\n",
+ "Epoch 00206: val_acc improved from 0.67886 to 0.68159, saving model to ./ModelSnapshots/LSTM-v1-206.h5\n",
+ "Epoch 207/3000\n",
+ " - 30s - loss: 1.3852 - acc: 0.6707 - val_loss: 1.4623 - val_acc: 0.6804\n",
+ "\n",
+ "Epoch 00207: val_acc did not improve from 0.68159\n",
+ "Epoch 208/3000\n",
+ " - 32s - loss: 1.3694 - acc: 0.6736 - val_loss: 1.4777 - val_acc: 0.6828\n",
+ "\n",
+ "Epoch 00208: val_acc improved from 0.68159 to 0.68276, saving model to ./ModelSnapshots/LSTM-v1-208.h5\n",
+ "Epoch 209/3000\n",
+ " - 29s - loss: 1.3926 - acc: 0.6633 - val_loss: 1.4856 - val_acc: 0.6804\n",
+ "\n",
+ "Epoch 00209: val_acc did not improve from 0.68276\n",
+ "Epoch 210/3000\n",
+ " - 31s - loss: 1.3847 - acc: 0.6653 - val_loss: 1.4768 - val_acc: 0.6695\n",
+ "\n",
+ "Epoch 00210: val_acc did not improve from 0.68276\n",
+ "Epoch 211/3000\n",
+ " - 31s - loss: 1.3691 - acc: 0.6732 - val_loss: 1.4424 - val_acc: 0.6940\n",
+ "\n",
+ "Epoch 00211: val_acc improved from 0.68276 to 0.69404, saving model to ./ModelSnapshots/LSTM-v1-211.h5\n",
+ "Epoch 212/3000\n",
+ " - 30s - loss: 1.3587 - acc: 0.6763 - val_loss: 1.4104 - val_acc: 0.6979\n",
+ "\n",
+ "Epoch 00212: val_acc improved from 0.69404 to 0.69794, saving model to ./ModelSnapshots/LSTM-v1-212.h5\n",
+ "Epoch 213/3000\n",
+ " - 32s - loss: 1.3547 - acc: 0.6778 - val_loss: 1.4327 - val_acc: 0.6960\n",
+ "\n",
+ "Epoch 00213: val_acc did not improve from 0.69794\n",
+ "Epoch 214/3000\n",
+ " - 29s - loss: 1.4233 - acc: 0.6624 - val_loss: 1.4354 - val_acc: 0.6925\n",
+ "\n",
+ "Epoch 00214: val_acc did not improve from 0.69794\n",
+ "Epoch 215/3000\n",
+ " - 31s - loss: 1.3359 - acc: 0.6828 - val_loss: 1.4443 - val_acc: 0.6972\n",
+ "\n",
+ "Epoch 00215: val_acc did not improve from 0.69794\n",
+ "Epoch 216/3000\n",
+ " - 31s - loss: 1.3300 - acc: 0.6866 - val_loss: 1.4265 - val_acc: 0.7057\n",
+ "\n",
+ "Epoch 00216: val_acc improved from 0.69794 to 0.70572, saving model to ./ModelSnapshots/LSTM-v1-216.h5\n",
+ "Epoch 217/3000\n",
+ " - 31s - loss: 1.3149 - acc: 0.6937 - val_loss: 1.4729 - val_acc: 0.6909\n",
+ "\n",
+ "Epoch 00217: val_acc did not improve from 0.70572\n",
+ "Epoch 218/3000\n",
+ " - 30s - loss: 1.3437 - acc: 0.6863 - val_loss: 1.4187 - val_acc: 0.7116\n",
+ "\n",
+ "Epoch 00218: val_acc improved from 0.70572 to 0.71156, saving model to ./ModelSnapshots/LSTM-v1-218.h5\n",
+ "Epoch 219/3000\n",
+ " - 32s - loss: 1.2771 - acc: 0.7052 - val_loss: 1.4062 - val_acc: 0.7042\n",
+ "\n",
+ "Epoch 00219: val_acc did not improve from 0.71156\n",
+ "Epoch 220/3000\n",
+ " - 29s - loss: 1.3012 - acc: 0.6947 - val_loss: 1.3739 - val_acc: 0.7197\n",
+ "\n",
+ "Epoch 00220: val_acc improved from 0.71156 to 0.71974, saving model to ./ModelSnapshots/LSTM-v1-220.h5\n",
+ "Epoch 221/3000\n",
+ " - 30s - loss: 1.2894 - acc: 0.7040 - val_loss: 1.3745 - val_acc: 0.7151\n",
+ "\n",
+ "Epoch 00221: val_acc did not improve from 0.71974\n",
+ "Epoch 222/3000\n",
+ " - 31s - loss: 1.2632 - acc: 0.7091 - val_loss: 1.3651 - val_acc: 0.7197\n",
+ "\n",
+ "Epoch 00222: val_acc did not improve from 0.71974\n",
+ "Epoch 223/3000\n",
+ " - 31s - loss: 1.2625 - acc: 0.7120 - val_loss: 1.4143 - val_acc: 0.6979\n",
+ "\n",
+ "Epoch 00223: val_acc did not improve from 0.71974\n",
+ "Epoch 224/3000\n",
+ " - 32s - loss: 1.2671 - acc: 0.7098 - val_loss: 1.3643 - val_acc: 0.7221\n",
+ "\n",
+ "Epoch 00224: val_acc improved from 0.71974 to 0.72207, saving model to ./ModelSnapshots/LSTM-v1-224.h5\n",
+ "Epoch 225/3000\n",
+ " - 30s - loss: 1.2578 - acc: 0.7124 - val_loss: 1.3309 - val_acc: 0.7205\n",
+ "\n",
+ "Epoch 00225: val_acc did not improve from 0.72207\n",
+ "Epoch 226/3000\n",
+ " - 31s - loss: 1.2505 - acc: 0.7168 - val_loss: 1.3527 - val_acc: 0.7197\n",
+ "\n",
+ "Epoch 00226: val_acc did not improve from 0.72207\n",
+ "Epoch 227/3000\n",
+ " - 29s - loss: 1.2482 - acc: 0.7171 - val_loss: 1.3478 - val_acc: 0.7186\n",
+ "\n",
+ "Epoch 00227: val_acc did not improve from 0.72207\n",
+ "Epoch 228/3000\n",
+ " - 31s - loss: 1.2348 - acc: 0.7212 - val_loss: 1.3636 - val_acc: 0.7225\n",
+ "\n",
+ "Epoch 00228: val_acc improved from 0.72207 to 0.72246, saving model to ./ModelSnapshots/LSTM-v1-228.h5\n",
+ "Epoch 229/3000\n",
+ " - 32s - loss: 1.2146 - acc: 0.7337 - val_loss: 1.3154 - val_acc: 0.7384\n",
+ "\n",
+ "Epoch 00229: val_acc improved from 0.72246 to 0.73842, saving model to ./ModelSnapshots/LSTM-v1-229.h5\n",
+ "Epoch 230/3000\n",
+ " - 31s - loss: 1.2042 - acc: 0.7366 - val_loss: 1.4406 - val_acc: 0.7011\n",
+ "\n",
+ "Epoch 00230: val_acc did not improve from 0.73842\n",
+ "Epoch 231/3000\n",
+ " - 29s - loss: 1.2119 - acc: 0.7314 - val_loss: 1.3201 - val_acc: 0.7349\n",
+ "\n",
+ "Epoch 00231: val_acc did not improve from 0.73842\n",
+ "Epoch 232/3000\n",
+ " - 31s - loss: 1.2014 - acc: 0.7348 - val_loss: 1.3087 - val_acc: 0.7326\n",
+ "\n",
+ "Epoch 00232: val_acc did not improve from 0.73842\n",
+ "Epoch 233/3000\n",
+ " - 31s - loss: 1.2043 - acc: 0.7331 - val_loss: 1.3322 - val_acc: 0.7275\n",
+ "\n",
+ "Epoch 00233: val_acc did not improve from 0.73842\n",
+ "Epoch 234/3000\n",
+ " - 31s - loss: 1.1833 - acc: 0.7428 - val_loss: 1.3529 - val_acc: 0.7267\n",
+ "\n",
+ "Epoch 00234: val_acc did not improve from 0.73842\n",
+ "Epoch 235/3000\n",
+ " - 30s - loss: 1.1983 - acc: 0.7411 - val_loss: 1.2910 - val_acc: 0.7384\n",
+ "\n",
+ "Epoch 00235: val_acc did not improve from 0.73842\n",
+ "Epoch 236/3000\n",
+ " - 31s - loss: 1.1755 - acc: 0.7406 - val_loss: 1.3134 - val_acc: 0.7361\n",
+ "\n",
+ "Epoch 00236: val_acc did not improve from 0.73842\n",
+ "Epoch 237/3000\n",
+ " - 29s - loss: 1.1770 - acc: 0.7415 - val_loss: 1.2957 - val_acc: 0.7361\n",
+ "\n",
+ "Epoch 00237: val_acc did not improve from 0.73842\n",
+ "Epoch 238/3000\n",
+ " - 31s - loss: 1.1732 - acc: 0.7494 - val_loss: 1.3361 - val_acc: 0.7267\n",
+ "\n",
+ "Epoch 00238: val_acc did not improve from 0.73842\n",
+ "Epoch 239/3000\n",
+ " - 31s - loss: 1.1891 - acc: 0.7414 - val_loss: 1.3118 - val_acc: 0.7396\n",
+ "\n",
+ "Epoch 00239: val_acc improved from 0.73842 to 0.73959, saving model to ./ModelSnapshots/LSTM-v1-239.h5\n",
+ "Epoch 240/3000\n",
+ " - 30s - loss: 1.1584 - acc: 0.7524 - val_loss: 1.2570 - val_acc: 0.7454\n",
+ "\n",
+ "Epoch 00240: val_acc improved from 0.73959 to 0.74543, saving model to ./ModelSnapshots/LSTM-v1-240.h5\n",
+ "Epoch 241/3000\n",
+ " - 30s - loss: 1.1461 - acc: 0.7583 - val_loss: 1.2675 - val_acc: 0.7388\n",
+ "\n",
+ "Epoch 00241: val_acc did not improve from 0.74543\n",
+ "Epoch 242/3000\n",
+ " - 30s - loss: 1.1188 - acc: 0.7609 - val_loss: 1.2694 - val_acc: 0.7326\n",
+ "\n",
+ "Epoch 00242: val_acc did not improve from 0.74543\n",
+ "Epoch 243/3000\n",
+ " - 32s - loss: 1.1292 - acc: 0.7595 - val_loss: 1.2226 - val_acc: 0.7520\n",
+ "\n",
+ "Epoch 00243: val_acc improved from 0.74543 to 0.75204, saving model to ./ModelSnapshots/LSTM-v1-243.h5\n",
+ "Epoch 244/3000\n",
+ " - 29s - loss: 1.1100 - acc: 0.7674 - val_loss: 1.2890 - val_acc: 0.7373\n",
+ "\n",
+ "Epoch 00244: val_acc did not improve from 0.75204\n",
+ "Epoch 245/3000\n",
+ " - 30s - loss: 1.1264 - acc: 0.7654 - val_loss: 1.2975 - val_acc: 0.7380\n",
+ "\n",
+ "Epoch 00245: val_acc did not improve from 0.75204\n",
+ "Epoch 246/3000\n",
+ " - 31s - loss: 1.1119 - acc: 0.7690 - val_loss: 1.2496 - val_acc: 0.7462\n",
+ "\n",
+ "Epoch 00246: val_acc did not improve from 0.75204\n",
+ "Epoch 247/3000\n",
+ " - 30s - loss: 1.1066 - acc: 0.7669 - val_loss: 1.2268 - val_acc: 0.7513\n",
+ "\n",
+ "Epoch 00247: val_acc did not improve from 0.75204\n",
+ "Epoch 248/3000\n",
+ " - 31s - loss: 1.0889 - acc: 0.7743 - val_loss: 1.2417 - val_acc: 0.7466\n",
+ "\n",
+ "Epoch 00248: val_acc did not improve from 0.75204\n",
+ "Epoch 249/3000\n",
+ " - 31s - loss: 1.0890 - acc: 0.7722 - val_loss: 1.3563 - val_acc: 0.7236\n",
+ "\n",
+ "Epoch 00249: val_acc did not improve from 0.75204\n",
+ "Epoch 250/3000\n",
+ " - 31s - loss: 1.0936 - acc: 0.7722 - val_loss: 1.2323 - val_acc: 0.7571\n",
+ "\n",
+ "Epoch 00250: val_acc improved from 0.75204 to 0.75710, saving model to ./ModelSnapshots/LSTM-v1-250.h5\n",
+ "Epoch 251/3000\n",
+ " - 31s - loss: 1.0894 - acc: 0.7731 - val_loss: 1.2385 - val_acc: 0.7489\n",
+ "\n",
+ "Epoch 00251: val_acc did not improve from 0.75710\n",
+ "Epoch 252/3000\n",
+ " - 30s - loss: 1.0899 - acc: 0.7749 - val_loss: 1.2425 - val_acc: 0.7493\n",
+ "\n",
+ "Epoch 00252: val_acc did not improve from 0.75710\n",
+ "Epoch 253/3000\n",
+ " - 31s - loss: 1.0503 - acc: 0.7814 - val_loss: 1.2751 - val_acc: 0.7489\n",
+ "\n",
+ "Epoch 00253: val_acc did not improve from 0.75710\n",
+ "Epoch 254/3000\n",
+ " - 29s - loss: 1.0586 - acc: 0.7797 - val_loss: 1.2895 - val_acc: 0.7345\n",
+ "\n",
+ "Epoch 00254: val_acc did not improve from 0.75710\n",
+ "Epoch 255/3000\n",
+ " - 31s - loss: 1.0501 - acc: 0.7886 - val_loss: 1.2303 - val_acc: 0.7552\n",
+ "\n",
+ "Epoch 00255: val_acc did not improve from 0.75710\n",
+ "Epoch 256/3000\n",
+ " - 30s - loss: 1.0533 - acc: 0.7838 - val_loss: 1.2029 - val_acc: 0.7602\n",
+ "\n",
+ "Epoch 00256: val_acc improved from 0.75710 to 0.76022, saving model to ./ModelSnapshots/LSTM-v1-256.h5\n",
+ "Epoch 257/3000\n",
+ " - 31s - loss: 1.0425 - acc: 0.7856 - val_loss: 1.2786 - val_acc: 0.7439\n",
+ "\n",
+ "Epoch 00257: val_acc did not improve from 0.76022\n",
+ "Epoch 258/3000\n",
+ " - 32s - loss: 1.0390 - acc: 0.7906 - val_loss: 1.2131 - val_acc: 0.7614\n",
+ "\n",
+ "Epoch 00258: val_acc improved from 0.76022 to 0.76139, saving model to ./ModelSnapshots/LSTM-v1-258.h5\n",
+ "Epoch 259/3000\n",
+ " - 30s - loss: 1.0428 - acc: 0.7888 - val_loss: 1.2970 - val_acc: 0.7291\n",
+ "\n",
+ "Epoch 00259: val_acc did not improve from 0.76139\n",
+ "Epoch 260/3000\n",
+ " - 30s - loss: 1.0305 - acc: 0.7942 - val_loss: 1.2377 - val_acc: 0.7520\n",
+ "\n",
+ "Epoch 00260: val_acc did not improve from 0.76139\n",
+ "Epoch 261/3000\n",
+ " - 30s - loss: 1.0519 - acc: 0.7852 - val_loss: 1.2362 - val_acc: 0.7559\n",
+ "\n",
+ "Epoch 00261: val_acc did not improve from 0.76139\n",
+ "Epoch 262/3000\n",
+ " - 32s - loss: 1.0285 - acc: 0.7980 - val_loss: 1.2101 - val_acc: 0.7594\n",
+ "\n",
+ "Epoch 00262: val_acc did not improve from 0.76139\n",
+ "Epoch 263/3000\n",
+ " - 30s - loss: 1.0345 - acc: 0.7953 - val_loss: 1.1472 - val_acc: 0.7731\n",
+ "\n",
+ "Epoch 00263: val_acc improved from 0.76139 to 0.77306, saving model to ./ModelSnapshots/LSTM-v1-263.h5\n",
+ "Epoch 264/3000\n",
+ " - 31s - loss: 1.0020 - acc: 0.8001 - val_loss: 1.1879 - val_acc: 0.7692\n",
+ "\n",
+ "Epoch 00264: val_acc did not improve from 0.77306\n",
+ "Epoch 265/3000\n",
+ " - 31s - loss: 1.0146 - acc: 0.7980 - val_loss: 1.2670 - val_acc: 0.7415\n",
+ "\n",
+ "Epoch 00265: val_acc did not improve from 0.77306\n",
+ "Epoch 266/3000\n",
+ " - 31s - loss: 0.9991 - acc: 0.7998 - val_loss: 1.1559 - val_acc: 0.7645\n",
+ "\n",
+ "Epoch 00266: val_acc did not improve from 0.77306\n",
+ "Epoch 267/3000\n",
+ " - 30s - loss: 0.9823 - acc: 0.8048 - val_loss: 1.3164 - val_acc: 0.7353\n",
+ "\n",
+ "Epoch 00267: val_acc did not improve from 0.77306\n",
+ "Epoch 268/3000\n",
+ " - 32s - loss: 1.0059 - acc: 0.8021 - val_loss: 1.2022 - val_acc: 0.7610\n",
+ "\n",
+ "Epoch 00268: val_acc did not improve from 0.77306\n",
+ "Epoch 269/3000\n",
+ " - 30s - loss: 1.0039 - acc: 0.8062 - val_loss: 1.2348 - val_acc: 0.7594\n",
+ "\n",
+ "Epoch 00269: val_acc did not improve from 0.77306\n",
+ "Epoch 270/3000\n",
+ " - 33s - loss: 0.9958 - acc: 0.8006 - val_loss: 1.1789 - val_acc: 0.7633\n",
+ "\n",
+ "Epoch 00270: val_acc did not improve from 0.77306\n",
+ "Epoch 271/3000\n",
+ " - 31s - loss: 0.9794 - acc: 0.8069 - val_loss: 1.2661 - val_acc: 0.7482\n",
+ "\n",
+ "Epoch 00271: val_acc did not improve from 0.77306\n",
+ "Epoch 272/3000\n",
+ " - 29s - loss: 0.9759 - acc: 0.8050 - val_loss: 1.1369 - val_acc: 0.7770\n",
+ "\n",
+ "Epoch 00272: val_acc improved from 0.77306 to 0.77696, saving model to ./ModelSnapshots/LSTM-v1-272.h5\n",
+ "Epoch 273/3000\n",
+ " - 31s - loss: 0.9737 - acc: 0.8072 - val_loss: 1.2008 - val_acc: 0.7583\n",
+ "\n",
+ "Epoch 00273: val_acc did not improve from 0.77696\n",
+ "Epoch 274/3000\n",
+ " - 32s - loss: 0.9466 - acc: 0.8170 - val_loss: 1.2508 - val_acc: 0.7544\n",
+ "\n",
+ "Epoch 00274: val_acc did not improve from 0.77696\n",
+ "Epoch 275/3000\n",
+ " - 31s - loss: 0.9635 - acc: 0.8123 - val_loss: 1.2048 - val_acc: 0.7645\n",
+ "\n",
+ "Epoch 00275: val_acc did not improve from 0.77696\n",
+ "Epoch 276/3000\n",
+ " - 31s - loss: 0.9897 - acc: 0.8039 - val_loss: 1.1925 - val_acc: 0.7614\n",
+ "\n",
+ "Epoch 00276: val_acc did not improve from 0.77696\n",
+ "Epoch 277/3000\n",
+ " - 31s - loss: 0.9691 - acc: 0.8110 - val_loss: 1.2056 - val_acc: 0.7614\n",
+ "\n",
+ "Epoch 00277: val_acc did not improve from 0.77696\n",
+ "Epoch 278/3000\n",
+ " - 31s - loss: 0.9639 - acc: 0.8149 - val_loss: 1.1680 - val_acc: 0.7773\n",
+ "\n",
+ "Epoch 00278: val_acc improved from 0.77696 to 0.77735, saving model to ./ModelSnapshots/LSTM-v1-278.h5\n",
+ "Epoch 279/3000\n",
+ " - 29s - loss: 0.9557 - acc: 0.8155 - val_loss: 1.1965 - val_acc: 0.7622\n",
+ "\n",
+ "Epoch 00279: val_acc did not improve from 0.77735\n",
+ "Epoch 280/3000\n",
+ " - 30s - loss: 0.9714 - acc: 0.8111 - val_loss: 1.1554 - val_acc: 0.7793\n",
+ "\n",
+ "Epoch 00280: val_acc improved from 0.77735 to 0.77929, saving model to ./ModelSnapshots/LSTM-v1-280.h5\n",
+ "Epoch 281/3000\n",
+ " - 32s - loss: 0.9599 - acc: 0.8137 - val_loss: 1.1530 - val_acc: 0.7773\n",
+ "\n",
+ "Epoch 00281: val_acc did not improve from 0.77929\n",
+ "Epoch 282/3000\n",
+ " - 31s - loss: 0.9504 - acc: 0.8149 - val_loss: 1.1114 - val_acc: 0.7801\n",
+ "\n",
+ "Epoch 00282: val_acc improved from 0.77929 to 0.78007, saving model to ./ModelSnapshots/LSTM-v1-282.h5\n",
+ "Epoch 283/3000\n",
+ " - 30s - loss: 0.9316 - acc: 0.8238 - val_loss: 1.1905 - val_acc: 0.7711\n",
+ "\n",
+ "Epoch 00283: val_acc did not improve from 0.78007\n",
+ "Epoch 284/3000\n",
+ " - 30s - loss: 0.9260 - acc: 0.8217 - val_loss: 1.1318 - val_acc: 0.7754\n",
+ "\n",
+ "Epoch 00284: val_acc did not improve from 0.78007\n",
+ "Epoch 285/3000\n",
+ " - 31s - loss: 0.9372 - acc: 0.8140 - val_loss: 1.1064 - val_acc: 0.7793\n",
+ "\n",
+ "Epoch 00285: val_acc did not improve from 0.78007\n",
+ "Epoch 286/3000\n",
+ " - 31s - loss: 0.9319 - acc: 0.8220 - val_loss: 1.1778 - val_acc: 0.7805\n",
+ "\n",
+ "Epoch 00286: val_acc improved from 0.78007 to 0.78046, saving model to ./ModelSnapshots/LSTM-v1-286.h5\n",
+ "Epoch 287/3000\n",
+ " - 32s - loss: 0.9362 - acc: 0.8223 - val_loss: 1.2934 - val_acc: 0.7427\n",
+ "\n",
+ "Epoch 00287: val_acc did not improve from 0.78046\n",
+ "Epoch 288/3000\n",
+ " - 30s - loss: 0.9249 - acc: 0.8193 - val_loss: 1.1700 - val_acc: 0.7742\n",
+ "\n",
+ "Epoch 00288: val_acc did not improve from 0.78046\n",
+ "Epoch 289/3000\n",
+ " - 31s - loss: 0.9194 - acc: 0.8271 - val_loss: 1.1633 - val_acc: 0.7808\n",
+ "\n",
+ "Epoch 00289: val_acc improved from 0.78046 to 0.78085, saving model to ./ModelSnapshots/LSTM-v1-289.h5\n",
+ "Epoch 290/3000\n",
+ " - 31s - loss: 0.9366 - acc: 0.8202 - val_loss: 1.1169 - val_acc: 0.7824\n",
+ "\n",
+ "Epoch 00290: val_acc improved from 0.78085 to 0.78241, saving model to ./ModelSnapshots/LSTM-v1-290.h5\n",
+ "Epoch 291/3000\n",
+ " - 32s - loss: 0.9164 - acc: 0.8274 - val_loss: 1.1341 - val_acc: 0.7820\n",
+ "\n",
+ "Epoch 00291: val_acc did not improve from 0.78241\n",
+ "Epoch 292/3000\n",
+ " - 29s - loss: 0.8966 - acc: 0.8357 - val_loss: 1.1341 - val_acc: 0.7785\n",
+ "\n",
+ "Epoch 00292: val_acc did not improve from 0.78241\n",
+ "Epoch 293/3000\n",
+ " - 32s - loss: 0.9236 - acc: 0.8179 - val_loss: 1.1191 - val_acc: 0.7805\n",
+ "\n",
+ "Epoch 00293: val_acc did not improve from 0.78241\n",
+ "Epoch 294/3000\n",
+ " - 29s - loss: 0.9055 - acc: 0.8303 - val_loss: 1.1488 - val_acc: 0.7645\n",
+ "\n",
+ "Epoch 00294: val_acc did not improve from 0.78241\n",
+ "Epoch 295/3000\n",
+ " - 31s - loss: 0.9143 - acc: 0.8197 - val_loss: 1.1451 - val_acc: 0.7801\n",
+ "\n",
+ "Epoch 00295: val_acc did not improve from 0.78241\n",
+ "Epoch 296/3000\n",
+ " - 30s - loss: 0.9034 - acc: 0.8320 - val_loss: 1.1841 - val_acc: 0.7641\n",
+ "\n",
+ "Epoch 00296: val_acc did not improve from 0.78241\n",
+ "Epoch 297/3000\n",
+ " - 30s - loss: 0.8955 - acc: 0.8294 - val_loss: 1.1107 - val_acc: 0.7844\n",
+ "\n",
+ "Epoch 00297: val_acc improved from 0.78241 to 0.78435, saving model to ./ModelSnapshots/LSTM-v1-297.h5\n",
+ "Epoch 298/3000\n",
+ " - 31s - loss: 0.8964 - acc: 0.8287 - val_loss: 1.1443 - val_acc: 0.7816\n",
+ "\n",
+ "Epoch 00298: val_acc did not improve from 0.78435\n",
+ "Epoch 299/3000\n",
+ " - 30s - loss: 0.9105 - acc: 0.8261 - val_loss: 1.1129 - val_acc: 0.7824\n",
+ "\n",
+ "Epoch 00299: val_acc did not improve from 0.78435\n",
+ "Epoch 300/3000\n",
+ " - 30s - loss: 0.8894 - acc: 0.8308 - val_loss: 1.0606 - val_acc: 0.7960\n",
+ "\n",
+ "Epoch 00300: val_acc improved from 0.78435 to 0.79603, saving model to ./ModelSnapshots/LSTM-v1-300.h5\n",
+ "Epoch 301/3000\n",
+ " - 31s - loss: 0.8664 - acc: 0.8401 - val_loss: 1.1634 - val_acc: 0.7773\n",
+ "\n",
+ "Epoch 00301: val_acc did not improve from 0.79603\n",
+ "Epoch 302/3000\n",
+ " - 30s - loss: 0.8953 - acc: 0.8302 - val_loss: 1.1336 - val_acc: 0.7847\n",
+ "\n",
+ "Epoch 00302: val_acc did not improve from 0.79603\n",
+ "Epoch 303/3000\n",
+ " - 31s - loss: 0.8806 - acc: 0.8383 - val_loss: 1.0879 - val_acc: 0.7902\n",
+ "\n",
+ "Epoch 00303: val_acc did not improve from 0.79603\n",
+ "Epoch 304/3000\n",
+ " - 31s - loss: 0.8800 - acc: 0.8356 - val_loss: 1.0758 - val_acc: 0.7953\n",
+ "\n",
+ "Epoch 00304: val_acc did not improve from 0.79603\n",
+ "Epoch 305/3000\n",
+ " - 29s - loss: 0.8654 - acc: 0.8362 - val_loss: 1.0556 - val_acc: 0.8046\n",
+ "\n",
+ "Epoch 00305: val_acc improved from 0.79603 to 0.80459, saving model to ./ModelSnapshots/LSTM-v1-305.h5\n",
+ "Epoch 306/3000\n",
+ " - 32s - loss: 0.8789 - acc: 0.8357 - val_loss: 1.1561 - val_acc: 0.7789\n",
+ "\n",
+ "Epoch 00306: val_acc did not improve from 0.80459\n",
+ "Epoch 307/3000\n",
+ " - 32s - loss: 0.8707 - acc: 0.8353 - val_loss: 1.1298 - val_acc: 0.7871\n",
+ "\n",
+ "Epoch 00307: val_acc did not improve from 0.80459\n",
+ "Epoch 308/3000\n",
+ " - 29s - loss: 0.8679 - acc: 0.8380 - val_loss: 1.0785 - val_acc: 0.7937\n",
+ "\n",
+ "Epoch 00308: val_acc did not improve from 0.80459\n",
+ "Epoch 309/3000\n",
+ " - 30s - loss: 0.8757 - acc: 0.8388 - val_loss: 1.1374 - val_acc: 0.7789\n",
+ "\n",
+ "Epoch 00309: val_acc did not improve from 0.80459\n",
+ "Epoch 310/3000\n",
+ " - 30s - loss: 0.8687 - acc: 0.8397 - val_loss: 1.1523 - val_acc: 0.7766\n",
+ "\n",
+ "Epoch 00310: val_acc did not improve from 0.80459\n",
+ "Epoch 311/3000\n",
+ " - 32s - loss: 0.8563 - acc: 0.8409 - val_loss: 1.0828 - val_acc: 0.7875\n",
+ "\n",
+ "Epoch 00311: val_acc did not improve from 0.80459\n",
+ "Epoch 312/3000\n",
+ " - 31s - loss: 0.8589 - acc: 0.8441 - val_loss: 1.0684 - val_acc: 0.7875\n",
+ "\n",
+ "Epoch 00312: val_acc did not improve from 0.80459\n",
+ "Epoch 313/3000\n",
+ " - 29s - loss: 0.8382 - acc: 0.8487 - val_loss: 1.0717 - val_acc: 0.7921\n",
+ "\n",
+ "Epoch 00313: val_acc did not improve from 0.80459\n",
+ "Epoch 314/3000\n",
+ " - 30s - loss: 0.8542 - acc: 0.8373 - val_loss: 1.0827 - val_acc: 0.7941\n",
+ "\n",
+ "Epoch 00314: val_acc did not improve from 0.80459\n",
+ "Epoch 315/3000\n",
+ " - 31s - loss: 0.8552 - acc: 0.8448 - val_loss: 1.0516 - val_acc: 0.7921\n",
+ "\n",
+ "Epoch 00315: val_acc did not improve from 0.80459\n",
+ "Epoch 316/3000\n",
+ " - 31s - loss: 0.8527 - acc: 0.8419 - val_loss: 1.1131 - val_acc: 0.7836\n",
+ "\n",
+ "Epoch 00316: val_acc did not improve from 0.80459\n",
+ "Epoch 317/3000\n",
+ " - 28s - loss: 0.8614 - acc: 0.8403 - val_loss: 1.0943 - val_acc: 0.8019\n",
+ "\n",
+ "Epoch 00317: val_acc did not improve from 0.80459\n",
+ "Epoch 318/3000\n",
+ " - 31s - loss: 0.8316 - acc: 0.8489 - val_loss: 1.0385 - val_acc: 0.7988\n",
+ "\n",
+ "Epoch 00318: val_acc did not improve from 0.80459\n",
+ "Epoch 319/3000\n",
+ " - 31s - loss: 0.8518 - acc: 0.8481 - val_loss: 1.1031 - val_acc: 0.7960\n",
+ "\n",
+ "Epoch 00319: val_acc did not improve from 0.80459\n",
+ "Epoch 320/3000\n",
+ " - 30s - loss: 0.8534 - acc: 0.8406 - val_loss: 1.0793 - val_acc: 0.8007\n",
+ "\n",
+ "Epoch 00320: val_acc did not improve from 0.80459\n",
+ "Epoch 321/3000\n",
+ " - 29s - loss: 0.8349 - acc: 0.8518 - val_loss: 1.0607 - val_acc: 0.7984\n",
+ "\n",
+ "Epoch 00321: val_acc did not improve from 0.80459\n",
+ "Epoch 322/3000\n",
+ " - 32s - loss: 0.8109 - acc: 0.8564 - val_loss: 1.0437 - val_acc: 0.8100\n",
+ "\n",
+ "Epoch 00322: val_acc improved from 0.80459 to 0.81004, saving model to ./ModelSnapshots/LSTM-v1-322.h5\n",
+ "Epoch 323/3000\n",
+ " - 28s - loss: 0.8229 - acc: 0.8505 - val_loss: 1.0624 - val_acc: 0.7991\n",
+ "\n",
+ "Epoch 00323: val_acc did not improve from 0.81004\n",
+ "Epoch 324/3000\n",
+ " - 31s - loss: 0.8265 - acc: 0.8501 - val_loss: 1.0149 - val_acc: 0.8077\n",
+ "\n",
+ "Epoch 00324: val_acc did not improve from 0.81004\n",
+ "Epoch 325/3000\n",
+ " - 29s - loss: 0.8172 - acc: 0.8554 - val_loss: 1.0836 - val_acc: 0.7988\n",
+ "\n",
+ "Epoch 00325: val_acc did not improve from 0.81004\n",
+ "Epoch 326/3000\n",
+ " - 31s - loss: 0.8124 - acc: 0.8527 - val_loss: 1.0351 - val_acc: 0.8058\n",
+ "\n",
+ "Epoch 00326: val_acc did not improve from 0.81004\n",
+ "Epoch 327/3000\n",
+ " - 31s - loss: 0.8286 - acc: 0.8477 - val_loss: 1.0974 - val_acc: 0.7929\n",
+ "\n",
+ "Epoch 00327: val_acc did not improve from 0.81004\n",
+ "Epoch 328/3000\n",
+ " - 29s - loss: 0.8182 - acc: 0.8477 - val_loss: 1.0974 - val_acc: 0.7953\n",
+ "\n",
+ "Epoch 00328: val_acc did not improve from 0.81004\n",
+ "Epoch 329/3000\n",
+ " - 32s - loss: 0.8181 - acc: 0.8527 - val_loss: 1.0273 - val_acc: 0.8046\n",
+ "\n",
+ "Epoch 00329: val_acc did not improve from 0.81004\n",
+ "Epoch 330/3000\n",
+ " - 29s - loss: 0.8217 - acc: 0.8533 - val_loss: 1.0229 - val_acc: 0.8089\n",
+ "\n",
+ "Epoch 00330: val_acc did not improve from 0.81004\n",
+ "Epoch 331/3000\n",
+ " - 32s - loss: 0.8182 - acc: 0.8545 - val_loss: 1.0118 - val_acc: 0.8015\n",
+ "\n",
+ "Epoch 00331: val_acc did not improve from 0.81004\n",
+ "Epoch 332/3000\n",
+ " - 30s - loss: 0.8302 - acc: 0.8498 - val_loss: 1.0600 - val_acc: 0.7917\n",
+ "\n",
+ "Epoch 00332: val_acc did not improve from 0.81004\n",
+ "Epoch 333/3000\n",
+ " - 30s - loss: 0.8036 - acc: 0.8584 - val_loss: 1.0229 - val_acc: 0.7984\n",
+ "\n",
+ "Epoch 00333: val_acc did not improve from 0.81004\n",
+ "Epoch 334/3000\n",
+ " - 32s - loss: 0.8212 - acc: 0.8522 - val_loss: 1.0190 - val_acc: 0.8089\n",
+ "\n",
+ "Epoch 00334: val_acc did not improve from 0.81004\n",
+ "Epoch 335/3000\n",
+ " - 29s - loss: 0.8111 - acc: 0.8560 - val_loss: 1.0210 - val_acc: 0.8093\n",
+ "\n",
+ "Epoch 00335: val_acc did not improve from 0.81004\n",
+ "Epoch 336/3000\n",
+ " - 30s - loss: 0.7902 - acc: 0.8591 - val_loss: 1.0020 - val_acc: 0.8034\n",
+ "\n",
+ "Epoch 00336: val_acc did not improve from 0.81004\n",
+ "Epoch 337/3000\n",
+ " - 30s - loss: 0.8256 - acc: 0.8501 - val_loss: 1.0480 - val_acc: 0.7976\n",
+ "\n",
+ "Epoch 00337: val_acc did not improve from 0.81004\n",
+ "Epoch 338/3000\n",
+ " - 30s - loss: 0.8021 - acc: 0.8647 - val_loss: 1.0069 - val_acc: 0.8089\n",
+ "\n",
+ "Epoch 00338: val_acc did not improve from 0.81004\n",
+ "Epoch 339/3000\n",
+ " - 33s - loss: 0.8013 - acc: 0.8599 - val_loss: 1.0146 - val_acc: 0.8089\n",
+ "\n",
+ "Epoch 00339: val_acc did not improve from 0.81004\n",
+ "Epoch 340/3000\n",
+ " - 29s - loss: 0.7923 - acc: 0.8528 - val_loss: 1.0288 - val_acc: 0.8054\n",
+ "\n",
+ "Epoch 00340: val_acc did not improve from 0.81004\n",
+ "Epoch 341/3000\n",
+ " - 32s - loss: 0.7774 - acc: 0.8607 - val_loss: 1.0056 - val_acc: 0.8069\n",
+ "\n",
+ "Epoch 00341: val_acc did not improve from 0.81004\n",
+ "Epoch 342/3000\n",
+ " - 29s - loss: 0.7752 - acc: 0.8616 - val_loss: 1.0470 - val_acc: 0.7964\n",
+ "\n",
+ "Epoch 00342: val_acc did not improve from 0.81004\n",
+ "Epoch 343/3000\n",
+ " - 30s - loss: 0.7852 - acc: 0.8551 - val_loss: 1.0359 - val_acc: 0.8015\n",
+ "\n",
+ "Epoch 00343: val_acc did not improve from 0.81004\n",
+ "Epoch 344/3000\n",
+ " - 32s - loss: 0.7997 - acc: 0.8570 - val_loss: 1.0239 - val_acc: 0.8116\n",
+ "\n",
+ "Epoch 00344: val_acc improved from 0.81004 to 0.81160, saving model to ./ModelSnapshots/LSTM-v1-344.h5\n",
+ "Epoch 345/3000\n",
+ " - 31s - loss: 0.7952 - acc: 0.8593 - val_loss: 1.0793 - val_acc: 0.7953\n",
+ "\n",
+ "Epoch 00345: val_acc did not improve from 0.81160\n",
+ "Epoch 346/3000\n",
+ " - 31s - loss: 0.8099 - acc: 0.8563 - val_loss: 1.0191 - val_acc: 0.8046\n",
+ "\n",
+ "Epoch 00346: val_acc did not improve from 0.81160\n",
+ "Epoch 347/3000\n",
+ " - 30s - loss: 0.7724 - acc: 0.8664 - val_loss: 1.0545 - val_acc: 0.7980\n",
+ "\n",
+ "Epoch 00347: val_acc did not improve from 0.81160\n",
+ "Epoch 348/3000\n",
+ " - 30s - loss: 0.8051 - acc: 0.8579 - val_loss: 1.0120 - val_acc: 0.8108\n",
+ "\n",
+ "Epoch 00348: val_acc did not improve from 0.81160\n",
+ "Epoch 349/3000\n",
+ " - 31s - loss: 0.7717 - acc: 0.8671 - val_loss: 1.0290 - val_acc: 0.8030\n",
+ "\n",
+ "Epoch 00349: val_acc did not improve from 0.81160\n",
+ "Epoch 350/3000\n",
+ " - 30s - loss: 0.8217 - acc: 0.8486 - val_loss: 1.0772 - val_acc: 0.8026\n",
+ "\n",
+ "Epoch 00350: val_acc did not improve from 0.81160\n",
+ "Epoch 351/3000\n",
+ " - 30s - loss: 0.8070 - acc: 0.8601 - val_loss: 1.0177 - val_acc: 0.8097\n",
+ "\n",
+ "Epoch 00351: val_acc did not improve from 0.81160\n",
+ "Epoch 352/3000\n",
+ " - 30s - loss: 0.7847 - acc: 0.8610 - val_loss: 1.0620 - val_acc: 0.8085\n",
+ "\n",
+ "Epoch 00352: val_acc did not improve from 0.81160\n",
+ "Epoch 353/3000\n",
+ " - 31s - loss: 0.7748 - acc: 0.8671 - val_loss: 1.0256 - val_acc: 0.8089\n",
+ "\n",
+ "Epoch 00353: val_acc did not improve from 0.81160\n",
+ "Epoch 354/3000\n",
+ " - 29s - loss: 0.7647 - acc: 0.8671 - val_loss: 1.0351 - val_acc: 0.8073\n",
+ "\n",
+ "Epoch 00354: val_acc did not improve from 0.81160\n",
+ "Epoch 355/3000\n",
+ " - 32s - loss: 0.7753 - acc: 0.8668 - val_loss: 1.0517 - val_acc: 0.8097\n",
+ "\n",
+ "Epoch 00355: val_acc did not improve from 0.81160\n",
+ "Epoch 356/3000\n",
+ " - 30s - loss: 0.7730 - acc: 0.8697 - val_loss: 1.0437 - val_acc: 0.8104\n",
+ "\n",
+ "Epoch 00356: val_acc did not improve from 0.81160\n",
+ "Epoch 357/3000\n",
+ " - 31s - loss: 0.7611 - acc: 0.8681 - val_loss: 1.0030 - val_acc: 0.8085\n",
+ "\n",
+ "Epoch 00357: val_acc did not improve from 0.81160\n",
+ "Epoch 358/3000\n",
+ " - 30s - loss: 0.7800 - acc: 0.8644 - val_loss: 1.0532 - val_acc: 0.8046\n",
+ "\n",
+ "Epoch 00358: val_acc did not improve from 0.81160\n",
+ "Epoch 359/3000\n",
+ " - 32s - loss: 0.7624 - acc: 0.8690 - val_loss: 1.0162 - val_acc: 0.8038\n",
+ "\n",
+ "Epoch 00359: val_acc did not improve from 0.81160\n",
+ "Epoch 360/3000\n",
+ " - 30s - loss: 0.7607 - acc: 0.8659 - val_loss: 1.0487 - val_acc: 0.7945\n",
+ "\n",
+ "Epoch 00360: val_acc did not improve from 0.81160\n",
+ "Epoch 361/3000\n",
+ " - 30s - loss: 0.7536 - acc: 0.8693 - val_loss: 1.0520 - val_acc: 0.8011\n",
+ "\n",
+ "Epoch 00361: val_acc did not improve from 0.81160\n",
+ "Epoch 362/3000\n",
+ " - 30s - loss: 0.7701 - acc: 0.8675 - val_loss: 1.0541 - val_acc: 0.8054\n",
+ "\n",
+ "Epoch 00362: val_acc did not improve from 0.81160\n",
+ "Epoch 363/3000\n",
+ " - 31s - loss: 0.7684 - acc: 0.8702 - val_loss: 0.9667 - val_acc: 0.8206\n",
+ "\n",
+ "Epoch 00363: val_acc improved from 0.81160 to 0.82055, saving model to ./ModelSnapshots/LSTM-v1-363.h5\n",
+ "Epoch 364/3000\n",
+ " - 32s - loss: 0.7409 - acc: 0.8687 - val_loss: 0.9735 - val_acc: 0.8194\n",
+ "\n",
+ "Epoch 00364: val_acc did not improve from 0.82055\n",
+ "Epoch 365/3000\n",
+ " - 31s - loss: 0.7482 - acc: 0.8688 - val_loss: 0.9960 - val_acc: 0.8132\n",
+ "\n",
+ "Epoch 00365: val_acc did not improve from 0.82055\n",
+ "Epoch 366/3000\n",
+ " - 29s - loss: 0.7497 - acc: 0.8748 - val_loss: 0.9743 - val_acc: 0.8159\n",
+ "\n",
+ "Epoch 00366: val_acc did not improve from 0.82055\n",
+ "Epoch 367/3000\n",
+ " - 30s - loss: 0.7463 - acc: 0.8721 - val_loss: 1.0472 - val_acc: 0.8038\n",
+ "\n",
+ "Epoch 00367: val_acc did not improve from 0.82055\n",
+ "Epoch 368/3000\n",
+ " - 30s - loss: 0.7387 - acc: 0.8730 - val_loss: 0.9919 - val_acc: 0.8170\n",
+ "\n",
+ "Epoch 00368: val_acc did not improve from 0.82055\n",
+ "Epoch 369/3000\n",
+ " - 30s - loss: 0.7461 - acc: 0.8717 - val_loss: 0.9794 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00369: val_acc improved from 0.82055 to 0.82756, saving model to ./ModelSnapshots/LSTM-v1-369.h5\n",
+ "Epoch 370/3000\n",
+ " - 29s - loss: 0.7625 - acc: 0.8664 - val_loss: 1.0434 - val_acc: 0.8089\n",
+ "\n",
+ "Epoch 00370: val_acc did not improve from 0.82756\n",
+ "Epoch 371/3000\n",
+ " - 31s - loss: 0.7543 - acc: 0.8724 - val_loss: 1.0034 - val_acc: 0.8128\n",
+ "\n",
+ "Epoch 00371: val_acc did not improve from 0.82756\n",
+ "Epoch 372/3000\n",
+ " - 31s - loss: 0.7449 - acc: 0.8711 - val_loss: 0.9866 - val_acc: 0.8132\n",
+ "\n",
+ "Epoch 00372: val_acc did not improve from 0.82756\n",
+ "Epoch 373/3000\n",
+ " - 30s - loss: 0.7404 - acc: 0.8785 - val_loss: 1.0178 - val_acc: 0.8073\n",
+ "\n",
+ "Epoch 00373: val_acc did not improve from 0.82756\n",
+ "Epoch 374/3000\n",
+ " - 30s - loss: 0.7380 - acc: 0.8727 - val_loss: 1.0444 - val_acc: 0.8081\n",
+ "\n",
+ "Epoch 00374: val_acc did not improve from 0.82756\n",
+ "Epoch 375/3000\n",
+ " - 31s - loss: 0.7159 - acc: 0.8785 - val_loss: 1.0397 - val_acc: 0.8093\n",
+ "\n",
+ "Epoch 00375: val_acc did not improve from 0.82756\n",
+ "Epoch 376/3000\n",
+ " - 30s - loss: 0.7268 - acc: 0.8783 - val_loss: 1.0407 - val_acc: 0.8155\n",
+ "\n",
+ "Epoch 00376: val_acc did not improve from 0.82756\n",
+ "Epoch 377/3000\n",
+ " - 31s - loss: 0.7648 - acc: 0.8702 - val_loss: 1.0383 - val_acc: 0.8143\n",
+ "\n",
+ "Epoch 00377: val_acc did not improve from 0.82756\n",
+ "Epoch 378/3000\n",
+ " - 30s - loss: 0.7418 - acc: 0.8744 - val_loss: 0.9557 - val_acc: 0.8252\n",
+ "\n",
+ "Epoch 00378: val_acc did not improve from 0.82756\n",
+ "Epoch 379/3000\n",
+ " - 31s - loss: 0.7631 - acc: 0.8691 - val_loss: 1.0920 - val_acc: 0.7925\n",
+ "\n",
+ "Epoch 00379: val_acc did not improve from 0.82756\n",
+ "Epoch 380/3000\n",
+ " - 31s - loss: 0.7403 - acc: 0.8738 - val_loss: 0.9497 - val_acc: 0.8174\n",
+ "\n",
+ "Epoch 00380: val_acc did not improve from 0.82756\n",
+ "Epoch 381/3000\n",
+ " - 29s - loss: 0.7280 - acc: 0.8756 - val_loss: 1.0507 - val_acc: 0.8069\n",
+ "\n",
+ "Epoch 00381: val_acc did not improve from 0.82756\n",
+ "Epoch 382/3000\n",
+ " - 31s - loss: 0.7467 - acc: 0.8714 - val_loss: 1.0647 - val_acc: 0.8042\n",
+ "\n",
+ "Epoch 00382: val_acc did not improve from 0.82756\n",
+ "Epoch 383/3000\n",
+ " - 30s - loss: 0.7147 - acc: 0.8798 - val_loss: 1.0431 - val_acc: 0.8120\n",
+ "\n",
+ "Epoch 00383: val_acc did not improve from 0.82756\n",
+ "Epoch 384/3000\n",
+ " - 31s - loss: 0.7253 - acc: 0.8767 - val_loss: 1.0173 - val_acc: 0.8147\n",
+ "\n",
+ "Epoch 00384: val_acc did not improve from 0.82756\n",
+ "Epoch 385/3000\n",
+ " - 32s - loss: 0.7267 - acc: 0.8791 - val_loss: 1.0566 - val_acc: 0.8097\n",
+ "\n",
+ "Epoch 00385: val_acc did not improve from 0.82756\n",
+ "Epoch 386/3000\n",
+ " - 29s - loss: 0.7260 - acc: 0.8747 - val_loss: 1.0022 - val_acc: 0.8163\n",
+ "\n",
+ "Epoch 00386: val_acc did not improve from 0.82756\n",
+ "Epoch 387/3000\n",
+ " - 32s - loss: 0.7341 - acc: 0.8739 - val_loss: 1.0223 - val_acc: 0.8104\n",
+ "\n",
+ "Epoch 00387: val_acc did not improve from 0.82756\n",
+ "Epoch 388/3000\n",
+ " - 31s - loss: 0.7092 - acc: 0.8809 - val_loss: 1.0109 - val_acc: 0.8178\n",
+ "\n",
+ "Epoch 00388: val_acc did not improve from 0.82756\n",
+ "Epoch 389/3000\n",
+ " - 29s - loss: 0.7368 - acc: 0.8789 - val_loss: 1.0589 - val_acc: 0.8042\n",
+ "\n",
+ "Epoch 00389: val_acc did not improve from 0.82756\n",
+ "Epoch 390/3000\n",
+ " - 31s - loss: 0.7278 - acc: 0.8755 - val_loss: 0.9617 - val_acc: 0.8209\n",
+ "\n",
+ "Epoch 00390: val_acc did not improve from 0.82756\n",
+ "Epoch 391/3000\n",
+ " - 31s - loss: 0.7115 - acc: 0.8832 - val_loss: 0.9896 - val_acc: 0.8151\n",
+ "\n",
+ "Epoch 00391: val_acc did not improve from 0.82756\n",
+ "Epoch 392/3000\n",
+ " - 29s - loss: 0.7167 - acc: 0.8806 - val_loss: 0.9741 - val_acc: 0.8229\n",
+ "\n",
+ "Epoch 00392: val_acc did not improve from 0.82756\n",
+ "Epoch 393/3000\n",
+ " - 32s - loss: 0.7167 - acc: 0.8807 - val_loss: 1.0856 - val_acc: 0.7914\n",
+ "\n",
+ "Epoch 00393: val_acc did not improve from 0.82756\n",
+ "Epoch 394/3000\n",
+ " - 29s - loss: 0.7462 - acc: 0.8747 - val_loss: 0.9327 - val_acc: 0.8307\n",
+ "\n",
+ "Epoch 00394: val_acc improved from 0.82756 to 0.83067, saving model to ./ModelSnapshots/LSTM-v1-394.h5\n",
+ "Epoch 395/3000\n",
+ " - 29s - loss: 0.7202 - acc: 0.8762 - val_loss: 0.9674 - val_acc: 0.8318\n",
+ "\n",
+ "Epoch 00395: val_acc improved from 0.83067 to 0.83184, saving model to ./ModelSnapshots/LSTM-v1-395.h5\n",
+ "Epoch 396/3000\n",
+ " - 32s - loss: 0.7261 - acc: 0.8800 - val_loss: 1.0525 - val_acc: 0.8100\n",
+ "\n",
+ "Epoch 00396: val_acc did not improve from 0.83184\n",
+ "Epoch 397/3000\n",
+ " - 29s - loss: 0.7172 - acc: 0.8807 - val_loss: 0.9525 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00397: val_acc did not improve from 0.83184\n",
+ "Epoch 398/3000\n",
+ " - 31s - loss: 0.7170 - acc: 0.8815 - val_loss: 1.0284 - val_acc: 0.8151\n",
+ "\n",
+ "Epoch 00398: val_acc did not improve from 0.83184\n",
+ "Epoch 399/3000\n",
+ " - 31s - loss: 0.7246 - acc: 0.8792 - val_loss: 0.9941 - val_acc: 0.8252\n",
+ "\n",
+ "Epoch 00399: val_acc did not improve from 0.83184\n",
+ "Epoch 400/3000\n",
+ " - 29s - loss: 0.7085 - acc: 0.8818 - val_loss: 1.0028 - val_acc: 0.8147\n",
+ "\n",
+ "Epoch 00400: val_acc did not improve from 0.83184\n",
+ "Epoch 401/3000\n",
+ " - 31s - loss: 0.7097 - acc: 0.8800 - val_loss: 1.0143 - val_acc: 0.8167\n",
+ "\n",
+ "Epoch 00401: val_acc did not improve from 0.83184\n",
+ "Epoch 402/3000\n",
+ " - 31s - loss: 0.7154 - acc: 0.8815 - val_loss: 0.9683 - val_acc: 0.8287\n",
+ "\n",
+ "Epoch 00402: val_acc did not improve from 0.83184\n",
+ "Epoch 403/3000\n",
+ " - 31s - loss: 0.6962 - acc: 0.8866 - val_loss: 1.0118 - val_acc: 0.8128\n",
+ "\n",
+ "Epoch 00403: val_acc did not improve from 0.83184\n",
+ "Epoch 404/3000\n",
+ " - 31s - loss: 0.7014 - acc: 0.8847 - val_loss: 0.9564 - val_acc: 0.8260\n",
+ "\n",
+ "Epoch 00404: val_acc did not improve from 0.83184\n",
+ "Epoch 405/3000\n",
+ " - 32s - loss: 0.6864 - acc: 0.8872 - val_loss: 1.0074 - val_acc: 0.8135\n",
+ "\n",
+ "Epoch 00405: val_acc did not improve from 0.83184\n",
+ "Epoch 406/3000\n",
+ " - 30s - loss: 0.7071 - acc: 0.8801 - val_loss: 1.0440 - val_acc: 0.8120\n",
+ "\n",
+ "Epoch 00406: val_acc did not improve from 0.83184\n",
+ "Epoch 407/3000\n",
+ " - 31s - loss: 0.7017 - acc: 0.8812 - val_loss: 0.9209 - val_acc: 0.8322\n",
+ "\n",
+ "Epoch 00407: val_acc improved from 0.83184 to 0.83223, saving model to ./ModelSnapshots/LSTM-v1-407.h5\n",
+ "Epoch 408/3000\n",
+ " - 30s - loss: 0.6888 - acc: 0.8818 - val_loss: 0.9703 - val_acc: 0.8244\n",
+ "\n",
+ "Epoch 00408: val_acc did not improve from 0.83223\n",
+ "Epoch 409/3000\n",
+ " - 29s - loss: 0.7102 - acc: 0.8807 - val_loss: 0.9795 - val_acc: 0.8093\n",
+ "\n",
+ "Epoch 00409: val_acc did not improve from 0.83223\n",
+ "Epoch 410/3000\n",
+ " - 31s - loss: 0.7016 - acc: 0.8832 - val_loss: 1.0575 - val_acc: 0.8085\n",
+ "\n",
+ "Epoch 00410: val_acc did not improve from 0.83223\n",
+ "Epoch 411/3000\n",
+ " - 32s - loss: 0.7020 - acc: 0.8842 - val_loss: 1.0143 - val_acc: 0.8128\n",
+ "\n",
+ "Epoch 00411: val_acc did not improve from 0.83223\n",
+ "Epoch 412/3000\n",
+ " - 29s - loss: 0.6913 - acc: 0.8883 - val_loss: 0.9655 - val_acc: 0.8283\n",
+ "\n",
+ "Epoch 00412: val_acc did not improve from 0.83223\n",
+ "Epoch 413/3000\n",
+ " - 30s - loss: 0.6830 - acc: 0.8863 - val_loss: 0.9658 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00413: val_acc did not improve from 0.83223\n",
+ "Epoch 414/3000\n",
+ " - 33s - loss: 0.6980 - acc: 0.8844 - val_loss: 0.9808 - val_acc: 0.8170\n",
+ "\n",
+ "Epoch 00414: val_acc did not improve from 0.83223\n",
+ "Epoch 415/3000\n",
+ " - 30s - loss: 0.7306 - acc: 0.8819 - val_loss: 0.9565 - val_acc: 0.8225\n",
+ "\n",
+ "Epoch 00415: val_acc did not improve from 0.83223\n",
+ "Epoch 416/3000\n",
+ " - 30s - loss: 0.6786 - acc: 0.8869 - val_loss: 0.9240 - val_acc: 0.8377\n",
+ "\n",
+ "Epoch 00416: val_acc improved from 0.83223 to 0.83768, saving model to ./ModelSnapshots/LSTM-v1-416.h5\n",
+ "Epoch 417/3000\n",
+ " - 31s - loss: 0.6800 - acc: 0.8865 - val_loss: 1.0447 - val_acc: 0.8135\n",
+ "\n",
+ "Epoch 00417: val_acc did not improve from 0.83768\n",
+ "Epoch 418/3000\n",
+ " - 30s - loss: 0.6849 - acc: 0.8884 - val_loss: 1.0136 - val_acc: 0.8112\n",
+ "\n",
+ "Epoch 00418: val_acc did not improve from 0.83768\n",
+ "Epoch 419/3000\n",
+ " - 32s - loss: 0.7044 - acc: 0.8833 - val_loss: 0.9230 - val_acc: 0.8342\n",
+ "\n",
+ "Epoch 00419: val_acc did not improve from 0.83768\n",
+ "Epoch 420/3000\n",
+ " - 29s - loss: 0.6763 - acc: 0.8936 - val_loss: 0.9175 - val_acc: 0.8307\n",
+ "\n",
+ "Epoch 00420: val_acc did not improve from 0.83768\n",
+ "Epoch 421/3000\n",
+ " - 31s - loss: 0.6856 - acc: 0.8863 - val_loss: 0.9694 - val_acc: 0.8198\n",
+ "\n",
+ "Epoch 00421: val_acc did not improve from 0.83768\n",
+ "Epoch 422/3000\n",
+ " - 30s - loss: 0.6960 - acc: 0.8862 - val_loss: 0.9924 - val_acc: 0.8221\n",
+ "\n",
+ "Epoch 00422: val_acc did not improve from 0.83768\n",
+ "Epoch 423/3000\n",
+ " - 31s - loss: 0.6809 - acc: 0.8872 - val_loss: 0.9568 - val_acc: 0.8256\n",
+ "\n",
+ "Epoch 00423: val_acc did not improve from 0.83768\n",
+ "Epoch 424/3000\n",
+ " - 32s - loss: 0.6743 - acc: 0.8869 - val_loss: 0.9685 - val_acc: 0.8272\n",
+ "\n",
+ "Epoch 00424: val_acc did not improve from 0.83768\n",
+ "Epoch 425/3000\n",
+ " - 30s - loss: 0.6761 - acc: 0.8909 - val_loss: 0.9737 - val_acc: 0.8170\n",
+ "\n",
+ "Epoch 00425: val_acc did not improve from 0.83768\n",
+ "Epoch 426/3000\n",
+ " - 29s - loss: 0.6811 - acc: 0.8877 - val_loss: 0.9116 - val_acc: 0.8357\n",
+ "\n",
+ "Epoch 00426: val_acc did not improve from 0.83768\n",
+ "Epoch 427/3000\n",
+ " - 32s - loss: 0.6778 - acc: 0.8905 - val_loss: 0.9641 - val_acc: 0.8190\n",
+ "\n",
+ "Epoch 00427: val_acc did not improve from 0.83768\n",
+ "Epoch 428/3000\n",
+ " - 30s - loss: 0.6847 - acc: 0.8896 - val_loss: 0.9862 - val_acc: 0.8241\n",
+ "\n",
+ "Epoch 00428: val_acc did not improve from 0.83768\n",
+ "Epoch 429/3000\n",
+ " - 31s - loss: 0.6805 - acc: 0.8893 - val_loss: 0.9692 - val_acc: 0.8139\n",
+ "\n",
+ "Epoch 00429: val_acc did not improve from 0.83768\n",
+ "Epoch 430/3000\n",
+ " - 30s - loss: 0.6785 - acc: 0.8881 - val_loss: 0.9187 - val_acc: 0.8307\n",
+ "\n",
+ "Epoch 00430: val_acc did not improve from 0.83768\n",
+ "Epoch 431/3000\n",
+ " - 31s - loss: 0.6840 - acc: 0.8827 - val_loss: 0.9195 - val_acc: 0.8268\n",
+ "\n",
+ "Epoch 00431: val_acc did not improve from 0.83768\n",
+ "Epoch 432/3000\n",
+ " - 30s - loss: 0.6737 - acc: 0.8883 - val_loss: 1.0007 - val_acc: 0.8252\n",
+ "\n",
+ "Epoch 00432: val_acc did not improve from 0.83768\n",
+ "Epoch 433/3000\n",
+ " - 29s - loss: 0.6745 - acc: 0.8890 - val_loss: 0.9841 - val_acc: 0.8268\n",
+ "\n",
+ "Epoch 00433: val_acc did not improve from 0.83768\n",
+ "Epoch 434/3000\n",
+ " - 30s - loss: 0.6760 - acc: 0.8869 - val_loss: 0.9422 - val_acc: 0.8326\n",
+ "\n",
+ "Epoch 00434: val_acc did not improve from 0.83768\n",
+ "Epoch 435/3000\n",
+ " - 32s - loss: 0.6775 - acc: 0.8913 - val_loss: 1.0096 - val_acc: 0.8186\n",
+ "\n",
+ "Epoch 00435: val_acc did not improve from 0.83768\n",
+ "Epoch 436/3000\n",
+ " - 31s - loss: 0.6689 - acc: 0.8919 - val_loss: 0.9567 - val_acc: 0.8264\n",
+ "\n",
+ "Epoch 00436: val_acc did not improve from 0.83768\n",
+ "Epoch 437/3000\n",
+ " - 30s - loss: 0.6949 - acc: 0.8889 - val_loss: 1.0712 - val_acc: 0.8081\n",
+ "\n",
+ "Epoch 00437: val_acc did not improve from 0.83768\n",
+ "Epoch 438/3000\n",
+ " - 30s - loss: 0.6819 - acc: 0.8871 - val_loss: 0.9538 - val_acc: 0.8233\n",
+ "\n",
+ "Epoch 00438: val_acc did not improve from 0.83768\n",
+ "Epoch 439/3000\n",
+ " - 31s - loss: 0.6687 - acc: 0.8912 - val_loss: 1.0322 - val_acc: 0.8139\n",
+ "\n",
+ "Epoch 00439: val_acc did not improve from 0.83768\n",
+ "Epoch 440/3000\n",
+ " - 30s - loss: 0.6751 - acc: 0.8889 - val_loss: 0.9630 - val_acc: 0.8256\n",
+ "\n",
+ "Epoch 00440: val_acc did not improve from 0.83768\n",
+ "Epoch 441/3000\n",
+ " - 31s - loss: 0.6622 - acc: 0.8915 - val_loss: 0.9559 - val_acc: 0.8241\n",
+ "\n",
+ "Epoch 00441: val_acc did not improve from 0.83768\n",
+ "Epoch 442/3000\n",
+ " - 32s - loss: 0.6632 - acc: 0.8934 - val_loss: 0.9613 - val_acc: 0.8206\n",
+ "\n",
+ "Epoch 00442: val_acc did not improve from 0.83768\n",
+ "Epoch 443/3000\n",
+ " - 30s - loss: 0.6698 - acc: 0.8934 - val_loss: 0.9520 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00443: val_acc did not improve from 0.83768\n",
+ "Epoch 444/3000\n",
+ " - 32s - loss: 0.6589 - acc: 0.8978 - val_loss: 0.9992 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00444: val_acc did not improve from 0.83768\n",
+ "Epoch 445/3000\n",
+ " - 29s - loss: 0.6619 - acc: 0.8930 - val_loss: 0.9466 - val_acc: 0.8291\n",
+ "\n",
+ "Epoch 00445: val_acc did not improve from 0.83768\n",
+ "Epoch 446/3000\n",
+ " - 31s - loss: 0.6575 - acc: 0.8949 - val_loss: 0.9641 - val_acc: 0.8248\n",
+ "\n",
+ "Epoch 00446: val_acc did not improve from 0.83768\n",
+ "Epoch 447/3000\n",
+ " - 32s - loss: 0.6643 - acc: 0.8937 - val_loss: 0.9351 - val_acc: 0.8295\n",
+ "\n",
+ "Epoch 00447: val_acc did not improve from 0.83768\n",
+ "Epoch 448/3000\n",
+ " - 31s - loss: 0.6409 - acc: 0.8967 - val_loss: 1.0199 - val_acc: 0.8167\n",
+ "\n",
+ "Epoch 00448: val_acc did not improve from 0.83768\n",
+ "Epoch 449/3000\n",
+ " - 30s - loss: 0.6707 - acc: 0.8913 - val_loss: 0.9216 - val_acc: 0.8342\n",
+ "\n",
+ "Epoch 00449: val_acc did not improve from 0.83768\n",
+ "Epoch 450/3000\n",
+ " - 30s - loss: 0.6656 - acc: 0.8889 - val_loss: 1.0240 - val_acc: 0.8209\n",
+ "\n",
+ "Epoch 00450: val_acc did not improve from 0.83768\n",
+ "Epoch 451/3000\n",
+ " - 30s - loss: 0.6704 - acc: 0.8890 - val_loss: 0.9845 - val_acc: 0.8322\n",
+ "\n",
+ "Epoch 00451: val_acc did not improve from 0.83768\n",
+ "Epoch 452/3000\n",
+ " - 31s - loss: 0.6570 - acc: 0.8942 - val_loss: 0.9081 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00452: val_acc improved from 0.83768 to 0.84274, saving model to ./ModelSnapshots/LSTM-v1-452.h5\n",
+ "Epoch 453/3000\n",
+ " - 32s - loss: 0.6687 - acc: 0.8904 - val_loss: 0.9301 - val_acc: 0.8268\n",
+ "\n",
+ "Epoch 00453: val_acc did not improve from 0.84274\n",
+ "Epoch 454/3000\n",
+ " - 29s - loss: 0.6622 - acc: 0.8909 - val_loss: 0.9276 - val_acc: 0.8338\n",
+ "\n",
+ "Epoch 00454: val_acc did not improve from 0.84274\n",
+ "Epoch 455/3000\n",
+ " - 33s - loss: 0.6512 - acc: 0.8945 - val_loss: 0.8959 - val_acc: 0.8377\n",
+ "\n",
+ "Epoch 00455: val_acc did not improve from 0.84274\n",
+ "Epoch 456/3000\n",
+ " - 30s - loss: 0.6434 - acc: 0.8975 - val_loss: 1.0070 - val_acc: 0.8202\n",
+ "\n",
+ "Epoch 00456: val_acc did not improve from 0.84274\n",
+ "Epoch 457/3000\n",
+ " - 31s - loss: 0.6649 - acc: 0.8913 - val_loss: 0.8930 - val_acc: 0.8346\n",
+ "\n",
+ "Epoch 00457: val_acc did not improve from 0.84274\n",
+ "Epoch 458/3000\n",
+ " - 31s - loss: 0.6446 - acc: 0.8963 - val_loss: 0.9378 - val_acc: 0.8346\n",
+ "\n",
+ "Epoch 00458: val_acc did not improve from 0.84274\n",
+ "Epoch 459/3000\n",
+ " - 32s - loss: 0.6460 - acc: 0.8933 - val_loss: 0.9946 - val_acc: 0.8163\n",
+ "\n",
+ "Epoch 00459: val_acc did not improve from 0.84274\n",
+ "Epoch 460/3000\n",
+ " - 30s - loss: 0.6373 - acc: 0.9002 - val_loss: 0.9416 - val_acc: 0.8342\n",
+ "\n",
+ "Epoch 00460: val_acc did not improve from 0.84274\n",
+ "Epoch 461/3000\n",
+ " - 32s - loss: 0.6478 - acc: 0.8995 - val_loss: 0.9468 - val_acc: 0.8299\n",
+ "\n",
+ "Epoch 00461: val_acc did not improve from 0.84274\n",
+ "Epoch 462/3000\n",
+ " - 30s - loss: 0.6483 - acc: 0.8946 - val_loss: 0.9606 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00462: val_acc did not improve from 0.84274\n",
+ "Epoch 463/3000\n",
+ " - 31s - loss: 0.6565 - acc: 0.8949 - val_loss: 0.9546 - val_acc: 0.8272\n",
+ "\n",
+ "Epoch 00463: val_acc did not improve from 0.84274\n",
+ "Epoch 464/3000\n",
+ " - 30s - loss: 0.6421 - acc: 0.8978 - val_loss: 0.9096 - val_acc: 0.8385\n",
+ "\n",
+ "Epoch 00464: val_acc did not improve from 0.84274\n",
+ "Epoch 465/3000\n",
+ " - 30s - loss: 0.6411 - acc: 0.9008 - val_loss: 0.9590 - val_acc: 0.8307\n",
+ "\n",
+ "Epoch 00465: val_acc did not improve from 0.84274\n",
+ "Epoch 466/3000\n",
+ " - 32s - loss: 0.6386 - acc: 0.8975 - val_loss: 0.9847 - val_acc: 0.8209\n",
+ "\n",
+ "Epoch 00466: val_acc did not improve from 0.84274\n",
+ "Epoch 467/3000\n",
+ " - 29s - loss: 0.6587 - acc: 0.8966 - val_loss: 0.9514 - val_acc: 0.8299\n",
+ "\n",
+ "Epoch 00467: val_acc did not improve from 0.84274\n",
+ "Epoch 468/3000\n",
+ " - 32s - loss: 0.6471 - acc: 0.8955 - val_loss: 0.9761 - val_acc: 0.8268\n",
+ "\n",
+ "Epoch 00468: val_acc did not improve from 0.84274\n",
+ "Epoch 469/3000\n",
+ " - 29s - loss: 0.6345 - acc: 0.8989 - val_loss: 0.9755 - val_acc: 0.8272\n",
+ "\n",
+ "Epoch 00469: val_acc did not improve from 0.84274\n",
+ "Epoch 470/3000\n",
+ " - 33s - loss: 0.6434 - acc: 0.8961 - val_loss: 0.9514 - val_acc: 0.8357\n",
+ "\n",
+ "Epoch 00470: val_acc did not improve from 0.84274\n",
+ "Epoch 471/3000\n",
+ " - 29s - loss: 0.6647 - acc: 0.8910 - val_loss: 0.9720 - val_acc: 0.8229\n",
+ "\n",
+ "Epoch 00471: val_acc did not improve from 0.84274\n",
+ "Epoch 472/3000\n",
+ " - 29s - loss: 0.6248 - acc: 0.9028 - val_loss: 1.0007 - val_acc: 0.8104\n",
+ "\n",
+ "Epoch 00472: val_acc did not improve from 0.84274\n",
+ "Epoch 473/3000\n",
+ " - 31s - loss: 0.6511 - acc: 0.8948 - val_loss: 0.9711 - val_acc: 0.8252\n",
+ "\n",
+ "Epoch 00473: val_acc did not improve from 0.84274\n",
+ "Epoch 474/3000\n",
+ " - 31s - loss: 0.6358 - acc: 0.8996 - val_loss: 0.9535 - val_acc: 0.8307\n",
+ "\n",
+ "Epoch 00474: val_acc did not improve from 0.84274\n",
+ "Epoch 475/3000\n",
+ " - 30s - loss: 0.6237 - acc: 0.9023 - val_loss: 0.9315 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00475: val_acc did not improve from 0.84274\n",
+ "Epoch 476/3000\n",
+ " - 31s - loss: 0.6363 - acc: 0.8990 - val_loss: 0.9828 - val_acc: 0.8260\n",
+ "\n",
+ "Epoch 00476: val_acc did not improve from 0.84274\n",
+ "Epoch 477/3000\n",
+ " - 29s - loss: 0.6449 - acc: 0.8960 - val_loss: 0.9743 - val_acc: 0.8264\n",
+ "\n",
+ "Epoch 00477: val_acc did not improve from 0.84274\n",
+ "Epoch 478/3000\n",
+ " - 31s - loss: 0.6603 - acc: 0.8909 - val_loss: 1.0280 - val_acc: 0.8147\n",
+ "\n",
+ "Epoch 00478: val_acc did not improve from 0.84274\n",
+ "Epoch 479/3000\n",
+ " - 31s - loss: 0.6296 - acc: 0.8976 - val_loss: 0.9203 - val_acc: 0.8346\n",
+ "\n",
+ "Epoch 00479: val_acc did not improve from 0.84274\n",
+ "Epoch 480/3000\n",
+ " - 32s - loss: 0.6218 - acc: 0.9029 - val_loss: 0.8990 - val_acc: 0.8295\n",
+ "\n",
+ "Epoch 00480: val_acc did not improve from 0.84274\n",
+ "Epoch 481/3000\n",
+ " - 30s - loss: 0.6194 - acc: 0.9029 - val_loss: 0.8981 - val_acc: 0.8322\n",
+ "\n",
+ "Epoch 00481: val_acc did not improve from 0.84274\n",
+ "Epoch 482/3000\n",
+ " - 31s - loss: 0.6214 - acc: 0.9066 - val_loss: 0.9426 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00482: val_acc did not improve from 0.84274\n",
+ "Epoch 483/3000\n",
+ " - 31s - loss: 0.6336 - acc: 0.8964 - val_loss: 0.9820 - val_acc: 0.8287\n",
+ "\n",
+ "Epoch 00483: val_acc did not improve from 0.84274\n",
+ "Epoch 484/3000\n",
+ " - 29s - loss: 0.6330 - acc: 0.8976 - val_loss: 0.9949 - val_acc: 0.8221\n",
+ "\n",
+ "Epoch 00484: val_acc did not improve from 0.84274\n",
+ "Epoch 485/3000\n",
+ " - 31s - loss: 0.6278 - acc: 0.8976 - val_loss: 0.9057 - val_acc: 0.8396\n",
+ "\n",
+ "Epoch 00485: val_acc did not improve from 0.84274\n",
+ "Epoch 486/3000\n",
+ " - 31s - loss: 0.6178 - acc: 0.9031 - val_loss: 0.9053 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00486: val_acc did not improve from 0.84274\n",
+ "Epoch 487/3000\n",
+ " - 29s - loss: 0.6206 - acc: 0.9038 - val_loss: 0.9601 - val_acc: 0.8283\n",
+ "\n",
+ "Epoch 00487: val_acc did not improve from 0.84274\n",
+ "\n",
+ "Epoch 00487: ReduceLROnPlateau reducing learning rate to 9.499999760009814e-05.\n",
+ "Epoch 488/3000\n",
+ " - 31s - loss: 0.6209 - acc: 0.9022 - val_loss: 0.9219 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00488: val_acc did not improve from 0.84274\n",
+ "Epoch 489/3000\n",
+ " - 29s - loss: 0.6177 - acc: 0.9014 - val_loss: 0.9217 - val_acc: 0.8338\n",
+ "\n",
+ "Epoch 00489: val_acc did not improve from 0.84274\n",
+ "Epoch 490/3000\n",
+ " - 31s - loss: 0.6256 - acc: 0.9041 - val_loss: 0.9317 - val_acc: 0.8381\n",
+ "\n",
+ "Epoch 00490: val_acc did not improve from 0.84274\n",
+ "Epoch 491/3000\n",
+ " - 32s - loss: 0.6288 - acc: 0.9007 - val_loss: 0.9040 - val_acc: 0.8396\n",
+ "\n",
+ "Epoch 00491: val_acc did not improve from 0.84274\n",
+ "Epoch 492/3000\n",
+ " - 29s - loss: 0.6171 - acc: 0.9023 - val_loss: 0.9252 - val_acc: 0.8334\n",
+ "\n",
+ "Epoch 00492: val_acc did not improve from 0.84274\n",
+ "Epoch 493/3000\n",
+ " - 32s - loss: 0.6164 - acc: 0.9026 - val_loss: 0.9921 - val_acc: 0.8209\n",
+ "\n",
+ "Epoch 00493: val_acc did not improve from 0.84274\n",
+ "Epoch 494/3000\n",
+ " - 31s - loss: 0.6113 - acc: 0.9040 - val_loss: 0.9450 - val_acc: 0.8287\n",
+ "\n",
+ "Epoch 00494: val_acc did not improve from 0.84274\n",
+ "Epoch 495/3000\n",
+ " - 30s - loss: 0.6169 - acc: 0.9014 - val_loss: 0.9621 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00495: val_acc did not improve from 0.84274\n",
+ "Epoch 496/3000\n",
+ " - 29s - loss: 0.6022 - acc: 0.9062 - val_loss: 0.9252 - val_acc: 0.8342\n",
+ "\n",
+ "Epoch 00496: val_acc did not improve from 0.84274\n",
+ "Epoch 497/3000\n",
+ " - 32s - loss: 0.6095 - acc: 0.9049 - val_loss: 0.9989 - val_acc: 0.8198\n",
+ "\n",
+ "Epoch 00497: val_acc did not improve from 0.84274\n",
+ "Epoch 498/3000\n",
+ " - 29s - loss: 0.6256 - acc: 0.8986 - val_loss: 0.9319 - val_acc: 0.8365\n",
+ "\n",
+ "Epoch 00498: val_acc did not improve from 0.84274\n",
+ "Epoch 499/3000\n",
+ " - 30s - loss: 0.6068 - acc: 0.9064 - val_loss: 0.9506 - val_acc: 0.8357\n",
+ "\n",
+ "Epoch 00499: val_acc did not improve from 0.84274\n",
+ "Epoch 500/3000\n",
+ " - 33s - loss: 0.6110 - acc: 0.9035 - val_loss: 0.9020 - val_acc: 0.8322\n",
+ "\n",
+ "Epoch 00500: val_acc did not improve from 0.84274\n",
+ "Epoch 501/3000\n",
+ " - 29s - loss: 0.5981 - acc: 0.9067 - val_loss: 0.9281 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00501: val_acc did not improve from 0.84274\n",
+ "Epoch 502/3000\n",
+ " - 30s - loss: 0.6126 - acc: 0.9044 - val_loss: 0.9046 - val_acc: 0.8408\n",
+ "\n",
+ "Epoch 00502: val_acc did not improve from 0.84274\n",
+ "Epoch 503/3000\n",
+ " - 29s - loss: 0.6225 - acc: 0.8992 - val_loss: 0.8745 - val_acc: 0.8404\n",
+ "\n",
+ "Epoch 00503: val_acc did not improve from 0.84274\n",
+ "Epoch 504/3000\n",
+ " - 30s - loss: 0.6211 - acc: 0.9044 - val_loss: 0.9955 - val_acc: 0.8225\n",
+ "\n",
+ "Epoch 00504: val_acc did not improve from 0.84274\n",
+ "Epoch 505/3000\n",
+ " - 32s - loss: 0.6212 - acc: 0.8993 - val_loss: 0.8950 - val_acc: 0.8408\n",
+ "\n",
+ "Epoch 00505: val_acc did not improve from 0.84274\n",
+ "Epoch 506/3000\n",
+ " - 31s - loss: 0.6094 - acc: 0.9053 - val_loss: 0.9933 - val_acc: 0.8233\n",
+ "\n",
+ "Epoch 00506: val_acc did not improve from 0.84274\n",
+ "Epoch 507/3000\n",
+ " - 29s - loss: 0.6067 - acc: 0.9037 - val_loss: 0.9005 - val_acc: 0.8400\n",
+ "\n",
+ "Epoch 00507: val_acc did not improve from 0.84274\n",
+ "Epoch 508/3000\n",
+ " - 32s - loss: 0.6113 - acc: 0.9035 - val_loss: 0.9273 - val_acc: 0.8412\n",
+ "\n",
+ "Epoch 00508: val_acc did not improve from 0.84274\n",
+ "Epoch 509/3000\n",
+ " - 28s - loss: 0.6074 - acc: 0.9062 - val_loss: 0.8480 - val_acc: 0.8420\n",
+ "\n",
+ "Epoch 00509: val_acc did not improve from 0.84274\n",
+ "Epoch 510/3000\n",
+ " - 32s - loss: 0.6066 - acc: 0.9041 - val_loss: 0.9606 - val_acc: 0.8303\n",
+ "\n",
+ "Epoch 00510: val_acc did not improve from 0.84274\n",
+ "Epoch 511/3000\n",
+ " - 29s - loss: 0.6203 - acc: 0.9017 - val_loss: 0.9328 - val_acc: 0.8338\n",
+ "\n",
+ "Epoch 00511: val_acc did not improve from 0.84274\n",
+ "Epoch 512/3000\n",
+ " - 30s - loss: 0.6036 - acc: 0.9050 - val_loss: 0.8756 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00512: val_acc improved from 0.84274 to 0.84663, saving model to ./ModelSnapshots/LSTM-v1-512.h5\n",
+ "Epoch 513/3000\n",
+ " - 31s - loss: 0.5944 - acc: 0.9053 - val_loss: 0.8823 - val_acc: 0.8420\n",
+ "\n",
+ "Epoch 00513: val_acc did not improve from 0.84663\n",
+ "Epoch 514/3000\n",
+ " - 30s - loss: 0.5969 - acc: 0.9105 - val_loss: 0.9693 - val_acc: 0.8299\n",
+ "\n",
+ "Epoch 00514: val_acc did not improve from 0.84663\n",
+ "Epoch 515/3000\n",
+ " - 31s - loss: 0.6015 - acc: 0.9032 - val_loss: 0.9040 - val_acc: 0.8388\n",
+ "\n",
+ "Epoch 00515: val_acc did not improve from 0.84663\n",
+ "Epoch 516/3000\n",
+ " - 30s - loss: 0.6017 - acc: 0.9022 - val_loss: 0.9142 - val_acc: 0.8373\n",
+ "\n",
+ "Epoch 00516: val_acc did not improve from 0.84663\n",
+ "Epoch 517/3000\n",
+ " - 30s - loss: 0.6046 - acc: 0.9014 - val_loss: 1.0028 - val_acc: 0.8217\n",
+ "\n",
+ "Epoch 00517: val_acc did not improve from 0.84663\n",
+ "Epoch 518/3000\n",
+ " - 30s - loss: 0.6052 - acc: 0.9028 - val_loss: 0.9557 - val_acc: 0.8248\n",
+ "\n",
+ "Epoch 00518: val_acc did not improve from 0.84663\n",
+ "Epoch 519/3000\n",
+ " - 31s - loss: 0.6058 - acc: 0.9049 - val_loss: 0.9401 - val_acc: 0.8315\n",
+ "\n",
+ "Epoch 00519: val_acc did not improve from 0.84663\n",
+ "Epoch 520/3000\n",
+ " - 31s - loss: 0.5908 - acc: 0.9061 - val_loss: 0.9356 - val_acc: 0.8361\n",
+ "\n",
+ "Epoch 00520: val_acc did not improve from 0.84663\n",
+ "Epoch 521/3000\n",
+ " - 30s - loss: 0.5869 - acc: 0.9056 - val_loss: 0.9188 - val_acc: 0.8385\n",
+ "\n",
+ "Epoch 00521: val_acc did not improve from 0.84663\n",
+ "Epoch 522/3000\n",
+ " - 30s - loss: 0.5908 - acc: 0.9081 - val_loss: 0.9738 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00522: val_acc did not improve from 0.84663\n",
+ "Epoch 523/3000\n",
+ " - 30s - loss: 0.5922 - acc: 0.9099 - val_loss: 0.8900 - val_acc: 0.8412\n",
+ "\n",
+ "Epoch 00523: val_acc did not improve from 0.84663\n",
+ "Epoch 524/3000\n",
+ " - 30s - loss: 0.5825 - acc: 0.9102 - val_loss: 0.8922 - val_acc: 0.8388\n",
+ "\n",
+ "Epoch 00524: val_acc did not improve from 0.84663\n",
+ "Epoch 525/3000\n",
+ " - 32s - loss: 0.5922 - acc: 0.9090 - val_loss: 1.0021 - val_acc: 0.8209\n",
+ "\n",
+ "Epoch 00525: val_acc did not improve from 0.84663\n",
+ "Epoch 526/3000\n",
+ " - 29s - loss: 0.6023 - acc: 0.9052 - val_loss: 0.9643 - val_acc: 0.8346\n",
+ "\n",
+ "Epoch 00526: val_acc did not improve from 0.84663\n",
+ "Epoch 527/3000\n",
+ " - 30s - loss: 0.6061 - acc: 0.9035 - val_loss: 0.8903 - val_acc: 0.8353\n",
+ "\n",
+ "Epoch 00527: val_acc did not improve from 0.84663\n",
+ "Epoch 528/3000\n",
+ " - 30s - loss: 0.6028 - acc: 0.9075 - val_loss: 0.9078 - val_acc: 0.8396\n",
+ "\n",
+ "Epoch 00528: val_acc did not improve from 0.84663\n",
+ "Epoch 529/3000\n",
+ " - 30s - loss: 0.5908 - acc: 0.9085 - val_loss: 0.8997 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00529: val_acc did not improve from 0.84663\n",
+ "Epoch 530/3000\n",
+ " - 30s - loss: 0.5908 - acc: 0.9069 - val_loss: 0.8971 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00530: val_acc did not improve from 0.84663\n",
+ "Epoch 531/3000\n",
+ " - 32s - loss: 0.6067 - acc: 0.8996 - val_loss: 0.9471 - val_acc: 0.8252\n",
+ "\n",
+ "Epoch 00531: val_acc did not improve from 0.84663\n",
+ "Epoch 532/3000\n",
+ " - 29s - loss: 0.5918 - acc: 0.9058 - val_loss: 0.9280 - val_acc: 0.8346\n",
+ "\n",
+ "Epoch 00532: val_acc did not improve from 0.84663\n",
+ "Epoch 533/3000\n",
+ " - 30s - loss: 0.5971 - acc: 0.9050 - val_loss: 0.8608 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00533: val_acc did not improve from 0.84663\n",
+ "Epoch 534/3000\n",
+ " - 30s - loss: 0.5908 - acc: 0.9085 - val_loss: 0.8661 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00534: val_acc improved from 0.84663 to 0.85091, saving model to ./ModelSnapshots/LSTM-v1-534.h5\n",
+ "Epoch 535/3000\n",
+ " - 31s - loss: 0.5981 - acc: 0.9052 - val_loss: 1.1163 - val_acc: 0.8124\n",
+ "\n",
+ "Epoch 00535: val_acc did not improve from 0.85091\n",
+ "Epoch 536/3000\n",
+ " - 30s - loss: 0.6046 - acc: 0.9004 - val_loss: 0.9089 - val_acc: 0.8377\n",
+ "\n",
+ "Epoch 00536: val_acc did not improve from 0.85091\n",
+ "Epoch 537/3000\n",
+ " - 30s - loss: 0.5962 - acc: 0.9084 - val_loss: 0.9151 - val_acc: 0.8381\n",
+ "\n",
+ "Epoch 00537: val_acc did not improve from 0.85091\n",
+ "Epoch 538/3000\n",
+ " - 30s - loss: 0.5858 - acc: 0.9075 - val_loss: 0.9157 - val_acc: 0.8303\n",
+ "\n",
+ "Epoch 00538: val_acc did not improve from 0.85091\n",
+ "Epoch 539/3000\n",
+ " - 32s - loss: 0.5867 - acc: 0.9079 - val_loss: 0.9277 - val_acc: 0.8346\n",
+ "\n",
+ "Epoch 00539: val_acc did not improve from 0.85091\n",
+ "\n",
+ "Epoch 00539: ReduceLROnPlateau reducing learning rate to 9.02499959920533e-05.\n",
+ "Epoch 540/3000\n",
+ " - 31s - loss: 0.5884 - acc: 0.9085 - val_loss: 0.9444 - val_acc: 0.8264\n",
+ "\n",
+ "Epoch 00540: val_acc did not improve from 0.85091\n",
+ "Epoch 541/3000\n",
+ " - 31s - loss: 0.5837 - acc: 0.9081 - val_loss: 0.8909 - val_acc: 0.8439\n",
+ "\n",
+ "Epoch 00541: val_acc did not improve from 0.85091\n",
+ "Epoch 542/3000\n",
+ " - 30s - loss: 0.5881 - acc: 0.9079 - val_loss: 0.9310 - val_acc: 0.8318\n",
+ "\n",
+ "Epoch 00542: val_acc did not improve from 0.85091\n",
+ "Epoch 543/3000\n",
+ " - 30s - loss: 0.5840 - acc: 0.9084 - val_loss: 0.9961 - val_acc: 0.8248\n",
+ "\n",
+ "Epoch 00543: val_acc did not improve from 0.85091\n",
+ "Epoch 544/3000\n",
+ " - 30s - loss: 0.5883 - acc: 0.9061 - val_loss: 1.0121 - val_acc: 0.8182\n",
+ "\n",
+ "Epoch 00544: val_acc did not improve from 0.85091\n",
+ "Epoch 545/3000\n",
+ " - 31s - loss: 0.5747 - acc: 0.9106 - val_loss: 0.9573 - val_acc: 0.8279\n",
+ "\n",
+ "Epoch 00545: val_acc did not improve from 0.85091\n",
+ "Epoch 546/3000\n",
+ " - 32s - loss: 0.5794 - acc: 0.9088 - val_loss: 0.9523 - val_acc: 0.8350\n",
+ "\n",
+ "Epoch 00546: val_acc did not improve from 0.85091\n",
+ "Epoch 547/3000\n",
+ " - 30s - loss: 0.5902 - acc: 0.9097 - val_loss: 0.9324 - val_acc: 0.8287\n",
+ "\n",
+ "Epoch 00547: val_acc did not improve from 0.85091\n",
+ "Epoch 548/3000\n",
+ " - 30s - loss: 0.5840 - acc: 0.9075 - val_loss: 0.8749 - val_acc: 0.8439\n",
+ "\n",
+ "Epoch 00548: val_acc did not improve from 0.85091\n",
+ "Epoch 549/3000\n"
+ ]
+ }
+ ],
+ "source": [
+ "# --- Hyperparameters for this training run ---\n",
+ "# l1v / l2v are the L1/L2 penalty strengths shared by every Conv2D and CuDNNLSTM kernel below.\n",
+ "batch_size = 50\n",
+ "epochs = 3000\n",
+ "timesteps = 50\n",
+ "data_dim = (27,15)\n",
+ "l1v = 0.005\n",
+ "l2v = 0.015\n",
+ "\n",
+ "\n",
+ "# NOTE(review): the returned graph is discarded, so this call has no effect here.\n",
+ "tf.get_default_graph()\n",
+ "# Model: per-timestep CNN feature extractor (via TimeDistributed) feeding stacked LSTMs.\n",
+ "model = Sequential()\n",
+ "\n",
+ "# CNN stage 1: two 3x3 convs (64 -> 32 filters) applied independently to each of the\n",
+ "# `timesteps` frames; each frame is a 27x15 single-channel image.\n",
+ "model.add(TimeDistributed(Conv2D(64, kernel_size=(3,3), activation='relu', \n",
+ " padding='same', kernel_regularizer=regularizers.l1_l2(l1v,l2v)),\n",
+ " input_shape=(timesteps ,27, 15, 1)))\n",
+ "model.add(TimeDistributed(Conv2D(32, kernel_size=(3, 3), activation='relu',\n",
+ " padding='same', kernel_regularizer=regularizers.l1_l2(l1v,l2v))))\n",
+ "model.add(TimeDistributed(MaxPooling2D(pool_size=(2,2), strides=None,\n",
+ " padding='same', data_format='channels_last')))\n",
+ "model.add(TimeDistributed(Dropout(0.50)))\n",
+ "\n",
+ "# CNN stage 2: two more 3x3 convs (32 -> 16 filters) plus pooling/dropout, same pattern.\n",
+ "model.add(TimeDistributed(Conv2D(32, kernel_size=(3, 3), activation='relu',\n",
+ " padding='same', kernel_regularizer=regularizers.l1_l2(l1v,l2v))))\n",
+ "model.add(TimeDistributed(Conv2D(16, kernel_size=(3, 3), activation='relu',\n",
+ " padding='same', kernel_regularizer=regularizers.l1_l2(l1v,l2v))))\n",
+ "model.add(TimeDistributed(MaxPooling2D(pool_size=(2,2), strides=None, padding='same', data_format='channels_last')))\n",
+ "model.add(TimeDistributed(Dropout(0.50)))\n",
+ "\n",
+ "# Collapse each frame's feature maps to a flat vector -> sequence of feature vectors.\n",
+ "model.add(TimeDistributed(Flatten()))\n",
+ "\n",
+ "# Recurrent stage: two stacked CuDNNLSTM layers (GPU-only implementation); only the final\n",
+ "# timestep's output of the second layer feeds the classifier head.\n",
+ "# NOTE(review): input_shape here is ignored by Keras (these are not the first layer), and\n",
+ "# (timesteps, data_dim) would be malformed anyway because data_dim is itself a tuple.\n",
+ "model.add(keras.layers.CuDNNLSTM(80, return_sequences=True, input_shape=(timesteps, data_dim), kernel_regularizer=regularizers.l1_l2(l1v,l2v)))\n",
+ "model.add(Dropout(0.5))\n",
+ "\n",
+ "model.add(keras.layers.CuDNNLSTM(50, return_sequences=False, input_shape=(timesteps, data_dim), kernel_regularizer=regularizers.l1_l2(l1v,l2v)))\n",
+ "model.add(Dropout(0.5))\n",
+ "\n",
+ "# Softmax classifier. NOTE(review): num_classes is not defined in this cell — presumably\n",
+ "# set in an earlier cell; confirm before running in isolation.\n",
+ "model.add(Dense(num_classes, activation='softmax'))\n",
+ "\n",
+ "#optimizer = optimizers.Adagrad()\n",
+ "optimizer = optimizers.Adam(lr = 0.0001, decay=1e-6)\n",
+ "#optimizer = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.1)\n",
+ "model.compile(loss='categorical_crossentropy',\n",
+ " optimizer=optimizer,\n",
+ " metrics=['accuracy'])\n",
+ " \n",
+ "#Broadcast progress to the tensorboard.\n",
+ "\n",
+ "# Build a human-readable settings string (layer name + output shape per layer, then the\n",
+ "# hyperparameters) that LoggingTensorBoard publishes to TensorBoard's Text tab.\n",
+ "config = \"\"\n",
+ "for layer in model.layers:\n",
+ " config += str(layer.output).split('\\\"')[1].split(\"/\")[0] + str(layer.output_shape) + \"\\n\\n\"\n",
+ "config += \"batchsize: \" + str(batch_size) + \"\\n\\n\" + \"epochs: \" + str(epochs) + \"\\n\\n\" \n",
+ "config += \"l1: \" + str(l1v) + \"\\n\\n\" + \"l2: \" + str(l2v) + \"\\n\\n\"\n",
+ "\n",
+ "model.summary()\n",
+ "# Run naming: model tag + wall-clock timestamp, used for both the TensorBoard log folder\n",
+ "# and the checkpoint filename prefix.\n",
+ "current_name = \"LSTM-v1\"\n",
+ "readable_timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S')\n",
+ "tensorflowfolder = \"/srv/share/tensorboardfiles/\" + current_name + readable_timestamp\n",
+ "print(current_name + readable_timestamp)\n",
+ "logger = LoggingTensorBoard(settings_str_to_log = config, log_dir=tensorflowfolder, histogram_freq=0,\n",
+ " write_graph=True, write_images=True, update_freq = 'epoch')\n",
+ "\n",
+ "# Snapshot the full model whenever val_acc improves (checked every epoch).\n",
+ "storer = ModelCheckpoint(\"./ModelSnapshots/\" + current_name + readable_timestamp + '-{epoch:03d}.h5',\n",
+ " monitor='val_acc', verbose=1,\n",
+ " save_best_only=True, save_weights_only=False,\n",
+ " mode='auto', period=1)\n",
+ "\n",
+ "# Multiply the LR by 0.95 after 30 epochs without val_loss improvement, floored at 1e-5\n",
+ "# (matches the ReduceLROnPlateau messages in the captured output above).\n",
+ "learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss', \n",
+ " patience=30, \n",
+ " verbose=1, \n",
+ " factor=0.95, \n",
+ " min_lr=0.00001)\n",
+ "\n",
+ "# NOTE(review): x_train / y_train_one_hot / x_test / y_test_one_hot come from earlier cells.\n",
+ "history = model.fit(x_train, y_train_one_hot,\n",
+ " batch_size=batch_size,\n",
+ " epochs=epochs,\n",
+ " verbose=2,\n",
+ " validation_data=(x_test, y_test_one_hot),\n",
+ " callbacks=[storer,logger, learning_rate_reduction])\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Persist the final trained model, in addition to the per-epoch best snapshots written\n",
+ "# by the ModelCheckpoint callback during fit().\n",
+ "model.save(\"./ModelSnapshots/\" + current_name + \"_DONE.h5\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/Step_12_LSTM-WarmStart.ipynb b/python/Step_12_LSTM-WarmStart.ipynb
new file mode 100644
index 0000000..64305e5
--- /dev/null
+++ b/python/Step_12_LSTM-WarmStart.ipynb
@@ -0,0 +1,10107 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Using TensorFlow backend.\n"
+ ]
+ }
+ ],
+ "source": [
+ "## USE for Multi GPU Systems\n",
+ "#import os\n",
+ "#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+ "\n",
+ "from keras.models import Sequential, load_model\n",
+ "from keras.layers import *\n",
+ "from keras import optimizers\n",
+ "from keras import utils\n",
+ "from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau\n",
+ "import keras\n",
+ "\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "import pandas as pd\n",
+ "import math\n",
+ "\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "# Importing matplotlib to plot images.\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "%matplotlib inline\n",
+ "\n",
+ "# Importing SK-learn to calculate precision and recall\n",
+ "import sklearn\n",
+ "from sklearn import metrics\n",
+ "from sklearn.model_selection import train_test_split, cross_val_score, LeaveOneGroupOut\n",
+ "from sklearn.utils import shuffle \n",
+ "\n",
+ "# Used for graph export\n",
+ "from keras import backend as K"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class LoggingTensorBoard(TensorBoard): \n",
+ "\n",
+ " def __init__(self, log_dir, settings_str_to_log, **kwargs):\n",
+ " super(LoggingTensorBoard, self).__init__(log_dir, **kwargs)\n",
+ "\n",
+ " self.settings_str = settings_str_to_log\n",
+ "\n",
+ " def on_train_begin(self, logs=None):\n",
+ " TensorBoard.on_train_begin(self, logs=logs)\n",
+ "\n",
+ " tensor = tf.convert_to_tensor(self.settings_str)\n",
+ " summary = tf.summary.text (\"Run_Settings\", tensor)\n",
+ "\n",
+ " with tf.Session() as sess:\n",
+ " s = sess.run(summary)\n",
+ " self.writer.add_summary(s)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[ 1 2 9 6 4 14 17 16 12 3 10 18 5] [13 8 11 15 7]\n"
+ ]
+ }
+ ],
+ "source": [
+ "dfAll = pd.read_pickle(\"DataStudyCollection/df_lstm_norm50.pkl\")\n",
+ "\n",
+ "lst = dfAll.userID.unique()\n",
+ "np.random.seed(42)\n",
+ "np.random.shuffle(lst)\n",
+ "test_ids = lst[-5:]\n",
+ "train_ids = lst[:-5]\n",
+ "print(train_ids, test_ids)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,\n",
+ " 18])"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dfAll.userID.unique()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dfAll.TaskID = dfAll.TaskID % 17"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_train = dfAll[dfAll.userID.isin(train_ids)][['Blobs', 'TaskID']]\n",
+ "df_test = dfAll[dfAll.userID.isin(test_ids)][['Blobs', 'TaskID']]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x_train = np.concatenate(df_train.Blobs.values).reshape(-1,50,27,15,1)\n",
+ "x_test = np.concatenate(df_test.Blobs.values).reshape(-1,50,27,15,1)\n",
+ "\n",
+ "y_train = df_train.TaskID.values\n",
+ "y_test = df_test.TaskID.values\n",
+ "\n",
+ "x_train = x_train / 255.0\n",
+ "x_test = x_test / 255.0\n",
+ "\n",
+ "# convert class vectors to binary class matrices (one-hot notation)\n",
+ "num_classes = len(dfAll.TaskID.unique())\n",
+ "y_train_one_hot = utils.to_categorical(df_train.TaskID, num_classes)\n",
+ "y_test_one_hot = utils.to_categorical(df_test.TaskID, num_classes)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# If GPU is not available: \n",
+ "# GPU_USE = '/cpu:0'\n",
+ "#config = tf.ConfigProto(device_count = {\"GPU\": 1})\n",
+ "\n",
+ "\n",
+ "# If GPU is available: \n",
+ "config = tf.ConfigProto()\n",
+ "config.log_device_placement = True\n",
+ "config.allow_soft_placement = True\n",
+ "config.gpu_options.allow_growth=True\n",
+ "config.gpu_options.allocator_type = 'BFC'\n",
+ "\n",
+ "# Limit the maximum memory used\n",
+ "config.gpu_options.per_process_gpu_memory_fraction = 0.3\n",
+ "\n",
+ "# set session config\n",
+ "tf.keras.backend.set_session(tf.Session(config=config))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "_________________________________________________________________\n",
+ "Layer (type) Output Shape Param # \n",
+ "=================================================================\n",
+ "time_distributed_10 (TimeDis (None, 50, 27, 15, 64) 640 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_11 (TimeDis (None, 50, 27, 15, 32) 18464 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_12 (TimeDis (None, 50, 14, 8, 32) 0 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_13 (TimeDis (None, 50, 14, 8, 32) 0 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_14 (TimeDis (None, 50, 14, 8, 32) 9248 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_15 (TimeDis (None, 50, 14, 8, 16) 4624 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_16 (TimeDis (None, 50, 7, 4, 16) 0 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_17 (TimeDis (None, 50, 7, 4, 16) 0 \n",
+ "_________________________________________________________________\n",
+ "time_distributed_18 (TimeDis (None, 50, 448) 0 \n",
+ "_________________________________________________________________\n",
+ "lstm_3 (LSTM) (None, 50, 80) 169280 \n",
+ "_________________________________________________________________\n",
+ "dropout_7 (Dropout) (None, 50, 80) 0 \n",
+ "_________________________________________________________________\n",
+ "lstm_4 (LSTM) (None, 50) 26200 \n",
+ "_________________________________________________________________\n",
+ "dropout_8 (Dropout) (None, 50) 0 \n",
+ "_________________________________________________________________\n",
+ "dense_2 (Dense) (None, 17) 867 \n",
+ "=================================================================\n",
+ "Total params: 229,323\n",
+ "Trainable params: 229,323\n",
+ "Non-trainable params: 0\n",
+ "_________________________________________________________________\n",
+ "LSTM-v2\n",
+ "Train on 6624 samples, validate on 2569 samples\n",
+ "Epoch 1/3000\n",
+ " - 38s - loss: 0.6056 - acc: 0.8918 - val_loss: 1.0498 - val_acc: 0.8198\n",
+ "\n",
+ "Epoch 00001: val_acc improved from -inf to 0.81977, saving model to ./ModelSnapshots/LSTM-v2-001.h5\n",
+ "Epoch 2/3000\n",
+ " - 38s - loss: 0.5269 - acc: 0.9111 - val_loss: 0.9867 - val_acc: 0.8342\n",
+ "\n",
+ "Epoch 00002: val_acc improved from 0.81977 to 0.83418, saving model to ./ModelSnapshots/LSTM-v2-002.h5\n",
+ "Epoch 3/3000\n",
+ " - 40s - loss: 0.4995 - acc: 0.9215 - val_loss: 1.0149 - val_acc: 0.8315\n",
+ "\n",
+ "Epoch 00003: val_acc did not improve from 0.83418\n",
+ "Epoch 4/3000\n",
+ " - 39s - loss: 0.4941 - acc: 0.9198 - val_loss: 0.9717 - val_acc: 0.8326\n",
+ "\n",
+ "Epoch 00004: val_acc did not improve from 0.83418\n",
+ "Epoch 5/3000\n",
+ " - 39s - loss: 0.4916 - acc: 0.9238 - val_loss: 0.9620 - val_acc: 0.8392\n",
+ "\n",
+ "Epoch 00005: val_acc improved from 0.83418 to 0.83924, saving model to ./ModelSnapshots/LSTM-v2-005.h5\n",
+ "Epoch 6/3000\n",
+ " - 40s - loss: 0.4997 - acc: 0.9241 - val_loss: 1.1411 - val_acc: 0.8108\n",
+ "\n",
+ "Epoch 00006: val_acc did not improve from 0.83924\n",
+ "Epoch 7/3000\n",
+ " - 40s - loss: 0.5363 - acc: 0.9159 - val_loss: 0.9515 - val_acc: 0.8279\n",
+ "\n",
+ "Epoch 00007: val_acc did not improve from 0.83924\n",
+ "Epoch 8/3000\n",
+ " - 39s - loss: 0.4610 - acc: 0.9289 - val_loss: 0.9546 - val_acc: 0.8381\n",
+ "\n",
+ "Epoch 00008: val_acc did not improve from 0.83924\n",
+ "Epoch 9/3000\n",
+ " - 39s - loss: 0.4673 - acc: 0.9283 - val_loss: 0.9096 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00009: val_acc improved from 0.83924 to 0.84585, saving model to ./ModelSnapshots/LSTM-v2-009.h5\n",
+ "Epoch 10/3000\n",
+ " - 39s - loss: 0.4555 - acc: 0.9295 - val_loss: 1.0060 - val_acc: 0.8330\n",
+ "\n",
+ "Epoch 00010: val_acc did not improve from 0.84585\n",
+ "Epoch 11/3000\n",
+ " - 39s - loss: 0.4624 - acc: 0.9296 - val_loss: 0.9722 - val_acc: 0.8318\n",
+ "\n",
+ "Epoch 00011: val_acc did not improve from 0.84585\n",
+ "Epoch 12/3000\n",
+ " - 39s - loss: 0.4971 - acc: 0.9209 - val_loss: 1.1189 - val_acc: 0.8062\n",
+ "\n",
+ "Epoch 00012: val_acc did not improve from 0.84585\n",
+ "Epoch 13/3000\n",
+ " - 39s - loss: 0.5239 - acc: 0.9126 - val_loss: 0.9452 - val_acc: 0.8365\n",
+ "\n",
+ "Epoch 00013: val_acc did not improve from 0.84585\n",
+ "Epoch 14/3000\n",
+ " - 40s - loss: 0.4821 - acc: 0.9224 - val_loss: 0.9675 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00014: val_acc did not improve from 0.84585\n",
+ "Epoch 15/3000\n",
+ " - 39s - loss: 0.4525 - acc: 0.9281 - val_loss: 0.9729 - val_acc: 0.8404\n",
+ "\n",
+ "Epoch 00015: val_acc did not improve from 0.84585\n",
+ "Epoch 16/3000\n",
+ " - 40s - loss: 0.4651 - acc: 0.9295 - val_loss: 0.9648 - val_acc: 0.8392\n",
+ "\n",
+ "Epoch 00016: val_acc did not improve from 0.84585\n",
+ "Epoch 17/3000\n",
+ " - 39s - loss: 0.4269 - acc: 0.9366 - val_loss: 0.8995 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00017: val_acc improved from 0.84585 to 0.84663, saving model to ./ModelSnapshots/LSTM-v2-017.h5\n",
+ "Epoch 18/3000\n",
+ " - 39s - loss: 0.4641 - acc: 0.9268 - val_loss: 0.9171 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00018: val_acc did not improve from 0.84663\n",
+ "Epoch 19/3000\n",
+ " - 39s - loss: 0.4250 - acc: 0.9398 - val_loss: 0.9650 - val_acc: 0.8392\n",
+ "\n",
+ "Epoch 00019: val_acc did not improve from 0.84663\n",
+ "Epoch 20/3000\n",
+ " - 39s - loss: 0.4639 - acc: 0.9330 - val_loss: 0.9631 - val_acc: 0.8404\n",
+ "\n",
+ "Epoch 00020: val_acc did not improve from 0.84663\n",
+ "Epoch 21/3000\n",
+ " - 39s - loss: 0.4601 - acc: 0.9315 - val_loss: 0.9159 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00021: val_acc did not improve from 0.84663\n",
+ "Epoch 22/3000\n",
+ " - 39s - loss: 0.4361 - acc: 0.9370 - val_loss: 0.8874 - val_acc: 0.8470\n",
+ "\n",
+ "Epoch 00022: val_acc improved from 0.84663 to 0.84702, saving model to ./ModelSnapshots/LSTM-v2-022.h5\n",
+ "Epoch 23/3000\n",
+ " - 39s - loss: 0.4560 - acc: 0.9304 - val_loss: 0.8797 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00023: val_acc improved from 0.84702 to 0.84936, saving model to ./ModelSnapshots/LSTM-v2-023.h5\n",
+ "Epoch 24/3000\n",
+ " - 39s - loss: 0.4309 - acc: 0.9355 - val_loss: 0.8776 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00024: val_acc improved from 0.84936 to 0.85286, saving model to ./ModelSnapshots/LSTM-v2-024.h5\n",
+ "Epoch 25/3000\n",
+ " - 40s - loss: 0.4402 - acc: 0.9345 - val_loss: 0.9515 - val_acc: 0.8451\n",
+ "\n",
+ "Epoch 00025: val_acc did not improve from 0.85286\n",
+ "Epoch 26/3000\n",
+ " - 39s - loss: 0.4727 - acc: 0.9309 - val_loss: 0.9700 - val_acc: 0.8256\n",
+ "\n",
+ "Epoch 00026: val_acc did not improve from 0.85286\n",
+ "Epoch 27/3000\n",
+ " - 39s - loss: 0.4610 - acc: 0.9286 - val_loss: 0.9460 - val_acc: 0.8342\n",
+ "\n",
+ "Epoch 00027: val_acc did not improve from 0.85286\n",
+ "Epoch 28/3000\n",
+ " - 39s - loss: 0.4874 - acc: 0.9265 - val_loss: 0.9083 - val_acc: 0.8392\n",
+ "\n",
+ "Epoch 00028: val_acc did not improve from 0.85286\n",
+ "Epoch 29/3000\n",
+ " - 38s - loss: 0.4450 - acc: 0.9321 - val_loss: 0.8898 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00029: val_acc did not improve from 0.85286\n",
+ "Epoch 30/3000\n",
+ " - 40s - loss: 0.4875 - acc: 0.9229 - val_loss: 0.9824 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00030: val_acc did not improve from 0.85286\n",
+ "Epoch 31/3000\n",
+ " - 39s - loss: 0.4338 - acc: 0.9386 - val_loss: 0.9317 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00031: val_acc did not improve from 0.85286\n",
+ "Epoch 32/3000\n",
+ " - 39s - loss: 0.4561 - acc: 0.9327 - val_loss: 0.9210 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00032: val_acc did not improve from 0.85286\n",
+ "Epoch 33/3000\n",
+ " - 39s - loss: 0.4344 - acc: 0.9319 - val_loss: 0.9535 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00033: val_acc did not improve from 0.85286\n",
+ "Epoch 34/3000\n",
+ " - 37s - loss: 0.4195 - acc: 0.9413 - val_loss: 0.8583 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00034: val_acc improved from 0.85286 to 0.85520, saving model to ./ModelSnapshots/LSTM-v2-034.h5\n",
+ "Epoch 35/3000\n",
+ " - 38s - loss: 0.4471 - acc: 0.9343 - val_loss: 0.9999 - val_acc: 0.8373\n",
+ "\n",
+ "Epoch 00035: val_acc did not improve from 0.85520\n",
+ "Epoch 36/3000\n",
+ " - 40s - loss: 0.4331 - acc: 0.9351 - val_loss: 0.9634 - val_acc: 0.8361\n",
+ "\n",
+ "Epoch 00036: val_acc did not improve from 0.85520\n",
+ "Epoch 37/3000\n",
+ " - 39s - loss: 0.4321 - acc: 0.9373 - val_loss: 0.9405 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00037: val_acc did not improve from 0.85520\n",
+ "Epoch 38/3000\n",
+ " - 44s - loss: 0.4193 - acc: 0.9402 - val_loss: 0.9312 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00038: val_acc did not improve from 0.85520\n",
+ "Epoch 39/3000\n",
+ " - 40s - loss: 0.4308 - acc: 0.9396 - val_loss: 0.9360 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00039: val_acc did not improve from 0.85520\n",
+ "Epoch 40/3000\n",
+ " - 42s - loss: 0.4237 - acc: 0.9373 - val_loss: 0.8747 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 00040: val_acc improved from 0.85520 to 0.86026, saving model to ./ModelSnapshots/LSTM-v2-040.h5\n",
+ "Epoch 41/3000\n",
+ " - 41s - loss: 0.4495 - acc: 0.9327 - val_loss: 0.9276 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00041: val_acc did not improve from 0.86026\n",
+ "Epoch 42/3000\n",
+ " - 39s - loss: 0.4586 - acc: 0.9358 - val_loss: 0.9724 - val_acc: 0.8408\n",
+ "\n",
+ "Epoch 00042: val_acc did not improve from 0.86026\n",
+ "Epoch 43/3000\n",
+ " - 40s - loss: 0.4676 - acc: 0.9309 - val_loss: 0.9681 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00043: val_acc did not improve from 0.86026\n",
+ "Epoch 44/3000\n",
+ " - 39s - loss: 0.4382 - acc: 0.9364 - val_loss: 0.8846 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00044: val_acc did not improve from 0.86026\n",
+ "Epoch 45/3000\n",
+ " - 39s - loss: 0.4467 - acc: 0.9346 - val_loss: 0.9072 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00045: val_acc did not improve from 0.86026\n",
+ "Epoch 46/3000\n",
+ " - 39s - loss: 0.4176 - acc: 0.9410 - val_loss: 0.9573 - val_acc: 0.8451\n",
+ "\n",
+ "Epoch 00046: val_acc did not improve from 0.86026\n",
+ "Epoch 47/3000\n",
+ " - 39s - loss: 0.4432 - acc: 0.9327 - val_loss: 0.9924 - val_acc: 0.8350\n",
+ "\n",
+ "Epoch 00047: val_acc did not improve from 0.86026\n",
+ "Epoch 48/3000\n",
+ " - 39s - loss: 0.4237 - acc: 0.9387 - val_loss: 1.0595 - val_acc: 0.8264\n",
+ "\n",
+ "Epoch 00048: val_acc did not improve from 0.86026\n",
+ "Epoch 49/3000\n",
+ " - 39s - loss: 0.4519 - acc: 0.9345 - val_loss: 0.9091 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00049: val_acc did not improve from 0.86026\n",
+ "Epoch 50/3000\n",
+ " - 40s - loss: 0.4514 - acc: 0.9331 - val_loss: 0.8948 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00050: val_acc did not improve from 0.86026\n",
+ "Epoch 51/3000\n",
+ " - 44s - loss: 0.4385 - acc: 0.9380 - val_loss: 0.9468 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00051: val_acc did not improve from 0.86026\n",
+ "Epoch 52/3000\n",
+ " - 39s - loss: 0.4371 - acc: 0.9373 - val_loss: 0.9737 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00052: val_acc did not improve from 0.86026\n",
+ "Epoch 53/3000\n",
+ " - 40s - loss: 0.4320 - acc: 0.9411 - val_loss: 0.9623 - val_acc: 0.8412\n",
+ "\n",
+ "Epoch 00053: val_acc did not improve from 0.86026\n",
+ "Epoch 54/3000\n",
+ " - 40s - loss: 0.4183 - acc: 0.9438 - val_loss: 0.8416 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00054: val_acc did not improve from 0.86026\n",
+ "Epoch 55/3000\n",
+ " - 39s - loss: 0.4246 - acc: 0.9422 - val_loss: 0.9440 - val_acc: 0.8385\n",
+ "\n",
+ "Epoch 00055: val_acc did not improve from 0.86026\n",
+ "Epoch 56/3000\n",
+ " - 40s - loss: 0.4368 - acc: 0.9387 - val_loss: 0.9290 - val_acc: 0.8420\n",
+ "\n",
+ "Epoch 00056: val_acc did not improve from 0.86026\n",
+ "Epoch 57/3000\n",
+ " - 39s - loss: 0.4627 - acc: 0.9333 - val_loss: 0.8787 - val_acc: 0.8474\n",
+ "\n",
+ "Epoch 00057: val_acc did not improve from 0.86026\n",
+ "Epoch 58/3000\n",
+ " - 40s - loss: 0.4149 - acc: 0.9428 - val_loss: 0.9141 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00058: val_acc did not improve from 0.86026\n",
+ "Epoch 59/3000\n",
+ " - 39s - loss: 0.4551 - acc: 0.9372 - val_loss: 0.8431 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00059: val_acc did not improve from 0.86026\n",
+ "Epoch 60/3000\n",
+ " - 39s - loss: 0.4260 - acc: 0.9399 - val_loss: 0.8984 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00060: val_acc did not improve from 0.86026\n",
+ "Epoch 61/3000\n",
+ " - 40s - loss: 0.4569 - acc: 0.9348 - val_loss: 0.8772 - val_acc: 0.8474\n",
+ "\n",
+ "Epoch 00061: val_acc did not improve from 0.86026\n",
+ "Epoch 62/3000\n",
+ " - 39s - loss: 0.4428 - acc: 0.9405 - val_loss: 0.9370 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00062: val_acc did not improve from 0.86026\n",
+ "Epoch 63/3000\n",
+ " - 39s - loss: 0.4368 - acc: 0.9363 - val_loss: 0.9072 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00063: val_acc did not improve from 0.86026\n",
+ "Epoch 64/3000\n",
+ " - 40s - loss: 0.4608 - acc: 0.9307 - val_loss: 0.8726 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00064: val_acc did not improve from 0.86026\n",
+ "Epoch 65/3000\n",
+ " - 39s - loss: 0.4588 - acc: 0.9364 - val_loss: 0.9302 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00065: val_acc did not improve from 0.86026\n",
+ "Epoch 66/3000\n",
+ " - 40s - loss: 0.4514 - acc: 0.9369 - val_loss: 0.9376 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00066: val_acc did not improve from 0.86026\n",
+ "Epoch 67/3000\n",
+ " - 39s - loss: 0.4393 - acc: 0.9395 - val_loss: 0.9064 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00067: val_acc did not improve from 0.86026\n",
+ "Epoch 68/3000\n",
+ " - 40s - loss: 0.4319 - acc: 0.9389 - val_loss: 0.8993 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00068: val_acc did not improve from 0.86026\n",
+ "Epoch 69/3000\n",
+ " - 39s - loss: 0.4387 - acc: 0.9360 - val_loss: 0.8797 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00069: val_acc did not improve from 0.86026\n",
+ "Epoch 70/3000\n",
+ " - 39s - loss: 0.4307 - acc: 0.9392 - val_loss: 0.9072 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00070: val_acc did not improve from 0.86026\n",
+ "Epoch 71/3000\n",
+ " - 38s - loss: 0.4298 - acc: 0.9386 - val_loss: 0.9000 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00071: val_acc did not improve from 0.86026\n",
+ "Epoch 72/3000\n",
+ " - 39s - loss: 0.4512 - acc: 0.9358 - val_loss: 0.9628 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00072: val_acc did not improve from 0.86026\n",
+ "Epoch 73/3000\n",
+ " - 39s - loss: 0.4398 - acc: 0.9380 - val_loss: 0.9015 - val_acc: 0.8404\n",
+ "\n",
+ "Epoch 00073: val_acc did not improve from 0.86026\n",
+ "Epoch 74/3000\n",
+ " - 39s - loss: 0.4250 - acc: 0.9405 - val_loss: 0.9228 - val_acc: 0.8416\n",
+ "\n",
+ "Epoch 00074: val_acc did not improve from 0.86026\n",
+ "Epoch 75/3000\n",
+ " - 39s - loss: 0.4376 - acc: 0.9389 - val_loss: 1.0756 - val_acc: 0.8264\n",
+ "\n",
+ "Epoch 00075: val_acc did not improve from 0.86026\n",
+ "Epoch 76/3000\n",
+ " - 40s - loss: 0.4561 - acc: 0.9345 - val_loss: 0.9589 - val_acc: 0.8346\n",
+ "\n",
+ "Epoch 00076: val_acc did not improve from 0.86026\n",
+ "Epoch 77/3000\n",
+ " - 39s - loss: 0.4814 - acc: 0.9318 - val_loss: 0.9344 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00077: val_acc did not improve from 0.86026\n",
+ "Epoch 78/3000\n",
+ " - 39s - loss: 0.4547 - acc: 0.9336 - val_loss: 0.9314 - val_acc: 0.8404\n",
+ "\n",
+ "Epoch 00078: val_acc did not improve from 0.86026\n",
+ "Epoch 79/3000\n",
+ " - 40s - loss: 0.4177 - acc: 0.9438 - val_loss: 0.9750 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00079: val_acc did not improve from 0.86026\n",
+ "Epoch 80/3000\n",
+ " - 42s - loss: 0.4243 - acc: 0.9408 - val_loss: 0.9280 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00080: val_acc did not improve from 0.86026\n",
+ "Epoch 81/3000\n",
+ " - 41s - loss: 0.4167 - acc: 0.9432 - val_loss: 0.9659 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00081: val_acc did not improve from 0.86026\n",
+ "Epoch 82/3000\n",
+ " - 39s - loss: 0.4214 - acc: 0.9413 - val_loss: 0.9068 - val_acc: 0.8470\n",
+ "\n",
+ "Epoch 00082: val_acc did not improve from 0.86026\n",
+ "Epoch 83/3000\n",
+ " - 39s - loss: 0.4220 - acc: 0.9389 - val_loss: 0.9267 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00083: val_acc did not improve from 0.86026\n",
+ "Epoch 84/3000\n",
+ " - 39s - loss: 0.4349 - acc: 0.9399 - val_loss: 0.8673 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00084: val_acc did not improve from 0.86026\n",
+ "\n",
+ "Epoch 00084: ReduceLROnPlateau reducing learning rate to 9.499999760009814e-05.\n",
+ "Epoch 85/3000\n",
+ " - 39s - loss: 0.4043 - acc: 0.9423 - val_loss: 0.9716 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00085: val_acc did not improve from 0.86026\n",
+ "Epoch 86/3000\n",
+ " - 39s - loss: 0.4033 - acc: 0.9470 - val_loss: 0.8990 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00086: val_acc did not improve from 0.86026\n",
+ "Epoch 87/3000\n",
+ " - 39s - loss: 0.4128 - acc: 0.9420 - val_loss: 0.9176 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00087: val_acc did not improve from 0.86026\n",
+ "Epoch 88/3000\n",
+ " - 39s - loss: 0.4322 - acc: 0.9413 - val_loss: 0.9477 - val_acc: 0.8455\n",
+ "\n",
+ "Epoch 00088: val_acc did not improve from 0.86026\n",
+ "Epoch 89/3000\n",
+ " - 39s - loss: 0.4287 - acc: 0.9398 - val_loss: 0.9319 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00089: val_acc did not improve from 0.86026\n",
+ "Epoch 90/3000\n",
+ " - 39s - loss: 0.4087 - acc: 0.9429 - val_loss: 0.9390 - val_acc: 0.8381\n",
+ "\n",
+ "Epoch 00090: val_acc did not improve from 0.86026\n",
+ "Epoch 91/3000\n",
+ " - 39s - loss: 0.4065 - acc: 0.9457 - val_loss: 0.9405 - val_acc: 0.8385\n",
+ "\n",
+ "Epoch 00091: val_acc did not improve from 0.86026\n",
+ "Epoch 92/3000\n",
+ " - 39s - loss: 0.4121 - acc: 0.9438 - val_loss: 0.9280 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00092: val_acc did not improve from 0.86026\n",
+ "Epoch 93/3000\n",
+ " - 39s - loss: 0.4262 - acc: 0.9389 - val_loss: 0.9276 - val_acc: 0.8342\n",
+ "\n",
+ "Epoch 00093: val_acc did not improve from 0.86026\n",
+ "Epoch 94/3000\n",
+ " - 39s - loss: 0.4168 - acc: 0.9432 - val_loss: 0.8850 - val_acc: 0.8392\n",
+ "\n",
+ "Epoch 00094: val_acc did not improve from 0.86026\n",
+ "Epoch 95/3000\n",
+ " - 39s - loss: 0.4275 - acc: 0.9401 - val_loss: 0.9073 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00095: val_acc did not improve from 0.86026\n",
+ "Epoch 96/3000\n",
+ " - 39s - loss: 0.4154 - acc: 0.9410 - val_loss: 0.9839 - val_acc: 0.8299\n",
+ "\n",
+ "Epoch 00096: val_acc did not improve from 0.86026\n",
+ "Epoch 97/3000\n",
+ " - 40s - loss: 0.4134 - acc: 0.9446 - val_loss: 1.0332 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00097: val_acc did not improve from 0.86026\n",
+ "Epoch 98/3000\n",
+ " - 39s - loss: 0.4648 - acc: 0.9306 - val_loss: 1.0208 - val_acc: 0.8365\n",
+ "\n",
+ "Epoch 00098: val_acc did not improve from 0.86026\n",
+ "Epoch 99/3000\n",
+ " - 39s - loss: 0.4515 - acc: 0.9369 - val_loss: 0.9568 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00099: val_acc did not improve from 0.86026\n",
+ "Epoch 100/3000\n",
+ " - 39s - loss: 0.4186 - acc: 0.9411 - val_loss: 0.9582 - val_acc: 0.8365\n",
+ "\n",
+ "Epoch 00100: val_acc did not improve from 0.86026\n",
+ "Epoch 101/3000\n",
+ " - 39s - loss: 0.4065 - acc: 0.9441 - val_loss: 0.9101 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00101: val_acc did not improve from 0.86026\n",
+ "Epoch 102/3000\n",
+ " - 39s - loss: 0.4159 - acc: 0.9429 - val_loss: 0.9969 - val_acc: 0.8388\n",
+ "\n",
+ "Epoch 00102: val_acc did not improve from 0.86026\n",
+ "Epoch 103/3000\n",
+ " - 39s - loss: 0.4549 - acc: 0.9334 - val_loss: 1.0005 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00103: val_acc did not improve from 0.86026\n",
+ "Epoch 104/3000\n",
+ " - 39s - loss: 0.4101 - acc: 0.9449 - val_loss: 0.7630 - val_acc: 0.8665\n",
+ "\n",
+ "Epoch 00104: val_acc improved from 0.86026 to 0.86649, saving model to ./ModelSnapshots/LSTM-v2-104.h5\n",
+ "Epoch 105/3000\n",
+ " - 39s - loss: 0.4220 - acc: 0.9405 - val_loss: 0.8933 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00105: val_acc did not improve from 0.86649\n",
+ "Epoch 106/3000\n",
+ " - 38s - loss: 0.4094 - acc: 0.9417 - val_loss: 0.8874 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00106: val_acc did not improve from 0.86649\n",
+ "Epoch 107/3000\n",
+ " - 38s - loss: 0.4191 - acc: 0.9401 - val_loss: 0.8584 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00107: val_acc did not improve from 0.86649\n",
+ "Epoch 108/3000\n",
+ " - 39s - loss: 0.4098 - acc: 0.9446 - val_loss: 0.9456 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00108: val_acc did not improve from 0.86649\n",
+ "Epoch 109/3000\n",
+ " - 39s - loss: 0.4520 - acc: 0.9348 - val_loss: 0.9103 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00109: val_acc did not improve from 0.86649\n",
+ "Epoch 110/3000\n",
+ " - 39s - loss: 0.4052 - acc: 0.9447 - val_loss: 0.8925 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00110: val_acc did not improve from 0.86649\n",
+ "Epoch 111/3000\n",
+ " - 39s - loss: 0.3867 - acc: 0.9467 - val_loss: 0.9218 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00111: val_acc did not improve from 0.86649\n",
+ "Epoch 112/3000\n",
+ " - 39s - loss: 0.4004 - acc: 0.9466 - val_loss: 0.9572 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00112: val_acc did not improve from 0.86649\n",
+ "Epoch 113/3000\n",
+ " - 40s - loss: 0.4357 - acc: 0.9370 - val_loss: 0.9873 - val_acc: 0.8420\n",
+ "\n",
+ "Epoch 00113: val_acc did not improve from 0.86649\n",
+ "Epoch 114/3000\n",
+ " - 39s - loss: 0.4327 - acc: 0.9399 - val_loss: 0.9470 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00114: val_acc did not improve from 0.86649\n",
+ "Epoch 115/3000\n",
+ " - 39s - loss: 0.4282 - acc: 0.9454 - val_loss: 0.8582 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00115: val_acc did not improve from 0.86649\n",
+ "Epoch 116/3000\n",
+ " - 39s - loss: 0.4082 - acc: 0.9417 - val_loss: 1.0239 - val_acc: 0.8357\n",
+ "\n",
+ "Epoch 00116: val_acc did not improve from 0.86649\n",
+ "Epoch 117/3000\n",
+ " - 39s - loss: 0.4483 - acc: 0.9348 - val_loss: 0.8706 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00117: val_acc did not improve from 0.86649\n",
+ "Epoch 118/3000\n",
+ " - 40s - loss: 0.4208 - acc: 0.9408 - val_loss: 0.9734 - val_acc: 0.8388\n",
+ "\n",
+ "Epoch 00118: val_acc did not improve from 0.86649\n",
+ "Epoch 119/3000\n",
+ " - 38s - loss: 0.4357 - acc: 0.9364 - val_loss: 0.9709 - val_acc: 0.8392\n",
+ "\n",
+ "Epoch 00119: val_acc did not improve from 0.86649\n",
+ "Epoch 120/3000\n",
+ " - 39s - loss: 0.4519 - acc: 0.9331 - val_loss: 0.9216 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00120: val_acc did not improve from 0.86649\n",
+ "Epoch 121/3000\n",
+ " - 39s - loss: 0.4216 - acc: 0.9413 - val_loss: 0.9375 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00121: val_acc did not improve from 0.86649\n",
+ "Epoch 122/3000\n",
+ " - 39s - loss: 0.4187 - acc: 0.9413 - val_loss: 0.8751 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00122: val_acc did not improve from 0.86649\n",
+ "Epoch 123/3000\n",
+ " - 39s - loss: 0.4169 - acc: 0.9469 - val_loss: 0.8852 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00123: val_acc did not improve from 0.86649\n",
+ "Epoch 124/3000\n",
+ " - 39s - loss: 0.4069 - acc: 0.9461 - val_loss: 0.9012 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00124: val_acc did not improve from 0.86649\n",
+ "Epoch 125/3000\n",
+ " - 39s - loss: 0.4158 - acc: 0.9405 - val_loss: 0.8521 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00125: val_acc did not improve from 0.86649\n",
+ "Epoch 126/3000\n",
+ " - 39s - loss: 0.4191 - acc: 0.9414 - val_loss: 0.9001 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00126: val_acc did not improve from 0.86649\n",
+ "Epoch 127/3000\n",
+ " - 39s - loss: 0.3975 - acc: 0.9476 - val_loss: 0.9316 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00127: val_acc did not improve from 0.86649\n",
+ "Epoch 128/3000\n",
+ " - 39s - loss: 0.4252 - acc: 0.9402 - val_loss: 0.8905 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00128: val_acc did not improve from 0.86649\n",
+ "Epoch 129/3000\n",
+ " - 39s - loss: 0.4259 - acc: 0.9392 - val_loss: 0.8737 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00129: val_acc did not improve from 0.86649\n",
+ "Epoch 130/3000\n",
+ " - 39s - loss: 0.4075 - acc: 0.9441 - val_loss: 0.9467 - val_acc: 0.8381\n",
+ "\n",
+ "Epoch 00130: val_acc did not improve from 0.86649\n",
+ "Epoch 131/3000\n",
+ " - 39s - loss: 0.3948 - acc: 0.9472 - val_loss: 0.8441 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00131: val_acc did not improve from 0.86649\n",
+ "Epoch 132/3000\n",
+ " - 39s - loss: 0.4289 - acc: 0.9410 - val_loss: 0.9384 - val_acc: 0.8396\n",
+ "\n",
+ "Epoch 00132: val_acc did not improve from 0.86649\n",
+ "Epoch 133/3000\n",
+ " - 39s - loss: 0.4148 - acc: 0.9431 - val_loss: 0.9680 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00133: val_acc did not improve from 0.86649\n",
+ "Epoch 134/3000\n",
+ " - 39s - loss: 0.4544 - acc: 0.9387 - val_loss: 0.9413 - val_acc: 0.8455\n",
+ "\n",
+ "Epoch 00134: val_acc did not improve from 0.86649\n",
+ "\n",
+ "Epoch 00134: ReduceLROnPlateau reducing learning rate to 9.02499959920533e-05.\n",
+ "Epoch 135/3000\n",
+ " - 39s - loss: 0.4370 - acc: 0.9402 - val_loss: 0.9007 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00135: val_acc did not improve from 0.86649\n",
+ "Epoch 136/3000\n",
+ " - 38s - loss: 0.3923 - acc: 0.9472 - val_loss: 0.8570 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00136: val_acc did not improve from 0.86649\n",
+ "Epoch 137/3000\n",
+ " - 39s - loss: 0.3934 - acc: 0.9450 - val_loss: 0.9029 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00137: val_acc did not improve from 0.86649\n",
+ "Epoch 138/3000\n",
+ " - 39s - loss: 0.3801 - acc: 0.9508 - val_loss: 0.9045 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00138: val_acc did not improve from 0.86649\n",
+ "Epoch 139/3000\n",
+ " - 40s - loss: 0.4030 - acc: 0.9454 - val_loss: 0.9157 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00139: val_acc did not improve from 0.86649\n",
+ "Epoch 140/3000\n",
+ " - 39s - loss: 0.3966 - acc: 0.9485 - val_loss: 0.9957 - val_acc: 0.8342\n",
+ "\n",
+ "Epoch 00140: val_acc did not improve from 0.86649\n",
+ "Epoch 141/3000\n",
+ " - 39s - loss: 0.4135 - acc: 0.9443 - val_loss: 0.9842 - val_acc: 0.8346\n",
+ "\n",
+ "Epoch 00141: val_acc did not improve from 0.86649\n",
+ "Epoch 142/3000\n",
+ " - 39s - loss: 0.3960 - acc: 0.9490 - val_loss: 0.9163 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00142: val_acc did not improve from 0.86649\n",
+ "Epoch 143/3000\n",
+ " - 39s - loss: 0.4101 - acc: 0.9410 - val_loss: 0.8855 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 00143: val_acc did not improve from 0.86649\n",
+ "Epoch 144/3000\n",
+ " - 39s - loss: 0.3965 - acc: 0.9509 - val_loss: 1.0028 - val_acc: 0.8303\n",
+ "\n",
+ "Epoch 00144: val_acc did not improve from 0.86649\n",
+ "Epoch 145/3000\n",
+ " - 40s - loss: 0.4101 - acc: 0.9413 - val_loss: 0.9372 - val_acc: 0.8400\n",
+ "\n",
+ "Epoch 00145: val_acc did not improve from 0.86649\n",
+ "Epoch 146/3000\n",
+ " - 39s - loss: 0.4004 - acc: 0.9444 - val_loss: 0.9043 - val_acc: 0.8474\n",
+ "\n",
+ "Epoch 00146: val_acc did not improve from 0.86649\n",
+ "Epoch 147/3000\n",
+ " - 39s - loss: 0.4090 - acc: 0.9440 - val_loss: 0.8569 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00147: val_acc did not improve from 0.86649\n",
+ "Epoch 148/3000\n",
+ " - 39s - loss: 0.4181 - acc: 0.9431 - val_loss: 0.9812 - val_acc: 0.8334\n",
+ "\n",
+ "Epoch 00148: val_acc did not improve from 0.86649\n",
+ "Epoch 149/3000\n",
+ " - 39s - loss: 0.4085 - acc: 0.9450 - val_loss: 0.8226 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00149: val_acc did not improve from 0.86649\n",
+ "Epoch 150/3000\n",
+ " - 39s - loss: 0.4201 - acc: 0.9411 - val_loss: 0.9429 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00150: val_acc did not improve from 0.86649\n",
+ "Epoch 151/3000\n",
+ " - 39s - loss: 0.4101 - acc: 0.9428 - val_loss: 0.8741 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00151: val_acc did not improve from 0.86649\n",
+ "Epoch 152/3000\n",
+ " - 39s - loss: 0.4096 - acc: 0.9438 - val_loss: 0.8906 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00152: val_acc did not improve from 0.86649\n",
+ "Epoch 153/3000\n",
+ " - 39s - loss: 0.4016 - acc: 0.9444 - val_loss: 0.9297 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00153: val_acc did not improve from 0.86649\n",
+ "Epoch 154/3000\n",
+ " - 40s - loss: 0.4003 - acc: 0.9473 - val_loss: 1.0061 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00154: val_acc did not improve from 0.86649\n",
+ "Epoch 155/3000\n",
+ " - 39s - loss: 0.4044 - acc: 0.9454 - val_loss: 1.0106 - val_acc: 0.8412\n",
+ "\n",
+ "Epoch 00155: val_acc did not improve from 0.86649\n",
+ "Epoch 156/3000\n",
+ " - 39s - loss: 0.4122 - acc: 0.9414 - val_loss: 0.9389 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00156: val_acc did not improve from 0.86649\n",
+ "Epoch 157/3000\n",
+ " - 39s - loss: 0.3976 - acc: 0.9446 - val_loss: 0.9240 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00157: val_acc did not improve from 0.86649\n",
+ "Epoch 158/3000\n",
+ " - 39s - loss: 0.3838 - acc: 0.9523 - val_loss: 0.9041 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00158: val_acc did not improve from 0.86649\n",
+ "Epoch 159/3000\n",
+ " - 39s - loss: 0.4119 - acc: 0.9441 - val_loss: 0.9310 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00159: val_acc did not improve from 0.86649\n",
+ "Epoch 160/3000\n",
+ " - 39s - loss: 0.3936 - acc: 0.9464 - val_loss: 0.9601 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00160: val_acc did not improve from 0.86649\n",
+ "Epoch 161/3000\n",
+ " - 39s - loss: 0.4161 - acc: 0.9435 - val_loss: 0.9179 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00161: val_acc did not improve from 0.86649\n",
+ "Epoch 162/3000\n",
+ " - 39s - loss: 0.4000 - acc: 0.9458 - val_loss: 0.9565 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00162: val_acc did not improve from 0.86649\n",
+ "Epoch 163/3000\n",
+ " - 39s - loss: 0.4164 - acc: 0.9419 - val_loss: 1.0898 - val_acc: 0.8272\n",
+ "\n",
+ "Epoch 00163: val_acc did not improve from 0.86649\n",
+ "Epoch 164/3000\n",
+ " - 39s - loss: 0.4255 - acc: 0.9398 - val_loss: 0.9603 - val_acc: 0.8381\n",
+ "\n",
+ "Epoch 00164: val_acc did not improve from 0.86649\n",
+ "\n",
+ "Epoch 00164: ReduceLROnPlateau reducing learning rate to 8.573749619245064e-05.\n",
+ "Epoch 165/3000\n",
+ " - 39s - loss: 0.4097 - acc: 0.9446 - val_loss: 0.9117 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00165: val_acc did not improve from 0.86649\n",
+ "Epoch 166/3000\n",
+ " - 39s - loss: 0.3816 - acc: 0.9491 - val_loss: 0.9233 - val_acc: 0.8455\n",
+ "\n",
+ "Epoch 00166: val_acc did not improve from 0.86649\n",
+ "Epoch 167/3000\n",
+ " - 39s - loss: 0.4109 - acc: 0.9426 - val_loss: 0.9021 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00167: val_acc did not improve from 0.86649\n",
+ "Epoch 168/3000\n",
+ " - 40s - loss: 0.4019 - acc: 0.9446 - val_loss: 0.9406 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00168: val_acc did not improve from 0.86649\n",
+ "Epoch 169/3000\n",
+ " - 40s - loss: 0.3951 - acc: 0.9464 - val_loss: 0.9533 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00169: val_acc did not improve from 0.86649\n",
+ "Epoch 170/3000\n",
+ " - 39s - loss: 0.3935 - acc: 0.9485 - val_loss: 0.9551 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00170: val_acc did not improve from 0.86649\n",
+ "Epoch 171/3000\n",
+ " - 39s - loss: 0.3931 - acc: 0.9496 - val_loss: 0.9058 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00171: val_acc did not improve from 0.86649\n",
+ "Epoch 172/3000\n",
+ " - 40s - loss: 0.3885 - acc: 0.9467 - val_loss: 0.9725 - val_acc: 0.8400\n",
+ "\n",
+ "Epoch 00172: val_acc did not improve from 0.86649\n",
+ "Epoch 173/3000\n",
+ " - 39s - loss: 0.3981 - acc: 0.9481 - val_loss: 0.9014 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00173: val_acc did not improve from 0.86649\n",
+ "Epoch 174/3000\n",
+ " - 40s - loss: 0.4189 - acc: 0.9460 - val_loss: 0.9159 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00174: val_acc did not improve from 0.86649\n",
+ "Epoch 175/3000\n",
+ " - 39s - loss: 0.4159 - acc: 0.9380 - val_loss: 0.9044 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00175: val_acc did not improve from 0.86649\n",
+ "Epoch 176/3000\n",
+ " - 39s - loss: 0.3944 - acc: 0.9482 - val_loss: 0.9412 - val_acc: 0.8385\n",
+ "\n",
+ "Epoch 00176: val_acc did not improve from 0.86649\n",
+ "Epoch 177/3000\n",
+ " - 39s - loss: 0.3890 - acc: 0.9502 - val_loss: 0.9233 - val_acc: 0.8451\n",
+ "\n",
+ "Epoch 00177: val_acc did not improve from 0.86649\n",
+ "Epoch 178/3000\n",
+ " - 39s - loss: 0.3900 - acc: 0.9515 - val_loss: 0.8865 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00178: val_acc did not improve from 0.86649\n",
+ "Epoch 179/3000\n",
+ " - 40s - loss: 0.4063 - acc: 0.9455 - val_loss: 0.9673 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00179: val_acc did not improve from 0.86649\n",
+ "Epoch 180/3000\n",
+ " - 39s - loss: 0.3787 - acc: 0.9499 - val_loss: 0.9494 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00180: val_acc did not improve from 0.86649\n",
+ "Epoch 181/3000\n",
+ " - 40s - loss: 0.4018 - acc: 0.9461 - val_loss: 0.9874 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00181: val_acc did not improve from 0.86649\n",
+ "Epoch 182/3000\n",
+ " - 39s - loss: 0.3996 - acc: 0.9493 - val_loss: 0.8876 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00182: val_acc did not improve from 0.86649\n",
+ "Epoch 183/3000\n",
+ " - 40s - loss: 0.4109 - acc: 0.9461 - val_loss: 0.8838 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00183: val_acc did not improve from 0.86649\n",
+ "Epoch 184/3000\n",
+ " - 39s - loss: 0.3980 - acc: 0.9457 - val_loss: 0.8478 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 00184: val_acc did not improve from 0.86649\n",
+ "Epoch 185/3000\n",
+ " - 40s - loss: 0.3885 - acc: 0.9467 - val_loss: 0.9210 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00185: val_acc did not improve from 0.86649\n",
+ "Epoch 186/3000\n",
+ " - 40s - loss: 0.3894 - acc: 0.9499 - val_loss: 0.9790 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00186: val_acc did not improve from 0.86649\n",
+ "Epoch 187/3000\n",
+ " - 39s - loss: 0.3871 - acc: 0.9494 - val_loss: 0.9039 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00187: val_acc did not improve from 0.86649\n",
+ "Epoch 188/3000\n",
+ " - 40s - loss: 0.3955 - acc: 0.9470 - val_loss: 0.9084 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00188: val_acc did not improve from 0.86649\n",
+ "Epoch 189/3000\n",
+ " - 40s - loss: 0.4062 - acc: 0.9446 - val_loss: 0.9570 - val_acc: 0.8318\n",
+ "\n",
+ "Epoch 00189: val_acc did not improve from 0.86649\n",
+ "Epoch 190/3000\n",
+ " - 39s - loss: 0.3884 - acc: 0.9469 - val_loss: 0.9881 - val_acc: 0.8400\n",
+ "\n",
+ "Epoch 00190: val_acc did not improve from 0.86649\n",
+ "Epoch 191/3000\n",
+ " - 39s - loss: 0.4025 - acc: 0.9472 - val_loss: 0.9180 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00191: val_acc did not improve from 0.86649\n",
+ "Epoch 192/3000\n",
+ " - 39s - loss: 0.3804 - acc: 0.9502 - val_loss: 0.9212 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00192: val_acc did not improve from 0.86649\n",
+ "Epoch 193/3000\n",
+ " - 39s - loss: 0.4011 - acc: 0.9416 - val_loss: 1.0128 - val_acc: 0.8385\n",
+ "\n",
+ "Epoch 00193: val_acc did not improve from 0.86649\n",
+ "Epoch 194/3000\n",
+ " - 40s - loss: 0.4137 - acc: 0.9426 - val_loss: 0.9164 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00194: val_acc did not improve from 0.86649\n",
+ "\n",
+ "Epoch 00194: ReduceLROnPlateau reducing learning rate to 8.145062311086804e-05.\n",
+ "Epoch 195/3000\n",
+ " - 39s - loss: 0.3887 - acc: 0.9482 - val_loss: 0.9217 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00195: val_acc did not improve from 0.86649\n",
+ "Epoch 196/3000\n",
+ " - 40s - loss: 0.3835 - acc: 0.9490 - val_loss: 0.9337 - val_acc: 0.8412\n",
+ "\n",
+ "Epoch 00196: val_acc did not improve from 0.86649\n",
+ "Epoch 197/3000\n",
+ " - 39s - loss: 0.3901 - acc: 0.9478 - val_loss: 0.9818 - val_acc: 0.8353\n",
+ "\n",
+ "Epoch 00197: val_acc did not improve from 0.86649\n",
+ "Epoch 198/3000\n",
+ " - 39s - loss: 0.4046 - acc: 0.9461 - val_loss: 1.0099 - val_acc: 0.8326\n",
+ "\n",
+ "Epoch 00198: val_acc did not improve from 0.86649\n",
+ "Epoch 199/3000\n",
+ " - 39s - loss: 0.4022 - acc: 0.9463 - val_loss: 0.8810 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00199: val_acc did not improve from 0.86649\n",
+ "Epoch 200/3000\n",
+ " - 39s - loss: 0.3884 - acc: 0.9505 - val_loss: 0.8836 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00200: val_acc did not improve from 0.86649\n",
+ "Epoch 201/3000\n",
+ " - 39s - loss: 0.4034 - acc: 0.9461 - val_loss: 0.9049 - val_acc: 0.8400\n",
+ "\n",
+ "Epoch 00201: val_acc did not improve from 0.86649\n",
+ "Epoch 202/3000\n",
+ " - 39s - loss: 0.3845 - acc: 0.9475 - val_loss: 0.9071 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00202: val_acc did not improve from 0.86649\n",
+ "Epoch 203/3000\n",
+ " - 40s - loss: 0.3795 - acc: 0.9503 - val_loss: 0.9272 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00203: val_acc did not improve from 0.86649\n",
+ "Epoch 204/3000\n",
+ " - 39s - loss: 0.3677 - acc: 0.9521 - val_loss: 0.8768 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00204: val_acc did not improve from 0.86649\n",
+ "Epoch 205/3000\n",
+ " - 40s - loss: 0.3786 - acc: 0.9527 - val_loss: 0.9458 - val_acc: 0.8365\n",
+ "\n",
+ "Epoch 00205: val_acc did not improve from 0.86649\n",
+ "Epoch 206/3000\n",
+ " - 39s - loss: 0.4036 - acc: 0.9481 - val_loss: 0.8992 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00206: val_acc did not improve from 0.86649\n",
+ "Epoch 207/3000\n",
+ " - 39s - loss: 0.3865 - acc: 0.9467 - val_loss: 0.8995 - val_acc: 0.8439\n",
+ "\n",
+ "Epoch 00207: val_acc did not improve from 0.86649\n",
+ "Epoch 208/3000\n",
+ " - 39s - loss: 0.3734 - acc: 0.9532 - val_loss: 0.9297 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00208: val_acc did not improve from 0.86649\n",
+ "Epoch 209/3000\n",
+ " - 40s - loss: 0.3811 - acc: 0.9517 - val_loss: 0.8610 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00209: val_acc did not improve from 0.86649\n",
+ "Epoch 210/3000\n",
+ " - 39s - loss: 0.3746 - acc: 0.9517 - val_loss: 0.9199 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00210: val_acc did not improve from 0.86649\n",
+ "Epoch 211/3000\n",
+ " - 39s - loss: 0.3745 - acc: 0.9505 - val_loss: 0.9545 - val_acc: 0.8361\n",
+ "\n",
+ "Epoch 00211: val_acc did not improve from 0.86649\n",
+ "Epoch 212/3000\n",
+ " - 40s - loss: 0.3858 - acc: 0.9478 - val_loss: 0.8884 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00212: val_acc did not improve from 0.86649\n",
+ "Epoch 213/3000\n",
+ " - 39s - loss: 0.3898 - acc: 0.9491 - val_loss: 0.9675 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00213: val_acc did not improve from 0.86649\n",
+ "Epoch 214/3000\n",
+ " - 39s - loss: 0.4043 - acc: 0.9485 - val_loss: 0.8941 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00214: val_acc did not improve from 0.86649\n",
+ "Epoch 215/3000\n",
+ " - 39s - loss: 0.3720 - acc: 0.9538 - val_loss: 0.9408 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00215: val_acc did not improve from 0.86649\n",
+ "Epoch 216/3000\n",
+ " - 39s - loss: 0.3596 - acc: 0.9541 - val_loss: 0.9415 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00216: val_acc did not improve from 0.86649\n",
+ "Epoch 217/3000\n",
+ " - 39s - loss: 0.3797 - acc: 0.9508 - val_loss: 0.9347 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00217: val_acc did not improve from 0.86649\n",
+ "Epoch 218/3000\n",
+ " - 39s - loss: 0.3925 - acc: 0.9473 - val_loss: 0.8918 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00218: val_acc did not improve from 0.86649\n",
+ "Epoch 219/3000\n",
+ " - 39s - loss: 0.3767 - acc: 0.9488 - val_loss: 0.8936 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00219: val_acc did not improve from 0.86649\n",
+ "Epoch 220/3000\n",
+ " - 40s - loss: 0.3981 - acc: 0.9444 - val_loss: 0.9101 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00220: val_acc did not improve from 0.86649\n",
+ "Epoch 221/3000\n",
+ " - 39s - loss: 0.3935 - acc: 0.9484 - val_loss: 0.9340 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00221: val_acc did not improve from 0.86649\n",
+ "Epoch 222/3000\n",
+ " - 39s - loss: 0.3827 - acc: 0.9454 - val_loss: 0.9111 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00224: val_acc did not improve from 0.86649\n",
+ "\n",
+ "Epoch 00224: ReduceLROnPlateau reducing learning rate to 7.737808919046074e-05.\n",
+ "Epoch 225/3000\n",
+ " - 39s - loss: 0.4087 - acc: 0.9426 - val_loss: 0.9580 - val_acc: 0.8396\n",
+ "\n",
+ "Epoch 00225: val_acc did not improve from 0.86649\n",
+ "Epoch 226/3000\n",
+ " - 40s - loss: 0.3773 - acc: 0.9485 - val_loss: 0.9078 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00226: val_acc did not improve from 0.86649\n",
+ "Epoch 227/3000\n",
+ " - 40s - loss: 0.3660 - acc: 0.9530 - val_loss: 0.8983 - val_acc: 0.8451\n",
+ "\n",
+ "Epoch 00227: val_acc did not improve from 0.86649\n",
+ "Epoch 228/3000\n",
+ " - 39s - loss: 0.3804 - acc: 0.9490 - val_loss: 0.8204 - val_acc: 0.8618\n",
+ "\n",
+ "Epoch 00228: val_acc did not improve from 0.86649\n",
+ "Epoch 229/3000\n",
+ " - 39s - loss: 0.3861 - acc: 0.9479 - val_loss: 0.8979 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00229: val_acc did not improve from 0.86649\n",
+ "Epoch 230/3000\n",
+ " - 39s - loss: 0.4036 - acc: 0.9449 - val_loss: 0.8535 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00230: val_acc did not improve from 0.86649\n",
+ "Epoch 231/3000\n",
+ " - 39s - loss: 0.3948 - acc: 0.9464 - val_loss: 0.9545 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00231: val_acc did not improve from 0.86649\n",
+ "Epoch 232/3000\n",
+ " - 39s - loss: 0.3763 - acc: 0.9517 - val_loss: 1.0186 - val_acc: 0.8338\n",
+ "\n",
+ "Epoch 00232: val_acc did not improve from 0.86649\n",
+ "Epoch 233/3000\n",
+ " - 39s - loss: 0.3607 - acc: 0.9521 - val_loss: 0.9043 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00233: val_acc did not improve from 0.86649\n",
+ "Epoch 234/3000\n",
+ " - 40s - loss: 0.3710 - acc: 0.9505 - val_loss: 0.9583 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00234: val_acc did not improve from 0.86649\n",
+ "Epoch 235/3000\n",
+ " - 39s - loss: 0.3781 - acc: 0.9488 - val_loss: 0.8799 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00235: val_acc did not improve from 0.86649\n",
+ "Epoch 236/3000\n",
+ " - 40s - loss: 0.3754 - acc: 0.9534 - val_loss: 0.9165 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00236: val_acc did not improve from 0.86649\n",
+ "Epoch 237/3000\n",
+ " - 40s - loss: 0.3724 - acc: 0.9540 - val_loss: 0.9543 - val_acc: 0.8388\n",
+ "\n",
+ "Epoch 00237: val_acc did not improve from 0.86649\n",
+ "Epoch 238/3000\n",
+ " - 40s - loss: 0.3855 - acc: 0.9484 - val_loss: 0.9242 - val_acc: 0.8474\n",
+ "\n",
+ "Epoch 00238: val_acc did not improve from 0.86649\n",
+ "Epoch 239/3000\n",
+ " - 39s - loss: 0.3637 - acc: 0.9529 - val_loss: 0.9195 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00239: val_acc did not improve from 0.86649\n",
+ "Epoch 240/3000\n",
+ " - 39s - loss: 0.3883 - acc: 0.9500 - val_loss: 0.9836 - val_acc: 0.8357\n",
+ "\n",
+ "Epoch 00240: val_acc did not improve from 0.86649\n",
+ "Epoch 241/3000\n",
+ " - 40s - loss: 0.3729 - acc: 0.9505 - val_loss: 0.8994 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00241: val_acc did not improve from 0.86649\n",
+ "Epoch 242/3000\n",
+ " - 39s - loss: 0.3858 - acc: 0.9478 - val_loss: 0.8928 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00242: val_acc did not improve from 0.86649\n",
+ "Epoch 243/3000\n",
+ " - 40s - loss: 0.3655 - acc: 0.9547 - val_loss: 0.8937 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00243: val_acc did not improve from 0.86649\n",
+ "Epoch 244/3000\n",
+ " - 39s - loss: 0.3747 - acc: 0.9490 - val_loss: 0.9015 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00244: val_acc did not improve from 0.86649\n",
+ "Epoch 245/3000\n",
+ " - 39s - loss: 0.3713 - acc: 0.9499 - val_loss: 0.9938 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00245: val_acc did not improve from 0.86649\n",
+ "Epoch 246/3000\n",
+ " - 39s - loss: 0.3796 - acc: 0.9500 - val_loss: 0.9559 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00246: val_acc did not improve from 0.86649\n",
+ "Epoch 247/3000\n",
+ " - 40s - loss: 0.3666 - acc: 0.9523 - val_loss: 0.9361 - val_acc: 0.8385\n",
+ "\n",
+ "Epoch 00247: val_acc did not improve from 0.86649\n",
+ "Epoch 248/3000\n",
+ " - 40s - loss: 0.3775 - acc: 0.9515 - val_loss: 0.8607 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00248: val_acc did not improve from 0.86649\n",
+ "Epoch 249/3000\n",
+ " - 39s - loss: 0.3624 - acc: 0.9541 - val_loss: 0.9073 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00249: val_acc did not improve from 0.86649\n",
+ "Epoch 250/3000\n",
+ " - 39s - loss: 0.3687 - acc: 0.9524 - val_loss: 0.9094 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00250: val_acc did not improve from 0.86649\n",
+ "Epoch 251/3000\n",
+ " - 39s - loss: 0.3681 - acc: 0.9534 - val_loss: 0.9190 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00251: val_acc did not improve from 0.86649\n",
+ "Epoch 252/3000\n",
+ " - 39s - loss: 0.3672 - acc: 0.9508 - val_loss: 0.8954 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00252: val_acc did not improve from 0.86649\n",
+ "Epoch 253/3000\n",
+ " - 40s - loss: 0.3870 - acc: 0.9473 - val_loss: 0.8745 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00253: val_acc did not improve from 0.86649\n",
+ "Epoch 254/3000\n",
+ " - 40s - loss: 0.3811 - acc: 0.9481 - val_loss: 0.9155 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00254: val_acc did not improve from 0.86649\n",
+ "\n",
+ "Epoch 00254: ReduceLROnPlateau reducing learning rate to 7.350918749580159e-05.\n",
+ "Epoch 255/3000\n",
+ " - 40s - loss: 0.3656 - acc: 0.9534 - val_loss: 0.8551 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00255: val_acc did not improve from 0.86649\n",
+ "Epoch 256/3000\n",
+ " - 39s - loss: 0.3653 - acc: 0.9514 - val_loss: 0.9610 - val_acc: 0.8338\n",
+ "\n",
+ "Epoch 00256: val_acc did not improve from 0.86649\n",
+ "Epoch 257/3000\n",
+ " - 39s - loss: 0.3980 - acc: 0.9463 - val_loss: 1.0120 - val_acc: 0.8295\n",
+ "\n",
+ "Epoch 00257: val_acc did not improve from 0.86649\n",
+ "Epoch 258/3000\n",
+ " - 40s - loss: 0.3869 - acc: 0.9481 - val_loss: 0.9067 - val_acc: 0.8439\n",
+ "\n",
+ "Epoch 00258: val_acc did not improve from 0.86649\n",
+ "Epoch 259/3000\n",
+ " - 39s - loss: 0.4147 - acc: 0.9423 - val_loss: 0.9569 - val_acc: 0.8416\n",
+ "\n",
+ "Epoch 00259: val_acc did not improve from 0.86649\n",
+ "Epoch 260/3000\n",
+ " - 39s - loss: 0.3765 - acc: 0.9493 - val_loss: 0.9149 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00260: val_acc did not improve from 0.86649\n",
+ "Epoch 261/3000\n",
+ " - 39s - loss: 0.3723 - acc: 0.9494 - val_loss: 0.9884 - val_acc: 0.8350\n",
+ "\n",
+ "Epoch 00261: val_acc did not improve from 0.86649\n",
+ "Epoch 262/3000\n",
+ " - 39s - loss: 0.3825 - acc: 0.9463 - val_loss: 0.9140 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00262: val_acc did not improve from 0.86649\n",
+ "Epoch 263/3000\n",
+ " - 40s - loss: 0.3643 - acc: 0.9535 - val_loss: 0.9832 - val_acc: 0.8369\n",
+ "\n",
+ "Epoch 00263: val_acc did not improve from 0.86649\n",
+ "Epoch 264/3000\n",
+ " - 39s - loss: 0.3596 - acc: 0.9526 - val_loss: 0.9553 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00264: val_acc did not improve from 0.86649\n",
+ "Epoch 265/3000\n",
+ " - 39s - loss: 0.3603 - acc: 0.9532 - val_loss: 0.9267 - val_acc: 0.8420\n",
+ "\n",
+ "Epoch 00265: val_acc did not improve from 0.86649\n",
+ "Epoch 266/3000\n",
+ " - 39s - loss: 0.3523 - acc: 0.9583 - val_loss: 0.9068 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00266: val_acc did not improve from 0.86649\n",
+ "Epoch 267/3000\n",
+ " - 39s - loss: 0.3496 - acc: 0.9553 - val_loss: 0.8808 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00267: val_acc did not improve from 0.86649\n",
+ "Epoch 268/3000\n",
+ " - 39s - loss: 0.3839 - acc: 0.9479 - val_loss: 0.8474 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00268: val_acc did not improve from 0.86649\n",
+ "Epoch 269/3000\n",
+ " - 39s - loss: 0.3651 - acc: 0.9552 - val_loss: 0.9066 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00269: val_acc did not improve from 0.86649\n",
+ "Epoch 270/3000\n",
+ " - 39s - loss: 0.3781 - acc: 0.9494 - val_loss: 0.9599 - val_acc: 0.8439\n",
+ "\n",
+ "Epoch 00270: val_acc did not improve from 0.86649\n",
+ "Epoch 271/3000\n",
+ " - 40s - loss: 0.3577 - acc: 0.9529 - val_loss: 0.9236 - val_acc: 0.8474\n",
+ "\n",
+ "Epoch 00271: val_acc did not improve from 0.86649\n",
+ "Epoch 272/3000\n",
+ " - 40s - loss: 0.3792 - acc: 0.9473 - val_loss: 0.9265 - val_acc: 0.8439\n",
+ "\n",
+ "Epoch 00272: val_acc did not improve from 0.86649\n",
+ "Epoch 273/3000\n",
+ " - 40s - loss: 0.3674 - acc: 0.9523 - val_loss: 0.9896 - val_acc: 0.8373\n",
+ "\n",
+ "Epoch 00273: val_acc did not improve from 0.86649\n",
+ "Epoch 274/3000\n",
+ " - 39s - loss: 0.3655 - acc: 0.9526 - val_loss: 0.8524 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00274: val_acc did not improve from 0.86649\n",
+ "Epoch 275/3000\n",
+ " - 39s - loss: 0.3658 - acc: 0.9502 - val_loss: 0.9661 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00275: val_acc did not improve from 0.86649\n",
+ "Epoch 276/3000\n",
+ " - 39s - loss: 0.3713 - acc: 0.9500 - val_loss: 0.8588 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00276: val_acc did not improve from 0.86649\n",
+ "Epoch 277/3000\n",
+ " - 39s - loss: 0.3685 - acc: 0.9540 - val_loss: 0.9974 - val_acc: 0.8408\n",
+ "\n",
+ "Epoch 00277: val_acc did not improve from 0.86649\n",
+ "Epoch 278/3000\n",
+ " - 39s - loss: 0.3696 - acc: 0.9526 - val_loss: 0.8494 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00278: val_acc did not improve from 0.86649\n",
+ "Epoch 279/3000\n",
+ " - 39s - loss: 0.3585 - acc: 0.9538 - val_loss: 0.9636 - val_acc: 0.8392\n",
+ "\n",
+ "Epoch 00279: val_acc did not improve from 0.86649\n",
+ "Epoch 280/3000\n",
+ " - 39s - loss: 0.3744 - acc: 0.9490 - val_loss: 0.8726 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 00280: val_acc did not improve from 0.86649\n",
+ "Epoch 281/3000\n",
+ " - 40s - loss: 0.3828 - acc: 0.9469 - val_loss: 0.9184 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00281: val_acc did not improve from 0.86649\n",
+ "Epoch 282/3000\n",
+ " - 39s - loss: 0.3731 - acc: 0.9505 - val_loss: 0.8927 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00282: val_acc did not improve from 0.86649\n",
+ "Epoch 283/3000\n",
+ " - 39s - loss: 0.3597 - acc: 0.9524 - val_loss: 0.8174 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 00283: val_acc did not improve from 0.86649\n",
+ "Epoch 284/3000\n",
+ " - 39s - loss: 0.3644 - acc: 0.9515 - val_loss: 0.9366 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00284: val_acc did not improve from 0.86649\n",
+ "\n",
+ "Epoch 00284: ReduceLROnPlateau reducing learning rate to 6.983372950344346e-05.\n",
+ "Epoch 285/3000\n",
+ " - 38s - loss: 0.3537 - acc: 0.9524 - val_loss: 0.9509 - val_acc: 0.8474\n",
+ "\n",
+ "Epoch 00285: val_acc did not improve from 0.86649\n",
+ "Epoch 286/3000\n",
+ " - 39s - loss: 0.3539 - acc: 0.9556 - val_loss: 0.8748 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 00286: val_acc did not improve from 0.86649\n",
+ "Epoch 287/3000\n",
+ " - 40s - loss: 0.3665 - acc: 0.9512 - val_loss: 0.9176 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00287: val_acc did not improve from 0.86649\n",
+ "Epoch 288/3000\n",
+ " - 39s - loss: 0.3592 - acc: 0.9523 - val_loss: 0.8962 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00288: val_acc did not improve from 0.86649\n",
+ "Epoch 289/3000\n",
+ " - 39s - loss: 0.3485 - acc: 0.9558 - val_loss: 0.8369 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 00289: val_acc did not improve from 0.86649\n",
+ "Epoch 290/3000\n",
+ " - 39s - loss: 0.3433 - acc: 0.9537 - val_loss: 0.8916 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 00290: val_acc did not improve from 0.86649\n",
+ "Epoch 291/3000\n",
+ " - 39s - loss: 0.3764 - acc: 0.9499 - val_loss: 0.9457 - val_acc: 0.8439\n",
+ "\n",
+ "Epoch 00291: val_acc did not improve from 0.86649\n",
+ "Epoch 292/3000\n",
+ " - 39s - loss: 0.3546 - acc: 0.9543 - val_loss: 0.9729 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00292: val_acc did not improve from 0.86649\n",
+ "Epoch 293/3000\n",
+ " - 40s - loss: 0.3677 - acc: 0.9505 - val_loss: 0.9273 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00293: val_acc did not improve from 0.86649\n",
+ "Epoch 294/3000\n",
+ " - 39s - loss: 0.3894 - acc: 0.9470 - val_loss: 0.9159 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00294: val_acc did not improve from 0.86649\n",
+ "Epoch 295/3000\n",
+ " - 40s - loss: 0.3607 - acc: 0.9532 - val_loss: 0.9351 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00295: val_acc did not improve from 0.86649\n",
+ "Epoch 296/3000\n",
+ " - 39s - loss: 0.3785 - acc: 0.9485 - val_loss: 0.9046 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00296: val_acc did not improve from 0.86649\n",
+ "Epoch 297/3000\n",
+ " - 39s - loss: 0.3575 - acc: 0.9547 - val_loss: 0.9030 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00297: val_acc did not improve from 0.86649\n",
+ "Epoch 298/3000\n",
+ " - 40s - loss: 0.3679 - acc: 0.9521 - val_loss: 0.8527 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00298: val_acc did not improve from 0.86649\n",
+ "Epoch 299/3000\n",
+ " - 40s - loss: 0.3658 - acc: 0.9527 - val_loss: 0.8763 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00299: val_acc did not improve from 0.86649\n",
+ "Epoch 300/3000\n",
+ " - 39s - loss: 0.3501 - acc: 0.9561 - val_loss: 0.8783 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00300: val_acc did not improve from 0.86649\n",
+ "Epoch 301/3000\n",
+ " - 40s - loss: 0.3594 - acc: 0.9538 - val_loss: 0.9492 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00301: val_acc did not improve from 0.86649\n",
+ "Epoch 302/3000\n",
+ " - 40s - loss: 0.3566 - acc: 0.9541 - val_loss: 0.9000 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00302: val_acc did not improve from 0.86649\n",
+ "Epoch 303/3000\n",
+ " - 39s - loss: 0.3498 - acc: 0.9567 - val_loss: 0.9092 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00303: val_acc did not improve from 0.86649\n",
+ "Epoch 304/3000\n",
+ " - 39s - loss: 0.3646 - acc: 0.9534 - val_loss: 0.9032 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00304: val_acc did not improve from 0.86649\n",
+ "Epoch 305/3000\n",
+ " - 39s - loss: 0.3381 - acc: 0.9588 - val_loss: 0.8957 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00305: val_acc did not improve from 0.86649\n",
+ "Epoch 306/3000\n",
+ " - 39s - loss: 0.3567 - acc: 0.9541 - val_loss: 0.8587 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 00306: val_acc did not improve from 0.86649\n",
+ "Epoch 307/3000\n",
+ " - 39s - loss: 0.3696 - acc: 0.9493 - val_loss: 0.9091 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00307: val_acc did not improve from 0.86649\n",
+ "Epoch 308/3000\n",
+ " - 39s - loss: 0.3529 - acc: 0.9530 - val_loss: 0.9425 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00308: val_acc did not improve from 0.86649\n",
+ "Epoch 309/3000\n",
+ " - 39s - loss: 0.3585 - acc: 0.9540 - val_loss: 0.9658 - val_acc: 0.8307\n",
+ "\n",
+ "Epoch 00309: val_acc did not improve from 0.86649\n",
+ "Epoch 310/3000\n",
+ " - 39s - loss: 0.3701 - acc: 0.9482 - val_loss: 0.9474 - val_acc: 0.8404\n",
+ "\n",
+ "Epoch 00310: val_acc did not improve from 0.86649\n",
+ "Epoch 311/3000\n",
+ " - 39s - loss: 0.3728 - acc: 0.9485 - val_loss: 0.8468 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00311: val_acc did not improve from 0.86649\n",
+ "Epoch 312/3000\n",
+ " - 40s - loss: 0.3428 - acc: 0.9547 - val_loss: 0.8516 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00312: val_acc did not improve from 0.86649\n",
+ "Epoch 313/3000\n",
+ " - 39s - loss: 0.3676 - acc: 0.9511 - val_loss: 0.9169 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00313: val_acc did not improve from 0.86649\n",
+ "Epoch 314/3000\n",
+ " - 39s - loss: 0.3761 - acc: 0.9464 - val_loss: 0.8943 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00314: val_acc did not improve from 0.86649\n",
+ "\n",
+ "Epoch 00314: ReduceLROnPlateau reducing learning rate to 6.634204510191921e-05.\n",
+ "Epoch 315/3000\n",
+ " - 39s - loss: 0.3402 - acc: 0.9567 - val_loss: 0.9325 - val_acc: 0.8377\n",
+ "\n",
+ "Epoch 00315: val_acc did not improve from 0.86649\n",
+ "Epoch 316/3000\n",
+ " - 39s - loss: 0.3531 - acc: 0.9526 - val_loss: 0.8943 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00316: val_acc did not improve from 0.86649\n",
+ "Epoch 317/3000\n",
+ " - 39s - loss: 0.3568 - acc: 0.9538 - val_loss: 0.9239 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00317: val_acc did not improve from 0.86649\n",
+ "Epoch 318/3000\n",
+ " - 39s - loss: 0.3511 - acc: 0.9546 - val_loss: 0.8757 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 00318: val_acc did not improve from 0.86649\n",
+ "Epoch 319/3000\n",
+ " - 39s - loss: 0.3493 - acc: 0.9565 - val_loss: 0.8912 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00319: val_acc did not improve from 0.86649\n",
+ "Epoch 320/3000\n",
+ " - 40s - loss: 0.3661 - acc: 0.9515 - val_loss: 0.9028 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00320: val_acc did not improve from 0.86649\n",
+ "Epoch 321/3000\n",
+ " - 39s - loss: 0.3615 - acc: 0.9559 - val_loss: 0.9068 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00321: val_acc did not improve from 0.86649\n",
+ "Epoch 322/3000\n",
+ " - 39s - loss: 0.3571 - acc: 0.9530 - val_loss: 0.9152 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00322: val_acc did not improve from 0.86649\n",
+ "Epoch 323/3000\n",
+ " - 39s - loss: 0.3619 - acc: 0.9535 - val_loss: 0.8739 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00323: val_acc did not improve from 0.86649\n",
+ "Epoch 324/3000\n",
+ " - 39s - loss: 0.3484 - acc: 0.9538 - val_loss: 0.9118 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00324: val_acc did not improve from 0.86649\n",
+ "Epoch 325/3000\n",
+ " - 39s - loss: 0.3583 - acc: 0.9556 - val_loss: 0.9312 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00325: val_acc did not improve from 0.86649\n",
+ "Epoch 326/3000\n",
+ " - 39s - loss: 0.3683 - acc: 0.9506 - val_loss: 0.9007 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00326: val_acc did not improve from 0.86649\n",
+ "Epoch 327/3000\n",
+ " - 39s - loss: 0.3640 - acc: 0.9529 - val_loss: 0.9076 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00327: val_acc did not improve from 0.86649\n",
+ "Epoch 328/3000\n",
+ " - 39s - loss: 0.3379 - acc: 0.9583 - val_loss: 0.8766 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00328: val_acc did not improve from 0.86649\n",
+ "Epoch 329/3000\n",
+ " - 39s - loss: 0.3538 - acc: 0.9537 - val_loss: 0.9582 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00329: val_acc did not improve from 0.86649\n",
+ "Epoch 330/3000\n",
+ " - 39s - loss: 0.3485 - acc: 0.9538 - val_loss: 0.8958 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00330: val_acc did not improve from 0.86649\n",
+ "Epoch 331/3000\n",
+ " - 40s - loss: 0.3540 - acc: 0.9534 - val_loss: 0.8465 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00331: val_acc did not improve from 0.86649\n",
+ "Epoch 332/3000\n",
+ " - 39s - loss: 0.3498 - acc: 0.9574 - val_loss: 0.9472 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00332: val_acc did not improve from 0.86649\n",
+ "Epoch 333/3000\n",
+ " - 39s - loss: 0.3407 - acc: 0.9570 - val_loss: 0.9152 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00333: val_acc did not improve from 0.86649\n",
+ "Epoch 334/3000\n",
+ " - 39s - loss: 0.3713 - acc: 0.9541 - val_loss: 0.8634 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00334: val_acc did not improve from 0.86649\n",
+ "Epoch 335/3000\n",
+ " - 39s - loss: 0.3541 - acc: 0.9537 - val_loss: 0.9005 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00335: val_acc did not improve from 0.86649\n",
+ "Epoch 336/3000\n",
+ " - 39s - loss: 0.3709 - acc: 0.9506 - val_loss: 0.9184 - val_acc: 0.8392\n",
+ "\n",
+ "Epoch 00336: val_acc did not improve from 0.86649\n",
+ "Epoch 337/3000\n",
+ " - 39s - loss: 0.3675 - acc: 0.9500 - val_loss: 0.8765 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00337: val_acc did not improve from 0.86649\n",
+ "Epoch 338/3000\n",
+ " - 39s - loss: 0.3534 - acc: 0.9562 - val_loss: 0.9150 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00338: val_acc did not improve from 0.86649\n",
+ "Epoch 339/3000\n",
+ " - 39s - loss: 0.3440 - acc: 0.9561 - val_loss: 0.9922 - val_acc: 0.8400\n",
+ "\n",
+ "Epoch 00339: val_acc did not improve from 0.86649\n",
+ "Epoch 340/3000\n",
+ " - 39s - loss: 0.3579 - acc: 0.9535 - val_loss: 0.9198 - val_acc: 0.8451\n",
+ "\n",
+ "Epoch 00340: val_acc did not improve from 0.86649\n",
+ "Epoch 341/3000\n",
+ " - 39s - loss: 0.3605 - acc: 0.9511 - val_loss: 0.8777 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00341: val_acc did not improve from 0.86649\n",
+ "Epoch 342/3000\n",
+ " - 39s - loss: 0.3638 - acc: 0.9511 - val_loss: 0.9121 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00342: val_acc did not improve from 0.86649\n",
+ "Epoch 343/3000\n",
+ " - 39s - loss: 0.3619 - acc: 0.9514 - val_loss: 0.9426 - val_acc: 0.8424\n",
+ "\n",
+ "Epoch 00343: val_acc did not improve from 0.86649\n",
+ "Epoch 344/3000\n",
+ " - 39s - loss: 0.3542 - acc: 0.9553 - val_loss: 0.9400 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00344: val_acc did not improve from 0.86649\n",
+ "\n",
+ "Epoch 00344: ReduceLROnPlateau reducing learning rate to 6.302494111878331e-05.\n",
+ "Epoch 345/3000\n",
+ " - 40s - loss: 0.3759 - acc: 0.9470 - val_loss: 0.8714 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00345: val_acc did not improve from 0.86649\n",
+ "Epoch 346/3000\n",
+ " - 39s - loss: 0.3568 - acc: 0.9523 - val_loss: 0.9523 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00346: val_acc did not improve from 0.86649\n",
+ "Epoch 347/3000\n",
+ " - 39s - loss: 0.3402 - acc: 0.9568 - val_loss: 0.9460 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00347: val_acc did not improve from 0.86649\n",
+ "Epoch 348/3000\n",
+ " - 39s - loss: 0.3569 - acc: 0.9527 - val_loss: 0.8606 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00348: val_acc did not improve from 0.86649\n",
+ "Epoch 349/3000\n",
+ " - 39s - loss: 0.3515 - acc: 0.9555 - val_loss: 0.9580 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00349: val_acc did not improve from 0.86649\n",
+ "Epoch 350/3000\n",
+ " - 39s - loss: 0.3514 - acc: 0.9541 - val_loss: 0.9491 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00350: val_acc did not improve from 0.86649\n",
+ "Epoch 351/3000\n",
+ " - 39s - loss: 0.3516 - acc: 0.9535 - val_loss: 0.9365 - val_acc: 0.8381\n",
+ "\n",
+ "Epoch 00351: val_acc did not improve from 0.86649\n",
+ "Epoch 352/3000\n",
+ " - 39s - loss: 0.3413 - acc: 0.9561 - val_loss: 0.9349 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00352: val_acc did not improve from 0.86649\n",
+ "Epoch 353/3000\n",
+ " - 39s - loss: 0.3529 - acc: 0.9534 - val_loss: 0.9106 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00353: val_acc did not improve from 0.86649\n",
+ "Epoch 354/3000\n",
+ " - 40s - loss: 0.3388 - acc: 0.9576 - val_loss: 0.8650 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00354: val_acc did not improve from 0.86649\n",
+ "Epoch 355/3000\n",
+ " - 39s - loss: 0.3457 - acc: 0.9576 - val_loss: 0.9217 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00355: val_acc did not improve from 0.86649\n",
+ "Epoch 356/3000\n",
+ " - 39s - loss: 0.3485 - acc: 0.9546 - val_loss: 0.9274 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00356: val_acc did not improve from 0.86649\n",
+ "Epoch 357/3000\n",
+ " - 39s - loss: 0.3410 - acc: 0.9561 - val_loss: 0.9489 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00357: val_acc did not improve from 0.86649\n",
+ "Epoch 358/3000\n",
+ " - 39s - loss: 0.3405 - acc: 0.9576 - val_loss: 0.9048 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00358: val_acc did not improve from 0.86649\n",
+ "Epoch 359/3000\n",
+ " - 39s - loss: 0.3482 - acc: 0.9550 - val_loss: 0.9928 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00359: val_acc did not improve from 0.86649\n",
+ "Epoch 360/3000\n",
+ " - 39s - loss: 0.3555 - acc: 0.9561 - val_loss: 0.9409 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00360: val_acc did not improve from 0.86649\n",
+ "Epoch 361/3000\n",
+ " - 39s - loss: 0.3373 - acc: 0.9562 - val_loss: 0.8830 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00361: val_acc did not improve from 0.86649\n",
+ "Epoch 362/3000\n",
+ " - 39s - loss: 0.3407 - acc: 0.9527 - val_loss: 0.9251 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00362: val_acc did not improve from 0.86649\n",
+ "Epoch 363/3000\n",
+ " - 39s - loss: 0.3546 - acc: 0.9565 - val_loss: 0.8666 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00363: val_acc did not improve from 0.86649\n",
+ "Epoch 364/3000\n",
+ " - 39s - loss: 0.3530 - acc: 0.9526 - val_loss: 0.9651 - val_acc: 0.8462\n",
+ "\n",
+ "Epoch 00364: val_acc did not improve from 0.86649\n",
+ "Epoch 365/3000\n",
+ " - 39s - loss: 0.3449 - acc: 0.9586 - val_loss: 0.9295 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00365: val_acc did not improve from 0.86649\n",
+ "Epoch 366/3000\n",
+ " - 39s - loss: 0.3288 - acc: 0.9567 - val_loss: 1.0094 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00366: val_acc did not improve from 0.86649\n",
+ "Epoch 367/3000\n",
+ " - 39s - loss: 0.3459 - acc: 0.9553 - val_loss: 0.9412 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00367: val_acc did not improve from 0.86649\n",
+ "Epoch 368/3000\n",
+ " - 39s - loss: 0.3469 - acc: 0.9565 - val_loss: 0.8496 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 00368: val_acc did not improve from 0.86649\n",
+ "Epoch 369/3000\n",
+ " - 39s - loss: 0.3490 - acc: 0.9527 - val_loss: 1.0578 - val_acc: 0.8276\n",
+ "\n",
+ "Epoch 00369: val_acc did not improve from 0.86649\n",
+ "Epoch 370/3000\n",
+ " - 40s - loss: 0.3739 - acc: 0.9506 - val_loss: 0.9138 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00370: val_acc did not improve from 0.86649\n",
+ "Epoch 371/3000\n",
+ " - 39s - loss: 0.3419 - acc: 0.9556 - val_loss: 0.9400 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00371: val_acc did not improve from 0.86649\n",
+ "Epoch 372/3000\n",
+ " - 39s - loss: 0.3464 - acc: 0.9555 - val_loss: 0.9172 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00372: val_acc did not improve from 0.86649\n",
+ "Epoch 373/3000\n",
+ " - 39s - loss: 0.3294 - acc: 0.9595 - val_loss: 0.8920 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00373: val_acc did not improve from 0.86649\n",
+ "Epoch 374/3000\n",
+ " - 39s - loss: 0.3560 - acc: 0.9523 - val_loss: 0.8554 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00374: val_acc did not improve from 0.86649\n",
+ "\n",
+ "Epoch 00374: ReduceLROnPlateau reducing learning rate to 5.9873694408452134e-05.\n",
+ "Epoch 375/3000\n",
+ " - 40s - loss: 0.3404 - acc: 0.9543 - val_loss: 0.9453 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00375: val_acc did not improve from 0.86649\n",
+ "Epoch 376/3000\n",
+ " - 39s - loss: 0.3477 - acc: 0.9555 - val_loss: 0.9316 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00376: val_acc did not improve from 0.86649\n",
+ "Epoch 377/3000\n",
+ " - 39s - loss: 0.3447 - acc: 0.9543 - val_loss: 0.8490 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 00377: val_acc did not improve from 0.86649\n",
+ "Epoch 378/3000\n",
+ " - 40s - loss: 0.3414 - acc: 0.9564 - val_loss: 0.9153 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00378: val_acc did not improve from 0.86649\n",
+ "Epoch 379/3000\n",
+ " - 39s - loss: 0.3439 - acc: 0.9565 - val_loss: 0.8819 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00379: val_acc did not improve from 0.86649\n",
+ "Epoch 380/3000\n",
+ " - 39s - loss: 0.3306 - acc: 0.9571 - val_loss: 0.8860 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00380: val_acc did not improve from 0.86649\n",
+ "Epoch 381/3000\n",
+ " - 39s - loss: 0.3328 - acc: 0.9592 - val_loss: 0.9399 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00381: val_acc did not improve from 0.86649\n",
+ "Epoch 382/3000\n",
+ " - 39s - loss: 0.3626 - acc: 0.9530 - val_loss: 0.8948 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00382: val_acc did not improve from 0.86649\n",
+ "Epoch 383/3000\n",
+ " - 39s - loss: 0.3342 - acc: 0.9562 - val_loss: 0.9457 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00383: val_acc did not improve from 0.86649\n",
+ "Epoch 384/3000\n",
+ " - 39s - loss: 0.3262 - acc: 0.9594 - val_loss: 0.8822 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00384: val_acc did not improve from 0.86649\n",
+ "Epoch 385/3000\n",
+ " - 39s - loss: 0.3446 - acc: 0.9561 - val_loss: 0.8581 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00385: val_acc did not improve from 0.86649\n",
+ "Epoch 386/3000\n",
+ " - 40s - loss: 0.3536 - acc: 0.9561 - val_loss: 0.9542 - val_acc: 0.8404\n",
+ "\n",
+ "Epoch 00386: val_acc did not improve from 0.86649\n",
+ "Epoch 387/3000\n",
+ " - 39s - loss: 0.3523 - acc: 0.9553 - val_loss: 0.9583 - val_acc: 0.8334\n",
+ "\n",
+ "Epoch 00387: val_acc did not improve from 0.86649\n",
+ "Epoch 388/3000\n",
+ " - 40s - loss: 0.3573 - acc: 0.9558 - val_loss: 0.8497 - val_acc: 0.8614\n",
+ "\n",
+ "Epoch 00388: val_acc did not improve from 0.86649\n",
+ "Epoch 389/3000\n",
+ " - 39s - loss: 0.3448 - acc: 0.9564 - val_loss: 0.9241 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00390: val_acc did not improve from 0.86649\n",
+ "Epoch 391/3000\n",
+ " - 39s - loss: 0.3355 - acc: 0.9601 - val_loss: 0.9487 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00391: val_acc did not improve from 0.86649\n",
+ "Epoch 392/3000\n",
+ " - 39s - loss: 0.3368 - acc: 0.9591 - val_loss: 0.9421 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00392: val_acc did not improve from 0.86649\n",
+ "Epoch 393/3000\n",
+ " - 40s - loss: 0.3445 - acc: 0.9546 - val_loss: 0.9602 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00393: val_acc did not improve from 0.86649\n",
+ "Epoch 394/3000\n",
+ " - 39s - loss: 0.3369 - acc: 0.9582 - val_loss: 1.0126 - val_acc: 0.8404\n",
+ "\n",
+ "Epoch 00394: val_acc did not improve from 0.86649\n",
+ "Epoch 395/3000\n",
+ " - 39s - loss: 0.3393 - acc: 0.9553 - val_loss: 0.9342 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00395: val_acc did not improve from 0.86649\n",
+ "Epoch 396/3000\n",
+ " - 39s - loss: 0.3227 - acc: 0.9615 - val_loss: 0.8635 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00396: val_acc did not improve from 0.86649\n",
+ "Epoch 397/3000\n",
+ " - 39s - loss: 0.3304 - acc: 0.9580 - val_loss: 0.9210 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00397: val_acc did not improve from 0.86649\n",
+ "Epoch 398/3000\n",
+ " - 39s - loss: 0.3367 - acc: 0.9574 - val_loss: 0.8472 - val_acc: 0.8677\n",
+ "\n",
+ "Epoch 00398: val_acc improved from 0.86649 to 0.86765, saving model to ./ModelSnapshots/LSTM-v2-398.h5\n",
+ "Epoch 399/3000\n",
+ " - 39s - loss: 0.3272 - acc: 0.9583 - val_loss: 0.8903 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00399: val_acc did not improve from 0.86765\n",
+ "Epoch 400/3000\n",
+ " - 39s - loss: 0.3386 - acc: 0.9577 - val_loss: 0.8627 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00400: val_acc did not improve from 0.86765\n",
+ "Epoch 401/3000\n",
+ " - 40s - loss: 0.3461 - acc: 0.9568 - val_loss: 0.9331 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00401: val_acc did not improve from 0.86765\n",
+ "Epoch 402/3000\n",
+ " - 39s - loss: 0.3497 - acc: 0.9555 - val_loss: 0.8815 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00402: val_acc did not improve from 0.86765\n",
+ "Epoch 403/3000\n",
+ " - 40s - loss: 0.3369 - acc: 0.9562 - val_loss: 0.9459 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00403: val_acc did not improve from 0.86765\n",
+ "Epoch 404/3000\n",
+ " - 39s - loss: 0.3439 - acc: 0.9532 - val_loss: 0.8664 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 00404: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00404: ReduceLROnPlateau reducing learning rate to 5.68800103792455e-05.\n",
+ "Epoch 405/3000\n",
+ " - 39s - loss: 0.3288 - acc: 0.9592 - val_loss: 0.8931 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00405: val_acc did not improve from 0.86765\n",
+ "Epoch 406/3000\n",
+ " - 39s - loss: 0.3252 - acc: 0.9591 - val_loss: 0.9370 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00406: val_acc did not improve from 0.86765\n",
+ "Epoch 407/3000\n",
+ " - 39s - loss: 0.3420 - acc: 0.9562 - val_loss: 0.8960 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00407: val_acc did not improve from 0.86765\n",
+ "Epoch 408/3000\n",
+ " - 39s - loss: 0.3438 - acc: 0.9552 - val_loss: 0.9718 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00408: val_acc did not improve from 0.86765\n",
+ "Epoch 409/3000\n",
+ " - 39s - loss: 0.3356 - acc: 0.9574 - val_loss: 0.9235 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00409: val_acc did not improve from 0.86765\n",
+ "Epoch 410/3000\n",
+ " - 39s - loss: 0.3196 - acc: 0.9603 - val_loss: 0.8746 - val_acc: 0.8614\n",
+ "\n",
+ "Epoch 00410: val_acc did not improve from 0.86765\n",
+ "Epoch 411/3000\n",
+ " - 39s - loss: 0.3455 - acc: 0.9568 - val_loss: 0.8772 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00411: val_acc did not improve from 0.86765\n",
+ "Epoch 412/3000\n",
+ " - 39s - loss: 0.3620 - acc: 0.9526 - val_loss: 0.8925 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00412: val_acc did not improve from 0.86765\n",
+ "Epoch 413/3000\n",
+ " - 39s - loss: 0.3418 - acc: 0.9570 - val_loss: 0.8578 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 00413: val_acc did not improve from 0.86765\n",
+ "Epoch 414/3000\n",
+ " - 39s - loss: 0.3230 - acc: 0.9583 - val_loss: 0.9399 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00414: val_acc did not improve from 0.86765\n",
+ "Epoch 415/3000\n",
+ " - 39s - loss: 0.3225 - acc: 0.9577 - val_loss: 0.9693 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00415: val_acc did not improve from 0.86765\n",
+ "Epoch 416/3000\n",
+ " - 39s - loss: 0.3343 - acc: 0.9574 - val_loss: 0.8807 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00416: val_acc did not improve from 0.86765\n",
+ "Epoch 417/3000\n",
+ " - 39s - loss: 0.3379 - acc: 0.9561 - val_loss: 0.8396 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 00417: val_acc did not improve from 0.86765\n",
+ "Epoch 418/3000\n",
+ " - 39s - loss: 0.3500 - acc: 0.9515 - val_loss: 0.8685 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00418: val_acc did not improve from 0.86765\n",
+ "Epoch 419/3000\n",
+ " - 39s - loss: 0.3236 - acc: 0.9594 - val_loss: 0.8988 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00419: val_acc did not improve from 0.86765\n",
+ "Epoch 420/3000\n",
+ " - 39s - loss: 0.3380 - acc: 0.9535 - val_loss: 0.8662 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 00420: val_acc did not improve from 0.86765\n",
+ "Epoch 421/3000\n",
+ " - 39s - loss: 0.3282 - acc: 0.9588 - val_loss: 0.9285 - val_acc: 0.8470\n",
+ "\n",
+ "Epoch 00421: val_acc did not improve from 0.86765\n",
+ "Epoch 422/3000\n",
+ " - 39s - loss: 0.3402 - acc: 0.9573 - val_loss: 0.9211 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00422: val_acc did not improve from 0.86765\n",
+ "Epoch 423/3000\n",
+ " - 39s - loss: 0.3400 - acc: 0.9570 - val_loss: 0.8997 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00423: val_acc did not improve from 0.86765\n",
+ "Epoch 424/3000\n",
+ " - 39s - loss: 0.3168 - acc: 0.9633 - val_loss: 0.8881 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00424: val_acc did not improve from 0.86765\n",
+ "Epoch 425/3000\n",
+ " - 39s - loss: 0.3453 - acc: 0.9556 - val_loss: 0.9445 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00425: val_acc did not improve from 0.86765\n",
+ "Epoch 426/3000\n",
+ " - 39s - loss: 0.3390 - acc: 0.9570 - val_loss: 0.9125 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00426: val_acc did not improve from 0.86765\n",
+ "Epoch 427/3000\n",
+ " - 39s - loss: 0.3397 - acc: 0.9552 - val_loss: 0.8986 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00427: val_acc did not improve from 0.86765\n",
+ "Epoch 428/3000\n",
+ " - 39s - loss: 0.3488 - acc: 0.9553 - val_loss: 0.8960 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00428: val_acc did not improve from 0.86765\n",
+ "Epoch 429/3000\n",
+ " - 39s - loss: 0.3300 - acc: 0.9600 - val_loss: 0.8449 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 00429: val_acc did not improve from 0.86765\n",
+ "Epoch 430/3000\n",
+ " - 39s - loss: 0.3296 - acc: 0.9588 - val_loss: 0.9478 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00430: val_acc did not improve from 0.86765\n",
+ "Epoch 431/3000\n",
+ " - 39s - loss: 0.3305 - acc: 0.9589 - val_loss: 0.8713 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00431: val_acc did not improve from 0.86765\n",
+ "Epoch 432/3000\n",
+ " - 39s - loss: 0.3278 - acc: 0.9580 - val_loss: 0.9387 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00432: val_acc did not improve from 0.86765\n",
+ "Epoch 433/3000\n",
+ " - 39s - loss: 0.3489 - acc: 0.9515 - val_loss: 0.9361 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00433: val_acc did not improve from 0.86765\n",
+ "Epoch 434/3000\n",
+ " - 39s - loss: 0.3327 - acc: 0.9562 - val_loss: 0.9641 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00434: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00434: ReduceLROnPlateau reducing learning rate to 5.4036009169067255e-05.\n",
+ "Epoch 435/3000\n",
+ " - 39s - loss: 0.3291 - acc: 0.9597 - val_loss: 0.9595 - val_acc: 0.8381\n",
+ "\n",
+ "Epoch 00435: val_acc did not improve from 0.86765\n",
+ "Epoch 436/3000\n",
+ " - 39s - loss: 0.3637 - acc: 0.9553 - val_loss: 0.9519 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00436: val_acc did not improve from 0.86765\n",
+ "Epoch 437/3000\n",
+ " - 39s - loss: 0.3492 - acc: 0.9565 - val_loss: 1.0486 - val_acc: 0.8326\n",
+ "\n",
+ "Epoch 00437: val_acc did not improve from 0.86765\n",
+ "Epoch 438/3000\n",
+ " - 39s - loss: 0.3500 - acc: 0.9518 - val_loss: 0.9460 - val_acc: 0.8451\n",
+ "\n",
+ "Epoch 00438: val_acc did not improve from 0.86765\n",
+ "Epoch 439/3000\n",
+ " - 39s - loss: 0.3331 - acc: 0.9579 - val_loss: 0.9051 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00439: val_acc did not improve from 0.86765\n",
+ "Epoch 440/3000\n",
+ " - 39s - loss: 0.3301 - acc: 0.9580 - val_loss: 0.8741 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00440: val_acc did not improve from 0.86765\n",
+ "Epoch 441/3000\n",
+ " - 39s - loss: 0.3285 - acc: 0.9606 - val_loss: 0.9382 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00441: val_acc did not improve from 0.86765\n",
+ "Epoch 442/3000\n",
+ " - 39s - loss: 0.3185 - acc: 0.9626 - val_loss: 0.9443 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00442: val_acc did not improve from 0.86765\n",
+ "Epoch 443/3000\n",
+ " - 39s - loss: 0.3345 - acc: 0.9580 - val_loss: 0.9091 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00443: val_acc did not improve from 0.86765\n",
+ "Epoch 444/3000\n",
+ " - 40s - loss: 0.3237 - acc: 0.9589 - val_loss: 0.8787 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00444: val_acc did not improve from 0.86765\n",
+ "Epoch 445/3000\n",
+ " - 39s - loss: 0.3170 - acc: 0.9595 - val_loss: 0.9144 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00445: val_acc did not improve from 0.86765\n",
+ "Epoch 446/3000\n",
+ " - 39s - loss: 0.3250 - acc: 0.9580 - val_loss: 0.9949 - val_acc: 0.8404\n",
+ "\n",
+ "Epoch 00446: val_acc did not improve from 0.86765\n",
+ "Epoch 447/3000\n",
+ " - 40s - loss: 0.3482 - acc: 0.9532 - val_loss: 0.9225 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00447: val_acc did not improve from 0.86765\n",
+ "Epoch 448/3000\n",
+ " - 39s - loss: 0.3508 - acc: 0.9574 - val_loss: 0.9944 - val_acc: 0.8431\n",
+ "\n",
+ "Epoch 00448: val_acc did not improve from 0.86765\n",
+ "Epoch 449/3000\n",
+ " - 39s - loss: 0.3301 - acc: 0.9565 - val_loss: 0.8755 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00449: val_acc did not improve from 0.86765\n",
+ "Epoch 450/3000\n",
+ " - 39s - loss: 0.3210 - acc: 0.9615 - val_loss: 0.9460 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00450: val_acc did not improve from 0.86765\n",
+ "Epoch 451/3000\n",
+ " - 39s - loss: 0.3255 - acc: 0.9562 - val_loss: 0.9479 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00451: val_acc did not improve from 0.86765\n",
+ "Epoch 452/3000\n",
+ " - 40s - loss: 0.3187 - acc: 0.9589 - val_loss: 0.9648 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00452: val_acc did not improve from 0.86765\n",
+ "Epoch 453/3000\n",
+ " - 40s - loss: 0.3272 - acc: 0.9603 - val_loss: 0.9094 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 00453: val_acc did not improve from 0.86765\n",
+ "Epoch 454/3000\n",
+ " - 39s - loss: 0.3186 - acc: 0.9583 - val_loss: 0.9130 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00454: val_acc did not improve from 0.86765\n",
+ "Epoch 455/3000\n",
+ " - 39s - loss: 0.3492 - acc: 0.9540 - val_loss: 0.9120 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00455: val_acc did not improve from 0.86765\n",
+ "Epoch 456/3000\n",
+ " - 39s - loss: 0.3335 - acc: 0.9586 - val_loss: 0.9360 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00456: val_acc did not improve from 0.86765\n",
+ "Epoch 457/3000\n",
+ " - 39s - loss: 0.3273 - acc: 0.9568 - val_loss: 0.9653 - val_acc: 0.8435\n",
+ "\n",
+ "Epoch 00457: val_acc did not improve from 0.86765\n",
+ "Epoch 458/3000\n",
+ " - 40s - loss: 0.3327 - acc: 0.9576 - val_loss: 0.9670 - val_acc: 0.8439\n",
+ "\n",
+ "Epoch 00458: val_acc did not improve from 0.86765\n",
+ "Epoch 459/3000\n",
+ " - 39s - loss: 0.3384 - acc: 0.9565 - val_loss: 0.9111 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00459: val_acc did not improve from 0.86765\n",
+ "Epoch 460/3000\n",
+ " - 39s - loss: 0.3256 - acc: 0.9582 - val_loss: 0.9494 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00460: val_acc did not improve from 0.86765\n",
+ "Epoch 461/3000\n",
+ " - 39s - loss: 0.3388 - acc: 0.9567 - val_loss: 0.8919 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00461: val_acc did not improve from 0.86765\n",
+ "Epoch 462/3000\n",
+ " - 39s - loss: 0.3470 - acc: 0.9543 - val_loss: 0.8662 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00462: val_acc did not improve from 0.86765\n",
+ "Epoch 463/3000\n",
+ " - 40s - loss: 0.3261 - acc: 0.9583 - val_loss: 0.8317 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00463: val_acc did not improve from 0.86765\n",
+ "Epoch 464/3000\n",
+ " - 39s - loss: 0.3419 - acc: 0.9561 - val_loss: 1.0010 - val_acc: 0.8416\n",
+ "\n",
+ "Epoch 00464: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00464: ReduceLROnPlateau reducing learning rate to 5.133420836500591e-05.\n",
+ "Epoch 465/3000\n",
+ " - 39s - loss: 0.3346 - acc: 0.9576 - val_loss: 0.8434 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00465: val_acc did not improve from 0.86765\n",
+ "Epoch 466/3000\n",
+ " - 39s - loss: 0.3281 - acc: 0.9611 - val_loss: 0.8669 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00466: val_acc did not improve from 0.86765\n",
+ "Epoch 467/3000\n",
+ " - 39s - loss: 0.3298 - acc: 0.9591 - val_loss: 0.9269 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00467: val_acc did not improve from 0.86765\n",
+ "Epoch 468/3000\n",
+ " - 39s - loss: 0.3375 - acc: 0.9564 - val_loss: 0.9649 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00468: val_acc did not improve from 0.86765\n",
+ "Epoch 469/3000\n",
+ " - 40s - loss: 0.3302 - acc: 0.9604 - val_loss: 0.9569 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00469: val_acc did not improve from 0.86765\n",
+ "Epoch 470/3000\n",
+ " - 39s - loss: 0.3267 - acc: 0.9589 - val_loss: 0.9243 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00470: val_acc did not improve from 0.86765\n",
+ "Epoch 471/3000\n",
+ " - 39s - loss: 0.3168 - acc: 0.9603 - val_loss: 0.9311 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00471: val_acc did not improve from 0.86765\n",
+ "Epoch 472/3000\n",
+ " - 39s - loss: 0.3068 - acc: 0.9645 - val_loss: 0.8702 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00472: val_acc did not improve from 0.86765\n",
+ "Epoch 473/3000\n",
+ " - 39s - loss: 0.3163 - acc: 0.9595 - val_loss: 0.9633 - val_acc: 0.8474\n",
+ "\n",
+ "Epoch 00473: val_acc did not improve from 0.86765\n",
+ "Epoch 474/3000\n",
+ " - 39s - loss: 0.3149 - acc: 0.9624 - val_loss: 0.9056 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00474: val_acc did not improve from 0.86765\n",
+ "Epoch 475/3000\n",
+ " - 39s - loss: 0.3333 - acc: 0.9568 - val_loss: 0.8720 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00475: val_acc did not improve from 0.86765\n",
+ "Epoch 476/3000\n",
+ " - 39s - loss: 0.3150 - acc: 0.9623 - val_loss: 0.9025 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00476: val_acc did not improve from 0.86765\n",
+ "Epoch 477/3000\n",
+ " - 39s - loss: 0.3332 - acc: 0.9570 - val_loss: 0.9184 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00477: val_acc did not improve from 0.86765\n",
+ "Epoch 478/3000\n",
+ " - 39s - loss: 0.3236 - acc: 0.9585 - val_loss: 0.8487 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 00478: val_acc did not improve from 0.86765\n",
+ "Epoch 479/3000\n",
+ " - 39s - loss: 0.3201 - acc: 0.9592 - val_loss: 0.8885 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00479: val_acc did not improve from 0.86765\n",
+ "Epoch 480/3000\n",
+ " - 39s - loss: 0.3304 - acc: 0.9562 - val_loss: 0.9052 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00480: val_acc did not improve from 0.86765\n",
+ "Epoch 481/3000\n",
+ " - 39s - loss: 0.3175 - acc: 0.9612 - val_loss: 0.8701 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00481: val_acc did not improve from 0.86765\n",
+ "Epoch 482/3000\n",
+ " - 40s - loss: 0.3281 - acc: 0.9573 - val_loss: 0.9248 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00482: val_acc did not improve from 0.86765\n",
+ "Epoch 483/3000\n",
+ " - 40s - loss: 0.3284 - acc: 0.9576 - val_loss: 0.9771 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00483: val_acc did not improve from 0.86765\n",
+ "Epoch 484/3000\n",
+ " - 39s - loss: 0.3366 - acc: 0.9570 - val_loss: 0.9807 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00484: val_acc did not improve from 0.86765\n",
+ "Epoch 485/3000\n",
+ " - 39s - loss: 0.3184 - acc: 0.9614 - val_loss: 0.9132 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00485: val_acc did not improve from 0.86765\n",
+ "Epoch 486/3000\n",
+ " - 39s - loss: 0.3115 - acc: 0.9617 - val_loss: 0.9153 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00486: val_acc did not improve from 0.86765\n",
+ "Epoch 487/3000\n",
+ " - 39s - loss: 0.3381 - acc: 0.9532 - val_loss: 0.9092 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00487: val_acc did not improve from 0.86765\n",
+ "Epoch 488/3000\n",
+ " - 39s - loss: 0.3372 - acc: 0.9574 - val_loss: 0.9252 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00488: val_acc did not improve from 0.86765\n",
+ "Epoch 489/3000\n",
+ " - 39s - loss: 0.3209 - acc: 0.9597 - val_loss: 0.9447 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00489: val_acc did not improve from 0.86765\n",
+ "Epoch 490/3000\n",
+ " - 39s - loss: 0.3298 - acc: 0.9591 - val_loss: 0.8678 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00490: val_acc did not improve from 0.86765\n",
+ "Epoch 491/3000\n",
+ " - 39s - loss: 0.3257 - acc: 0.9594 - val_loss: 0.8909 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00491: val_acc did not improve from 0.86765\n",
+ "Epoch 492/3000\n",
+ " - 39s - loss: 0.3173 - acc: 0.9597 - val_loss: 0.9157 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00492: val_acc did not improve from 0.86765\n",
+ "Epoch 493/3000\n",
+ " - 39s - loss: 0.3303 - acc: 0.9611 - val_loss: 0.9396 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00493: val_acc did not improve from 0.86765\n",
+ "Epoch 494/3000\n",
+ " - 39s - loss: 0.3222 - acc: 0.9609 - val_loss: 0.9693 - val_acc: 0.8470\n",
+ "\n",
+ "Epoch 00494: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00494: ReduceLROnPlateau reducing learning rate to 4.876749881077558e-05.\n",
+ "Epoch 495/3000\n",
+ " - 40s - loss: 0.3281 - acc: 0.9583 - val_loss: 0.9320 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00495: val_acc did not improve from 0.86765\n",
+ "Epoch 496/3000\n",
+ " - 39s - loss: 0.3316 - acc: 0.9574 - val_loss: 0.9316 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00496: val_acc did not improve from 0.86765\n",
+ "Epoch 497/3000\n",
+ " - 39s - loss: 0.3135 - acc: 0.9614 - val_loss: 0.8739 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00497: val_acc did not improve from 0.86765\n",
+ "Epoch 498/3000\n",
+ " - 39s - loss: 0.3212 - acc: 0.9586 - val_loss: 0.8895 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00498: val_acc did not improve from 0.86765\n",
+ "Epoch 499/3000\n",
+ " - 39s - loss: 0.3216 - acc: 0.9595 - val_loss: 0.8799 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00499: val_acc did not improve from 0.86765\n",
+ "Epoch 500/3000\n",
+ " - 39s - loss: 0.3230 - acc: 0.9583 - val_loss: 0.8783 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00500: val_acc did not improve from 0.86765\n",
+ "Epoch 501/3000\n",
+ " - 39s - loss: 0.3177 - acc: 0.9606 - val_loss: 0.9315 - val_acc: 0.8525\n",
+ "\n",
+ "Epoch 00501: val_acc did not improve from 0.86765\n",
+ "Epoch 502/3000\n",
+ " - 39s - loss: 0.3369 - acc: 0.9550 - val_loss: 0.9903 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00502: val_acc did not improve from 0.86765\n",
+ "Epoch 503/3000\n",
+ " - 39s - loss: 0.3324 - acc: 0.9603 - val_loss: 0.8952 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 00503: val_acc did not improve from 0.86765\n",
+ "Epoch 504/3000\n",
+ " - 40s - loss: 0.3390 - acc: 0.9556 - val_loss: 0.9544 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00504: val_acc did not improve from 0.86765\n",
+ "Epoch 505/3000\n",
+ " - 40s - loss: 0.3353 - acc: 0.9564 - val_loss: 0.9124 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00505: val_acc did not improve from 0.86765\n",
+ "Epoch 506/3000\n",
+ " - 39s - loss: 0.3210 - acc: 0.9594 - val_loss: 0.8594 - val_acc: 0.8603\n",
+ "\n",
+ "Epoch 00506: val_acc did not improve from 0.86765\n",
+ "Epoch 507/3000\n",
+ " - 39s - loss: 0.3219 - acc: 0.9573 - val_loss: 0.8915 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00507: val_acc did not improve from 0.86765\n",
+ "Epoch 508/3000\n",
+ " - 39s - loss: 0.3247 - acc: 0.9588 - val_loss: 0.8953 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00508: val_acc did not improve from 0.86765\n",
+ "Epoch 509/3000\n",
+ " - 39s - loss: 0.3191 - acc: 0.9607 - val_loss: 0.8783 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00509: val_acc did not improve from 0.86765\n",
+ "Epoch 510/3000\n",
+ " - 39s - loss: 0.3234 - acc: 0.9574 - val_loss: 0.9082 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00510: val_acc did not improve from 0.86765\n",
+ "Epoch 511/3000\n",
+ " - 39s - loss: 0.3298 - acc: 0.9562 - val_loss: 0.8984 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00511: val_acc did not improve from 0.86765\n",
+ "Epoch 512/3000\n",
+ " - 39s - loss: 0.3041 - acc: 0.9641 - val_loss: 0.8977 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00512: val_acc did not improve from 0.86765\n",
+ "Epoch 513/3000\n",
+ " - 39s - loss: 0.3270 - acc: 0.9555 - val_loss: 0.8402 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 00513: val_acc did not improve from 0.86765\n",
+ "Epoch 514/3000\n",
+ " - 40s - loss: 0.3115 - acc: 0.9606 - val_loss: 0.8794 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 00514: val_acc did not improve from 0.86765\n",
+ "Epoch 515/3000\n",
+ " - 39s - loss: 0.3252 - acc: 0.9562 - val_loss: 0.9451 - val_acc: 0.8455\n",
+ "\n",
+ "Epoch 00515: val_acc did not improve from 0.86765\n",
+ "Epoch 516/3000\n",
+ " - 39s - loss: 0.3219 - acc: 0.9597 - val_loss: 0.8295 - val_acc: 0.8657\n",
+ "\n",
+ "Epoch 00516: val_acc did not improve from 0.86765\n",
+ "Epoch 517/3000\n",
+ " - 39s - loss: 0.3225 - acc: 0.9577 - val_loss: 0.8308 - val_acc: 0.8638\n",
+ "\n",
+ "Epoch 00517: val_acc did not improve from 0.86765\n",
+ "Epoch 518/3000\n",
+ " - 39s - loss: 0.3120 - acc: 0.9614 - val_loss: 0.8791 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00518: val_acc did not improve from 0.86765\n",
+ "Epoch 519/3000\n",
+ " - 39s - loss: 0.3207 - acc: 0.9588 - val_loss: 0.8841 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00519: val_acc did not improve from 0.86765\n",
+ "Epoch 520/3000\n",
+ " - 39s - loss: 0.3166 - acc: 0.9603 - val_loss: 0.8913 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00520: val_acc did not improve from 0.86765\n",
+ "Epoch 521/3000\n",
+ " - 39s - loss: 0.3218 - acc: 0.9604 - val_loss: 0.8800 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00521: val_acc did not improve from 0.86765\n",
+ "Epoch 522/3000\n",
+ " - 39s - loss: 0.3278 - acc: 0.9586 - val_loss: 0.8885 - val_acc: 0.8455\n",
+ "\n",
+ "Epoch 00522: val_acc did not improve from 0.86765\n",
+ "Epoch 523/3000\n",
+ " - 40s - loss: 0.3203 - acc: 0.9577 - val_loss: 0.8893 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00523: val_acc did not improve from 0.86765\n",
+ "Epoch 524/3000\n",
+ " - 39s - loss: 0.3078 - acc: 0.9635 - val_loss: 0.8748 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00524: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00524: ReduceLROnPlateau reducing learning rate to 4.63291238702368e-05.\n",
+ "Epoch 525/3000\n",
+ " - 40s - loss: 0.3199 - acc: 0.9588 - val_loss: 0.9706 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00525: val_acc did not improve from 0.86765\n",
+ "Epoch 526/3000\n",
+ " - 40s - loss: 0.3191 - acc: 0.9583 - val_loss: 0.8217 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00526: val_acc did not improve from 0.86765\n",
+ "Epoch 527/3000\n",
+ " - 39s - loss: 0.3051 - acc: 0.9626 - val_loss: 0.9206 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00527: val_acc did not improve from 0.86765\n",
+ "Epoch 528/3000\n",
+ " - 39s - loss: 0.3100 - acc: 0.9618 - val_loss: 0.9187 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00528: val_acc did not improve from 0.86765\n",
+ "Epoch 529/3000\n",
+ " - 39s - loss: 0.3182 - acc: 0.9600 - val_loss: 0.8847 - val_acc: 0.8587\n",
+ "\n",
+ "Epoch 00529: val_acc did not improve from 0.86765\n",
+ "Epoch 530/3000\n",
+ " - 40s - loss: 0.3149 - acc: 0.9598 - val_loss: 0.8858 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00530: val_acc did not improve from 0.86765\n",
+ "Epoch 531/3000\n",
+ " - 39s - loss: 0.3266 - acc: 0.9588 - val_loss: 0.8866 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00531: val_acc did not improve from 0.86765\n",
+ "Epoch 532/3000\n",
+ " - 39s - loss: 0.3166 - acc: 0.9598 - val_loss: 0.8903 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 00532: val_acc did not improve from 0.86765\n",
+ "Epoch 533/3000\n",
+ " - 39s - loss: 0.3091 - acc: 0.9632 - val_loss: 0.8742 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00533: val_acc did not improve from 0.86765\n",
+ "Epoch 534/3000\n",
+ " - 39s - loss: 0.3330 - acc: 0.9564 - val_loss: 0.9164 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00534: val_acc did not improve from 0.86765\n",
+ "Epoch 535/3000\n",
+ " - 40s - loss: 0.3164 - acc: 0.9606 - val_loss: 0.9253 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00535: val_acc did not improve from 0.86765\n",
+ "Epoch 536/3000\n",
+ " - 39s - loss: 0.3114 - acc: 0.9585 - val_loss: 0.8726 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00536: val_acc did not improve from 0.86765\n",
+ "Epoch 537/3000\n",
+ " - 39s - loss: 0.3251 - acc: 0.9600 - val_loss: 0.9935 - val_acc: 0.8427\n",
+ "\n",
+ "Epoch 00537: val_acc did not improve from 0.86765\n",
+ "Epoch 538/3000\n",
+ " - 39s - loss: 0.3159 - acc: 0.9623 - val_loss: 0.9470 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00538: val_acc did not improve from 0.86765\n",
+ "Epoch 539/3000\n",
+ " - 39s - loss: 0.3156 - acc: 0.9591 - val_loss: 0.8687 - val_acc: 0.8630\n",
+ "\n",
+ "Epoch 00539: val_acc did not improve from 0.86765\n",
+ "Epoch 540/3000\n",
+ " - 39s - loss: 0.3105 - acc: 0.9612 - val_loss: 0.9334 - val_acc: 0.8447\n",
+ "\n",
+ "Epoch 00540: val_acc did not improve from 0.86765\n",
+ "Epoch 541/3000\n",
+ " - 40s - loss: 0.3124 - acc: 0.9598 - val_loss: 0.9087 - val_acc: 0.8529\n",
+ "\n",
+ "Epoch 00541: val_acc did not improve from 0.86765\n",
+ "Epoch 542/3000\n",
+ " - 39s - loss: 0.3080 - acc: 0.9615 - val_loss: 0.8623 - val_acc: 0.8622\n",
+ "\n",
+ "Epoch 00542: val_acc did not improve from 0.86765\n",
+ "Epoch 543/3000\n",
+ " - 39s - loss: 0.3236 - acc: 0.9583 - val_loss: 0.8804 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00543: val_acc did not improve from 0.86765\n",
+ "Epoch 544/3000\n",
+ " - 40s - loss: 0.3359 - acc: 0.9550 - val_loss: 0.8948 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00544: val_acc did not improve from 0.86765\n",
+ "Epoch 545/3000\n",
+ " - 39s - loss: 0.3287 - acc: 0.9595 - val_loss: 0.8948 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00545: val_acc did not improve from 0.86765\n",
+ "Epoch 546/3000\n",
+ " - 39s - loss: 0.3135 - acc: 0.9629 - val_loss: 0.9767 - val_acc: 0.8466\n",
+ "\n",
+ "Epoch 00546: val_acc did not improve from 0.86765\n",
+ "Epoch 547/3000\n",
+ " - 39s - loss: 0.3047 - acc: 0.9607 - val_loss: 0.9010 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00547: val_acc did not improve from 0.86765\n",
+ "Epoch 548/3000\n",
+ " - 39s - loss: 0.3219 - acc: 0.9603 - val_loss: 0.9056 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00548: val_acc did not improve from 0.86765\n",
+ "Epoch 549/3000\n",
+ " - 40s - loss: 0.3148 - acc: 0.9621 - val_loss: 0.8763 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00549: val_acc did not improve from 0.86765\n",
+ "Epoch 550/3000\n",
+ " - 39s - loss: 0.3018 - acc: 0.9630 - val_loss: 0.9044 - val_acc: 0.8443\n",
+ "\n",
+ "Epoch 00550: val_acc did not improve from 0.86765\n",
+ "Epoch 551/3000\n",
+ " - 39s - loss: 0.3134 - acc: 0.9621 - val_loss: 0.9063 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00551: val_acc did not improve from 0.86765\n",
+ "Epoch 552/3000\n",
+ " - 39s - loss: 0.3164 - acc: 0.9630 - val_loss: 0.8837 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00552: val_acc did not improve from 0.86765\n",
+ "Epoch 553/3000\n",
+ " - 39s - loss: 0.3270 - acc: 0.9586 - val_loss: 0.8592 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 00553: val_acc did not improve from 0.86765\n",
+ "Epoch 554/3000\n",
+ " - 39s - loss: 0.3205 - acc: 0.9588 - val_loss: 0.9231 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00554: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00554: ReduceLROnPlateau reducing learning rate to 4.4012669059156905e-05.\n",
+ "Epoch 555/3000\n",
+ " - 39s - loss: 0.3312 - acc: 0.9577 - val_loss: 0.8746 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00555: val_acc did not improve from 0.86765\n",
+ "Epoch 556/3000\n",
+ " - 39s - loss: 0.3002 - acc: 0.9606 - val_loss: 0.8975 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00556: val_acc did not improve from 0.86765\n",
+ "Epoch 557/3000\n",
+ " - 39s - loss: 0.3142 - acc: 0.9614 - val_loss: 0.8877 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00557: val_acc did not improve from 0.86765\n",
+ "Epoch 558/3000\n",
+ " - 39s - loss: 0.3120 - acc: 0.9601 - val_loss: 0.9035 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00558: val_acc did not improve from 0.86765\n",
+ "Epoch 559/3000\n",
+ " - 39s - loss: 0.3106 - acc: 0.9635 - val_loss: 0.8927 - val_acc: 0.8459\n",
+ "\n",
+ "Epoch 00559: val_acc did not improve from 0.86765\n",
+ "Epoch 560/3000\n",
+ " - 39s - loss: 0.3132 - acc: 0.9592 - val_loss: 0.8772 - val_acc: 0.8509\n",
+ "\n",
+ "Epoch 00560: val_acc did not improve from 0.86765\n",
+ "Epoch 561/3000\n",
+ " - 39s - loss: 0.3115 - acc: 0.9624 - val_loss: 0.9050 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00561: val_acc did not improve from 0.86765\n",
+ "Epoch 562/3000\n",
+ " - 39s - loss: 0.3087 - acc: 0.9621 - val_loss: 0.8773 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00564: val_acc did not improve from 0.86765\n",
+ "Epoch 565/3000\n",
+ " - 39s - loss: 0.3089 - acc: 0.9632 - val_loss: 0.9398 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00565: val_acc did not improve from 0.86765\n",
+ "Epoch 566/3000\n",
+ " - 40s - loss: 0.3065 - acc: 0.9612 - val_loss: 0.9517 - val_acc: 0.8482\n",
+ "\n",
+ "Epoch 00566: val_acc did not improve from 0.86765\n",
+ "Epoch 567/3000\n",
+ " - 40s - loss: 0.3049 - acc: 0.9630 - val_loss: 0.9277 - val_acc: 0.8470\n",
+ "\n",
+ "Epoch 00567: val_acc did not improve from 0.86765\n",
+ "Epoch 568/3000\n",
+ " - 40s - loss: 0.3161 - acc: 0.9567 - val_loss: 0.8848 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00568: val_acc did not improve from 0.86765\n",
+ "Epoch 569/3000\n",
+ " - 39s - loss: 0.3028 - acc: 0.9624 - val_loss: 0.9450 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00569: val_acc did not improve from 0.86765\n",
+ "Epoch 570/3000\n",
+ " - 39s - loss: 0.3059 - acc: 0.9601 - val_loss: 0.9400 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00570: val_acc did not improve from 0.86765\n",
+ "Epoch 571/3000\n",
+ " - 39s - loss: 0.3174 - acc: 0.9626 - val_loss: 0.9213 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00571: val_acc did not improve from 0.86765\n",
+ "Epoch 572/3000\n",
+ " - 39s - loss: 0.3332 - acc: 0.9565 - val_loss: 0.9168 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00572: val_acc did not improve from 0.86765\n",
+ "Epoch 573/3000\n",
+ " - 39s - loss: 0.3111 - acc: 0.9617 - val_loss: 0.9227 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00573: val_acc did not improve from 0.86765\n",
+ "Epoch 574/3000\n",
+ " - 39s - loss: 0.3135 - acc: 0.9582 - val_loss: 0.9085 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00574: val_acc did not improve from 0.86765\n",
+ "Epoch 575/3000\n",
+ " - 39s - loss: 0.3082 - acc: 0.9589 - val_loss: 0.9385 - val_acc: 0.8494\n",
+ "\n",
+ "Epoch 00575: val_acc did not improve from 0.86765\n",
+ "Epoch 576/3000\n",
+ " - 39s - loss: 0.3147 - acc: 0.9614 - val_loss: 0.8738 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 00576: val_acc did not improve from 0.86765\n",
+ "Epoch 577/3000\n",
+ " - 39s - loss: 0.3130 - acc: 0.9606 - val_loss: 0.9327 - val_acc: 0.8497\n",
+ "\n",
+ "Epoch 00577: val_acc did not improve from 0.86765\n",
+ "Epoch 578/3000\n",
+ " - 39s - loss: 0.3073 - acc: 0.9598 - val_loss: 0.9454 - val_acc: 0.8439\n",
+ "\n",
+ "Epoch 00578: val_acc did not improve from 0.86765\n",
+ "Epoch 579/3000\n",
+ " - 39s - loss: 0.3095 - acc: 0.9617 - val_loss: 0.9060 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00579: val_acc did not improve from 0.86765\n",
+ "Epoch 580/3000\n",
+ " - 39s - loss: 0.3184 - acc: 0.9598 - val_loss: 0.8865 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00580: val_acc did not improve from 0.86765\n",
+ "Epoch 581/3000\n",
+ " - 39s - loss: 0.3052 - acc: 0.9624 - val_loss: 0.8943 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00581: val_acc did not improve from 0.86765\n",
+ "Epoch 582/3000\n",
+ " - 39s - loss: 0.3107 - acc: 0.9618 - val_loss: 0.9659 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00582: val_acc did not improve from 0.86765\n",
+ "Epoch 583/3000\n",
+ " - 39s - loss: 0.3174 - acc: 0.9611 - val_loss: 0.9223 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00583: val_acc did not improve from 0.86765\n",
+ "Epoch 584/3000\n",
+ " - 39s - loss: 0.2990 - acc: 0.9624 - val_loss: 0.9150 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00584: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00584: ReduceLROnPlateau reducing learning rate to 4.181203439657111e-05.\n",
+ "Epoch 585/3000\n",
+ " - 39s - loss: 0.3184 - acc: 0.9577 - val_loss: 0.9063 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00585: val_acc did not improve from 0.86765\n",
+ "Epoch 586/3000\n",
+ " - 39s - loss: 0.3116 - acc: 0.9614 - val_loss: 0.8674 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 00586: val_acc did not improve from 0.86765\n",
+ "Epoch 587/3000\n",
+ " - 39s - loss: 0.3115 - acc: 0.9629 - val_loss: 0.8713 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00587: val_acc did not improve from 0.86765\n",
+ "Epoch 588/3000\n",
+ " - 39s - loss: 0.3092 - acc: 0.9627 - val_loss: 0.9005 - val_acc: 0.8568\n",
+ "\n",
+ "Epoch 00588: val_acc did not improve from 0.86765\n",
+ "Epoch 589/3000\n",
+ " - 40s - loss: 0.3278 - acc: 0.9556 - val_loss: 0.8666 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00589: val_acc did not improve from 0.86765\n",
+ "Epoch 590/3000\n",
+ " - 39s - loss: 0.3093 - acc: 0.9611 - val_loss: 0.8830 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00590: val_acc did not improve from 0.86765\n",
+ "Epoch 591/3000\n",
+ " - 39s - loss: 0.3041 - acc: 0.9595 - val_loss: 0.8750 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00591: val_acc did not improve from 0.86765\n",
+ "Epoch 592/3000\n",
+ " - 39s - loss: 0.3028 - acc: 0.9633 - val_loss: 0.8818 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 00592: val_acc did not improve from 0.86765\n",
+ "Epoch 593/3000\n",
+ " - 39s - loss: 0.3133 - acc: 0.9576 - val_loss: 0.9473 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00593: val_acc did not improve from 0.86765\n",
+ "Epoch 594/3000\n",
+ " - 40s - loss: 0.3150 - acc: 0.9597 - val_loss: 0.9213 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00594: val_acc did not improve from 0.86765\n",
+ "Epoch 595/3000\n",
+ " - 39s - loss: 0.3025 - acc: 0.9630 - val_loss: 0.9128 - val_acc: 0.8478\n",
+ "\n",
+ "Epoch 00595: val_acc did not improve from 0.86765\n",
+ "Epoch 596/3000\n",
+ " - 39s - loss: 0.2899 - acc: 0.9645 - val_loss: 0.9197 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00596: val_acc did not improve from 0.86765\n",
+ "Epoch 597/3000\n",
+ " - 39s - loss: 0.2943 - acc: 0.9633 - val_loss: 0.9544 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00597: val_acc did not improve from 0.86765\n",
+ "Epoch 598/3000\n",
+ " - 38s - loss: 0.3093 - acc: 0.9623 - val_loss: 0.9325 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00598: val_acc did not improve from 0.86765\n",
+ "Epoch 599/3000\n",
+ " - 39s - loss: 0.3032 - acc: 0.9626 - val_loss: 0.9353 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00599: val_acc did not improve from 0.86765\n",
+ "Epoch 600/3000\n",
+ " - 39s - loss: 0.2977 - acc: 0.9621 - val_loss: 0.9069 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00600: val_acc did not improve from 0.86765\n",
+ "Epoch 601/3000\n",
+ " - 39s - loss: 0.3101 - acc: 0.9614 - val_loss: 0.9313 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00601: val_acc did not improve from 0.86765\n",
+ "Epoch 602/3000\n",
+ " - 39s - loss: 0.3042 - acc: 0.9639 - val_loss: 0.9277 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00602: val_acc did not improve from 0.86765\n",
+ "Epoch 603/3000\n",
+ " - 39s - loss: 0.3144 - acc: 0.9589 - val_loss: 0.8583 - val_acc: 0.8626\n",
+ "\n",
+ "Epoch 00603: val_acc did not improve from 0.86765\n",
+ "Epoch 604/3000\n",
+ " - 40s - loss: 0.3224 - acc: 0.9591 - val_loss: 0.8898 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00604: val_acc did not improve from 0.86765\n",
+ "Epoch 605/3000\n",
+ " - 39s - loss: 0.3099 - acc: 0.9611 - val_loss: 0.8822 - val_acc: 0.8641\n",
+ "\n",
+ "Epoch 00605: val_acc did not improve from 0.86765\n",
+ "Epoch 606/3000\n",
+ " - 40s - loss: 0.3005 - acc: 0.9627 - val_loss: 0.9255 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00606: val_acc did not improve from 0.86765\n",
+ "Epoch 607/3000\n",
+ " - 39s - loss: 0.3039 - acc: 0.9620 - val_loss: 0.8470 - val_acc: 0.8610\n",
+ "\n",
+ "Epoch 00607: val_acc did not improve from 0.86765\n",
+ "Epoch 608/3000\n",
+ " - 39s - loss: 0.3012 - acc: 0.9611 - val_loss: 0.8784 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00608: val_acc did not improve from 0.86765\n",
+ "Epoch 609/3000\n",
+ " - 39s - loss: 0.2958 - acc: 0.9641 - val_loss: 0.9520 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00609: val_acc did not improve from 0.86765\n",
+ "Epoch 610/3000\n",
+ " - 39s - loss: 0.3149 - acc: 0.9615 - val_loss: 0.8923 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00610: val_acc did not improve from 0.86765\n",
+ "Epoch 611/3000\n",
+ " - 39s - loss: 0.3170 - acc: 0.9623 - val_loss: 0.9307 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00611: val_acc did not improve from 0.86765\n",
+ "Epoch 612/3000\n",
+ " - 39s - loss: 0.3073 - acc: 0.9653 - val_loss: 0.8659 - val_acc: 0.8599\n",
+ "\n",
+ "Epoch 00612: val_acc did not improve from 0.86765\n",
+ "Epoch 613/3000\n",
+ " - 40s - loss: 0.2899 - acc: 0.9662 - val_loss: 0.9281 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00613: val_acc did not improve from 0.86765\n",
+ "Epoch 614/3000\n",
+ " - 39s - loss: 0.3131 - acc: 0.9591 - val_loss: 0.8495 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 00614: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00614: ReduceLROnPlateau reducing learning rate to 3.9721430948702614e-05.\n",
+ "Epoch 615/3000\n",
+ " - 39s - loss: 0.2953 - acc: 0.9629 - val_loss: 0.9233 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00615: val_acc did not improve from 0.86765\n",
+ "Epoch 616/3000\n",
+ " - 39s - loss: 0.3051 - acc: 0.9621 - val_loss: 0.9077 - val_acc: 0.8564\n",
+ "\n",
+ "Epoch 00616: val_acc did not improve from 0.86765\n",
+ "Epoch 617/3000\n",
+ " - 40s - loss: 0.2863 - acc: 0.9662 - val_loss: 0.8860 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00617: val_acc did not improve from 0.86765\n",
+ "Epoch 618/3000\n",
+ " - 39s - loss: 0.3088 - acc: 0.9597 - val_loss: 0.8963 - val_acc: 0.8540\n",
+ "\n",
+ "Epoch 00618: val_acc did not improve from 0.86765\n",
+ "Epoch 619/3000\n",
+ " - 39s - loss: 0.3013 - acc: 0.9650 - val_loss: 0.9202 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00619: val_acc did not improve from 0.86765\n",
+ "Epoch 620/3000\n",
+ " - 39s - loss: 0.3097 - acc: 0.9604 - val_loss: 0.9244 - val_acc: 0.8505\n",
+ "\n",
+ "Epoch 00620: val_acc did not improve from 0.86765\n",
+ "Epoch 621/3000\n",
+ " - 39s - loss: 0.3048 - acc: 0.9626 - val_loss: 0.9033 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00621: val_acc did not improve from 0.86765\n",
+ "Epoch 622/3000\n",
+ " - 39s - loss: 0.2948 - acc: 0.9648 - val_loss: 0.8670 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00622: val_acc did not improve from 0.86765\n",
+ "Epoch 623/3000\n",
+ " - 39s - loss: 0.2986 - acc: 0.9647 - val_loss: 0.8519 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00623: val_acc did not improve from 0.86765\n",
+ "Epoch 624/3000\n",
+ " - 39s - loss: 0.3045 - acc: 0.9632 - val_loss: 0.9180 - val_acc: 0.8560\n",
+ "\n",
+ "Epoch 00624: val_acc did not improve from 0.86765\n",
+ "Epoch 625/3000\n",
+ " - 39s - loss: 0.3078 - acc: 0.9595 - val_loss: 0.9361 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00625: val_acc did not improve from 0.86765\n",
+ "Epoch 626/3000\n",
+ " - 39s - loss: 0.3033 - acc: 0.9620 - val_loss: 0.8913 - val_acc: 0.8548\n",
+ "\n",
+ "Epoch 00626: val_acc did not improve from 0.86765\n",
+ "Epoch 627/3000\n",
+ " - 39s - loss: 0.2974 - acc: 0.9641 - val_loss: 0.8778 - val_acc: 0.8575\n",
+ "\n",
+ "Epoch 00627: val_acc did not improve from 0.86765\n",
+ "Epoch 628/3000\n",
+ " - 39s - loss: 0.3035 - acc: 0.9609 - val_loss: 0.8390 - val_acc: 0.8606\n",
+ "\n",
+ "Epoch 00628: val_acc did not improve from 0.86765\n",
+ "Epoch 629/3000\n",
+ " - 40s - loss: 0.2961 - acc: 0.9639 - val_loss: 0.9099 - val_acc: 0.8556\n",
+ "\n",
+ "Epoch 00629: val_acc did not improve from 0.86765\n",
+ "Epoch 630/3000\n",
+ " - 39s - loss: 0.3082 - acc: 0.9594 - val_loss: 0.9248 - val_acc: 0.8517\n",
+ "\n",
+ "Epoch 00630: val_acc did not improve from 0.86765\n",
+ "Epoch 631/3000\n",
+ " - 40s - loss: 0.2951 - acc: 0.9651 - val_loss: 0.8578 - val_acc: 0.8571\n",
+ "\n",
+ "Epoch 00631: val_acc did not improve from 0.86765\n",
+ "Epoch 632/3000\n",
+ " - 39s - loss: 0.2976 - acc: 0.9635 - val_loss: 0.9248 - val_acc: 0.8501\n",
+ "\n",
+ "Epoch 00632: val_acc did not improve from 0.86765\n",
+ "Epoch 633/3000\n",
+ " - 39s - loss: 0.2952 - acc: 0.9662 - val_loss: 0.9488 - val_acc: 0.8486\n",
+ "\n",
+ "Epoch 00633: val_acc did not improve from 0.86765\n",
+ "Epoch 634/3000\n",
+ " - 39s - loss: 0.2984 - acc: 0.9651 - val_loss: 0.8497 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 00634: val_acc did not improve from 0.86765\n",
+ "Epoch 635/3000\n",
+ " - 39s - loss: 0.2962 - acc: 0.9627 - val_loss: 0.9404 - val_acc: 0.8490\n",
+ "\n",
+ "Epoch 00635: val_acc did not improve from 0.86765\n",
+ "Epoch 636/3000\n",
+ " - 39s - loss: 0.3032 - acc: 0.9617 - val_loss: 0.8722 - val_acc: 0.8583\n",
+ "\n",
+ "Epoch 00636: val_acc did not improve from 0.86765\n",
+ "Epoch 637/3000\n",
+ " - 39s - loss: 0.3138 - acc: 0.9592 - val_loss: 0.8681 - val_acc: 0.8533\n",
+ "\n",
+ "Epoch 00637: val_acc did not improve from 0.86765\n",
+ "Epoch 638/3000\n",
+ " - 39s - loss: 0.2964 - acc: 0.9638 - val_loss: 0.9499 - val_acc: 0.8513\n",
+ "\n",
+ "Epoch 00638: val_acc did not improve from 0.86765\n",
+ "Epoch 639/3000\n",
+ " - 39s - loss: 0.3060 - acc: 0.9618 - val_loss: 0.9046 - val_acc: 0.8536\n",
+ "\n",
+ "Epoch 00639: val_acc did not improve from 0.86765\n",
+ "Epoch 640/3000\n",
+ " - 39s - loss: 0.2985 - acc: 0.9601 - val_loss: 0.9265 - val_acc: 0.8552\n",
+ "\n",
+ "Epoch 00640: val_acc did not improve from 0.86765\n",
+ "Epoch 641/3000\n",
+ " - 39s - loss: 0.3079 - acc: 0.9600 - val_loss: 0.8866 - val_acc: 0.8579\n",
+ "\n",
+ "Epoch 00641: val_acc did not improve from 0.86765\n",
+ "Epoch 642/3000\n",
+ " - 39s - loss: 0.3059 - acc: 0.9606 - val_loss: 0.8764 - val_acc: 0.8595\n",
+ "\n",
+ "Epoch 00642: val_acc did not improve from 0.86765\n",
+ "Epoch 643/3000\n",
+ " - 39s - loss: 0.2926 - acc: 0.9636 - val_loss: 0.9022 - val_acc: 0.8521\n",
+ "\n",
+ "Epoch 00643: val_acc did not improve from 0.86765\n",
+ "Epoch 644/3000\n",
+ " - 39s - loss: 0.2894 - acc: 0.9656 - val_loss: 0.8993 - val_acc: 0.8544\n",
+ "\n",
+ "Epoch 00644: val_acc did not improve from 0.86765\n",
+ "\n",
+ "Epoch 00644: ReduceLROnPlateau reducing learning rate to 3.773536009248346e-05.\n",
+ "Epoch 645/3000\n",
+ " - 39s - loss: 0.2983 - acc: 0.9624 - val_loss: 0.8809 - val_acc: 0.8618\n",
+ "\n",
+ "Epoch 00645: val_acc did not improve from 0.86765\n",
+ "Epoch 646/3000\n",
+ " - 40s - loss: 0.2960 - acc: 0.9615 - val_loss: 0.8956 - val_acc: 0.8591\n",
+ "\n",
+ "Epoch 00646: val_acc did not improve from 0.86765\n",
+ "Epoch 647/3000\n",
+ " - 39s - los