From 8a0ae50dd0243fdbaa79e75c3aa94ba17f99520e Mon Sep 17 00:00:00 2001 From: Arthur DANJOU Date: Wed, 7 May 2025 11:02:43 +0200 Subject: [PATCH] Update execution counts and model definitions in TP6 Keras introduction notebook --- M1/Statistical Learning/TP6_keras_intro.ipynb | 894 ++++++++++++++++-- 1 file changed, 802 insertions(+), 92 deletions(-) diff --git a/M1/Statistical Learning/TP6_keras_intro.ipynb b/M1/Statistical Learning/TP6_keras_intro.ipynb index f8d62a7..b1fd7aa 100644 --- a/M1/Statistical Learning/TP6_keras_intro.ipynb +++ b/M1/Statistical Learning/TP6_keras_intro.ipynb @@ -80,7 +80,7 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": 2, "id": "5260add2-2092-4849-b39b-0b4416d60275", "metadata": { "colab": { @@ -110,7 +110,7 @@ }, { "cell_type": "code", - "execution_count": 38, + "execution_count": 3, "id": "cf702fe0-4b88-441e-a6c1-73fd5c57111f", "metadata": { "colab": { @@ -126,7 +126,7 @@ "((60000, 28, 28), dtype('uint8'))" ] }, - "execution_count": 38, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } @@ -147,7 +147,7 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": 4, "id": "aoanwQnmYa3K", "metadata": { "id": "aoanwQnmYa3K" @@ -177,7 +177,7 @@ }, { "cell_type": "code", - "execution_count": 40, + "execution_count": 5, "id": "d5d925c1-6a53-4a9d-99ee-4ebb1a9c9026", "metadata": { "id": "d5d925c1-6a53-4a9d-99ee-4ebb1a9c9026" @@ -211,7 +211,7 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": 6, "id": "731ad9e7-57ae-47c5-b50d-5d77c28216bd", "metadata": { "id": "731ad9e7-57ae-47c5-b50d-5d77c28216bd" @@ -234,7 +234,7 @@ }, { "cell_type": "code", - "execution_count": 42, + "execution_count": 7, "id": "WDW-zdxKxv13", "metadata": { "id": "WDW-zdxKxv13" @@ -273,7 +273,7 @@ }, { "cell_type": "code", - "execution_count": 43, + "execution_count": 8, "id": "b2e30200-0700-435f-89cb-98e0ad0440bc", "metadata": { "colab": { @@ -321,7 +321,7 @@ }, { "cell_type": "code", - "execution_count": 44, + "execution_count": 9, "id": "ed84911d-a6d5-484d-9ba6-97570069a4fb", "metadata": { "id": "ed84911d-a6d5-484d-9ba6-97570069a4fb" @@ -391,7 +391,7 @@ }, { "cell_type": "code", - "execution_count": 45, + "execution_count": 10, "id": "b7d520b6-738e-413d-bf00-a47cc71c1c93", "metadata": { "colab": { @@ -405,11 +405,11 @@ { "data": { "text/html": [ - "
Model: \"sequential_3\"\n",
+              "
Model: \"sequential\"\n",
               "
\n" ], "text/plain": [ - "\u001b[1mModel: \"sequential_3\"\u001b[0m\n" + "\u001b[1mModel: \"sequential\"\u001b[0m\n" ] }, "metadata": {}, @@ -421,13 +421,13 @@ "
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
               "┃ Layer (type)                     Output Shape                  Param # ┃\n",
               "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
-              "│ flatten_3 (Flatten)             │ (None, 784)            │             0 │\n",
+              "│ flatten (Flatten)               │ (None, 784)            │             0 │\n",
               "├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
-              "│ dense_9 (Dense)                 │ (None, 300)            │       235,500 │\n",
+              "│ dense (Dense)                   │ (None, 300)            │       235,500 │\n",
               "├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
-              "│ dense_10 (Dense)                │ (None, 100)            │        30,100 │\n",
+              "│ dense_1 (Dense)                 │ (None, 100)            │        30,100 │\n",
               "├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
-              "│ dense_11 (Dense)                │ (None, 10)             │         1,010 │\n",
+              "│ dense_2 (Dense)                 │ (None, 10)             │         1,010 │\n",
               "└─────────────────────────────────┴────────────────────────┴───────────────┘\n",
               "
\n" ], @@ -435,13 +435,13 @@ "┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n", "┃\u001b[1m \u001b[0m\u001b[1mLayer (type) \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1mOutput Shape \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m Param #\u001b[0m\u001b[1m \u001b[0m┃\n", "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n", - "│ flatten_3 (\u001b[38;5;33mFlatten\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m784\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n", + "│ flatten (\u001b[38;5;33mFlatten\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m784\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n", "├─────────────────────────────────┼────────────────────────┼───────────────┤\n", - "│ dense_9 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m300\u001b[0m) │ \u001b[38;5;34m235,500\u001b[0m │\n", + "│ dense (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m300\u001b[0m) │ \u001b[38;5;34m235,500\u001b[0m │\n", "├─────────────────────────────────┼────────────────────────┼───────────────┤\n", - "│ dense_10 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m100\u001b[0m) │ \u001b[38;5;34m30,100\u001b[0m │\n", + "│ dense_1 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m100\u001b[0m) │ \u001b[38;5;34m30,100\u001b[0m │\n", "├─────────────────────────────────┼────────────────────────┼───────────────┤\n", - "│ dense_11 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m10\u001b[0m) │ \u001b[38;5;34m1,010\u001b[0m │\n", + "│ dense_2 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m10\u001b[0m) │ \u001b[38;5;34m1,010\u001b[0m │\n", "└─────────────────────────────────┴────────────────────────┴───────────────┘\n" ] }, @@ -527,7 +527,7 @@ }, { "cell_type": "code", - "execution_count": 46, + "execution_count": 11, "id": "Khr8wuf_DKW-", "metadata": { "colab": { @@ -555,7 +555,7 @@ " -0.02017345, 0.07210501]], shape=(784, 300), dtype=float32)" ] }, - "execution_count": 46, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -568,7 +568,7 @@ }, { "cell_type": "code", - "execution_count": 47, + "execution_count": 12, "id": "S__6iEM6NwHA", "metadata": { "colab": { @@ -601,7 +601,7 @@ " 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)" ] }, - "execution_count": 47, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } @@ -670,7 +670,7 @@ }, { "cell_type": "code", - "execution_count": 48, + "execution_count": 13, "id": "3fcfe918-d745-4c66-8350-dd89e34ac93c", "metadata": { "id": "3fcfe918-d745-4c66-8350-dd89e34ac93c" @@ -722,7 +722,7 @@ }, { "cell_type": "code", - "execution_count": 49, + "execution_count": 14, "id": "e784cc36-b04c-4aca-abfc-9fc081fd726b", "metadata": { "colab": { @@ -737,7 +737,7 @@ "output_type": "stream", "text": [ "Epoch 1/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7850 - loss: 0.6068 - val_accuracy: 0.8392 - val_loss: 0.4062\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m4s\u001b[0m 2ms/step - accuracy: 0.7850 - loss: 0.6068 - val_accuracy: 0.8392 - val_loss: 0.4062\n", "Epoch 2/60\n", "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8627 - 
loss: 0.3769 - val_accuracy: 0.8496 - val_loss: 0.3903\n", "Epoch 3/60\n", @@ -767,7 +767,7 @@ "Epoch 15/60\n", "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9324 - loss: 0.1763 - val_accuracy: 0.8736 - val_loss: 0.4419\n", "Epoch 16/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9362 - loss: 0.1691 - val_accuracy: 0.8732 - val_loss: 0.4577\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m5s\u001b[0m 3ms/step - accuracy: 0.9362 - loss: 0.1691 - val_accuracy: 0.8732 - val_loss: 0.4577\n", "Epoch 17/60\n", "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9379 - loss: 0.1626 - val_accuracy: 0.8760 - val_loss: 0.4588\n", "Epoch 18/60\n", @@ -781,9 +781,9 @@ "Epoch 22/60\n", "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9448 - loss: 0.1445 - val_accuracy: 0.8808 - val_loss: 0.4766\n", "Epoch 23/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9471 - loss: 0.1395 - val_accuracy: 0.8732 - val_loss: 0.5531\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 1ms/step - accuracy: 0.9471 - loss: 0.1395 - val_accuracy: 0.8732 - val_loss: 0.5531\n", "Epoch 24/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9481 - loss: 0.1316 - val_accuracy: 0.8692 - val_loss: 0.5819\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 1ms/step - accuracy: 0.9481 - loss: 0.1316 - val_accuracy: 0.8692 - val_loss: 0.5819\n", "Epoch 25/60\n", "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9482 - loss: 0.1355 - val_accuracy: 0.8780 - val_loss: 0.5516\n", "Epoch 26/60\n", @@ -799,25 +799,25 @@ "Epoch 31/60\n", "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9550 - loss: 0.1166 - val_accuracy: 0.8772 - val_loss: 0.6220\n", "Epoch 32/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9566 - loss: 0.1126 - val_accuracy: 0.8652 - val_loss: 0.6661\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m4s\u001b[0m 2ms/step - accuracy: 0.9566 - loss: 0.1126 - val_accuracy: 0.8652 - val_loss: 0.6661\n", "Epoch 33/60\n", "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9586 - loss: 0.1101 - val_accuracy: 0.8742 - val_loss: 0.6406\n", "Epoch 34/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9555 - loss: 0.1156 - val_accuracy: 0.8818 - val_loss: 0.6403\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 1ms/step - accuracy: 0.9555 - loss: 0.1156 - val_accuracy: 0.8818 - val_loss: 0.6403\n", "Epoch 
35/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9604 - loss: 0.1054 - val_accuracy: 0.8742 - val_loss: 0.6165\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 1ms/step - accuracy: 0.9604 - loss: 0.1054 - val_accuracy: 0.8742 - val_loss: 0.6165\n", "Epoch 36/60\n", "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9610 - loss: 0.1020 - val_accuracy: 0.8746 - val_loss: 0.6116\n", "Epoch 37/60\n", "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9621 - loss: 0.0999 - val_accuracy: 0.8772 - val_loss: 0.6669\n", "Epoch 38/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9618 - loss: 0.0983 - val_accuracy: 0.8732 - val_loss: 0.7090\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 1ms/step - accuracy: 0.9618 - loss: 0.0983 - val_accuracy: 0.8732 - val_loss: 0.7090\n", "Epoch 39/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9616 - loss: 0.1001 - val_accuracy: 0.8822 - val_loss: 0.6457\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 1ms/step - accuracy: 0.9616 - loss: 0.1001 - val_accuracy: 0.8822 - val_loss: 0.6457\n", "Epoch 40/60\n", "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9631 - loss: 0.0970 - val_accuracy: 0.8748 - val_loss: 0.7724\n", "Epoch 41/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9637 - loss: 0.0975 - val_accuracy: 0.8788 - val_loss: 0.6992\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 1ms/step - accuracy: 0.9637 - loss: 0.0975 - val_accuracy: 0.8788 - val_loss: 0.6992\n", "Epoch 42/60\n", "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9637 - loss: 0.0961 - val_accuracy: 0.8720 - val_loss: 0.7026\n", "Epoch 43/60\n", @@ -918,7 +918,7 @@ }, { "cell_type": "code", - "execution_count": 50, + "execution_count": 15, "id": "_UQsOj8JPc3q", "metadata": { "colab": { @@ -996,7 +996,7 @@ }, { "cell_type": "code", - "execution_count": 54, + "execution_count": 16, "id": "MUnWm1MfyBBk", "metadata": { "id": "MUnWm1MfyBBk" @@ -1006,7 +1006,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[1m7/7\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step \n" + "\u001b[1m7/7\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step \n" ] } ], @@ -1028,7 +1028,7 @@ }, { "cell_type": "code", - "execution_count": 55, + "execution_count": 17, "id": "DBsp72CAqRef", "metadata": { "colab": { @@ -1043,34 +1043,34 @@ "output_type": "stream", "text": [ "Epoch 1/10\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7864 - loss: 0.5990 - val_accuracy: 0.8298 - 
val_loss: 0.4282\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7866 - loss: 0.5999 - val_accuracy: 0.8326 - val_loss: 0.4162\n", "Epoch 2/10\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8627 - loss: 0.3785 - val_accuracy: 0.8450 - val_loss: 0.3963\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8624 - loss: 0.3776 - val_accuracy: 0.8512 - val_loss: 0.3772\n", "Epoch 3/10\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8772 - loss: 0.3354 - val_accuracy: 0.8544 - val_loss: 0.3724\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8763 - loss: 0.3341 - val_accuracy: 0.8502 - val_loss: 0.3915\n", "Epoch 4/10\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8889 - loss: 0.3041 - val_accuracy: 0.8632 - val_loss: 0.3656\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 1ms/step - accuracy: 0.8875 - loss: 0.3049 - val_accuracy: 0.8592 - val_loss: 0.3675\n", "Epoch 5/10\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8957 - loss: 0.2829 - val_accuracy: 0.8686 - val_loss: 0.3534\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8936 - loss: 0.2835 - val_accuracy: 0.8720 - val_loss: 0.3465\n", "Epoch 6/10\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9019 - loss: 0.2651 - val_accuracy: 0.8746 - val_loss: 0.3442\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9017 - loss: 0.2663 - val_accuracy: 0.8752 - val_loss: 0.3437\n", "Epoch 7/10\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9071 - loss: 0.2491 - val_accuracy: 0.8844 - val_loss: 0.3403\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9065 - loss: 0.2499 - val_accuracy: 0.8752 - val_loss: 0.3545\n", "Epoch 8/10\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9088 - loss: 0.2387 - val_accuracy: 0.8840 - val_loss: 0.3357\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9102 - loss: 0.2404 - val_accuracy: 0.8810 - val_loss: 0.3369\n", "Epoch 9/10\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9157 - loss: 0.2245 - val_accuracy: 0.8834 - val_loss: 0.3541\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9145 - loss: 0.2271 - val_accuracy: 0.8740 - val_loss: 0.3714\n", 
"Epoch 10/10\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9198 - loss: 0.2157 - val_accuracy: 0.8714 - val_loss: 0.4132\n" + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9161 - loss: 0.2195 - val_accuracy: 0.8816 - val_loss: 0.3601\n" ] }, { "data": { "text/plain": [ - "" + "" ] }, - "execution_count": 55, + "execution_count": 17, "metadata": {}, "output_type": "execute_result" } @@ -1113,7 +1113,7 @@ }, { "cell_type": "code", - "execution_count": 56, + "execution_count": 18, "id": "7w49W7ecsb42", "metadata": { "colab": { @@ -1127,7 +1127,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[1m7/7\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step \n" + "\u001b[1m7/7\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 5ms/step \n" ] } ], @@ -1152,7 +1152,7 @@ }, { "cell_type": "code", - "execution_count": 57, + "execution_count": 19, "id": "Hf7QyWqMFnUs", "metadata": { "colab": { @@ -1166,11 +1166,11 @@ "data": { "text/plain": [ "array([0.001, 0.393, 0.343, 0. , 0. , 0.001, 0.023, 0.189, 0. ,\n", - " 0. , 0. , 0.11 , 0.002, 0.001, 0. , 0.005, 0.021],\n", + " 0. , 0. , 0.001, 0.11 , 0.002, 0. , 0.005, 0.006, 0.021],\n", " dtype=float32)" ] }, - "execution_count": 57, + "execution_count": 19, "metadata": {}, "output_type": "execute_result" } @@ -1205,7 +1205,7 @@ }, { "cell_type": "code", - "execution_count": 58, + "execution_count": 20, "id": "vYe04DM5cu4r", "metadata": { "colab": { @@ -1220,29 +1220,31 @@ "output_type": "stream", "text": [ "Epoch 1/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7868 - loss: 0.6074 - val_accuracy: 0.8344 - val_loss: 0.4256\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7864 - loss: 0.5990 - val_accuracy: 0.8298 - val_loss: 0.4282\n", "Epoch 2/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8641 - loss: 0.3754 - val_accuracy: 0.8472 - val_loss: 0.4021\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8627 - loss: 0.3785 - val_accuracy: 0.8450 - val_loss: 0.3963\n", "Epoch 3/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8780 - loss: 0.3332 - val_accuracy: 0.8576 - val_loss: 0.3729\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8772 - loss: 0.3354 - val_accuracy: 0.8544 - val_loss: 0.3724\n", "Epoch 4/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8891 - loss: 0.3013 - val_accuracy: 0.8640 - val_loss: 0.3877\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8889 - loss: 0.3041 - val_accuracy: 0.8632 - val_loss: 0.3656\n", "Epoch 5/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 
0.8944 - loss: 0.2812 - val_accuracy: 0.8622 - val_loss: 0.3908\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8957 - loss: 0.2829 - val_accuracy: 0.8686 - val_loss: 0.3534\n", "Epoch 6/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9011 - loss: 0.2639 - val_accuracy: 0.8732 - val_loss: 0.3573\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9019 - loss: 0.2651 - val_accuracy: 0.8746 - val_loss: 0.3442\n", "Epoch 7/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9054 - loss: 0.2494 - val_accuracy: 0.8802 - val_loss: 0.3363\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9071 - loss: 0.2491 - val_accuracy: 0.8844 - val_loss: 0.3403\n", "Epoch 8/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9113 - loss: 0.2360 - val_accuracy: 0.8774 - val_loss: 0.3491\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9088 - loss: 0.2387 - val_accuracy: 0.8840 - val_loss: 0.3357\n", "Epoch 9/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9146 - loss: 0.2249 - val_accuracy: 0.8846 - val_loss: 0.3559\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9157 - loss: 0.2245 - val_accuracy: 0.8834 - val_loss: 0.3541\n", "Epoch 10/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9209 - loss: 0.2124 - val_accuracy: 0.8756 - val_loss: 0.3703\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9198 - loss: 0.2157 - val_accuracy: 0.8714 - val_loss: 0.4132\n", "Epoch 11/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9224 - loss: 0.2046 - val_accuracy: 0.8754 - val_loss: 0.3859\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9220 - loss: 0.2063 - val_accuracy: 0.8770 - val_loss: 0.4210\n", "Epoch 12/60\n", - "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9254 - loss: 0.1968 - val_accuracy: 0.8748 - val_loss: 0.4145\n" + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9256 - loss: 0.1986 - val_accuracy: 0.8768 - val_loss: 0.4228\n", + "Epoch 13/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9291 - loss: 0.1875 - val_accuracy: 0.8738 - val_loss: 0.4382\n" ] } ], @@ -1313,7 +1315,7 @@ }, { "cell_type": "code", - "execution_count": 59, + "execution_count": 21, "id": "q6BXm1YJUmKK", "metadata": { "colab": { @@ 
-1327,16 +1329,16 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[1m313/313\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 568us/step - accuracy: 0.8652 - loss: 57.6924\n" + "\u001b[1m313/313\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 547us/step - accuracy: 0.8669 - loss: 55.3559\n" ] }, { "data": { "text/plain": [ - "[59.54043197631836, 0.8644000291824341]" + "[58.230831146240234, 0.864799976348877]" ] }, - "execution_count": 59, + "execution_count": 21, "metadata": {}, "output_type": "execute_result" } @@ -1357,14 +1359,219 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 22, "id": "GUZldkyGyYwF", "metadata": { "id": "GUZldkyGyYwF" }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 1/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 464us/step - accuracy: 0.7328 - loss: 0.7938 - val_accuracy: 0.8340 - val_loss: 0.4826\n", + "Epoch 2/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 430us/step - accuracy: 0.8391 - loss: 0.4794 - val_accuracy: 0.8422 - val_loss: 0.4516\n", + "Epoch 3/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 425us/step - accuracy: 0.8498 - loss: 0.4464 - val_accuracy: 0.8442 - val_loss: 0.4396\n", + "Epoch 4/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 425us/step - accuracy: 0.8539 - loss: 0.4302 - val_accuracy: 0.8468 - val_loss: 0.4333\n", + "Epoch 5/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 428us/step - accuracy: 0.8566 - loss: 0.4200 - val_accuracy: 0.8476 - val_loss: 0.4295\n", + "Epoch 6/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 427us/step - accuracy: 0.8587 - loss: 0.4128 - val_accuracy: 0.8486 - val_loss: 0.4271\n", + "Epoch 7/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 428us/step - accuracy: 0.8605 - loss: 0.4072 - val_accuracy: 0.8470 - val_loss: 0.4256\n", + "Epoch 8/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 461us/step - accuracy: 0.8622 - loss: 0.4028 - val_accuracy: 0.8486 - val_loss: 0.4246\n", + "Epoch 9/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 427us/step - accuracy: 0.8629 - loss: 0.3992 - val_accuracy: 0.8488 - val_loss: 0.4239\n", + "Epoch 10/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 427us/step - accuracy: 0.8634 - loss: 0.3960 - val_accuracy: 0.8482 - val_loss: 0.4236\n", + "Epoch 11/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 427us/step - accuracy: 0.8641 - loss: 0.3933 - val_accuracy: 0.8480 - val_loss: 0.4235\n", + "Epoch 12/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 428us/step - accuracy: 0.8648 - loss: 0.3910 - val_accuracy: 0.8476 - val_loss: 0.4235\n", + "Epoch 13/90\n", + 
"\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 428us/step - accuracy: 0.8659 - loss: 0.3889 - val_accuracy: 0.8484 - val_loss: 0.4236\n", + "Epoch 14/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 423us/step - accuracy: 0.8665 - loss: 0.3870 - val_accuracy: 0.8490 - val_loss: 0.4238\n", + "Epoch 15/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 424us/step - accuracy: 0.8671 - loss: 0.3852 - val_accuracy: 0.8486 - val_loss: 0.4241\n", + "Epoch 16/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 423us/step - accuracy: 0.8677 - loss: 0.3837 - val_accuracy: 0.8486 - val_loss: 0.4244\n", + "Epoch 17/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 466us/step - accuracy: 0.8683 - loss: 0.3822 - val_accuracy: 0.8482 - val_loss: 0.4248\n", + "Epoch 18/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 439us/step - accuracy: 0.8689 - loss: 0.3809 - val_accuracy: 0.8486 - val_loss: 0.4252\n", + "Epoch 19/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 429us/step - accuracy: 0.8694 - loss: 0.3797 - val_accuracy: 0.8476 - val_loss: 0.4256\n", + "Epoch 20/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 424us/step - accuracy: 0.8698 - loss: 0.3785 - val_accuracy: 0.8472 - val_loss: 0.4260\n", + "Epoch 21/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 429us/step - accuracy: 0.8701 - loss: 0.3774 - val_accuracy: 0.8468 - val_loss: 0.4264\n", + "Epoch 22/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 426us/step - accuracy: 0.8704 - loss: 0.3764 - val_accuracy: 0.8468 - val_loss: 0.4269\n", + "Epoch 23/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 425us/step - accuracy: 0.8705 - loss: 0.3755 - val_accuracy: 0.8468 - val_loss: 0.4274\n", + "Epoch 24/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 426us/step - accuracy: 0.8705 - loss: 0.3746 - val_accuracy: 0.8470 - val_loss: 0.4278\n", + "Epoch 25/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 453us/step - accuracy: 0.8707 - loss: 0.3737 - val_accuracy: 0.8468 - val_loss: 0.4283\n", + "Epoch 26/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 448us/step - accuracy: 0.8708 - loss: 0.3729 - val_accuracy: 0.8470 - val_loss: 0.4288\n", + "Epoch 27/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 443us/step - accuracy: 0.8709 - loss: 0.3721 - val_accuracy: 0.8464 - val_loss: 0.4292\n", + "Epoch 28/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 443us/step - accuracy: 0.8709 - loss: 0.3714 - val_accuracy: 0.8464 - val_loss: 0.4297\n", + "Epoch 29/90\n", + 
"\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 462us/step - accuracy: 0.8710 - loss: 0.3707 - val_accuracy: 0.8464 - val_loss: 0.4302\n", + "Epoch 30/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 453us/step - accuracy: 0.8711 - loss: 0.3700 - val_accuracy: 0.8468 - val_loss: 0.4307\n", + "Epoch 31/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 420us/step - accuracy: 0.8710 - loss: 0.3693 - val_accuracy: 0.8462 - val_loss: 0.4311\n", + "Epoch 32/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 418us/step - accuracy: 0.8714 - loss: 0.3687 - val_accuracy: 0.8458 - val_loss: 0.4316\n", + "Epoch 33/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 417us/step - accuracy: 0.8716 - loss: 0.3681 - val_accuracy: 0.8456 - val_loss: 0.4321\n", + "Epoch 34/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 420us/step - accuracy: 0.8721 - loss: 0.3675 - val_accuracy: 0.8452 - val_loss: 0.4326\n", + "Epoch 35/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 415us/step - accuracy: 0.8724 - loss: 0.3670 - val_accuracy: 0.8448 - val_loss: 0.4331\n", + "Epoch 36/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 420us/step - accuracy: 0.8726 - loss: 0.3665 - val_accuracy: 0.8450 - val_loss: 0.4335\n", + "Epoch 37/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 418us/step - accuracy: 0.8726 - loss: 0.3659 - val_accuracy: 0.8456 - val_loss: 0.4340\n", + "Epoch 38/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 418us/step - accuracy: 0.8727 - loss: 0.3654 - val_accuracy: 0.8450 - val_loss: 0.4345\n", + "Epoch 39/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 419us/step - accuracy: 0.8729 - loss: 0.3650 - val_accuracy: 0.8452 - val_loss: 0.4350\n", + "Epoch 40/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 415us/step - accuracy: 0.8731 - loss: 0.3645 - val_accuracy: 0.8448 - val_loss: 0.4354\n", + "Epoch 41/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 417us/step - accuracy: 0.8732 - loss: 0.3640 - val_accuracy: 0.8448 - val_loss: 0.4359\n", + "Epoch 42/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 417us/step - accuracy: 0.8733 - loss: 0.3636 - val_accuracy: 0.8446 - val_loss: 0.4364\n", + "Epoch 43/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 415us/step - accuracy: 0.8736 - loss: 0.3632 - val_accuracy: 0.8440 - val_loss: 0.4369\n", + "Epoch 44/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 453us/step - accuracy: 0.8738 - loss: 0.3628 - val_accuracy: 0.8434 - val_loss: 0.4373\n", + "Epoch 45/90\n", + 
"\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 417us/step - accuracy: 0.8740 - loss: 0.3624 - val_accuracy: 0.8434 - val_loss: 0.4378\n", + "Epoch 46/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 418us/step - accuracy: 0.8740 - loss: 0.3620 - val_accuracy: 0.8426 - val_loss: 0.4382\n", + "Epoch 47/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 416us/step - accuracy: 0.8741 - loss: 0.3616 - val_accuracy: 0.8428 - val_loss: 0.4387\n", + "Epoch 48/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 448us/step - accuracy: 0.8743 - loss: 0.3612 - val_accuracy: 0.8428 - val_loss: 0.4392\n", + "Epoch 49/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 417us/step - accuracy: 0.8745 - loss: 0.3608 - val_accuracy: 0.8424 - val_loss: 0.4396\n", + "Epoch 50/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 418us/step - accuracy: 0.8746 - loss: 0.3605 - val_accuracy: 0.8420 - val_loss: 0.4401\n", + "Epoch 51/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 416us/step - accuracy: 0.8747 - loss: 0.3601 - val_accuracy: 0.8420 - val_loss: 0.4405\n", + "Epoch 52/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 417us/step - accuracy: 0.8747 - loss: 0.3598 - val_accuracy: 0.8422 - val_loss: 0.4410\n", + "Epoch 53/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 419us/step - accuracy: 0.8746 - loss: 0.3595 - val_accuracy: 0.8420 - val_loss: 0.4414\n", + "Epoch 54/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 414us/step - accuracy: 0.8748 - loss: 0.3592 - val_accuracy: 0.8418 - val_loss: 0.4419\n", + "Epoch 55/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 416us/step - accuracy: 0.8747 - loss: 0.3589 - val_accuracy: 0.8420 - val_loss: 0.4423\n", + "Epoch 56/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 416us/step - accuracy: 0.8748 - loss: 0.3585 - val_accuracy: 0.8422 - val_loss: 0.4428\n", + "Epoch 57/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 450us/step - accuracy: 0.8747 - loss: 0.3582 - val_accuracy: 0.8424 - val_loss: 0.4432\n", + "Epoch 58/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 416us/step - accuracy: 0.8749 - loss: 0.3580 - val_accuracy: 0.8426 - val_loss: 0.4436\n", + "Epoch 59/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 416us/step - accuracy: 0.8751 - loss: 0.3577 - val_accuracy: 0.8424 - val_loss: 0.4441\n", + "Epoch 60/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 414us/step - accuracy: 0.8752 - loss: 0.3574 - val_accuracy: 0.8422 - val_loss: 0.4445\n", + "Epoch 61/90\n", + 
"\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 421us/step - accuracy: 0.8751 - loss: 0.3571 - val_accuracy: 0.8424 - val_loss: 0.4449\n", + "Epoch 62/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 419us/step - accuracy: 0.8752 - loss: 0.3568 - val_accuracy: 0.8420 - val_loss: 0.4453\n", + "Epoch 63/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 415us/step - accuracy: 0.8752 - loss: 0.3566 - val_accuracy: 0.8420 - val_loss: 0.4458\n", + "Epoch 64/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 424us/step - accuracy: 0.8754 - loss: 0.3563 - val_accuracy: 0.8418 - val_loss: 0.4462\n", + "Epoch 65/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 416us/step - accuracy: 0.8753 - loss: 0.3561 - val_accuracy: 0.8418 - val_loss: 0.4466\n", + "Epoch 66/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 508us/step - accuracy: 0.8754 - loss: 0.3558 - val_accuracy: 0.8416 - val_loss: 0.4470\n", + "Epoch 67/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 444us/step - accuracy: 0.8755 - loss: 0.3556 - val_accuracy: 0.8418 - val_loss: 0.4474\n", + "Epoch 68/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 435us/step - accuracy: 0.8755 - loss: 0.3553 - val_accuracy: 0.8416 - val_loss: 0.4478\n", + "Epoch 69/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 491us/step - accuracy: 0.8756 - loss: 0.3551 - val_accuracy: 0.8410 - val_loss: 0.4482\n", + "Epoch 70/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 435us/step - accuracy: 0.8756 - loss: 0.3549 - val_accuracy: 0.8408 - val_loss: 0.4486\n", + "Epoch 71/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 423us/step - accuracy: 0.8758 - loss: 0.3546 - val_accuracy: 0.8408 - val_loss: 0.4490\n", + "Epoch 72/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 422us/step - accuracy: 0.8758 - loss: 0.3544 - val_accuracy: 0.8408 - val_loss: 0.4494\n", + "Epoch 73/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 421us/step - accuracy: 0.8758 - loss: 0.3542 - val_accuracy: 0.8410 - val_loss: 0.4498\n", + "Epoch 74/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 421us/step - accuracy: 0.8759 - loss: 0.3540 - val_accuracy: 0.8410 - val_loss: 0.4502\n", + "Epoch 75/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 426us/step - accuracy: 0.8760 - loss: 0.3538 - val_accuracy: 0.8410 - val_loss: 0.4506\n", + "Epoch 76/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 471us/step - accuracy: 0.8760 - loss: 0.3536 - val_accuracy: 0.8410 - val_loss: 0.4510\n", + "Epoch 77/90\n", + 
"\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 420us/step - accuracy: 0.8760 - loss: 0.3533 - val_accuracy: 0.8412 - val_loss: 0.4514\n", + "Epoch 78/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 421us/step - accuracy: 0.8761 - loss: 0.3531 - val_accuracy: 0.8412 - val_loss: 0.4518\n", + "Epoch 79/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 457us/step - accuracy: 0.8759 - loss: 0.3529 - val_accuracy: 0.8408 - val_loss: 0.4522\n", + "Epoch 80/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 420us/step - accuracy: 0.8761 - loss: 0.3527 - val_accuracy: 0.8406 - val_loss: 0.4526\n", + "Epoch 81/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 418us/step - accuracy: 0.8761 - loss: 0.3526 - val_accuracy: 0.8408 - val_loss: 0.4529\n", + "Epoch 82/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 418us/step - accuracy: 0.8763 - loss: 0.3524 - val_accuracy: 0.8404 - val_loss: 0.4533\n", + "Epoch 83/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 421us/step - accuracy: 0.8763 - loss: 0.3522 - val_accuracy: 0.8402 - val_loss: 0.4537\n", + "Epoch 84/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 421us/step - accuracy: 0.8763 - loss: 0.3520 - val_accuracy: 0.8400 - val_loss: 0.4541\n", + "Epoch 85/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 464us/step - accuracy: 0.8764 - loss: 0.3518 - val_accuracy: 0.8398 - val_loss: 0.4544\n", + "Epoch 86/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 421us/step - accuracy: 0.8765 - loss: 0.3516 - val_accuracy: 0.8398 - val_loss: 0.4548\n", + "Epoch 87/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 420us/step - accuracy: 0.8764 - loss: 0.3514 - val_accuracy: 0.8396 - val_loss: 0.4552\n", + "Epoch 88/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 450us/step - accuracy: 0.8764 - loss: 0.3513 - val_accuracy: 0.8394 - val_loss: 0.4555\n", + "Epoch 89/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 456us/step - accuracy: 0.8765 - loss: 0.3511 - val_accuracy: 0.8392 - val_loss: 0.4559\n", + "Epoch 90/90\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 419us/step - accuracy: 0.8765 - loss: 0.3509 - val_accuracy: 0.8390 - val_loss: 0.4563\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "# answer Optional 1" + "reg_log = tf.keras.Sequential([\n", + " tf.keras.layers.Input(shape=[28,28]),\n", + " tf.keras.layers.Flatten(),\n", + " tf.keras.layers.Dense(10,activation=\"softmax\"),\n", + "])\n", + "reg_log.compile(loss=\"sparse_categorical_crossentropy\",\n", + " optimizer=\"adam\",\n", + " 
metrics=[\"accuracy\"])\n", + "reg_log.fit(X_train01, y_train, epochs=90, validation_data=(X_val01, y_val))" ] }, { @@ -1379,14 +1586,101 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 23, "id": "zv-yV-xQyVd8", "metadata": { "id": "zv-yV-xQyVd8" }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 1/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.6586 - loss: 7.7191 - val_accuracy: 0.7086 - val_loss: 0.7708\n", + "Epoch 2/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7257 - loss: 0.7339 - val_accuracy: 0.7668 - val_loss: 0.5891\n", + "Epoch 3/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7667 - loss: 0.6082 - val_accuracy: 0.7880 - val_loss: 0.5681\n", + "Epoch 4/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 1ms/step - accuracy: 0.8169 - loss: 0.5174 - val_accuracy: 0.7946 - val_loss: 0.5648\n", + "Epoch 5/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 1ms/step - accuracy: 0.8377 - loss: 0.4704 - val_accuracy: 0.8270 - val_loss: 0.5107\n", + "Epoch 6/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8448 - loss: 0.4490 - val_accuracy: 0.8330 - val_loss: 0.4713\n", + "Epoch 7/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8521 - loss: 0.4277 - val_accuracy: 0.8534 - val_loss: 0.4069\n", + "Epoch 8/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8628 - loss: 0.3925 - val_accuracy: 0.8542 - val_loss: 0.4263\n", + "Epoch 9/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8668 - loss: 0.3824 - val_accuracy: 0.8584 - val_loss: 0.4099\n", + "Epoch 10/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8705 - loss: 0.3706 - val_accuracy: 0.8496 - val_loss: 0.4153\n", + "Epoch 11/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8745 - loss: 0.3588 - val_accuracy: 0.8550 - val_loss: 0.4377\n", + "Epoch 12/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8766 - loss: 0.3548 - val_accuracy: 0.8652 - val_loss: 0.4027\n", + "Epoch 13/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8786 - loss: 0.3448 - val_accuracy: 0.8562 - val_loss: 0.4194\n", + "Epoch 14/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8769 - loss: 0.3622 - val_accuracy: 0.8440 - val_loss: 0.4315\n", + "Epoch 15/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m 
\u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8831 - loss: 0.3358 - val_accuracy: 0.8530 - val_loss: 0.4754\n", + "Epoch 16/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8853 - loss: 0.3256 - val_accuracy: 0.8578 - val_loss: 0.4148\n", + "Epoch 17/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8865 - loss: 0.3263 - val_accuracy: 0.8510 - val_loss: 0.4600\n", + "Epoch 18/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8854 - loss: 0.3193 - val_accuracy: 0.8552 - val_loss: 0.4512\n", + "Epoch 19/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8879 - loss: 0.3197 - val_accuracy: 0.8530 - val_loss: 0.4577\n", + "Epoch 20/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8876 - loss: 0.3229 - val_accuracy: 0.8572 - val_loss: 0.4659\n", + "Epoch 21/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8892 - loss: 0.3099 - val_accuracy: 0.8506 - val_loss: 0.4705\n", + "Epoch 22/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8880 - loss: 0.3213 - val_accuracy: 0.8626 - val_loss: 0.4255\n", + "Epoch 23/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8911 - loss: 0.3110 - val_accuracy: 0.8508 - val_loss: 0.4534\n", + "Epoch 24/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8951 - loss: 0.2982 - val_accuracy: 0.8580 - val_loss: 0.4383\n", + "Epoch 25/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8930 - loss: 0.3023 - val_accuracy: 0.8578 - val_loss: 0.4875\n", + "Epoch 26/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8929 - loss: 0.3012 - val_accuracy: 0.8634 - val_loss: 0.4421\n", + "Epoch 27/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8950 - loss: 0.2971 - val_accuracy: 0.8592 - val_loss: 0.4446\n", + "Epoch 28/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8946 - loss: 0.2928 - val_accuracy: 0.8622 - val_loss: 0.4505\n", + "Epoch 29/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8993 - loss: 0.2810 - val_accuracy: 0.8548 - val_loss: 0.5223\n", + "Epoch 30/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8961 - loss: 0.2942 - val_accuracy: 0.8574 - val_loss: 0.4575\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "# 
answer Optional 2" + "model_ter = tf.keras.Sequential([\n", + " tf.keras.layers.Input(shape=[28,28]),\n", + " tf.keras.layers.Flatten(),\n", + " tf.keras.layers.Dense(300,activation=\"relu\", kernel_initializer=\"he_normal\"),\n", + " tf.keras.layers.Dense(100,activation=\"relu\",kernel_initializer=\"he_normal\"),\n", + " tf.keras.layers.Dense(10,activation=\"softmax\"),\n", + "])\n", + "model_ter.compile(loss=\"sparse_categorical_crossentropy\",\n", + " optimizer=\"adam\",\n", + " metrics=[\"accuracy\"])\n", + "model_ter.fit(X_train, y_train, epochs=30, validation_data=(X_val, y_val))" ] }, { @@ -1401,14 +1695,104 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 24, "id": "1-KComPFy1wS", "metadata": { "id": "1-KComPFy1wS" }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 1/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.6645 - loss: 0.9781 - val_accuracy: 0.8202 - val_loss: 0.4931\n", + "Epoch 2/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8294 - loss: 0.4769 - val_accuracy: 0.8336 - val_loss: 0.4414\n", + "Epoch 3/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8493 - loss: 0.4246 - val_accuracy: 0.8432 - val_loss: 0.4204\n", + "Epoch 4/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8587 - loss: 0.3927 - val_accuracy: 0.8468 - val_loss: 0.4029\n", + "Epoch 5/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8652 - loss: 0.3678 - val_accuracy: 0.8494 - val_loss: 0.3902\n", + "Epoch 6/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8718 - loss: 0.3475 - val_accuracy: 0.8546 - val_loss: 0.3777\n", + "Epoch 7/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8785 - loss: 0.3303 - val_accuracy: 0.8578 - val_loss: 0.3656\n", + "Epoch 8/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8828 - loss: 0.3153 - val_accuracy: 0.8598 - val_loss: 0.3589\n", + "Epoch 9/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8879 - loss: 0.3020 - val_accuracy: 0.8626 - val_loss: 0.3541\n", + "Epoch 10/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8918 - loss: 0.2908 - val_accuracy: 0.8648 - val_loss: 0.3486\n", + "Epoch 11/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8953 - loss: 0.2805 - val_accuracy: 0.8674 - val_loss: 0.3464\n", + "Epoch 12/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8993 - loss: 0.2703 - val_accuracy: 0.8688 - val_loss: 0.3441\n", + "Epoch 13/30\n", + "\u001b[1m1719/1719\u001b[0m 
\u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9020 - loss: 0.2616 - val_accuracy: 0.8708 - val_loss: 0.3416\n", + "Epoch 14/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9050 - loss: 0.2533 - val_accuracy: 0.8732 - val_loss: 0.3379\n", + "Epoch 15/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9088 - loss: 0.2455 - val_accuracy: 0.8752 - val_loss: 0.3380\n", + "Epoch 16/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9120 - loss: 0.2377 - val_accuracy: 0.8758 - val_loss: 0.3348\n", + "Epoch 17/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9158 - loss: 0.2304 - val_accuracy: 0.8762 - val_loss: 0.3331\n", + "Epoch 18/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9181 - loss: 0.2232 - val_accuracy: 0.8756 - val_loss: 0.3341\n", + "Epoch 19/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9216 - loss: 0.2164 - val_accuracy: 0.8782 - val_loss: 0.3340\n", + "Epoch 20/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9231 - loss: 0.2097 - val_accuracy: 0.8802 - val_loss: 0.3352\n", + "Epoch 21/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9264 - loss: 0.2034 - val_accuracy: 0.8824 - val_loss: 0.3378\n", + "Epoch 22/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9279 - loss: 0.1971 - val_accuracy: 0.8824 - val_loss: 0.3412\n", + "Epoch 23/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9308 - loss: 0.1910 - val_accuracy: 0.8812 - val_loss: 0.3456\n", + "Epoch 24/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9327 - loss: 0.1850 - val_accuracy: 0.8844 - val_loss: 0.3487\n", + "Epoch 25/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9347 - loss: 0.1800 - val_accuracy: 0.8840 - val_loss: 0.3529\n", + "Epoch 26/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9370 - loss: 0.1740 - val_accuracy: 0.8836 - val_loss: 0.3559\n", + "Epoch 27/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9393 - loss: 0.1683 - val_accuracy: 0.8836 - val_loss: 0.3620\n", + "Epoch 28/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9410 - loss: 0.1630 - val_accuracy: 0.8830 - val_loss: 0.3677\n", + "Epoch 29/30\n", + "\u001b[1m1719/1719\u001b[0m 
\u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9425 - loss: 0.1580 - val_accuracy: 0.8828 - val_loss: 0.3725\n", + "Epoch 30/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9439 - loss: 0.1531 - val_accuracy: 0.8850 - val_loss: 0.3794\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 24, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "# answer Optional 3" + "model_5 = tf.keras.Sequential([\n", + " tf.keras.layers.Input(shape=[28,28]),\n", + " tf.keras.layers.Flatten(),\n", + " tf.keras.layers.Dense(300,activation=\"relu\", kernel_initializer=\"he_normal\"),\n", + " tf.keras.layers.Dense(100,activation=\"relu\",kernel_initializer=\"he_normal\"),\n", + " tf.keras.layers.Dense(10,activation=\"softmax\"),\n", + "])\n", + "model_5.compile(loss=\"sparse_categorical_crossentropy\",\n", + " optimizer=\"adam\",\n", + " metrics=[\"accuracy\"])\n", + "\n", + "X_train_far_too_small, X_val_far_too_small = X_train/25500.0, X_val/25500.0\n", + "\n", + "model_5.fit(X_train_far_too_small, y_train, epochs=30, validation_data=(X_val_far_too_small, y_val))" ] }, { @@ -1423,26 +1807,201 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 25, "id": "dShxjtDny8HD", "metadata": { "id": "dShxjtDny8HD" }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 1/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7407 - loss: 0.7888 - val_accuracy: 0.8360 - val_loss: 0.4605\n", + "Epoch 2/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8553 - loss: 0.3999 - val_accuracy: 0.8386 - val_loss: 0.4300\n", + "Epoch 3/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8715 - loss: 0.3539 - val_accuracy: 0.8498 - val_loss: 0.4042\n", + "Epoch 4/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8813 - loss: 0.3239 - val_accuracy: 0.8580 - val_loss: 0.3892\n", + "Epoch 5/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8894 - loss: 0.3013 - val_accuracy: 0.8626 - val_loss: 0.3787\n", + "Epoch 6/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8959 - loss: 0.2828 - val_accuracy: 0.8678 - val_loss: 0.3787\n", + "Epoch 7/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9021 - loss: 0.2669 - val_accuracy: 0.8678 - val_loss: 0.3790\n", + "Epoch 8/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9069 - loss: 0.2533 - val_accuracy: 0.8710 - val_loss: 0.3780\n", + "Epoch 9/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9120 - loss: 0.2399 - val_accuracy: 0.8704 - val_loss: 0.3782\n", + "Epoch 10/30\n", + "\u001b[1m1719/1719\u001b[0m 
\u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9176 - loss: 0.2273 - val_accuracy: 0.8710 - val_loss: 0.3808\n", + "Epoch 11/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9214 - loss: 0.2158 - val_accuracy: 0.8726 - val_loss: 0.3805\n", + "Epoch 12/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9257 - loss: 0.2049 - val_accuracy: 0.8694 - val_loss: 0.3859\n", + "Epoch 13/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9304 - loss: 0.1945 - val_accuracy: 0.8720 - val_loss: 0.3934\n", + "Epoch 14/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9336 - loss: 0.1846 - val_accuracy: 0.8716 - val_loss: 0.4023\n", + "Epoch 15/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9372 - loss: 0.1754 - val_accuracy: 0.8676 - val_loss: 0.4147\n", + "Epoch 16/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9404 - loss: 0.1666 - val_accuracy: 0.8676 - val_loss: 0.4186\n", + "Epoch 17/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9441 - loss: 0.1582 - val_accuracy: 0.8696 - val_loss: 0.4338\n", + "Epoch 18/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9460 - loss: 0.1509 - val_accuracy: 0.8670 - val_loss: 0.4443\n", + "Epoch 19/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9500 - loss: 0.1429 - val_accuracy: 0.8706 - val_loss: 0.4454\n", + "Epoch 20/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9526 - loss: 0.1362 - val_accuracy: 0.8686 - val_loss: 0.4618\n", + "Epoch 21/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9554 - loss: 0.1292 - val_accuracy: 0.8656 - val_loss: 0.4863\n", + "Epoch 22/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9577 - loss: 0.1220 - val_accuracy: 0.8662 - val_loss: 0.4932\n", + "Epoch 23/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m4s\u001b[0m 2ms/step - accuracy: 0.9597 - loss: 0.1170 - val_accuracy: 0.8658 - val_loss: 0.5143\n", + "Epoch 24/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9599 - loss: 0.1135 - val_accuracy: 0.8648 - val_loss: 0.5415\n", + "Epoch 25/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9629 - loss: 0.1068 - val_accuracy: 0.8620 - val_loss: 0.5629\n", + "Epoch 26/30\n", + "\u001b[1m1719/1719\u001b[0m 
\u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9644 - loss: 0.1021 - val_accuracy: 0.8590 - val_loss: 0.6076\n", + "Epoch 27/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9667 - loss: 0.0971 - val_accuracy: 0.8640 - val_loss: 0.6027\n", + "Epoch 28/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9673 - loss: 0.0953 - val_accuracy: 0.8594 - val_loss: 0.6309\n", + "Epoch 29/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9688 - loss: 0.0910 - val_accuracy: 0.8630 - val_loss: 0.6526\n", + "Epoch 30/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9710 - loss: 0.0864 - val_accuracy: 0.8640 - val_loss: 0.6396\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "# answer Optional 4" + "# sigmoid activation, normalized data (scale : [0,1])\n", + "model_sig_norm = tf.keras.Sequential([\n", + " tf.keras.layers.Input(shape=[28,28]),\n", + " tf.keras.layers.Flatten(),\n", + " tf.keras.layers.Dense(300,activation=\"sigmoid\", kernel_initializer=\"he_normal\"),\n", + " tf.keras.layers.Dense(100,activation=\"sigmoid\",kernel_initializer=\"he_normal\"),\n", + " tf.keras.layers.Dense(10,activation=\"softmax\"),\n", + "])\n", + "model_sig_norm.compile(loss=\"sparse_categorical_crossentropy\",\n", + " optimizer=\"adam\",\n", + " metrics=[\"accuracy\"])\n", + "model_sig_norm.fit(X_train01, y_train, epochs=30, validation_data=(X_val, y_val))" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 26, "id": "1O32YLVuy8k3", "metadata": { "id": "1O32YLVuy8k3" }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 1/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.6551 - loss: 1.0137 - val_accuracy: 0.6920 - val_loss: 0.7548\n", + "Epoch 2/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7053 - loss: 0.7489 - val_accuracy: 0.7290 - val_loss: 0.7050\n", + "Epoch 3/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7162 - loss: 0.7261 - val_accuracy: 0.7220 - val_loss: 0.7043\n", + "Epoch 4/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7244 - loss: 0.6950 - val_accuracy: 0.7410 - val_loss: 0.6906\n", + "Epoch 5/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7329 - loss: 0.6854 - val_accuracy: 0.7522 - val_loss: 0.6677\n", + "Epoch 6/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7434 - loss: 0.6751 - val_accuracy: 0.7538 - val_loss: 0.6704\n", + "Epoch 7/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 
2ms/step - accuracy: 0.7419 - loss: 0.6836 - val_accuracy: 0.7470 - val_loss: 0.6619\n", + "Epoch 8/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7357 - loss: 0.6719 - val_accuracy: 0.7456 - val_loss: 0.6474\n", + "Epoch 9/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7386 - loss: 0.6690 - val_accuracy: 0.7386 - val_loss: 0.6632\n", + "Epoch 10/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7397 - loss: 0.6665 - val_accuracy: 0.7442 - val_loss: 0.6630\n", + "Epoch 11/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7473 - loss: 0.6595 - val_accuracy: 0.7466 - val_loss: 0.6551\n", + "Epoch 12/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7376 - loss: 0.6605 - val_accuracy: 0.7662 - val_loss: 0.6222\n", + "Epoch 13/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7563 - loss: 0.6315 - val_accuracy: 0.7732 - val_loss: 0.5941\n", + "Epoch 14/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7706 - loss: 0.6042 - val_accuracy: 0.7624 - val_loss: 0.6283\n", + "Epoch 15/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7615 - loss: 0.6226 - val_accuracy: 0.7598 - val_loss: 0.6130\n", + "Epoch 16/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7657 - loss: 0.6080 - val_accuracy: 0.7798 - val_loss: 0.5883\n", + "Epoch 17/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7628 - loss: 0.6154 - val_accuracy: 0.7702 - val_loss: 0.6045\n", + "Epoch 18/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7724 - loss: 0.5999 - val_accuracy: 0.7810 - val_loss: 0.5828\n", + "Epoch 19/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7839 - loss: 0.5758 - val_accuracy: 0.7930 - val_loss: 0.5618\n", + "Epoch 20/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7901 - loss: 0.5718 - val_accuracy: 0.7860 - val_loss: 0.5895\n", + "Epoch 21/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7845 - loss: 0.5713 - val_accuracy: 0.7808 - val_loss: 0.5898\n", + "Epoch 22/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7779 - loss: 0.5845 - val_accuracy: 0.7874 - val_loss: 0.5695\n", + "Epoch 23/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7872 - loss: 0.5686 - 
val_accuracy: 0.7676 - val_loss: 0.5934\n", + "Epoch 24/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7815 - loss: 0.5739 - val_accuracy: 0.7920 - val_loss: 0.5528\n", + "Epoch 25/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7892 - loss: 0.5618 - val_accuracy: 0.7928 - val_loss: 0.5675\n", + "Epoch 26/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7882 - loss: 0.5590 - val_accuracy: 0.7988 - val_loss: 0.5464\n", + "Epoch 27/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7866 - loss: 0.5598 - val_accuracy: 0.7764 - val_loss: 0.5784\n", + "Epoch 28/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7838 - loss: 0.5673 - val_accuracy: 0.7848 - val_loss: 0.5651\n", + "Epoch 29/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7886 - loss: 0.5563 - val_accuracy: 0.8008 - val_loss: 0.5436\n", + "Epoch 30/30\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7971 - loss: 0.5387 - val_accuracy: 0.8010 - val_loss: 0.5349\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 26, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "# answer Optional 4" + "model_sig_un_norm = tf.keras.Sequential([\n", + " tf.keras.layers.Input(shape=[28,28]),\n", + " tf.keras.layers.Flatten(),\n", + " tf.keras.layers.Dense(300,activation=\"sigmoid\", kernel_initializer=\"he_normal\"),\n", + " tf.keras.layers.Dense(100,activation=\"sigmoid\",kernel_initializer=\"he_normal\"),\n", + " tf.keras.layers.Dense(10,activation=\"softmax\"),\n", + "])\n", + "model_sig_un_norm.compile(loss=\"sparse_categorical_crossentropy\",\n", + " optimizer=\"adam\",\n", + " metrics=[\"accuracy\"])\n", + "model_sig_un_norm.fit(X_train, y_train, epochs=30, validation_data=(X_val, y_val))" ] }, { @@ -1457,14 +2016,165 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 27, "id": "p5nkk6t8zIzz", "metadata": { "id": "p5nkk6t8zIzz" }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 1/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.6039 - loss: 86.8092 - val_accuracy: 0.7452 - val_loss: 19.3635\n", + "Epoch 2/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7728 - loss: 14.7505 - val_accuracy: 0.7648 - val_loss: 8.7317\n", + "Epoch 3/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7867 - loss: 6.8569 - val_accuracy: 0.7916 - val_loss: 3.7573\n", + "Epoch 4/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7893 - loss: 3.0035 - val_accuracy: 0.7756 - val_loss: 1.6859\n", + "Epoch 5/60\n", + "\u001b[1m1719/1719\u001b[0m 
\u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7805 - loss: 1.3666 - val_accuracy: 0.7798 - val_loss: 1.0656\n", + "Epoch 6/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7814 - loss: 0.9145 - val_accuracy: 0.7768 - val_loss: 0.9132\n", + "Epoch 7/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7917 - loss: 0.7225 - val_accuracy: 0.7832 - val_loss: 0.8141\n", + "Epoch 8/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.7993 - loss: 0.6555 - val_accuracy: 0.7934 - val_loss: 0.7410\n", + "Epoch 9/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8125 - loss: 0.5778 - val_accuracy: 0.7986 - val_loss: 0.6734\n", + "Epoch 10/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8231 - loss: 0.5296 - val_accuracy: 0.8092 - val_loss: 0.6434\n", + "Epoch 11/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8347 - loss: 0.4960 - val_accuracy: 0.8220 - val_loss: 0.6397\n", + "Epoch 12/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8415 - loss: 0.4696 - val_accuracy: 0.8220 - val_loss: 0.6570\n", + "Epoch 13/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8482 - loss: 0.4438 - val_accuracy: 0.8270 - val_loss: 0.6685\n", + "Epoch 14/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8525 - loss: 0.4270 - val_accuracy: 0.8238 - val_loss: 0.6539\n", + "Epoch 15/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8581 - loss: 0.4113 - val_accuracy: 0.8290 - val_loss: 0.6831\n", + "Epoch 16/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8640 - loss: 0.3913 - val_accuracy: 0.8298 - val_loss: 0.7134\n", + "Epoch 17/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8691 - loss: 0.3778 - val_accuracy: 0.8262 - val_loss: 0.7223\n", + "Epoch 18/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8705 - loss: 0.3728 - val_accuracy: 0.8280 - val_loss: 0.7357\n", + "Epoch 19/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8743 - loss: 0.3598 - val_accuracy: 0.8288 - val_loss: 0.7622\n", + "Epoch 20/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8787 - loss: 0.3488 - val_accuracy: 0.8250 - val_loss: 0.7780\n", + "Epoch 21/60\n", + "\u001b[1m1719/1719\u001b[0m 
\u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8801 - loss: 0.3455 - val_accuracy: 0.8354 - val_loss: 0.7774\n", + "Epoch 22/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8834 - loss: 0.3304 - val_accuracy: 0.8282 - val_loss: 0.7972\n", + "Epoch 23/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8846 - loss: 0.3250 - val_accuracy: 0.8316 - val_loss: 0.8399\n", + "Epoch 24/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8864 - loss: 0.3191 - val_accuracy: 0.8364 - val_loss: 0.8428\n", + "Epoch 25/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8881 - loss: 0.3186 - val_accuracy: 0.8266 - val_loss: 0.9485\n", + "Epoch 26/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8901 - loss: 0.3106 - val_accuracy: 0.8376 - val_loss: 0.9145\n", + "Epoch 27/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8923 - loss: 0.3062 - val_accuracy: 0.8310 - val_loss: 0.9605\n", + "Epoch 28/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8931 - loss: 0.3016 - val_accuracy: 0.8420 - val_loss: 0.9259\n", + "Epoch 29/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8948 - loss: 0.2966 - val_accuracy: 0.8424 - val_loss: 0.9897\n", + "Epoch 30/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8977 - loss: 0.2851 - val_accuracy: 0.8406 - val_loss: 1.0251\n", + "Epoch 31/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8985 - loss: 0.2840 - val_accuracy: 0.8340 - val_loss: 1.0227\n", + "Epoch 32/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.8979 - loss: 0.2931 - val_accuracy: 0.8336 - val_loss: 1.0029\n", + "Epoch 33/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9007 - loss: 0.2807 - val_accuracy: 0.8366 - val_loss: 1.0235\n", + "Epoch 34/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9013 - loss: 0.2741 - val_accuracy: 0.8410 - val_loss: 0.9453\n", + "Epoch 35/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9049 - loss: 0.2732 - val_accuracy: 0.8376 - val_loss: 1.0164\n", + "Epoch 36/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9045 - loss: 0.2665 - val_accuracy: 0.8408 - val_loss: 1.0273\n", + "Epoch 37/60\n", + "\u001b[1m1719/1719\u001b[0m 
\u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9060 - loss: 0.2627 - val_accuracy: 0.8380 - val_loss: 1.0743\n", + "Epoch 38/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9054 - loss: 0.2668 - val_accuracy: 0.8386 - val_loss: 1.0879\n", + "Epoch 39/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9070 - loss: 0.2561 - val_accuracy: 0.8424 - val_loss: 1.0748\n", + "Epoch 40/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9097 - loss: 0.2540 - val_accuracy: 0.8308 - val_loss: 1.1934\n", + "Epoch 41/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9112 - loss: 0.2570 - val_accuracy: 0.8412 - val_loss: 1.0743\n", + "Epoch 42/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9100 - loss: 0.2514 - val_accuracy: 0.8400 - val_loss: 1.1461\n", + "Epoch 43/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9093 - loss: 0.2524 - val_accuracy: 0.8370 - val_loss: 1.1910\n", + "Epoch 44/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9118 - loss: 0.2487 - val_accuracy: 0.8394 - val_loss: 1.1850\n", + "Epoch 45/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9132 - loss: 0.2391 - val_accuracy: 0.8398 - val_loss: 1.2205\n", + "Epoch 46/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9142 - loss: 0.2433 - val_accuracy: 0.8356 - val_loss: 1.3211\n", + "Epoch 47/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9157 - loss: 0.2344 - val_accuracy: 0.8372 - val_loss: 1.2488\n", + "Epoch 48/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9160 - loss: 0.2348 - val_accuracy: 0.8420 - val_loss: 1.2971\n", + "Epoch 49/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9162 - loss: 0.2360 - val_accuracy: 0.8468 - val_loss: 1.2902\n", + "Epoch 50/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9179 - loss: 0.2328 - val_accuracy: 0.8336 - val_loss: 1.2355\n", + "Epoch 51/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9202 - loss: 0.2245 - val_accuracy: 0.8302 - val_loss: 1.4387\n", + "Epoch 52/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9182 - loss: 0.2329 - val_accuracy: 0.8346 - val_loss: 1.3602\n", + "Epoch 53/60\n", + "\u001b[1m1719/1719\u001b[0m 
\u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9196 - loss: 0.2244 - val_accuracy: 0.8370 - val_loss: 1.2779\n", + "Epoch 54/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9206 - loss: 0.2242 - val_accuracy: 0.8444 - val_loss: 1.3331\n", + "Epoch 55/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9234 - loss: 0.2143 - val_accuracy: 0.8542 - val_loss: 1.2873\n", + "Epoch 56/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9205 - loss: 0.2204 - val_accuracy: 0.8406 - val_loss: 1.3728\n", + "Epoch 57/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9207 - loss: 0.2259 - val_accuracy: 0.8414 - val_loss: 1.3721\n", + "Epoch 58/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9237 - loss: 0.2151 - val_accuracy: 0.8474 - val_loss: 1.3719\n", + "Epoch 59/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9228 - loss: 0.2189 - val_accuracy: 0.8414 - val_loss: 1.3527\n", + "Epoch 60/60\n", + "\u001b[1m1719/1719\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 2ms/step - accuracy: 0.9254 - loss: 0.2101 - val_accuracy: 0.8394 - val_loss: 1.4407\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "# answer Optional 5" + "model_high_variance = tf.keras.Sequential([\n", + " tf.keras.layers.Input(shape=[28,28]),\n", + " tf.keras.layers.Flatten(),\n", + " tf.keras.layers.Dense(300,activation=\"relu\"),\n", + " tf.keras.layers.Dense(100,activation=\"relu\"),\n", + " tf.keras.layers.Dense(10,activation=\"softmax\"),\n", + "])\n", + "model_high_variance.layers[1].set_weights([200*np.random.randn(28*28,300)/100, np.zeros(300)])\n", + "model_high_variance.layers[2].set_weights([200*np.random.randn(300,100)/100, np.zeros(100)])\n", + "\n", + "model_high_variance.compile(loss=\"sparse_categorical_crossentropy\",\n", + " optimizer=\"adam\",\n", + " metrics=[\"accuracy\"])\n", + "\n", + "model_high_variance.fit(X_train01, y_train, epochs=60, validation_data=(X_val01, y_val))" ] } ],
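+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "compare-optional-models",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Suggested follow-up (sketch, not part of the original TP): compare the\n",
+    "# experiments above on the validation set, each evaluated with the same\n",
+    "# preprocessing it was validated on during training. Assumes the fitted\n",
+    "# models and the arrays X_val, X_val01, X_val_far_too_small and y_val\n",
+    "# defined earlier in the notebook are still in memory.\n",
+    "experiments = [\n",
+    "    (\"adam, raw inputs\",            model_ter,           X_val),\n",
+    "    (\"inputs scaled far too small\", model_5,             X_val_far_too_small),\n",
+    "    (\"sigmoid, normalized inputs\",  model_sig_norm,      X_val),\n",
+    "    (\"sigmoid, raw inputs\",         model_sig_un_norm,   X_val),\n",
+    "    (\"high-variance init\",          model_high_variance, X_val01),\n",
+    "]\n",
+    "for name, model, X in experiments:\n",
+    "    loss, acc = model.evaluate(X, y_val, verbose=0)\n",
+    "    print(f\"{name:30s} val loss {loss:.3f}   val accuracy {acc:.3f}\")\n"
+   ]
+  },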