diff --git a/11_deep_learning.ipynb b/11_deep_learning.ipynb
index d470157..b0c4eec 100644
--- a/11_deep_learning.ipynb
+++ b/11_deep_learning.ipynb
@@ -643,7 +643,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Sometimes applying BN before the activation function works better (there's a debate on this topic):"
+    "Sometimes applying BN before the activation function works better (there's a debate on this topic). Moreover, the layer before a `BatchNormalization` layer does not need to have bias terms, since the `BatchNormalization` layer adds its own offset parameters; keeping the biases would be a waste of parameters, so you can set `use_bias=False` when creating those layers:"
    ]
   },
   {
@@ -655,10 +655,10 @@
     "model = keras.models.Sequential([\n",
     "    keras.layers.Flatten(input_shape=[28, 28]),\n",
     "    keras.layers.BatchNormalization(),\n",
-    "    keras.layers.Dense(300),\n",
+    "    keras.layers.Dense(300, use_bias=False),\n",
     "    keras.layers.BatchNormalization(),\n",
     "    keras.layers.Activation(\"relu\"),\n",
-    "    keras.layers.Dense(100),\n",
+    "    keras.layers.Dense(100, use_bias=False),\n",
+    "    keras.layers.BatchNormalization(),\n",
     "    keras.layers.Activation(\"relu\"),\n",
-    "    keras.layers.BatchNormalization(),\n",
     "    keras.layers.Dense(10, activation=\"softmax\")\n",
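
For reference, here is a minimal sketch (not part of the patch) of the model as it reads after this change, assuming TensorFlow 2.x where the notebook's `keras` is `tf.keras`. The loop at the end just confirms that the two hidden `Dense` layers no longer carry bias vectors, while the output layer (which has no `BatchNormalization` after it) keeps its bias:

# Sketch only: assumes the notebook's `keras` is tf.keras.
from tensorflow import keras

model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=[28, 28]),
    keras.layers.BatchNormalization(),
    keras.layers.Dense(300, use_bias=False),
    keras.layers.BatchNormalization(),
    keras.layers.Activation("relu"),
    keras.layers.Dense(100, use_bias=False),
    keras.layers.BatchNormalization(),
    keras.layers.Activation("relu"),
    keras.layers.Dense(10, activation="softmax")
])

# The hidden Dense layers expose only a kernel; the output layer keeps
# its bias because no BatchNormalization follows it.
for layer in model.layers:
    if isinstance(layer, keras.layers.Dense):
        print(layer.name, [w.shape.as_list() for w in layer.weights])

Dropping the two bias vectors saves 300 + 100 = 400 parameters; the `BatchNormalization` layers' offset (beta) parameters play the same role.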