diff --git a/15_recurrent_neural_networks.ipynb b/15_recurrent_neural_networks.ipynb index 635be5a..3c5d541 100644 --- a/15_recurrent_neural_networks.ipynb +++ b/15_recurrent_neural_networks.ipynb @@ -30,7 +30,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -93,7 +93,7 @@ }, { "cell_type": "code", - "execution_count": 493, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -108,7 +108,7 @@ }, { "cell_type": "code", - "execution_count": 494, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -123,7 +123,7 @@ }, { "cell_type": "code", - "execution_count": 495, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -132,7 +132,7 @@ }, { "cell_type": "code", - "execution_count": 496, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -175,7 +175,7 @@ }, { "cell_type": "code", - "execution_count": 497, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ @@ -185,7 +185,7 @@ }, { "cell_type": "code", - "execution_count": 498, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -202,7 +202,7 @@ }, { "cell_type": "code", - "execution_count": 499, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -210,7 +210,7 @@ "tf.random.set_seed(42)\n", "\n", "model = keras.models.Sequential([\n", - " keras.layers.Flatten(),\n", + " keras.layers.Flatten(input_shape=[50, 1]),\n", " keras.layers.Dense(1)\n", "])\n", "\n", @@ -221,7 +221,7 @@ }, { "cell_type": "code", - "execution_count": 500, + "execution_count": 9, "metadata": {}, "outputs": [], "source": [ @@ -230,7 +230,7 @@ }, { "cell_type": "code", - "execution_count": 501, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ @@ -250,7 +250,7 @@ }, { "cell_type": "code", - "execution_count": 502, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ @@ -268,23 +268,26 @@ }, { "cell_type": "code", - "execution_count": 503, + "execution_count": 12, "metadata": {}, "outputs": [], "source": [ "np.random.seed(42)\n", "tf.random.set_seed(42)\n", "\n", - "model = keras.models.Sequential([keras.layers.SimpleRNN(1)])\n", + "model = keras.models.Sequential([\n", + " keras.layers.SimpleRNN(1, input_shape=[None, 1])\n", + "])\n", "\n", - "model.compile(loss=\"mse\", optimizer=\"adam\")\n", + "optimizer = keras.optimizers.Adam(lr=0.005)\n", + "model.compile(loss=\"mse\", optimizer=optimizer)\n", "history = model.fit(X_train, y_train, epochs=20,\n", " validation_data=(X_valid, y_valid))" ] }, { "cell_type": "code", - "execution_count": 504, + "execution_count": 13, "metadata": {}, "outputs": [], "source": [ @@ -293,7 +296,7 @@ }, { "cell_type": "code", - "execution_count": 505, + "execution_count": 14, "metadata": {}, "outputs": [], "source": [ @@ -303,7 +306,7 @@ }, { "cell_type": "code", - "execution_count": 506, + "execution_count": 15, "metadata": {}, "outputs": [], "source": [ @@ -321,7 +324,7 @@ }, { "cell_type": "code", - "execution_count": 513, + "execution_count": 16, "metadata": {}, "outputs": [], "source": [ @@ -329,7 +332,7 @@ "tf.random.set_seed(42)\n", "\n", "model = keras.models.Sequential([\n", - " keras.layers.SimpleRNN(20, return_sequences=True),\n", + " keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]),\n", " keras.layers.SimpleRNN(20, return_sequences=True),\n", " keras.layers.SimpleRNN(1)\n", "])\n", @@ -341,7 +344,7 @@ }, { "cell_type": "code", - "execution_count": 514, + "execution_count": 17, 
"metadata": {}, "outputs": [], "source": [ @@ -350,7 +353,7 @@ }, { "cell_type": "code", - "execution_count": 515, + "execution_count": 18, "metadata": {}, "outputs": [], "source": [ @@ -360,18 +363,25 @@ }, { "cell_type": "code", - "execution_count": 516, + "execution_count": 19, "metadata": {}, "outputs": [], "source": [ "y_pred = model.predict(X_valid)\n", - "plot_series(X_valid[0, :, 0], y_valid[0, 0], y_valid[0, 0])\n", + "plot_series(X_valid[0, :, 0], y_valid[0, 0], y_pred[0, 0])\n", "plt.show()" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Make the second `SimpleRNN` layer return only the last output:" + ] + }, { "cell_type": "code", - "execution_count": 517, + "execution_count": 20, "metadata": {}, "outputs": [], "source": [ @@ -379,7 +389,7 @@ "tf.random.set_seed(42)\n", "\n", "model = keras.models.Sequential([\n", - " keras.layers.SimpleRNN(20, return_sequences=True),\n", + " keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]),\n", " keras.layers.SimpleRNN(20),\n", " keras.layers.Dense(1)\n", "])\n", @@ -391,7 +401,7 @@ }, { "cell_type": "code", - "execution_count": 518, + "execution_count": 21, "metadata": {}, "outputs": [], "source": [ @@ -400,7 +410,7 @@ }, { "cell_type": "code", - "execution_count": 519, + "execution_count": 22, "metadata": {}, "outputs": [], "source": [ @@ -410,12 +420,12 @@ }, { "cell_type": "code", - "execution_count": 520, + "execution_count": 23, "metadata": {}, "outputs": [], "source": [ "y_pred = model.predict(X_valid)\n", - "plot_series(X_valid[0, :, 0], y_valid[0, 0], y_valid[0, 0])\n", + "plot_series(X_valid[0, :, 0], y_valid[0, 0], y_pred[0, 0])\n", "plt.show()" ] }, @@ -428,25 +438,7 @@ }, { "cell_type": "code", - "execution_count": 390, - "metadata": {}, - "outputs": [], - "source": [ - "X_new.shape" - ] - }, - { - "cell_type": "code", - "execution_count": 394, - "metadata": {}, - "outputs": [], - "source": [ - "Y_pred.shape" - ] - }, - { - "cell_type": "code", - "execution_count": 401, + "execution_count": 24, "metadata": {}, "outputs": [], "source": [ @@ -464,7 +456,7 @@ }, { "cell_type": "code", - "execution_count": 402, + "execution_count": 25, "metadata": {}, "outputs": [], "source": [ @@ -473,14 +465,19 @@ }, { "cell_type": "code", - "execution_count": 403, + "execution_count": 26, "metadata": {}, "outputs": [], "source": [ - "plot_series(X_new[0, :50, 0])\n", - "plt.plot(np.arange(50, 60), Y_pred[0, :, 0], \"ro-\")\n", - "plt.plot(np.arange(50, 60), Y_new[0, :, 0], \"bx-\", markersize=10)\n", - "plt.axis([0, 60, -1, 1])\n", + "def plot_multiple_forecasts(X, Y, Y_pred):\n", + " n_steps = X.shape[1]\n", + " ahead = Y.shape[1]\n", + " plot_series(X[0, :, 0])\n", + " plt.plot(np.arange(n_steps, n_steps + ahead), Y_pred[0, :, 0], \"ro-\")\n", + " plt.plot(np.arange(n_steps, n_steps + ahead), Y[0, :, 0], \"bx-\", markersize=10)\n", + " plt.axis([0, n_steps + ahead, -1, 1])\n", + "\n", + "plot_multiple_forecasts(X_new, Y_new, Y_pred)\n", "save_fig(\"forecast_ahead_plot\")\n", "plt.show()" ] @@ -494,7 +491,7 @@ }, { "cell_type": "code", - "execution_count": 521, + "execution_count": 27, "metadata": {}, "outputs": [], "source": [ @@ -509,7 +506,7 @@ }, { "cell_type": "code", - "execution_count": 522, + "execution_count": 28, "metadata": {}, "outputs": [], "source": [ @@ -517,7 +514,7 @@ "tf.random.set_seed(42)\n", "\n", "model = keras.models.Sequential([\n", - " keras.layers.SimpleRNN(20, return_sequences=True),\n", + " keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]),\n", " 
keras.layers.SimpleRNN(20, return_sequences=True),\n", " keras.layers.TimeDistributed(keras.layers.Dense(1)),\n", " keras.layers.Lambda(lambda Y_pred: Y_pred[:, -10:])\n", @@ -530,7 +527,7 @@ }, { "cell_type": "code", - "execution_count": 523, + "execution_count": 29, "metadata": {}, "outputs": [], "source": [ @@ -538,19 +535,16 @@ "\n", "series = generate_time_series(1, 50 + 10)\n", "X_new, Y_new = series[:, :50, :], series[:, -10:, :]\n", - "Y_pred = model.predict(X_new)" + "Y_pred = model.predict(X_new)[:, -10:, :]" ] }, { "cell_type": "code", - "execution_count": 524, + "execution_count": 30, "metadata": {}, "outputs": [], "source": [ - "plot_series(X_new[0, :50, 0])\n", - "plt.plot(np.arange(50, 60), Y_pred[0, :, 0], \"ro-\")\n", - "plt.plot(np.arange(50, 60), Y_new[0, :, 0], \"bx-\", markersize=10)\n", - "plt.axis([0, 60, -1, 1])\n", + "plot_multiple_forecasts(X_new, Y_new, Y_pred)\n", "plt.show()" ] }, @@ -558,12 +552,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now let's create an RNN that predicts the input sequence, shifted 10 steps into the future:" + "Now let's create an RNN that predicts the input sequence, shifted 10 steps into the future. That is, instead of just forecasting time steps 50 to 59 based on time steps 0 to 49, it will forecast time steps 10 to 59 based on time steps 0 to 49 (the time steps 10 to 49 are in the input, but the model is causal so at any time step it cannot see the future inputs):" ] }, { "cell_type": "code", - "execution_count": 531, + "execution_count": 31, "metadata": {}, "outputs": [], "source": [ @@ -578,7 +572,7 @@ }, { "cell_type": "code", - "execution_count": 532, + "execution_count": 32, "metadata": {}, "outputs": [], "source": [ @@ -587,7 +581,7 @@ }, { "cell_type": "code", - "execution_count": 527, + "execution_count": 33, "metadata": {}, "outputs": [], "source": [ @@ -595,7 +589,7 @@ "tf.random.set_seed(42)\n", "\n", "model = keras.models.Sequential([\n", - " keras.layers.SimpleRNN(20, return_sequences=True),\n", + " keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]),\n", " keras.layers.SimpleRNN(20, return_sequences=True),\n", " keras.layers.TimeDistributed(keras.layers.Dense(1))\n", "])\n", @@ -610,7 +604,7 @@ }, { "cell_type": "code", - "execution_count": 409, + "execution_count": 34, "metadata": {}, "outputs": [], "source": [ @@ -623,15 +617,11 @@ }, { "cell_type": "code", - "execution_count": 410, + "execution_count": 35, "metadata": {}, "outputs": [], "source": [ - "plot_series(X_new[0, :50, 0])\n", - "plt.plot(np.arange(50, 60), Y_pred[0, :, 0], \"ro-\")\n", - "plt.plot(np.arange(50, 60), Y_new[0, :, 0], \"bx-\", markersize=10)\n", - "plt.axis([0, 60, -1, 1])\n", - "save_fig(\"forecast_ahead_multi_plot\")\n", + "plot_multiple_forecasts(X_new, Y_new, Y_pred)\n", "plt.show()" ] }, @@ -644,7 +634,7 @@ }, { "cell_type": "code", - "execution_count": 534, + "execution_count": 36, "metadata": {}, "outputs": [], "source": [ @@ -653,8 +643,7 @@ "\n", "\n", "model = keras.models.Sequential([\n", - " keras.layers.BatchNormalization(),\n", - " keras.layers.SimpleRNN(20, return_sequences=True),\n", + " keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]),\n", " keras.layers.BatchNormalization(),\n", " keras.layers.SimpleRNN(20, return_sequences=True),\n", " keras.layers.BatchNormalization(),\n", @@ -666,15 +655,6 @@ " validation_data=(X_valid, Y_valid))" ] }, - { - "cell_type": "code", - "execution_count": 365, - "metadata": {}, - "outputs": [], - "source": [ - "model.summary()" - ] - 
}, { "cell_type": "markdown", "metadata": {}, @@ -684,21 +664,19 @@ }, { "cell_type": "code", - "execution_count": 619, + "execution_count": 37, "metadata": {}, "outputs": [], "source": [ - "keras.layers.GRUCell.get_initial_state?" + "from tensorflow.keras.layers.experimental import LayerNormalization" ] }, { "cell_type": "code", - "execution_count": 622, + "execution_count": 38, "metadata": {}, "outputs": [], "source": [ - "LayerNormalization = keras.layers.experimental.LayerNormalization\n", - "\n", "class LNSimpleRNNCell(keras.layers.Layer):\n", " def __init__(self, units, activation=\"tanh\", **kwargs):\n", " super().__init__(**kwargs)\n", @@ -709,7 +687,10 @@ " self.layer_norm = LayerNormalization()\n", " self.activation = keras.activations.get(activation)\n", " def get_initial_state(self, inputs=None, batch_size=None, dtype=None):\n", - " return tf.zeros([batch_size, self.state_size], dtype=dtype)\n", + " if inputs is not None:\n", + " batch_size = tf.shape(inputs)[0]\n", + " dtype = inputs.dtype\n", + " return [tf.zeros([batch_size, self.state_size], dtype=dtype)]\n", " def call(self, inputs, states):\n", " outputs, new_states = self.simple_rnn_cell(inputs, states)\n", " norm_outputs = self.activation(self.layer_norm(outputs))\n", @@ -718,7 +699,7 @@ }, { "cell_type": "code", - "execution_count": 623, + "execution_count": 39, "metadata": {}, "outputs": [], "source": [ @@ -726,8 +707,69 @@ "tf.random.set_seed(42)\n", "\n", "model = keras.models.Sequential([\n", + " keras.layers.RNN(LNSimpleRNNCell(20), return_sequences=True,\n", + " input_shape=[None, 1]),\n", " keras.layers.RNN(LNSimpleRNNCell(20), return_sequences=True),\n", - " keras.layers.RNN(LNSimpleRNNCell(20), return_sequences=True),\n", + " keras.layers.TimeDistributed(keras.layers.Dense(1))\n", + "])\n", + "\n", + "model.compile(loss=\"mse\", optimizer=\"adam\", metrics=[last_10_time_steps_mse])\n", + "history = model.fit(X_train, Y_train, epochs=20,\n", + " validation_data=(X_valid, Y_valid))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Creating a Custom RNN Class" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "metadata": {}, + "outputs": [], + "source": [ + "class MyRNN(keras.layers.Layer):\n", + " def __init__(self, cell, return_sequences=False, **kwargs):\n", + " super().__init__(**kwargs)\n", + " self.cell = cell\n", + " self.return_sequences = return_sequences\n", + " self.get_initial_state = getattr(\n", + " self.cell, \"get_initial_state\", self.fallback_initial_state)\n", + " def fallback_initial_state(self, inputs):\n", + " return [tf.zeros([self.cell.state_size], dtype=inputs.dtype)]\n", + " @tf.function\n", + " def call(self, inputs):\n", + " states = self.get_initial_state(inputs)\n", + " n_steps = tf.shape(inputs)[1]\n", + " if self.return_sequences:\n", + " sequences = tf.TensorArray(inputs.dtype, size=n_steps)\n", + " outputs = tf.zeros(shape=[n_steps, self.cell.output_size], dtype=inputs.dtype)\n", + " for step in tf.range(n_steps):\n", + " outputs, states = self.cell(inputs[:, step], states)\n", + " if self.return_sequences:\n", + " sequences = sequences.write(step, outputs)\n", + " if self.return_sequences:\n", + " return sequences.stack()\n", + " else:\n", + " return outputs" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": {}, + "outputs": [], + "source": [ + "np.random.seed(42)\n", + "tf.random.set_seed(42)\n", + "\n", + "model = keras.models.Sequential([\n", + " MyRNN(LNSimpleRNNCell(20), return_sequences=True,\n", + " 
input_shape=[None, 1]),\n", + " MyRNN(LNSimpleRNNCell(20), return_sequences=True),\n", " keras.layers.TimeDistributed(keras.layers.Dense(1))\n", "])\n", "\n", @@ -745,7 +787,7 @@ }, { "cell_type": "code", - "execution_count": 626, + "execution_count": 42, "metadata": { "scrolled": true }, @@ -755,7 +797,7 @@ "tf.random.set_seed(42)\n", "\n", "model = keras.models.Sequential([\n", - " keras.layers.LSTM(20, return_sequences=True),\n", + " keras.layers.LSTM(20, return_sequences=True, input_shape=[None, 1]),\n", " keras.layers.LSTM(20, return_sequences=True),\n", " keras.layers.TimeDistributed(keras.layers.Dense(1))\n", "])\n", @@ -767,16 +809,16 @@ }, { "cell_type": "code", - "execution_count": 368, + "execution_count": 43, "metadata": {}, "outputs": [], "source": [ - "model.evaluate(X_valid, y_valid)" + "model.evaluate(X_valid, Y_valid)" ] }, { "cell_type": "code", - "execution_count": 369, + "execution_count": 44, "metadata": {}, "outputs": [], "source": [ @@ -786,14 +828,26 @@ }, { "cell_type": "code", - "execution_count": 370, + "execution_count": 45, + "metadata": {}, + "outputs": [], + "source": [ + "np.random.seed(43)\n", + "\n", + "series = generate_time_series(1, 50 + 10)\n", + "X_new, Y_new = series[:, :50, :], series[:, 50:, :]\n", + "Y_pred = model.predict(X_new)[:, -10:, :]" + ] + }, + { + "cell_type": "code", + "execution_count": 46, "metadata": { - "scrolled": false + "scrolled": true }, "outputs": [], "source": [ - "y_pred = model.predict(X_valid)\n", - "plot_series(X_valid[0, :, 0], y_valid[0, 0], y_valid[0, 0])\n", + "plot_multiple_forecasts(X_new, Y_new, Y_pred)\n", "plt.show()" ] }, @@ -806,9 +860,9 @@ }, { "cell_type": "code", - "execution_count": 648, + "execution_count": 47, "metadata": { - "scrolled": true + "scrolled": false }, "outputs": [], "source": [ @@ -816,7 +870,7 @@ "tf.random.set_seed(42)\n", "\n", "model = keras.models.Sequential([\n", - " keras.layers.GRU(20, return_sequences=True),\n", + " keras.layers.GRU(20, return_sequences=True, input_shape=[None, 1]),\n", " keras.layers.GRU(20, return_sequences=True),\n", " keras.layers.TimeDistributed(keras.layers.Dense(1))\n", "])\n", @@ -828,16 +882,16 @@ }, { "cell_type": "code", - "execution_count": 372, + "execution_count": 48, "metadata": {}, "outputs": [], "source": [ - "model.evaluate(X_valid, y_valid)" + "model.evaluate(X_valid, Y_valid)" ] }, { "cell_type": "code", - "execution_count": 373, + "execution_count": 49, "metadata": {}, "outputs": [], "source": [ @@ -847,14 +901,26 @@ }, { "cell_type": "code", - "execution_count": 374, + "execution_count": 50, + "metadata": {}, + "outputs": [], + "source": [ + "np.random.seed(43)\n", + "\n", + "series = generate_time_series(1, 50 + 10)\n", + "X_new, Y_new = series[:, :50, :], series[:, 50:, :]\n", + "Y_pred = model.predict(X_new)[:, -10:, :]" + ] + }, + { + "cell_type": "code", + "execution_count": 51, "metadata": { "scrolled": true }, "outputs": [], "source": [ - "y_pred = model.predict(X_valid)\n", - "plot_series(X_valid[0, :, 0], y_valid[0, 0], y_valid[0, 0])\n", + "plot_multiple_forecasts(X_new, Y_new, Y_pred)\n", "plt.show()" ] }, @@ -887,7 +953,7 @@ }, { "cell_type": "code", - "execution_count": 638, + "execution_count": 52, "metadata": {}, "outputs": [], "source": [ @@ -898,7 +964,8 @@ " return keras.metrics.mean_squared_error(Y_true[:, -5:], Y_pred[:, -5:])\n", "\n", "model = keras.models.Sequential([\n", - " keras.layers.Conv1D(filters=20, kernel_size=4, strides=2, padding=\"VALID\"),\n", + " keras.layers.Conv1D(filters=20, kernel_size=4, strides=2, 
padding=\"VALID\",\n", + " input_shape=[None, 1]),\n", " keras.layers.GRU(20, return_sequences=True),\n", " keras.layers.GRU(20, return_sequences=True),\n", " keras.layers.TimeDistributed(keras.layers.Dense(1))\n", @@ -937,19 +1004,24 @@ }, { "cell_type": "code", - "execution_count": 671, + "execution_count": 53, "metadata": {}, "outputs": [], "source": [ - "model = keras.models.Sequential()\n", - "for rate in (1, 2, 4, 8) * 2:\n", - " activation = \"relu\" if len(model.layers) < 7 else None\n", - " model.add(keras.layers.Conv1D(filters=20, kernel_size=2, padding=\"VALID\",\n", - " activation=activation, dilation_rate=rate))\n", + "np.random.seed(42)\n", + "tf.random.set_seed(42)\n", "\n", + "model = keras.models.Sequential()\n", + "model.add(keras.layers.InputLayer(input_shape=[None, 1]))\n", + "for rate in (1, 2, 4, 8) * 2:\n", + " model.add(keras.layers.Lambda(\n", + " lambda inputs: keras.backend.temporal_padding(inputs, (rate, 0))))\n", + " model.add(keras.layers.Conv1D(filters=20, kernel_size=2, padding=\"VALID\",\n", + " activation=\"relu\", dilation_rate=rate))\n", + "model.add(keras.layers.Conv1D(filters=1, kernel_size=1))\n", "model.compile(loss=\"mse\", optimizer=\"adam\", metrics=[last_10_time_steps_mse])\n", - "history = model.fit(X_train, Y_train[:, 30:], epochs=20,\n", - " validation_data=(X_valid, Y_valid[:, 30:]))\n" + "history = model.fit(X_train, Y_train, epochs=20,\n", + " validation_data=(X_valid, Y_valid))" ] }, { @@ -961,7 +1033,7 @@ }, { "cell_type": "code", - "execution_count": 734, + "execution_count": 54, "metadata": {}, "outputs": [], "source": [ @@ -980,7 +1052,7 @@ }, { "cell_type": "code", - "execution_count": 735, + "execution_count": 55, "metadata": {}, "outputs": [], "source": [ @@ -995,40 +1067,22 @@ }, { "cell_type": "code", - "execution_count": 736, + "execution_count": 56, "metadata": {}, "outputs": [], "source": [ - "inputs = keras.layers.Input(shape=[10000, 1])\n", - "skip_to_last = []\n", - "n_filters = 128\n", - "z = keras.backend.temporal_padding(inputs, (1, 0))\n", - "z = keras.layers.Conv1D(n_filters, kernel_size=2, kernel_size=1)(z)\n", - "for dilation_rate in [2**i for i in range(10)] * 3:\n", - " z, skip = wavenet_residual_block(z, 128, dilation_rate)\n", - " skip_to_last.append(skip)\n", - "z = keras.activations.relu(keras.layers.Add()(skip_to_last))\n", - "z = keras.layers.Conv1D(128, kernel_size=1, activation=\"relu\")(z)\n", - "Y_proba = keras.layers.Conv1D(256, kernel_size=1, activation=\"softmax\")(z)\n", + "np.random.seed(42)\n", + "tf.random.set_seed(42)\n", "\n", - "model = keras.models.Model(inputs=[inputs], outputs=[Y_proba])" - ] - }, - { - "cell_type": "code", - "execution_count": 732, - "metadata": {}, - "outputs": [], - "source": [ - "seq_length = 10000\n", "n_layers_per_block = 10\n", "n_blocks = 3\n", "n_filters = 128\n", "n_outputs = 256\n", "\n", - "inputs = keras.layers.Input(shape=[seq_length, 1])\n", + "inputs = keras.layers.Input(shape=[None, 1])\n", + "z = keras.backend.temporal_padding(inputs, (1, 0))\n", + "z = keras.layers.Conv1D(n_filters, kernel_size=2)(z)\n", "skip_to_last = []\n", - "z = inputs\n", "for dilation_rate in [2**i for i in range(n_layers_per_block)] * n_blocks:\n", " z, skip = wavenet_residual_block(z, n_filters, dilation_rate)\n", " skip_to_last.append(skip)\n", @@ -1039,87 +1093,14 @@ "model = keras.models.Model(inputs=[inputs], outputs=[Y_proba])" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Low-Level RNN API" - ] - }, { "cell_type": "code", - "execution_count": 611, + 
"execution_count": 57, "metadata": {}, "outputs": [], "source": [ - "class MyRNN(keras.layers.Layer):\n", - " def __init__(self, cell, return_sequences=False, **kwargs):\n", - " super().__init__(**kwargs)\n", - " self.cell = cell\n", - " self.return_sequences = return_sequences\n", - " try:\n", - " self.initial_state = self.cell.get_initial_state()\n", - " except AttributeError:\n", - " self.initial_state = [tf.zeros(shape=[size], dtype=inputs.dtype)\n", - " for size in self.cell.states_size]\n", - " def call(self, inputs):\n", - " n_steps = tf.shape(inputs)[1]\n", - " if self.return_sequences:\n", - " sequences = tf.TensorArray(inputs.dtype, size=n_steps)\n", - " for step in tf.range(n_steps):\n", - " outputs, states = self.cell(inputs[:, step], states)\n", - " if self.return_sequences:\n", - " sequences.write(step, outputs)\n", - " if self.return_sequences:\n", - " return sequences.stack(), states\n", - " else:\n", - " return outputs, states" - ] - }, - { - "cell_type": "code", - "execution_count": 612, - "metadata": {}, - "outputs": [], - "source": [ - "np.random.seed(42)\n", - "tf.random.set_seed(42)\n", - "\n", - "model = keras.models.Sequential([\n", - " MyRNN(LNSimpleRNNCell(20), return_sequences=True),\n", - " MyRNN(LNSimpleRNNCell(20), return_sequences=True),\n", - " keras.layers.TimeDistributed(keras.layers.Dense(1))\n", - "])\n", - "\n", - "model.compile(loss=\"mse\", optimizer=\"adam\", metrics=[last_10_time_steps_mse])\n", - "history = model.fit(X_train, Y_train, epochs=20,\n", - " validation_data=(X_valid, Y_valid))" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "metadata": {}, - "outputs": [], - "source": [ - "batch_size = 32\n", - "\n", - "X_batch = X_train[:batch_size]\n", - "y_batch = y_train[:batch_size]" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "metadata": {}, - "outputs": [], - "source": [ - "n_neurons = 10\n", - "cell = keras.layers.SimpleRNNCell(n_neurons)\n", - "\n", - "states = [tf.zeros((batch_size, n_neurons))]\n", - "for step in range(n_steps):\n", - " output, states = cell(X_batch[:, step], states)" + "model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"adam\")\n", + "history = model.fit(X_train, Y_train, epochs=2, validation_data=(X_valid, Y_valid))" ] }, { @@ -1138,23 +1119,26 @@ }, { "cell_type": "code", - "execution_count": 467, + "execution_count": 58, "metadata": { "scrolled": true }, "outputs": [], "source": [ + "np.random.seed(42)\n", + "tf.random.set_seed(42)\n", + "\n", "n_steps = 5\n", "dataset = tf.data.Dataset.from_tensor_slices(tf.range(15))\n", "dataset = dataset.window(n_steps, shift=2, drop_remainder=True)\n", "dataset = dataset.flat_map(lambda window: window.batch(n_steps))\n", "dataset = dataset.shuffle(10).map(lambda window: (window[:-1], window[1:]))\n", "dataset = dataset.batch(3).prefetch(1)\n", - "for index, (X_batch, y_batch) in enumerate(dataset):\n", + "for index, (X_batch, Y_batch) in enumerate(dataset):\n", " print(\"_\" * 20, \"Batch\", index, \"\\nX_batch\")\n", " print(X_batch.numpy())\n", " print(\"=\" * 5, \"\\nY_batch\")\n", - " print(y_batch.numpy())" + " print(Y_batch.numpy())" ] }, { @@ -1166,7 +1150,7 @@ }, { "cell_type": "code", - "execution_count": 452, + "execution_count": 59, "metadata": {}, "outputs": [], "source": [ @@ -1178,7 +1162,7 @@ }, { "cell_type": "code", - "execution_count": 453, + "execution_count": 60, "metadata": {}, "outputs": [], "source": [ @@ -1187,7 +1171,7 @@ }, { "cell_type": "code", - "execution_count": 454, + "execution_count": 61, 
"metadata": {}, "outputs": [], "source": [ @@ -1196,7 +1180,7 @@ }, { "cell_type": "code", - "execution_count": 468, + "execution_count": 62, "metadata": {}, "outputs": [], "source": [ @@ -1206,7 +1190,7 @@ }, { "cell_type": "code", - "execution_count": 469, + "execution_count": 63, "metadata": {}, "outputs": [], "source": [ @@ -1215,7 +1199,7 @@ }, { "cell_type": "code", - "execution_count": 470, + "execution_count": 64, "metadata": {}, "outputs": [], "source": [ @@ -1224,7 +1208,7 @@ }, { "cell_type": "code", - "execution_count": 471, + "execution_count": 65, "metadata": {}, "outputs": [], "source": [ @@ -1234,46 +1218,59 @@ }, { "cell_type": "code", - "execution_count": 472, + "execution_count": 66, "metadata": {}, "outputs": [], "source": [ - "[encoded] = np.array(tokenizer.texts_to_sequences([shakespeare_text]))\n", + "[encoded] = np.array(tokenizer.texts_to_sequences([shakespeare_text])) - 1\n", "train_size = dataset_size * 90 // 100\n", "dataset = tf.data.Dataset.from_tensor_slices(encoded[:train_size])" ] }, { "cell_type": "code", - "execution_count": 473, + "execution_count": 67, "metadata": {}, "outputs": [], "source": [ - "n_steps = 100 + 1 # 100 input characters, 1 target\n", - "dataset = dataset.repeat().window(n_steps, shift=1, drop_remainder=True)" + "n_steps = 100\n", + "window_length = n_steps + 1 # target = input shifted 1 character ahead\n", + "dataset = dataset.repeat().window(window_length, shift=1, drop_remainder=True)" ] }, { "cell_type": "code", - "execution_count": 474, + "execution_count": 68, "metadata": {}, "outputs": [], "source": [ - "dataset = dataset.flat_map(lambda window: window.batch(n_steps))" + "dataset = dataset.flat_map(lambda window: window.batch(window_length))" ] }, { "cell_type": "code", - "execution_count": 475, + "execution_count": 69, "metadata": {}, "outputs": [], "source": [ - "dataset = dataset.shuffle(10000).map(lambda window: (window[:-1], window[1:]))" + "np.random.seed(42)\n", + "tf.random.set_seed(42)" ] }, { "cell_type": "code", - "execution_count": 476, + "execution_count": 70, + "metadata": {}, + "outputs": [], + "source": [ + "batch_size = 32\n", + "dataset = dataset.shuffle(10000).batch(batch_size)\n", + "dataset = dataset.map(lambda windows: (windows[:, :-1], windows[:, 1:]))" + ] + }, + { + "cell_type": "code", + "execution_count": 71, "metadata": {}, "outputs": [], "source": [ @@ -1283,17 +1280,16 @@ }, { "cell_type": "code", - "execution_count": 477, + "execution_count": 72, "metadata": {}, "outputs": [], "source": [ - "batch_size = 32\n", - "dataset = dataset.batch(batch_size).prefetch(1)" + "dataset = dataset.prefetch(1)" ] }, { "cell_type": "code", - "execution_count": 478, + "execution_count": 73, "metadata": {}, "outputs": [], "source": [ @@ -1301,1470 +1297,23 @@ " print(X_batch.shape, Y_batch.shape)" ] }, - { - "cell_type": "code", - "execution_count": 482, - "metadata": {}, - "outputs": [], - "source": [ - "model = keras.models.Sequential([\n", - " keras.layers.GRU(128, return_sequences=True),\n", - " keras.layers.GRU(128, return_sequences=True),\n", - " keras.layers.GRU(max_id, return_sequences=True, activation=\"softmax\"),\n", - "])\n", - "model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"adam\")\n", - "history = model.fit(dataset, steps_per_epoch=train_size // batch_size,\n", - " epochs=20)" - ] - }, - { - "cell_type": "code", - "execution_count": 490, - "metadata": {}, - "outputs": [], - "source": [ - "m = keras.models.Sequential([\n", - " keras.layers.LSTM(128, return_sequences=True),\n", - " 
keras.layers.LSTM(3, return_sequences=True, activation=\"softmax\"),\n", - " #keras.layers.TimeDistributed(keras.layers.Dense(3, activation=\"softmax\")),\n", - "])\n", - "m.predict(np.random.rand(1, 10, 20)).sum(axis=-1)" - ] - }, - { - "cell_type": "code", - "execution_count": 222, - "metadata": {}, - "outputs": [], - "source": [ - "def preprocess(texts):\n", - " X = np.array(tokenizer.texts_to_sequences(texts))\n", - " return tf.one_hot(X, max_id)" - ] - }, - { - "cell_type": "code", - "execution_count": 224, - "metadata": {}, - "outputs": [], - "source": [ - "X_new = preprocess([\"How are yo\"])\n", - "y_pred = model.predict_classes(X_new)\n", - "tokenizer.sequences_to_texts([y_pred])" - ] - }, - { - "cell_type": "code", - "execution_count": 146, - "metadata": {}, - "outputs": [], - "source": [ - "model.layers[-1].weights[1].shape" - ] - }, - { - "cell_type": "code", - "execution_count": 232, - "metadata": {}, - "outputs": [], - "source": [ - "def next_char(texts, temperature=1):\n", - " X_new = preprocess(texts)\n", - " y_proba = model.predict(X_new)\n", - " logits = tf.math.log(y_proba) / temperature\n", - " char_id = tf.random.categorical(logits, 1)\n", - " return tokenizer.sequences_to_texts(char_id.numpy())" - ] - }, - { - "cell_type": "code", - "execution_count": 234, - "metadata": {}, - "outputs": [], - "source": [ - "def complete_text(text, n_chars=50, temperature=1):\n", - " for _ in range(n_chars):\n", - " text += next_char([text], temperature)[0]\n", - " return text" - ] - }, - { - "cell_type": "code", - "execution_count": 238, - "metadata": {}, - "outputs": [], - "source": [ - "print(complete_text(\"W\", temperature=0.001))" - ] - }, - { - "cell_type": "code", - "execution_count": 365, - "metadata": {}, - "outputs": [], - "source": [ - "print(complete_text(\"W\", temperature=0.5))" - ] - }, - { - "cell_type": "code", - "execution_count": 240, - "metadata": {}, - "outputs": [], - "source": [ - "print(complete_text(\"W\", temperature=1000))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Handling Sequences of Different Sizes" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's shorten each time series by chopping off a random number of time steps (from the start, so we don't need to change the targets):" - ] - }, - { - "cell_type": "code", - "execution_count": 51, - "metadata": {}, - "outputs": [], - "source": [ - "def shorten_series(X):\n", - " row_lengths = np.random.randint(10, n_steps + 1, size=len(X))\n", - " X_values = np.concatenate([row[-length:] for row, length in zip(X, row_lengths)])\n", - " return tf.RaggedTensor.from_row_lengths(X_values, row_lengths)" - ] - }, - { - "cell_type": "code", - "execution_count": 52, - "metadata": {}, - "outputs": [], - "source": [ - "np.random.seed(42)\n", - "\n", - "X_train_ragged = shorten_series(X_train)\n", - "X_valid_ragged = shorten_series(X_valid)\n", - "X_test_ragged = shorten_series(X_test)" - ] - }, - { - "cell_type": "code", - "execution_count": 57, - "metadata": {}, - "outputs": [], - "source": [ - "X_train_ragged.shape" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The lengths of the first 10 series:" - ] - }, - { - "cell_type": "code", - "execution_count": 53, - "metadata": {}, - "outputs": [], - "source": [ - "[len(series) for series in X_train_ragged[:10]]" - ] - }, - { - "cell_type": "code", - "execution_count": 148, - "metadata": {}, - "outputs": [], - "source": [ - "mask_value = 1000.\n", - "X_train_padded = 
X_train_ragged.to_tensor(default_value=mask_value)\n", - "X_valid_padded = X_valid_ragged.to_tensor(default_value=mask_value)\n", - "X_test_padded = X_test_ragged.to_tensor(default_value=mask_value)" - ] - }, - { - "cell_type": "code", - "execution_count": 149, - "metadata": {}, - "outputs": [], - "source": [ - "masking_layer = keras.layers.Masking(mask_value)\n", - "series = np.array([[[1.], [2.], [mask_value], [mask_value]],\n", - " [[3.], [4.], [5.], [mask_value]]])\n", - "masking_layer(series)" - ] - }, - { - "cell_type": "code", - "execution_count": 134, - "metadata": {}, - "outputs": [], - "source": [ - "masking_layer.compute_mask(series)" - ] - }, - { - "cell_type": "code", - "execution_count": 165, - "metadata": {}, - "outputs": [], - "source": [ - "np.random.seed(42)\n", - "tf.random.set_seed(42)\n", - "\n", - "model = keras.models.Sequential([\n", - " keras.layers.Masking(mask_value, input_shape=[50, 1]),\n", - " keras.layers.SimpleRNN(10, return_sequences=True),\n", - " keras.layers.SimpleRNN(10, return_sequences=True),\n", - " keras.layers.SimpleRNN(1, return_sequences=True),\n", - "])\n", - "model(X_train_padded[:1])" - ] - }, - { - "cell_type": "code", - "execution_count": 170, - "metadata": {}, - "outputs": [], - "source": [ - "np.random.seed(42)\n", - "tf.random.set_seed(42)\n", - "\n", - "model = keras.models.Sequential([\n", - " keras.layers.Masking(mask_value, input_shape=[50, 1]),\n", - " keras.layers.SimpleRNN(10, return_sequences=True),\n", - " keras.layers.SimpleRNN(100),\n", - "])\n", - "model(X_train_padded[:1])" - ] - }, - { - "cell_type": "code", - "execution_count": 114, - "metadata": {}, - "outputs": [], - "source": [ - "np.random.seed(42)\n", - "tf.random.set_seed(42)\n", - "\n", - "model = keras.models.Sequential([\n", - " keras.layers.Masking(input_shape=[50, 1]),\n", - " keras.layers.SimpleRNN(10, return_sequences=True),\n", - " keras.layers.SimpleRNN(10, return_sequences=True),\n", - " keras.layers.SimpleRNN(1, activation=None)\n", - "])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "optimizer = keras.optimizers.SGD(lr=1e-4, momentum=0.95, nesterov=True)\n", - "model.compile(loss=\"mse\", optimizer=optimizer)\n", - "history = model.fit(X_train_padded, tf.constant(y_train), epochs=20,\n", - " validation_data=(X_valid_padded, tf.constant(y_valid)))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Sketch RNN" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import tensorflow_datasets as tfds" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "datasets = tfds.load(\"quickdraw_sketch_rnn\", as_supervised=True)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "train_set = datasets[\"train\"]\n", - "valid_set = datasets[\"validation\"]\n", - "test_set = datasets[\"test\"]" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "for X_batch, y_batch in train_set.take(2):\n", - " print(X_batch.shape)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - 
}, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "reset_graph()\n", - "\n", - "n_inputs = 3\n", - "n_neurons = 5\n", - "\n", - "X0 = tf.placeholder(tf.float32, [None, n_inputs])\n", - "X1 = tf.placeholder(tf.float32, [None, n_inputs])\n", - "\n", - "Wx = tf.Variable(tf.random_normal(shape=[n_inputs, n_neurons],dtype=tf.float32))\n", - "Wy = tf.Variable(tf.random_normal(shape=[n_neurons,n_neurons],dtype=tf.float32))\n", - "b = tf.Variable(tf.zeros([1, n_neurons], dtype=tf.float32))\n", - "\n", - "Y0 = tf.tanh(tf.matmul(X0, Wx) + b)\n", - "Y1 = tf.tanh(tf.matmul(Y0, Wy) + tf.matmul(X1, Wx) + b)\n", - "\n", - "init = tf.global_variables_initializer()" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "\n", - "X0_batch = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 0, 1]]) # t = 0\n", - "X1_batch = np.array([[9, 8, 7], [0, 0, 0], [6, 5, 4], [3, 2, 1]]) # t = 1\n", - "\n", - "with tf.Session() as sess:\n", - " init.run()\n", - " Y0_val, Y1_val = sess.run([Y0, Y1], feed_dict={X0: X0_batch, X1: X1_batch})" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "print(Y0_val)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "print(Y1_val)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using `static_rnn()`" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Note**: `tf.contrib.rnn` was partially moved to the core API in TensorFlow 1.2. Most of the `*Cell` and `*Wrapper` classes are now available in `tf.nn.rnn_cell`, and the `tf.contrib.rnn.static_rnn()` function is available as `tf.nn.static_rnn()`." 
- ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "n_inputs = 3\n", - "n_neurons = 5" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "reset_graph()\n", - "\n", - "X0 = tf.placeholder(tf.float32, [None, n_inputs])\n", - "X1 = tf.placeholder(tf.float32, [None, n_inputs])\n", - "\n", - "basic_cell = tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons)\n", - "output_seqs, states = tf.nn.static_rnn(basic_cell, [X0, X1],\n", - " dtype=tf.float32)\n", - "Y0, Y1 = output_seqs" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "init = tf.global_variables_initializer()" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "X0_batch = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 0, 1]])\n", - "X1_batch = np.array([[9, 8, 7], [0, 0, 0], [6, 5, 4], [3, 2, 1]])\n", - "\n", - "with tf.Session() as sess:\n", - " init.run()\n", - " Y0_val, Y1_val = sess.run([Y0, Y1], feed_dict={X0: X0_batch, X1: X1_batch})" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "Y0_val" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "Y1_val" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "from tensorflow_graph_in_jupyter import show_graph" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [], - "source": [ - "show_graph(tf.get_default_graph())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Packing sequences" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [], - "source": [ - "n_steps = 2\n", - "n_inputs = 3\n", - "n_neurons = 5" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [], - "source": [ - "reset_graph()\n", - "\n", - "X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\n", - "X_seqs = tf.unstack(tf.transpose(X, perm=[1, 0, 2]))\n", - "\n", - "basic_cell = tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons)\n", - "output_seqs, states = tf.nn.static_rnn(basic_cell, X_seqs,\n", - " dtype=tf.float32)\n", - "outputs = tf.transpose(tf.stack(output_seqs), perm=[1, 0, 2])" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [], - "source": [ - "init = tf.global_variables_initializer()" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [], - "source": [ - "X_batch = np.array([\n", - " # t = 0 t = 1 \n", - " [[0, 1, 2], [9, 8, 7]], # instance 1\n", - " [[3, 4, 5], [0, 0, 0]], # instance 2\n", - " [[6, 7, 8], [6, 5, 4]], # instance 3\n", - " [[9, 0, 1], [3, 2, 1]], # instance 4\n", - " ])\n", - "\n", - "with tf.Session() as sess:\n", - " init.run()\n", - " outputs_val = outputs.eval(feed_dict={X: X_batch})" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [], - "source": [ - "print(outputs_val)" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [], - "source": [ - "print(np.transpose(outputs_val, axes=[1, 0, 2])[1])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using `dynamic_rnn()`" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - 
"metadata": {}, - "outputs": [], - "source": [ - "n_steps = 2\n", - "n_inputs = 3\n", - "n_neurons = 5" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [], - "source": [ - "reset_graph()\n", - "\n", - "X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\n", - "\n", - "basic_cell = tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons)\n", - "outputs, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32)" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [], - "source": [ - "init = tf.global_variables_initializer()" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [], - "source": [ - "X_batch = np.array([\n", - " [[0, 1, 2], [9, 8, 7]], # instance 1\n", - " [[3, 4, 5], [0, 0, 0]], # instance 2\n", - " [[6, 7, 8], [6, 5, 4]], # instance 3\n", - " [[9, 0, 1], [3, 2, 1]], # instance 4\n", - " ])\n", - "\n", - "with tf.Session() as sess:\n", - " init.run()\n", - " outputs_val = outputs.eval(feed_dict={X: X_batch})" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "metadata": {}, - "outputs": [], - "source": [ - "print(outputs_val)" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "metadata": {}, - "outputs": [], - "source": [ - "show_graph(tf.get_default_graph())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Setting the sequence lengths" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [], - "source": [ - "n_steps = 2\n", - "n_inputs = 3\n", - "n_neurons = 5\n", - "\n", - "reset_graph()\n", - "\n", - "X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\n", - "basic_cell = tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons)" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "metadata": {}, - "outputs": [], - "source": [ - "seq_length = tf.placeholder(tf.int32, [None])\n", - "outputs, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32,\n", - " sequence_length=seq_length)" - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "metadata": {}, - "outputs": [], - "source": [ - "init = tf.global_variables_initializer()" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "metadata": {}, - "outputs": [], - "source": [ - "X_batch = np.array([\n", - " # step 0 step 1\n", - " [[0, 1, 2], [9, 8, 7]], # instance 1\n", - " [[3, 4, 5], [0, 0, 0]], # instance 2 (padded with zero vectors)\n", - " [[6, 7, 8], [6, 5, 4]], # instance 3\n", - " [[9, 0, 1], [3, 2, 1]], # instance 4\n", - " ])\n", - "seq_length_batch = np.array([2, 1, 2, 2])" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "metadata": {}, - "outputs": [], - "source": [ - "with tf.Session() as sess:\n", - " init.run()\n", - " outputs_val, states_val = sess.run(\n", - " [outputs, states], feed_dict={X: X_batch, seq_length: seq_length_batch})" - ] - }, - { - "cell_type": "code", - "execution_count": 32, - "metadata": {}, - "outputs": [], - "source": [ - "print(outputs_val)" - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "print(states_val)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Training a sequence classifier" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note: the book uses `tensorflow.contrib.layers.fully_connected()` rather than `tf.layers.dense()` (which did not exist when this chapter was written). 
It is now preferable to use `tf.layers.dense()`, because anything in the contrib module may change or be deleted without notice. The `dense()` function is almost identical to the `fully_connected()` function. The main differences relevant to this chapter are:\n", - "* several parameters are renamed: `scope` becomes `name`, `activation_fn` becomes `activation` (and similarly the `_fn` suffix is removed from other parameters such as `normalizer_fn`), `weights_initializer` becomes `kernel_initializer`, etc.\n", - "* the default `activation` is now `None` rather than `tf.nn.relu`." - ] - }, - { - "cell_type": "code", - "execution_count": 34, - "metadata": {}, - "outputs": [], - "source": [ - "reset_graph()\n", - "\n", - "n_steps = 28\n", - "n_inputs = 28\n", - "n_neurons = 150\n", - "n_outputs = 10\n", - "\n", - "learning_rate = 0.001\n", - "\n", - "X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\n", - "y = tf.placeholder(tf.int32, [None])\n", - "\n", - "basic_cell = tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons)\n", - "outputs, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32)\n", - "\n", - "logits = tf.layers.dense(states, n_outputs)\n", - "xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,\n", - " logits=logits)\n", - "loss = tf.reduce_mean(xentropy)\n", - "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n", - "training_op = optimizer.minimize(loss)\n", - "correct = tf.nn.in_top_k(logits, y, 1)\n", - "accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n", - "\n", - "init = tf.global_variables_initializer()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Warning**: `tf.examples.tutorials.mnist` is deprecated. We will use `tf.keras.datasets.mnist` instead." - ] - }, - { - "cell_type": "code", - "execution_count": 35, - "metadata": {}, - "outputs": [], - "source": [ - "(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()\n", - "X_train = X_train.astype(np.float32).reshape(-1, 28*28) / 255.0\n", - "X_test = X_test.astype(np.float32).reshape(-1, 28*28) / 255.0\n", - "y_train = y_train.astype(np.int32)\n", - "y_test = y_test.astype(np.int32)\n", - "X_valid, X_train = X_train[:5000], X_train[5000:]\n", - "y_valid, y_train = y_train[:5000], y_train[5000:]" - ] - }, - { - "cell_type": "code", - "execution_count": 36, - "metadata": {}, - "outputs": [], - "source": [ - "def shuffle_batch(X, y, batch_size):\n", - " rnd_idx = np.random.permutation(len(X))\n", - " n_batches = len(X) // batch_size\n", - " for batch_idx in np.array_split(rnd_idx, n_batches):\n", - " X_batch, y_batch = X[batch_idx], y[batch_idx]\n", - " yield X_batch, y_batch" - ] - }, - { - "cell_type": "code", - "execution_count": 37, - "metadata": {}, - "outputs": [], - "source": [ - "X_test = X_test.reshape((-1, n_steps, n_inputs))" - ] - }, - { - "cell_type": "code", - "execution_count": 38, - "metadata": {}, - "outputs": [], - "source": [ - "n_epochs = 100\n", - "batch_size = 150\n", - "\n", - "with tf.Session() as sess:\n", - " init.run()\n", - " for epoch in range(n_epochs):\n", - " for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):\n", - " X_batch = X_batch.reshape((-1, n_steps, n_inputs))\n", - " sess.run(training_op, feed_dict={X: X_batch, y: y_batch})\n", - " acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch})\n", - " acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})\n", - " print(epoch, \"Last batch accuracy:\", acc_batch, \"Test accuracy:\", acc_test)" - ] - }, - { - "cell_type": 
"markdown", - "metadata": {}, - "source": [ - "# Multi-layer RNN" - ] - }, - { - "cell_type": "code", - "execution_count": 39, - "metadata": {}, - "outputs": [], - "source": [ - "reset_graph()\n", - "\n", - "n_steps = 28\n", - "n_inputs = 28\n", - "n_outputs = 10\n", - "\n", - "learning_rate = 0.001\n", - "\n", - "X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\n", - "y = tf.placeholder(tf.int32, [None])" - ] - }, - { - "cell_type": "code", - "execution_count": 40, - "metadata": {}, - "outputs": [], - "source": [ - "n_neurons = 100\n", - "n_layers = 3\n", - "\n", - "layers = [tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons,\n", - " activation=tf.nn.relu)\n", - " for layer in range(n_layers)]\n", - "multi_layer_cell = tf.nn.rnn_cell.MultiRNNCell(layers)\n", - "outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)" - ] - }, - { - "cell_type": "code", - "execution_count": 41, - "metadata": {}, - "outputs": [], - "source": [ - "states_concat = tf.concat(axis=1, values=states)\n", - "logits = tf.layers.dense(states_concat, n_outputs)\n", - "xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)\n", - "loss = tf.reduce_mean(xentropy)\n", - "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n", - "training_op = optimizer.minimize(loss)\n", - "correct = tf.nn.in_top_k(logits, y, 1)\n", - "accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n", - "\n", - "init = tf.global_variables_initializer()" - ] - }, - { - "cell_type": "code", - "execution_count": 42, - "metadata": {}, - "outputs": [], - "source": [ - "n_epochs = 10\n", - "batch_size = 150\n", - "\n", - "with tf.Session() as sess:\n", - " init.run()\n", - " for epoch in range(n_epochs):\n", - " for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):\n", - " X_batch = X_batch.reshape((-1, n_steps, n_inputs))\n", - " sess.run(training_op, feed_dict={X: X_batch, y: y_batch})\n", - " acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch})\n", - " acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})\n", - " print(epoch, \"Last batch accuracy:\", acc_batch, \"Test accuracy:\", acc_test)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Time series" - ] - }, - { - "cell_type": "code", - "execution_count": 43, - "metadata": {}, - "outputs": [], - "source": [ - "t_min, t_max = 0, 30\n", - "resolution = 0.1\n", - "\n", - "def time_series(t):\n", - " return t * np.sin(t) / 3 + 2 * np.sin(t*5)\n", - "\n", - "def next_batch(batch_size, n_steps):\n", - " t0 = np.random.rand(batch_size, 1) * (t_max - t_min - n_steps * resolution)\n", - " Ts = t0 + np.arange(0., n_steps + 1) * resolution\n", - " ys = time_series(Ts)\n", - " return ys[:, :-1].reshape(-1, n_steps, 1), ys[:, 1:].reshape(-1, n_steps, 1)" - ] - }, - { - "cell_type": "code", - "execution_count": 44, - "metadata": {}, - "outputs": [], - "source": [ - "t = np.linspace(t_min, t_max, int((t_max - t_min) / resolution))\n", - "\n", - "n_steps = 20\n", - "t_instance = np.linspace(12.2, 12.2 + resolution * (n_steps + 1), n_steps + 1)\n", - "\n", - "plt.figure(figsize=(11,4))\n", - "plt.subplot(121)\n", - "plt.title(\"A time series (generated)\", fontsize=14)\n", - "plt.plot(t, time_series(t), label=r\"$t . \\sin(t) / 3 + 2 . 
\\sin(5t)$\")\n", - "plt.plot(t_instance[:-1], time_series(t_instance[:-1]), \"b-\", linewidth=3, label=\"A training instance\")\n", - "plt.legend(loc=\"lower left\", fontsize=14)\n", - "plt.axis([0, 30, -17, 13])\n", - "plt.xlabel(\"Time\")\n", - "plt.ylabel(\"Value\")\n", - "\n", - "plt.subplot(122)\n", - "plt.title(\"A training instance\", fontsize=14)\n", - "plt.plot(t_instance[:-1], time_series(t_instance[:-1]), \"bo\", markersize=10, label=\"instance\")\n", - "plt.plot(t_instance[1:], time_series(t_instance[1:]), \"w*\", markersize=10, label=\"target\")\n", - "plt.legend(loc=\"upper left\")\n", - "plt.xlabel(\"Time\")\n", - "\n", - "\n", - "save_fig(\"time_series_plot\")\n", - "plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": 45, - "metadata": {}, - "outputs": [], - "source": [ - "X_batch, y_batch = next_batch(1, n_steps)" - ] - }, - { - "cell_type": "code", - "execution_count": 46, - "metadata": {}, - "outputs": [], - "source": [ - "np.c_[X_batch[0], y_batch[0]]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using an `OuputProjectionWrapper`" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's create the RNN. It will contain 100 recurrent neurons and we will unroll it over 20 time steps since each traiing instance will be 20 inputs long. Each input will contain only one feature (the value at that time). The targets are also sequences of 20 inputs, each containing a sigle value:" - ] - }, - { - "cell_type": "code", - "execution_count": 47, - "metadata": {}, - "outputs": [], - "source": [ - "reset_graph()\n", - "\n", - "n_steps = 20\n", - "n_inputs = 1\n", - "n_neurons = 100\n", - "n_outputs = 1\n", - "\n", - "X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\n", - "y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])\n", - "\n", - "cell = tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu)\n", - "outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "At each time step we now have an output vector of size 100. But what we actually want is a single output value at each time step. The simplest solution is to wrap the cell in an `OutputProjectionWrapper`." 
- ] - }, - { - "cell_type": "code", - "execution_count": 48, - "metadata": {}, - "outputs": [], - "source": [ - "reset_graph()\n", - "\n", - "n_steps = 20\n", - "n_inputs = 1\n", - "n_neurons = 100\n", - "n_outputs = 1\n", - "\n", - "X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\n", - "y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])" - ] - }, - { - "cell_type": "code", - "execution_count": 50, - "metadata": {}, - "outputs": [], - "source": [ - "cell = tf.contrib.rnn.OutputProjectionWrapper(\n", - " tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu),\n", - " output_size=n_outputs)" - ] - }, - { - "cell_type": "code", - "execution_count": 51, - "metadata": {}, - "outputs": [], - "source": [ - "outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)" - ] - }, - { - "cell_type": "code", - "execution_count": 52, - "metadata": {}, - "outputs": [], - "source": [ - "learning_rate = 0.001\n", - "\n", - "loss = tf.reduce_mean(tf.square(outputs - y)) # MSE\n", - "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n", - "training_op = optimizer.minimize(loss)\n", - "\n", - "init = tf.global_variables_initializer()" - ] - }, - { - "cell_type": "code", - "execution_count": 53, - "metadata": {}, - "outputs": [], - "source": [ - "saver = tf.train.Saver()" - ] - }, - { - "cell_type": "code", - "execution_count": 54, - "metadata": {}, - "outputs": [], - "source": [ - "n_iterations = 1500\n", - "batch_size = 50\n", - "\n", - "with tf.Session() as sess:\n", - " init.run()\n", - " for iteration in range(n_iterations):\n", - " X_batch, y_batch = next_batch(batch_size, n_steps)\n", - " sess.run(training_op, feed_dict={X: X_batch, y: y_batch})\n", - " if iteration % 100 == 0:\n", - " mse = loss.eval(feed_dict={X: X_batch, y: y_batch})\n", - " print(iteration, \"\\tMSE:\", mse)\n", - " \n", - " saver.save(sess, \"./my_time_series_model\") # not shown in the book" - ] - }, - { - "cell_type": "code", - "execution_count": 55, - "metadata": {}, - "outputs": [], - "source": [ - "with tf.Session() as sess: # not shown in the book\n", - " saver.restore(sess, \"./my_time_series_model\") # not shown\n", - "\n", - " X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))\n", - " y_pred = sess.run(outputs, feed_dict={X: X_new})" - ] - }, - { - "cell_type": "code", - "execution_count": 56, - "metadata": {}, - "outputs": [], - "source": [ - "y_pred" - ] - }, - { - "cell_type": "code", - "execution_count": 57, - "metadata": {}, - "outputs": [], - "source": [ - "plt.title(\"Testing the model\", fontsize=14)\n", - "plt.plot(t_instance[:-1], time_series(t_instance[:-1]), \"bo\", markersize=10, label=\"instance\")\n", - "plt.plot(t_instance[1:], time_series(t_instance[1:]), \"w*\", markersize=10, label=\"target\")\n", - "plt.plot(t_instance[1:], y_pred[0,:,0], \"r.\", markersize=10, label=\"prediction\")\n", - "plt.legend(loc=\"upper left\")\n", - "plt.xlabel(\"Time\")\n", - "\n", - "save_fig(\"time_series_pred_plot\")\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Without using an `OutputProjectionWrapper`" - ] - }, - { - "cell_type": "code", - "execution_count": 58, - "metadata": {}, - "outputs": [], - "source": [ - "reset_graph()\n", - "\n", - "n_steps = 20\n", - "n_inputs = 1\n", - "n_neurons = 100\n", - "\n", - "X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\n", - "y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])" - ] - }, - { - "cell_type": "code", - "execution_count": 
59, - "metadata": {}, - "outputs": [], - "source": [ - "cell = tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu)\n", - "rnn_outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)" - ] - }, - { - "cell_type": "code", - "execution_count": 60, - "metadata": {}, - "outputs": [], - "source": [ - "n_outputs = 1\n", - "learning_rate = 0.001" - ] - }, - { - "cell_type": "code", - "execution_count": 61, - "metadata": {}, - "outputs": [], - "source": [ - "stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])\n", - "stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)\n", - "outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])" - ] - }, - { - "cell_type": "code", - "execution_count": 62, - "metadata": {}, - "outputs": [], - "source": [ - "loss = tf.reduce_mean(tf.square(outputs - y))\n", - "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n", - "training_op = optimizer.minimize(loss)\n", - "\n", - "init = tf.global_variables_initializer()\n", - "saver = tf.train.Saver()" - ] - }, - { - "cell_type": "code", - "execution_count": 63, - "metadata": {}, - "outputs": [], - "source": [ - "n_iterations = 1500\n", - "batch_size = 50\n", - "\n", - "with tf.Session() as sess:\n", - " init.run()\n", - " for iteration in range(n_iterations):\n", - " X_batch, y_batch = next_batch(batch_size, n_steps)\n", - " sess.run(training_op, feed_dict={X: X_batch, y: y_batch})\n", - " if iteration % 100 == 0:\n", - " mse = loss.eval(feed_dict={X: X_batch, y: y_batch})\n", - " print(iteration, \"\\tMSE:\", mse)\n", - " \n", - " X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))\n", - " y_pred = sess.run(outputs, feed_dict={X: X_new})\n", - " \n", - " saver.save(sess, \"./my_time_series_model\")" - ] - }, - { - "cell_type": "code", - "execution_count": 64, - "metadata": {}, - "outputs": [], - "source": [ - "y_pred" - ] - }, - { - "cell_type": "code", - "execution_count": 65, - "metadata": {}, - "outputs": [], - "source": [ - "plt.title(\"Testing the model\", fontsize=14)\n", - "plt.plot(t_instance[:-1], time_series(t_instance[:-1]), \"bo\", markersize=10, label=\"instance\")\n", - "plt.plot(t_instance[1:], time_series(t_instance[1:]), \"w*\", markersize=10, label=\"target\")\n", - "plt.plot(t_instance[1:], y_pred[0,:,0], \"r.\", markersize=10, label=\"prediction\")\n", - "plt.legend(loc=\"upper left\")\n", - "plt.xlabel(\"Time\")\n", - "\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Generating a creative new sequence" - ] - }, - { - "cell_type": "code", - "execution_count": 66, - "metadata": {}, - "outputs": [], - "source": [ - "with tf.Session() as sess: # not shown in the book\n", - " saver.restore(sess, \"./my_time_series_model\") # not shown\n", - "\n", - " sequence = [0.] 
* n_steps\n", - " for iteration in range(300):\n", - " X_batch = np.array(sequence[-n_steps:]).reshape(1, n_steps, 1)\n", - " y_pred = sess.run(outputs, feed_dict={X: X_batch})\n", - " sequence.append(y_pred[0, -1, 0])" - ] - }, - { - "cell_type": "code", - "execution_count": 67, - "metadata": {}, - "outputs": [], - "source": [ - "plt.figure(figsize=(8,4))\n", - "plt.plot(np.arange(len(sequence)), sequence, \"b-\")\n", - "plt.plot(t[:n_steps], sequence[:n_steps], \"b-\", linewidth=3)\n", - "plt.xlabel(\"Time\")\n", - "plt.ylabel(\"Value\")\n", - "plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": 68, - "metadata": {}, - "outputs": [], - "source": [ - "with tf.Session() as sess:\n", - " saver.restore(sess, \"./my_time_series_model\")\n", - "\n", - " sequence1 = [0. for i in range(n_steps)]\n", - " for iteration in range(len(t) - n_steps):\n", - " X_batch = np.array(sequence1[-n_steps:]).reshape(1, n_steps, 1)\n", - " y_pred = sess.run(outputs, feed_dict={X: X_batch})\n", - " sequence1.append(y_pred[0, -1, 0])\n", - "\n", - " sequence2 = [time_series(i * resolution + t_min + (t_max-t_min/3)) for i in range(n_steps)]\n", - " for iteration in range(len(t) - n_steps):\n", - " X_batch = np.array(sequence2[-n_steps:]).reshape(1, n_steps, 1)\n", - " y_pred = sess.run(outputs, feed_dict={X: X_batch})\n", - " sequence2.append(y_pred[0, -1, 0])\n", - "\n", - "plt.figure(figsize=(11,4))\n", - "plt.subplot(121)\n", - "plt.plot(t, sequence1, \"b-\")\n", - "plt.plot(t[:n_steps], sequence1[:n_steps], \"b-\", linewidth=3)\n", - "plt.xlabel(\"Time\")\n", - "plt.ylabel(\"Value\")\n", - "\n", - "plt.subplot(122)\n", - "plt.plot(t, sequence2, \"b-\")\n", - "plt.plot(t[:n_steps], sequence2[:n_steps], \"b-\", linewidth=3)\n", - "plt.xlabel(\"Time\")\n", - "save_fig(\"creative_sequence_plot\")\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Deep RNN" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## MultiRNNCell" - ] - }, - { - "cell_type": "code", - "execution_count": 69, - "metadata": {}, - "outputs": [], - "source": [ - "reset_graph()\n", - "\n", - "n_inputs = 2\n", - "n_steps = 5\n", - "\n", - "X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])" - ] - }, - { - "cell_type": "code", - "execution_count": 70, - "metadata": {}, - "outputs": [], - "source": [ - "n_neurons = 100\n", - "n_layers = 3\n", - "\n", - "layers = [tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons)\n", - " for layer in range(n_layers)]\n", - "multi_layer_cell = tf.nn.rnn_cell.MultiRNNCell(layers)\n", - "outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)" - ] - }, - { - "cell_type": "code", - "execution_count": 71, - "metadata": {}, - "outputs": [], - "source": [ - "init = tf.global_variables_initializer()" - ] - }, - { - "cell_type": "code", - "execution_count": 72, - "metadata": {}, - "outputs": [], - "source": [ - "X_batch = np.random.rand(2, n_steps, n_inputs)" - ] - }, - { - "cell_type": "code", - "execution_count": 73, - "metadata": {}, - "outputs": [], - "source": [ - "with tf.Session() as sess:\n", - " init.run()\n", - " outputs_val, states_val = sess.run([outputs, states], feed_dict={X: X_batch})" - ] - }, { "cell_type": "code", "execution_count": 74, "metadata": {}, "outputs": [], "source": [ - "outputs_val.shape" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Distributing a Deep RNN Across Multiple GPUs" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Do 
**NOT** do this:" + "model = keras.models.Sequential([\n", + " keras.layers.GRU(128, return_sequences=True, input_shape=[None, max_id],\n", + " dropout=0.2, recurrent_dropout=0.2),\n", + " keras.layers.GRU(128, return_sequences=True,\n", + " dropout=0.2, recurrent_dropout=0.2),\n", + " keras.layers.TimeDistributed(keras.layers.Dense(max_id,\n", + " activation=\"softmax\"))\n", + "])\n", + "model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"adam\")\n", + "history = model.fit(dataset, steps_per_epoch=train_size // batch_size,\n", + " epochs=10)" ] }, { @@ -2773,18 +1322,9 @@ "metadata": {}, "outputs": [], "source": [ - "with tf.device(\"/gpu:0\"): # BAD! This is ignored.\n", - " layer1 = tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons)\n", - "\n", - "with tf.device(\"/gpu:1\"): # BAD! Ignored again.\n", - " layer2 = tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Instead, you need a `DeviceCellWrapper`:" + "def preprocess(texts):\n", + " X = np.array(tokenizer.texts_to_sequences(texts)) - 1\n", + " return tf.one_hot(X, max_id)" ] }, { @@ -2793,24 +1333,9 @@ "metadata": {}, "outputs": [], "source": [ - "import tensorflow as tf\n", - "\n", - "class DeviceCellWrapper(tf.nn.rnn_cell.RNNCell):\n", - " def __init__(self, device, cell):\n", - " self._cell = cell\n", - " self._device = device\n", - "\n", - " @property\n", - " def state_size(self):\n", - " return self._cell.state_size\n", - "\n", - " @property\n", - " def output_size(self):\n", - " return self._cell.output_size\n", - "\n", - " def __call__(self, inputs, state, scope=None):\n", - " with tf.device(self._device):\n", - " return self._cell(inputs, state, scope)" + "X_new = preprocess([\"How are yo\"])\n", + "Y_pred = model.predict_classes(X_new)\n", + "tokenizer.sequences_to_texts(Y_pred + 1)[0][-1] # 1st sentence, last char" ] }, { @@ -2819,13 +1344,9 @@ "metadata": {}, "outputs": [], "source": [ - "reset_graph()\n", + "tf.random.set_seed(42)\n", "\n", - "n_inputs = 5\n", - "n_steps = 20\n", - "n_neurons = 100\n", - "\n", - "X = tf.placeholder(tf.float32, shape=[None, n_steps, n_inputs])" + "tf.random.categorical([[np.log(0.5), np.log(0.4), np.log(0.1)]], num_samples=40).numpy()" ] }, { @@ -2834,18 +1355,12 @@ "metadata": {}, "outputs": [], "source": [ - "devices = [\"/cpu:0\", \"/cpu:0\", \"/cpu:0\"] # replace with [\"/gpu:0\", \"/gpu:1\", \"/gpu:2\"] if you have 3 GPUs\n", - "cells = [DeviceCellWrapper(dev,tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons))\n", - " for dev in devices]\n", - "multi_layer_cell = tf.nn.rnn_cell.MultiRNNCell(cells)\n", - "outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Alternatively, since TensorFlow 1.1, you can use the `tf.contrib.rnn.DeviceWrapper` class (alias `tf.nn.rnn_cell.DeviceWrapper` since TF 1.2)." 
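+ "# Editor's sketch (not from the book): dividing the log-probabilities by a\n",
+ "# temperature before sampling is the same as sampling from p**(1/T)\n",
+ "# renormalized, so T < 1 sharpens the distribution and T > 1 flattens it:\n",
+ "probas = np.array([0.5, 0.4, 0.1])\n",
+ "for T in (0.5, 1.0, 2.0):\n",
+ "    rescaled = probas**(1 / T) / (probas**(1 / T)).sum()\n",
+ "    print(\"T =\", T, \"->\", rescaled.round(3))\n",
+ "\n",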
+ "def next_char(text, temperature=1):\n", + " X_new = preprocess([text])\n", + " y_proba = model.predict(X_new)[0, -1:, :]\n", + " rescaled_logits = tf.math.log(y_proba) / temperature\n", + " char_id = tf.random.categorical(rescaled_logits, num_samples=1) + 1\n", + " return tokenizer.sequences_to_texts(char_id.numpy())[0]" ] }, { @@ -2854,27 +1369,21 @@ "metadata": {}, "outputs": [], "source": [ - "init = tf.global_variables_initializer()" + "tf.random.set_seed(42)\n", + "\n", + "next_char(\"How are yo\", temperature=1)" ] }, { "cell_type": "code", "execution_count": 80, - "metadata": { - "scrolled": true - }, + "metadata": {}, "outputs": [], "source": [ - "with tf.Session() as sess:\n", - " init.run()\n", - " print(sess.run(outputs, feed_dict={X: np.random.rand(2, n_steps, n_inputs)}))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Dropout" + "def complete_text(text, n_chars=50, temperature=1):\n", + " for _ in range(n_chars):\n", + " text += next_char(text, temperature)\n", + " return text" ] }, { @@ -2883,13 +1392,9 @@ "metadata": {}, "outputs": [], "source": [ - "reset_graph()\n", + "tf.random.set_seed(42)\n", "\n", - "n_inputs = 1\n", - "n_neurons = 100\n", - "n_layers = 3\n", - "n_steps = 20\n", - "n_outputs = 1" + "print(complete_text(\"t\", temperature=0.2))" ] }, { @@ -2898,15 +1403,7 @@ "metadata": {}, "outputs": [], "source": [ - "X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\n", - "y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note: the `input_keep_prob` parameter can be a placeholder, making it possible to set it to any value you want during training, and to 1.0 during testing (effectively turning dropout off). This is a much more elegant solution than what was recommended in earlier versions of the book (i.e., writing your own wrapper class or having a separate model for training and testing). Thanks to Shen Cheng for bringing this to my attention." 
+ "print(complete_text(\"t\", temperature=1))" ] }, { @@ -2915,13 +1412,14 @@ "metadata": {}, "outputs": [], "source": [ - "keep_prob = tf.placeholder_with_default(1.0, shape=())\n", - "cells = [tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons)\n", - " for layer in range(n_layers)]\n", - "cells_drop = [tf.nn.rnn_cell.DropoutWrapper(cell, input_keep_prob=keep_prob)\n", - " for cell in cells]\n", - "multi_layer_cell = tf.nn.rnn_cell.MultiRNNCell(cells_drop)\n", - "rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)" + "print(complete_text(\"t\", temperature=2))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Stateful RNN" ] }, { @@ -2930,18 +1428,7 @@ "metadata": {}, "outputs": [], "source": [ - "learning_rate = 0.01\n", - "\n", - "stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])\n", - "stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)\n", - "outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])\n", - "\n", - "loss = tf.reduce_mean(tf.square(outputs - y))\n", - "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n", - "training_op = optimizer.minimize(loss)\n", - "\n", - "init = tf.global_variables_initializer()\n", - "saver = tf.train.Saver()" + "tf.random.set_seed(42)" ] }, { @@ -2950,21 +1437,14 @@ "metadata": {}, "outputs": [], "source": [ - "n_iterations = 1500\n", - "batch_size = 50\n", - "train_keep_prob = 0.5\n", - "\n", - "with tf.Session() as sess:\n", - " init.run()\n", - " for iteration in range(n_iterations):\n", - " X_batch, y_batch = next_batch(batch_size, n_steps)\n", - " _, mse = sess.run([training_op, loss],\n", - " feed_dict={X: X_batch, y: y_batch,\n", - " keep_prob: train_keep_prob})\n", - " if iteration % 100 == 0: # not shown in the book\n", - " print(iteration, \"Training MSE:\", mse) # not shown\n", - " \n", - " saver.save(sess, \"./my_dropout_time_series_model\")" + "dataset = tf.data.Dataset.from_tensor_slices(encoded[:train_size])\n", + "dataset = dataset.window(window_length, shift=n_steps, drop_remainder=True)\n", + "dataset = dataset.flat_map(lambda window: window.batch(window_length))\n", + "dataset = dataset.repeat().batch(1)\n", + "dataset = dataset.map(lambda windows: (windows[:, :-1], windows[:, 1:]))\n", + "dataset = dataset.map(\n", + " lambda X_batch, Y_batch: (tf.one_hot(X_batch, depth=max_id), Y_batch))\n", + "dataset = dataset.prefetch(1)" ] }, { @@ -2973,11 +1453,19 @@ "metadata": {}, "outputs": [], "source": [ - "with tf.Session() as sess:\n", - " saver.restore(sess, \"./my_dropout_time_series_model\")\n", - "\n", - " X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))\n", - " y_pred = sess.run(outputs, feed_dict={X: X_new})" + "batch_size = 32\n", + "encoded_parts = np.array_split(encoded[:train_size], batch_size)\n", + "datasets = []\n", + "for encoded_part in encoded_parts:\n", + " dataset = tf.data.Dataset.from_tensor_slices(encoded_part)\n", + " dataset = dataset.window(window_length, shift=n_steps, drop_remainder=True)\n", + " dataset = dataset.flat_map(lambda window: window.batch(window_length))\n", + " datasets.append(dataset)\n", + "dataset = tf.data.Dataset.zip(tuple(datasets)).map(lambda *windows: tf.stack(windows))\n", + "dataset = dataset.repeat().map(lambda windows: (windows[:, :-1], windows[:, 1:]))\n", + "dataset = dataset.map(\n", + " lambda X_batch, Y_batch: (tf.one_hot(X_batch, depth=max_id), Y_batch))\n", + "dataset = dataset.prefetch(1)" ] }, { @@ -2986,28 +1474,16 @@ "metadata": {}, 
"outputs": [], "source": [ - "plt.title(\"Testing the model\", fontsize=14)\n", - "plt.plot(t_instance[:-1], time_series(t_instance[:-1]), \"bo\", markersize=10, label=\"instance\")\n", - "plt.plot(t_instance[1:], time_series(t_instance[1:]), \"w*\", markersize=10, label=\"target\")\n", - "plt.plot(t_instance[1:], y_pred[0,:,0], \"r.\", markersize=10, label=\"prediction\")\n", - "plt.legend(loc=\"upper left\")\n", - "plt.xlabel(\"Time\")\n", - "\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Oops, it seems that Dropout does not help at all in this particular case. :/" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# LSTM" + "model = keras.models.Sequential([\n", + " keras.layers.GRU(128, return_sequences=True, stateful=True,\n", + "# dropout=0.2, recurrent_dropout=0.2, # see TF issue #27829\n", + " batch_input_shape=[batch_size, None, max_id]),\n", + " keras.layers.GRU(128, return_sequences=True, stateful=True\n", + "# dropout=0.2, recurrent_dropout=0.2 # see TF issue #27829\n", + " ),\n", + " keras.layers.TimeDistributed(keras.layers.Dense(max_id,\n", + " activation=\"softmax\"))\n", + "])" ] }, { @@ -3016,9 +1492,9 @@ "metadata": {}, "outputs": [], "source": [ - "reset_graph()\n", - "\n", - "lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=n_neurons)" + "class ResetStatesCallback(keras.callbacks.Callback):\n", + " def on_epoch_begin(self, epoch, logs):\n", + " self.model.reset_states()" ] }, { @@ -3027,31 +1503,17 @@ "metadata": {}, "outputs": [], "source": [ - "n_steps = 28\n", - "n_inputs = 28\n", - "n_neurons = 150\n", - "n_outputs = 10\n", - "n_layers = 3\n", - "\n", - "learning_rate = 0.001\n", - "\n", - "X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\n", - "y = tf.placeholder(tf.int32, [None])\n", - "\n", - "lstm_cells = [tf.nn.rnn_cell.BasicLSTMCell(num_units=n_neurons)\n", - " for layer in range(n_layers)]\n", - "multi_cell = tf.nn.rnn_cell.MultiRNNCell(lstm_cells)\n", - "outputs, states = tf.nn.dynamic_rnn(multi_cell, X, dtype=tf.float32)\n", - "top_layer_h_state = states[-1][1]\n", - "logits = tf.layers.dense(top_layer_h_state, n_outputs, name=\"softmax\")\n", - "xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)\n", - "loss = tf.reduce_mean(xentropy, name=\"loss\")\n", - "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n", - "training_op = optimizer.minimize(loss)\n", - "correct = tf.nn.in_top_k(logits, y, 1)\n", - "accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n", - " \n", - "init = tf.global_variables_initializer()" + "model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"adam\")\n", + "steps_per_epoch = train_size // batch_size // n_steps\n", + "model.fit(dataset, steps_per_epoch=steps_per_epoch, epochs=50,\n", + " callbacks=[ResetStatesCallback()])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To use the model with different batch sizes, we need to create a stateless copy. 
We can get rid of dropout since it is only used during training:" ] }, { @@ -3060,7 +1522,19 @@ "metadata": {}, "outputs": [], "source": [ - "states" + "stateless_model = keras.models.Sequential([\n", + " keras.layers.GRU(128, return_sequences=True, input_shape=[None, max_id]),\n", + " keras.layers.GRU(128, return_sequences=True),\n", + " keras.layers.TimeDistributed(keras.layers.Dense(max_id,\n", + " activation=\"softmax\"))\n", + "])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To set the weights, we first need to build the model (so the weights get created):" ] }, { @@ -3069,29 +1543,17 @@ "metadata": {}, "outputs": [], "source": [ - "top_layer_h_state" + "stateless_model.build(tf.TensorShape([None, None, max_id]))" ] }, { "cell_type": "code", "execution_count": 92, - "metadata": { - "scrolled": true - }, + "metadata": {}, "outputs": [], "source": [ - "n_epochs = 10\n", - "batch_size = 150\n", - "\n", - "with tf.Session() as sess:\n", - " init.run()\n", - " for epoch in range(n_epochs):\n", - " for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):\n", - " X_batch = X_batch.reshape((-1, n_steps, n_inputs))\n", - " sess.run(training_op, feed_dict={X: X_batch, y: y_batch})\n", - " acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch})\n", - " acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})\n", - " print(epoch, \"Last batch accuracy:\", acc_batch, \"Test accuracy:\", acc_test)" + "stateless_model.set_weights(model.get_weights())\n", + "model = stateless_model" ] }, { @@ -3100,7 +1562,16 @@ "metadata": {}, "outputs": [], "source": [ - "lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=n_neurons, use_peepholes=True)" + "tf.random.set_seed(42)\n", + "\n", + "print(complete_text(\"t\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Sentiment Analysis" ] }, { @@ -3109,28 +1580,14 @@ "metadata": {}, "outputs": [], "source": [ - "gru_cell = tf.nn.rnn_cell.GRUCell(num_units=n_neurons)" + "tf.random.set_seed(42)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "# Embeddings" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This section is based on TensorFlow's [Word2Vec tutorial](https://www.tensorflow.org/versions/r0.11/tutorials/word2vec/index.html)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Fetch the data" + "You can load the IMDB dataset easily:" ] }, { @@ -3139,37 +1596,7 @@ "metadata": {}, "outputs": [], "source": [ - "from six.moves import urllib\n", - "\n", - "import errno\n", - "import os\n", - "import zipfile\n", - "\n", - "WORDS_PATH = \"datasets/words\"\n", - "WORDS_URL = 'http://mattmahoney.net/dc/text8.zip'\n", - "\n", - "def mkdir_p(path):\n", - " \"\"\"Create directories, ok if they already exist.\n", - " \n", - " This is for python 2 support. 
In python >=3.2, simply use:\n", - "    >>> os.makedirs(path, exist_ok=True)\n", - "    \"\"\"\n", - "    try:\n", - "        os.makedirs(path)\n", - "    except OSError as exc:\n", - "        if exc.errno == errno.EEXIST and os.path.isdir(path):\n", - "            pass\n", - "        else:\n", - "            raise\n", - "\n", - "def fetch_words_data(words_url=WORDS_URL, words_path=WORDS_PATH):\n", - "    os.makedirs(words_path, exist_ok=True)\n", - "    zip_path = os.path.join(words_path, \"words.zip\")\n", - "    if not os.path.exists(zip_path):\n", - "        urllib.request.urlretrieve(words_url, zip_path)\n", - "    with zipfile.ZipFile(zip_path) as f:\n", - "        data = f.read(f.namelist()[0])\n", - "    return data.decode(\"ascii\").split()" + "(X_train, y_train), (X_test, y_test) = keras.datasets.imdb.load_data()" ] }, { @@ -3178,7 +1605,7 @@ "metadata": {}, "outputs": [], "source": [ - "words = fetch_words_data()" + "X_train[0][:10]" ] }, { @@ -3187,14 +1614,11 @@ "metadata": {}, "outputs": [], "source": [ - "words[:5]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Build the dictionary" + "word_index = keras.datasets.imdb.get_word_index()\n", + "id_to_word = {id_ + 3: word for word, id_ in word_index.items()}\n", + "for id_, token in enumerate((\"<pad>\", \"<sos>\", \"<unk>\")):\n", + "    id_to_word[id_] = token\n", + "\" \".join([id_to_word[id_] for id_ in X_train[0][:10]])" ] }, { @@ -3203,14 +1627,9 @@ "metadata": {}, "outputs": [], "source": [ - "from collections import Counter\n", + "import tensorflow_datasets as tfds\n", "\n", - "vocabulary_size = 50000\n", - "\n", - "vocabulary = [(\"UNK\", None)] + Counter(words).most_common(vocabulary_size - 1)\n", - "vocabulary = np.array([word for word, _ in vocabulary])\n", - "dictionary = {word: code for code, word in enumerate(vocabulary)}\n", - "data = np.array([dictionary.get(word, 0) for word in words])" + "datasets, info = tfds.load(\"imdb_reviews\", as_supervised=True, with_info=True)" ] }, { @@ -3219,7 +1638,7 @@ "metadata": {}, "outputs": [], "source": [ - "\" \".join(words[:9]), data[:9]" + "datasets.keys()" ] }, { @@ -3228,7 +1647,8 @@ "metadata": {}, "outputs": [], "source": [ - "\" \".join([vocabulary[word_index] for word_index in [5241, 3081, 12, 6, 195, 2, 3134, 46, 59]])" + "train_size = info.splits[\"train\"].num_examples\n", + "test_size = info.splits[\"test\"].num_examples" ] }, { @@ -3237,14 +1657,7 @@ "metadata": {}, "outputs": [], "source": [ - "words[24], data[24]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Generate batches" + "train_size, test_size" ] }, { @@ -3253,31 +1666,11 @@ "metadata": {}, "outputs": [], "source": [ - "from collections import deque\n", - "\n", - "def generate_batch(batch_size, num_skips, skip_window):\n", - "    global data_index\n", - "    assert batch_size % num_skips == 0\n", - "    assert num_skips <= 2 * skip_window\n", - "    batch = np.ndarray(shape=[batch_size], dtype=np.int32)\n", - "    labels = np.ndarray(shape=[batch_size, 1], dtype=np.int32)\n", - "    span = 2 * skip_window + 1 # [ skip_window target skip_window ]\n", - "    buffer = deque(maxlen=span)\n", - "    for _ in range(span):\n", - "        buffer.append(data[data_index])\n", - "        data_index = (data_index + 1) % len(data)\n", - "    for i in range(batch_size // num_skips):\n", - "        target = skip_window # target label at the center of the buffer\n", - "        targets_to_avoid = [ skip_window ]\n", - "        for j in range(num_skips):\n", - "            while target in targets_to_avoid:\n", - "                target = np.random.randint(0, span)\n", - "            targets_to_avoid.append(target)\n", - "            batch[i * num_skips + j] = 
buffer[skip_window]\n", - "            labels[i * num_skips + j, 0] = buffer[target]\n", - "        buffer.append(data[data_index])\n", - "        data_index = (data_index + 1) % len(data)\n", - "    return batch, labels" + "for X_batch, y_batch in datasets[\"train\"].batch(2).take(1):\n", + "    for review, label in zip(X_batch.numpy(), y_batch.numpy()):\n", + "        print(\"Review:\", review.decode(\"utf-8\")[:200], \"...\")\n", + "        print(\"Label:\", label, \"= Positive\" if label else \"= Negative\")\n", + "        print()" ] }, { @@ -3286,7 +1679,12 @@ "metadata": {}, "outputs": [], "source": [ - "np.random.seed(42)" + "def preprocess(X_batch, y_batch):\n", + "    X_batch = tf.strings.substr(X_batch, 0, 300)\n", + "    X_batch = tf.strings.regex_replace(X_batch, rb\"<br\\s*/?>\", b\" \")\n", + "    X_batch = tf.strings.regex_replace(X_batch, b\"[^a-zA-Z']\", b\" \")\n", + "    X_batch = tf.strings.split(X_batch)\n", + "    return X_batch.to_tensor(default_value=b\"<pad>\"), y_batch" ] }, { @@ -3295,8 +1693,7 @@ "metadata": {}, "outputs": [], "source": [ - "data_index = 0\n", - "batch, labels = generate_batch(8, 2, 1)" + "preprocess(X_batch, y_batch)" ] }, { @@ -3305,7 +1702,12 @@ "metadata": {}, "outputs": [], "source": [ - "batch, [vocabulary[word] for word in batch]" + "from collections import Counter\n", + "\n", + "vocabulary = Counter()\n", + "for X_batch, y_batch in datasets[\"train\"].batch(32).map(preprocess):\n", + "    for review in X_batch:\n", + "        vocabulary.update(list(review.numpy()))" ] }, { @@ -3314,14 +1716,7 @@ "metadata": {}, "outputs": [], "source": [ - "labels, [vocabulary[word] for word in labels[:, 0]]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Build the model" + "vocabulary.most_common()[:3]" ] }, { @@ -3330,20 +1725,7 @@ "metadata": {}, "outputs": [], "source": [ - "batch_size = 128\n", - "embedding_size = 128 # Dimension of the embedding vector.\n", - "skip_window = 1 # How many words to consider left and right.\n", - "num_skips = 2 # How many times to reuse an input to generate a label.\n", - "\n", - "# We pick a random validation set to sample nearest neighbors. 
Here we limit the\n", - "# validation samples to the words that have a low numeric ID, which by\n", - "# construction are also the most frequent.\n", - "valid_size = 16 # Random set of words to evaluate similarity on.\n", - "valid_window = 100 # Only pick dev samples in the head of the distribution.\n", - "valid_examples = np.random.choice(valid_window, valid_size, replace=False)\n", - "num_sampled = 64 # Number of negative examples to sample.\n", - "\n", - "learning_rate = 0.01" + "len(vocabulary)" ] }, { @@ -3352,11 +1734,9 @@ "metadata": {}, "outputs": [], "source": [ - "reset_graph()\n", - "\n", - "# Input data.\n", - "train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n", - "valid_dataset = tf.constant(valid_examples, dtype=tf.int32)" + "vocab_size = 10000\n", + "truncated_vocabulary = [\n", + " word for word, count in vocabulary.most_common()[:vocab_size]]" ] }, { @@ -3365,12 +1745,9 @@ "metadata": {}, "outputs": [], "source": [ - "vocabulary_size = 50000\n", - "embedding_size = 150\n", - "\n", - "# Look up embeddings for inputs.\n", - "init_embeds = tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0)\n", - "embeddings = tf.Variable(init_embeds)" + "word_to_id = {word: index for index, word in enumerate(truncated_vocabulary)}\n", + "for word in b\"This movie was faaaaaantastic\".split():\n", + " print(word_to_id.get(word) or vocab_size)" ] }, { @@ -3379,8 +1756,11 @@ "metadata": {}, "outputs": [], "source": [ - "train_inputs = tf.placeholder(tf.int32, shape=[None])\n", - "embed = tf.nn.embedding_lookup(embeddings, train_inputs)" + "words = tf.constant(truncated_vocabulary)\n", + "word_ids = tf.range(len(truncated_vocabulary), dtype=tf.int64)\n", + "vocab_init = tf.lookup.KeyValueTensorInitializer(words, word_ids)\n", + "num_oov_buckets = 1000\n", + "table = tf.lookup.StaticVocabularyTable(vocab_init, num_oov_buckets)" ] }, { @@ -3389,38 +1769,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Construct the variables for the NCE loss\n", - "nce_weights = tf.Variable(\n", - " tf.truncated_normal([vocabulary_size, embedding_size],\n", - " stddev=1.0 / np.sqrt(embedding_size)))\n", - "nce_biases = tf.Variable(tf.zeros([vocabulary_size]))\n", - "\n", - "# Compute the average NCE loss for the batch.\n", - "# tf.nce_loss automatically draws a new sample of the negative labels each\n", - "# time we evaluate the loss.\n", - "loss = tf.reduce_mean(\n", - " tf.nn.nce_loss(nce_weights, nce_biases, train_labels, embed,\n", - " num_sampled, vocabulary_size))\n", - "\n", - "# Construct the Adam optimizer\n", - "optimizer = tf.train.AdamOptimizer(learning_rate)\n", - "training_op = optimizer.minimize(loss)\n", - "\n", - "# Compute the cosine similarity between minibatch examples and all embeddings.\n", - "norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), axis=1, keepdims=True))\n", - "normalized_embeddings = embeddings / norm\n", - "valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)\n", - "similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)\n", - "\n", - "# Add variable initializer.\n", - "init = tf.global_variables_initializer()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Train the model" + "table.lookup(tf.constant([b\"This movie was faaaaaantastic\".split()]))" ] }, { @@ -3429,50 +1778,11 @@ "metadata": {}, "outputs": [], "source": [ - "num_steps = 10001\n", + "def encode_words(X_batch, y_batch):\n", + " return table.lookup(X_batch), y_batch\n", "\n", - "with tf.Session() as 
session:\n", - " init.run()\n", - "\n", - " average_loss = 0\n", - " for step in range(num_steps):\n", - " print(\"\\rIteration: {}\".format(step), end=\"\\t\")\n", - " batch_inputs, batch_labels = generate_batch(batch_size, num_skips, skip_window)\n", - " feed_dict = {train_inputs : batch_inputs, train_labels : batch_labels}\n", - "\n", - " # We perform one update step by evaluating the training op (including it\n", - " # in the list of returned values for session.run()\n", - " _, loss_val = session.run([training_op, loss], feed_dict=feed_dict)\n", - " average_loss += loss_val\n", - "\n", - " if step % 2000 == 0:\n", - " if step > 0:\n", - " average_loss /= 2000\n", - " # The average loss is an estimate of the loss over the last 2000 batches.\n", - " print(\"Average loss at step \", step, \": \", average_loss)\n", - " average_loss = 0\n", - "\n", - " # Note that this is expensive (~20% slowdown if computed every 500 steps)\n", - " if step % 10000 == 0:\n", - " sim = similarity.eval()\n", - " for i in range(valid_size):\n", - " valid_word = vocabulary[valid_examples[i]]\n", - " top_k = 8 # number of nearest neighbors\n", - " nearest = (-sim[i, :]).argsort()[1:top_k+1]\n", - " log_str = \"Nearest to %s:\" % valid_word\n", - " for k in range(top_k):\n", - " close_word = vocabulary[nearest[k]]\n", - " log_str = \"%s %s,\" % (log_str, close_word)\n", - " print(log_str)\n", - "\n", - " final_embeddings = normalized_embeddings.eval()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's save the final embeddings (of course you can use a TensorFlow `Saver` if you prefer):" + "train_set = datasets[\"train\"].repeat().batch(32).map(preprocess)\n", + "train_set = train_set.map(encode_words).prefetch(1)" ] }, { @@ -3481,14 +1791,9 @@ "metadata": {}, "outputs": [], "source": [ - "np.save(\"./my_final_embeddings.npy\", final_embeddings)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Plot the embeddings" + "for X_batch, y_batch in train_set.take(1):\n", + " print(X_batch)\n", + " print(y_batch)" ] }, { @@ -3497,18 +1802,24 @@ "metadata": {}, "outputs": [], "source": [ - "def plot_with_labels(low_dim_embs, labels):\n", - " assert low_dim_embs.shape[0] >= len(labels), \"More labels than embeddings\"\n", - " plt.figure(figsize=(18, 18)) #in inches\n", - " for i, label in enumerate(labels):\n", - " x, y = low_dim_embs[i,:]\n", - " plt.scatter(x, y)\n", - " plt.annotate(label,\n", - " xy=(x, y),\n", - " xytext=(5, 2),\n", - " textcoords='offset points',\n", - " ha='right',\n", - " va='bottom')" + "embed_size = 128\n", + "model = keras.models.Sequential([\n", + " keras.layers.Embedding(vocab_size + num_oov_buckets, embed_size,\n", + " mask_zero=True, # not shown in the book\n", + " input_shape=[None]),\n", + " keras.layers.GRU(128, return_sequences=True),\n", + " keras.layers.GRU(128),\n", + " keras.layers.Dense(1, activation=\"sigmoid\")\n", + "])\n", + "model.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n", + "history = model.fit(train_set, steps_per_epoch=train_size // 32, epochs=5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Or using manual masking:" ] }, { @@ -3517,27 +1828,24 @@ "metadata": {}, "outputs": [], "source": [ - "from sklearn.manifold import TSNE\n", - "\n", - "tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)\n", - "plot_only = 500\n", - "low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only,:])\n", - "labels = [vocabulary[i] for i in 
range(plot_only)]\n", - "plot_with_labels(low_dim_embs, labels)" + "K = keras.backend\n", + "embed_size = 128\n", + "inputs = keras.layers.Input(shape=[None])\n", + "mask = keras.layers.Lambda(lambda inputs: K.not_equal(inputs, 0))(inputs)\n", + "z = keras.layers.Embedding(vocab_size + num_oov_buckets, embed_size)(inputs)\n", + "z = keras.layers.GRU(128, return_sequences=True)(z, mask=mask)\n", + "z = keras.layers.GRU(128)(z, mask=mask)\n", + "outputs = keras.layers.Dense(1, activation=\"sigmoid\")(z)\n", + "model = keras.models.Model(inputs=[inputs], outputs=[outputs])\n", + "model.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n", + "history = model.fit(train_set, steps_per_epoch=train_size // 32, epochs=5)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "# Machine Translation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The `basic_rnn_seq2seq()` function creates a simple Encoder/Decoder model: it first runs an RNN to encode `encoder_inputs` into a state vector, then runs a decoder initialized with the last encoder state on `decoder_inputs`. Encoder and decoder use the same RNN cell type but they don't share parameters." + "## Reusing Pretrained Embeddings" ] }, { @@ -3546,39 +1854,7 @@ "metadata": {}, "outputs": [], "source": [ - "import tensorflow as tf\n", - "reset_graph()\n", - "\n", - "n_steps = 50\n", - "n_neurons = 200\n", - "n_layers = 3\n", - "num_encoder_symbols = 20000\n", - "num_decoder_symbols = 20000\n", - "embedding_size = 150\n", - "learning_rate = 0.01\n", - "\n", - "X = tf.placeholder(tf.int32, [None, n_steps]) # English sentences\n", - "Y = tf.placeholder(tf.int32, [None, n_steps]) # French translations\n", - "W = tf.placeholder(tf.float32, [None, n_steps - 1, 1])\n", - "Y_input = Y[:, :-1]\n", - "Y_target = Y[:, 1:]\n", - "\n", - "encoder_inputs = tf.unstack(tf.transpose(X)) # list of 1D tensors\n", - "decoder_inputs = tf.unstack(tf.transpose(Y_input)) # list of 1D tensors\n", - "\n", - "lstm_cells = [tf.nn.rnn_cell.BasicLSTMCell(num_units=n_neurons)\n", - " for layer in range(n_layers)]\n", - "cell = tf.nn.rnn_cell.MultiRNNCell(lstm_cells)\n", - "\n", - "output_seqs, states = tf.contrib.legacy_seq2seq.embedding_rnn_seq2seq(\n", - " encoder_inputs,\n", - " decoder_inputs,\n", - " cell,\n", - " num_encoder_symbols,\n", - " num_decoder_symbols,\n", - " embedding_size)\n", - "\n", - "logits = tf.transpose(tf.unstack(output_seqs), perm=[1, 0, 2])" + "tf.random.set_seed(42)" ] }, { @@ -3587,15 +1863,269 @@ "metadata": {}, "outputs": [], "source": [ - "logits_flat = tf.reshape(logits, [-1, num_decoder_symbols])\n", - "Y_target_flat = tf.reshape(Y_target, [-1])\n", - "W_flat = tf.reshape(W, [-1])\n", - "xentropy = W_flat * tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y_target_flat, logits=logits_flat)\n", - "loss = tf.reduce_mean(xentropy)\n", - "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n", - "training_op = optimizer.minimize(loss)\n", + "TFHUB_CACHE_DIR = os.path.join(os.curdir, \"my_tfhub_cache\")\n", + "os.environ[\"TFHUB_CACHE_DIR\"] = TFHUB_CACHE_DIR" + ] + }, + { + "cell_type": "code", + "execution_count": 118, + "metadata": {}, + "outputs": [], + "source": [ + "import tensorflow_hub as hub\n", "\n", - "init = tf.global_variables_initializer()" + "model = keras.Sequential([\n", + " hub.KerasLayer(\"https://tfhub.dev/google/tf2-preview/nnlm-en-dim50/1\",\n", + " dtype=tf.string, input_shape=[], output_shape=[50]),\n", + " keras.layers.Dense(128, 
activation=\"relu\"),\n", + " keras.layers.Dense(1, activation=\"sigmoid\")\n", + "])\n", + "model.compile(loss=\"binary_crossentropy\", optimizer=\"adam\",\n", + " metrics=[\"accuracy\"])" + ] + }, + { + "cell_type": "code", + "execution_count": 119, + "metadata": {}, + "outputs": [], + "source": [ + "for dirpath, dirnames, filenames in os.walk(TFHUB_CACHE_DIR):\n", + " for filename in filenames:\n", + " print(os.path.join(dirpath, filename))" + ] + }, + { + "cell_type": "code", + "execution_count": 120, + "metadata": {}, + "outputs": [], + "source": [ + "import tensorflow_datasets as tfds\n", + "\n", + "datasets, info = tfds.load(\"imdb_reviews\", as_supervised=True, with_info=True)\n", + "train_size = info.splits[\"train\"].num_examples\n", + "batch_size = 32\n", + "train_set = datasets[\"train\"].repeat().batch(batch_size).prefetch(1)\n", + "history = model.fit(train_set, steps_per_epoch=train_size // batch_size, epochs=5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Automatic Translation" + ] + }, + { + "cell_type": "code", + "execution_count": 121, + "metadata": {}, + "outputs": [], + "source": [ + "tf.random.set_seed(42)" + ] + }, + { + "cell_type": "code", + "execution_count": 122, + "metadata": {}, + "outputs": [], + "source": [ + "vocab_size = 100\n", + "embed_size = 10" + ] + }, + { + "cell_type": "code", + "execution_count": 123, + "metadata": {}, + "outputs": [], + "source": [ + "import tensorflow_addons as tfa\n", + "\n", + "encoder_inputs = keras.layers.Input(shape=[None], dtype=np.int32)\n", + "decoder_inputs = keras.layers.Input(shape=[None], dtype=np.int32)\n", + "sequence_lengths = keras.layers.Input(shape=[], dtype=np.int32)\n", + "\n", + "embeddings = keras.layers.Embedding(vocab_size, embed_size)\n", + "encoder_embeddings = embeddings(encoder_inputs)\n", + "decoder_embeddings = embeddings(decoder_inputs)\n", + "\n", + "encoder = keras.layers.LSTM(512, return_state=True)\n", + "encoder_outputs, state_h, state_c = encoder(encoder_embeddings)\n", + "encoder_state = [state_h, state_c]\n", + "\n", + "sampler = tfa.seq2seq.sampler.TrainingSampler()\n", + "\n", + "decoder_cell = keras.layers.LSTMCell(512)\n", + "output_layer = keras.layers.Dense(vocab_size)\n", + "decoder = tfa.seq2seq.basic_decoder.BasicDecoder(decoder_cell, sampler,\n", + " output_layer=output_layer)\n", + "final_outputs, final_state, final_sequence_lengths = decoder(\n", + " decoder_embeddings, initial_state=encoder_state,\n", + " sequence_length=sequence_lengths)\n", + "Y_proba = tf.nn.softmax(final_outputs.rnn_output)\n", + "\n", + "model = keras.models.Model(\n", + " inputs=[encoder_inputs, decoder_inputs, sequence_lengths],\n", + " outputs=[Y_proba])" + ] + }, + { + "cell_type": "code", + "execution_count": 124, + "metadata": {}, + "outputs": [], + "source": [ + "model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"adam\")" + ] + }, + { + "cell_type": "code", + "execution_count": 125, + "metadata": {}, + "outputs": [], + "source": [ + "X = np.random.randint(100, size=10*1000).reshape(1000, 10)\n", + "Y = np.random.randint(100, size=15*1000).reshape(1000, 15)\n", + "X_decoder = np.c_[np.zeros((1000, 1)), Y[:, :-1]]\n", + "seq_lengths = np.full([1000], 15)\n", + "\n", + "history = model.fit([X, X_decoder, seq_lengths], Y, epochs=2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Bidirectional Recurrent Layers" + ] + }, + { + "cell_type": "code", + "execution_count": 126, + "metadata": {}, + "outputs": [], + "source": [ + 
"model = keras.models.Sequential([\n", + " keras.layers.GRU(10, return_sequences=True, input_shape=[None, 10]),\n", + " keras.layers.Bidirectional(keras.layers.GRU(10, return_sequences=True))\n", + "])\n", + "\n", + "model.summary()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Positional Encoding" + ] + }, + { + "cell_type": "code", + "execution_count": 127, + "metadata": {}, + "outputs": [], + "source": [ + "class PositionalEncoding(keras.layers.Layer):\n", + " def __init__(self, max_steps, max_dims, dtype=tf.float32, **kwargs):\n", + " super().__init__(dtype=dtype, **kwargs)\n", + " if max_dims % 2 == 1: max_dims += 1 # max_dims must be even\n", + " p, i = np.meshgrid(np.arange(max_steps), np.arange(max_dims // 2))\n", + " pos_emb = np.empty((1, max_steps, max_dims))\n", + " pos_emb[0, :, ::2] = np.sin(p / 10000**(2 * i / max_dims)).T\n", + " pos_emb[0, :, 1::2] = np.cos(p / 10000**(2 * i / max_dims)).T\n", + " self.positional_embedding = tf.constant(pos_emb.astype(self.dtype))\n", + " def call(self, inputs):\n", + " shape = tf.shape(inputs)\n", + " return inputs + self.positional_embedding[:, :shape[-2], :shape[-1]]" + ] + }, + { + "cell_type": "code", + "execution_count": 128, + "metadata": {}, + "outputs": [], + "source": [ + "max_steps = 201\n", + "max_dims = 512\n", + "pos_emb = PositionalEncoding(max_steps, max_dims)\n", + "PE = pos_emb(np.zeros((1, max_steps, max_dims), np.float32))[0].numpy()" + ] + }, + { + "cell_type": "code", + "execution_count": 129, + "metadata": {}, + "outputs": [], + "source": [ + "i1, i2, crop_i = 100, 101, 150\n", + "p1, p2, p3 = 22, 60, 35\n", + "fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(9, 5))\n", + "ax1.plot([p1, p1], [-1, 1], \"k--\", label=\"$p = {}$\".format(p1))\n", + "ax1.plot([p2, p2], [-1, 1], \"k--\", label=\"$p = {}$\".format(p2), alpha=0.5)\n", + "ax1.plot(p3, PE[p3, i1], \"bx\", label=\"$p = {}$\".format(p3))\n", + "ax1.plot(PE[:,i1], \"b-\", label=\"$i = {}$\".format(i1))\n", + "ax1.plot(PE[:,i2], \"r-\", label=\"$i = {}$\".format(i2))\n", + "ax1.plot([p1, p2], [PE[p1, i1], PE[p2, i1]], \"bo\")\n", + "ax1.plot([p1, p2], [PE[p1, i2], PE[p2, i2]], \"ro\")\n", + "ax1.legend(loc=\"center right\", fontsize=14, framealpha=0.95)\n", + "ax1.set_ylabel(\"$P_{(p,i)}$\", rotation=0, fontsize=16)\n", + "ax1.grid(True, alpha=0.3)\n", + "ax1.hlines(0, 0, max_steps - 1, color=\"k\", linewidth=1, alpha=0.3)\n", + "ax1.axis([0, max_steps - 1, -1, 1])\n", + "ax2.imshow(PE.T[:crop_i], cmap=\"gray\", interpolation=\"bilinear\", aspect=\"auto\")\n", + "ax2.hlines(i1, 0, max_steps - 1, color=\"b\")\n", + "cheat = 2 # need to raise the red line a bit, or else it hides the blue one\n", + "ax2.hlines(i2+cheat, 0, max_steps - 1, color=\"r\")\n", + "ax2.plot([p1, p1], [0, crop_i], \"k--\")\n", + "ax2.plot([p2, p2], [0, crop_i], \"k--\", alpha=0.5)\n", + "ax2.plot([p1, p2], [i2+cheat, i2+cheat], \"ro\")\n", + "ax2.plot([p1, p2], [i1, i1], \"bo\")\n", + "ax2.axis([0, max_steps - 1, 0, crop_i])\n", + "ax2.set_xlabel(\"$p$\", fontsize=16)\n", + "ax2.set_ylabel(\"$i$\", rotation=0, fontsize=16)\n", + "plt.savefig(\"positional_embedding_plot\")\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 130, + "metadata": {}, + "outputs": [], + "source": [ + "embed_size = 512; max_steps = 500; vocab_size = 10000\n", + "encoder_inputs = keras.layers.Input(shape=[None], dtype=np.int32)\n", + "decoder_inputs = keras.layers.Input(shape=[None], dtype=np.int32)\n", + "embeddings = 
keras.layers.Embedding(vocab_size, embed_size)\n", + "encoder_embeddings = embeddings(encoder_inputs)\n", + "decoder_embeddings = embeddings(decoder_inputs)\n", + "positional_encoding = PositionalEncoding(max_steps, max_dims=embed_size)\n", + "encoder_in = positional_encoding(encoder_embeddings)\n", + "decoder_in = positional_encoding(decoder_embeddings)" ] }, { "cell_type": "code", "execution_count": 131, "metadata": {}, "outputs": [], "source": [ + "for N in range(6):\n", + "    encoder_attn = keras.layers.Attention(use_scale=True)\n", + "    encoder_in = encoder_attn([encoder_in, encoder_in])\n", + "    masked_decoder_attn = keras.layers.Attention(use_scale=True, causal=True)\n", + "    decoder_in = masked_decoder_attn([decoder_in, decoder_in])\n", + "    decoder_attn = keras.layers.Attention(use_scale=True)\n", + "    final_enc = decoder_attn([decoder_in, encoder_in])\n", + "\n", + "output_layer = keras.layers.TimeDistributed(\n", + "    keras.layers.Dense(vocab_size, activation=\"softmax\"))\n", + "outputs = output_layer(final_enc)" ] }, { @@ -3637,7 +2167,7 @@ }, { "cell_type": "code", - "execution_count": 118, + "execution_count": 132, "metadata": {}, "outputs": [], "source": [ @@ -3682,7 +2212,7 @@ }, { "cell_type": "code", - "execution_count": 119, + "execution_count": 133, "metadata": {}, "outputs": [], "source": [ @@ -3699,7 +2229,7 @@ }, { "cell_type": "code", - "execution_count": 120, + "execution_count": 134, "metadata": {}, "outputs": [], "source": [ @@ -3716,7 +2246,7 @@ }, { "cell_type": "code", - "execution_count": 121, + "execution_count": 135, "metadata": {}, "outputs": [], "source": [ @@ -3737,7 +2267,7 @@ }, { "cell_type": "code", - "execution_count": 122, + "execution_count": 136, "metadata": {}, "outputs": [], "source": [ @@ -3749,269 +2279,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "It's not possible to feed a string directly to an RNN: we need to convert it to a sequence of vectors, first. Each vector will represent a single letter, using a one-hot encoding. For example, the letter \"B\" will be represented as the vector `[1, 0, 0, 0, 0, 0, 0]`, the letter \"E\" will be represented as `[0, 1, 0, 0, 0, 0, 0]` and so on. Let's write a function that converts a string to a sequence of such one-hot vectors. Note that if the string is shorter than `n_steps`, it will be padded with zero vectors (later, we will tell TensorFlow how long each string actually is using the `sequence_length` parameter)." + "To be continued...\n",
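+ "\n",
+ "In the meantime, the cells below sketch (editor's addition) how the Reber-grammar classifier from the old solution could be written with Keras."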
] - }, - { - "cell_type": "code", - "execution_count": 123, - "metadata": {}, - "outputs": [], - "source": [ - "def string_to_one_hot_vectors(string, n_steps, chars=\"BEPSTVX\"):\n", - " char_to_index = {char: index for index, char in enumerate(chars)}\n", - " output = np.zeros((n_steps, len(chars)), dtype=np.int32)\n", - " for index, char in enumerate(string):\n", - " output[index, char_to_index[char]] = 1.\n", - " return output" - ] - }, - { - "cell_type": "code", - "execution_count": 124, - "metadata": {}, - "outputs": [], - "source": [ - "string_to_one_hot_vectors(\"BTBTXSETE\", 12)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can now generate the dataset, with 50% good strings, and 50% bad strings:" - ] - }, - { - "cell_type": "code", - "execution_count": 125, - "metadata": {}, - "outputs": [], - "source": [ - "def generate_dataset(size):\n", - " good_strings = [generate_string(embedded_reber_grammar)\n", - " for _ in range(size // 2)]\n", - " bad_strings = [generate_corrupted_string(embedded_reber_grammar)\n", - " for _ in range(size - size // 2)]\n", - " all_strings = good_strings + bad_strings\n", - " n_steps = max([len(string) for string in all_strings])\n", - " X = np.array([string_to_one_hot_vectors(string, n_steps)\n", - " for string in all_strings])\n", - " seq_length = np.array([len(string) for string in all_strings])\n", - " y = np.array([[1] for _ in range(len(good_strings))] +\n", - " [[0] for _ in range(len(bad_strings))])\n", - " rnd_idx = np.random.permutation(size)\n", - " return X[rnd_idx], seq_length[rnd_idx], y[rnd_idx]" - ] - }, - { - "cell_type": "code", - "execution_count": 126, - "metadata": {}, - "outputs": [], - "source": [ - "X_train, l_train, y_train = generate_dataset(10000)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's take a look at the first training instances:" - ] - }, - { - "cell_type": "code", - "execution_count": 127, - "metadata": {}, - "outputs": [], - "source": [ - "X_train[0]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It's padded with a lot of zeros because the longest string in the dataset is that long. How long is this particular string?" - ] - }, - { - "cell_type": "code", - "execution_count": 128, - "metadata": {}, - "outputs": [], - "source": [ - "l_train[0]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "What class is it?" - ] - }, - { - "cell_type": "code", - "execution_count": 129, - "metadata": {}, - "outputs": [], - "source": [ - "y_train[0]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Perfect! We are ready to create the RNN to identify good strings. We build a sequence classifier very similar to the one we built earlier to classify MNIST images, with two main differences:\n", - "* First, the input strings have variable length, so we need to specify the `sequence_length` when calling the `dynamic_rnn()` function.\n", - "* Second, this is a binary classifier, so we only need one output neuron that will output, for each input string, the estimated log probability that it is a good string. 
For multiclass classification, we used `sparse_softmax_cross_entropy_with_logits()` but for binary classification we use `sigmoid_cross_entropy_with_logits()`.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 130, - "metadata": {}, - "outputs": [], - "source": [ - "reset_graph()\n", - "\n", - "possible_chars = \"BEPSTVX\"\n", - "n_inputs = len(possible_chars)\n", - "n_neurons = 30\n", - "n_outputs = 1\n", - "\n", - "learning_rate = 0.02\n", - "momentum = 0.95\n", - "\n", - "X = tf.placeholder(tf.float32, [None, None, n_inputs], name=\"X\")\n", - "seq_length = tf.placeholder(tf.int32, [None], name=\"seq_length\")\n", - "y = tf.placeholder(tf.float32, [None, 1], name=\"y\")\n", - "\n", - "gru_cell = tf.nn.rnn_cell.GRUCell(num_units=n_neurons)\n", - "outputs, states = tf.nn.dynamic_rnn(gru_cell, X, dtype=tf.float32,\n", - " sequence_length=seq_length)\n", - "\n", - "logits = tf.layers.dense(states, n_outputs, name=\"logits\")\n", - "y_pred = tf.cast(tf.greater(logits, 0.), tf.float32, name=\"y_pred\")\n", - "y_proba = tf.nn.sigmoid(logits, name=\"y_proba\")\n", - "\n", - "xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)\n", - "loss = tf.reduce_mean(xentropy, name=\"loss\")\n", - "optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,\n", - " momentum=momentum,\n", - " use_nesterov=True)\n", - "training_op = optimizer.minimize(loss)\n", - "\n", - "correct = tf.equal(y_pred, y, name=\"correct\")\n", - "accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name=\"accuracy\")\n", - "\n", - "init = tf.global_variables_initializer()\n", - "saver = tf.train.Saver()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now let's generate a validation set so we can track progress during training:" - ] - }, - { - "cell_type": "code", - "execution_count": 131, - "metadata": {}, - "outputs": [], - "source": [ - "X_val, l_val, y_val = generate_dataset(5000)" - ] - }, - { - "cell_type": "code", - "execution_count": 132, - "metadata": {}, - "outputs": [], - "source": [ - "n_epochs = 50\n", - "batch_size = 50\n", - "\n", - "with tf.Session() as sess:\n", - " init.run()\n", - " for epoch in range(n_epochs):\n", - " X_batches = np.array_split(X_train, len(X_train) // batch_size)\n", - " l_batches = np.array_split(l_train, len(l_train) // batch_size)\n", - " y_batches = np.array_split(y_train, len(y_train) // batch_size)\n", - " for X_batch, l_batch, y_batch in zip(X_batches, l_batches, y_batches):\n", - " loss_val, _ = sess.run(\n", - " [loss, training_op],\n", - " feed_dict={X: X_batch, seq_length: l_batch, y: y_batch})\n", - " acc_train = accuracy.eval(feed_dict={X: X_batch, seq_length: l_batch, y: y_batch})\n", - " acc_val = accuracy.eval(feed_dict={X: X_val, seq_length: l_val, y: y_val})\n", - " print(\"{:4d} Train loss: {:.4f}, accuracy: {:.2f}% Validation accuracy: {:.2f}%\".format(\n", - " epoch, loss_val, 100 * acc_train, 100 * acc_val))\n", - " saver.save(sess, \"./my_reber_classifier\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now let's test our RNN on two tricky strings: the first one is bad while the second one is good. They only differ by the second to last character. If the RNN gets this right, it shows that it managed to notice the pattern that the second letter should always be equal to the second to last letter. That requires a fairly long short-term memory (which is the reason why we used a GRU cell)." 
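+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A minimal Keras sketch (editor's addition, not the book's official solution): a single `GRU` layer reads each one-hot encoded string (shape `[batch, time, 7]`, padded with zero vectors), and a sigmoid output estimates the probability that the string respects the grammar. Masking of the zero-padding is omitted for brevity:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "possible_chars = \"BEPSTVX\"\n",
+ "\n",
+ "reber_model = keras.models.Sequential([\n",
+ "    keras.layers.GRU(30, input_shape=[None, len(possible_chars)]),\n",
+ "    keras.layers.Dense(1, activation=\"sigmoid\")\n",
+ "])\n",
+ "optimizer = keras.optimizers.SGD(lr=0.02, momentum=0.95, nesterov=True)\n",
+ "reber_model.compile(loss=\"binary_crossentropy\", optimizer=optimizer,\n",
+ "                    metrics=[\"accuracy\"])"
+ ]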
- ] - }, - { - "cell_type": "code", - "execution_count": 133, - "metadata": {}, - "outputs": [], - "source": [ - "test_strings = [\n", - " \"BPBTSSSSSSSXXTTVPXVPXTTTTTVVETE\",\n", - " \"BPBTSSSSSSSXXTTVPXVPXTTTTTVVEPE\"]\n", - "l_test = np.array([len(s) for s in test_strings])\n", - "max_length = l_test.max()\n", - "X_test = [string_to_one_hot_vectors(s, n_steps=max_length)\n", - " for s in test_strings]\n", - "\n", - "with tf.Session() as sess:\n", - " saver.restore(sess, \"./my_reber_classifier\")\n", - " y_proba_val = y_proba.eval(feed_dict={X: X_test, seq_length: l_test})\n", - "\n", - "print()\n", - "print(\"Estimated probability that these are Reber strings:\")\n", - "for index, string in enumerate(test_strings):\n", - " print(\"{}: {:.2f}%\".format(string, 100 * y_proba_val[index][0]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Ta-da! It worked fine. The RNN found the correct answers with high confidence. :)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 8. and 9." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Coming soon..." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": {