Mirror of https://github.com/ArthurDanjou/handson-ml3.git (synced 2026-01-14 20:19:29 +01:00)
Replace keras with tf.keras
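The change is mechanical: wherever a cell used the standalone keras name, it now goes through the tf.keras namespace. A minimal sketch of the before/after pattern, assuming `import tensorflow as tf` already appears in the notebook's setup cell (that import is not shown in this diff):

import tensorflow as tf

# Before (old cells): model = keras.models.Sequential([keras.layers.Dense(1)])
# After (new cells): everything is reached through tf.keras
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=[8])])
model.compile(loss="mse", optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3))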
@@ -548,13 +548,13 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"keras.backend.clear_session()\n",
+"tf.keras.backend.clear_session()\n",
 "np.random.seed(42)\n",
 "tf.random.set_seed(42)\n",
 "\n",
-"model = keras.models.Sequential([\n",
-"    keras.layers.Dense(30, activation=\"relu\", input_shape=X_train.shape[1:]),\n",
-"    keras.layers.Dense(1),\n",
+"model = tf.keras.Sequential([\n",
+"    tf.keras.layers.Dense(30, activation=\"relu\", input_shape=X_train.shape[1:]),\n",
+"    tf.keras.layers.Dense(1),\n",
 "])"
 ]
 },
@@ -564,7 +564,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"model.compile(loss=\"mse\", optimizer=keras.optimizers.SGD(learning_rate=1e-3))"
+"model.compile(loss=\"mse\", optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3))"
 ]
 },
 {
@@ -606,8 +606,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"optimizer = keras.optimizers.Nadam(learning_rate=0.01)\n",
-"loss_fn = keras.losses.mean_squared_error\n",
+"optimizer = tf.keras.optimizers.Nadam(learning_rate=0.01)\n",
+"loss_fn = tf.keras.losses.mean_squared_error\n",
 "\n",
 "n_epochs = 5\n",
 "batch_size = 32\n",
@@ -631,7 +631,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"keras.backend.clear_session()\n",
+"tf.keras.backend.clear_session()\n",
 "np.random.seed(42)\n",
 "tf.random.set_seed(42)"
 ]
@@ -642,8 +642,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"optimizer = keras.optimizers.Nadam(learning_rate=0.01)\n",
-"loss_fn = keras.losses.mean_squared_error\n",
+"optimizer = tf.keras.optimizers.Nadam(learning_rate=0.01)\n",
+"loss_fn = tf.keras.losses.mean_squared_error\n",
 "\n",
 "@tf.function\n",
 "def train(model, n_epochs, batch_size=32,\n",
@@ -668,7 +668,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"keras.backend.clear_session()\n",
+"tf.keras.backend.clear_session()\n",
 "np.random.seed(42)\n",
 "tf.random.set_seed(42)"
 ]
@@ -679,8 +679,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"optimizer = keras.optimizers.Nadam(learning_rate=0.01)\n",
-"loss_fn = keras.losses.mean_squared_error\n",
+"optimizer = tf.keras.optimizers.Nadam(learning_rate=0.01)\n",
+"loss_fn = tf.keras.losses.mean_squared_error\n",
 "\n",
 "@tf.function\n",
 "def train(model, n_epochs, batch_size=32,\n",
@@ -1632,7 +1632,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"keras.backend.clear_session()\n",
+"tf.keras.backend.clear_session()\n",
 "np.random.seed(42)\n",
 "tf.random.set_seed(42)"
 ]
@@ -1667,12 +1667,12 @@
 "outputs": [],
 "source": [
 "columns_without_target = columns[:-1]\n",
-"model = keras.models.Sequential([\n",
-"    keras.layers.DenseFeatures(feature_columns=columns_without_target),\n",
-"    keras.layers.Dense(1)\n",
+"model = tf.keras.Sequential([\n",
+"    tf.keras.layers.DenseFeatures(feature_columns=columns_without_target),\n",
+"    tf.keras.layers.Dense(1)\n",
 "])\n",
 "model.compile(loss=\"mse\",\n",
-"              optimizer=keras.optimizers.SGD(learning_rate=1e-3),\n",
+"              optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3),\n",
 "              metrics=[\"accuracy\"])\n",
 "model.fit(dataset, steps_per_epoch=len(X_train) // batch_size, epochs=5)"
 ]
@@ -1684,7 +1684,7 @@
 "outputs": [],
 "source": [
 "some_columns = [ocean_proximity_embed, bucketized_income]\n",
-"dense_features = keras.layers.DenseFeatures(some_columns)\n",
+"dense_features = tf.keras.layers.DenseFeatures(some_columns)\n",
 "dense_features({\n",
 "    \"ocean_proximity\": [[\"NEAR OCEAN\"], [\"INLAND\"], [\"INLAND\"]],\n",
 "    \"median_income\": [[3.], [7.2], [1.]]\n",
@@ -1791,7 +1791,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"keras.backend.clear_session()\n",
+"tf.keras.backend.clear_session()\n",
 "np.random.seed(42)\n",
 "tf.random.set_seed(42)"
 ]
@@ -1804,12 +1804,12 @@
 "source": [
 "datasets = tfds.load(name=\"mnist\", batch_size=32, as_supervised=True)\n",
 "mnist_train = datasets[\"train\"].repeat().prefetch(1)\n",
-"model = keras.models.Sequential([\n",
-"    keras.layers.Flatten(input_shape=[28, 28, 1]),\n",
-"    keras.layers.Lambda(lambda images: tf.cast(images, tf.float32)),\n",
-"    keras.layers.Dense(10, activation=\"softmax\")])\n",
+"model = tf.keras.Sequential([\n",
+"    tf.keras.layers.Flatten(input_shape=[28, 28, 1]),\n",
+"    tf.keras.layers.Lambda(lambda images: tf.cast(images, tf.float32)),\n",
+"    tf.keras.layers.Dense(10, activation=\"softmax\")])\n",
 "model.compile(loss=\"sparse_categorical_crossentropy\",\n",
-"              optimizer=keras.optimizers.SGD(learning_rate=1e-3),\n",
+"              optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3),\n",
 "              metrics=[\"accuracy\"])\n",
 "model.fit(mnist_train, steps_per_epoch=60000 // 32, epochs=5)"
 ]
@@ -1827,7 +1827,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"keras.backend.clear_session()\n",
+"tf.keras.backend.clear_session()\n",
 "np.random.seed(42)\n",
 "tf.random.set_seed(42)"
 ]
@@ -1843,10 +1843,10 @@
 "hub_layer = hub.KerasLayer(\"https://tfhub.dev/google/nnlm-en-dim50/2\",\n",
 "                           output_shape=[50], input_shape=[], dtype=tf.string)\n",
 "\n",
-"model = keras.Sequential()\n",
+"model = tf.keras.Sequential()\n",
 "model.add(hub_layer)\n",
-"model.add(keras.layers.Dense(16, activation='relu'))\n",
-"model.add(keras.layers.Dense(1, activation='sigmoid'))\n",
+"model.add(tf.keras.layers.Dense(16, activation='relu'))\n",
+"model.add(tf.keras.layers.Dense(1, activation='sigmoid'))\n",
 "\n",
 "model.summary()"
 ]
@@ -1890,7 +1890,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()\n",
+"(X_train_full, y_train_full), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()\n",
 "X_valid, X_train = X_train_full[:5000], X_train_full[5000:]\n",
 "y_valid, y_train = y_train_full[:5000], y_train_full[5000:]"
 ]
@@ -1901,7 +1901,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"keras.backend.clear_session()\n",
+"tf.keras.backend.clear_session()\n",
 "np.random.seed(42)\n",
 "tf.random.set_seed(42)"
 ]
@@ -2052,31 +2052,31 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"keras.backend.clear_session()\n",
+"tf.keras.backend.clear_session()\n",
 "tf.random.set_seed(42)\n",
 "np.random.seed(42)\n",
 "\n",
-"class Standardization(keras.layers.Layer):\n",
+"class Standardization(tf.keras.layers.Layer):\n",
 "    def adapt(self, data_sample):\n",
 "        self.means_ = np.mean(data_sample, axis=0, keepdims=True)\n",
 "        self.stds_ = np.std(data_sample, axis=0, keepdims=True)\n",
 "    def call(self, inputs):\n",
-"        return (inputs - self.means_) / (self.stds_ + keras.backend.epsilon())\n",
+"        return (inputs - self.means_) / (self.stds_ + tf.keras.backend.epsilon())\n",
 "\n",
 "standardization = Standardization(input_shape=[28, 28])\n",
 "# or perhaps soon:\n",
-"#standardization = keras.layers.Normalization()\n",
+"#standardization = tf.keras.layers.Normalization()\n",
 "\n",
 "sample_image_batches = train_set.take(100).map(lambda image, label: image)\n",
 "sample_images = np.concatenate(list(sample_image_batches.as_numpy_iterator()),\n",
 "                               axis=0).astype(np.float32)\n",
 "standardization.adapt(sample_images)\n",
 "\n",
-"model = keras.models.Sequential([\n",
+"model = tf.keras.Sequential([\n",
 "    standardization,\n",
-"    keras.layers.Flatten(),\n",
-"    keras.layers.Dense(100, activation=\"relu\"),\n",
-"    keras.layers.Dense(10, activation=\"softmax\")\n",
+"    tf.keras.layers.Flatten(),\n",
+"    tf.keras.layers.Dense(100, activation=\"relu\"),\n",
+"    tf.keras.layers.Dense(10, activation=\"softmax\")\n",
 "])\n",
 "model.compile(loss=\"sparse_categorical_crossentropy\",\n",
 "              optimizer=\"nadam\", metrics=[\"accuracy\"])"
@@ -2137,7 +2137,7 @@
 "\n",
 "root = \"http://ai.stanford.edu/~amaas/data/sentiment/\"\n",
 "filename = \"aclImdb_v1.tar.gz\"\n",
-"filepath = keras.utils.get_file(filename, root + filename, extract=True)\n",
+"filepath = tf.keras.utils.get_file(filename, root + filename, extract=True)\n",
 "path = Path(filepath).with_name(\"aclImdb\")\n",
 "path"
 ]
@@ -2416,7 +2416,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"class TextVectorization(keras.layers.Layer):\n",
+"class TextVectorization(tf.keras.layers.Layer):\n",
 "    def __init__(self, max_vocabulary_size=1000, n_oov_buckets=100, dtype=tf.string, **kwargs):\n",
 "        super().__init__(dtype=dtype, **kwargs)\n",
 "        self.max_vocabulary_size = max_vocabulary_size\n",
@@ -2549,7 +2549,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"class BagOfWords(keras.layers.Layer):\n",
+"class BagOfWords(tf.keras.layers.Layer):\n",
 "    def __init__(self, n_tokens, dtype=tf.int32, **kwargs):\n",
 "        super().__init__(dtype=dtype, **kwargs)\n",
 "        self.n_tokens = n_tokens\n",
@@ -2605,11 +2605,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"model = keras.models.Sequential([\n",
+"model = tf.keras.Sequential([\n",
 "    text_vectorization,\n",
 "    bag_of_words,\n",
-"    keras.layers.Dense(100, activation=\"relu\"),\n",
-"    keras.layers.Dense(1, activation=\"sigmoid\"),\n",
+"    tf.keras.layers.Dense(100, activation=\"relu\"),\n",
+"    tf.keras.layers.Dense(1, activation=\"sigmoid\"),\n",
 "])\n",
 "model.compile(loss=\"binary_crossentropy\", optimizer=\"nadam\",\n",
 "              metrics=[\"accuracy\"])\n",
@@ -2702,14 +2702,14 @@
 "source": [
 "embedding_size = 20\n",
 "\n",
-"model = keras.models.Sequential([\n",
+"model = tf.keras.Sequential([\n",
 "    text_vectorization,\n",
-"    keras.layers.Embedding(input_dim=n_tokens,\n",
+"    tf.keras.layers.Embedding(input_dim=n_tokens,\n",
 "                           output_dim=embedding_size,\n",
 "                           mask_zero=True), # <pad> tokens => zero vectors\n",
-"    keras.layers.Lambda(compute_mean_embedding),\n",
-"    keras.layers.Dense(100, activation=\"relu\"),\n",
-"    keras.layers.Dense(1, activation=\"sigmoid\"),\n",
+"    tf.keras.layers.Lambda(compute_mean_embedding),\n",
+"    tf.keras.layers.Dense(100, activation=\"relu\"),\n",
+"    tf.keras.layers.Dense(1, activation=\"sigmoid\"),\n",
 "])"
 ]
 },
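Since the substitution above is purely mechanical, it can be applied to a notebook's code cells in one pass. A hedged sketch of one way to do it (the notebook filename is a placeholder, not a path taken from this commit):

import json
import re

NOTEBOOK = "some_notebook.ipynb"  # placeholder path, not from this commit

# Match a bare "keras." reference that is not already preceded by "tf." or a word character
pattern = re.compile(r"(?<![\w.])keras\.")

with open(NOTEBOOK) as f:
    nb = json.load(f)

for cell in nb["cells"]:
    if cell["cell_type"] == "code":
        # Rewrite each source line, leaving existing "tf.keras." references untouched
        cell["source"] = [pattern.sub("tf.keras.", line) for line in cell["source"]]

with open(NOTEBOOK, "w") as f:
    json.dump(nb, f, indent=1)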