Refactor code formatting and improve readability in Jupyter notebooks for TP_4 and TP_5
- Adjusted indentation and line breaks for better clarity in function definitions and import statements.
- Standardized string quotes for consistency across the codebase.
- Enhanced readability of DataFrame creation and manipulation by breaking long lines into multiple lines.
- Cleaned up print statements and comments for improved understanding.
- Ensured consistent use of whitespace around operators and after commas.
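To make the conventions concrete, here is the kind of rewrite applied throughout, shown schematically on a call touched in these notebooks (a representative sketch, assuming black-style rules; the commit does not name a formatter):

import numpy as np
from sklearn.model_selection import train_test_split

X, y = np.arange(40).reshape(10, 4), np.arange(10)

# Before: one packed line:
# X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8)
# After: long calls are wrapped with the arguments on their own indented line,
# trailing commas are added, and string literals use double quotes.
X_train, X_valid, y_train, y_valid = train_test_split(
    X, y, train_size=0.8
)
print(X_train.shape, X_valid.shape)  # (8, 4) (2, 4)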
@@ -24,20 +24,29 @@
 "%matplotlib inline\n",
 "import matplotlib.pyplot as plt\n",
 "import seaborn as sns\n",
-"sns.set(style='whitegrid')\n",
+"\n",
+"sns.set(style=\"whitegrid\")\n",
 "\n",
 "import tensorflow as tf\n",
 "from sklearn.model_selection import train_test_split\n",
 "from sklearn.preprocessing import StandardScaler\n",
 "from tensorflow import keras\n",
 "\n",
-"(X_train_full, y_train_full), (X_test, y_test) = (keras.datasets.mnist.load_data())\n",
-"X_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full, train_size=0.8)\n",
+"(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()\n",
+"X_train, X_valid, y_train, y_valid = train_test_split(\n",
+"    X_train_full, y_train_full, train_size=0.8\n",
+")\n",
 "\n",
 "scaler = StandardScaler()\n",
-"X_train = scaler.fit_transform(X_train.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28)\n",
-"X_valid = scaler.transform(X_valid.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28)\n",
-"X_test = scaler.transform(X_test.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28)"
+"X_train = scaler.fit_transform(X_train.astype(np.float32).reshape(-1, 28 * 28)).reshape(\n",
+"    -1, 28, 28\n",
+")\n",
+"X_valid = scaler.transform(X_valid.astype(np.float32).reshape(-1, 28 * 28)).reshape(\n",
+"    -1, 28, 28\n",
+")\n",
+"X_test = scaler.transform(X_test.astype(np.float32).reshape(-1, 28 * 28)).reshape(\n",
+"    -1, 28, 28\n",
+")"
 ]
 },
 {
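The scaling block in this hunk flattens each 28x28 image to a 784-vector because StandardScaler only accepts 2-D input, scales, then restores the image shape. A standalone sketch of that pattern, with random data standing in for MNIST:

import numpy as np
from sklearn.preprocessing import StandardScaler

X_train = np.random.rand(100, 28, 28).astype(np.float32)  # stand-in images

scaler = StandardScaler()
# Flatten to (n_samples, 784), fit and scale, then reshape back to images.
X_train = scaler.fit_transform(X_train.reshape(-1, 28 * 28)).reshape(-1, 28, 28)
print(X_train.shape)  # (100, 28, 28)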
@@ -69,11 +78,15 @@
 "    [\n",
 "        keras.layers.Input(shape=[28, 28]),\n",
 "        keras.layers.Flatten(),\n",
-"        keras.layers.Dense(256, activation=\"relu\", kernel_regularizer=keras.regularizers.l2(0.001)),\n",
-"        keras.layers.Dense(128, activation=\"relu\", kernel_regularizer=keras.regularizers.l2(0.001)),\n",
+"        keras.layers.Dense(\n",
+"            256, activation=\"relu\", kernel_regularizer=keras.regularizers.l2(0.001)\n",
+"        ),\n",
+"        keras.layers.Dense(\n",
+"            128, activation=\"relu\", kernel_regularizer=keras.regularizers.l2(0.001)\n",
+"        ),\n",
 "        keras.layers.Dense(10, activation=\"softmax\"),\n",
 "    ]\n",
-")\n"
+")"
 ]
 },
 {
@@ -150,8 +163,16 @@
 "        [\n",
 "            keras.layers.Input(shape=[28, 28]),\n",
 "            keras.layers.Flatten(),\n",
-"            keras.layers.Dense(256, activation=\"relu\", kernel_regularizer=keras.regularizers.l2(lambda_l2)),\n",
-"            keras.layers.Dense(128, activation=\"relu\", kernel_regularizer=keras.regularizers.l2(lambda_l2)),\n",
+"            keras.layers.Dense(\n",
+"                256,\n",
+"                activation=\"relu\",\n",
+"                kernel_regularizer=keras.regularizers.l2(lambda_l2),\n",
+"            ),\n",
+"            keras.layers.Dense(\n",
+"                128,\n",
+"                activation=\"relu\",\n",
+"                kernel_regularizer=keras.regularizers.l2(lambda_l2),\n",
+"            ),\n",
 "            keras.layers.Dense(10, activation=\"softmax\"),\n",
 "        ]\n",
 "    )\n",
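These two hunks reformat the same architecture: a 256/128-unit classifier whose hidden layers each carry an L2 weight penalty (0.001 in the first hunk, a lambda_l2 variable in the second). A standalone sketch of the model being reformatted:

from tensorflow import keras

lambda_l2 = 0.001  # value from the first hunk; the second hunk parametrizes it

model = keras.models.Sequential(
    [
        keras.layers.Input(shape=[28, 28]),
        keras.layers.Flatten(),
        keras.layers.Dense(
            256, activation="relu", kernel_regularizer=keras.regularizers.l2(lambda_l2)
        ),
        keras.layers.Dense(
            128, activation="relu", kernel_regularizer=keras.regularizers.l2(lambda_l2)
        ),
        keras.layers.Dense(10, activation="softmax"),
    ]
)
model.summary()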
@@ -218,20 +239,28 @@
 "    lambda_l2 = result[\"lambda_l2\"]\n",
 "\n",
 "    plt.subplot(1, 2, 1)\n",
-"    plt.plot(history_df[\"val_loss\"], label=f\"LR={learning_rate}, L2={lambda_l2}\", color=colors[_])\n",
+"    plt.plot(\n",
+"        history_df[\"val_loss\"],\n",
+"        label=f\"LR={learning_rate}, L2={lambda_l2}\",\n",
+"        color=colors[_],\n",
+"    )\n",
 "    plt.plot(history_df[\"loss\"], linestyle=\"--\", color=colors[_])\n",
 "    plt.xlabel(\"Epochs\")\n",
 "    plt.ylabel(\"Loss\")\n",
 "    plt.legend()\n",
 "\n",
 "    plt.subplot(1, 2, 2)\n",
-"    plt.plot(history_df[\"val_accuracy\"], label=f\"LR={learning_rate}, L2={lambda_l2}\", color=colors[_])\n",
+"    plt.plot(\n",
+"        history_df[\"val_accuracy\"],\n",
+"        label=f\"LR={learning_rate}, L2={lambda_l2}\",\n",
+"        color=colors[_],\n",
+"    )\n",
 "    plt.plot(history_df[\"accuracy\"], linestyle=\"--\", color=colors[_])\n",
 "    plt.xlabel(\"Epochs\")\n",
 "    plt.ylabel(\"Accuracy\")\n",
 "    plt.legend()\n",
 "\n",
-"    plt.show()\n"
+"    plt.show()"
 ]
 },
 {
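The plotting hunk above draws, for each configuration, the validation curve as a solid labelled line and the training curve dashed in the same colour, so each pair can be compared at a glance. A sketch with synthetic histories (the structure of the results entries is inferred from the diff):

import matplotlib.pyplot as plt
import pandas as pd

# Synthetic stand-ins for the notebook's `results` list.
results = [
    {
        "learning_rate": lr,
        "lambda_l2": 0.001,
        "history": pd.DataFrame(
            {
                "loss": [1.0 - 0.08 * lr * e for e in range(10)],
                "val_loss": [1.05 - 0.07 * lr * e for e in range(10)],
            }
        ),
    }
    for lr in (1.0, 0.5)
]
colors = ["C0", "C1"]

for _, result in enumerate(results):
    history_df = result["history"]
    learning_rate = result["learning_rate"]
    lambda_l2 = result["lambda_l2"]
    plt.plot(
        history_df["val_loss"],
        label=f"LR={learning_rate}, L2={lambda_l2}",
        color=colors[_],
    )
    plt.plot(history_df["loss"], linestyle="--", color=colors[_])
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()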
@@ -26,11 +26,11 @@
 "import matplotlib.pyplot as plt\n",
 "import seaborn as sns\n",
 "\n",
-"sns.set(style='whitegrid')\n",
+"sns.set(style=\"whitegrid\")\n",
 "\n",
 "from tensorflow import keras\n",
 "\n",
-"(X_train_full, y_train_full), (X_test, y_test) = (keras.datasets.mnist.load_data())"
+"(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()"
 ]
 },
 {
@@ -61,7 +61,7 @@
 "    X_train_full, y_train_full, test_size=0.2, random_state=42\n",
 ")\n",
 "print(X_train.shape, y_train.shape)\n",
-"print(X_valid.shape, y_valid.shape)\n"
+"print(X_valid.shape, y_valid.shape)"
 ]
 },
 {
@@ -88,9 +88,9 @@
 }
 ],
 "source": [
-"plt.figure(figsize=(10,10))\n",
+"plt.figure(figsize=(10, 10))\n",
 "for i in range(25):\n",
-"    plt.subplot(5,5,i+1)\n",
+"    plt.subplot(5, 5, i + 1)\n",
 "    plt.xticks([])\n",
 "    plt.yticks([])\n",
 "    plt.grid(False)\n",
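This hunk touches the classic 5x5 preview grid of training digits. A self-contained sketch (random arrays stand in for the images; the imshow call is an assumption, since the hunk cuts off before it):

import matplotlib.pyplot as plt
import numpy as np

images = np.random.rand(25, 28, 28)  # stand-ins for X_train digits

plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(images[i], cmap="binary")  # assumed display call
plt.show()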
@@ -174,13 +174,15 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"model = keras.models.Sequential([\n",
-"    keras.layers.Input(shape=[28, 28]),\n",
-"    keras.layers.Flatten(),\n",
-"    keras.layers.Dense(256, activation=\"relu\"),\n",
-"    keras.layers.Dense(128, activation=\"relu\"),\n",
-"    keras.layers.Dense(10, activation=\"softmax\")\n",
-"])"
+"model = keras.models.Sequential(\n",
+"    [\n",
+"        keras.layers.Input(shape=[28, 28]),\n",
+"        keras.layers.Flatten(),\n",
+"        keras.layers.Dense(256, activation=\"relu\"),\n",
+"        keras.layers.Dense(128, activation=\"relu\"),\n",
+"        keras.layers.Dense(10, activation=\"softmax\"),\n",
+"    ]\n",
+")"
 ]
 },
 {
@@ -293,7 +295,7 @@
 }
 ],
 "source": [
-"print(28*28)\n",
+"print(28 * 28)\n",
 "print(256)\n",
 "print(128)\n",
 "print(10)\n",
@@ -332,7 +334,7 @@
 "    loss=\"sparse_categorical_crossentropy\",\n",
 "    optimizer=keras.optimizers.SGD(learning_rate=1e-3),\n",
 "    metrics=[\"accuracy\"],\n",
-")\n"
+")"
 ]
 },
 {
@@ -379,7 +381,7 @@
 "    epochs=epochs,\n",
 "    batch_size=batch_size,\n",
 "    validation_data=(X_valid, y_valid),\n",
-")\n"
+")"
 ]
 },
 {
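These two hunks only move a closing parenthesis onto the final source line. For context, a minimal sketch of the compile and fit calls being reformatted (the epochs and batch_size values and the random batch are assumptions; the notebook trains on MNIST):

import numpy as np
from tensorflow import keras

model = keras.models.Sequential(
    [
        keras.layers.Input(shape=[28, 28]),
        keras.layers.Flatten(),
        keras.layers.Dense(10, activation="softmax"),
    ]
)
model.compile(
    loss="sparse_categorical_crossentropy",
    optimizer=keras.optimizers.SGD(learning_rate=1e-3),
    metrics=["accuracy"],
)

X = np.random.rand(64, 28, 28).astype(np.float32)  # stand-in batch, not MNIST
y = np.random.randint(0, 10, size=64)
model.fit(
    X,
    y,
    epochs=2,
    batch_size=32,
    validation_data=(X, y),
)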
@@ -435,17 +437,17 @@
 "    plt.figure(figsize=(12, 4))\n",
 "\n",
 "    plt.subplot(1, 2, 1)\n",
-"    plt.plot(history_df['loss'], label='Training Loss')\n",
+"    plt.plot(history_df[\"loss\"], label=\"Training Loss\")\n",
 "    plt.plot(history_df[\"val_loss\"], label=\"Validation Loss\")\n",
 "    plt.xlabel(\"Epochs\")\n",
 "    plt.ylabel(\"Loss\")\n",
 "    plt.legend()\n",
 "\n",
 "    plt.subplot(1, 2, 2)\n",
-"    plt.plot(history_df['accuracy'], label='Accuracy')\n",
+"    plt.plot(history_df[\"accuracy\"], label=\"Accuracy\")\n",
 "    plt.plot(history_df[\"val_accuracy\"], label=\"Validation Accuracy\")\n",
-"    plt.xlabel('Epochs')\n",
-"    plt.ylabel('Accuracy')\n",
+"    plt.xlabel(\"Epochs\")\n",
+"    plt.ylabel(\"Accuracy\")\n",
 "    plt.legend()"
 ]
 },
@@ -645,7 +647,7 @@
 "        \"n_epochs\": n_epochs,\n",
 "        \"history\": pd.DataFrame(history.history),\n",
 "    }\n",
-"    results.append(result)\n"
+"    results.append(result)"
 ]
 },
 {
@@ -669,36 +671,27 @@
 "    learning_rate = result[\"learning_rate\"]\n",
 "\n",
 "    plt.subplot(1, 2, 1)\n",
-"    plt.plot(\n",
-"        history_df[\"val_loss\"],\n",
-"        linestyle=\"--\",\n",
-"        color=colors[_]\n",
-"    )\n",
+"    plt.plot(history_df[\"val_loss\"], linestyle=\"--\", color=colors[_])\n",
 "    plt.plot(\n",
-"        history_df[\"loss\"], label=f\"LR={learning_rate}\", alpha=0.5,\n",
-"        color=colors[_]\n",
+"        history_df[\"loss\"], label=f\"LR={learning_rate}\", alpha=0.5, color=colors[_]\n",
 "    )\n",
 "    plt.xlabel(\"Epochs\")\n",
 "    plt.ylabel(\"Loss\")\n",
 "    plt.legend()\n",
 "\n",
 "    plt.subplot(1, 2, 2)\n",
-"    plt.plot(\n",
-"        history_df[\"val_accuracy\"],\n",
-"        linestyle=\"--\",\n",
-"        color=colors[_]\n",
-"    )\n",
+"    plt.plot(history_df[\"val_accuracy\"], linestyle=\"--\", color=colors[_])\n",
 "    plt.plot(\n",
 "        history_df[\"accuracy\"],\n",
 "        label=f\"LR={learning_rate}\",\n",
 "        alpha=0.5,\n",
-"        color=colors[_]\n",
+"        color=colors[_],\n",
 "    )\n",
 "    plt.xlabel(\"Epochs\")\n",
 "    plt.ylabel(\"Accuracy\")\n",
 "    plt.legend()\n",
 "\n",
-"    plt.show()\n"
+"    plt.show()"
 ]
 },
 {
@@ -767,7 +760,7 @@
 "        \"n_epochs\": n_epochs,\n",
 "        \"history\": pd.DataFrame(history.history),\n",
 "    }\n",
-"    results.append(result)\n"
+"    results.append(result)"
 ]
 },
 {