Reformat code and improve readability in the TP_4 and TP_5 Jupyter notebooks

- Adjusted indentation and line breaks for better clarity in function definitions and import statements.
- Standardized on double-quoted strings for consistency across the codebase.
- Enhanced readability of DataFrame creation and manipulation by breaking long lines into multiple lines.
- Cleaned up print statements and comments for improved understanding.
- Ensured consistent use of whitespace around operators and after commas.
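
For example, the train_test_split call in the first notebook below goes from one long line to a wrapped call. The resulting style matches what a formatter such as Black produces at its default 88-character line length (the commit does not name a tool, so that attribution is an assumption):

# before
X_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full, train_size=0.8)

# after
X_train, X_valid, y_train, y_valid = train_test_split(
    X_train_full, y_train_full, train_size=0.8
)
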
2025-11-25 10:46:16 +01:00
parent 751412c1cd
commit e57995ba85
17 changed files with 11975 additions and 11713 deletions

View File

@@ -24,20 +24,29 @@
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"import seaborn as sns\n",
"sns.set(style='whitegrid')\n",
"\n",
"sns.set(style=\"whitegrid\")\n",
"\n",
"import tensorflow as tf\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.preprocessing import StandardScaler\n",
"from tensorflow import keras\n",
"\n",
"(X_train_full, y_train_full), (X_test, y_test) = (keras.datasets.mnist.load_data())\n",
"X_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full, train_size=0.8)\n",
"(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()\n",
"X_train, X_valid, y_train, y_valid = train_test_split(\n",
" X_train_full, y_train_full, train_size=0.8\n",
")\n",
"\n",
"scaler = StandardScaler()\n",
"X_train = scaler.fit_transform(X_train.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28)\n",
"X_valid = scaler.transform(X_valid.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28)\n",
"X_test = scaler.transform(X_test.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28)"
"X_train = scaler.fit_transform(X_train.astype(np.float32).reshape(-1, 28 * 28)).reshape(\n",
" -1, 28, 28\n",
")\n",
"X_valid = scaler.transform(X_valid.astype(np.float32).reshape(-1, 28 * 28)).reshape(\n",
" -1, 28, 28\n",
")\n",
"X_test = scaler.transform(X_test.astype(np.float32).reshape(-1, 28 * 28)).reshape(\n",
" -1, 28, 28\n",
")"
]
},
{
@@ -69,11 +78,15 @@
" [\n",
" keras.layers.Input(shape=[28, 28]),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(256, activation=\"relu\", kernel_regularizer=keras.regularizers.l2(0.001)),\n",
" keras.layers.Dense(128, activation=\"relu\", kernel_regularizer=keras.regularizers.l2(0.001)),\n",
" keras.layers.Dense(\n",
" 256, activation=\"relu\", kernel_regularizer=keras.regularizers.l2(0.001)\n",
" ),\n",
" keras.layers.Dense(\n",
" 128, activation=\"relu\", kernel_regularizer=keras.regularizers.l2(0.001)\n",
" ),\n",
" keras.layers.Dense(10, activation=\"softmax\"),\n",
" ]\n",
")\n"
")"
]
},
{
@@ -150,8 +163,16 @@
" [\n",
" keras.layers.Input(shape=[28, 28]),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(256, activation=\"relu\", kernel_regularizer=keras.regularizers.l2(lambda_l2)),\n",
" keras.layers.Dense(128, activation=\"relu\", kernel_regularizer=keras.regularizers.l2(lambda_l2)),\n",
" keras.layers.Dense(\n",
" 256,\n",
" activation=\"relu\",\n",
" kernel_regularizer=keras.regularizers.l2(lambda_l2),\n",
" ),\n",
" keras.layers.Dense(\n",
" 128,\n",
" activation=\"relu\",\n",
" kernel_regularizer=keras.regularizers.l2(lambda_l2),\n",
" ),\n",
" keras.layers.Dense(10, activation=\"softmax\"),\n",
" ]\n",
" )\n",
@@ -218,20 +239,28 @@
" lambda_l2 = result[\"lambda_l2\"]\n",
"\n",
" plt.subplot(1, 2, 1)\n",
" plt.plot(history_df[\"val_loss\"], label=f\"LR={learning_rate}, L2={lambda_l2}\", color=colors[_])\n",
" plt.plot(\n",
" history_df[\"val_loss\"],\n",
" label=f\"LR={learning_rate}, L2={lambda_l2}\",\n",
" color=colors[_],\n",
" )\n",
" plt.plot(history_df[\"loss\"], linestyle=\"--\", color=colors[_])\n",
" plt.xlabel(\"Epochs\")\n",
" plt.ylabel(\"Loss\")\n",
" plt.legend()\n",
"\n",
" plt.subplot(1, 2, 2)\n",
" plt.plot(history_df[\"val_accuracy\"], label=f\"LR={learning_rate}, L2={lambda_l2}\", color=colors[_])\n",
" plt.plot(\n",
" history_df[\"val_accuracy\"],\n",
" label=f\"LR={learning_rate}, L2={lambda_l2}\",\n",
" color=colors[_],\n",
" )\n",
" plt.plot(history_df[\"accuracy\"], linestyle=\"--\", color=colors[_])\n",
" plt.xlabel(\"Epochs\")\n",
" plt.ylabel(\"Accuracy\")\n",
" plt.legend()\n",
"\n",
" plt.show()\n"
" plt.show()"
]
},
{

View File

@@ -26,11 +26,11 @@
"import matplotlib.pyplot as plt\n",
"import seaborn as sns\n",
"\n",
"sns.set(style='whitegrid')\n",
"sns.set(style=\"whitegrid\")\n",
"\n",
"from tensorflow import keras\n",
"\n",
"(X_train_full, y_train_full), (X_test, y_test) = (keras.datasets.mnist.load_data())"
"(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()"
]
},
{
@@ -61,7 +61,7 @@
" X_train_full, y_train_full, test_size=0.2, random_state=42\n",
")\n",
"print(X_train.shape, y_train.shape)\n",
"print(X_valid.shape, y_valid.shape)\n"
"print(X_valid.shape, y_valid.shape)"
]
},
{
@@ -88,9 +88,9 @@
}
],
"source": [
"plt.figure(figsize=(10,10))\n",
"plt.figure(figsize=(10, 10))\n",
"for i in range(25):\n",
" plt.subplot(5,5,i+1)\n",
" plt.subplot(5, 5, i + 1)\n",
" plt.xticks([])\n",
" plt.yticks([])\n",
" plt.grid(False)\n",
@@ -174,13 +174,15 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.Input(shape=[28, 28]),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(256, activation=\"relu\"),\n",
" keras.layers.Dense(128, activation=\"relu\"),\n",
" keras.layers.Dense(10, activation=\"softmax\")\n",
"])"
"model = keras.models.Sequential(\n",
" [\n",
" keras.layers.Input(shape=[28, 28]),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(256, activation=\"relu\"),\n",
" keras.layers.Dense(128, activation=\"relu\"),\n",
" keras.layers.Dense(10, activation=\"softmax\"),\n",
" ]\n",
")"
]
},
{
@@ -293,7 +295,7 @@
}
],
"source": [
"print(28*28)\n",
"print(28 * 28)\n",
"print(256)\n",
"print(128)\n",
"print(10)\n",
@@ -332,7 +334,7 @@
" loss=\"sparse_categorical_crossentropy\",\n",
" optimizer=keras.optimizers.SGD(learning_rate=1e-3),\n",
" metrics=[\"accuracy\"],\n",
")\n"
")"
]
},
{
@@ -379,7 +381,7 @@
" epochs=epochs,\n",
" batch_size=batch_size,\n",
" validation_data=(X_valid, y_valid),\n",
")\n"
")"
]
},
{
@@ -435,17 +437,17 @@
" plt.figure(figsize=(12, 4))\n",
"\n",
" plt.subplot(1, 2, 1)\n",
" plt.plot(history_df['loss'], label='Training Loss')\n",
" plt.plot(history_df[\"loss\"], label=\"Training Loss\")\n",
" plt.plot(history_df[\"val_loss\"], label=\"Validation Loss\")\n",
" plt.xlabel(\"Epochs\")\n",
" plt.ylabel(\"Loss\")\n",
" plt.legend()\n",
"\n",
" plt.subplot(1, 2, 2)\n",
" plt.plot(history_df['accuracy'], label='Accuracy')\n",
" plt.plot(history_df[\"accuracy\"], label=\"Accuracy\")\n",
" plt.plot(history_df[\"val_accuracy\"], label=\"Validation Accuracy\")\n",
" plt.xlabel('Epochs')\n",
" plt.ylabel('Accuracy')\n",
" plt.xlabel(\"Epochs\")\n",
" plt.ylabel(\"Accuracy\")\n",
" plt.legend()"
]
},
@@ -645,7 +647,7 @@
" \"n_epochs\": n_epochs,\n",
" \"history\": pd.DataFrame(history.history),\n",
" }\n",
" results.append(result)\n"
" results.append(result)"
]
},
{
@@ -669,36 +671,27 @@
" learning_rate = result[\"learning_rate\"]\n",
"\n",
" plt.subplot(1, 2, 1)\n",
" plt.plot(history_df[\"val_loss\"], linestyle=\"--\", color=colors[_])\n",
" plt.plot(\n",
" history_df[\"val_loss\"],\n",
" linestyle=\"--\",\n",
" color=colors[_]\n",
" )\n",
" plt.plot(\n",
" history_df[\"loss\"], label=f\"LR={learning_rate}\", alpha=0.5,\n",
" color=colors[_]\n",
" history_df[\"loss\"], label=f\"LR={learning_rate}\", alpha=0.5, color=colors[_]\n",
" )\n",
" plt.xlabel(\"Epochs\")\n",
" plt.ylabel(\"Loss\")\n",
" plt.legend()\n",
"\n",
" plt.subplot(1, 2, 2)\n",
" plt.plot(\n",
" history_df[\"val_accuracy\"],\n",
" linestyle=\"--\",\n",
" color=colors[_]\n",
" )\n",
" plt.plot(history_df[\"val_accuracy\"], linestyle=\"--\", color=colors[_])\n",
" plt.plot(\n",
" history_df[\"accuracy\"],\n",
" label=f\"LR={learning_rate}\",\n",
" alpha=0.5,\n",
" color=colors[_]\n",
" color=colors[_],\n",
" )\n",
" plt.xlabel(\"Epochs\")\n",
" plt.ylabel(\"Accuracy\")\n",
" plt.legend()\n",
"\n",
" plt.show()\n"
" plt.show()"
]
},
{
@@ -767,7 +760,7 @@
" \"n_epochs\": n_epochs,\n",
" \"history\": pd.DataFrame(history.history),\n",
" }\n",
" results.append(result)\n"
" results.append(result)"
]
},
{

View File

@@ -24,20 +24,30 @@
"import matplotlib.pyplot as plt\n",
"import seaborn as sns\n",
"\n",
"sns.set(style='whitegrid')\n",
"sns.set(style=\"whitegrid\")\n",
"\n",
"import tensorflow as tf\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.preprocessing import StandardScaler\n",
"from tensorflow import keras\n",
"\n",
"(X_train_full, y_train_full), (X_test, y_test) = (keras.datasets.fashion_mnist.load_data())\n",
"X_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full, train_size=0.8)\n",
"(X_train_full, y_train_full), (X_test, y_test) = (\n",
" keras.datasets.fashion_mnist.load_data()\n",
")\n",
"X_train, X_valid, y_train, y_valid = train_test_split(\n",
" X_train_full, y_train_full, train_size=0.8\n",
")\n",
"\n",
"scaler = StandardScaler()\n",
"X_train = scaler.fit_transform(X_train.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28, 1)\n",
"X_valid = scaler.transform(X_valid.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28, 1)\n",
"X_test = scaler.transform(X_test.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28, 1)"
"X_train = scaler.fit_transform(X_train.astype(np.float32).reshape(-1, 28 * 28)).reshape(\n",
" -1, 28, 28, 1\n",
")\n",
"X_valid = scaler.transform(X_valid.astype(np.float32).reshape(-1, 28 * 28)).reshape(\n",
" -1, 28, 28, 1\n",
")\n",
"X_test = scaler.transform(X_test.astype(np.float32).reshape(-1, 28 * 28)).reshape(\n",
" -1, 28, 28, 1\n",
")"
]
},
{

View File

@@ -26,11 +26,13 @@
"import matplotlib.pyplot as plt\n",
"import seaborn as sns\n",
"\n",
"sns.set(style='whitegrid')\n",
"sns.set(style=\"whitegrid\")\n",
"\n",
"from tensorflow import keras\n",
"\n",
"(X_train_full, y_train_full), (X_test, y_test) = (keras.datasets.fashion_mnist.load_data())"
"(X_train_full, y_train_full), (X_test, y_test) = (\n",
" keras.datasets.fashion_mnist.load_data()\n",
")"
]
},
{
@@ -186,7 +188,7 @@
" keras.layers.Dense(units=64, activation=\"relu\"),\n",
" keras.layers.Dense(units=10, activation=\"softmax\"),\n",
" ]\n",
")\n"
")"
]
},
{
@@ -627,10 +629,7 @@
" batch_size=batch_size,\n",
" validation_data=(X_valid, y_valid),\n",
" )\n",
" training_curves.append({\n",
" 'history': history,\n",
" 'normalization': normalized\n",
" })"
" training_curves.append({\"history\": history, \"normalization\": normalized})"
]
},
{
@@ -653,7 +652,9 @@
"metadata": {},
"outputs": [],
"source": [
"def agregate_result(results: list, normalized: bool, metric_name: str = 'accuracy') -> pd.DataFrame:\n",
"def agregate_result(\n",
" results: list, normalized: bool, metric_name: str = \"accuracy\"\n",
") -> pd.DataFrame:\n",
" train_curves = []\n",
" val_curves = []\n",
"\n",
@@ -663,7 +664,7 @@
" train_curves.append(hist_obj.history[metric_name])\n",
" val_curves.append(hist_obj.history[f\"val_{metric_name}\"])\n",
"\n",
" return np.array(train_curves).flatten(), np.array(val_curves).flatten()\n"
" return np.array(train_curves).flatten(), np.array(val_curves).flatten()"
]
},
{
@@ -697,7 +698,9 @@
"for idx, metric in enumerate(metrics):\n",
" ax = axs[idx]\n",
" for normalized in [True, False]:\n",
" train, val = agregate_result(training_curves, normalized=normalized, metric_name=metric)\n",
" train, val = agregate_result(\n",
" training_curves, normalized=normalized, metric_name=metric\n",
" )\n",
" train_runs = train.reshape(-1, epochs)\n",
" val_runs = val.reshape(-1, epochs)\n",
"\n",
@@ -710,10 +713,22 @@
" label_prefix = \"With BN\" if normalized else \"Without BN\"\n",
"\n",
" ax.plot(mean_train, label=label_prefix, color=color, linestyle=\"-\")\n",
" ax.fill_between(range(epochs), mean_train - std_train, mean_train + std_train, color=color, alpha=0.2)\n",
" ax.fill_between(\n",
" range(epochs),\n",
" mean_train - std_train,\n",
" mean_train + std_train,\n",
" color=color,\n",
" alpha=0.2,\n",
" )\n",
"\n",
" ax.plot(mean_val, color=color, linestyle=\"--\")\n",
" ax.fill_between(range(epochs), mean_val - std_val, mean_val + std_val, color=color, alpha=0.2)\n",
" ax.fill_between(\n",
" range(epochs),\n",
" mean_val - std_val,\n",
" mean_val + std_val,\n",
" color=color,\n",
" alpha=0.2,\n",
" )\n",
"\n",
" ax.set_title(f\"Training and Validation {metric.capitalize()}\")\n",
" ax.set_xlabel(\"Epochs\")\n",
@@ -721,7 +736,7 @@
" ax.legend()\n",
"\n",
"plt.tight_layout()\n",
"plt.show()\n"
"plt.show()"
]
},
{

View File

@@ -43,7 +43,7 @@
" 7: \"horse\",\n",
" 8: \"ship\",\n",
" 9: \"truck \",\n",
"}\n"
"}"
]
},
{
@@ -299,13 +299,21 @@
" model = keras.Sequential(\n",
" [\n",
" keras.layers.InputLayer(shape=(32, 32, 3)),\n",
" keras.layers.Conv2D(filters=32, kernel_size=3, activation=\"relu\", padding=\"same\"),\n",
" keras.layers.Conv2D(\n",
" filters=32, kernel_size=3, activation=\"relu\", padding=\"same\"\n",
" ),\n",
" keras.layers.Dropout(0.2),\n",
" keras.layers.Conv2D(filters=32, kernel_size=3, activation=\"relu\", padding=\"same\"),\n",
" keras.layers.Conv2D(\n",
" filters=32, kernel_size=3, activation=\"relu\", padding=\"same\"\n",
" ),\n",
" keras.layers.MaxPooling2D(pool_size=2),\n",
" keras.layers.Conv2D(filters=16, kernel_size=3, activation=\"relu\", padding=\"same\"),\n",
" keras.layers.Conv2D(\n",
" filters=16, kernel_size=3, activation=\"relu\", padding=\"same\"\n",
" ),\n",
" keras.layers.Dropout(0.2),\n",
" keras.layers.Conv2D(filters=16, kernel_size=3, activation=\"relu\", padding=\"same\"),\n",
" keras.layers.Conv2D(\n",
" filters=16, kernel_size=3, activation=\"relu\", padding=\"same\"\n",
" ),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(10, activation=\"softmax\"),\n",
" ]\n",
@@ -316,7 +324,7 @@
"\n",
"model = get_model()\n",
"model.compile(optimizer=\"adam\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n",
"model.summary()\n"
"model.summary()"
]
},
{
@@ -339,7 +347,9 @@
"metadata": {},
"outputs": [],
"source": [
"def compile_train(optimizer_function: str, learning_rate: float, **kwargs) -> keras.callbacks.History:\n",
"def compile_train(\n",
" optimizer_function: str, learning_rate: float, **kwargs\n",
") -> keras.callbacks.History:\n",
" model = get_model()\n",
" optimizer = optimizer_function(learning_rate=learning_rate)\n",
" model.compile(\n",
@@ -388,9 +398,11 @@
}
],
"source": [
"epochs=5\n",
"batch_size=64\n",
"history_adam = compile_train(keras.optimizers.Adam, learning_rate=0.001, epochs=epochs, batch_size=batch_size)"
"epochs = 5\n",
"batch_size = 64\n",
"history_adam = compile_train(\n",
" keras.optimizers.Adam, learning_rate=0.001, epochs=epochs, batch_size=batch_size\n",
")"
]
},
{
@@ -603,7 +615,7 @@
"plt.xlabel(\"Epochs\")\n",
"plt.ylabel(\"Validation Loss\")\n",
"plt.legend()\n",
"plt.show()\n"
"plt.show()"
]
},
{