Refactor code for improved readability and consistency across multiple Jupyter notebooks

- Added missing commas in print statements and function calls for correct, consistent syntax.
- Reformatted code to enhance clarity, including breaking long lines and aligning parameters.
- Updated function signatures to annotate sigma parameters as float instead of int for better precision (see the sketch after this list).
- Cleaned up comments and documentation strings for clarity and consistency.
- Ensured consistent formatting in plotting functions and data handling.
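
For illustration, a minimal sketch of the sigma typing change mentioned above; the gaussian_kernel helper, its parameters, and its body are hypothetical and not taken from this commit's diff:

import numpy as np

# Hypothetical helper, not part of this commit: it only illustrates
# switching a sigma annotation from int to float.
def gaussian_kernel(size: int, sigma: float = 1.0) -> np.ndarray:
    # With sigma: float (previously sigma: int), callers can pass fractional
    # smoothing widths such as 1.5 without the annotation suggesting truncation.
    x = np.arange(size) - (size - 1) / 2
    return np.exp(-(x ** 2) / (2 * sigma ** 2))

print(gaussian_kernel(5, sigma=1.5))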
2025-12-13 23:38:17 +01:00
parent f89ff4a016
commit d5a6bfd339
50 changed files with 779 additions and 449 deletions

View File

@@ -18,15 +18,12 @@
"outputs": [],
"source": [
"import numpy as np\n",
"import pandas as pd\n",
"\n",
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"import seaborn as sns\n",
"\n",
"sns.set(style=\"whitegrid\")\n",
"\n",
"import tensorflow as tf\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.preprocessing import StandardScaler\n",
"from tensorflow import keras\n",
@@ -35,18 +32,29 @@
" keras.datasets.fashion_mnist.load_data()\n",
")\n",
"X_train, X_valid, y_train, y_valid = train_test_split(\n",
" X_train_full, y_train_full, train_size=0.8\n",
" X_train_full,\n",
" y_train_full,\n",
" train_size=0.8,\n",
")\n",
"\n",
"scaler = StandardScaler()\n",
"X_train = scaler.fit_transform(X_train.astype(np.float32).reshape(-1, 28 * 28)).reshape(\n",
" -1, 28, 28, 1\n",
" -1,\n",
" 28,\n",
" 28,\n",
" 1,\n",
")\n",
"X_valid = scaler.transform(X_valid.astype(np.float32).reshape(-1, 28 * 28)).reshape(\n",
" -1, 28, 28, 1\n",
" -1,\n",
" 28,\n",
" 28,\n",
" 1,\n",
")\n",
"X_test = scaler.transform(X_test.astype(np.float32).reshape(-1, 28 * 28)).reshape(\n",
" -1, 28, 28, 1\n",
" -1,\n",
" 28,\n",
" 28,\n",
" 1,\n",
")"
]
},

View File

@@ -60,7 +60,10 @@
"from sklearn.model_selection import train_test_split\n",
"\n",
"X_train, X_valid, y_train, y_valid = train_test_split(\n",
" X_train_full, y_train_full, test_size=0.2, random_state=42\n",
" X_train_full,\n",
" y_train_full,\n",
" test_size=0.2,\n",
" random_state=42,\n",
")\n",
"print(X_train.shape, y_train.shape)\n",
"print(X_valid.shape, y_valid.shape)"
@@ -178,16 +181,22 @@
" [\n",
" keras.layers.Input(shape=(28, 28, 1)),\n",
" keras.layers.Conv2D(\n",
" filters=32, kernel_size=3, activation=\"relu\", padding=\"same\"\n",
" filters=32,\n",
" kernel_size=3,\n",
" activation=\"relu\",\n",
" padding=\"same\",\n",
" ),\n",
" keras.layers.Conv2D(\n",
" filters=32, kernel_size=3, activation=\"relu\", padding=\"same\"\n",
" filters=32,\n",
" kernel_size=3,\n",
" activation=\"relu\",\n",
" padding=\"same\",\n",
" ),\n",
" keras.layers.MaxPooling2D(pool_size=2, strides=2),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(units=64, activation=\"relu\"),\n",
" keras.layers.Dense(units=10, activation=\"softmax\"),\n",
" ]\n",
" ],\n",
")"
]
},
@@ -374,33 +383,45 @@
" [\n",
" keras.layers.Input(shape=(28, 28, 1)),\n",
" keras.layers.Conv2D(\n",
" filters=32, kernel_size=3, activation=\"relu\", padding=\"same\"\n",
" filters=32,\n",
" kernel_size=3,\n",
" activation=\"relu\",\n",
" padding=\"same\",\n",
" ),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.Conv2D(\n",
" filters=32, kernel_size=3, activation=\"relu\", padding=\"same\"\n",
" filters=32,\n",
" kernel_size=3,\n",
" activation=\"relu\",\n",
" padding=\"same\",\n",
" ),\n",
" keras.layers.MaxPooling2D(pool_size=2, strides=2),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(units=64, activation=\"relu\"),\n",
" keras.layers.Dense(units=10, activation=\"softmax\"),\n",
" ]\n",
" ],\n",
" )\n",
" else:\n",
" model = keras.models.Sequential(\n",
" [\n",
" keras.layers.Input(shape=(28, 28, 1)),\n",
" keras.layers.Conv2D(\n",
" filters=32, kernel_size=3, activation=\"relu\", padding=\"same\"\n",
" filters=32,\n",
" kernel_size=3,\n",
" activation=\"relu\",\n",
" padding=\"same\",\n",
" ),\n",
" keras.layers.Conv2D(\n",
" filters=32, kernel_size=3, activation=\"relu\", padding=\"same\"\n",
" filters=32,\n",
" kernel_size=3,\n",
" activation=\"relu\",\n",
" padding=\"same\",\n",
" ),\n",
" keras.layers.MaxPooling2D(pool_size=2, strides=2),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(units=64, activation=\"relu\"),\n",
" keras.layers.Dense(units=10, activation=\"softmax\"),\n",
" ]\n",
" ],\n",
" )\n",
"\n",
" model.compile(\n",
@@ -653,7 +674,9 @@
"outputs": [],
"source": [
"def agregate_result(\n",
" results: list, normalized: bool, metric_name: str = \"accuracy\"\n",
" results: list,\n",
" normalized: bool,\n",
" metric_name: str = \"accuracy\",\n",
") -> pd.DataFrame:\n",
" train_curves = []\n",
" val_curves = []\n",
@@ -699,7 +722,9 @@
" ax = axs[idx]\n",
" for normalized in [True, False]:\n",
" train, val = agregate_result(\n",
" training_curves, normalized=normalized, metric_name=metric\n",
" training_curves,\n",
" normalized=normalized,\n",
" metric_name=metric,\n",
" )\n",
" train_runs = train.reshape(-1, epochs)\n",
" val_runs = val.reshape(-1, epochs)\n",