Refactor code for improved readability and consistency across multiple Jupyter notebooks

- Added trailing commas in multi-line function calls and literals for consistent formatting.
- Reformatted code to enhance clarity, including breaking long lines and aligning parameters.
- Updated function signatures to annotate sigma parameters as float instead of int, matching the values actually passed.
- Cleaned up comments and documentation strings for clarity and consistency.
- Ensured consistent formatting in plotting functions and data handling.
This commit is contained in:
2025-12-13 23:38:17 +01:00
parent f89ff4a016
commit d5a6bfd339
50 changed files with 779 additions and 449 deletions

View File

@@ -9,6 +9,7 @@
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import pandas as pd\n",
"\n",
"import tensorflow as tf"
]
},
@@ -187,10 +188,12 @@
" kernel_regularizer=tf.keras.regularizers.l2(0.01),\n",
" ),\n",
" tf.keras.layers.Dense(\n",
" 8, activation=\"relu\", kernel_regularizer=tf.keras.regularizers.l2(0.01)\n",
" 8,\n",
" activation=\"relu\",\n",
" kernel_regularizer=tf.keras.regularizers.l2(0.01),\n",
" ),\n",
" tf.keras.layers.Dense(1, activation=\"sigmoid\"),\n",
" ]\n",
" ],\n",
" )\n",
" model.compile(optimizer=\"adam\", loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n",
" return model"
@@ -296,7 +299,10 @@
"histories = []\n",
"\n",
"early_stopping = EarlyStopping(\n",
" monitor=\"val_loss\", patience=10, restore_best_weights=True, verbose=1\n",
" monitor=\"val_loss\",\n",
" patience=10,\n",
" restore_best_weights=True,\n",
" verbose=1,\n",
")\n",
"\n",
"for fold, (train_idx, val_idx) in enumerate(skf.split(X, y), 1):\n",
@@ -314,7 +320,9 @@
"\n",
" # EarlyStopping\n",
" callback = tf.keras.callbacks.EarlyStopping(\n",
" monitor=\"val_loss\", patience=10, restore_best_weights=True\n",
" monitor=\"val_loss\",\n",
" patience=10,\n",
" restore_best_weights=True,\n",
" )\n",
"\n",
" # Entraînement\n",
@@ -433,13 +441,18 @@
],
"source": [
"import numpy as np\n",
"\n",
"import tensorflow as tf\n",
"from sklearn.metrics import classification_report, f1_score\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.preprocessing import StandardScaler\n",
"\n",
"X_train, X_test, y_train, y_test = train_test_split(\n",
" X, y, test_size=0.2, random_state=42, stratify=y\n",
" X,\n",
" y,\n",
" test_size=0.2,\n",
" random_state=42,\n",
" stratify=y,\n",
")\n",
"\n",
"scaler = StandardScaler()\n",
@@ -451,7 +464,9 @@
"model.compile(optimizer=\"adam\", loss=\"binary_crossentropy\")\n",
"\n",
"callback = tf.keras.callbacks.EarlyStopping(\n",
" monitor=\"val_loss\", patience=10, restore_best_weights=True\n",
" monitor=\"val_loss\",\n",
" patience=10,\n",
" restore_best_weights=True,\n",
")\n",
"\n",
"history = model.fit(\n",