Refactor code for improved readability and consistency across notebooks

- Standardized spacing around operators and function arguments in TP7_Kmeans.ipynb and neural_network.ipynb.
- Enhanced the formatting of model building and training code in neural_network.ipynb for better clarity.
- Updated pyproject.toml to drop the pinned TensorFlow version and add a linting configuration for Ruff (see the sketch after this list).
- Improved comments and organization in the code to make it easier to understand and maintain.
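For reference, Ruff configuration in pyproject.toml lives under a `[tool.ruff]` table. The snippet below is a minimal, assumed sketch; the line length, target version, and rule selection are illustrative and not the exact values introduced by this commit:

```toml
# Hypothetical excerpt of pyproject.toml -- values are illustrative only.
[tool.ruff]
line-length = 88          # matches the Black-style wrapping applied to the notebooks
target-version = "py311"  # assumed Python version for this project

[tool.ruff.lint]
select = ["E", "F", "I"]  # pycodestyle errors, pyflakes, import sorting
```

With settings like these in place, `ruff check .` (and `ruff format .`, if formatting is delegated to Ruff) picks them up automatically.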
2025-07-01 20:46:08 +02:00
parent e273cf90f7
commit f94ff07cab
34 changed files with 5713 additions and 5047 deletions


@@ -1,8 +1,9 @@
{
"cells": [
{
"metadata": {},
"cell_type": "markdown",
"id": "c897654e0a140cbd",
"metadata": {},
"source": [
"# Automatic Differentiation\n",
"\n",
@@ -11,42 +12,18 @@
"Loss function: softmax layer in $\\mathbb{R}^3$\n",
"\n",
"Architecture: FC/ReLU 4-5-7-3"
],
"id": "c897654e0a140cbd"
]
},
{
"cell_type": "code",
"execution_count": 33,
"id": "70a4eb1d928b10d0",
"metadata": {
"ExecuteTime": {
"end_time": "2025-03-24T15:16:27.015669Z",
"start_time": "2025-03-24T15:16:23.856887Z"
}
},
"cell_type": "code",
"source": [
"import numpy as np\n",
"from sklearn.neural_network import MLPClassifier\n",
"from sklearn.datasets import make_classification\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.metrics import accuracy_score\n",
"\n",
"accuracies = []\n",
"\n",
"for _ in range(10):\n",
" X, y = make_classification(n_samples=1000, n_features=4, n_classes=3, n_clusters_per_class=1)\n",
"\n",
" X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n",
" model = MLPClassifier(hidden_layer_sizes=(5, 7), activation='relu', max_iter=10000, solver='adam')\n",
" model.fit(X_train, y_train)\n",
"\n",
" y_pred = model.predict(X_test)\n",
" accuracies.append(accuracy_score(y_test, y_pred))\n",
"\n",
"print(f'Mean Accuracy: {np.mean(accuracies) * 100:.0f}%')\n",
"print(f'STD Accuracy: {np.std(accuracies) * 100:.0f}%')\n",
"print(f\"Max accuracy: {np.max(accuracies) * 100:.0f}%\")\n",
"print(f\"Min accuracy: {np.min(accuracies) * 100:.0f}%\")"
],
"id": "70a4eb1d928b10d0",
"outputs": [
{
"name": "stdout",
@@ -59,20 +36,47 @@
]
}
],
"execution_count": 33
"source": [
"import numpy as np\n",
"from sklearn.neural_network import MLPClassifier\n",
"from sklearn.datasets import make_classification\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.metrics import accuracy_score\n",
"\n",
"accuracies = []\n",
"\n",
"for _ in range(10):\n",
" X, y = make_classification(\n",
" n_samples=1000, n_features=4, n_classes=3, n_clusters_per_class=1\n",
" )\n",
"\n",
" X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n",
" model = MLPClassifier(\n",
" hidden_layer_sizes=(5, 7), activation=\"relu\", max_iter=10000, solver=\"adam\"\n",
" )\n",
" model.fit(X_train, y_train)\n",
"\n",
" y_pred = model.predict(X_test)\n",
" accuracies.append(accuracy_score(y_test, y_pred))\n",
"\n",
"print(f\"Mean Accuracy: {np.mean(accuracies) * 100:.0f}%\")\n",
"print(f\"STD Accuracy: {np.std(accuracies) * 100:.0f}%\")\n",
"print(f\"Max accuracy: {np.max(accuracies) * 100:.0f}%\")\n",
"print(f\"Min accuracy: {np.min(accuracies) * 100:.0f}%\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "96b6d46883ed5570",
"metadata": {
"ExecuteTime": {
"end_time": "2025-03-24T14:37:53.507776Z",
"start_time": "2025-03-24T14:37:53.505376Z"
}
},
"cell_type": "code",
"source": "",
"id": "96b6d46883ed5570",
"outputs": [],
"execution_count": null
"source": []
}
],
"metadata": {