Refactor code for improved readability and consistency across notebooks

- Standardized spacing around operators and function arguments in TP7_Kmeans.ipynb and neural_network.ipynb.
- Enhanced the formatting of model building and training code in neural_network.ipynb for better clarity.
- Updated the pyproject.toml to remove a specific TensorFlow version and added linting configuration for Ruff.
- Improved comments and organization in the code to facilitate easier understanding and maintenance.
This commit is contained in:
2025-07-01 20:46:08 +02:00
parent e273cf90f7
commit f94ff07cab
34 changed files with 5713 additions and 5047 deletions

View File

@@ -76,24 +76,40 @@
],
"source": [
"import numpy as np\n",
"\n",
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"\n",
"\n",
"u = np.array([1,2,3,4,5])\n",
"v = np.array([[1,2,3,4,5]])\n",
"su=u.shape\n",
"sv=v.shape\n",
"u = np.array([1, 2, 3, 4, 5])\n",
"v = np.array([[1, 2, 3, 4, 5]])\n",
"su = u.shape\n",
"sv = v.shape\n",
"ut = np.transpose(u)\n",
"vt = np.transpose(v)\n",
"vt2 = np.array([[1],[2],[3],[4],[5]])\n",
"A = np.array([[1,2,0,0,0],[0,2,0,0,0],[0,0,3,0,0],[0,0,0,4,0],[0,0,0,0,5]])\n",
"B = np.array([[1,2,3,4,5],[2,3,4,5,6],[3,4,5,6,7],[4,5,6,7,8],[5,6,7,8,9]])\n",
"d=np.diag(A)\n",
"dd=np.array([np.diag(A)])\n",
"dt=np.transpose(d)\n",
"ddt=np.transpose(dd)\n",
"Ad=np.diag(np.diag(A))\n",
"vt2 = np.array([[1], [2], [3], [4], [5]])\n",
"A = np.array(\n",
" [\n",
" [1, 2, 0, 0, 0],\n",
" [0, 2, 0, 0, 0],\n",
" [0, 0, 3, 0, 0],\n",
" [0, 0, 0, 4, 0],\n",
" [0, 0, 0, 0, 5],\n",
" ]\n",
")\n",
"B = np.array(\n",
" [\n",
" [1, 2, 3, 4, 5],\n",
" [2, 3, 4, 5, 6],\n",
" [3, 4, 5, 6, 7],\n",
" [4, 5, 6, 7, 8],\n",
" [5, 6, 7, 8, 9],\n",
" ]\n",
")\n",
"d = np.diag(A)\n",
"dd = np.array([np.diag(A)])\n",
"dt = np.transpose(d)\n",
"ddt = np.transpose(dd)\n",
"Ad = np.diag(np.diag(A))\n",
"\n",
"print(np.dot(np.linalg.inv(A), A))"
]
@@ -138,11 +154,11 @@
" x = 0 * b\n",
" n = len(b)\n",
" if np.allclose(A, np.triu(A)):\n",
" for i in range(n-1, -1, -1):\n",
" x[i] = (b[i] - np.dot(A[i,i+1:], x[i+1:])) / A[i,i]\n",
" for i in range(n - 1, -1, -1):\n",
" x[i] = (b[i] - np.dot(A[i, i + 1 :], x[i + 1 :])) / A[i, i]\n",
" elif np.allclose(A, np.tril(A)):\n",
" for i in range(n):\n",
" x[i] = (b[i] - np.dot(A[i,:i], x[:i])) / A[i,i]\n",
" x[i] = (b[i] - np.dot(A[i, :i], x[:i])) / A[i, i]\n",
" else:\n",
"        raise ValueError(\"A n'est ni triangulaire supérieure ni triangulaire inférieure\")\n",
" return x"
@@ -171,7 +187,7 @@
"b = np.dot(A, xe)\n",
"x = remontee_descente(A, b)\n",
"\n",
"print(np.dot(x - xe, x-xe))"
"print(np.dot(x - xe, x - xe))"
]
},
{
@@ -263,9 +279,9 @@
" U = A\n",
" n = len(A)\n",
" for j in range(n):\n",
" for i in range(j+1, n):\n",
" beta = U[i,j]/U[j,j]\n",
" U[i,j:] = U[i,j:] - beta * U[j, j:]\n",
" for i in range(j + 1, n):\n",
" beta = U[i, j] / U[j, j]\n",
" U[i, j:] = U[i, j:] - beta * U[j, j:]\n",
" return U"
]
},
@@ -282,14 +298,16 @@
" if n != m:\n",
" raise ValueError(\"Erreur de dimension : A doit etre carré\")\n",
" if n != b.size:\n",
" raise valueError(\"Erreur de dimension : le nombre de lignes de A doit être égal au nombr ede colonnes de b\")\n",
" U = np.zeros((n, n+1))\n",
"        raise ValueError(\n",
"            \"Erreur de dimension : le nombre de lignes de A doit être égal au nombre de colonnes de b\"\n",
"        )\n",
" U = np.zeros((n, n + 1))\n",
" U = A\n",
" V = b\n",
" for j in range(n):\n",
" for i in range(j+1, n):\n",
" beta = U[i,j]/U[j,j]\n",
" U[i,j:] = U[i,j:] - beta * U[j, j:]\n",
" for i in range(j + 1, n):\n",
" beta = U[i, j] / U[j, j]\n",
" U[i, j:] = U[i, j:] - beta * U[j, j:]\n",
" V[i] = V[i] - beta * V[j]\n",
" return remontee_descente(U, V)"
]