From bcac5764f6be358cb16c5ffcb27f296447eb91db Mon Sep 17 00:00:00 2001 From: Arthur DANJOU Date: Wed, 24 Dec 2025 22:26:59 +0100 Subject: [PATCH] Refactor error messages and function signatures across multiple notebooks for clarity and consistency - Updated error messages in Gauss method and numerical methods to use variables for better readability. - Added return type hints to function signatures in various notebooks to improve code documentation. - Corrected minor grammatical issues in docstrings and error messages for better clarity. - Adjusted print statements and list concatenations for improved output formatting. - Enhanced plotting functions to ensure consistent figure handling. --- .../TP1_Methode_de_Gauss.ipynb | 9 ++++++--- L3/Calculs Numériques/DM1.ipynb | 12 +++++------ L3/Calculs Numériques/DM2.ipynb | 17 ++++++++-------- L3/Calculs Numériques/DM3.ipynb | 3 ++- L3/Calculs Numériques/Interpolation.ipynb | 3 ++- L3/Calculs Numériques/Point_Fixe.ipynb | 2 +- L3/Projet Numérique/Segregation.ipynb | 8 ++++---- L3/Statistiques/TP2.ipynb | 2 +- M1/Numerical Methods/TP1.ipynb | 2 +- M1/Numerical Methods/TP2_DANJOU_Arthur.ipynb | 20 +++++++++---------- .../ComputerSession3.ipynb | 2 +- .../TP0_Intro_Jupyter_Python.ipynb | 2 +- .../TP4_Ridge_Lasso_and_CV.ipynb | 2 +- M1/Statistical Learning/TP5_Naive_Bayes.ipynb | 2 +- .../TP3 - Compléments/TP3 - Starter.ipynb | 9 +++------ .../TP4 - Récurrents/TP4 - Starter.ipynb | 4 ++-- .../TP_1/2025_TP_1_M2_ISF.ipynb | 4 ++-- ... as a Markov Decision Process Part 1.ipynb | 6 +++--- .../Lab 2 - Second maze.ipynb | 6 +++--- 19 files changed, 58 insertions(+), 57 deletions(-)
diff --git a/L3/Analyse Matricielle/TP1_Methode_de_Gauss.ipynb b/L3/Analyse Matricielle/TP1_Methode_de_Gauss.ipynb index d583097..4ac1664 100644 --- a/L3/Analyse Matricielle/TP1_Methode_de_Gauss.ipynb +++ b/L3/Analyse Matricielle/TP1_Methode_de_Gauss.ipynb @@ -160,7 +160,8 @@ " for i in range(n):\n", " x[i] = (b[i] - np.dot(A[i, :i], x[:i])) / A[i, i]\n", " else:\n", - " raise ValueError(\"A est ni triangulaire supérieure ni triangulaire inférieure\")\n", + " msg = \"A n'est ni triangulaire supérieure ni triangulaire inférieure\"\n", + " raise ValueError(msg)\n", " return x" ] }, @@ -296,10 +297,12 @@ "def met_gauss_sys(A, b):\n", " n, m = A.shape\n", " if n != m:\n", - " raise ValueError(\"Erreur de dimension : A doit etre carré\")\n", + " msg = \"Erreur de dimension : A doit être carré\"\n", + " raise ValueError(msg)\n", " if n != b.size:\n", + " msg = \"Erreur de dimension : le nombre de lignes de A doit être égal au nombre de lignes de b\"\n", " raise valueError(\n", - " \"Erreur de dimension : le nombre de lignes de A doit être égal au nombr ede colonnes de b\",\n", + " msg,\n", " )\n", " U = np.zeros((n, n + 1))\n", " U = A\n",
diff --git a/L3/Calculs Numériques/DM1.ipynb b/L3/Calculs Numériques/DM1.ipynb index 1115cec..eedbac7 100644 --- a/L3/Calculs Numériques/DM1.ipynb +++ b/L3/Calculs Numériques/DM1.ipynb @@ -113,7 +113,7 @@ "\n", "\n", "def C(t):\n", - " \"\"\"Fonction retournant la solution exacte du problème au temps t\"\"\"\n", + " \"\"\"Fonction retournant la solution exacte du problème au temps t.\"\"\"\n", " return K_star + K / (1 + (K / K0 - 1) * np.exp(-r * (t - t_fl)))\n", "\n", "\n", @@ -135,7 +135,7 @@ "\n", "\n", "def dN(N, t, C_sol):\n", - " \"\"\"Fonction calculant la dérivée de la solution approchée du problème à l'instant t dépendant de N(t) et de C(t)\"\"\"\n", + " \"\"\"Fonction calculant la dérivée de la solution approchée du problème à l'instant t dépendant de N(t) et de C(t).\"\"\"\n", " return r_N * N * (1 - N / C_sol(t))\n", "\n", "\n", @@ -221,7 +221,7 @@ "\n", "\n", "def F(X, t, a, b, c, d, p):\n", - " \"\"\"Fonction second membre pour le système\"\"\"\n", + " \"\"\"Fonction second membre pour le système.\"\"\"\n", " x, y = X\n", " return np.array([x * (a - p - b * y), y * (-c - p + d * x)])\n", "\n", @@ -319,7 +319,7 @@ "outputs": [], "source": [ "def crank_nicolson(y0, T, N, r):\n", - " \"\"\"schéma de Crank-Nicolson pour le modèle de Malthus\n", + " \"\"\"schéma de Crank-Nicolson pour le modèle de Malthus.\n", "\n", " Parameters\n", " ----------\n", @@ -356,7 +356,7 @@ "\n", "\n", "def euler_explicit(y0, T, N, r):\n", - " \"\"\"schéma de d'Euler pour le modèle de Malthus\n", + " \"\"\"schéma d'Euler pour le modèle de Malthus.\n", "\n", " Parameters\n", " ----------\n", @@ -393,7 +393,7 @@ "\n", "\n", "def solution_exacte(t):\n", - " \"\"\"Fonction calculant la solution exacte du modèle de Malthus à l'instant t\"\"\"\n", + " \"\"\"Fonction calculant la solution exacte du modèle de Malthus à l'instant t.\"\"\"\n", " return y0 * np.exp(r * t)" ] },
diff --git a/L3/Calculs Numériques/DM2.ipynb b/L3/Calculs Numériques/DM2.ipynb index 4e4a187..6b2f524 100644 --- a/L3/Calculs Numériques/DM2.ipynb +++ b/L3/Calculs Numériques/DM2.ipynb @@ -151,7 +151,7 @@ ], "source": [ "def M(x):\n", - " \"\"\"Retourne la matrice du système (2)\n", + " \"\"\"Retourne la matrice du système (2).\n", "\n", " Parameters\n", " ----------\n", @@ -192,7 +192,7 @@ "outputs": [], "source": [ "def sprime(x, y, p0, pN):\n", - " \"\"\"Retourne la solution du système (2)\n", + " \"\"\"Retourne la solution du système (2).\n", "\n", " Parameters\n", " ----------\n", @@ -272,7 +272,7 @@ ], "source": [ "def f(x):\n", - " \"\"\"Retourne la fonction f évaluée aux points x\n", + " \"\"\"Retourne la fonction f évaluée aux points x.\n", "\n", " Parameters\n", " ----------\n", @@ -289,7 +289,7 @@ "\n", "\n", "def fprime(x):\n", - " \"\"\"Retourne la fonction dérivée de f évaluée aux points x\n", + " \"\"\"Retourne la fonction dérivée de f évaluée aux points x.\n", "\n", " Parameters\n", " ----------\n", @@ -360,7 +360,7 @@ "outputs": [], "source": [ "def splines(x, y, p0, pN):\n", - " \"\"\"Retourne la matrice S de taille (4, N)\n", + " \"\"\"Retourne la matrice S de taille (4, N).\n", "\n", " Parameters\n", " ----------\n", @@ -410,7 +410,7 @@ "outputs": [], "source": [ "def spline_eval(x, xx, S):\n", - " \"\"\"Evalue une spline définie par des noeuds équirepartis\n", + " \"\"\"Evalue une spline définie par des noeuds équirépartis.\n", "\n", " Parameters\n", " ----------\n", @@ -433,13 +433,12 @@ " \"\"\"\n", " ind = (np.floor((xx - x[0]) / (x[1] - x[0]))).astype(int)\n", " ind = np.where(ind == x.size - 1, ind - 1, ind)\n", - " yy = (\n", + " return (\n", " S[ind, 0]\n", " + S[ind, 1] * (xx - x[ind])\n", " + S[ind, 2] * (xx - x[ind]) ** 2\n", " + S[ind, 3] * (xx - x[ind]) ** 3\n", - " )\n", - " return yy" + " )" ] }, {
diff --git a/L3/Calculs Numériques/DM3.ipynb b/L3/Calculs Numériques/DM3.ipynb index fe95e66..b4bfaed 100644 --- a/L3/Calculs Numériques/DM3.ipynb +++ b/L3/Calculs Numériques/DM3.ipynb @@ -215,7 +215,8 @@ "source": [ "def simpson(f, N):\n", " if N % 2 == 0:\n", - " raise ValueError(\"N doit est impair.\")\n", + " msg = \"N doit être impair.\"\n", + " raise ValueError(msg)\n", "\n", " h = 2 / (2 * (N - 1) // 2)\n", " fx = f(np.linspace(-1, 1, N))\n",
diff --git a/L3/Calculs Numériques/Interpolation.ipynb b/L3/Calculs Numériques/Interpolation.ipynb index cfa8c31..6d911c9
100644 --- a/L3/Calculs Numériques/Interpolation.ipynb +++ b/L3/Calculs Numériques/Interpolation.ipynb @@ -534,7 +534,8 @@ "def interp_vdm_poly(x, y):\n", " \"\"\"Compute the coefficients of the interpolation polynomial.\"\"\"\n", " if x.shape != y.shape:\n", - " raise ValueError(\"x and y must have same dimension!\")\n", + " msg = \"x and y must have same dimension!\"\n", + " raise ValueError(msg)\n", " return np.linalg.solve(interp_vdm_build(x), y)" ] }, diff --git a/L3/Calculs Numériques/Point_Fixe.ipynb b/L3/Calculs Numériques/Point_Fixe.ipynb index 4463989..c807229 100644 --- a/L3/Calculs Numériques/Point_Fixe.ipynb +++ b/L3/Calculs Numériques/Point_Fixe.ipynb @@ -143,7 +143,7 @@ "outputs": [], "source": [ "def point_fixe(f, x0, tol=1.0e-6, itermax=5000):\n", - " \"\"\"Recherche de point fixe : méthode brute x_{n+1} = f(x_n)\n", + " \"\"\"Recherche de point fixe : méthode brute x_{n+1} = f(x_n).\n", "\n", " Parameters\n", " ----------\n", diff --git a/L3/Projet Numérique/Segregation.ipynb b/L3/Projet Numérique/Segregation.ipynb index 1c92e04..0f1d515 100644 --- a/L3/Projet Numérique/Segregation.ipynb +++ b/L3/Projet Numérique/Segregation.ipynb @@ -41,7 +41,7 @@ "outputs": [], "source": [ "class ModeleSchelling:\n", - " def __init__(self, M, p, L):\n", + " def __init__(self, M, p, L) -> None:\n", " self.M = M\n", " self.p = p\n", " self.L = L\n", @@ -63,7 +63,7 @@ "\n", " return grille\n", "\n", - " def afficher_grille(self, title):\n", + " def afficher_grille(self, title) -> None:\n", " color = plt.imshow(self.grille, cmap=\"coolwarm\", interpolation=\"nearest\")\n", " plt.colorbar(color)\n", " plt.title(title)\n", @@ -97,7 +97,7 @@ " visited = np.zeros_like(self.grille, dtype=bool)\n", " clusters = []\n", "\n", - " def dfs(i, j, groupe, cluster): # Depth-First Search\n", + " def dfs(i, j, groupe, cluster) -> None: # Depth-First Search\n", " stack = [(i, j)]\n", "\n", " while stack:\n", @@ -136,7 +136,7 @@ " S += int(clusters[i][1]) ** 2\n", " return S * 2 / (self.Ntot**2)\n", "\n", - " def simuler(self, T=400, move_satisfaits=True):\n", + " def simuler(self, T=400, move_satisfaits=True) -> None:\n", " for _t in range(1, int((1 - self.p) * self.M**2 * T)):\n", " agents = [\n", " (i, j)\n", diff --git a/L3/Statistiques/TP2.ipynb b/L3/Statistiques/TP2.ipynb index 1bf8248..6b44f9a 100644 --- a/L3/Statistiques/TP2.ipynb +++ b/L3/Statistiques/TP2.ipynb @@ -220,7 +220,7 @@ }, "outputs": [], "source": [ - "def simule(a, b, n):\n", + "def simule(a, b, n) -> None:\n", " X = np.random.gamma(a, 1 / b, n)\n", " intervalle = np.linspace(0, np.max(X), 100)\n", " plt.hist(X, bins=intervalle, density=True, label=\"Echantillon de X\")\n", diff --git a/M1/Numerical Methods/TP1.ipynb b/M1/Numerical Methods/TP1.ipynb index e6a74fd..333776f 100644 --- a/M1/Numerical Methods/TP1.ipynb +++ b/M1/Numerical Methods/TP1.ipynb @@ -184,7 +184,7 @@ "outputs": [], "source": [ "def F(y, t, a, r):\n", - " S, I, R = y\n", + " S, I, _R = y\n", " dS = -r * S * I\n", " dI = r * S * I - a * I\n", " dR = a * I\n", diff --git a/M1/Numerical Methods/TP2_DANJOU_Arthur.ipynb b/M1/Numerical Methods/TP2_DANJOU_Arthur.ipynb index 16479ec..83f8773 100644 --- a/M1/Numerical Methods/TP2_DANJOU_Arthur.ipynb +++ b/M1/Numerical Methods/TP2_DANJOU_Arthur.ipynb @@ -46,12 +46,12 @@ "outputs": [], "source": [ "def S(t, S0, mu, sigma, W):\n", - " \"\"\"Solution exacte de l'EDS de Black-Scholes\"\"\"\n", + " \"\"\"Solution exacte de l'EDS de Black-Scholes.\"\"\"\n", " return S0 * np.exp((mu - 0.5 * sigma**2) * t + sigma * W)\n", "\n", "\n", 
"def euler_maruyama(mu, sigma, T, N, X0=0.0):\n", - " \"\"\"Simulation d'une EDS de Black-Scholes par la méthode d'Euler-Maruyama\n", + " \"\"\"Simulation d'une EDS de Black-Scholes par la méthode d'Euler-Maruyama.\n", "\n", " Paramètres :\n", " mu (float) : drift\n", @@ -80,8 +80,8 @@ " return t, X\n", "\n", "\n", - "def plot_brownien(t, X, B=None):\n", - " \"\"\"Plot la simulation d'Euler-Maruyama\n", + "def plot_brownien(t, X, B=None) -> None:\n", + " \"\"\"Plot la simulation d'Euler-Maruyama.\n", "\n", " Paramètres :\n", " t (array-like) : tableau des temps\n", @@ -164,8 +164,8 @@ "np.random.seed(333)\n", "\n", "\n", - "def plot_convergence(S0, mu, sigma, T):\n", - " \"\"\"Plot la convergence du schéma d'Euler-Maruyama\n", + "def plot_convergence(S0, mu, sigma, T) -> None:\n", + " \"\"\"Plot la convergence du schéma d'Euler-Maruyama.\n", "\n", " Paramètres :\n", " S0 (int) : valeur initiale\n", @@ -271,7 +271,7 @@ "\n", "def is_barrier_breached(X, B):\n", " \"\"\"Renvoie True si la barrière est franchie, False sinon\n", - " La barrière est franchie si X >= B\n", + " La barrière est franchie si X >= B.\n", "\n", " Paramètres:\n", " X (array-like): Trajectoire des valeurs\n", @@ -297,8 +297,8 @@ "metadata": {}, "outputs": [], "source": [ - "def plot_browniens(trajectories, B):\n", - " \"\"\"Trace les trajectoires de Brownien et la barrière\n", + "def plot_browniens(trajectories, B) -> None:\n", + " \"\"\"Trace les trajectoires de Brownien et la barrière.\n", "\n", " Paramètres:\n", " trajectories (list of tuples): Liste des trajectoires avec le temps et les valeurs\n", @@ -451,7 +451,7 @@ "np.random.seed(333)\n", "\n", "\n", - "def plot_payoff_errors():\n", + "def plot_payoff_errors() -> None:\n", " \"\"\"Trace l'erreur de convergence du payoff actualisé en fonction de N.\"\"\"\n", " errors = []\n", "\n", diff --git a/M1/Numerical Optimisation/ComputerSession3.ipynb b/M1/Numerical Optimisation/ComputerSession3.ipynb index 08528a4..23cbbb9 100644 --- a/M1/Numerical Optimisation/ComputerSession3.ipynb +++ b/M1/Numerical Optimisation/ComputerSession3.ipynb @@ -248,7 +248,7 @@ " return result.x\n", "\n", "\n", - "def plot_perimeter(n):\n", + "def plot_perimeter(n) -> None:\n", " optimal_angles = optimize_polygon(n + 1)\n", " plt.figure(figsize=(7, 7))\n", " t = np.linspace(0, 2 * np.pi, 100)\n", diff --git a/M1/Statistical Learning/TP0_Intro_Jupyter_Python.ipynb b/M1/Statistical Learning/TP0_Intro_Jupyter_Python.ipynb index 0cd2e2f..18ec2f7 100644 --- a/M1/Statistical Learning/TP0_Intro_Jupyter_Python.ipynb +++ b/M1/Statistical Learning/TP0_Intro_Jupyter_Python.ipynb @@ -902,7 +902,7 @@ } ], "source": [ - "def divisible_by_3_and13(n):\n", + "def divisible_by_3_and13(n) -> None:\n", " if n % 3 == 0 and n % 13 == 0:\n", " print(n, \"is divisible by 3 and 13\")\n", " else:\n", diff --git a/M1/Statistical Learning/TP4_Ridge_Lasso_and_CV.ipynb b/M1/Statistical Learning/TP4_Ridge_Lasso_and_CV.ipynb index 56bca7c..146309d 100644 --- a/M1/Statistical Learning/TP4_Ridge_Lasso_and_CV.ipynb +++ b/M1/Statistical Learning/TP4_Ridge_Lasso_and_CV.ipynb @@ -1084,7 +1084,7 @@ ], "source": [ "# We remove the players for whom Salary is missing\n", - "hitters.dropna(subset=[\"Salary\"], inplace=True)\n", + "hitters = hitters.dropna(subset=[\"Salary\"])\n", "\n", "X = hitters.select_dtypes(include=int)\n", "Y = hitters[\"Salary\"]\n", diff --git a/M1/Statistical Learning/TP5_Naive_Bayes.ipynb b/M1/Statistical Learning/TP5_Naive_Bayes.ipynb index ac003ce..c27c5e5 100644 --- a/M1/Statistical 
Learning/TP5_Naive_Bayes.ipynb +++ b/M1/Statistical Learning/TP5_Naive_Bayes.ipynb @@ -243,7 +243,7 @@ "metadata": {}, "outputs": [], "source": [ - "sms.rename(columns={\"v1\": \"Label\", \"v2\": \"Text\"}, inplace=True)" + "sms = sms.rename(columns={\"v1\": \"Label\", \"v2\": \"Text\"})" ] }, {
diff --git a/M2/Deep Learning/TP3 - Compléments/TP3 - Starter.ipynb b/M2/Deep Learning/TP3 - Compléments/TP3 - Starter.ipynb index eb95e04..08293ae 100644 --- a/M2/Deep Learning/TP3 - Compléments/TP3 - Starter.ipynb +++ b/M2/Deep Learning/TP3 - Compléments/TP3 - Starter.ipynb @@ -295,7 +295,7 @@ ], "source": [ "def get_model() -> keras.Model:\n", - " model = keras.Sequential(\n", + " return keras.Sequential(\n", " [\n", " keras.layers.InputLayer(shape=(32, 32, 3)),\n", " keras.layers.Conv2D(\n", @@ -330,7 +330,6 @@ " ],\n", " )\n", "\n", - " return model\n", "\n", "\n", "model = get_model()\n", @@ -371,14 +370,12 @@ " metrics=[\"accuracy\"],\n", " )\n", "\n", - " history = model.fit(\n", + " return model.fit(\n", " X_train,\n", " y_train,\n", " validation_data=(X_valid, y_valid),\n", " **kwargs,\n", - " )\n", - "\n", - " return history" + " )\n" ] }, {
diff --git a/M2/Deep Learning/TP4 - Récurrents/TP4 - Starter.ipynb b/M2/Deep Learning/TP4 - Récurrents/TP4 - Starter.ipynb index d73f589..d6b89f3 100644 --- a/M2/Deep Learning/TP4 - Récurrents/TP4 - Starter.ipynb +++ b/M2/Deep Learning/TP4 - Récurrents/TP4 - Starter.ipynb @@ -177,7 +177,7 @@ "import _pickle as pickle\n", "\n", "train_size = 0.8\n", - "train_index = int(round(len(sequences) * train_size))\n", + "train_index = round(len(sequences) * train_size)\n", "X_train = X[:train_index, :, :]\n", "y_train = y[:train_index, :]\n", "\n", @@ -471,7 +471,7 @@ }, "outputs": [], "source": [ - "def save_model(model, name):\n", + "def save_model(model, name) -> None:\n", " \"\"\"Save a Keras model to JSON and H5 files.\"\"\"\n", " model_json = model.to_json()\n", " with open(name + \".json\", \"w\") as json_file:\n",
diff --git a/M2/Machine Learning/TP_1/2025_TP_1_M2_ISF.ipynb b/M2/Machine Learning/TP_1/2025_TP_1_M2_ISF.ipynb index db42065..3265914 100644 --- a/M2/Machine Learning/TP_1/2025_TP_1_M2_ISF.ipynb +++ b/M2/Machine Learning/TP_1/2025_TP_1_M2_ISF.ipynb @@ -226,7 +226,7 @@ "source": [ "print(range(4, 10))\n", "print(range(5, 50, 3))\n", - "print([3, 1, 4] + [1, 5, 9])\n", + "print([3, 1, 4, 1, 5, 9])\n", "print(len(range(4, 10)))" ] }, @@ -282,7 +282,7 @@ "\n", "print(num in [1, 2, 3] and num > 0)\n", "\n", - "print(not 5 == 3)" + "print(5 != 3)" ] }, {
diff --git a/M2/Reinforcement Learning/Lab 2 - Maze Game as a Markov Decision Process Part 1.ipynb b/M2/Reinforcement Learning/Lab 2 - Maze Game as a Markov Decision Process Part 1.ipynb index 10354b9..ad74353 100644 --- a/M2/Reinforcement Learning/Lab 2 - Maze Game as a Markov Decision Process Part 1.ipynb +++ b/M2/Reinforcement Learning/Lab 2 - Maze Game as a Markov Decision Process Part 1.ipynb @@ -381,7 +381,7 @@ } ], "source": [ - "def plot_maze_with_states():\n", + "def plot_maze_with_states() -> None:\n", " \"\"\"Plot the maze with state indices.\"\"\"\n", " grid = np.ones(\n", " (n_rows, n_cols),\n", @@ -391,7 +391,7 @@ " if maze_str[i][j] == \"#\":\n", " grid[i, j] = 0 # We replace walls (#) with 0\n", "\n", - " fig, ax = plt.subplots()\n", + " _fig, ax = plt.subplots()\n", " ax.imshow(grid, cmap=\"gray\", alpha=0.7)\n", "\n", " # Plot state
indices\n", @@ -1142,7 +1142,7 @@ " ] # For each reachable cell, we write the value V[s] in the grid.\n", " # Walls # never get values, and they stay as NaN.\n", "\n", - " fig, ax = plt.subplots()\n", + " _fig, ax = plt.subplots()\n", " im = ax.imshow(grid_values, cmap=\"magma\")\n", " plt.colorbar(im, ax=ax)\n", "\n", diff --git a/M2/Reinforcement Learning/Lab 2 - Second maze.ipynb b/M2/Reinforcement Learning/Lab 2 - Second maze.ipynb index 709100b..4738b76 100644 --- a/M2/Reinforcement Learning/Lab 2 - Second maze.ipynb +++ b/M2/Reinforcement Learning/Lab 2 - Second maze.ipynb @@ -186,7 +186,7 @@ } ], "source": [ - "def plot_maze_with_states():\n", + "def plot_maze_with_states() -> None:\n", " \"\"\"Plot the maze with state indices.\"\"\"\n", " grid = np.ones(\n", " (n_rows, n_cols),\n", @@ -196,7 +196,7 @@ " if maze_str[i][j] == \"#\":\n", " grid[i, j] = 0 # We replace walls (#) with 0\n", "\n", - " fig, ax = plt.subplots(figsize=figsize)\n", + " _fig, ax = plt.subplots(figsize=figsize)\n", " ax.imshow(grid, cmap=\"gray\", alpha=0.7)\n", "\n", " # Plot state indices\n", @@ -565,7 +565,7 @@ " ] # For each reachable cell, we write the value V[s] in the grid.\n", " # Walls # never get values, and they stay as NaN.\n", "\n", - " fig, ax = plt.subplots(figsize=figsize)\n", + " _fig, ax = plt.subplots(figsize=figsize)\n", " im = ax.imshow(grid_values, cmap=\"magma\")\n", " plt.colorbar(im, ax=ax)\n", "\n",