Refactor code for improved readability and consistency across multiple Jupyter notebooks

- Added trailing commas to multi-line print statements, function calls, and list arguments for consistent formatting.
- Reformatted code for clarity, breaking long lines and putting one argument per line in multi-line calls.
- Updated function signatures to use float instead of int for sigma parameters (see the sketch below).
- Cleaned up comments and documentation strings for clarity and consistency.
- Ensured consistent formatting in plotting functions and data handling.
2025-12-13 23:38:17 +01:00
parent f89ff4a016
commit d5a6bfd339
50 changed files with 779 additions and 449 deletions
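
A representative before/after sketch of the formatting pattern applied across the notebooks; the gaussian_pdf function below is hypothetical and only illustrates the trailing commas, the one-argument-per-line layout, and the int-to-float change for sigma:

import numpy as np


# Before: sigma annotated as int, long call kept on one line, no trailing comma
def gaussian_pdf(x, mu, sigma: int):
    return np.exp(-0.5 * ((x - mu) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))


values = gaussian_pdf(np.linspace(-3.0, 3.0, 101), 0.0, 1)


# After: sigma annotated as float, call split one argument per line, trailing comma added
def gaussian_pdf(x, mu, sigma: float):
    return np.exp(-0.5 * ((x - mu) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))


values = gaussian_pdf(
    np.linspace(-3.0, 3.0, 101),
    0.0,
    1.0,
)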

View File

@@ -338,7 +338,7 @@
" return np.max(\n",
" np.power(np.abs(sol_appr - sol_exact), 2)[\n",
" np.isfinite(np.power(np.abs(sol_appr - sol_exact), 2))\n",
" ]\n",
" ],\n",
" )\n",
"\n",
"\n",

View File

@@ -38,6 +38,7 @@
],
"source": [
"import numpy as np\n",
"\n",
"from sklearn.datasets import make_classification\n",
"from sklearn.metrics import accuracy_score\n",
"from sklearn.model_selection import train_test_split\n",
@@ -47,12 +48,18 @@
"\n",
"for _ in range(10):\n",
" X, y = make_classification(\n",
" n_samples=1000, n_features=4, n_classes=3, n_clusters_per_class=1\n",
" n_samples=1000,\n",
" n_features=4,\n",
" n_classes=3,\n",
" n_clusters_per_class=1,\n",
" )\n",
"\n",
" X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n",
" model = MLPClassifier(\n",
" hidden_layer_sizes=(5, 7), activation=\"relu\", max_iter=10000, solver=\"adam\"\n",
" hidden_layer_sizes=(5, 7),\n",
" activation=\"relu\",\n",
" max_iter=10000,\n",
" solver=\"adam\",\n",
" )\n",
" model.fit(X_train, y_train)\n",
"\n",

View File

@@ -17,7 +17,7 @@
"source": [
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import scipy.stats as stats"
"from scipy import stats"
]
},
{
@@ -46,15 +46,12 @@
"outputs": [],
"source": [
"def S(t, S0, mu, sigma, W):\n",
" \"\"\"\n",
" Solution exacte de l'EDS de Black-Scholes\n",
" \"\"\"\n",
" \"\"\"Solution exacte de l'EDS de Black-Scholes\"\"\"\n",
" return S0 * np.exp((mu - 0.5 * sigma**2) * t + sigma * W)\n",
"\n",
"\n",
"def euler_maruyama(mu, sigma, T, N, X0=0.0):\n",
" \"\"\"\n",
" Simulation d'une EDS de Black-Scholes par la méthode d'Euler-Maruyama\n",
" \"\"\"Simulation d'une EDS de Black-Scholes par la méthode d'Euler-Maruyama\n",
"\n",
" Paramètres :\n",
" mu (float) : drift\n",
@@ -84,8 +81,7 @@
"\n",
"\n",
"def plot_brownien(t, X, B=None):\n",
" \"\"\"\n",
" Plot la simulation d'Euler-Maruyama\n",
" \"\"\"Plot la simulation d'Euler-Maruyama\n",
"\n",
" Paramètres :\n",
" t (array-like) : tableau des temps\n",
@@ -169,8 +165,7 @@
"\n",
"\n",
"def plot_convergence(S0, mu, sigma, T):\n",
" \"\"\"\n",
" Plot la convergence du schéma d'Euler-Maruyama\n",
" \"\"\"Plot la convergence du schéma d'Euler-Maruyama\n",
"\n",
" Paramètres :\n",
" S0 (int) : valeur initiale\n",
@@ -291,7 +286,7 @@
"print(\n",
" \"La barrière a été franchie\"\n",
" if is_barrier_breached(X, B)\n",
" else \"La barrière n'a pas été franchie\"\n",
" else \"La barrière n'a pas été franchie\",\n",
")"
]
},
@@ -335,8 +330,7 @@
" \"\"\"\n",
" if not is_barrier_breached(X, B):\n",
" return max(X[-1] - K, 0)\n",
" else:\n",
" return 0\n",
" return 0\n",
"\n",
"\n",
"def call_BS(x):\n",

View File

@@ -51,7 +51,7 @@
" for h in h_list:\n",
" t = np.arange(a, b, h)\n",
" y = np.array(\n",
" [3 / 4 * h * f(t[i] + h / 3) + h / 4 * f(t[i] + h) for i in range(len(t))]\n",
" [3 / 4 * h * f(t[i] + h / 3) + h / 4 * f(t[i] + h) for i in range(len(t))],\n",
" )\n",
" I_approx = np.sum(y)\n",
" I.append(I_approx)\n",
@@ -326,7 +326,7 @@
" 1 + np.power(x, 2) * y - (z + 1) * x,\n",
" x * z - np.power(x, 2) * y,\n",
" -x * z + 1.45,\n",
" ]\n",
" ],\n",
" )\n",
"\n",
"\n",