Mirror of https://github.com/ArthurDanjou/ArtStudies.git (synced 2026-02-02 19:31:33 +01:00)
Refactor code for improved readability and consistency across notebooks
- Standardized spacing around operators and function arguments in TP7_Kmeans.ipynb and neural_network.ipynb.
- Improved the formatting of model building and training code in neural_network.ipynb for better clarity.
- Updated pyproject.toml to remove a pinned TensorFlow version and added a linting configuration for Ruff.
- Improved comments and code organization to make the notebooks easier to understand and maintain.
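The Ruff configuration added to pyproject.toml is not reproduced in the diff excerpt below. As a minimal sketch of what such a section typically looks like (the line length and rule selection here are illustrative assumptions, not necessarily the repository's actual settings):

    [tool.ruff]
    line-length = 88            # assumed limit; the repo may use a different value

    [tool.ruff.lint]
    select = ["E", "F", "I"]    # pycodestyle errors, Pyflakes, import sorting -- illustrative choice

With a section like this in place, ruff check and ruff format pick up the settings automatically from pyproject.toml.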
@@ -27,7 +27,12 @@
 "source": [
 "def f(L, z):\n",
 "    x, y = L[0], L[1]\n",
-"    return np.power(z*x, 2) + np.power(y/z, 2) - np.cos(2 * np.pi * x) - np.cos(2 * np.pi * y)"
+"    return (\n",
+"        np.power(z * x, 2)\n",
+"        + np.power(y / z, 2)\n",
+"        - np.cos(2 * np.pi * x)\n",
+"        - np.cos(2 * np.pi * y)\n",
+"    )"
 ]
 },
 {
@@ -123,17 +128,17 @@
 "source": [
 "plt.figure(figsize=(18, 5))\n",
 "for i, nb_repl in enumerate([100, 1000, 10000]):\n",
-"    plt.subplot(1, 3, i+1)\n",
+"    plt.subplot(1, 3, i + 1)\n",
 "    sample_X1 = np.random.normal(0, 1, nb_repl)\n",
 "    sample_X2 = np.random.normal(3, np.sqrt(5), nb_repl)\n",
-"    sample_e = np.random.normal(0, np.sqrt(1/4), nb_repl)\n",
+"    sample_e = np.random.normal(0, np.sqrt(1 / 4), nb_repl)\n",
 "    Y = 5 * sample_X1 - 4 * sample_X2 + 2 + sample_e\n",
 "\n",
 "    intervalle = np.linspace(np.min(Y), np.max(Y), 100)\n",
-"    plt.hist(Y, bins=intervalle, density=True, label='Echantillon de Y')\n",
+"    plt.hist(Y, bins=intervalle, density=True, label=\"Echantillon de Y\")\n",
 "\n",
 "    densite = stats.norm(-10, np.sqrt(105.25)).pdf\n",
-"    plt.plot(intervalle, densite(intervalle), label='Fonction densité')\n",
+"    plt.plot(intervalle, densite(intervalle), label=\"Fonction densité\")\n",
 "\n",
 "    plt.title(f\"Graphique de la somme de gaussiennes pour N={nb_repl}\")\n",
 "    plt.legend()"
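For reference, the constants in this cell are consistent with the variable being simulated: the reference density stats.norm(-10, np.sqrt(105.25)) matches Y = 5*X1 - 4*X2 + 2 + e, since with independent X1 ~ N(0, 1), X2 ~ N(3, 5) and e ~ N(0, 1/4),

\[
\mathbb{E}[Y] = 5 \cdot 0 - 4 \cdot 3 + 2 = -10,
\qquad
\operatorname{Var}(Y) = 5^{2} \cdot 1 + (-4)^{2} \cdot 5 + \tfrac{1}{4} = 105.25 .
\]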
@@ -151,8 +156,9 @@
 "def theta_hat(Y):\n",
 "    return np.mean(Y)\n",
 "\n",
+"\n",
 "def sigma_hat(Y):\n",
-"    return 1/nb_repl * np.sum(np.power(Y - theta_hat(Y), 2))"
+"    return 1 / nb_repl * np.sum(np.power(Y - theta_hat(Y), 2))"
 ]
 },
 {
@@ -166,7 +172,11 @@
 "source": [
 "def log_likehood_gauss(X, Y):\n",
 "    theta, sigma_2 = X[0], X[1]\n",
-"    return 1/2*np.log(2*np.pi) + 1/2*np.log(sigma_2) + 1/(2*nb_repl*sigma_2) * np.sum(np.power(Y - theta, 2))"
+"    return (\n",
+"        1 / 2 * np.log(2 * np.pi)\n",
+"        + 1 / 2 * np.log(sigma_2)\n",
+"        + 1 / (2 * nb_repl * sigma_2) * np.sum(np.power(Y - theta, 2))\n",
+"    )"
 ]
 },
 {
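Both the old one-liner and the re-wrapped version of log_likehood_gauss compute the same quantity: the negative log-likelihood of an i.i.d. N(θ, σ²) sample divided by n = nb_repl, which the next cell minimises with basinhopping,

\[
\frac{1}{2}\log(2\pi) + \frac{1}{2}\log\sigma^{2}
+ \frac{1}{2 n \sigma^{2}} \sum_{i=1}^{n} (Y_i - \theta)^{2} .
\]

Only the layout of the expression changes.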
@@ -191,9 +201,9 @@
 "nb_repl = 5000\n",
 "sample_X1 = np.random.normal(0, 1, nb_repl)\n",
 "sample_X2 = np.random.normal(3, np.sqrt(5), nb_repl)\n",
-"sample_e = np.random.normal(0, np.sqrt(1/4), nb_repl)\n",
+"sample_e = np.random.normal(0, np.sqrt(1 / 4), nb_repl)\n",
 "Y = 5 * sample_X1 - 4 * sample_X2 + 2 + sample_e\n",
-" \n",
+"\n",
 "mk = {\"method\": \"BFGS\", \"args\": Y}\n",
 "res = opt.basinhopping(log_likehood_gauss, x0=(-1, 98.75), minimizer_kwargs=mk)\n",
 "print(res.x)\n",
@@ -211,12 +221,12 @@
 "outputs": [],
 "source": [
 "def simule(a, b, n):\n",
-"    X = np.random.gamma(a, 1/b, n)\n",
+"    X = np.random.gamma(a, 1 / b, n)\n",
 "    intervalle = np.linspace(0, np.max(X), 100)\n",
-"    plt.hist(X, bins=intervalle, density=True, label='Echantillon de X')\n",
+"    plt.hist(X, bins=intervalle, density=True, label=\"Echantillon de X\")\n",
 "\n",
-"    densite = stats.gamma.pdf(intervalle, a, 0, 1/b)\n",
-"    plt.plot(intervalle, densite, label='Fonction densité Gamma(2, 1)')\n",
+"    densite = stats.gamma.pdf(intervalle, a, 0, 1 / b)\n",
+"    plt.plot(intervalle, densite, label=\"Fonction densité Gamma(2, 1)\")\n",
 "    plt.legend()"
 ]
 },
@@ -254,8 +264,13 @@
 "source": [
 "def log_likehood_gamma(X, sample):\n",
 "    a, b = X[0], X[1]\n",
-"    n = len(sample) \n",
-"    return -n*a*np.log(b) + n * np.log(sp.gamma(a)) - (a-1) * np.sum(np.log(sample)) + b * np.sum(sample)"
+"    n = len(sample)\n",
+"    return (\n",
+"        -n * a * np.log(b)\n",
+"        + n * np.log(sp.gamma(a))\n",
+"        - (a - 1) * np.sum(np.log(sample))\n",
+"        + b * np.sum(sample)\n",
+"    )"
 ]
 },
 {
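Here too the rewrap leaves the value unchanged: for an i.i.d. sample x_1, ..., x_n from a Gamma(a, b) distribution with shape a and rate b, log_likehood_gamma returns the negative log-likelihood

\[
-\log L(a, b) = -n a \log b + n \log \Gamma(a)
- (a - 1) \sum_{i=1}^{n} \log x_i + b \sum_{i=1}^{n} x_i .
\]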
@@ -296,7 +311,7 @@
 "nb_repl = 1000\n",
 "a, b = 2, 1\n",
 "\n",
-"sample = np.random.gamma(a, 1/b, nb_repl)\n",
+"sample = np.random.gamma(a, 1 / b, nb_repl)\n",
 "mk = {\"method\": \"BFGS\", \"args\": sample}\n",
 "res = opt.basinhopping(log_likehood_gamma, x0=(1, 1), minimizer_kwargs=mk)\n",
 "print(res.x)"