Mirror of https://github.com/ArthurDanjou/ArtStudies.git (synced 2026-01-23 07:52:44 +01:00)
add: TP1 and TP2 in numerical optimisation
@@ -32,8 +32,8 @@
{
"metadata": {
"ExecuteTime": {
"end_time": "2025-02-18T17:39:02.911808Z",
"start_time": "2025-02-18T17:39:02.904634Z"
"end_time": "2025-03-18T16:19:14.314484Z",
"start_time": "2025-03-18T16:19:13.728014Z"
}
},
"cell_type": "code",
@@ -67,7 +67,7 @@
]
}
],
"execution_count": 91
"execution_count": 1
},
{
"metadata": {},
@@ -95,8 +95,8 @@
{
"metadata": {
"ExecuteTime": {
"end_time": "2025-02-18T17:39:58.379469Z",
"start_time": "2025-02-18T17:39:58.370411Z"
"end_time": "2025-03-18T16:19:17.447647Z",
"start_time": "2025-03-18T16:19:17.442560Z"
}
},
"cell_type": "code",
@@ -131,12 +131,12 @@
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/tp/_ld5_pzs6nx6mv1pbjhq1l740000gn/T/ipykernel_32664/3868809151.py:14: RuntimeWarning: overflow encountered in exp\n",
"/var/folders/tp/_ld5_pzs6nx6mv1pbjhq1l740000gn/T/ipykernel_25957/3868809151.py:14: RuntimeWarning: overflow encountered in exp\n",
" f = lambda x: np.log(np.exp(x) + np.exp(-x))\n"
]
}
],
"execution_count": 100
"execution_count": 2
},
{
"metadata": {},
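Note on this hunk: the RuntimeWarning captured above comes from evaluating np.exp(x) for large |x|. As a side note (not part of the commit; the names f_stable and df_stable are only illustrative), the same function and its derivative can be computed without overflow:

import numpy as np

# log(exp(x) + exp(-x)) evaluated in log space, so large |x| cannot overflow
f_stable = lambda x: np.logaddexp(x, -x)

# its derivative (exp(x) - exp(-x)) / (exp(x) + exp(-x)) is exactly tanh(x)
df_stable = lambda x: np.tanh(x)

print(f_stable(1000.0), df_stable(1000.0))  # finite values, no overflow warning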
@@ -155,8 +155,8 @@
{
"metadata": {
"ExecuteTime": {
"end_time": "2025-02-18T17:39:50.816196Z",
"start_time": "2025-02-18T17:39:50.624452Z"
"end_time": "2025-03-18T16:19:19.649523Z",
"start_time": "2025-03-18T16:19:19.456149Z"
}
},
"cell_type": "code",
@@ -188,7 +188,7 @@
]
}
],
"execution_count": 99
"execution_count": 3
},
{
"metadata": {},
@@ -213,8 +213,8 @@
{
"metadata": {
"ExecuteTime": {
"end_time": "2025-02-18T17:40:16.411190Z",
"start_time": "2025-02-18T17:40:16.403226Z"
"end_time": "2025-03-18T16:44:41.592150Z",
"start_time": "2025-03-18T16:44:41.584318Z"
}
},
"cell_type": "code",
@@ -222,7 +222,7 @@
"def DichotomyNewton(phi, dphi, aL, aR, s=0.1, eps=1e-10):\n",
" iter = 0\n",
" inital_length = aR - aL\n",
" while (aR - aL) / 2 >= s * inital_length:\n",
" while (aR - aL) >= s * inital_length:\n",
" b = (aL + aR) / 2\n",
" if phi(aL) * phi(b) < 0:\n",
" aR = b\n",
@@ -242,7 +242,6 @@
"\n",
"\n",
"f = lambda x: np.log(np.exp(x) + np.exp(-x))\n",
"x0 = 1.8\n",
"df = lambda x: (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))\n",
"print(DichotomyNewton(f, df, -20, 3))"
],
@@ -258,12 +257,12 @@
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/tp/_ld5_pzs6nx6mv1pbjhq1l740000gn/T/ipykernel_32664/4047614783.py:14: RuntimeWarning: divide by zero encountered in scalar divide\n",
" x0 -= phi(x0) / dphi(x0)\n"
"/var/folders/tp/_ld5_pzs6nx6mv1pbjhq1l740000gn/T/ipykernel_25957/1578277506.py:23: RuntimeWarning: overflow encountered in exp\n",
" f = lambda x: np.log(np.exp(x) + np.exp(-x))\n"
]
}
],
"execution_count": 101
"execution_count": 22
},
{
"metadata": {},
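Note on this cell: the committed code combines bracket shrinking (dichotomy) with Newton refinement of the derivative's root. A self-contained sketch of that general structure, using the overflow-free forms of f' and f'' for f(x) = log(exp(x) + exp(-x)); the helper name dichotomy_then_newton and the exact phase-2 loop are illustrative assumptions, not the repository's implementation:

import numpy as np

def dichotomy_then_newton(phi, dphi, aL, aR, s=0.1, eps=1e-10, max_iter=100):
    # Phase 1: bisection on the sign of phi until the bracket shrinks to a fraction s of its initial length
    initial_length = aR - aL
    while (aR - aL) > s * initial_length:
        b = (aL + aR) / 2
        if phi(aL) * phi(b) < 0:
            aR = b
        else:
            aL = b
    # Phase 2: Newton iterations on phi, started from the bracket midpoint
    x = (aL + aR) / 2
    for _ in range(max_iter):
        step = phi(x) / dphi(x)
        x -= step
        if abs(step) < eps:
            break
    return x

# Minimise f(x) = log(exp(x) + exp(-x)) by finding the root of its derivative tanh(x)
phi = np.tanh                              # f'(x), written in a form that cannot overflow
dphi = lambda x: 1.0 / np.cosh(x) ** 2     # f''(x)
print(dichotomy_then_newton(phi, dphi, -20, 3))  # converges to the minimiser at 0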
@@ -288,28 +287,27 @@
{
"metadata": {
"ExecuteTime": {
"end_time": "2025-02-25T21:29:40.198527Z",
"start_time": "2025-02-25T21:29:40.188272Z"
"end_time": "2025-03-18T17:43:43.061916Z",
"start_time": "2025-03-18T17:43:43.042625Z"
}
},
"cell_type": "code",
"source": [
"import numpy as np\n",
"\n",
"u = lambda x: np.sqrt((6 - x) ** 2 + 4)\n",
"\n",
"\n",
"# Objective function\n",
"def objective_function(x):\n",
" return x / 8 + np.sqrt((6 - x) ** 2 + 4) / 3\n",
" return x / 8 + u(x) / 3\n",
"\n",
"\n",
"# Gradient of the objective function\n",
"def gradient(x):\n",
" return 1 / 8 + (6 - x) / (3 * np.sqrt((6 - x) ** 2 + 4))\n",
" return 1 / 8 + (6 - x) / (3 * u(x))\n",
"\n",
"\n",
"# Hessian of the objective function\n",
"def hessian(x):\n",
" return -((6 - x) ** 2 + 4) / (3 * ((6 - x) ** 2 + 4) ** (3 / 2))\n",
" return (12 * u(x) - (2 * x - 12) ** 2 / 12 * u(x)) / 36 * u(x) ** 2\n",
"\n",
"\n",
"# Newton's method for optimization\n",
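The gradient and hessian in this cell are hand-derived. A quick symbolic cross-check (not part of the commit; requires sympy) of the closed forms for the objective x / 8 + sqrt((6 - x)**2 + 4) / 3 exactly as written in objective_function:

import sympy as sp

x = sp.symbols('x', real=True)
f = x / 8 + sp.sqrt((6 - x) ** 2 + 4) / 3

# First derivative: 1/8 + (x - 6) / (3*sqrt((6 - x)**2 + 4))
print(sp.simplify(sp.diff(f, x)))

# Second derivative: 4 / (3*((6 - x)**2 + 4)**(3/2)), which is strictly positive (convex objective)
print(sp.simplify(sp.diff(f, x, 2)))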
@@ -321,26 +319,32 @@
" hess = hessian(x)\n",
" if np.abs(grad) < tolerance:\n",
" break\n",
" x = x - grad / hess\n",
" x -= grad / hess\n",
" iterations += 1\n",
" return x, iterations\n",
"\n",
"\n",
"# Dichotomy method for optimization\n",
"def dichotomy_method(func, aL, aR, eps=1e-6, max_iterations=100):\n",
"def dichotomy_method(aL, aR, eps=1e-6, max_iterations=1000):\n",
" iterations = 0\n",
" while (aR - aL) / 2 > eps and iterations < max_iterations:\n",
" b = (aL + aR) / 2\n",
" if func(aL) * func(b) < 0:\n",
" aR = b\n",
" else:\n",
" aL = b\n",
" x0 = (aL + aR) / 2\n",
" grad = gradient(x0)\n",
" hess = hessian(x0)\n",
" while abs(grad) > eps:\n",
" try:\n",
" x0 -= grad / hess\n",
" except ZeroDivisionError:\n",
" return np.inf, \"Derivative is zero\"\n",
" iterations += 1\n",
" return (aL + aR) / 2, iterations\n",
" if iterations > max_iterations or grad == np.inf:\n",
" return np.inf, \"Method diverges\"\n",
" grad = gradient(x0)\n",
" hess = hessian(x0)\n",
" return x0, iterations\n",
"\n",
"\n",
"# Initial guess for Newton's method\n",
"initial_guess_newton = 3\n",
"initial_guess_newton = 4\n",
"\n",
"# Run Newton's method\n",
"optimal_point_newton, iterations_newton = newton_method(initial_guess_newton)\n",
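For comparison with newton_method and dichotomy_method above: this objective is convex in one variable, so its derivative is increasing and plain bisection on the sign of the derivative also brackets the minimiser. A minimal sketch under that assumption (function names are illustrative; the derivative is the analytic one from the symbolic check above, not the committed gradient()):

import numpy as np

def objective(x):
    return x / 8 + np.sqrt((6 - x) ** 2 + 4) / 3

def d_objective(x):
    # analytic first derivative of the objective
    return 1 / 8 + (x - 6) / (3 * np.sqrt((6 - x) ** 2 + 4))

def bisect_on_derivative(aL, aR, eps=1e-10, max_iterations=200):
    # for this convex objective the derivative is increasing, so a sign change brackets the minimiser
    iterations = 0
    while (aR - aL) / 2 > eps and iterations < max_iterations:
        b = (aL + aR) / 2
        if d_objective(aL) * d_objective(b) < 0:
            aR = b
        else:
            aL = b
        iterations += 1
    return (aL + aR) / 2, iterations

x_star, n = bisect_on_derivative(0, 6)
print(x_star, objective(x_star), n)  # minimiser near x = 6 - 6/sqrt(55)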
@@ -352,7 +356,7 @@
"aL, aR = 0, 6\n",
"\n",
"# Run dichotomy method\n",
"optimal_point_dichotomy, iterations_dichotomy = dichotomy_method(objective_function, aL, aR)\n",
"optimal_point_dichotomy, iterations_dichotomy = dichotomy_method(aL, aR)\n",
"print(f\"Optimal point (Dichotomy): {optimal_point_dichotomy}\")\n",
"print(f\"Objective function value at optimal point (Dichotomy): {objective_function(optimal_point_dichotomy)}\")\n",
"print(f\"Number of iterations (Dichotomy): {iterations_dichotomy}\")"
@@ -362,16 +366,16 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Optimal point (Newton): 6.809045443967482\n",
"Objective function value at optimal point (Newton): 1.5702779015852644\n",
"Number of iterations (Newton): 7\n",
"Optimal point (Dichotomy): 5.999999284744263\n",
"Objective function value at optimal point (Dichotomy): 1.416666577259742\n",
"Number of iterations (Dichotomy): 22\n"
"Optimal point (Newton): 0.9299901531755377\n",
"Objective function value at optimal point (Newton): 1.9329918821224974\n",
"Number of iterations (Newton): 100\n",
"Optimal point (Dichotomy): inf\n",
"Objective function value at optimal point (Dichotomy): inf\n",
"Number of iterations (Dichotomy): Method diverges\n"
]
}
],
"execution_count": 14
"execution_count": 38
},
{
"metadata": {},