Mirror of https://github.com/ArthurDanjou/ArtStudies.git (synced 2026-01-14 15:54:13 +01:00)
Refactor code for improved readability and consistency across multiple Jupyter notebooks
- Added missing commas in various print statements and function calls for better syntax.
- Reformatted code to enhance clarity, including breaking long lines and aligning parameters.
- Updated function signatures to use float type for sigma parameters instead of int for better precision.
- Cleaned up comments and documentation strings for clarity and consistency.
- Ensured consistent formatting in plotting functions and data handling.
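The sigma signature change mentioned above does not appear in the hunks excerpted below. As a purely hypothetical illustration of that kind of fix (the function name and body here are invented for the example, not taken from the commit):

    import math

    # Before (hypothetical): sigma annotated as int, even though bandwidths are real-valued.
    def gaussian_weight(x: float, sigma: int) -> float: ...

    # After (hypothetical): a float annotation matches how sigma is actually used.
    def gaussian_weight(x: float, sigma: float) -> float:
        return math.exp(-(x * x) / (2.0 * sigma * sigma))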
@@ -20,7 +20,6 @@
    "outputs": [],
    "source": [
     "import numpy as np\n",
-    "import pandas as pd\n",
     "\n",
     "%matplotlib inline\n",
     "import matplotlib.pyplot as plt\n",
@@ -300,23 +299,35 @@
     "        [\n",
     "            keras.layers.InputLayer(shape=(32, 32, 3)),\n",
     "            keras.layers.Conv2D(\n",
-    "                filters=32, kernel_size=3, activation=\"relu\", padding=\"same\"\n",
+    "                filters=32,\n",
+    "                kernel_size=3,\n",
+    "                activation=\"relu\",\n",
+    "                padding=\"same\",\n",
     "            ),\n",
     "            keras.layers.Dropout(0.2),\n",
     "            keras.layers.Conv2D(\n",
-    "                filters=32, kernel_size=3, activation=\"relu\", padding=\"same\"\n",
+    "                filters=32,\n",
+    "                kernel_size=3,\n",
+    "                activation=\"relu\",\n",
+    "                padding=\"same\",\n",
     "            ),\n",
     "            keras.layers.MaxPooling2D(pool_size=2),\n",
     "            keras.layers.Conv2D(\n",
-    "                filters=16, kernel_size=3, activation=\"relu\", padding=\"same\"\n",
+    "                filters=16,\n",
+    "                kernel_size=3,\n",
+    "                activation=\"relu\",\n",
+    "                padding=\"same\",\n",
     "            ),\n",
     "            keras.layers.Dropout(0.2),\n",
     "            keras.layers.Conv2D(\n",
-    "                filters=16, kernel_size=3, activation=\"relu\", padding=\"same\"\n",
+    "                filters=16,\n",
+    "                kernel_size=3,\n",
+    "                activation=\"relu\",\n",
+    "                padding=\"same\",\n",
     "            ),\n",
     "            keras.layers.Flatten(),\n",
     "            keras.layers.Dense(10, activation=\"softmax\"),\n",
-    "        ]\n",
+    "        ],\n",
     "    )\n",
     "\n",
     "    return model\n",
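Assembled from the hunk above, the model-definition cell reads as follows after this commit. The `def get_model():` header, the `model = keras.Sequential(` opener, and the import are inferred from the context lines (`)` and `return model`) to make the sketch self-contained:

    import keras

    # Post-commit model definition, assembled from the diff above.
    # Function header and Sequential( opener are inferred from context.
    def get_model():
        model = keras.Sequential(
            [
                keras.layers.InputLayer(shape=(32, 32, 3)),  # CIFAR-10-sized input
                keras.layers.Conv2D(
                    filters=32,
                    kernel_size=3,
                    activation="relu",
                    padding="same",
                ),
                keras.layers.Dropout(0.2),
                keras.layers.Conv2D(
                    filters=32,
                    kernel_size=3,
                    activation="relu",
                    padding="same",
                ),
                keras.layers.MaxPooling2D(pool_size=2),
                keras.layers.Conv2D(
                    filters=16,
                    kernel_size=3,
                    activation="relu",
                    padding="same",
                ),
                keras.layers.Dropout(0.2),
                keras.layers.Conv2D(
                    filters=16,
                    kernel_size=3,
                    activation="relu",
                    padding="same",
                ),
                keras.layers.Flatten(),
                keras.layers.Dense(10, activation="softmax"),
            ],
        )
        return model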
@@ -348,7 +359,9 @@
    "outputs": [],
    "source": [
     "def compile_train(\n",
-    "    optimizer_function: str, learning_rate: float, **kwargs\n",
+    "    optimizer_function: str,\n",
+    "    learning_rate: float,\n",
+    "    **kwargs,\n",
     ") -> keras.callbacks.History:\n",
     "    model = get_model()\n",
     "    optimizer = optimizer_function(learning_rate=learning_rate)\n",
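One thing this reformat leaves as-is: `optimizer_function` is annotated `str`, yet the body calls it with `learning_rate=` and the call sites below pass an optimizer class such as `keras.optimizers.Adam`. A type-accurate signature would look like the sketch below (the annotation is a suggestion, not part of the commit; the rest of the function body lies outside the excerpted hunk):

    import keras

    def compile_train(
        optimizer_function: type[keras.optimizers.Optimizer],  # suggested annotation
        learning_rate: float,
        **kwargs,
    ) -> keras.callbacks.History:
        model = get_model()  # get_model as defined earlier in the notebook
        optimizer = optimizer_function(learning_rate=learning_rate)
        ...  # remainder of the cell is outside the excerpted hunk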
@@ -401,7 +414,10 @@
     "epochs = 5\n",
     "batch_size = 64\n",
     "history_adam = compile_train(\n",
-    "    keras.optimizers.Adam, learning_rate=0.001, epochs=epochs, batch_size=batch_size\n",
+    "    keras.optimizers.Adam,\n",
+    "    learning_rate=0.001,\n",
+    "    epochs=epochs,\n",
+    "    batch_size=batch_size,\n",
     ")"
    ]
   },
@@ -557,7 +573,10 @@
     "histories = []\n",
     "for optimizer in optimizers:\n",
     "    history = compile_train(\n",
-    "        optimizer, learning_rate=learning_rate, epochs=epochs, batch_size=batch_size\n",
+    "        optimizer,\n",
+    "        learning_rate=learning_rate,\n",
+    "        epochs=epochs,\n",
+    "        batch_size=batch_size,\n",
     "    )\n",
     "    name = optimizer.__name__\n",
     "    label = f\"{name} (lr={learning_rate:.06})\"\n",
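For reference, `:.06` in the label's f-string is a precision-6 general format spec, so each legend label renders like this (the optimizer name shown is just an example):

    learning_rate = 0.001
    name = "Adam"  # e.g. keras.optimizers.Adam.__name__
    label = f"{name} (lr={learning_rate:.06})"
    print(label)  # Adam (lr=0.001)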