diff --git a/13_convolutional_neural_networks.ipynb b/13_convolutional_neural_networks.ipynb
index b4ea324..7cd118d 100644
--- a/13_convolutional_neural_networks.ipynb
+++ b/13_convolutional_neural_networks.ipynb
@@ -124,9 +124,9 @@
    },
    "outputs": [],
    "source": [
-    "from sklearn.datasets import load_sample_images\n",
-    "dataset = load_sample_images()\n",
-    "china, flower = dataset.images\n",
+    "from sklearn.datasets import load_sample_image\n",
+    "china = load_sample_image(\"china.jpg\")\n",
+    "flower = load_sample_image(\"flower.jpg\")\n",
     "image = china[150:220, 130:250]\n",
     "height, width, channels = image.shape\n",
     "image_grayscale = image.mean(axis=2).astype(np.float32)\n",
@@ -232,8 +232,7 @@
    },
    "outputs": [],
    "source": [
-    "from sklearn.datasets import load_sample_images\n",
-    "dataset = np.array(load_sample_images().images, dtype=np.float32)\n",
+    "dataset = np.array([china, flower], dtype=np.float32)\n",
     "batch_size, height, width, channels = dataset.shape\n",
     "\n",
     "filters = np.zeros(shape=(7, 7, channels, 2), dtype=np.float32)\n",
@@ -261,7 +260,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 12,
+   "execution_count": 13,
    "metadata": {
     "collapsed": false
    },
@@ -283,7 +282,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": 14,
    "metadata": {
     "collapsed": false
    },
@@ -307,7 +306,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 14,
+   "execution_count": 15,
    "metadata": {
     "collapsed": false
    },
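
For reference, a minimal Python sketch of what the affected cells do after this change. The names china, flower, dataset, and image come from the diff itself; the shapes noted in the comments assume scikit-learn's bundled sample images (both 427x640 RGB), which is what allows them to be stacked into a single batch below.

# Sketch of the updated loading code, assuming scikit-learn is installed.
import numpy as np
from sklearn.datasets import load_sample_image

# Load the two sample images individually (replaces load_sample_images()).
china = load_sample_image("china.jpg")    # ndarray, shape (427, 640, 3), dtype uint8
flower = load_sample_image("flower.jpg")  # ndarray, shape (427, 640, 3), dtype uint8

# Crop a patch and convert it to grayscale, as in the first modified cell.
image = china[150:220, 130:250]
height, width, channels = image.shape
image_grayscale = image.mean(axis=2).astype(np.float32)

# Both images share the same shape, so they can be stacked into one batch,
# as in the second modified cell.
dataset = np.array([china, flower], dtype=np.float32)
batch_size, height, width, channels = dataset.shape  # (2, 427, 640, 3)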