import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import GlobalMaxPooling2D, MaxPooling2D
from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical
|
|
|
|
|
# Load CIFAR-10: 50k train / 10k test RGB images of shape (32, 32, 3),
# labels of shape (N, 1) with integer class ids 0-9.
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()

# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0

# Only keep the first n_classes out of the 10 available.
# NOTE: `np.where` requires numpy, which the original file never imported
# (NameError at runtime) — `import numpy as np` is added to the imports.
n_classes = 3
train_indices = np.where(train_labels[:, 0] < n_classes)
test_indices = np.where(test_labels[:, 0] < n_classes)

train_images, train_labels = train_images[train_indices], train_labels[train_indices]
test_images, test_labels = test_images[test_indices], test_labels[test_indices]
|
|
|
| # Take a glimpse at the data and class labels
|
# Take a glimpse at the data and class labels
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']

# Show the first 25 training images in a 5x5 grid, labelled by class name.
fig = plt.figure(figsize=(10, 10))
for idx in range(25):
    ax = fig.add_subplot(5, 5, idx + 1)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.grid(False)
    ax.imshow(train_images[idx])
    # CIFAR labels are arrays of shape (1,), hence the extra [0] index
    ax.set_xlabel(class_names[train_labels[idx][0]])
plt.show()
|
|
|
| # Create base CNN
|
def input_block_basenet(model=None, n_filters=32, conv_kernel_size=(3, 3), strides=(1, 1), pool_kernel_size=(2, 2)):
    """Append the input convolutional block (conv-conv-pool-dropout) to `model`.

    Args:
        model: Sequential model to extend; a fresh one is created when None.
            (The original default `model=Sequential()` is a mutable default
            argument — evaluated once at definition time and shared across
            calls, so repeated calls would keep growing the same model.)
        n_filters: number of filters for both Conv2D layers (was hard-coded
            to 32; the default preserves the original behavior).
        conv_kernel_size: kernel size for both Conv2D layers (was hard-coded
            to 3; default preserves original behavior).
        strides: strides of the first Conv2D layer.
        pool_kernel_size: pool size of the MaxPooling2D layer (the original
            ignored this and used the Keras default (2, 2); the default here
            matches that).

    Returns:
        The same (or newly created) Sequential model, extended in place.
    """
    block = Sequential() if model is None else model

    # First conv declares the network input shape; 'same' padding keeps 32x32.
    block.add(tf.keras.layers.Conv2D(n_filters, conv_kernel_size, input_shape=(32, 32, 3),
                                     padding='same', strides=strides, activation='relu'))
    block.add(tf.keras.layers.Conv2D(n_filters, conv_kernel_size, activation='relu'))
    block.add(tf.keras.layers.MaxPooling2D(pool_size=pool_kernel_size))
    block.add(tf.keras.layers.Dropout(0.25))

    return block
|
|
|
def block_basenet(model=None, n_filters=64, conv_kernel_size=(3, 3), strides=(1, 1), pool_kernel_size=(2, 2)):
    """Append a deeper convolutional block (conv-conv-pool-dropout) to `model`.

    Args:
        model: Sequential model to extend; a fresh one is created when None.
            (Replaces the original mutable default argument `Sequential()`,
            which was evaluated once and shared across calls.)
        n_filters: number of filters for both Conv2D layers. The original
            declared a default of 32 but ignored it and hard-coded 64; the
            default here is 64 so existing callers get identical behavior.
        conv_kernel_size: kernel size for both Conv2D layers (was hard-coded
            to 3).
        strides: strides of the first Conv2D layer (was accepted but unused;
            now applied to the first conv, default (1, 1) matches the
            original behavior).
        pool_kernel_size: pool size of the MaxPooling2D layer. The original
            default (1, 1) was ignored in favor of Keras's default (2, 2);
            the default here is (2, 2) to preserve actual behavior.

    Returns:
        The same (or newly created) Sequential model, extended in place.
    """
    block = Sequential() if model is None else model

    block.add(tf.keras.layers.Conv2D(n_filters, conv_kernel_size, padding='same',
                                     strides=strides, activation='relu'))
    block.add(tf.keras.layers.Conv2D(n_filters, conv_kernel_size, activation='relu'))
    block.add(tf.keras.layers.MaxPooling2D(pool_size=pool_kernel_size))
    block.add(tf.keras.layers.Dropout(0.25))

    return block
|
|
|
| # Create Classifier
|
def classifier_basenet(model=None, n_units=512, dropout_rate=0.5):
    """Append the dense classification head (flatten-dense-dropout-softmax).

    Args:
        model: Sequential model to extend; a fresh one is created when None.
            (Replaces the original mutable default argument `Sequential()`,
            which was evaluated once and shared across calls.)
        n_units: width of the hidden Dense layer (default preserves the
            original hard-coded 512).
        dropout_rate: dropout rate before the output layer (default preserves
            the original 0.5).

    Returns:
        The same (or newly created) Sequential model, extended in place.

    Note:
        The output width comes from the module-level global `n_classes`,
        which must be defined before this is called.
    """
    block = Sequential() if model is None else model

    block.add(tf.keras.layers.Flatten())
    block.add(tf.keras.layers.Dense(n_units, activation='relu'))
    block.add(tf.keras.layers.Dropout(dropout_rate))
    # Softmax over the globally-configured number of kept CIFAR classes.
    block.add(tf.keras.layers.Dense(n_classes, activation='softmax'))

    return block
|
|
|
| # Create the network
|
def cnn_model(input_shape=(32, 32, 3)):
    """Assemble the full CNN: input conv block -> conv block -> classifier.

    Args:
        input_shape: shape of a single input image. The original accepted
            this parameter but never used it (the shape was hard-coded in
            the first conv layer); declaring it via an explicit Input layer
            makes the parameter take effect. The default matches the
            original hard-coded (32, 32, 3), so callers see no change.

    Returns:
        The complete (unbuilt, uncompiled) Sequential model.
    """
    model = Sequential()
    model.add(Input(shape=input_shape))

    return classifier_basenet(block_basenet(input_block_basenet(model)))
|
|
|
model = cnn_model()
model.build()

# Check the dimensional flow of the data through the network built
model.summary()

# Fit the model to my data.
# BUG FIX: the original learning_rate=0.01 is too high for Adam on this
# model — the recorded output shows the loss pinned at ln(3) ~= 1.0986 and
# accuracy stuck at chance (0.3333) for all 5 epochs, i.e. no learning.
# Adam's default 1e-3 lets training make progress.
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# Sparse CCE matches the integer (non-one-hot) labels from cifar10.load_data.
loss = tf.keras.losses.SparseCategoricalCrossentropy()
model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])

history = model.fit(train_images, train_labels, epochs=5,
                    validation_data=(test_images, test_labels))
|
|
|
|
|
| #######################
|
| ### Produced Output ###
|
| #######################
|
"""
Model: "sequential_35"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
conv2d_34 (Conv2D)           (None, 32, 32, 32)        896

conv2d_35 (Conv2D)           (None, 30, 30, 32)        9248

max_pooling2d_16 (MaxPooling (None, 15, 15, 32)        0
g2D)

dropout_15 (Dropout)         (None, 15, 15, 32)        0

conv2d_36 (Conv2D)           (None, 15, 15, 64)        18496

conv2d_37 (Conv2D)           (None, 13, 13, 64)        36928

max_pooling2d_17 (MaxPooling (None, 6, 6, 64)          0
g2D)

dropout_16 (Dropout)         (None, 6, 6, 64)          0

flatten_3 (Flatten)          (None, 2304)              0

dense_6 (Dense)              (None, 512)               1180160

dropout_17 (Dropout)         (None, 512)               0

dense_7 (Dense)              (None, 3)                 1539

=================================================================
Total params: 1,247,267
Trainable params: 1,247,267
Non-trainable params: 0

Epoch 1/5
469/469 [==============================] - 13s 26ms/step - loss: 1.0994 - accuracy: 0.3309 - val_loss: 1.0992 - val_accuracy: 0.3333
Epoch 2/5
469/469 [==============================] - 12s 26ms/step - loss: 1.0993 - accuracy: 0.3361 - val_loss: 1.1003 - val_accuracy: 0.3333
Epoch 3/5
469/469 [==============================] - 12s 26ms/step - loss: 1.0997 - accuracy: 0.3284 - val_loss: 1.0987 - val_accuracy: 0.3333
Epoch 4/5
469/469 [==============================] - 12s 26ms/step - loss: 1.0994 - accuracy: 0.3327 - val_loss: 1.0987 - val_accuracy: 0.3333
Epoch 5/5
469/469 [==============================] - 12s 25ms/step - loss: 1.0990 - accuracy: 0.3323 - val_loss: 1.0996 - val_accuracy: 0.3333
"""
|