Here we will classify images into more than two classes. The steps involved are almost the same as in the binary case; the main differences are a softmax output layer with one unit per class and the categorical cross-entropy loss.
In [1]:
import os
import random
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import tensorflow as tf
import logging
tf.get_logger().setLevel(logging.ERROR)
In [2]:
BASE_DIR = './rps'
rock_dir = os.path.join(BASE_DIR, 'train/rock')
paper_dir = os.path.join(BASE_DIR, 'train/paper')
scissors_dir = os.path.join(BASE_DIR, 'train/scissors')
print('Total number of training rock images : ', len(os.listdir(rock_dir)))
print('Total number of training paper images : ', len(os.listdir(paper_dir)))
print('Total number of training scissors images : ', len(os.listdir(scissors_dir)))
Total number of training rock images :  329
Total number of training paper images :  362
Total number of training scissors images :  329
In [3]:
rock_files = os.listdir(rock_dir)
paper_files = os.listdir(paper_dir)
scissors_files = os.listdir(scissors_dir)
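As a quick sanity check, we can display one random image per class. This is a sketch using the random and mpimg imports from above; it assumes the files are image formats matplotlib can read.
In [ ]:
# Display one random sample from each class as a quick sanity check.
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
samples = [('rock', rock_dir, rock_files),
           ('paper', paper_dir, paper_files),
           ('scissors', scissors_dir, scissors_files)]
for ax, (name, directory, files) in zip(axes, samples):
    img = mpimg.imread(os.path.join(directory, random.choice(files)))
    ax.imshow(img)
    ax.set_title(name)
    ax.axis('off')
plt.show()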
In [4]:
TRAIN_DIR = os.path.join(BASE_DIR, 'train')
VALIDATION_DIR = os.path.join(BASE_DIR, 'val')
training_dataset = tf.keras.utils.image_dataset_from_directory(
    TRAIN_DIR,
    image_size=(150, 150),
    batch_size=32,
    label_mode='categorical'
)
validation_dataset = tf.keras.utils.image_dataset_from_directory(
    VALIDATION_DIR,
    image_size=(150, 150),
    batch_size=32,
    label_mode='categorical'
)
SHUFFLE_BUFFER_SIZE = 1000
PREFETCH_BUFFER_SIZE = tf.data.AUTOTUNE
train_dataset_final = training_dataset.cache().shuffle(SHUFFLE_BUFFER_SIZE).prefetch(PREFETCH_BUFFER_SIZE)
val_dataset_final = validation_dataset.cache().prefetch(PREFETCH_BUFFER_SIZE)
Found 1020 files belonging to 3 classes.
Found 804 files belonging to 3 classes.
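Before building the model it is worth confirming what the pipeline yields: batches of 150x150x3 images and, because of label_mode='categorical', one-hot label vectors of length 3. A minimal check:
In [ ]:
# Inspect a single batch: images (batch, 150, 150, 3), labels (batch, 3).
for image_batch, label_batch in train_dataset_final.take(1):
    print(image_batch.shape, label_batch.shape)
    print(label_batch[0].numpy())  # a one-hot vector such as [0. 1. 0.]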
In [5]:
data_augmentation_model = tf.keras.Sequential([
    tf.keras.Input(shape=(150, 150, 3)),
    tf.keras.layers.RandomFlip("horizontal"),
    tf.keras.layers.RandomRotation(0.4),
    tf.keras.layers.RandomTranslation(0.2, 0.2),
    tf.keras.layers.RandomContrast(0.4),
    tf.keras.layers.RandomZoom(0.2)
])
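The random layers are only active during training, so to preview their effect we have to call the augmentation model with training=True. A sketch:
In [ ]:
# Apply the augmentation model repeatedly to the same image to preview
# the random flips, rotations, translations, contrast changes and zooms.
for image_batch, _ in train_dataset_final.take(1):
    plt.figure(figsize=(12, 4))
    for i in range(4):
        augmented = data_augmentation_model(image_batch[:1], training=True)
        # Pixel values are floats in [0, 255]; clip in case contrast overshoots.
        plt.subplot(1, 4, i + 1)
        plt.imshow(np.clip(augmented[0].numpy(), 0, 255).astype('uint8'))
        plt.axis('off')
    plt.show()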
In [6]:
model = tf.keras.models.Sequential([
    tf.keras.layers.Input(shape=(150, 150, 3)),
    tf.keras.layers.Rescaling(1./255),
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(32, (3,3), activation='relu'),  # 128 in the course version; reduced for speed
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(32, (3,3), activation='relu'),  # 64 in the course version; reduced for speed
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(3, activation='softmax')  # one output unit per class
])
model.summary()
Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= rescaling (Rescaling) (None, 150, 150, 3) 0 conv2d (Conv2D) (None, 148, 148, 64) 1792 max_pooling2d (MaxPooling2D (None, 74, 74, 64) 0 ) conv2d_1 (Conv2D) (None, 72, 72, 64) 36928 max_pooling2d_1 (MaxPooling (None, 36, 36, 64) 0 2D) conv2d_2 (Conv2D) (None, 34, 34, 32) 18464 max_pooling2d_2 (MaxPooling (None, 17, 17, 32) 0 2D) conv2d_3 (Conv2D) (None, 15, 15, 32) 9248 max_pooling2d_3 (MaxPooling (None, 7, 7, 32) 0 2D) flatten (Flatten) (None, 1568) 0 dropout (Dropout) (None, 1568) 0 dense (Dense) (None, 512) 803328 dense_1 (Dense) (None, 3) 1539 ================================================================= Total params: 871,299 Trainable params: 871,299 Non-trainable params: 0 _________________________________________________________________
I have reduced the number of filters in the later convolutional layers due to computational limitations.
In [7]:
model_with_aug = tf.keras.models.Sequential([
    data_augmentation_model,
    model
])

model_with_aug.compile(
    optimizer='rmsprop',
    loss='categorical_crossentropy',
    metrics=['accuracy']
)
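Note that categorical_crossentropy matches the one-hot labels produced by label_mode='categorical' above. Had the datasets been built with label_mode='int' instead, the matching loss would be sparse_categorical_crossentropy, as in this sketch (left commented out because our labels are one-hot):
In [ ]:
# Alternative pairing (sketch): integer labels go with the sparse loss.
# model_with_aug.compile(optimizer='rmsprop',
#                        loss='sparse_categorical_crossentropy',
#                        metrics=['accuracy'])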
In [8]:
class EarlyStopCallBack(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        # Stop training once training accuracy exceeds 80%.
        if logs and logs.get('accuracy', 0) > 0.80:
            self.model.stop_training = True
            print('Model reached 80% accuracy')
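Keras also ships a built-in callback for this purpose, tf.keras.callbacks.EarlyStopping, which monitors a metric and can restore the best weights. A sketch of an equivalent setup that watches validation accuracy instead:
In [ ]:
# Built-in alternative: stop when validation accuracy stops improving.
early_stop = tf.keras.callbacks.EarlyStopping(
    monitor='val_accuracy',
    patience=5,
    restore_best_weights=True
)
# Then pass callbacks=[early_stop] to model.fit.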
In [9]:
history = model_with_aug.fit(
    train_dataset_final,
    epochs=25,
    validation_data=val_dataset_final,
    verbose=2,
    callbacks=[EarlyStopCallBack()]
)
Epoch 1/25
32/32 - 60s - loss: 1.1133 - accuracy: 0.3373 - val_loss: 1.1227 - val_accuracy: 0.3321 - 60s/epoch - 2s/step
Epoch 2/25
32/32 - 33s - loss: 1.1027 - accuracy: 0.3480 - val_loss: 1.0984 - val_accuracy: 0.3333 - 33s/epoch - 1s/step
Epoch 3/25
32/32 - 26s - loss: 1.0996 - accuracy: 0.3490 - val_loss: 1.1147 - val_accuracy: 0.3308 - 26s/epoch - 805ms/step
Epoch 4/25
32/32 - 29s - loss: 1.1016 - accuracy: 0.3422 - val_loss: 1.0886 - val_accuracy: 0.4378 - 29s/epoch - 899ms/step
Epoch 5/25
32/32 - 27s - loss: 1.0990 - accuracy: 0.3500 - val_loss: 1.0903 - val_accuracy: 0.3607 - 27s/epoch - 844ms/step
Epoch 6/25
32/32 - 26s - loss: 1.0921 - accuracy: 0.3882 - val_loss: 1.0840 - val_accuracy: 0.3532 - 26s/epoch - 819ms/step
Epoch 7/25
32/32 - 26s - loss: 1.0874 - accuracy: 0.3794 - val_loss: 1.0705 - val_accuracy: 0.4005 - 26s/epoch - 800ms/step
Epoch 8/25
32/32 - 25s - loss: 1.0731 - accuracy: 0.4039 - val_loss: 1.0384 - val_accuracy: 0.4614 - 25s/epoch - 792ms/step
Epoch 9/25
32/32 - 25s - loss: 1.0744 - accuracy: 0.4078 - val_loss: 1.0201 - val_accuracy: 0.4677 - 25s/epoch - 793ms/step
Epoch 10/25
32/32 - 25s - loss: 1.0535 - accuracy: 0.4461 - val_loss: 0.9766 - val_accuracy: 0.5050 - 25s/epoch - 796ms/step
Epoch 11/25
32/32 - 25s - loss: 1.0684 - accuracy: 0.4147 - val_loss: 1.0046 - val_accuracy: 0.4739 - 25s/epoch - 776ms/step
Epoch 12/25
32/32 - 26s - loss: 1.0409 - accuracy: 0.4441 - val_loss: 0.9591 - val_accuracy: 0.5100 - 26s/epoch - 806ms/step
Epoch 13/25
32/32 - 26s - loss: 1.0457 - accuracy: 0.4529 - val_loss: 1.0361 - val_accuracy: 0.4216 - 26s/epoch - 800ms/step
Epoch 14/25
32/32 - 26s - loss: 1.0500 - accuracy: 0.4167 - val_loss: 1.0311 - val_accuracy: 0.4303 - 26s/epoch - 805ms/step
Epoch 15/25
32/32 - 25s - loss: 1.0142 - accuracy: 0.4863 - val_loss: 0.9215 - val_accuracy: 0.5100 - 25s/epoch - 788ms/step
Epoch 16/25
32/32 - 26s - loss: 1.0014 - accuracy: 0.4804 - val_loss: 1.0725 - val_accuracy: 0.4154 - 26s/epoch - 797ms/step
Epoch 17/25
32/32 - 26s - loss: 0.9998 - accuracy: 0.4755 - val_loss: 0.9420 - val_accuracy: 0.5261 - 26s/epoch - 809ms/step
Epoch 18/25
32/32 - 25s - loss: 1.0157 - accuracy: 0.4676 - val_loss: 0.9100 - val_accuracy: 0.5498 - 25s/epoch - 796ms/step
Epoch 19/25
32/32 - 25s - loss: 1.0107 - accuracy: 0.4941 - val_loss: 0.9680 - val_accuracy: 0.4801 - 25s/epoch - 788ms/step
Epoch 20/25
32/32 - 25s - loss: 1.0049 - accuracy: 0.4549 - val_loss: 1.1940 - val_accuracy: 0.4129 - 25s/epoch - 785ms/step
Epoch 21/25
32/32 - 25s - loss: 0.9684 - accuracy: 0.4853 - val_loss: 0.9139 - val_accuracy: 0.5498 - 25s/epoch - 773ms/step
Epoch 22/25
32/32 - 25s - loss: 0.9623 - accuracy: 0.4941 - val_loss: 0.9160 - val_accuracy: 0.5348 - 25s/epoch - 784ms/step
Epoch 23/25
32/32 - 25s - loss: 0.9726 - accuracy: 0.4961 - val_loss: 0.9497 - val_accuracy: 0.5435 - 25s/epoch - 784ms/step
Epoch 24/25
32/32 - 25s - loss: 0.9223 - accuracy: 0.5353 - val_loss: 0.9024 - val_accuracy: 0.5224 - 25s/epoch - 781ms/step
Epoch 25/25
32/32 - 25s - loss: 0.9481 - accuracy: 0.5225 - val_loss: 0.9781 - val_accuracy: 0.5336 - 25s/epoch - 780ms/step
In [10]:
def plot_loss_acc(history):
    '''Plots the training and validation loss and accuracy from a history object'''
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(len(acc))

    fig, ax = plt.subplots(1, 2, figsize=(12, 6))
    ax[0].plot(epochs, acc, 'bo', label='Training accuracy')
    ax[0].plot(epochs, val_acc, 'b', label='Validation accuracy')
    ax[0].set_title('Training and validation accuracy')
    ax[0].set_xlabel('epochs')
    ax[0].set_ylabel('accuracy')
    ax[0].legend()
    ax[1].plot(epochs, loss, 'bo', label='Training Loss')
    ax[1].plot(epochs, val_loss, 'b', label='Validation Loss')
    ax[1].set_title('Training and validation loss')
    ax[1].set_xlabel('epochs')
    ax[1].set_ylabel('loss')
    ax[1].legend()
    plt.show()
In [11]:
plot_loss_acc(history)
In [16]:
TESTING_DIR = os.path.join(BASE_DIR, 'test')

testing_dataset = tf.keras.utils.image_dataset_from_directory(
    TESTING_DIR,
    image_size=(150, 150),
    batch_size=32,
    label_mode='categorical'
)

test_loss, test_accuracy = model_with_aug.evaluate(testing_dataset, verbose=0)
Found 540 files belonging to 3 classes.
In [17]:
test_loss, test_accuracy
Out[17]:
(1.0608102083206177, 0.4018518626689911)
In [20]:
prediction = model_with_aug.predict(val_dataset_final, verbose=0)
labels = np.concatenate([label_batch.numpy() for _, label_batch in val_dataset_final])

# With three classes, reduce both the softmax outputs and the one-hot labels
# with argmax; thresholding a single probability only works in the binary case.
true_classes = np.argmax(labels, axis=1)
pred_classes = np.argmax(prediction, axis=1)

match = int(np.sum(pred_classes == true_classes))
non_match = len(true_classes) - match
match, non_match
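To see which gestures get confused with one another, a confusion matrix over the validation set helps. Note that image_dataset_from_directory orders classes alphabetically by directory name: paper, rock, scissors.
In [ ]:
# Rows are true classes, columns are predictions (paper, rock, scissors).
cm = tf.math.confusion_matrix(true_classes, pred_classes, num_classes=3)
print(cm.numpy())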
Increasing the filter counts back to the course-suggested values and training for more epochs should yield higher accuracy.
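For completeness, predicting a single new image follows the usual Keras pattern; the file path below is a hypothetical placeholder.
In [ ]:
# Hypothetical image path; classes are ordered alphabetically:
# paper, rock, scissors. The model's Rescaling layer handles normalization.
img = tf.keras.utils.load_img('my_hand.png', target_size=(150, 150))
x = np.expand_dims(tf.keras.utils.img_to_array(img), axis=0)
probs = model.predict(x, verbose=0)[0]
print(dict(zip(['paper', 'rock', 'scissors'], probs)))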