In [1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from glob import glob
import os
import tensorflow as tf
import logging
tf.get_logger().setLevel(logging.ERROR)
import warnings
warnings.filterwarnings("ignore")
In [2]:
# I am using my GPU
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
print('Using GPU - ', gpu)
except RuntimeError as e:
print(e)
Using GPU - PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')
In [5]:
train_path = "./dataset/train/"
validation_path = "./dataset/validation/"
testing_path = "./dataset/test/"
In [6]:
numberOfClass = len(glob(train_path + "/*"))
print("Number Of Class: ", numberOfClass)
Number Of Class: 6
In [8]:
# Loading one image
img = tf.keras.preprocessing.image.load_img(os.path.join(train_path, 'sea/8189.jpg'))
plt.imshow(img)
plt.axis("off")
plt.show()
In [9]:
# The images size in dataset.
image_shape = tf.keras.preprocessing.image.img_to_array(img)
print(image_shape.shape)
(150, 150, 3)
In [11]:
training_dataset = tf.keras.utils.image_dataset_from_directory(
directory = train_path,
image_size=(150, 150),
batch_size=12,
label_mode='categorical'
)
validation_dataset = tf.keras.utils.image_dataset_from_directory(
directory = validation_path,
image_size=(150,150),
batch_size=12,
label_mode='categorical'
)
Found 1200 files belonging to 6 classes. Found 336 files belonging to 6 classes.
In [12]:
vgg19 = tf.keras.applications.vgg19.VGG19(
input_shape=(150,150,3),
include_top=False,
)
In [13]:
for layer in vgg19.layers:
layer.trainable = False
In [14]:
num_total_params = vgg19.count_params()
non_trainable_params = sum([w.shape.num_elements() for w in vgg19.trainable_weights])
num_total_params, non_trainable_params
Out[14]:
(20024384, 0)
In [15]:
last_desired_layer = vgg19.get_layer('block4_conv1')
last_output = last_desired_layer.output
last_output
Out[15]:
<KerasTensor: shape=(None, 18, 18, 512) dtype=float32 (created by layer 'block4_conv1')>
In [19]:
def create_final_model(pre_trained_model, last_output):
x = tf.keras.layers.Flatten()(last_output)
x = tf.keras.layers.Dense(256, activation='relu')(x)
x = tf.keras.layers.Dropout(0.5)(x)
x = tf.keras.layers.Dense(6, activation='softmax')(x)
model = tf.keras.Model(inputs = pre_trained_model.input, outputs=x)
model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.00001), loss='binary_crossentropy', metrics=['accuracy'])
return model
In [20]:
model = create_final_model(vgg19, last_output)
In [21]:
model.summary()
Model: "model_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) [(None, 150, 150, 3)] 0 block1_conv1 (Conv2D) (None, 150, 150, 64) 1792 block1_conv2 (Conv2D) (None, 150, 150, 64) 36928 block1_pool (MaxPooling2D) (None, 75, 75, 64) 0 block2_conv1 (Conv2D) (None, 75, 75, 128) 73856 block2_conv2 (Conv2D) (None, 75, 75, 128) 147584 block2_pool (MaxPooling2D) (None, 37, 37, 128) 0 block3_conv1 (Conv2D) (None, 37, 37, 256) 295168 block3_conv2 (Conv2D) (None, 37, 37, 256) 590080 block3_conv3 (Conv2D) (None, 37, 37, 256) 590080 block3_conv4 (Conv2D) (None, 37, 37, 256) 590080 block3_pool (MaxPooling2D) (None, 18, 18, 256) 0 block4_conv1 (Conv2D) (None, 18, 18, 512) 1180160 flatten_1 (Flatten) (None, 165888) 0 dense_2 (Dense) (None, 256) 42467584 dropout_1 (Dropout) (None, 256) 0 dense_3 (Dense) (None, 6) 1542 ================================================================= Total params: 45,974,854 Trainable params: 42,469,126 Non-trainable params: 3,505,728 _________________________________________________________________
In [24]:
hisy_vgg19 = model.fit_generator(training_dataset,
epochs = 10,
validation_data = validation_dataset,
verbose=2)
Epoch 1/10 100/100 - 7s - loss: 0.6275 - accuracy: 0.2825 - val_loss: 1.0474 - val_accuracy: 0.3214 - 7s/epoch - 72ms/step Epoch 2/10 100/100 - 7s - loss: 0.6265 - accuracy: 0.2750 - val_loss: 0.9261 - val_accuracy: 0.3244 - 7s/epoch - 71ms/step Epoch 3/10 100/100 - 7s - loss: 0.6888 - accuracy: 0.2783 - val_loss: 1.0731 - val_accuracy: 0.3244 - 7s/epoch - 70ms/step Epoch 4/10 100/100 - 7s - loss: 0.6116 - accuracy: 0.2875 - val_loss: 0.8847 - val_accuracy: 0.3244 - 7s/epoch - 72ms/step Epoch 5/10 100/100 - 7s - loss: 0.8118 - accuracy: 0.2858 - val_loss: 0.8387 - val_accuracy: 0.3214 - 7s/epoch - 74ms/step Epoch 6/10 100/100 - 7s - loss: 0.6128 - accuracy: 0.2892 - val_loss: 0.7835 - val_accuracy: 0.3214 - 7s/epoch - 72ms/step Epoch 7/10 100/100 - 7s - loss: 0.6042 - accuracy: 0.2883 - val_loss: 0.8054 - val_accuracy: 0.3185 - 7s/epoch - 71ms/step Epoch 8/10 100/100 - 7s - loss: 0.6399 - accuracy: 0.2842 - val_loss: 0.8210 - val_accuracy: 0.3185 - 7s/epoch - 71ms/step Epoch 9/10 100/100 - 7s - loss: 0.6500 - accuracy: 0.2858 - val_loss: 0.8692 - val_accuracy: 0.3185 - 7s/epoch - 72ms/step Epoch 10/10 100/100 - 7s - loss: 0.6190 - accuracy: 0.2892 - val_loss: 0.8981 - val_accuracy: 0.3214 - 7s/epoch - 72ms/step
In [25]:
plt.plot(hisy_vgg19.history["loss"], label = "training loss")
plt.plot(hisy_vgg19.history["val_loss"], label = "validation loss")
plt.legend()
plt.show()
In [26]:
plt.plot(hisy_vgg19.history["accuracy"], label = "accuracy")
plt.plot(hisy_vgg19.history["val_accuracy"], label = "validation accuracy")
plt.legend()
plt.show()
Accuracy will increase on increasing the number of pictures and number of neurons in the dense layer. Also, with more batches, it will increase. However, this requires significant computation power.
In [ ]:
In [ ]: