import os
import zipfile
import shutil
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers, models
from tensorflow.keras.callbacks import Callback
from google.colab import files
from tensorflow.keras.preprocessing import image
%matplotlib inline
# Load Data
!wget --no-check-certificate \
https://github.com/dicodingacademy/assets/releases/download/release/rockpaperscissors.zip \
-O /tmp/rockpaperscissors.zip
zip_path_loc = '/tmp/rockpaperscissors.zip'
zip_ref = zipfile.ZipFile(zip_path_loc, 'r')
zip_ref.extractall('/tmp')
zip_ref.close()
root_path = '/tmp/rockpaperscissors'
train_path = os.path.join(root_path, 'train')
val_path = os.path.join(root_path, 'val')
categories = ['paper', 'rock', 'scissors']
def create_train_val_directory(train_path, val_path, categories):
    """Create the train/ and val/ trees with one subfolder per category.

    Args:
        train_path: destination directory for training images.
        val_path: destination directory for validation images.
        categories: iterable of class names; one subdirectory is created
            under each of ``train_path`` and ``val_path`` per name.
    """
    # exist_ok=True makes the setup idempotent: re-running the notebook
    # cell no longer crashes with FileExistsError (os.mkdir did).
    os.makedirs(train_path, exist_ok=True)
    os.makedirs(val_path, exist_ok=True)
    for category in categories:
        os.makedirs(os.path.join(train_path, category), exist_ok=True)
        os.makedirs(os.path.join(val_path, category), exist_ok=True)
create_train_val_directory(train_path, val_path, categories)

# Source image folders as laid out inside the extracted archive,
# and the image filenames found in each.
paper_img_path = os.path.join(root_path, 'paper')
rock_img_path = os.path.join(root_path, 'rock')
scissors_img_path = os.path.join(root_path, 'scissors')

categories_img_list = [
    os.listdir(folder)
    for folder in (paper_img_path, rock_img_path, scissors_img_path)
]
paper_img_list, rock_img_list, scissors_img_list = categories_img_list
def train_test_split_categories(categories, img_categories, root_path, train_frac=0.6):
    """Copy each category's images into the train/ and val/ trees.

    The first ``train_frac`` share of each category's file list is copied
    to ``<root_path>/train/<category>``, the remainder to
    ``<root_path>/val/<category>``. Files are copied, not moved, so the
    source folders are left intact.

    Args:
        categories: class names; each must exist as ``<root_path>/<name>``.
        img_categories: per-category lists of image filenames, parallel
            to ``categories``.
        root_path: directory holding the source category folders and the
            target train/ and val/ trees (which must already exist).
        train_frac: fraction of each category assigned to training.
            Defaults to 0.6, matching the original hard-coded 60/40 split.
    """
    train_path = os.path.join(root_path, 'train')
    val_path = os.path.join(root_path, 'val')
    for category, filenames in zip(categories, img_categories):
        src_dir = os.path.join(root_path, category)
        # int() truncates, so the validation split gets any remainder.
        train_size = int(train_frac * len(filenames))
        for name in filenames[:train_size]:
            shutil.copy(os.path.join(src_dir, name), os.path.join(train_path, category))
        for name in filenames[train_size:]:
            shutil.copy(os.path.join(src_dir, name), os.path.join(val_path, category))
# Populate train/ and val/ with the 60/40 split, then build the Keras
# input pipelines. Augmentation is applied to the training set only;
# validation images are merely rescaled.
train_test_split_categories(categories, categories_img_list, root_path)

train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    rotation_range=60,
    horizontal_flip=True,
    zoom_range=[1.0, 2.0],
    shear_range=0.25,
    fill_mode='nearest',
)
valid_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    train_path,
    target_size=(100, 150),
    batch_size=16,
    class_mode='categorical',
)
validation_generator = valid_datagen.flow_from_directory(
    val_path,
    target_size=(100, 150),
    batch_size=12,
    class_mode='categorical',
)
# CNN classifier: four conv/pool stages of increasing width, then a
# two-layer dense head with dropout, ending in a 3-way softmax
# (paper / rock / scissors).
model = models.Sequential()
model.add(layers.Conv2D(32, (5, 5), activation='relu', input_shape=(100, 150, 3)))
model.add(layers.MaxPooling2D(2, 2))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D(2, 2))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D(2, 2))
model.add(layers.Conv2D(256, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D(2, 2))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(0.3))
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(0.4))
model.add(layers.Dense(3, activation='softmax'))

model.summary()

model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy'],
)
# Rolling statistics shared with ModelCallback. `ep` counts epochs that
# actually ran and is read later when plotting the history curves.
train_acc_sum = 0.0
val_acc_sum = 0.0
counter = 0
ep = 0


class ModelCallback(Callback):
    """Track running accuracy averages and stop training once the
    accuracy/loss targets have been met on 12 (not necessarily
    consecutive) epochs."""

    def on_epoch_end(self, epoch, logs=None):
        global train_acc_sum, val_acc_sum, counter, ep

        acc = round(logs['accuracy'], 2)
        val_acc = round(logs['val_accuracy'], 2)
        loss = round(logs['loss'], 2)
        val_loss = round(logs['val_loss'], 2)

        ep += 1
        train_acc_sum += acc
        val_acc_sum += val_acc

        # Progress report every 10 epochs: mean (rounded) accuracies so far.
        if (epoch + 1) % 10 == 0:
            print("Epoch of {}: Train rate: {}; Validation rate: {}; Count: {}".format(
                epoch + 1, train_acc_sum / (epoch + 1), val_acc_sum / (epoch + 1), counter))

        # Count every epoch meeting all four targets; stop after the 12th.
        if acc >= 0.96 and val_acc >= 0.96 and loss <= 0.06 and val_loss <= 0.06:
            counter += 1
            if counter == 12:
                print("Training stopped at epoch of {} with train accuracy: {} and validation accuracy: {}".format(
                    epoch + 1, acc, val_acc))
                self.model.stop_training = True
%%time
# Train for up to 100 epochs; ModelCallback may halt training early
# once its accuracy/loss targets have been met 12 times.
hist = model.fit(
train_generator,
epochs=100,
validation_data=validation_generator,
callbacks=[ModelCallback()],
verbose=2)
def _plot_history(train_vals, val_vals, title):
    # Draw paired train/validation curves over the epochs that ran.
    epochs_axis = range(1, ep + 1)
    plt.plot(epochs_axis, train_vals, label='Training')
    plt.plot(epochs_axis, val_vals, label='Validation')
    plt.title(title)
    plt.legend()
    plt.show()

_plot_history(hist.history['accuracy'], hist.history['val_accuracy'],
              "Train Accuracy - Validation Accuracy")
plt.clf()
_plot_history(hist.history['loss'], hist.history['val_loss'],
              "Train Loss - Validation Loss")
# Inspect the label -> index mapping, then classify user-uploaded images.
train_generator.class_indices
uploaded = files.upload()

# Index-to-label table; assumes the alphabetical class ordering produced
# by flow_from_directory (paper=0, rock=1, scissors=2).
label_names = {0: 'paper', 1: 'rock', 2: 'scissors'}

for fn in uploaded:
    img = image.load_img(fn, target_size=(100, 150))
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img)

    batch = np.expand_dims(image.img_to_array(img), axis=0)
    classes = model.predict(np.vstack([batch]))
    print("Classes:", classes)
    predicted = np.argmax(classes)
    print(fn)
    print(label_names.get(predicted, 'unknown'))