Rock, Paper, Scissors Image Classification using Convolutional Neural Network (CNN)

In [ ]:
import os
import zipfile
import shutil
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers, models
from tensorflow.keras.callbacks import Callback
from google.colab import files
from tensorflow.keras.preprocessing import image
%matplotlib inline
In [ ]:
# Load Data
!wget --no-check-certificate \
  https://github.com/dicodingacademy/assets/releases/download/release/rockpaperscissors.zip \
  -O /tmp/rockpaperscissors.zip
--2022-06-05 12:08:19--  https://github.com/dicodingacademy/assets/releases/download/release/rockpaperscissors.zip
Resolving github.com (github.com)... 140.82.112.3
Connecting to github.com (github.com)|140.82.112.3|:443... connected.
HTTP request sent, awaiting response... 302 Found
Location: https://objects.githubusercontent.com/github-production-release-asset-2e65be/391417272/... [following]
--2022-06-05 12:08:19--  https://objects.githubusercontent.com/github-production-release-asset-2e65be/391417272/...
Resolving objects.githubusercontent.com (objects.githubusercontent.com)... 185.199.110.133, 185.199.111.133, 185.199.108.133, ...
Connecting to objects.githubusercontent.com (objects.githubusercontent.com)|185.199.110.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 322873683 (308M) [application/octet-stream]
Saving to: ‘/tmp/rockpaperscissors.zip’

/tmp/rockpapersciss 100%[===================>] 307.92M  65.2MB/s    in 4.6s    

2022-06-05 12:08:24 (66.2 MB/s) - ‘/tmp/rockpaperscissors.zip’ saved [322873683/322873683]

In [ ]:
zip_path_loc = '/tmp/rockpaperscissors.zip'

# Extract the archive into /tmp; the context manager closes the file automatically
with zipfile.ZipFile(zip_path_loc, 'r') as zip_ref:
  zip_ref.extractall('/tmp')
In [ ]:
root_path = '/tmp/rockpaperscissors'

train_path = os.path.join(root_path, 'train')
val_path = os.path.join(root_path, 'val')
categories = ['paper', 'rock', 'scissors']

def create_train_val_directory(train_path, val_path, categories):
  # Create train/ and val/ folders, each with one subfolder per class
  os.mkdir(train_path)
  os.mkdir(val_path)

  for category in categories:
    os.mkdir(os.path.join(train_path, category))
    os.mkdir(os.path.join(val_path, category))

create_train_val_directory(train_path, val_path, categories)
In [ ]:
# Paths to the three class folders in the extracted dataset
paper_img_path = os.path.join(root_path, 'paper')
rock_img_path = os.path.join(root_path, 'rock')
scissors_img_path = os.path.join(root_path, 'scissors')

# Image filenames per class, used to drive the train/val split below
paper_img_list = os.listdir(paper_img_path)
rock_img_list = os.listdir(rock_img_path)
scissors_img_list = os.listdir(scissors_img_path)

categories_img_list = [paper_img_list, rock_img_list, scissors_img_list]
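Before splitting, it is worth eyeballing a few raw samples; a quick preview sketch using the mpimg import from the first cell (assuming the class folders extracted as expected):
In [ ]:
# Show the first image from each class as a quick sanity check
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
for ax, category, img_list in zip(axes, categories, categories_img_list):
  sample_path = os.path.join(root_path, category, img_list[0])
  ax.imshow(mpimg.imread(sample_path))
  ax.set_title(category)
  ax.axis('off')
plt.show()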
In [ ]:
def train_test_split_categories(categories, img_categories, root_path):
  # Copy 60% of each class into train/ and the remaining 40% into val/
  train_path = os.path.join(root_path, 'train')
  val_path = os.path.join(root_path, 'val')
  for category, category_img_list in zip(categories, img_categories):
    category_path = os.path.join(root_path, category)
    train_size = int(0.6 * len(category_img_list))

    for img_filename in category_img_list[:train_size]:
      shutil.copy(os.path.join(category_path, img_filename), os.path.join(train_path, category))

    for img_filename in category_img_list[train_size:]:
      shutil.copy(os.path.join(category_path, img_filename), os.path.join(val_path, category))
In [ ]:
train_test_split_categories(categories, categories_img_list, root_path)
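A quick per-class file count confirms the 60/40 split (a sanity-check sketch, not part of the original pipeline):
In [ ]:
# Count the files that landed in each train/val class folder
for category in categories:
  n_train = len(os.listdir(os.path.join(train_path, category)))
  n_val = len(os.listdir(os.path.join(val_path, category)))
  print('{}: {} train / {} val'.format(category, n_train, n_val))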
In [ ]:
# Augment the training set only; validation images are just rescaled
train_datagen = ImageDataGenerator(
                    rescale=1./255,
                    rotation_range=60,
                    horizontal_flip=True,
                    zoom_range=[1.0, 2.0],
                    shear_range=0.25,
                    fill_mode='nearest')

valid_datagen = ImageDataGenerator(rescale=1./255)
In [ ]:
train_generator = train_datagen.flow_from_directory(
        train_path,
        target_size=(100, 150),  # resize every image to 100x150
        batch_size=16,
        class_mode='categorical')

validation_generator = valid_datagen.flow_from_directory(
        val_path,
        target_size=(100, 150),
        batch_size=12,
        class_mode='categorical')
Found 1312 images belonging to 3 classes.
Found 876 images belonging to 3 classes.
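To see what the augmentation settings above actually produce, one batch can be pulled from train_generator and plotted; an illustrative sketch (the generator yields (images, labels) tuples with pixel values already rescaled to [0, 1]):
In [ ]:
# Preview part of one batch of augmented training images
imgs, labels = next(train_generator)
class_names = list(train_generator.class_indices.keys())
fig, axes = plt.subplots(2, 4, figsize=(14, 6))
for ax, img, label in zip(axes.flat, imgs, labels):
  ax.imshow(img)
  ax.set_title(class_names[np.argmax(label)])
  ax.axis('off')
plt.show()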
In [ ]:
# Four convolution/pooling blocks followed by a dense classifier head
model = models.Sequential([
    layers.Conv2D(32, (5, 5), activation='relu', input_shape=(100, 150, 3)),
    layers.MaxPooling2D(2, 2),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D(2, 2),
    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.MaxPooling2D(2, 2),
    layers.Conv2D(256, (3, 3), activation='relu'),
    layers.MaxPooling2D(2, 2),
    layers.Flatten(),
    layers.Dense(512, activation='relu'),
    layers.Dropout(0.3),
    layers.Dense(256, activation='relu'),
    layers.Dropout(0.4),
    layers.Dense(3, activation='softmax')  # one output per class
])
In [ ]:
model.summary()
Model: "sequential_9"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d_36 (Conv2D)          (None, 96, 146, 32)       2432      
                                                                 
 max_pooling2d_36 (MaxPoolin  (None, 48, 73, 32)       0         
 g2D)                                                            
                                                                 
 conv2d_37 (Conv2D)          (None, 46, 71, 64)        18496     
                                                                 
 max_pooling2d_37 (MaxPoolin  (None, 23, 35, 64)       0         
 g2D)                                                            
                                                                 
 conv2d_38 (Conv2D)          (None, 21, 33, 128)       73856     
                                                                 
 max_pooling2d_38 (MaxPoolin  (None, 10, 16, 128)      0         
 g2D)                                                            
                                                                 
 conv2d_39 (Conv2D)          (None, 8, 14, 256)        295168    
                                                                 
 max_pooling2d_39 (MaxPoolin  (None, 4, 7, 256)        0         
 g2D)                                                            
                                                                 
 flatten_9 (Flatten)         (None, 7168)              0         
                                                                 
 dense_27 (Dense)            (None, 512)               3670528   
                                                                 
 dropout_18 (Dropout)        (None, 512)               0         
                                                                 
 dense_28 (Dense)            (None, 256)               131328    
                                                                 
 dropout_19 (Dropout)        (None, 256)               0         
                                                                 
 dense_29 (Dense)            (None, 3)                 771       
                                                                 
=================================================================
Total params: 4,192,579
Trainable params: 4,192,579
Non-trainable params: 0
_________________________________________________________________
In [ ]:
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
In [ ]:
train_acc_sum = 0.0
val_acc_sum = 0.0
counter = 0
ep = 0

class ModelCallback(Callback):
  # Stop training once both accuracies have been >= 0.96 and both losses
  # <= 0.06 for 12 (not necessarily consecutive) epochs
  def on_epoch_end(self, epoch, logs=None):
    global train_acc_sum, val_acc_sum, counter, ep

    curr_train_acc = round(logs['accuracy'], 2)
    curr_val_acc = round(logs['val_accuracy'], 2)
    curr_train_loss = round(logs['loss'], 2)
    curr_val_loss = round(logs['val_loss'], 2)
    ep += 1  # total epochs actually run; used for plotting later

    # Running averages, reported every 10 epochs
    train_acc_sum += curr_train_acc
    val_acc_sum += curr_val_acc
    if (epoch + 1) % 10 == 0:
      print("Epoch of {}: Train rate: {}; Validation rate: {}; Count: {}".format(
          epoch + 1, train_acc_sum / (epoch + 1), val_acc_sum / (epoch + 1), counter))

    if curr_train_acc >= 0.96 and curr_val_acc >= 0.96 and curr_train_loss <= 0.06 and curr_val_loss <= 0.06:
      counter += 1
      if counter == 12:
        print("Training stopped at epoch of {} with train accuracy: {} and validation accuracy: {}".format(
            epoch + 1, curr_train_acc, curr_val_acc))
        self.model.stop_training = True
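For reference, Keras also ships a built-in EarlyStopping callback covering the common case of stopping when a monitored metric stops improving; a minimal sketch of an equivalent setup, had the custom threshold logic above not been needed:
In [ ]:
# Built-in alternative: stop after val_accuracy fails to improve for 12 epochs
from tensorflow.keras.callbacks import EarlyStopping

early_stop = EarlyStopping(monitor='val_accuracy', patience=12,
                           restore_best_weights=True, verbose=1)
# Usage: model.fit(..., callbacks=[early_stop])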
In [ ]:
%%time
hist = model.fit(
      train_generator,
      epochs=100,
      validation_data=validation_generator,
      callbacks=[ModelCallback()],
      verbose=2)
Epoch 1/100
82/82 - 10s - loss: 1.0651 - accuracy: 0.4055 - val_loss: 0.9162 - val_accuracy: 0.5228 - 10s/epoch - 121ms/step
Epoch 2/100
82/82 - 9s - loss: 0.6722 - accuracy: 0.6860 - val_loss: 0.5975 - val_accuracy: 0.7260 - 9s/epoch - 112ms/step
Epoch 3/100
82/82 - 9s - loss: 0.5118 - accuracy: 0.7706 - val_loss: 0.5822 - val_accuracy: 0.7648 - 9s/epoch - 112ms/step
Epoch 4/100
82/82 - 9s - loss: 0.4112 - accuracy: 0.8361 - val_loss: 0.3345 - val_accuracy: 0.8721 - 9s/epoch - 113ms/step
Epoch 5/100
82/82 - 9s - loss: 0.3161 - accuracy: 0.8712 - val_loss: 0.5564 - val_accuracy: 0.8139 - 9s/epoch - 112ms/step
Epoch 6/100
82/82 - 9s - loss: 0.2791 - accuracy: 0.8956 - val_loss: 0.7173 - val_accuracy: 0.7215 - 9s/epoch - 112ms/step
Epoch 7/100
82/82 - 10s - loss: 0.2129 - accuracy: 0.9085 - val_loss: 0.2987 - val_accuracy: 0.8847 - 10s/epoch - 119ms/step
Epoch 8/100
82/82 - 9s - loss: 0.1861 - accuracy: 0.9314 - val_loss: 0.3199 - val_accuracy: 0.8721 - 9s/epoch - 112ms/step
Epoch 9/100
82/82 - 9s - loss: 0.1604 - accuracy: 0.9459 - val_loss: 0.1595 - val_accuracy: 0.9452 - 9s/epoch - 113ms/step
Epoch 10/100
Epoch of 10: Train rate: 0.8220000000000001; Validation rate: 0.805; Count: 0
82/82 - 9s - loss: 0.1292 - accuracy: 0.9497 - val_loss: 0.1724 - val_accuracy: 0.9361 - 9s/epoch - 112ms/step
Epoch 11/100
82/82 - 9s - loss: 0.1434 - accuracy: 0.9482 - val_loss: 0.3039 - val_accuracy: 0.8950 - 9s/epoch - 112ms/step
Epoch 12/100
82/82 - 9s - loss: 0.1252 - accuracy: 0.9588 - val_loss: 0.2707 - val_accuracy: 0.8938 - 9s/epoch - 112ms/step
Epoch 13/100
82/82 - 9s - loss: 0.0789 - accuracy: 0.9741 - val_loss: 0.1680 - val_accuracy: 0.9463 - 9s/epoch - 112ms/step
Epoch 14/100
82/82 - 9s - loss: 0.1165 - accuracy: 0.9611 - val_loss: 0.1531 - val_accuracy: 0.9429 - 9s/epoch - 112ms/step
Epoch 15/100
82/82 - 9s - loss: 0.0830 - accuracy: 0.9695 - val_loss: 0.1517 - val_accuracy: 0.9521 - 9s/epoch - 112ms/step
Epoch 16/100
82/82 - 9s - loss: 0.0743 - accuracy: 0.9779 - val_loss: 0.1195 - val_accuracy: 0.9669 - 9s/epoch - 112ms/step
Epoch 17/100
82/82 - 9s - loss: 0.0687 - accuracy: 0.9764 - val_loss: 0.1335 - val_accuracy: 0.9578 - 9s/epoch - 112ms/step
Epoch 18/100
82/82 - 10s - loss: 0.0501 - accuracy: 0.9802 - val_loss: 0.1575 - val_accuracy: 0.9463 - 10s/epoch - 123ms/step
Epoch 19/100
82/82 - 9s - loss: 0.0601 - accuracy: 0.9779 - val_loss: 0.2628 - val_accuracy: 0.9247 - 9s/epoch - 113ms/step
Epoch 20/100
Epoch of 20: Train rate: 0.8959999999999999; Validation rate: 0.8710000000000001; Count: 0
82/82 - 9s - loss: 0.1231 - accuracy: 0.9665 - val_loss: 0.1448 - val_accuracy: 0.9498 - 9s/epoch - 111ms/step
Epoch 21/100
82/82 - 9s - loss: 0.0923 - accuracy: 0.9726 - val_loss: 0.1022 - val_accuracy: 0.9726 - 9s/epoch - 112ms/step
Epoch 22/100
82/82 - 9s - loss: 0.0383 - accuracy: 0.9870 - val_loss: 0.1286 - val_accuracy: 0.9623 - 9s/epoch - 112ms/step
Epoch 23/100
82/82 - 9s - loss: 0.0571 - accuracy: 0.9825 - val_loss: 0.1968 - val_accuracy: 0.9486 - 9s/epoch - 113ms/step
Epoch 24/100
82/82 - 9s - loss: 0.0288 - accuracy: 0.9893 - val_loss: 0.1322 - val_accuracy: 0.9669 - 9s/epoch - 112ms/step
Epoch 25/100
82/82 - 9s - loss: 0.0613 - accuracy: 0.9809 - val_loss: 0.1880 - val_accuracy: 0.9406 - 9s/epoch - 112ms/step
Epoch 26/100
82/82 - 9s - loss: 0.0524 - accuracy: 0.9825 - val_loss: 0.7506 - val_accuracy: 0.8710 - 9s/epoch - 112ms/step
Epoch 27/100
82/82 - 9s - loss: 0.0482 - accuracy: 0.9878 - val_loss: 0.0933 - val_accuracy: 0.9680 - 9s/epoch - 113ms/step
Epoch 28/100
82/82 - 9s - loss: 0.0624 - accuracy: 0.9733 - val_loss: 0.1363 - val_accuracy: 0.9623 - 9s/epoch - 112ms/step
Epoch 29/100
82/82 - 9s - loss: 0.0252 - accuracy: 0.9870 - val_loss: 0.1396 - val_accuracy: 0.9692 - 9s/epoch - 112ms/step
Epoch 30/100
Epoch of 30: Train rate: 0.9246666666666664; Validation rate: 0.8986666666666667; Count: 0
82/82 - 9s - loss: 0.0480 - accuracy: 0.9840 - val_loss: 0.0839 - val_accuracy: 0.9806 - 9s/epoch - 111ms/step
Epoch 31/100
82/82 - 9s - loss: 0.0574 - accuracy: 0.9817 - val_loss: 0.1400 - val_accuracy: 0.9543 - 9s/epoch - 111ms/step
Epoch 32/100
82/82 - 9s - loss: 0.0317 - accuracy: 0.9901 - val_loss: 0.0862 - val_accuracy: 0.9703 - 9s/epoch - 114ms/step
Epoch 33/100
82/82 - 9s - loss: 0.0491 - accuracy: 0.9840 - val_loss: 0.1354 - val_accuracy: 0.9578 - 9s/epoch - 112ms/step
Epoch 34/100
82/82 - 9s - loss: 0.0521 - accuracy: 0.9848 - val_loss: 0.1805 - val_accuracy: 0.9429 - 9s/epoch - 112ms/step
Epoch 35/100
82/82 - 9s - loss: 0.0410 - accuracy: 0.9893 - val_loss: 0.0759 - val_accuracy: 0.9703 - 9s/epoch - 113ms/step
Epoch 36/100
82/82 - 9s - loss: 0.0308 - accuracy: 0.9901 - val_loss: 0.0832 - val_accuracy: 0.9737 - 9s/epoch - 112ms/step
Epoch 37/100
82/82 - 10s - loss: 0.0473 - accuracy: 0.9825 - val_loss: 0.0800 - val_accuracy: 0.9795 - 10s/epoch - 119ms/step
Epoch 38/100
82/82 - 9s - loss: 0.0151 - accuracy: 0.9947 - val_loss: 0.0562 - val_accuracy: 0.9863 - 9s/epoch - 112ms/step
Epoch 39/100
82/82 - 9s - loss: 0.0482 - accuracy: 0.9909 - val_loss: 0.1708 - val_accuracy: 0.9498 - 9s/epoch - 113ms/step
Epoch 40/100
Epoch of 40: Train rate: 0.9399999999999998; Validation rate: 0.91375; Count: 1
82/82 - 9s - loss: 0.0545 - accuracy: 0.9863 - val_loss: 0.3166 - val_accuracy: 0.9064 - 9s/epoch - 112ms/step
Epoch 41/100
82/82 - 9s - loss: 0.0494 - accuracy: 0.9848 - val_loss: 0.0924 - val_accuracy: 0.9749 - 9s/epoch - 112ms/step
Epoch 42/100
82/82 - 9s - loss: 0.0492 - accuracy: 0.9809 - val_loss: 0.1593 - val_accuracy: 0.9509 - 9s/epoch - 113ms/step
Epoch 43/100
82/82 - 10s - loss: 0.0321 - accuracy: 0.9886 - val_loss: 0.0640 - val_accuracy: 0.9795 - 10s/epoch - 122ms/step
Epoch 44/100
82/82 - 9s - loss: 0.0185 - accuracy: 0.9931 - val_loss: 0.0672 - val_accuracy: 0.9806 - 9s/epoch - 112ms/step
Epoch 45/100
82/82 - 9s - loss: 0.0177 - accuracy: 0.9947 - val_loss: 0.2054 - val_accuracy: 0.9509 - 9s/epoch - 112ms/step
Epoch 46/100
82/82 - 9s - loss: 0.0993 - accuracy: 0.9748 - val_loss: 0.1812 - val_accuracy: 0.9418 - 9s/epoch - 112ms/step
Epoch 47/100
82/82 - 9s - loss: 0.0362 - accuracy: 0.9893 - val_loss: 0.0924 - val_accuracy: 0.9646 - 9s/epoch - 113ms/step
Epoch 48/100
82/82 - 9s - loss: 0.0269 - accuracy: 0.9916 - val_loss: 0.0932 - val_accuracy: 0.9737 - 9s/epoch - 112ms/step
Epoch 49/100
82/82 - 9s - loss: 0.0378 - accuracy: 0.9931 - val_loss: 0.1333 - val_accuracy: 0.9635 - 9s/epoch - 112ms/step
Epoch 50/100
Epoch of 50: Train rate: 0.949; Validation rate: 0.9233999999999999; Count: 2
82/82 - 9s - loss: 0.0546 - accuracy: 0.9802 - val_loss: 0.1381 - val_accuracy: 0.9578 - 9s/epoch - 112ms/step
Epoch 51/100
82/82 - 9s - loss: 0.0592 - accuracy: 0.9809 - val_loss: 0.1121 - val_accuracy: 0.9646 - 9s/epoch - 112ms/step
Epoch 52/100
82/82 - 9s - loss: 0.0246 - accuracy: 0.9931 - val_loss: 0.4050 - val_accuracy: 0.9053 - 9s/epoch - 112ms/step
Epoch 53/100
82/82 - 9s - loss: 0.0729 - accuracy: 0.9771 - val_loss: 0.0797 - val_accuracy: 0.9806 - 9s/epoch - 112ms/step
Epoch 54/100
82/82 - 9s - loss: 0.0224 - accuracy: 0.9931 - val_loss: 0.0889 - val_accuracy: 0.9726 - 9s/epoch - 112ms/step
Epoch 55/100
82/82 - 9s - loss: 0.0588 - accuracy: 0.9817 - val_loss: 0.3593 - val_accuracy: 0.8824 - 9s/epoch - 112ms/step
Epoch 56/100
82/82 - 9s - loss: 0.0198 - accuracy: 0.9916 - val_loss: 0.0555 - val_accuracy: 0.9863 - 9s/epoch - 113ms/step
Epoch 57/100
82/82 - 9s - loss: 0.0204 - accuracy: 0.9916 - val_loss: 0.0788 - val_accuracy: 0.9726 - 9s/epoch - 111ms/step
Epoch 58/100
82/82 - 9s - loss: 0.0459 - accuracy: 0.9886 - val_loss: 0.0884 - val_accuracy: 0.9715 - 9s/epoch - 112ms/step
Epoch 59/100
82/82 - 9s - loss: 0.0438 - accuracy: 0.9901 - val_loss: 0.1024 - val_accuracy: 0.9760 - 9s/epoch - 113ms/step
Epoch 60/100
Epoch of 60: Train rate: 0.9551666666666666; Validation rate: 0.929333333333333; Count: 3
82/82 - 9s - loss: 0.0462 - accuracy: 0.9848 - val_loss: 0.0793 - val_accuracy: 0.9840 - 9s/epoch - 112ms/step
Epoch 61/100
82/82 - 9s - loss: 0.0166 - accuracy: 0.9947 - val_loss: 0.0668 - val_accuracy: 0.9863 - 9s/epoch - 112ms/step
Epoch 62/100
82/82 - 9s - loss: 0.0052 - accuracy: 0.9970 - val_loss: 0.0829 - val_accuracy: 0.9829 - 9s/epoch - 112ms/step
Epoch 63/100
82/82 - 9s - loss: 0.0325 - accuracy: 0.9916 - val_loss: 0.0931 - val_accuracy: 0.9783 - 9s/epoch - 111ms/step
Epoch 64/100
82/82 - 9s - loss: 0.1130 - accuracy: 0.9748 - val_loss: 0.1056 - val_accuracy: 0.9646 - 9s/epoch - 112ms/step
Epoch 65/100
82/82 - 9s - loss: 0.0645 - accuracy: 0.9825 - val_loss: 0.0947 - val_accuracy: 0.9795 - 9s/epoch - 112ms/step
Epoch 66/100
82/82 - 9s - loss: 0.0307 - accuracy: 0.9886 - val_loss: 0.0811 - val_accuracy: 0.9772 - 9s/epoch - 114ms/step
Epoch 67/100
82/82 - 9s - loss: 0.0420 - accuracy: 0.9893 - val_loss: 0.0983 - val_accuracy: 0.9760 - 9s/epoch - 111ms/step
Epoch 68/100
82/82 - 9s - loss: 0.0462 - accuracy: 0.9863 - val_loss: 0.0937 - val_accuracy: 0.9749 - 9s/epoch - 113ms/step
Epoch 69/100
82/82 - 9s - loss: 0.0315 - accuracy: 0.9924 - val_loss: 0.4277 - val_accuracy: 0.8938 - 9s/epoch - 113ms/step
Epoch 70/100
Epoch of 70: Train rate: 0.9598571428571426; Validation rate: 0.9348571428571424; Count: 3
82/82 - 9s - loss: 0.0441 - accuracy: 0.9901 - val_loss: 0.0791 - val_accuracy: 0.9737 - 9s/epoch - 112ms/step
Epoch 71/100
82/82 - 9s - loss: 0.0146 - accuracy: 0.9939 - val_loss: 0.0479 - val_accuracy: 0.9772 - 9s/epoch - 112ms/step
Epoch 72/100
82/82 - 9s - loss: 0.0163 - accuracy: 0.9947 - val_loss: 0.0406 - val_accuracy: 0.9852 - 9s/epoch - 113ms/step
Epoch 73/100
82/82 - 10s - loss: 0.0211 - accuracy: 0.9916 - val_loss: 0.0970 - val_accuracy: 0.9658 - 10s/epoch - 118ms/step
Epoch 74/100
82/82 - 9s - loss: 0.0140 - accuracy: 0.9947 - val_loss: 0.1085 - val_accuracy: 0.9703 - 9s/epoch - 112ms/step
Epoch 75/100
82/82 - 9s - loss: 0.0092 - accuracy: 0.9977 - val_loss: 0.0589 - val_accuracy: 0.9852 - 9s/epoch - 113ms/step
Epoch 76/100
82/82 - 9s - loss: 0.0092 - accuracy: 0.9977 - val_loss: 0.0667 - val_accuracy: 0.9852 - 9s/epoch - 112ms/step
Epoch 77/100
82/82 - 9s - loss: 0.0020 - accuracy: 1.0000 - val_loss: 0.0705 - val_accuracy: 0.9852 - 9s/epoch - 113ms/step
Epoch 78/100
82/82 - 9s - loss: 0.0674 - accuracy: 0.9901 - val_loss: 0.0524 - val_accuracy: 0.9874 - 9s/epoch - 112ms/step
Epoch 79/100
82/82 - 9s - loss: 0.0063 - accuracy: 0.9977 - val_loss: 0.0784 - val_accuracy: 0.9806 - 9s/epoch - 113ms/step
Epoch 80/100
Epoch of 80: Train rate: 0.9642499999999995; Validation rate: 0.9407499999999993; Count: 6
82/82 - 9s - loss: 0.0068 - accuracy: 0.9977 - val_loss: 0.1090 - val_accuracy: 0.9692 - 9s/epoch - 113ms/step
Epoch 81/100
82/82 - 9s - loss: 0.0170 - accuracy: 0.9931 - val_loss: 0.0601 - val_accuracy: 0.9829 - 9s/epoch - 111ms/step
Epoch 82/100
82/82 - 9s - loss: 0.0152 - accuracy: 0.9939 - val_loss: 0.1290 - val_accuracy: 0.9749 - 9s/epoch - 113ms/step
Epoch 83/100
82/82 - 9s - loss: 0.0502 - accuracy: 0.9855 - val_loss: 0.0832 - val_accuracy: 0.9760 - 9s/epoch - 111ms/step
Epoch 84/100
82/82 - 9s - loss: 0.0169 - accuracy: 0.9939 - val_loss: 0.0809 - val_accuracy: 0.9795 - 9s/epoch - 112ms/step
Epoch 85/100
82/82 - 9s - loss: 0.0082 - accuracy: 0.9985 - val_loss: 0.0859 - val_accuracy: 0.9829 - 9s/epoch - 113ms/step
Epoch 86/100
82/82 - 9s - loss: 0.0513 - accuracy: 0.9878 - val_loss: 0.3269 - val_accuracy: 0.9144 - 9s/epoch - 111ms/step
Epoch 87/100
82/82 - 9s - loss: 0.0248 - accuracy: 0.9886 - val_loss: 0.1889 - val_accuracy: 0.9509 - 9s/epoch - 113ms/step
Epoch 88/100
82/82 - 9s - loss: 0.0114 - accuracy: 0.9970 - val_loss: 0.0574 - val_accuracy: 0.9829 - 9s/epoch - 113ms/step
Epoch 89/100
82/82 - 9s - loss: 0.0061 - accuracy: 0.9977 - val_loss: 0.2952 - val_accuracy: 0.9418 - 9s/epoch - 113ms/step
Epoch 90/100
Epoch of 90: Train rate: 0.9675555555555547; Validation rate: 0.9434444444444441; Count: 8
82/82 - 9s - loss: 0.0141 - accuracy: 0.9962 - val_loss: 0.0636 - val_accuracy: 0.9840 - 9s/epoch - 112ms/step
Epoch 91/100
82/82 - 9s - loss: 0.0104 - accuracy: 0.9977 - val_loss: 0.0863 - val_accuracy: 0.9795 - 9s/epoch - 112ms/step
Epoch 92/100
82/82 - 9s - loss: 0.0066 - accuracy: 0.9970 - val_loss: 0.1507 - val_accuracy: 0.9680 - 9s/epoch - 112ms/step
Epoch 93/100
82/82 - 9s - loss: 0.0221 - accuracy: 0.9931 - val_loss: 0.0615 - val_accuracy: 0.9817 - 9s/epoch - 112ms/step
Epoch 94/100
82/82 - 10s - loss: 0.0207 - accuracy: 0.9947 - val_loss: 0.0504 - val_accuracy: 0.9852 - 10s/epoch - 120ms/step
Epoch 95/100
Training stopped at epoch of 95 with train accuracy: 0.99 and validation accuracy: 0.98
82/82 - 9s - loss: 0.0308 - accuracy: 0.9924 - val_loss: 0.0490 - val_accuracy: 0.9840 - 9s/epoch - 112ms/step
CPU times: user 16min 15s, sys: 19.3 s, total: 16min 34s
Wall time: 14min 42s
In [ ]:
plt.plot(range(1, ep + 1), hist.history['accuracy'], label='Training')
plt.plot(range(1, ep + 1), hist.history['val_accuracy'], label='Validation')
plt.title("Train Accuracy - Validation Accuracy")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
In [ ]:
plt.clf()

plt.plot(range(1, ep + 1), hist.history['loss'], label='Training')
plt.plot(range(1, ep + 1), hist.history['val_loss'], label='Validation')
plt.title("Train Loss - Validation Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.show()
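If the trained weights are worth keeping, the model can be persisted at this point; a minimal sketch (the filename is arbitrary):
In [ ]:
# Save the trained model so it can be reloaded without retraining
model.save('rockpaperscissors_cnn.h5')
# Reload later with: models.load_model('rockpaperscissors_cnn.h5')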
In [ ]:
train_generator.class_indices
Out[ ]:
{'paper': 0, 'rock': 1, 'scissors': 2}
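This mapping is what the hardcoded if/elif branches in the prediction cell below rely on. Inverting class_indices gives an index-to-label lookup that avoids that coupling; a sketch:
In [ ]:
# Invert class_indices: predicted index -> class label
idx_to_label = {v: k for k, v in train_generator.class_indices.items()}
idx_to_label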
In [ ]:
uploaded = files.upload()

for fn in uploaded.keys():
  # Display the uploaded image at the size the model was trained on
  img = image.load_img(fn, target_size=(100, 150))
  plt.xticks([])
  plt.yticks([])
  plt.imshow(img)

  # Build a batch of shape (1, 100, 150, 3) and apply the same
  # 1/255 rescaling used by the training generators
  x = image.img_to_array(img) / 255.0
  x = np.expand_dims(x, axis=0)

  classes = model.predict(x)
  print("Classes:", classes)
  output_class = np.argmax(classes)
  print(fn)
  if output_class == 0:
    print('paper')
  elif output_class == 1:
    print('rock')
  else:
    print('scissors')
Saving woman-hand-scissors-gesture-28257308.jpg to woman-hand-scissors-gesture-28257308.jpg
Classes: [[0. 0. 1.]]
woman-hand-scissors-gesture-28257308.jpg
scissors