Implementing the Happy House Program with Keras

Ng's Course 4, Week 2 assignment implements the Happy House with Keras, i.e., the Keras framework is used to tell smiling faces from non-smiling ones. The code is given below. Although it reaches very high accuracy on both the training and test sets, it clearly does not recognize the pictures I feed in myself. Ng points out the reason in the assignment:
The training/test sets were quite similar; for example, all the pictures were taken against the same background (since a front door camera is always mounted in the same position). This makes the problem easier, but a model trained on this data may or may not work on your own data. But feel free to give it a try!
To train a better model, you have to experiment with the framework yourself, tune the hyperparameters, or pick a different training set; a data-augmentation sketch along these lines follows the full listing below.
import keras.backend as K
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
from keras import layers
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
from matplotlib.pyplot import imshow
from keras.utils import plot_model
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot

K.set_image_data_format('channels_last')


# Helper metric defined in the original assignment; it is not actually used
# when compiling the model below.
def mean_pred(y_true, y_pred):
    return K.mean(y_pred)


def load_dataset():
    train_dataset = h5py.File('datasets/train_happy.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # train set labels

    test_dataset = h5py.File('datasets/test_happy.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # test set labels

    classes = np.array(test_dataset["list_classes"][:])  # the list of classes

    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes


X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
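
# In the course's happy-face dataset, X_*_orig has shape (m, 64, 64, 3) and
# Y_*_orig has shape (1, m), with m = 600 training and 150 test examples.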

# Normalize image vectors
X_train = X_train_orig/255.
X_test = X_test_orig/255.

# Reshape labels from (1, m) to (m, 1)
Y_train = Y_train_orig.T
Y_test = Y_test_orig.T


# GRADED FUNCTION: HappyModel
def HappyModel(input_shape):
    # input_shape -- shape of the images of the dataset (height, width, channels)
    # Returns: model -- a Model() instance in Keras

    X_input = Input(input_shape)

    # CONV -> BN -> RELU block
    X = ZeroPadding2D((3, 3))(X_input)
    X = Conv2D(32, (7, 7), strides=(1, 1), name='conv0')(X)
    X = BatchNormalization(axis=3, name='bn0')(X)
    X = Activation('relu')(X)

    # MAXPOOL, then flatten and a single sigmoid unit for binary classification
    X = MaxPooling2D((2, 2), name='max_pool')(X)
    X = Flatten()(X)
    X = Dense(1, activation='sigmoid', name='fc')(X)

    model = Model(inputs=X_input, outputs=X, name='HappyModel')

    return model


# train and test model above
# 1 Create the model by calling the function above
happyModel = HappyModel(X_train.shape[1:])

# 2 Compile the model by calling model.compile(optimizer = "...", loss = "...", metrics = ["accuracy"])
happyModel.compile(optimizer="adam", loss='binary_crossentropy', metrics=['accuracy'])

# 3 Train the model on train data by calling model.fit(x = ..., y = ..., epochs = ..., batch_size = ...)
happyModel.fit(x=X_train, y=Y_train, epochs=40, batch_size=16)
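
# To watch generalization while training, model.fit also accepts
# validation_data=(X_test, Y_test); the assignment fits on the training set only.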

# 4 Test the model on test data by calling model.evaluate(x = ..., y = ...)
preds = happyModel.evaluate(x=X_test, y=Y_test)

print()
print("Loss = " + str(preds[0]))
print("Test Accuracy = " + str(preds[1]))


# test
img_path = 'images/cry.jpg'
img = image.load_img(img_path, target_size=(64, 64))
imshow(img)
plt.show()

x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = x / 255.  # scale pixels the same way the training images were scaled

print(happyModel.predict(x))
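
# The sigmoid output is the model's probability that the face is smiling;
# thresholding at 0.5 (an illustrative cut-off, not part of the notebook)
# turns it into a hard label.
print("smiling" if happyModel.predict(x)[0, 0] >= 0.5 else "not smiling")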

happyModel.summary()

plot_model(happyModel, to_file='HappyModel.png')
SVG(model_to_dot(happyModel).create(prog='dot', format='svg'))
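
One way to push past the near-identical backgrounds of the course data is to augment the training images before fitting. Below is a minimal sketch, assuming the same Keras version as above; the names datagen and augModel and the augmentation ranges are my own illustrative choices, not part of the course notebook, and would need tuning for your own pictures.

from keras.preprocessing.image import ImageDataGenerator

# Illustrative augmentation ranges; adjust them for your own data.
datagen = ImageDataGenerator(rotation_range=15,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             zoom_range=0.1,
                             horizontal_flip=True)

augModel = HappyModel(X_train.shape[1:])
augModel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# fit_generator streams randomly transformed batches instead of the raw arrays
augModel.fit_generator(datagen.flow(X_train, Y_train, batch_size=16),
                       steps_per_epoch=len(X_train) // 16,
                       epochs=40)

Because every epoch sees slightly different rotations, shifts, and flips of the same 600 pictures, the model is less likely to latch onto the fixed front-door background that the assignment warns about.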
