Keras Automatic Hyperparameter Tuning
How to Grid Search Hyperparameters for Deep Learning Models in Python With Keras
How to Use Keras Models in scikit-learn
A Keras model can be wrapped for use in scikit-learn with KerasClassifier (or KerasRegressor) and then trained through fit(). For example:

def create_model():
    ...
    return model

model = KerasClassifier(build_fn=create_model)

The KerasClassifier constructor accepts all the parameters of fit(self, x, y, batch_size=32, epochs=10, verbose=1, callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0). For example:

def create_model():
    ...
    return model

model = KerasClassifier(build_fn=create_model, epochs=10)

It can also accept arguments for the custom build function; the parameter names must match exactly. For example:

def create_model(dropout_rate=0.0):
    ...
    return model

model = KerasClassifier(build_fn=create_model, dropout_rate=0.2)
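Once wrapped, the model behaves like any scikit-learn estimator. As a minimal runnable sketch (the two-layer network and the synthetic data here are illustrative assumptions, not from the original), the wrapper can be passed to utilities such as cross_val_score:

import numpy as np
from sklearn.model_selection import cross_val_score
from keras import models, layers
from keras.wrappers.scikit_learn import KerasClassifier

def create_model():
    # A small binary classifier; any compiled Keras model works here.
    model = models.Sequential()
    model.add(layers.Dense(12, activation='relu', input_shape=(8,)))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
    return model

# Synthetic data purely for illustration.
X = np.random.rand(100, 8)
Y = np.random.randint(2, size=100)

model = KerasClassifier(build_fn=create_model, epochs=10, batch_size=8, verbose=0)
print(cross_val_score(model, X, Y, cv=3).mean())  # mean 3-fold CV accuracy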
How to Use Grid Search in scikit-learn
scikit-learn's GridSearchCV implements grid search. Its scoring parameter sets the evaluation metric. Its param_grid parameter is a dictionary mapping each parameter name to a list of candidate values; GridSearchCV evaluates every combination and reports the best-scoring one. These parameters can be training parameters (epochs, batch size, etc.) or model parameters (kernel size, pool size, number of filters, etc.). n_jobs defaults to 1, meaning a single process; setting it to -1 launches as many workers as possible. (In my experiments, setting it to -1 caused the search to hang indefinitely, so n_jobs is set to 1 in all the code below.) GridSearchCV evaluates each model with cross-validation. For example:
param_grid = dict(epochs=[10,20,30])
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X, Y)
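The scoring metric and the number of cross-validation folds can also be set explicitly. As a sketch ('accuracy' and cv=3 are illustrative choices, not from the original):

grid = GridSearchCV(estimator=model, param_grid=param_grid, scoring='accuracy', cv=3, n_jobs=1)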
Problem Description
The following examples illustrate GridSearchCV on a small dataset, the Pima Indians onset of diabetes classification dataset. It is a binary classification problem: predict whether or not a patient has diabetes (see the dataset's description page for details).

How to Tune Batch Size and Number of Epochs
Because the EarlyStopping callback can be used to monitor the training process, the exact choice of the epochs parameter may not matter much (see the sketch at the end of this section).

import numpy as np
from sklearn.model_selection import GridSearchCV
from keras import models
from keras import layers
from keras import optimizers
from keras.wrappers import scikit_learn
# Model-building function for KerasClassifier
def create_model():
    # create model
    model = models.Sequential()
    model.add(layers.Dense(12, activation='relu', input_shape=(8,)))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
    return model

# Load the dataset
dataset = np.loadtxt('pima-indians-diabetes.csv', delimiter=',')
# Split into inputs X and labels Y
X = dataset[:, :8]
Y = dataset[:, 8]
# Standardize the inputs
means = np.mean(X, axis=0)
X -= means
stds = np.std(X, axis=0)
X /= stds
# Set the random seed (for reproducibility)
seed = 7
np.random.seed(seed)
# Wrap the model for scikit-learn
model = scikit_learn.KerasClassifier(build_fn=create_model, verbose=0)
# Candidate batch sizes and epoch counts
batch_size = [8, 16]
epochs = [10, 50]
# Run GridSearchCV over all combinations
param_grid = dict(batch_size=batch_size, epochs=epochs)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X, Y)
# Summarize results
print('Best: {} using {}'.format(grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, std, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, std, param))
Best: 0.7799479166666666 using {'batch_size': 8, 'epochs': 50}
0.763021 (0.041504) with: {'batch_size': 8, 'epochs': 10}
0.779948 (0.034104) with: {'batch_size': 8, 'epochs': 50}
0.744792 (0.030647) with: {'batch_size': 16, 'epochs': 10}
0.769531 (0.039836) with: {'batch_size': 16, 'epochs': 50}
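As a sketch of the EarlyStopping idea mentioned above, the callback can be passed through the wrapper so that a generous epochs value is cut short once training stops improving. This reuses create_model and the imports from the listing above; the monitor and patience values are illustrative assumptions:

from keras.callbacks import EarlyStopping

# Stop when the training loss has not improved for 5 epochs (illustrative values;
# there is no validation split here, so the training loss is monitored).
early_stopping = EarlyStopping(monitor='loss', patience=5)
model = scikit_learn.KerasClassifier(build_fn=create_model, epochs=100, batch_size=8,
                                     callbacks=[early_stopping], verbose=0)
model.fit(X, Y)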
How to Tune the Training Optimization Algorithm
# Model-building function for KerasClassifier
def create_model(optimizer='adam'):
    # create model
    model = models.Sequential()
    model.add(layers.Dense(12, activation='relu', input_shape=(8,)))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['acc'])
    return model

# Load the dataset
dataset = np.loadtxt('pima-indians-diabetes.csv', delimiter=',')
# Split into inputs X and labels Y
X = dataset[:, :8]
Y = dataset[:, 8]
# Standardize the inputs
means = np.mean(X, axis=0)
X -= means
stds = np.std(X, axis=0)
X /= stds
# Set the random seed (for reproducibility)
seed = 7
np.random.seed(seed)
# Wrap the model for scikit-learn
model = scikit_learn.KerasClassifier(build_fn=create_model, epochs=20, batch_size=8, verbose=0)
# Candidate optimizers
optimizer = ['sgd', 'rmsprop', 'adam', 'adagrad']
# Run GridSearchCV over all combinations
param_grid = dict(optimizer=optimizer)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X, Y)
# Summarize results
print('Best: {} using {}'.format(grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, std, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, std, param))
Best: 0.7682291666666666 using {'optimizer': 'rmsprop'}
0.765625 (0.037603) with: {'optimizer': 'sgd'}
0.768229 (0.025582) with: {'optimizer': 'rmsprop'}
0.764323 (0.031466) with: {'optimizer': 'adam'}
0.760417 (0.034104) with: {'optimizer': 'adagrad'}
How to Tune Learning Rate and Momentum
# Model-building function for KerasClassifier
def create_model(learning_rate=0.01, momentum=0):
    # create model
    model = models.Sequential()
    model.add(layers.Dense(12, activation='relu', input_shape=(8,)))
    model.add(layers.Dense(1, activation='sigmoid'))
    optimizer = optimizers.SGD(lr=learning_rate, momentum=momentum)
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['acc'])
    return model

# Load the dataset
dataset = np.loadtxt('pima-indians-diabetes.csv', delimiter=',')
# Split into inputs X and labels Y
X = dataset[:, :8]
Y = dataset[:, 8]
# Standardize the inputs
means = np.mean(X, axis=0)
X -= means
stds = np.std(X, axis=0)
X /= stds
# Set the random seed (for reproducibility)
seed = 7
np.random.seed(seed)
# Wrap the model for scikit-learn
model = scikit_learn.KerasClassifier(build_fn=create_model, epochs=20, batch_size=8, verbose=0)
# Candidate learning rates and momentum values
learning_rate = [0.001, 0.01]
momentum = [0.0, 0.2, 0.4]
# Run GridSearchCV over all combinations
param_grid = dict(learning_rate=learning_rate, momentum=momentum)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X, Y)
# Summarize results
print('Best: {} using {}'.format(grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, std, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, std, param))
Best: 0.7747395833333334 using {'learning_rate': 0.01, 'momentum': 0.0}
0.640625 (0.030425) with: {'learning_rate': 0.001, 'momentum': 0.0}
0.692708 (0.025780) with: {'learning_rate': 0.001, 'momentum': 0.2}
0.686198 (0.017566) with: {'learning_rate': 0.001, 'momentum': 0.4}
0.774740 (0.035132) with: {'learning_rate': 0.01, 'momentum': 0.0}
0.766927 (0.021710) with: {'learning_rate': 0.01, 'momentum': 0.2}
0.769531 (0.033299) with: {'learning_rate': 0.01, 'momentum': 0.4}
How to Tune Network Weight Initialization
# Model-building function for KerasClassifier
def create_model(init_mode='random_uniform'):
    # create model
    model = models.Sequential()
    model.add(layers.Dense(12, activation='relu', kernel_initializer=init_mode, input_shape=(8,)))
    model.add(layers.Dense(1, activation='sigmoid', kernel_initializer=init_mode))
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
    return model

# Load the dataset
dataset = np.loadtxt('pima-indians-diabetes.csv', delimiter=',')
# Split into inputs X and labels Y
X = dataset[:, :8]
Y = dataset[:, 8]
# Standardize the inputs
means = np.mean(X, axis=0)
X -= means
stds = np.std(X, axis=0)
X /= stds
# Set the random seed (for reproducibility)
seed = 7
np.random.seed(seed)
# Wrap the model for scikit-learn
model = scikit_learn.KerasClassifier(build_fn=create_model, epochs=20, batch_size=8, verbose=0)
# Candidate weight initializers
init_mode = ['he_normal', 'he_uniform', 'glorot_normal', 'glorot_uniform', 'lecun_normal']
# Run GridSearchCV over all combinations
param_grid = dict(init_mode=init_mode)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X, Y)
# Summarize results
print('Best: {} using {}'.format(grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, std, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, std, param))
Best: 0.7760416666666666 using {'init_mode': 'he_normal'}
0.776042 (0.024360) with: {'init_mode': 'he_normal'}
0.764323 (0.025976) with: {'init_mode': 'he_uniform'}
0.769531 (0.025315) with: {'init_mode': 'glorot_normal'}
0.761719 (0.035943) with: {'init_mode': 'glorot_uniform'}
0.763021 (0.038582) with: {'init_mode': 'lecun_normal'}
How to Tune the Neuron Activation Function
# Model-building function for KerasClassifier
def create_model(activation='relu'):
    # create model
    model = models.Sequential()
    model.add(layers.Dense(12, activation=activation, input_shape=(8,)))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
    return model

# Load the dataset
dataset = np.loadtxt('pima-indians-diabetes.csv', delimiter=',')
# Split into inputs X and labels Y
X = dataset[:, :8]
Y = dataset[:, 8]
# Standardize the inputs
means = np.mean(X, axis=0)
X -= means
stds = np.std(X, axis=0)
X /= stds
# Set the random seed (for reproducibility)
seed = 7
np.random.seed(seed)
# Wrap the model for scikit-learn
model = scikit_learn.KerasClassifier(build_fn=create_model, epochs=20, batch_size=8, verbose=0)
# Candidate activation functions for the hidden layer
activation = ['relu', 'tanh', 'softmax', 'linear', 'hard_sigmoid', 'softplus', 'selu']
# Run GridSearchCV over all combinations
param_grid = dict(activation=activation)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X, Y)
# Summarize results
print('Best: {} using {}'.format(grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, std, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, std, param))
Best: 0.7786458333333334 using {'activation': 'softplus'}
0.773438 (0.035516) with: {'activation': 'relu'}
0.766927 (0.024774) with: {'activation': 'tanh'}
0.760417 (0.017566) with: {'activation': 'softmax'}
0.774740 (0.032106) with: {'activation': 'linear'}
0.760417 (0.033502) with: {'activation': 'hard_sigmoid'}
0.778646 (0.022628) with: {'activation': 'softplus'}
0.770833 (0.025780) with: {'activation': 'selu'}
How to Tune Dropout
# Model-building function for KerasClassifier
def create_model(dropout=0.0):
    # create model
    model = models.Sequential()
    model.add(layers.Dense(12, activation='relu', input_shape=(8,)))
    model.add(layers.Dropout(dropout))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
    return model

# Load the dataset
dataset = np.loadtxt('pima-indians-diabetes.csv', delimiter=',')
# Split into inputs X and labels Y
X = dataset[:, :8]
Y = dataset[:, 8]
# Standardize the inputs
means = np.mean(X, axis=0)
X -= means
stds = np.std(X, axis=0)
X /= stds
# Set the random seed (for reproducibility)
seed = 7
np.random.seed(seed)
# Wrap the model for scikit-learn
model = scikit_learn.KerasClassifier(build_fn=create_model, epochs=20, batch_size=8, verbose=0)
# Candidate dropout rates
dropout = [0.2, 0.5]
# Run GridSearchCV over all combinations
param_grid = dict(dropout=dropout)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X, Y)
# Summarize results
print('Best: {} using {}'.format(grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, std, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, std, param))
Best: 0.7708333333333334 using {'dropout': 0.5}
0.769531 (0.029232) with: {'dropout': 0.2}
0.770833 (0.032264) with: {'dropout': 0.5}
How to Tune the Number of Neurons in the Hidden Layer
# Model-building function for KerasClassifier
def create_model(num_neurons=1):
    # create model
    model = models.Sequential()
    model.add(layers.Dense(num_neurons, activation='relu', input_shape=(8,)))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
    return model

# Load the dataset
dataset = np.loadtxt('pima-indians-diabetes.csv', delimiter=',')
# Split into inputs X and labels Y
X = dataset[:, :8]
Y = dataset[:, 8]
# Standardize the inputs
means = np.mean(X, axis=0)
X -= means
stds = np.std(X, axis=0)
X /= stds
# Set the random seed (for reproducibility)
seed = 7
np.random.seed(seed)
# Wrap the model for scikit-learn
model = scikit_learn.KerasClassifier(build_fn=create_model, epochs=20, batch_size=8, verbose=0)
# Candidate hidden-layer sizes
num_neurons = [1, 5, 10, 15, 20]
# Run GridSearchCV over all combinations
param_grid = dict(num_neurons=num_neurons)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X, Y)
# Summarize results
print('Best: {} using {}'.format(grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, std, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, std, param))
Best: 0.7708333333333334 using {'num_neurons': 10}
0.651042 (0.024774) with: {'num_neurons': 1}
0.757812 (0.019918) with: {'num_neurons': 5}
0.770833 (0.038450) with: {'num_neurons': 10}
0.769531 (0.027251) with: {'num_neurons': 15}
0.764323 (0.032734) with: {'num_neurons': 20}