Sophomore Summer Machine Learning: Gradient Descent, plus MLP Code in Theano and Keras

1. Gradient descent in MATLAB
clear;
close all;
clc
%load data sets 
y = load('/Users/tangchao/Desktop/y500*1.txt');
X = load('/Users/tangchao/Desktop/X500*1000.txt');
 
%get sizes 
[xm,xn] = size(X);% xm=500,xn=1000
[ym,yn] = size(y);%ym=500,yn=1

%define theta, the previous-iteration theta, and the convergence test vector
theta = zeros(xn,1);
theta_prev = zeros(xn,1);
test_epsilon = zeros(xn,1);
diff = 0;
all_cost_func = [];
%set epsilon, learning rate (alpha) and iteration_max
epsilon = 0.0001;
iteration_max = 100000;
alpha = 0.005;

for count = 1 : iteration_max
    %reset the cost for this epoch
    cost_func = 0;
    
    for i = 1 : ym
        
        hypothesis = X(i,:) * theta;
        diff = hypothesis - y(i);
        theta = theta - alpha * diff * X(i,:)'; %per-sample (stochastic) update
        %accumulate the squared-error cost
        cost_func = cost_func + (0.5/ym) * diff * diff;
    end
    %record this epoch's cost
    all_cost_func  = [all_cost_func, cost_func];
    
    test_epsilon = theta - theta_prev;
    
    if (all(abs(test_epsilon) < epsilon))
        
        break
        
    else
        
        theta_prev = theta;
        
    end
    
end

% calculate RMSE (gradient descent)
Hypothesis = X * theta;
grad_RMSe = sqrt(sum((y - Hypothesis).^2)/ym);
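
For cross-checking the MATLAB loop, here is a minimal NumPy sketch of the same per-sample gradient descent (a sketch under the shapes above: X is 500x1000 and y is a length-500 vector; the function name sgd_linear is mine, not from the original):

import numpy as np

def sgd_linear(X, y, alpha=0.005, epsilon=1e-4, iteration_max=100000):
    m, n = X.shape
    theta = np.zeros(n)
    for _ in range(iteration_max):
        theta_prev = theta.copy()
        for i in range(m):
            diff = np.dot(X[i], theta) - y[i]    # prediction error on sample i
            theta = theta - alpha * diff * X[i]  # per-sample update, as in the MATLAB loop
        if np.all(np.abs(theta - theta_prev) < epsilon):  # same stopping rule
            break
    return theta

# RMSE, matching the corrected MATLAB formula:
# rmse = np.sqrt(np.mean((y - np.dot(X, theta))**2))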
    
2. MLP in Python (Theano)
# MLP with two hidden layers
import numpy as np
import theano
import theano.tensor as T
import theano.tensor.nnet as nnet

#define logistic
a = T.dmatrix('a')
s = 1 /(1+T.exp(-a))
logistic = theano.function([a], s)
#define the logistic derivative (its input c is an activation, i.e. already a sigmoid output)
c = T.dmatrix('c')
t = c * (1-c)
logistic_deriv = theano.function([c],t)
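# (Sanity check, illustrative:) logistic maps 0 to 0.5, and since logistic_deriv
# takes an activation rather than a pre-activation, logistic_deriv(0.5) = 0.25:
#   print(logistic([[0.0]]))        # -> [[0.5]]
#   print(logistic_deriv([[0.5]]))  # -> [[0.25]]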


#define hidden layer
def hidden_layer(x, w):
    m = np.dot(x, w)
    return np.array(logistic(m))

#load data
X = np.matrix(np.loadtxt('/home/user/Desktop/421*100.txt'))
yt = np.matrix(np.loadtxt('/home/user/Desktop/new.txt'))
y1 = yt.T


y_max = np.max(y1)
y_min = np.min(y1)
# max-min normalization: rescale y into [0, 1]
def MaxMinNormalization(yn, Max, Min):
    yn = (yn - Min) / (Max - Min)
    return yn
y = MaxMinNormalization(y1, y_max, y_min)

#get row,col of X y
X_row = X.shape[0] #421
X_col = X.shape[1] #100
y_row = y.shape[0]  #421
y_col = y.shape[1]
hid_node = 100

# initialize Weights
np.random.seed(3)
W1 = 2*np.random.random((X_col, hid_node))-1
W2 = 2*np.random.random((hid_node, hid_node))-1
W3 = 2*np.random.random((hid_node, 1))-1
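# note: 2*rand-1 draws each initial weight uniformly from [-1, 1];
# np.random.seed(3) above makes the runs repeatable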
for j in range(60000):
    # calculate the activations of each layer (forward pass)
    l1 = np.array(X)
    l2 = hidden_layer(X, W1)
    l3 = hidden_layer(l2, W2)
    l4 = hidden_layer(l3, W3)
    # calculate the error and deltas (backward pass)
    l4_error = np.array(y - l4)
    if (j % 10000 == 0):
        print("Error: " + str(np.mean(np.abs(l4_error))))
    l4_delta = l4_error * logistic_deriv(l4)
    l3_error = l4_delta.dot(W3.T)
    l3_delta = l3_error * logistic_deriv(l3)
    l2_error = l3_delta.dot(W2.T)
    l2_delta = l2_error * logistic_deriv(l2)


    # update weights
    W3 += l3.T.dot(l4_delta)*0.01
    W2 += l2.T.dot(l3_delta)*0.01
    W1 += l1.T.dot(l2_delta)*0.01

print(l4)
print('@@@@@@@@@@@@@@@@@')
print(y)
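
In the listing above, Theano is only used to compile the sigmoid and its derivative; the backpropagation gradients are hand-derived in NumPy. For comparison, here is a minimal sketch of the same two-hidden-layer network where theano.grad derives the gradients automatically (layer sizes mirror the listing; everything else, including the names, is illustrative):

import numpy as np
import theano
import theano.tensor as T

rng = np.random.RandomState(3)
def shared_weight(rows, cols, name):
    # same [-1, 1] uniform initialization as above, stored as a Theano shared variable
    return theano.shared(2 * rng.random_sample((rows, cols)) - 1, name=name)

W1 = shared_weight(100, 100, 'W1')
W2 = shared_weight(100, 100, 'W2')
W3 = shared_weight(100, 1, 'W3')

x = T.dmatrix('x')
t = T.dmatrix('t')
l2 = T.nnet.sigmoid(T.dot(x, W1))
l3 = T.nnet.sigmoid(T.dot(l2, W2))
l4 = T.nnet.sigmoid(T.dot(l3, W3))
cost = 0.5 * T.sum(T.sqr(t - l4))  # squared error, matching the manual updates above

params = [W1, W2, W3]
grads = T.grad(cost, params)       # Theano derives the backward pass symbolically
train = theano.function([x, t], cost,
                        updates=[(p, p - 0.01 * g) for p, g in zip(params, grads)])
# one training step on the data loaded above: cost_value = train(X, y)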


3. Keras MLP
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
import numpy
#normalization function
def MaxMinNormalization(yn, Max, Min):
    yn = (yn - Min) / (Max - Min)
    return yn
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load data
X = numpy.matrix(numpy.loadtxt('/home/user/Desktop/X500*1000.txt'))
y1 = (numpy.matrix(numpy.loadtxt('/home/user/Desktop/y500*1.txt')))
y_train1 = y1.T
#normalize y
Y = MaxMinNormalization(y_train1, numpy.max(y_train1), numpy.min(y_train1))
# create model
model = Sequential()
model.add(Dense(500, input_dim=1000, init='uniform', activation='relu'))
model.add(Dense(500, init='uniform', activation='relu'))
model.add(Dense(500, init='uniform', activation='relu'))
model.add(Dense(1, init='uniform', activation='sigmoid'))
# Compile model
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=sgd)  # use the configured SGD object, not the default 'sgd' string
# Fit the model
model.fit(X, Y, nb_epoch=40, batch_size=10)
# evaluate the model
scores = model.evaluate(X, Y)
#get the predicted Y
predict_Y = model.predict(X, batch_size=10, verbose=0)
#calculate the root-mean-squared-error
RMSe = numpy.sqrt(numpy.sum(numpy.square(Y - predict_Y))/500)
print('the predicted Y:')
print(predict_Y)
print('the real Y:')
print(Y)
print('the root-mean-squared-error:')
print(RMSe)
print('the score:')
print(scores)
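
One caveat: the listing above uses the Keras 1 API (init=, nb_epoch=). Under Keras 2 the same model would look roughly like the sketch below; kernel_initializer= and epochs= are the renamed Keras 2 equivalents, and the random arrays are placeholders for the real data files:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD

X = np.random.rand(500, 1000)  # placeholder data with the shapes used above
Y = np.random.rand(500, 1)

model = Sequential()
model.add(Dense(500, input_dim=1000, kernel_initializer='uniform', activation='relu'))
model.add(Dense(500, kernel_initializer='uniform', activation='relu'))
model.add(Dense(500, kernel_initializer='uniform', activation='relu'))
model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)  # later releases rename lr= to learning_rate=
model.compile(loss='mean_squared_error', optimizer=sgd)
model.fit(X, Y, epochs=40, batch_size=10)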
