Wemos에서 Eigen 사용하기 2

개요
Wemos에서 Eigen을 동작시켜 봤어요.
신경 네트워크에서 xor를 배워 봤어요.
결실

참조 페이지
샘플 코드
#include <vector>
#include <Eigen.h>
#include <Eigen/LU>
#include <Eigen/Dense>
#include <math.h>

using namespace Eigen;
// Dump an Eigen matrix over the serial port: the dimensions first,
// then each row as comma-separated values with 6 decimal places.
void print_m(const Eigen::MatrixXd &X)
{
    const int rows = X.rows();
    const int cols = X.cols();
    Serial.print("nrow: ");
    Serial.println(rows);
    Serial.print("ncol: ");
    Serial.println(cols);
    for (int r = 0; r < rows; r++)
    {
        for (int c = 0; c < cols; c++)
        {
            Serial.print(X(r, c), 6); // 6 fractional digits
            Serial.print(", ");
        }
        Serial.println();
    }
}
/*
auto lambda_act = [](const double x) { 
    return 1.7159 * tanh(0.6667 * x); 
};
auto lambda_act_p = [](const double x) { 
    return 1.7159 * 0.6667 * (1.0 - 1.0 / (1.7159 * 1.7159) * x * x); 
};

auto lambda_act = [](const double x) { 
    return tanh(x); 
};
auto lambda_act_p = [](const double x) { 
    return 1.0 - x * x; 
};
*/
// Logistic sigmoid activation: maps any real input into (0, 1).
auto lambda_act = [](const double x) {
    const double e = exp(-x);
    return 1.0 / (1 + e);
};
// Sigmoid derivative expressed in terms of the activation OUTPUT:
// if y = sigmoid(a), then dy/da = y * (1 - y), so the caller passes y.
auto lambda_act_p = [](const double x) {
    const double one_minus = 1.0 - x;
    return x * one_minus;
};
const int kNumInput = 2;    // network inputs (XOR takes two bits)
const int kNumHidden = 8;   // hidden-layer units, excluding the bias unit
const int kNumOutput = 1;   // single output: the XOR result
const double kEta = 0.3;    // gradient-descent learning rate
// In-place softmax over `in`.
// Subtracting the maximum coefficient before exponentiating prevents
// exp() from overflowing for large inputs; softmax is shift-invariant,
// so the result is unchanged.
void Softmax(VectorXd &in)
{
    const double max_val = in.maxCoeff();
    VectorXd vec_exp = (in.array() - max_val).exp();
    const double sum_exp = vec_exp.sum();
    in = vec_exp.array() / sum_exp;
}
// One step of online backpropagation for the 2-8-1 network.
// in: input vector (kNumInput), tk: target vector (kNumOutput),
// w0: input->hidden weights (kNumHidden x 1+kNumInput),
// w1: hidden->output weights (kNumOutput x 1+kNumHidden).
// Both weight matrices are updated in place.
void learn(const VectorXd &in, const VectorXd &tk, MatrixXd &w0, MatrixXd &w1)
{
    // Augment the input with a leading bias unit fixed at 1.0.
    VectorXd xi(1 + kNumInput);
    xi(0) = 1.0;
    xi.tail(kNumInput) = in;
    // Hidden layer: pre-activation, then sigmoid, again bias-augmented.
    VectorXd aj = w0 * xi;
    VectorXd zj(1 + kNumHidden);
    zj(0) = 1.0;
    zj.tail(kNumHidden) = aj.unaryExpr(lambda_act);
    // Output layer is linear (softmax kept disabled).
    VectorXd yk = w1 * zj;
    //Softmax(yk);
    // Output error for squared-error loss with a linear output.
    VectorXd delta_k = yk - tk;
    // Back-propagate through w1; lambda_act_p takes the activation
    // OUTPUT zj, not the pre-activation. delta_j(0) is the bias row
    // and is discarded below.
    VectorXd delta_j = w1.transpose() * delta_k;
    delta_j = delta_j.array() * zj.unaryExpr(lambda_act_p).array();
    // Gradient-descent updates; delta_j was computed from the old w1
    // before this update, as required.
    w1.array() -= kEta * (delta_k * zj.transpose()).array();
    w0.array() -= kEta * (delta_j.tail(kNumHidden) * xi.transpose()).array();
}
// Forward pass through the trained network: bias-augment the input,
// apply the sigmoid hidden layer, then the linear output layer.
VectorXd feedFoward(const VectorXd &in, const MatrixXd &w0, const MatrixXd &w1)
{
    // Input with a leading bias unit fixed at 1.0.
    VectorXd input_aug(1 + kNumInput);
    input_aug(0) = 1.0;
    input_aug.tail(kNumInput) = in;

    // Hidden activations, also bias-augmented for the output layer.
    VectorXd hidden_aug(1 + kNumHidden);
    hidden_aug(0) = 1.0;
    hidden_aug.tail(kNumHidden) = (w0 * input_aug).unaryExpr(lambda_act);

    //Softmax(yk);  // kept disabled, matching learn()
    return w1 * hidden_aug;
}
void setup()
{
    Serial.begin(9600);
    Serial.println("ok0");
    VectorXd x[4];
    for (int i = 0; i < 4; ++i)
    {
        x[i] = VectorXd::Zero(2);
    }
    x[0] << 0.0, 0.0;
    x[1] << 0.0, 1.0;
    x[2] << 1.0, 0.0;
    x[3] << 1.0, 1.0;
    VectorXd t[4];
    for (int i = 0; i < 4; ++i)
    {
        t[i] = VectorXd::Zero(1);
    }
    t[0] << 0.0;
    t[1] << 1.0;
    t[2] << 1.0;
    t[3] << 0.0;
    MatrixXd w0 = MatrixXd::Random(kNumHidden, 1 + kNumInput);
    MatrixXd w1 = MatrixXd::Random(kNumOutput, 1 + kNumHidden);
    Serial.println("ok0");
    for (int n = 0; n < 1000; ++n)
    {
        for (int i = 0; i < 4; ++i)
        {
            learn(x[i], t[i], w0, w1);
            delay(10);
        }
        Serial.println(n);
    }
    for (int i = 0; i < 4; ++i)
    {
        print_m(feedFoward(x[i], w0, w1));
    }
}
// All work happens in setup(); idle here with a short sleep.
void loop()
{
    delay(300);
}


이상.

좋은 웹페이지 즐겨찾기