Installing Kubernetes and Configuring the Dashboard (HA)
A manual for building a k8s test bed
1. Set a hostname to distinguish each node
hostnamectl set-hostname master1   # repeat on each node with its own name
2. Configure /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.10.81.10 vip
10.10.81.11 master1
10.10.81.12 master2
10.10.81.13 master3
10.10.81.14 worker1
10.10.81.15 worker2
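To confirm name resolution on each node, a quick sanity check:
for h in vip master1 master2 master3 worker1 worker2; do getent hosts "$h"; done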
3. Security and system settings (all nodes)
Disable SELinux
setenforce 0 && \
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config && \
grep -P '^SELINUX=' /etc/selinux/config
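Verify the change (prints Permissive now, and Disabled after the next reboot):
getenforce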
k8s sysctl settings
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
sudo modprobe overlay
sudo modprobe br_netfilter
Apply the system settings
sudo sysctl --system
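To confirm the modules loaded and the sysctl values took effect:
lsmod | grep -E 'overlay|br_netfilter'
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward net.bridge.bridge-nf-call-ip6tables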
Disable swap
swapoff -a
cat /proc/swaps
Remove the swap entry from /etc/fstab
sed -i '/swap/d' /etc/fstab
cat /etc/fstab
Disable firewalld and flush iptables
systemctl stop firewalld && systemctl disable firewalld
sudo iptables -F && sudo iptables -t nat -F && sudo iptables -t mangle -F && sudo iptables -X
iptables -L
4. Install keepalived & haproxy (master nodes)
yum install -y haproxy keepalived
cat <<EOF | tee /etc/keepalived/check_apiserver.sh
#!/bin/sh
APISERVER_VIP=10.10.81.10      # VIP address
APISERVER_DEST_PORT=6443

errorExit() {
    echo "*** \$*" 1>&2
    exit 1
}

curl --silent --max-time 2 --insecure https://localhost:\${APISERVER_DEST_PORT}/ -o /dev/null || errorExit "Error GET https://localhost:\${APISERVER_DEST_PORT}/"
if ip addr | grep -q \${APISERVER_VIP}; then
    curl --silent --max-time 2 --insecure https://\${APISERVER_VIP}:\${APISERVER_DEST_PORT}/ -o /dev/null || errorExit "Error GET https://\${APISERVER_VIP}:\${APISERVER_DEST_PORT}/"
fi
EOF
chmod +x /etc/keepalived/check_apiserver.sh
/usr/bin/cp -f /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf-org
sh -c '> /etc/keepalived/keepalived.conf'
cat << EOF | tee /etc/keepalived/keepalived.conf
! /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script check_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 3
    weight -2
    fall 10
    rise 2
}
vrrp_instance VI_1 {
    state MASTER               # master2, master3 => BACKUP
    interface ens192           # the master node's network interface
    virtual_router_id 151
    priority 255               # must differ on each master; higher value wins
    authentication {
        auth_type PASS
        auth_pass bio2013!
    }
    virtual_ipaddress {
        10.10.81.10/16         # the virtual (VIP) address
    }
    track_script {
        check_apiserver
    }
}
EOF
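On master2 and master3 only the state and priority lines differ. A sketch of the change, assuming priorities 254 and 253 (any distinct lower values work):
# on master2 (use priority 253 on master3)
sed -i -e 's/state MASTER/state BACKUP/' -e 's/priority 255/priority 254/' /etc/keepalived/keepalived.conf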
/usr/bin/cp -f /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg-org
cat <<EOF | tee /etc/haproxy/haproxy.cfg
#---------------------------------------------------------------------
# apiserver frontend which proxies to the masters
#---------------------------------------------------------------------
frontend apiserver
    bind *:8443
    mode tcp
    option tcplog
    default_backend apiserver

#---------------------------------------------------------------------
# round robin balancing for apiserver
#---------------------------------------------------------------------
backend apiserver
    option httpchk GET /healthz
    http-check expect status 200
    mode tcp
    option ssl-hello-chk
    balance roundrobin
    server master1 10.10.81.11:6443 check
    server master2 10.10.81.12:6443 check
    server master3 10.10.81.13:6443 check
EOF
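The file above holds only the frontend and backend, so haproxy will warn about missing timeouts. A minimal global/defaults block that can be prepended to the file (the values are assumptions, not from the original setup):
global
    log /dev/log local0
    daemon

defaults
    mode tcp
    log global
    timeout connect 10s
    timeout client 30s
    timeout server 30s
The final file can be validated with haproxy -c -f /etc/haproxy/haproxy.cfg before enabling the service.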
Enable the keepalived & haproxy services
systemctl enable keepalived --now && systemctl enable haproxy --now
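Confirm the VIP is bound on the active master (interface name as configured above):
ip addr show ens192 | grep -w 10.10.81.10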
5. Install Docker (all nodes)
sudo yum install -y yum-utils
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
sudo yum install docker-ce -y
Set the cgroup driver and allow the HTTP private registry (daemon.json)
cat << EOF | sudo tee /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "insecure-registries": ["knfs:8080"]
}
EOF
Enable the Docker service
sudo systemctl enable docker --now
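Confirm Docker picked up the systemd cgroup driver:
docker info 2>/dev/null | grep -i 'cgroup driver'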
6. Install kubeadm, kubelet, kubectl (all nodes)
Enable the kubernetes.repo
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF
Install
sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
Enable the service
sudo systemctl enable --now kubelet
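Quick version check:
kubeadm version -o short
kubectl version --client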
Initialize the k8s cluster - master1 node
kubeadm init --control-plane-endpoint "10.10.81.10:8443" --upload-certs
# When initialization finishes, the output includes:
#   "You can now join any number of the control-plane node running the following command on each as root:"
#   followed by a kubeadm join vip:8443 --token ... command; copy it and run it on the remaining master nodes.
# Further down it prints:
#   "Then you can join any number of worker nodes by running the following on each as root:"
#   followed by the worker join command; copy it and run it on the remaining worker nodes.
Set up kubectl access on master1:
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
Once every node has joined, verify:
kubectl get nodes
Install Calico
curl https://projectcalico.docs.tigera.io/manifests/calico.yaml -O
kubectl apply -f calico.yaml
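Wait until the calico and CoreDNS pods reach Running before continuing:
kubectl get pods -n kube-system --watch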
7. Create the Kubernetes dashboard
Install the HA add-ons (metrics-server and the dashboard manifests)
git clone https://github.com/cookeem/kubeadm-ha.git
cd kubeadm-ha
kubectl apply -f addons/metrics-server.yaml
cd addons/
Generate the Kubernetes dashboard certificates
openssl req -newkey rsa:4096 -nodes -sha256 -keyout ca.key -x509 -days 3650 -out ca.crt -subj "/CN=dashboard"
openssl req -newkey rsa:4096 -nodes -sha256 -keyout dashboard.key -out dashboard.csr -subj "/CN=dashboard"
export VIPADDR=10.10.81.10
export VIPHOST=vip
echo "subjectAltName = DNS: dashboard, DNS: ${VIPHOST}, IP: ${VIPADDR}" > extfile.cnf
openssl x509 -req -days 3650 -in dashboard.csr -CA ca.crt -CAkey ca.key -CAcreateserial -extfile extfile.cnf -out dashboard.crt
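To confirm the SANs made it into the certificate:
openssl x509 -in dashboard.crt -noout -text | grep -A1 'Subject Alternative Name'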
Create the Kubernetes TLS secret
kubectl create namespace kubernetes-dashboard --dry-run=client -o yaml | kubectl apply -f -
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kubernetes-dashboard --dry-run=client -o yaml | kubectl apply -f -
Install the dashboard
kubectl apply -f kubernetes-dashboard.yaml
Confirm the pod status is Running:
kubectl -n kubernetes-dashboard get pods,services
8. Modify the Kubernetes dashboard settings
kubectl edit clusterrole kubernetes-dashboard
Comment out all the existing rules and add '*':
rules:
- apiGroups:
  #- metrics.k8s.io
  - '*'
  resources:
  #- pods
  #- nodes
  - '*'
  verbs:
  #- get
  #- list
  #- watch
  - '*'
Generate a token
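The command below reads the token of an admin-user service account in kube-system, which this guide does not create elsewhere. A minimal sketch that creates it with cluster-admin rights (acceptable on a test bed; the binding name is an assumption):
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF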
kubectl -n kube-system get secrets $(kubectl -n kube-system get serviceaccounts admin-user -o=jsonpath='{.secrets[0].name}') -o=jsonpath='{.data.token}' | base64 -d
Change the token TTL
kubectl patch --namespace kubernetes-dashboard deployment \
kubernetes-dashboard --type='json' --patch \
'[{"op": "add", "path": "/spec/template/spec/containers/0/args/2", "value": "--token-ttl=43200" }]'
Confirm all kube-system pods are Running
kubectl get pods -n kube-system
9. Container run test
cat <<EOF | sudo tee hello-world.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-world
spec:
  selector:
    matchLabels:
      run: load-balancer-example
  replicas: 3
  template:
    metadata:
      labels:
        run: load-balancer-example
    spec:
      containers:
      - name: hello-world
        image: gcr.io/google-samples/node-hello:1.0
        ports:
        - containerPort: 8080
          protocol: TCP
EOF
kubectl apply -f hello-world.yaml
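To verify the pods and optionally reach them from outside (NodePort is a reasonable choice for a test bed; the service name is illustrative):
kubectl get pods -l run=load-balancer-example
kubectl expose deployment hello-world --type=NodePort --name=example-service --port=8080
kubectl get service example-service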
Author And Source
This article (쿠버네티스 설치 및 대시보드(HA) 구성) was originally published at https://velog.io/@woosanghan/쿠버네티스-설치-및-HA-구성 ; copyright belongs to the original author. (Collection and share based on the CC protocol.)