k8s 배치 - 다중 노드 배치와 부하 균형 구축 (5)
부하 균형 소개
실험 환경
다중 마스터 배치
[root@master01 kubeconfig]# scp -r /opt/kubernetes/ [email protected]:/opt // kubernetes master02
The authenticity of host '192.168.80.11 (192.168.80.11)' can't be established.
ECDSA key fingerprint is SHA256:Ih0NpZxfLb+MOEFW8B+ZsQ5R8Il2Sx8dlNov632cFlo.
ECDSA key fingerprint is MD5:a9:ee:e5:cc:40:c7:9e:24:5b:c1:cd:c1:7b:31:42:0f.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.80.11' (ECDSA) to the list of known hosts.
[email protected]'s password:
token.csv 100% 84 61.4KB/s 00:00
kube-apiserver 100% 929 1.6MB/s 00:00
kube-scheduler 100% 94 183.2KB/s 00:00
kube-controller-manager 100% 483 969.2KB/s 00:00
kube-apiserver 100% 184MB 106.1MB/s 00:01
kubectl 100% 55MB 85.9MB/s 00:00
kube-controller-manager 100% 155MB 111.9MB/s 00:01
kube-scheduler 100% 55MB 115.8MB/s 00:00
ca-key.pem 100% 1675 2.7MB/s 00:00
ca.pem 100% 1359 2.6MB/s 00:00
server-key.pem 100% 1679 2.5MB/s 00:00
server.pem 100% 1643 2.7MB/s 00:00
[root@master01 kubeconfig]# scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service [email protected]:/usr/lib/systemd/system // copy the master systemd unit files to master02
[email protected]'s password:
kube-apiserver.service 100% 282 274.4KB/s 00:00
kube-controller-manager.service 100% 317 403.5KB/s 00:00
kube-scheduler.service 100% 281 379.4KB/s 00:00
[root@master01 kubeconfig]# scp -r /opt/etcd/ [email protected]:/opt/ // :master02 etcd , apiserver master01 etcd master02
[email protected]'s password:
etcd 100% 509 275.7KB/s 00:00
etcd 100% 18MB 95.3MB/s 00:00
etcdctl 100% 15MB 75.1MB/s 00:00
ca-key.pem 100% 1679 941.1KB/s 00:00
ca.pem 100% 1265 1.6MB/s 00:00
server-key.pem 100% 1675 2.0MB/s 00:00
server.pem 100% 1338 1.5MB/s 00:00
[root@master02 ~]# systemctl stop firewalld.service //
[root@master02 ~]# setenforce 0 // selinux
[root@master02 ~]# vim /opt/kubernetes/cfg/kube-apiserver //
...
--etcd-servers=https://192.168.80.12:2379,https://192.168.80.13:2379,https://192.168.80.14:2379 \
--bind-address=192.168.80.11 \ // IP
--secure-port=6443 \
--advertise-address=192.168.80.11 \ // IP
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
...
:wq
[root@master02 ~]# systemctl start kube-apiserver.service // apiserver
[root@master02 ~]# systemctl enable kube-apiserver.service //
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
[root@master02 ~]# systemctl start kube-controller-manager.service // controller-manager
[root@master02 ~]# systemctl enable kube-controller-manager.service //
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
[root@master02 ~]# systemctl start kube-scheduler.service // scheduler
[root@master02 ~]# systemctl enable kube-scheduler.service //
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
[root@master02 ~]# vim /etc/profile //
...
export PATH=$PATH:/opt/kubernetes/bin/
:wq
[root@master02 ~]# source /etc/profile //
[root@master02 ~]# kubectl get node //
NAME STATUS ROLES AGE VERSION
192.168.80.13 Ready 146m v1.12.3
192.168.80.14 Ready 144m v1.12.3 // master
[root@lb01 ~]# systemctl stop firewalld.service
[root@lb01 ~]# setenforce 0
[root@lb01 ~]# vim /etc/yum.repos.d/nginx.repo // nginx yum
[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/7/$basearch/
gpgcheck=0
:wq
[root@lb01 yum.repos.d]# yum list // yum
:fastestmirror
base | 3.6 kB 00:00:00
extras | 2.9 kB 00:00:00
...
[root@lb01 yum.repos.d]# yum install nginx -y // nginx
:fastestmirror
Loading mirror speeds from cached hostfile
* base: mirrors.aliyun.com
* extras: mirrors.163.com
...
[root@lb01 yum.repos.d]# vim /etc/nginx/nginx.conf // nginx
...
events {
worker_connections 1024;
}
stream { //
log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
access_log /var/log/nginx/k8s-access.log main;
upstream k8s-apiserver {
server 192.168.80.12:6443; // IP
server 192.168.80.11:6443;
}
server {
listen 6443;
proxy_pass k8s-apiserver;
}
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
...
:wq
[root@lb01 yum.repos.d]# systemctl start nginx // nginx nginx
[root@lb01 yum.repos.d]# yum install keepalived -y // keepalived
:fastestmirror
Loading mirror speeds from cached hostfile
* base: mirrors.aliyun.com
* extras: mirrors.163.com
...
[root@lb01 yum.repos.d]# mount.cifs //192.168.80.2/shares/K8S/k8s02 /mnt/ //
Password for root@//192.168.80.2/shares/K8S/k8s02:
[root@lb01 yum.repos.d]# cp /mnt/keepalived.conf /etc/keepalived/keepalived.conf // keepalived
cp: overwrite '/etc/keepalived/keepalived.conf'? yes
[root@lb01 yum.repos.d]# vim /etc/keepalived/keepalived.conf //
...
vrrp_script check_nginx {
script "/etc/nginx/check_nginx.sh" //
}
vrrp_instance VI_1 {
state MASTER
interface ens33 //
virtual_router_id 51 //VRRP ID ,
priority 100 // , 90
advert_int 1 // VRRP , 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.80.100/24 //
}
track_script {
check_nginx
}
}
//
:wq
[root@lb02 ~]# vim /etc/keepalived/keepalived.conf
...
vrrp_script check_nginx {
script "/etc/nginx/check_nginx.sh" //
}
vrrp_instance VI_1 {
state BACKUP // backup
interface ens33 //
virtual_router_id 51 //VRRP ID , >
priority 90 // , 90
advert_int 1 // VRRP , 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.80.100/24 // IP
}
track_script {
check_nginx
}
}
//
:wq
[root@lb01 yum.repos.d]# vim /etc/nginx/check_nginx.sh // nginx
#!/bin/bash
# Health check run by keepalived's vrrp_script: when no nginx process is
# running, stop keepalived so the VIP (192.168.80.100) fails over to the
# backup load balancer.
# pgrep -x matches the exact process name "nginx", so this script's own
# name (check_nginx.sh) can never be counted; -c prints the match count.
count=$(pgrep -cx nginx)
if [ "$count" -eq 0 ]; then
    systemctl stop keepalived
fi
:wq
chmod +x /etc/nginx/check_nginx.sh //
[root@lb01 yum.repos.d]# systemctl start keepalived //
[root@lb01 ~]# ip a //
1: lo: mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:e9:04:ba brd ff:ff:ff:ff:ff:ff
inet 192.168.80.19/24 brd 192.168.80.255 scope global ens33
valid_lft forever preferred_lft forever
inet 192.168.80.100/24 scope global secondary ens33 //
valid_lft forever preferred_lft forever
inet6 fe80::c3ab:d7ec:1adf:c5df/64 scope link
valid_lft forever preferred_lft forever
[root@lb02 ~]# ip a //
1: lo: mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:7d:c7:ab brd ff:ff:ff:ff:ff:ff
inet 192.168.80.20/24 brd 192.168.80.255 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::cd8b:b80c:8deb:251f/64 scope link
valid_lft forever preferred_lft forever
inet6 fe80::c3ab:d7ec:1adf:c5df/64 scope link tentative dadfailed
valid_lft forever preferred_lft forever // IP lb02
[root@lb01 ~]# systemctl stop nginx.service
[root@lb01 nginx]# ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:e9:04:ba brd ff:ff:ff:ff:ff:ff
inet 192.168.80.19/24 brd 192.168.80.255 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::c3ab:d7ec:1adf:c5df/64 scope link
valid_lft forever preferred_lft forever
[root@lb02 ~]# ip a // lb02
1: lo: mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:7d:c7:ab brd ff:ff:ff:ff:ff:ff
inet 192.168.80.20/24 brd 192.168.80.255 scope global ens33
valid_lft forever preferred_lft forever
inet 192.168.80.100/24 scope global secondary ens33 // lb02
valid_lft forever preferred_lft forever
inet6 fe80::cd8b:b80c:8deb:251f/64 scope link
valid_lft forever preferred_lft forever
inet6 fe80::c3ab:d7ec:1adf:c5df/64 scope link tentative dadfailed
valid_lft forever preferred_lft forever
[root@lb01 nginx]# systemctl start nginx
[root@lb01 nginx]# systemctl start keepalived.service
[root@lb01 nginx]# ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:e9:04:ba brd ff:ff:ff:ff:ff:ff
inet 192.168.80.19/24 brd 192.168.80.255 scope global ens33
valid_lft forever preferred_lft forever
inet 192.168.80.100/24 scope global secondary ens33 //
valid_lft forever preferred_lft forever
inet6 fe80::c3ab:d7ec:1adf:c5df/64 scope link
valid_lft forever preferred_lft forever
[root@node01 ~]# vim /opt/kubernetes/cfg/bootstrap.kubeconfig
...
server: https://192.168.80.100:6443
...
:wq
[root@node01 ~]# vim /opt/kubernetes/cfg/kubelet.kubeconfig
...
server: https://192.168.80.100:6443
...
:wq
[root@node01 ~]# vim /opt/kubernetes/cfg/kube-proxy.kubeconfig
...
server: https://192.168.80.100:6443
...
:wq
[root@node01 ~]# systemctl restart kubelet.service //
[root@node01 ~]# systemctl restart kube-proxy.service
[root@lb01 nginx]# tail /var/log/nginx/k8s-access.log
192.168.80.13 192.168.80.12:6443 - [11/Feb/2020:15:23:52 +0800] 200 1118
192.168.80.13 192.168.80.11:6443 - [11/Feb/2020:15:23:52 +0800] 200 1119
192.168.80.14 192.168.80.12:6443 - [11/Feb/2020:15:26:01 +0800] 200 1119
192.168.80.14 192.168.80.12:6443 - [11/Feb/2020:15:26:01 +0800] 200 1120
[root@master01 ~]# kubectl run nginx --image=nginx // pod
kubectl run --generator=deployment/apps.v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl create instead.
deployment.apps/nginx created
[root@master01 ~]# kubectl get pods // pod
NAME READY STATUS RESTARTS AGE
nginx-dbddb74b8-sdcpl 1/1 Running 0 33m //
[root@master01 ~]# kubectl logs nginx-dbddb74b8-sdcpl //
Error from server (Forbidden): Forbidden (user=system:anonymous, verb=get, resource=nodes, subresource=proxy) ( pods/log nginx-dbddb74b8-sdcpl) //
[root@master01 ~]# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous //
clusterrolebinding.rbac.authorization.k8s.io/cluster-system-anonymous created
[root@master01 ~]# kubectl logs nginx-dbddb74b8-sdcpl //
[root@master01 ~]# // ,
[root@master01 ~]# kubectl get pods -o wide // master01 pod
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
nginx-dbddb74b8-sdcpl 1/1 Running 0 38m 172.17.33.2 192.168.80.14
[root@node01 ~]# curl 172.17.33.2 // node
Welcome to nginx!
Welcome to nginx!
If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.
For online documentation and support please refer to
nginx.org.
Commercial support is available at
nginx.com.
Thank you for using nginx.
[root@master01 ~]# kubectl logs nginx-dbddb74b8-sdcpl
172.17.12.0 - - [12/Feb/2020:06:45:54 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-" //
다중 노드 구축과 부하 균형 설정 완료. 이 내용에 흥미가 있습니까?
현재 기사가 여러분의 문제를 해결하지 못하는 경우 AI 엔진은 머신러닝 분석(스마트 모델이 방금 만들어져 부정확한 경우가 있을 수 있음)을 통해 가장 유사한 기사를 추천합니다:
간단! Certbot을 사용하여 웹 사이트를 SSL(HTTPS)화하는 방법초보자가 인프라 주위를 정돈하는 것은 매우 어렵습니다. 이번은 사이트를 간단하게 SSL화(HTTP에서 HTTPS통신)로 변경하는 방법을 소개합니다! 이번에는 소프트웨어 시스템 Nginx CentOS7 의 환경에서 S...
텍스트를 자유롭게 공유하거나 복사할 수 있습니다.하지만 이 문서의 URL은 참조 URL로 남겨 두십시오.
CC BY-SA 2.5, CC BY-SA 3.0 및 CC BY-SA 4.0에 따라 라이센스가 부여됩니다.