[root@master01 ~]# kubectl get pods -n kube-system -o wide
NAME                       READY   STATUS    RESTARTS   AGE    IP             NODE       NOMINATED NODE   READINESS GATES
coredns-5ffbfd976d-lwgvt   1/1     Running   0          75s    10.244.1.3     node01     <none>           <none>
kube-flannel-ds-dxvtn      1/1     Running   0          171m   192.168.1.23   node02     <none>           <none>
kube-flannel-ds-njsnc      1/1     Running   4          4h4m   192.168.1.21   master01   <none>           <none>
kube-flannel-ds-v2gcr      1/1     Running   0          116m   192.168.1.22   node01     <none>           <none>
DNS解析测试:
[root@master01 ~]# kubectl run -it --rm dns-test --image=busybox:1.28.4 sh
If you don't see a command prompt, try pressing enter.
/ # nslookup kubernetes
# 这里输入
Server: 10.0.0.2
Address 1: 10.0.0.2 kube-dns.kube-system.svc.cluster.local
# 解析结果
Name:      kubernetes
Address 1: 10.0.0.1 kubernetes.default.svc.cluster.local
/ # exit
Session ended, resume using 'kubectl attach dns-test -c dns-test -i -t' command when the pod is running
pod "dns-test" deleted
多Master架构图:
前期工作中的所有步骤都得挨着做一遍
1、升级内核、关闭防火墙、selinux、swap
关闭防火墙、selinux、swap,将桥接的IPv4流量传递到iptables的链,升级内核

2、安装Docker
和master01一样,从master01上将所有需要的文件发送到master02节点上
[root@master01 ~]# scp ~/docker/* root@192.168.1.24:/usr/local/bin/
[root@master01 ~]# scp /usr/lib/systemd/system/docker.service root@192.168.1.24:/usr/lib/systemd/system/

2.1、在master02上启动docker
2.1.1、从master01节点上发送docker相关文件
[root@master01 ~]# ssh root@192.168.1.24 mkdir /etc/docker
[root@master01 ~]# scp /etc/docker/daemon.json root@192.168.1.24:/etc/docker/
在master02节点上启动docker:
systemctl daemon-reload
systemctl start docker && systemctl enable docker

3、部署Master2(192.168.1.24)
Master2 与已部署的Master1所有操作一致。所以我们只需将Master1所有K8s文件拷贝过来,再修改下服务器IP和主机名启动即可。

3.1、在Master2创建etcd证书目录
[root@master02 ~]# mkdir -p /k8s/{etcd,k8s}/{bin,cfg,ssl,logs}

3.2、从master01上拷贝文件
[root@master01 ~]# scp -r /k8s/k8s/{bin,cfg,ssl} root@192.168.1.24:/k8s/k8s
[root@master01 ~]# scp -r /k8s/etcd/ssl root@192.168.1.24:/k8s/etcd/
[root@master01 ~]# scp -r /opt/cni/ root@192.168.1.24:/opt
[root@master01 ~]# scp /usr/lib/systemd/system/kube* root@192.168.1.24:/usr/lib/systemd/system
[root@master01 ~]# scp /usr/local/bin/kubectl root@192.168.1.24:/usr/bin

3.3、在master02上删除证书文件
[root@master02 ~]# rm -rf /k8s/k8s/cfg/kubelet.kubeconfig
[root@master02 ~]# rm -rf /k8s/k8s/ssl/kubelet*

3.4、在master02上修改配置文件中的IP和主机名
[root@master02 ~]# vim /k8s/k8s/cfg/kube-apiserver.cfg
--bind-address=192.168.1.24 \
--advertise-address=192.168.1.24 \
[root@master02 ~]# vim /k8s/k8s/cfg/kubelet.cfg
--hostname-override=master02 \
[root@master02 ~]# vim /k8s/k8s/cfg/kube-proxy-config.yml
hostnameOverride: master02

3.5、在master02上启动所有组件并设置开机启动
systemctl daemon-reload
systemctl enable kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy
systemctl start kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy
systemctl status kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy

3.6、在master02上查看集群状态
[root@master02 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-1               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}
etcd-0               Healthy   {"health":"true"}

3.7、在master02上批准kubelet证书申请
[root@master02 ~]# kubectl get csr
NAME                                                   AGE     SIGNERNAME                                    REQUESTOR           CONDITION
node-csr-mviArpM4DRc1WC3MQZZX9KQF1G7AKp156Th7GFDAcvU   72s     kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending

批准:
[root@master02 ~]# kubectl certificate approve node-csr-mviArpM4DRc1WC3MQZZX9KQF1G7AKp156Th7GFDAcvU
certificatesigningrequest.certificates.k8s.io/node-csr-mviArpM4DRc1WC3MQZZX9KQF1G7AKp156Th7GFDAcvU approved

[root@master02 ~]# kubectl get csr
NAME                                                   AGE     SIGNERNAME                                    REQUESTOR           CONDITION
node-csr-mviArpM4DRc1WC3MQZZX9KQF1G7AKp156Th7GFDAcvU   2m40s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued

在master01或master02节点上查看node:
[root@master02 ~]# kubectl get nodes
NAME       STATUS     ROLES    AGE     VERSION
master01   Ready      <none>   5h39m   v1.18.19
master02   NotReady   <none>   10s     v1.18.19
node01     Ready      <none>   4h9m    v1.18.19
node02     Ready      <none>   4h9m    v1.18.19

5分钟后....
[root@master02 ~]# kubectl get nodes
NAME       STATUS   ROLES    AGE     VERSION
master01   Ready    <none>   5h47m   v1.18.19
master02   Ready    <none>   8m22s   v1.18.19
node01     Ready    <none>   4h17m   v1.18.19
node02     Ready    <none>   4h17m   v1.18.19

部署nginx及nginx高可用