Building a single-master, multi-node k8s cluster with kubeadm (7)

3) In the dashboard, select Services in the left-hand menu

4) You can see that the nginx Service created earlier is mapped to port 30094 on the host. Access it in a browser at 192.168.40.180:30094
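The same information can also be checked from the command line instead of the dashboard. A minimal check, assuming the Service created earlier is named nginx and lives in the default namespace:

# Show the Service and the NodePort it was assigned (30094 in this example)
[root@k8s-master1 ~]# kubectl get svc nginx -o wide

# Or hit the NodePort directly from any host that can reach the node IP
[root@k8s-master1 ~]# curl http://192.168.40.180:30094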

4. Deploying metrics-server

metrics-server is a cluster-wide aggregator of resource usage data. It only exposes the data and does not store it; its focus is implementing the resource metrics API for indicators such as CPU, file descriptors, memory, and request latency. The data metrics-server collects is consumed inside the k8s cluster, for example by kubectl, the HPA, and the scheduler.
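As an illustration (not part of the original steps), once metrics-server is running (installed in 4.1 below), the resource metrics API it registers can be queried through the apiserver directly; kubectl top is essentially a formatted view of these endpoints:

# Raw resource metrics API registered by metrics-server (group metrics.k8s.io/v1beta1)
[root@k8s-master1 ~]# kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes"
[root@k8s-master1 ~]# kubectl get --raw "/apis/metrics.k8s.io/v1beta1/namespaces/kube-system/pods"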

4.1 Installing metrics-server

1) Modify the apiserver configuration in /etc/kubernetes/manifests

Note: this is new in Kubernetes 1.17; on 1.16 the flag does not need to be added, but from 1.17 onward it does. The flag enables aggregation, which allows the Kubernetes API to be extended without modifying the Kubernetes core code.

[root@k8s-master1 ~]# vim /etc/kubernetes/manifests/kube-apiserver.yaml
    - --enable-aggregator-routing=true        # line to add
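For orientation, the flag goes alongside the other kube-apiserver arguments in the static pod manifest. A minimal sketch of the relevant fragment; the surrounding flags shown here are placeholders and will differ in your manifest:

spec:
  containers:
  - command:
    - kube-apiserver
    - --advertise-address=192.168.40.180    # existing, cluster-specific flags
    - --allow-privileged=true
    - --enable-aggregator-routing=true      # the newly added line

Because the kubelet watches /etc/kubernetes/manifests, saving the file is enough for it to recreate the kube-apiserver static pod with the new flag.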

2) Re-apply the apiserver configuration

[root@k8s-master1 ~]# kubectl apply -f /etc/kubernetes/manifests/kube-apiserver.yaml
[root@k8s-master1 ~]# kubectl get pods -n kube-system
NAME                                       READY   STATUS             RESTARTS   AGE
calico-kube-controllers-6949477b58-9t9k8   1/1     Running            0          91m
calico-node-66b47                          1/1     Running            0          91m
calico-node-6svrr                          1/1     Running            0          91m
calico-node-zgnkl                          1/1     Running            0          91m
coredns-7f89b7bc75-4jvmv                   1/1     Running            0          119m
coredns-7f89b7bc75-zr5mf                   1/1     Running            0          119m
etcd-k8s-master1                           1/1     Running            0          119m
kube-apiserver                             0/1     CrashLoopBackOff   1          24s    # delete this pod
kube-apiserver-k8s-master1                 1/1     Running            0          24s
kube-controller-manager-k8s-master1        1/1     Running            1          119m
kube-proxy-8fzc4                           1/1     Running            0          106m
kube-proxy-n2v4j                           1/1     Running            0          119m
kube-proxy-r9ccp                           1/1     Running            0          108m
kube-scheduler-k8s-master1                 1/1     Running            1          119m

# delete the pod that is stuck in CrashLoopBackOff
[root@k8s-master1 ~]# kubectl delete pods kube-apiserver -n kube-system
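An optional verification step (not in the original): confirm that the restarted apiserver actually picked up the new flag by inspecting the static pod's command line:

[root@k8s-master1 ~]# kubectl -n kube-system get pod kube-apiserver-k8s-master1 -o yaml | grep enable-aggregator-routing
    - --enable-aggregator-routing=true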

3) Deploy metrics-server

[root@k8s-master1 ~]# cat metrics.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - "extensions"
  resources:
  - deployments
  verbs:
  - get
  - list
  - update
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: metrics-server-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v0.3.6
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
      version: v0.3.6
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
        version: v0.3.6
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      containers:
      - name: metrics-server
        image: k8s.gcr.io/metrics-server-amd64:v0.3.6
        imagePullPolicy: IfNotPresent
        command:
        - /metrics-server
        - --metric-resolution=30s
        - --kubelet-preferred-address-types=InternalIP
        - --kubelet-insecure-tls
        ports:
        - containerPort: 443
          name: https
          protocol: TCP
      - name: metrics-server-nanny
        image: k8s.gcr.io/addon-resizer:1.8.4
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 5m
            memory: 50Mi
        env:
        - name: MY_POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: MY_POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: metrics-server-config-volume
          mountPath: /etc/config
        command:
        - /pod_nanny
        - --config-dir=/etc/config
        - --cpu=300m
        - --extra-cpu=20m
        - --memory=200Mi
        - --extra-memory=10Mi
        - --threshold=5
        - --deployment=metrics-server
        - --container=metrics-server
        - --poll-period=300000
        - --estimator=exponential
        - --minClusterSize=2
      volumes:
      - name: metrics-server-config-volume
        configMap:
          name: metrics-server-config
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
---
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "Metrics-server"
spec:
  selector:
    k8s-app: metrics-server
  ports:
  - port: 443
    protocol: TCP
    targetPort: https
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100

[root@k8s-master1 ~]# kubectl apply -f metrics.yaml
[root@k8s-master1 ~]# kubectl get pods -n kube-system | grep metrics
metrics-server-6595f875d6-dx8w6            2/2     Running   0          8s

4.2 The kubectl top command

[root@k8s-master1 ~]# kubectl top pods -n kube-system
NAME                                       CPU(cores)   MEMORY(bytes)
calico-kube-controllers-6949477b58-9t9k8   4m           26Mi
calico-node-66b47                          74m          82Mi
calico-node-6svrr                          77m          98Mi
calico-node-zgnkl                          83m          97Mi
coredns-7f89b7bc75-4jvmv                   6m           50Mi
coredns-7f89b7bc75-zr5mf                   7m           46Mi
etcd-k8s-master1                           35m          54Mi
kube-apiserver-k8s-master1                 118m         390Mi
kube-controller-manager-k8s-master1        37m          50Mi
kube-proxy-8fzc4                           1m           14Mi
kube-proxy-n2v4j                           1m           23Mi
kube-proxy-r9ccp                           1m           15Mi
kube-scheduler-k8s-master1                 7m           20Mi
metrics-server-6595f875d6-dx8w6            2m           16Mi

[root@k8s-master1 ~]# kubectl top nodes
NAME          CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
k8s-master1   417m         20%    1282Mi          68%
k8s-node1     233m         5%     1612Mi          42%
k8s-node2     262m         6%     1575Mi          41%

5. Other issues

5.1 Making the scheduler and controller-manager ports listen on the physical machine
