命令回顾
[root@master01 ~]# kubectl explain ingress
KIND:     Ingress
VERSION:  networking.k8s.io/v1

DESCRIPTION:
     Ingress is a collection of rules that allow inbound connections to reach
     the endpoints defined by a backend. An Ingress can be configured to give
     services externally-reachable urls, load balance traffic, terminate SSL,
     offer name based virtual hosting etc.

FIELDS:
   apiVersion	<string>
     APIVersion defines the versioned schema of this representation of an
     object. Servers should convert recognized schemas to the latest internal
     value, and may reject unrecognized values. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

   kind	<string>
     Kind is a string value representing the REST resource this object
     represents. Servers may infer this from the endpoint the client submits
     requests to. Cannot be updated. In CamelCase. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

   metadata	<Object>
     Standard object's metadata. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata

   spec	<Object>
     Spec is the desired state of the Ingress. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

   status	<Object>
     Status is the current state of the Ingress. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

[root@master01 ~]# kubectl describe ingress
Name:             nginx-daemon-ingress
Namespace: default
Address: 10.96.183.19
Default backend: default-http-backend:80 (<error: endpoints "default-http-backend" not found>)
TLS:
  tls.secret terminates www.xy102.com
Rules:
  Host           Path  Backends
  ----           ----  --------
  www.xy102.com  /     nginx-daemon-svc:80 (<none>)
Annotations: <none>
Events: <none>
一、prometheus
node_exporter
节点数据收集器
daemonset-------->保证每个节点都有一个收集器
prometheus------->监控主程序
grafana------->图形化
alertmanager---->告警模块
node_exporter组件安装
[root@master01 opt]# mkdir prometheus
[root@master01 opt]# cd prometheus/
[root@master01 prometheus]# vim node_exporter.yaml
[root@master01 prometheus]#
# node_exporter.yaml — DaemonSet so every node (hostNetwork) runs one collector on :9100.
# NOTE(review): image has no tag, so :latest is pulled; the cluster has no internet
# access, which is why the pods below go ErrImagePull until the tag is pinned to :v1.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: node-exporter
  namespace: monitor-sa
  labels:
    name: node-exporter
spec:
  selector:
    matchLabels:
      name: node-exporter
  template:
    metadata:
      labels:
        name: node-exporter
    spec:
      # host namespaces + privileged so the exporter can read real node metrics
      hostPID: true
      hostIPC: true
      hostNetwork: true
      containers:
      - name: node-exporter
        image: prom/node-exporter
        ports:
        - containerPort: 9100
        resources:
          limits:
            cpu: "0.5"
        securityContext:
          privileged: true
        args:
        - --path.procfs
        - /host/proc
        - --path.sysfs
        - /host/sys
        - --collector.filesystem.ignored-mount-points
        - '"^/(sys|proc|dev|host|etc)($|/)"'
        volumeMounts:
        - name: dev
          mountPath: /host/dev
        - name: proc
          mountPath: /host/proc
        - name: sys
          mountPath: /host/sys
        - name: rootfs
          mountPath: /rootfs
      volumes:
      - name: proc
        hostPath:
          path: /proc
      - name: dev
        hostPath:
          path: /dev
      - name: sys
        hostPath:
          path: /sys
      - name: rootfs
        hostPath:
          path: /
[root@master01 ~]# cd /opt/
[root@master01 opt]# kubectl create ns monitor-sa
namespace/monitor-sa created
[root@master01 opt]# ls
cni ingress
cni_bak jenkins-2.396-1.1.noarch.rpm
cni-plugins-linux-amd64-v0.8.6.tgz k8s-yaml
configmap kube-flannel.yml
containerd nginx-de.yaml
data1 secret
flannel.tar test
ingree.contro-0.30.0.tar update-kubeadm-cert.sh
ingree.contro-0.30.0.tar.gz
[root@master01 opt]# mkdir prometheus
[root@master01 opt]# cd prometheus/
[root@master01 prometheus]# vim node_exporter.yaml
[root@master01 prometheus]# kubectl apply -f node_exporter.yaml
daemonset.apps/node-exporter created[root@master01 prometheus]# kubectl get pod -n monitor-sa -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
node-exporter-7mfnf 0/1 ErrImagePull 0 2m29s 192.168.168.81 master01 <none> <none>
node-exporter-c6hq2 0/1 ImagePullBackOff 0 13m 192.168.168.82 node01 <none> <none>
node-exporter-jgz96 0/1 ImagePullBackOff 0 13m 192.168.168.83 node02 <none> <none>
##镜像拉取失败,镜像拉不下来
导入镜像
[root@master01 prometheus]# rz -E
rz waiting to receive.
[root@master01 prometheus]# ls
node_exporter.yaml node.tar
[root@master01 prometheus]# docker load -i node.tar ##所有节点都部署[root@node01 opt]# mkdir prometheus
[root@node01 opt]# rz -E
rz waiting to receive.
[root@node01 opt]# docker load -i node.tar
1e604deea57d: Loading layer 1.458MB/1.458MB
6b83872188a9: Loading layer 2.455MB/2.455MB
4f3f7dd00054: Loading layer 20.5MB/20.5MB
Loaded image: prom/node-exporter:v1[root@node02 ~]# cd /opt/
[root@node02 opt]# mkdir prometheus
[root@node02 opt]# cd prometheus/
[root@node02 prometheus]# rz -E
rz waiting to receive.
[root@node02 prometheus]# docker load -i node.tar
1e604deea57d: Loading layer 1.458MB/1.458MB
6b83872188a9: Loading layer 2.455MB/2.455MB
4f3f7dd00054: Loading layer 20.5MB/20.5MB
Loaded image: prom/node-exporter:v1
[root@master01 prometheus]# vim node_exporter.yaml
# Same DaemonSet as before, with the image tag pinned to :v1 — the tag of the
# image just imported on every node via `docker load`, so no registry pull is needed.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: node-exporter
  namespace: monitor-sa
  labels:
    name: node-exporter
spec:
  selector:
    matchLabels:
      name: node-exporter
  template:
    metadata:
      labels:
        name: node-exporter
    spec:
      hostPID: true
      hostIPC: true
      hostNetwork: true
      containers:
      - name: node-exporter
        image: prom/node-exporter:v1
        ports:
        - containerPort: 9100
        resources:
          limits:
            cpu: "0.5"
        securityContext:
          privileged: true
        args:
        - --path.procfs
        - /host/proc
        - --path.sysfs
        - /host/sys
        - --collector.filesystem.ignored-mount-points
        - '"^/(sys|proc|dev|host|etc)($|/)"'
        volumeMounts:
        - name: dev
          mountPath: /host/dev
        - name: proc
          mountPath: /host/proc
        - name: sys
          mountPath: /host/sys
        - name: rootfs
          mountPath: /rootfs
      volumes:
      - name: proc
        hostPath:
          path: /proc
      - name: dev
        hostPath:
          path: /dev
      - name: sys
        hostPath:
          path: /sys
      - name: rootfs
        hostPath:
          path: /
[root@master01 prometheus]# kubectl get pod -n monitor-sa -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
node-exporter-7mfnf 0/1 ErrImagePull 0 2m29s 192.168.168.81 master01 <none> <none>
node-exporter-c6hq2 0/1 ImagePullBackOff 0 13m 192.168.168.82 node01 <none> <none>
node-exporter-jgz96 0/1 ImagePullBackOff 0 13m 192.168.168.83 node02 <none> <none>##已经导入镜像,重启[root@master01 prometheus]# kubectl delete pod node-exporter-7mfnf -n monitor-sa
pod "node-exporter-7mfnf" deleted[root@master01 prometheus]# kubectl get pod -n monitor-sa -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
node-exporter-76nkz 1/1 Running 0 26s 192.168.168.81 master01 <none> <none>
node-exporter-c6hq2 0/1 ImagePullBackOff 0 14m 192.168.168.82 node01 <none> <none>
node-exporter-jgz96 0/1 ImagePullBackOff 0 13m 192.168.168.83 node02 <none> <none>##已经导入镜像,重启
[root@master01 prometheus]# kubectl delete pod node-exporter-c6hq2 -n monitor-sa
pod "node-exporter-c6hq2" deleted[root@master01 prometheus]# kubectl get pod -n monitor-sa -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
node-exporter-487lb 1/1 Running 0 55s 192.168.168.82 node01 <none> <none>
node-exporter-76nkz 1/1 Running 0 98s 192.168.168.81 master01 <none> <none>
node-exporter-jj92l 0/1 ContainerCreating 0 10s 192.168.168.83 node02 <none> <none>##已经导入镜像,重启
[root@master01 prometheus]# kubectl delete pod node-exporter-jgz96 -n monitor-sa
pod "node-exporter-jgz96" deleted[root@master01 prometheus]# kubectl get pod -n monitor-sa -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
node-exporter-487lb 1/1 Running 0 12m 192.168.168.82 node01 <none> <none>
node-exporter-76nkz 1/1 Running 0 13m 192.168.168.81 master01 <none> <none>
node-exporter-jj92l 1/1 Running 0 12m 192.168.168.83 node02 <none> <none>
http://192.168.168.81:9100/metrics
http://192.168.168.81:9100/metrics
[root@master01 prometheus]# kubectl create serviceaccount monitor -n monitor-sa
serviceaccount/monitor created
[root@master01 prometheus]# kubectl create clusterrolebinding monitor-clusterrolebinding -n monitor-sa --clusterrole=cluster-admin --serviceaccount=monitor-sa:monitor
clusterrolebinding.rbac.authorization.k8s.io/monitor-clusterrolebinding created
192.168.168.81:9100/metrics
设置告警的配置
[root@master01 prometheus]# rz -E
rz waiting to receive.
[root@master01 prometheus]# ls
node_exporter.yaml node.tar prometheus-alertmanager-cfg.yaml[root@master01 prometheus]# vim prometheus-alertmanager-cfg.yaml 120 - targets: ['192.168.168.81:10251']
121 - job_name: 'kubernetes-controller-manager'
122 scrape_interval: 5s
123 static_configs:
124 - targets: ['192.168.168.81:10252']
125 - job_name: 'kubernetes-kube-proxy'
126 scrape_interval: 5s
127 static_configs:
128           - targets: ['192.168.168.81:10249','192.168.168.82:10249','192.168.168.83:10249']
...
137           - targets: ['192.168.168.81:2379']
...
221         - alert: kube-state-metrics的cpu使用率大于90%
222           expr: rate(process_cpu_seconds_total{k8s_app=~"kube-state-metrics"}[1m]) * 100 > 90
              description: "{{$labels.mountpoint }} 磁盘分区使用大于80%(目前使用:{{$value}}%)"
        - alert: HighPodCpuUsage
          #告警邮件的标题
          expr: sum(rate(container_cpu_usage_seconds_total{namespace="default", pod=~".+"}[5m])) by (pod) > 0.9
          #收集指标数据
          for: 5m
          #占用cpu90%的持续时间5minute,告警
          labels:
            severity: warning
          annotations:
            #告警的内容
            description: "{{ $labels.pod }} 的CPU使用率高于90%."
            summary: "Pod {{ $labels.pod }} 的CPU使用率高"
[root@master01 prometheus]# kubectl apply -f prometheus-alertmanager-cfg.yaml
configmap/prometheus-config created
邮件邮箱设置
prometheus的svc
prometheus告警的svc
prometheus+nodeport部署prometheus
创建secret资源
grafana的yaml文件
[root@master01 prometheus]# vim alter-mail.yaml
# Alertmanager ConfigMap: routes all alerts to one QQ-mail receiver.
kind: ConfigMap
apiVersion: v1
metadata:
  name: alertmanager
  namespace: monitor-sa
data:
  alertmanager.yml: |-
    global:
      resolve_timeout: 1m
      smtp_smarthost: 'smtp.qq.com:25'
      smtp_from: '1435678619@qq.com'
      smtp_auth_username: '1435678619@qq.com'
      # SECURITY: SMTP auth token hardcoded in a ConfigMap; move it to a
      # Kubernetes Secret and rotate the token before committing this file.
      smtp_auth_password: 'yniumbpaclkggfcc'
      smtp_require_tls: false
    route:
      group_by: [alertname]
      group_wait: 10s
      group_interval: 10s
      repeat_interval: 10m
      receiver: default-receiver
    receivers:
    - name: 'default-receiver'
      email_configs:
      - to: '1435678619@qq.com'
        send_resolved: true
[root@master01 prometheus]# kubectl apply -f alter-mail.yaml
configmap/alertmanager created##prometheus的svc
[root@master01 prometheus]# vim prometheus-svc.yaml
# NodePort Service exposing the Prometheus web UI/API (container port 9090).
apiVersion: v1
kind: Service
metadata:
  name: prometheus
  namespace: monitor-sa
  labels:
    app: prometheus
spec:
  type: NodePort
  ports:
  - port: 9090
    targetPort: 9090
    protocol: TCP
  selector:
    app: prometheus
    component: server
##prometheus告警的svc
[root@master01 prometheus]# vim prometheus-alter.yaml
# NodePort Service for Alertmanager, pinned to nodePort 30066.
apiVersion: v1
kind: Service
metadata:
  labels:
    name: prometheus
    kubernetes.io/cluster-service: 'true'
  name: alertmanager
  namespace: monitor-sa
spec:
  ports:
  - name: alertmanager
    nodePort: 30066
    port: 9093
    protocol: TCP
    targetPort: 9093
  selector:
    app: prometheus
  sessionAffinity: None
  type: NodePort
##prometheus+nodeport部署prometheus
[root@master01 prometheus]# vim prometheus-deploy.yaml
# Prometheus server + Alertmanager in one pod; etcd certs mounted from the
# etcd-certs Secret (created below) so Prometheus can scrape etcd over TLS.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus-server
  namespace: monitor-sa
  labels:
    app: prometheus
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus
      component: server
  template:
    metadata:
      labels:
        app: prometheus
        component: server
      annotations:
        prometheus.io/scrape: 'false'
    spec:
      serviceAccountName: monitor
      initContainers:
      # Relax permissions on the hostPath data dir before Prometheus starts.
      - name: init-chmod
        image: busybox:latest
        command: ['sh','-c','chmod -R 777 /prometheus;chmod -R 777 /etc']
        volumeMounts:
        - mountPath: /prometheus
          name: prometheus-storage-volume
        - mountPath: /etc/localtime
          name: timezone
      containers:
      - name: prometheus
        image: prom/prometheus:v2.45.0
        command:
        - prometheus
        - --config.file=/etc/prometheus/prometheus.yml
        - --storage.tsdb.path=/prometheus
        - --storage.tsdb.retention=720h
        - --web.enable-lifecycle
        ports:
        - containerPort: 9090
        volumeMounts:
        - name: prometheus-config
          mountPath: /etc/prometheus/
        - mountPath: /prometheus/
          name: prometheus-storage-volume
        - name: timezone
          mountPath: /etc/localtime
        - name: k8s-certs
          mountPath: /var/run/secrets/kubernetes.io/k8s-certs/etcd/
      - name: alertmanager
        image: prom/alertmanager:v0.20.0
        args:
        - "--config.file=/etc/alertmanager/alertmanager.yml"
        - "--log.level=debug"
        ports:
        - containerPort: 9093
          protocol: TCP
          name: alertmanager
        volumeMounts:
        - name: alertmanager-config
          mountPath: /etc/alertmanager
        - name: alertmanager-storage
          mountPath: /alertmanager
        - name: localtime
          mountPath: /etc/localtime
      volumes:
      - name: prometheus-config
        configMap:
          name: prometheus-config
          defaultMode: 0777
      - name: prometheus-storage-volume
        hostPath:
          path: /data
          type: DirectoryOrCreate
      - name: k8s-certs
        secret:
          secretName: etcd-certs
      - name: timezone
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
      - name: alertmanager-config
        configMap:
          name: alertmanager
      - name: alertmanager-storage
        hostPath:
          path: /data/alertmanager
          type: DirectoryOrCreate
      - name: localtime
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
[root@master01 prometheus]# kubectl apply -f prometheus-deploy.yaml
deployment.apps/prometheus-server created
[root@master01 prometheus]# kubectl apply -f prometheus-svc.yaml
service/prometheus created
[root@master01 prometheus]# kubectl apply -f prometheus-alter.yaml
service/alertmanager created##创建secret资源
[root@master01 prometheus]# kubectl -n monitor-sa create secret generic etcd-certs --from-file=/etc/kubernetes/pki/etcd/server.key --from-file=/etc/kubernetes/pki/etcd/server.crt --from-file=/etc/kubernetes/pki/etcd/ca.crt
secret/etcd-certs created[root@master01 prometheus]# kubectl describe pod -n monitor-sa ##prometheus启动情况
[root@master01 prometheus]# kubectl get pod -n monitor-sa
NAME READY STATUS RESTARTS AGE
node-exporter-487lb 1/1 Running 0 3h50m
node-exporter-76nkz 1/1 Running 0 3h51m
node-exporter-jj92l 1/1 Running 0 3h50m
prometheus-server-55d866cb44-6n2bf 2/2 Running 0 4m4s##查看命名空间下的端口
[root@master01 prometheus]# kubectl get svc -n monitor-sa
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
alertmanager NodePort 10.96.54.65 <none> 9093:30066/TCP 5m25s
prometheus     NodePort   10.96.29.5    <none>        9090:30493/TCP   5m40s
##grafana的yaml文件
[root@master01 prometheus]# vim pro-gra.yaml
# Grafana: PVC on the nfs-client storage class + Deployment + NodePort Service.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: grafana
  namespace: kube-system
spec:
  accessModes:
  - ReadWriteMany
  storageClassName: nfs-client-storageclass
  resources:
    requests:
      storage: 2Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: monitoring-grafana
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      task: monitoring
      k8s-app: grafana
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: grafana
    spec:
      containers:
      - name: grafana
        image: grafana/grafana:7.5.11
        securityContext:
          runAsUser: 104
          runAsGroup: 107
        ports:
        - containerPort: 3000
          protocol: TCP
        volumeMounts:
        - mountPath: /etc/ssl/certs
          name: ca-certificates
          readOnly: false
        - mountPath: /var
          name: grafana-storage
        - mountPath: /var/lib/grafana
          name: graf-test
        env:
        - name: INFLUXDB_HOST
          value: monitoring-influxdb
        - name: GF_SERVER_HTTP_PORT
          value: "3000"
        # anonymous admin access — fine for a lab, not for production
        - name: GF_AUTH_BASIC_ENABLED
          value: "false"
        - name: GF_AUTH_ANONYMOUS_ENABLED
          value: "true"
        - name: GF_AUTH_ANONYMOUS_ORG_ROLE
          value: Admin
        - name: GF_SERVER_ROOT_URL
          value: /
      volumes:
      - name: ca-certificates
        hostPath:
          path: /etc/ssl/certs
      - name: grafana-storage
        emptyDir: {}
      - name: graf-test
        persistentVolumeClaim:
          claimName: grafana
---
apiVersion: v1
kind: Service
metadata:
  # NOTE(review): the squashed source is ambiguous here; reconstructed so that
  # metadata.name matches the observed `service/monitoring-grafana created`.
  labels:
    name: monitoring-grafana
  name: monitoring-grafana
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 3000
  selector:
    k8s-app: grafana
  type: NodePort
[root@master01 prometheus]# kubectl apply -f pro-gra.yaml
persistentvolumeclaim/grafana created
deployment.apps/monitoring-grafana created
service/monitoring-grafana created[root@master01 prometheus]# kubectl get svc -n kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 23d
monitoring-grafana NodePort 10.96.131.109 <none> 80:30901/TCP 39s
http://192.168.168.81:30066/#/alerts
http://192.168.168.81:30493/
http://192.168.168.81:30901/
[root@master01 prometheus]# kubectl edit configmap kube-proxy -n kube-system//处理 kube-proxy 监控告警
kubectl edit configmap kube-proxy -n kube-system
......
metricsBindAddress: "0.0.0.0:10249"
#因为 kube-proxy 默认端口10249是监听在 127.0.0.1 上的,需要改成监听到物理节点上configmap/kube-proxy edited#重新启动 kube-proxy
kubectl get pods -n kube-system | grep kube-proxy |awk '{print $1}' | xargs kubectl delete pods -n kube-system[root@master01 prometheus]# kubectl get pods -n kube-system | grep kube-proxy |awk '{print $1}' | xargs kubectl delete pods -n kube-system
pod "kube-proxy-d5fnf" deleted
pod "kube-proxy-kpvs2" deleted
pod "kube-proxy-nrszf" deleted
http://prometheus.monitor-sa.svc:9090
压力测试
[root@master01 prometheus]# vim ylcs.yaml
# Stress-test Deployment: a CentOS 7 pod that installs `stress` at startup.
# The host's /etc/yum.repos.d/ is mounted in, so the pod uses the node's
# repo config — that is why fixing the node repos below fixes the pod.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hpa-test
  labels:
    hpa: test
spec:
  replicas: 1
  selector:
    matchLabels:
      hpa: test
  template:
    metadata:
      labels:
        hpa: test
    spec:
      containers:
      - name: centos
        image: centos:7
        command: ["/bin/bash", "-c", "yum install -y stress --nogpgcheck && sleep 3600"]
        volumeMounts:
        - name: yum
          mountPath: /etc/yum.repos.d/
      volumes:
      - name: yum
        hostPath:
          path: /etc/yum.repos.d/
[root@master01 prometheus]# kubectl get pod
NAME READY STATUS RESTARTS AGE
hpa-test-c9b658d84-7pvc8 0/1 CrashLoopBackOff 6 10m
nfs1-76f66b958-68wpl 1/1 Running 1 13d[root@master01 prometheus]# kubectl logs -f hpa-test-c9b658d84-7pvc8
Loaded plugins: fastestmirror, ovl
Repository base is listed more than once in the configuration
Repository updates is listed more than once in the configuration
Repository extras is listed more than once in the configuration
Repository centosplus is listed more than once in the configuration
Determining fastest mirrors
Could not retrieve mirrorlist http://mirrorlist.centos.org/?release=7&arch=x86_64&repo=os&infra=container error was
14: curl#6 - "Could not resolve host: mirrorlist.centos.org; Unknown error"One of the configured repositories failed (Unknown),and yum doesn't have enough cached data to continue. At this point the onlysafe thing yum can do is fail. There are a few ways to work "fix" this:1. Contact the upstream for the repository and get them to fix the problem.2. Reconfigure the baseurl/etc. for the repository, to point to a workingupstream. This is most often useful if you are using a newerdistribution release than is supported by the repository (and thepackages for the previous distribution release still work).3. Run the command with the repository temporarily disabledyum --disablerepo=<repoid> ...4. Disable the repository permanently, so yum won't use it by default. Yumwill then just ignore the repository until you permanently enable itagain or use --enablerepo for temporary usage:yum-config-manager --disable <repoid>orsubscription-manager repos --disable=<repoid>5. Configure the failing repository to be skipped, if it is unavailable.Note that yum will try to contact the repo. when it runs most commands,so will have to try and fail each time (and thus. yum will be be muchslower). If it is a very temporary problem though, this is often a nicecompromise:yum-config-manager --save --setopt=<repoid>.skip_if_unavailable=trueCannot find a valid baseurl for repo: base/7/x86_64[root@master01 prometheus]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
--2024-09-19 14:31:18-- http://mirrors.aliyun.com/repo/Centos-7.repo
正在解析主机 mirrors.aliyun.com (mirrors.aliyun.com)... 114.232.93.242, 58.218.92.241, 114.232.93.243, ...
正在连接 mirrors.aliyun.com (mirrors.aliyun.com)|114.232.93.242|:80... 已连接。
已发出 HTTP 请求,正在等待回应... 200 OK
长度:2523 (2.5K) [application/octet-stream]
正在保存至: “/etc/yum.repos.d/CentOS-Base.repo”100%[==================================>] 2,523 --.-K/s 用时 0s 2024-09-19 14:31:18 (106 MB/s) - 已保存 “/etc/yum.repos.d/CentOS-Base.repo” [2523/2523])
解决报错
[root@master01 prometheus]# kubectl delete -f ylcs.yaml
deployment.apps "hpa-test" deleted
[root@master01 prometheus]# kubectl apply -f ylcs.yaml
deployment.apps/hpa-test created[root@master01 prometheus]# cd /etc/yum.repos.d/
[root@master01 yum.repos.d]# ls
backup CentOS-Debuginfo.repo CentOS-Vault.repo kubernetes.repo
Centos-7.repo CentOS-fasttrack.repo docker-ce.repo local.repo
CentOS-Base.repo CentOS-Media.repo epel.repo
CentOS-CR.repo CentOS-Sources.repo epel-testing.repo
[root@master01 yum.repos.d]# rm -rf local.repo
[root@master01 yum.repos.d]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
--2024-09-19 14:38:36-- http://mirrors.aliyun.com/repo/Centos-7.repo
正在解析主机 mirrors.aliyun.com (mirrors.aliyun.com)... 114.232.93.240, 58.218.92.243, 114.232.93.241, ...
正在连接 mirrors.aliyun.com (mirrors.aliyun.com)|114.232.93.240|:80... 已连接。
已发出 HTTP 请求,正在等待回应... 200 OK
长度:2523 (2.5K) [application/octet-stream]
正在保存至: “/etc/yum.repos.d/CentOS-Base.repo”100%[==================================>] 2,523 --.-K/s 用时 0s 2024-09-19 14:38:36 (73.3 MB/s) - 已保存 “/etc/yum.repos.d/CentOS-Base.repo” [2523/2523])[root@master01 yum.repos.d]# cd -
/opt/prometheus
[root@master01 prometheus]# kubectl get pod
NAME READY STATUS RESTARTS AGE
hpa-test-c9b658d84-bs457 0/1 Error 3 50s
nfs1-76f66b958-68wpl 1/1 Running 1 13d
[root@master01 prometheus]# kubectl delete -f ylcs.yaml
deployment.apps "hpa-test" deleted
[root@master01 prometheus]# kubectl get pod
NAME READY STATUS RESTARTS AGE
hpa-test-c9b658d84-bs457 0/1 Terminating 3 56s
nfs1-76f66b958-68wpl 1/1 Running 1 13d
[root@master01 prometheus]# kubectl get pod
NAME READY STATUS RESTARTS AGE
hpa-test-c9b658d84-bs457 0/1 Terminating 3 57s
nfs1-76f66b958-68wpl 1/1 Running 1 13d
[root@master01 prometheus]# kubectl get pod
NAME READY STATUS RESTARTS AGE
hpa-test-c9b658d84-bs457 0/1 Terminating 3 58s
nfs1-76f66b958-68wpl 1/1 Running 1 13d
[root@master01 prometheus]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nfs1-76f66b958-68wpl 1/1 Running 1 13d
[root@master01 prometheus]# kubectl apply -f ylcs.yaml
deployment.apps/hpa-test created
[root@master01 prometheus]# kubectl get pod
NAME READY STATUS RESTARTS AGE
hpa-test-c9b658d84-h9xvf 1/1 Running 0 1s
nfs1-76f66b958-68wpl 1/1 Running 1 13d[root@node01 ~]# cd /etc/yum.repos.d/
[root@node01 yum.repos.d]# ls
backup CentOS-Debuginfo.repo CentOS-Vault.repo kubernetes.repo
Centos-7.repo CentOS-fasttrack.repo docker-ce.repo local.repo
CentOS-Base.repo CentOS-Media.repo epel.repo
CentOS-CR.repo CentOS-Sources.repo epel-testing.repo
[root@node01 yum.repos.d]# rm -rf local.repo [root@node02 ~]# cd /etc/yum.repos.d/
[root@node02 yum.repos.d]# ls
backup CentOS-Debuginfo.repo CentOS-Vault.repo kubernetes.repo
Centos-7.repo CentOS-fasttrack.repo docker-ce.repo local.repo
CentOS-Base.repo CentOS-Media.repo epel.repo
CentOS-CR.repo CentOS-Sources.repo epel-testing.repo
[root@node02 yum.repos.d]# rm -rf local.repo
[root@node02 yum.repos.d]# ls[root@node01 yum.repos.d]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo[root@node02 yum.repos.d]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo[root@master01 prometheus]# kubectl apply -f ylcs.yaml
deployment.apps/hpa-test created
[root@master01 prometheus]# kubectl get pod
NAME READY STATUS RESTARTS AGE
hpa-test-c9b658d84-cqklr 1/1 Running 0 3s
nfs1-76f66b958-68wpl 1/1 Running 1 13d[root@master01 prometheus]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
hpa-test-c9b658d84-cqklr 1/1 Running 0 110s 10.244.2.251 node02 <none> <none>
nfs1-76f66b958-68wpl 1/1 Running 1 13d 10.244.2.173 node02 <none> <none>##到node02上top情况
[root@node02 yum.repos.d]#
[root@master01 prometheus]# kubectl exec -it hpa-test-c9b658d84-cqklr bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
[root@hpa-test-c9b658d84-cqklr /]# stress -c 4
stress: info: [64] dispatching hogs: 4 cpu, 0 io, 0 vm, 0 hdd