Installing Docker itself is not covered here; the machines are assumed to already have Docker installed.

Install kubeadm, kubelet and kubectl, then create the cluster with kubeadm.
Before you start, make sure Docker is installed, every node's hostname is listed in /etc/hosts on every machine, and passwordless SSH is set up between the nodes.
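
For reference, a minimal sketch of those two prerequisites, reusing the node names and IPs that appear later in this post:

# On every node: map the hostnames to their IPs
cat >> /etc/hosts <<'EOF'
192.168.1.65 node1
192.168.1.69 node2
192.168.1.79 node3
EOF

# On the master: create a key pair and copy it to the other nodes for passwordless SSH
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
ssh-copy-id root@node2
ssh-copy-id root@node3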

# Add the Tsinghua mirror repo
vim /etc/yum.repos.d/kubernetes.repo

[kubernetes]
name=kubernetes
baseurl=https://mirrors.tuna.tsinghua.edu.cn/kubernetes/yum/repos/kubernetes-el7-$basearch
enabled=1

# Check that the kubeadm package is available

dnf search kubeadm

# Install the packages

dnf install kubeadm kubelet kubectl -y --nogpgcheck

# Enable kubelet

systemctl enable kubelet

# Disable the swap partition

swapoff -a
# Check that swap now shows 0
free -h

# Keep swap from being mounted at boot: edit this file and comment out the swap line with a leading #
vim /etc/fstab
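
If you prefer not to edit the file by hand, a one-liner does the same thing (GNU sed assumed):

# Comment out any uncommented swap entry in /etc/fstab
sed -ri 's/^([^#].*\bswap\b.*)$/#\1/' /etc/fstab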

# List the images kubeadm needs

kubeadm config images list

# Output:

k8s.gcr.io/kube-apiserver:v1.21.2
k8s.gcr.io/kube-controller-manager:v1.21.2
k8s.gcr.io/kube-scheduler:v1.21.2
k8s.gcr.io/kube-proxy:v1.21.2
k8s.gcr.io/pause:3.4.1
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns/coredns:v1.8.0
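
The exact list depends on the kubeadm version you installed; if yours differs, you can ask for a specific release:

kubeadm config images list --kubernetes-version v1.21.2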

# Pull these images locally first; create a script that fetches them through the Aliyun mirror
vim pull_k8s_config_images.sh

#!/bin/bash

docker pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.21.2
docker tag registry.aliyuncs.com/google_containers/kube-apiserver:v1.21.2 k8s.gcr.io/kube-apiserver:v1.21.2
docker rmi registry.aliyuncs.com/google_containers/kube-apiserver:v1.21.2
docker tag k8s.gcr.io/kube-apiserver:v1.21.2   192.168.1.65:5000/k8s.gcr.io/kube-apiserver:v1.21.2
docker push 192.168.1.65:5000/k8s.gcr.io/kube-apiserver:v1.21.2

docker pull registry.aliyuncs.com/google_containers/kube-controller-manager:v1.21.2  
docker tag registry.aliyuncs.com/google_containers/kube-controller-manager:v1.21.2  k8s.gcr.io/kube-controller-manager:v1.21.2
docker rmi registry.aliyuncs.com/google_containers/kube-controller-manager:v1.21.2  
docker tag k8s.gcr.io/kube-controller-manager:v1.21.2 192.168.1.65:5000/k8s.gcr.io/kube-controller-manager:v1.21.2
docker push 192.168.1.65:5000/k8s.gcr.io/kube-controller-manager:v1.21.2

docker pull registry.aliyuncs.com/google_containers/kube-scheduler:v1.21.2           
docker tag registry.aliyuncs.com/google_containers/kube-scheduler:v1.21.2           k8s.gcr.io/kube-scheduler:v1.21.2
docker rmi registry.aliyuncs.com/google_containers/kube-scheduler:v1.21.2           
docker tag k8s.gcr.io/kube-scheduler:v1.21.2    192.168.1.65:5000/k8s.gcr.io/kube-scheduler:v1.21.2
docker push 192.168.1.65:5000/k8s.gcr.io/kube-scheduler:v1.21.2

docker pull registry.aliyuncs.com/google_containers/kube-proxy:v1.21.2               
docker tag registry.aliyuncs.com/google_containers/kube-proxy:v1.21.2               k8s.gcr.io/kube-proxy:v1.21.2
docker rmi registry.aliyuncs.com/google_containers/kube-proxy:v1.21.2               
docker tag k8s.gcr.io/kube-proxy:v1.21.2    192.168.1.65:5000/k8s.gcr.io/kube-proxy:v1.21.2
docker push 192.168.1.65:5000/k8s.gcr.io/kube-proxy:v1.21.2

docker pull registry.aliyuncs.com/google_containers/pause:3.4.1                      
docker tag registry.aliyuncs.com/google_containers/pause:3.4.1                      k8s.gcr.io/pause:3.4.1
docker rmi registry.aliyuncs.com/google_containers/pause:3.4.1                      
docker tag k8s.gcr.io/pause:3.4.1   192.168.1.65:5000/k8s.gcr.io/pause:3.4.1
docker push 192.168.1.65:5000/k8s.gcr.io/pause:3.4.1

docker pull registry.aliyuncs.com/google_containers/etcd:3.4.13-0                    
docker tag registry.aliyuncs.com/google_containers/etcd:3.4.13-0                    k8s.gcr.io/etcd:3.4.13-0
docker rmi registry.aliyuncs.com/google_containers/etcd:3.4.13-0                    
docker tag k8s.gcr.io/etcd:3.4.13-0    192.168.1.65:5000/k8s.gcr.io/etcd:3.4.13-0
docker push 192.168.1.65:5000/k8s.gcr.io/etcd:3.4.13-0

# The Aliyun mirror does not carry coredns under the nested coredns/coredns path, so pull it from Docker Hub and retag it instead
docker pull coredns/coredns:1.8.0
docker tag coredns/coredns:1.8.0            k8s.gcr.io/coredns/coredns:v1.8.0
docker rmi coredns/coredns:1.8.0           
docker tag k8s.gcr.io/coredns/coredns:v1.8.0    192.168.1.65:5000/k8s.gcr.io/coredns/coredns:v1.8.0
docker push 192.168.1.65:5000/k8s.gcr.io/coredns/coredns:v1.8.0

PS: I have a local image registry, so after pulling I push each image to it; the other nodes can then pull everything from there instead of going out to the internet. If you don't have a local registry, drop the last two lines (tag and push) for each image.
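
One assumption worth spelling out: if the local registry at 192.168.1.65:5000 speaks plain HTTP, Docker on every node has to trust it before the pushes (and later pulls) will work. A minimal sketch, assuming /etc/docker/daemon.json contains no other settings you need to keep:

cat > /etc/docker/daemon.json <<'EOF'
{
  "insecure-registries": ["192.168.1.65:5000"]
}
EOF
systemctl restart docker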

Pull the images:

chmod a+x pull_k8s_config_images.sh
./pull_k8s_config_images.sh
docker images

# Now create the cluster with kubeadm init

kubeadm init --apiserver-advertise-address=192.168.1.65 --service-cidr=10.1.0.0/16 --pod-network-cidr=10.244.0.0/16
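
An alternative, if you would rather not maintain the pull/tag script at all, is to let kubeadm pull straight from the Aliyun mirror (not the route I took here, just an option):

kubeadm init --image-repository registry.aliyuncs.com/google_containers \
    --apiserver-advertise-address=192.168.1.65 --service-cidr=10.1.0.0/16 --pod-network-cidr=10.244.0.0/16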

When you see the following output, the control plane was created successfully. Time for a small celebration.

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.1.65:6443 --token vvrbdu.w5hc25hp418cixw9 \
    --discovery-token-ca-cert-hash sha256:e1c3ff1b568bf21114e3be23a6d93753b820ceec386b0fff0054dd9a13e901cc

The output above shows the command a node runs to join the cluster; use it to bring in the other machines.

To start using the cluster as a regular user, run:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Configure kubectl credentials
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source ~/.bash_profile

Or, if you are the root user, run:

  export KUBECONFIG=/etc/kubernetes/admin.conf
  echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile

# Make a note of these lines; nodes need them to authenticate when joining the cluster
kubeadm join ......
# If you lose them, regenerate the join command on the master:
kubeadm token create --print-join-command
**# Note: the token is only valid for 24 hours by default; once it has expired, nodes can no longer join and you have to create a new token**
kubeadm token list # list the currently valid tokens
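
If you would rather not deal with the 24-hour expiry in a lab cluster, kubeadm can also issue a non-expiring token (use with care):

kubeadm token create --ttl 0 --print-join-command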

# Check the cluster pods and confirm that every component is Running

[root@node1 ~]# kubectl get pod -n kube-system
NAME                            READY   STATUS              RESTARTS   AGE
coredns-558bd4d5db-flm5j        0/1     Pending             0          22m
coredns-558bd4d5db-rt8nd        0/1     Pending             0          22m
etcd-node1                      1/1     Running             0          22m
kube-apiserver-node1            1/1     Running             0          22m
kube-controller-manager-node1   1/1     Running             0          22m
kube-proxy-9r9zh                1/1     Running             0          22m
kube-proxy-h88jk                0/1     ContainerCreating   0          15m
kube-proxy-stljm                0/1     ContainerCreating   0          15m
kube-scheduler-node1            1/1     Running             0          22m
**# More detail:**
[root@node1 ~]# kubectl get pod --all-namespaces -o wide
NAMESPACE     NAME                            READY   STATUS              RESTARTS   AGE   IP             NODE     NOMINATED NODE   READINESS GATES
kube-system   coredns-558bd4d5db-flm5j        0/1     Pending             0          25m   <none>         <none>   <none>           <none>
kube-system   coredns-558bd4d5db-rt8nd        0/1     Pending             0          25m   <none>         <none>   <none>           <none>
kube-system   etcd-node1                      1/1     Running             0          25m   192.168.1.65   node1    <none>           <none>
kube-system   kube-apiserver-node1            1/1     Running             0          25m   192.168.1.65   node1    <none>           <none>
kube-system   kube-controller-manager-node1   1/1     Running             0          25m   192.168.1.65   node1    <none>           <none>
kube-system   kube-proxy-9r9zh                1/1     Running             0          25m   192.168.1.65   node1    <none>           <none>
kube-system   kube-proxy-h88jk                0/1     ContainerCreating   0          18m   192.168.1.79   node3    <none>           <none>
kube-system   kube-proxy-stljm                0/1     ContainerCreating   0          18m   192.168.1.69   node2    <none>           <none>
kube-system   kube-scheduler-node1            1/1     Running             0          25m   192.168.1.65   node1    <none>           <none>

node2 and node3 have not pulled the images yet, so their pods are stuck in ContainerCreating.
# After the two nodes pulled those images from the local registry, running the command on the master again showed them Running (presumably this went so smoothly because the three nodes had previously formed a docker swarm cluster).

[root@node1 ~]# kubectl get pod --all-namespaces -o wide
NAMESPACE     NAME                            READY   STATUS    RESTARTS   AGE   IP             NODE     NOMINATED NODE   READINESS GATES
kube-system   coredns-558bd4d5db-flm5j        0/1     Pending   0          33m   <none>         <none>   <none>           <none>
kube-system   coredns-558bd4d5db-rt8nd        0/1     Pending   0          33m   <none>         <none>   <none>           <none>
kube-system   etcd-node1                      1/1     Running   0          33m   192.168.1.65   node1    <none>           <none>
kube-system   kube-apiserver-node1            1/1     Running   0          33m   192.168.1.65   node1    <none>           <none>
kube-system   kube-controller-manager-node1   1/1     Running   0          33m   192.168.1.65   node1    <none>           <none>
kube-system   kube-proxy-9r9zh                1/1     Running   0          33m   192.168.1.65   node1    <none>           <none>
kube-system   kube-proxy-h88jk                1/1     Running   0          26m   192.168.1.79   node3    <none>           <none>
kube-system   kube-proxy-stljm                1/1     Running   0          26m   192.168.1.69   node2    <none>           <none>
kube-system   kube-scheduler-node1            1/1     Running   0          33m   192.168.1.65   node1    <none>           <none>

On node2 you can see the containers running:

[root@node2 ~]# docker ps
CONTAINER ID   IMAGE                                COMMAND                  CREATED         STATUS         PORTS      NAMES
081e6c14a13b   a6ebd1c1ad98                         "/usr/local/bin/kube…"   8 minutes ago   Up 8 minutes              k8s_kube-proxy_kube-proxy-stljm_kube-system_44bbbc1a-00fb-4f1e-bcdd-90580f6083c2_0
bb5d2dc31f19   k8s.gcr.io/pause:3.4.1               "/pause"                 8 minutes ago   Up 8 minutes              k8s_POD_kube-proxy-stljm_kube-system_44bbbc1a-00fb-4f1e-bcdd-90580f6083c2_0

# The master still needs some configuration, so coredns cannot start yet
# Node status at this point:

[root@node1 ~]# kubectl get nodes
NAME    STATUS     ROLES                  AGE   VERSION
node1   NotReady   control-plane,master   43m   v1.21.2
node2   NotReady   <none>                 36m   v1.21.2
node3   NotReady   <none>                 35m   v1.21.2

# Set up the flannel network

mkdir -p /root/k8s/
cd /root/k8s
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

# The "Network": "10.244.0.0/16" value in this yml must match the --pod-network-cidr passed to kubeadm init;
# otherwise cluster IPs may not be reachable between nodes.
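
A quick way to check the value before applying (the key sits in the net-conf.json section of the manifest):

grep -n '"Network"' kube-flannel.yml
# should print something like: "Network": "10.244.0.0/16"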

# Check which image needs to be downloaded

cat kube-flannel.yml |grep image|uniq
#image: quay.io/coreos/flannel:v0.13.1-rc1

# Pull the image

docker pull quay.io/coreos/flannel:v0.13.1-rc1
# Push it to the local registry (use whichever version your copy of kube-flannel.yml references)
docker tag quay.io/coreos/flannel:v0.13.1-rc1 192.168.1.65:5000/quay.io/coreos/flannel:v0.13.1-rc1
docker push 192.168.1.65:5000/quay.io/coreos/flannel:v0.13.1-rc1
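
If the worker nodes can only reach the local registry, you may also want to point the DaemonSet at it. A sketch, assuming your copy of the yml references v0.13.1-rc1:

sed -i 's#quay.io/coreos/flannel:v0.13.1-rc1#192.168.1.65:5000/quay.io/coreos/flannel:v0.13.1-rc1#g' kube-flannel.yml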

# Deploy the add-on

kubectl apply -f kube-flannel.yml

# PS: to remove the add-on, use kubectl delete -f kube-flannel.yml. Don't do this lightly; it may not clean up everything.

# The services come up on their own; wait a moment and check the nodes again, they are now Ready

[root@node1 k8s]# kubectl get nodes
NAME    STATUS   ROLES                  AGE   VERSION
node1   Ready    control-plane,master   51m   v1.21.2
node2   Ready    <none>                 44m   v1.21.2
node3   Ready    <none>                 44m   v1.21.2

# This is what a healthy cluster should look like
[root@node1 k8s]# kubectl get pod --all-namespaces
NAMESPACE     NAME                            READY   STATUS    RESTARTS   AGE
kube-system   coredns-558bd4d5db-flm5j        1/1     Running   0          52m
kube-system   coredns-558bd4d5db-rt8nd        1/1     Running   0          52m
kube-system   etcd-node1                      1/1     Running   0          52m
kube-system   kube-apiserver-node1            1/1     Running   0          52m
kube-system   kube-controller-manager-node1   1/1     Running   0          52m
kube-system   kube-flannel-ds-b87lk           1/1     Running   0          101s
kube-system   kube-flannel-ds-kpghj           1/1     Running   0          101s
kube-system   kube-flannel-ds-tmrr2           1/1     Running   0          101s
kube-system   kube-proxy-9r9zh                1/1     Running   0          52m
kube-system   kube-proxy-h88jk                1/1     Running   0          45m
kube-system   kube-proxy-stljm                1/1     Running   0          45m
kube-system   kube-scheduler-node1            1/1     Running   0          52m

# Optionally set up kubectl command completion (works on the worker machines too)

yum install -y bash-completion
source <(kubectl completion bash) # takes effect now, lost after a reboot
echo "source <(kubectl completion bash)" >> ~/.bashrc # persists across reboots
source  ~/.bashrc

You may run into the following warning; here is the fix.

# Check cluster component health

kubectl get cs

# scheduler            Unhealthy
# controller-manager   Unhealthy
# The cause: kube-controller-manager.yaml and kube-scheduler.yaml under /etc/kubernetes/manifests set the default (insecure) port to 0:
# - --port=0

# Comment that line out, then restart kubelet on all three machines.
# Reference: https://llovewxm1314.blog.csdn.net/article/details/108458197
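
Roughly what the edit looks like on the master (back the manifests up first; kubelet re-reads static pod manifests on its own, the restart below just hurries it along):

sed -i 's/- --port=0/# &/' /etc/kubernetes/manifests/kube-scheduler.yaml
sed -i 's/- --port=0/# &/' /etc/kubernetes/manifests/kube-controller-manager.yaml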

systemctl status kubelet.service
systemctl restart kubelet.service

Worker node setup
The workers also need the two base images. Because my three machines had previously formed a docker swarm cluster, node2 and node3 already show up as cluster nodes; a fresh machine needs these steps before joining the cluster.

docker pull registry.aliyuncs.com/google_containers/pause:3.2
docker tag registry.aliyuncs.com/google_containers/pause:3.2 k8s.gcr.io/pause:3.2
docker rmi registry.aliyuncs.com/google_containers/pause:3.2

# The versions on the workers do not have to match the master exactly (within the supported skew)
docker pull registry.aliyuncs.com/google_containers/kube-proxy:v1.19.4
docker tag registry.aliyuncs.com/google_containers/kube-proxy:v1.19.4 k8s.gcr.io/kube-proxy:v1.19.4
docker rmi registry.aliyuncs.com/google_containers/kube-proxy:v1.19.4


# I used my local registry instead
docker pull 192.168.1.65:5000/k8s.gcr.io/kube-proxy:v1.21.2
docker tag 192.168.1.65:5000/k8s.gcr.io/kube-proxy:v1.21.2  k8s.gcr.io/kube-proxy:v1.21.2
docker pull 192.168.1.65:5000/k8s.gcr.io/pause:3.4.1
docker tag 192.168.1.65:5000/k8s.gcr.io/pause:3.4.1  k8s.gcr.io/pause:3.4.1

Running kubectl on the worker nodes
The same kubectl commands as on the master can also be run on a worker:
# Copy admin.conf to the worker nodes

scp /etc/kubernetes/admin.conf node2:/etc/kubernetes
scp /etc/kubernetes/admin.conf node3:/etc/kubernetes

# Set the environment variable

echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
export KUBECONFIG=/etc/kubernetes/admin.conf

# Register the new node
# Join the current worker to the cluster

kubeadm join 192.168.1.65:6443 --token vvrbdu.w5hc25hp418cixw9 \
    --discovery-token-ca-cert-hash sha256:e1c3ff1b568bf21114e3be23a6d93753b820ceec386b0fff0054dd9a13e901cc

# Check the cluster state from the master:

kubectl cluster-info
kubectl get cs
kubectl get pod  --all-namespaces
kubectl get nodes
kubectl get service
kubectl get serviceaccounts

# Removing node node3

Run the following, first on the master and then on the worker, and then go through the join procedure again.

#Master
[root@node1 ~]# kubectl delete nodes node3
node "node3" deleted

#node3
[root@node3 ~]# docker ps -qa | xargs docker rm -f
[root@node3 ~]# rm -rf /etc/kubernetes/kubelet.conf 
[root@node3 ~]# systemctl restart docker.service kubelet.service
[root@node3 ~]# rm -rf /etc/kubernetes/pki/ca.crt

# Rejoin the cluster

kubeadm join 192.168.1.65:6443 --token vvrbdu.w5hc25hp418cixw9 --discovery-token-ca-cert-hash sha256:e1c3ff1b568bf21114e3be23a6d93753b820ceec386b0fff0054dd9a13e901cc

[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

# Check the nodes

[root@node3 ~]# kubectl get  nodes
NAME    STATUS   ROLES                  AGE   VERSION
node1   Ready    control-plane,master   85m   v1.21.2
node2   Ready    <none>                 78m   v1.21.2
node3   Ready    <none>                 54s   v1.21.2

# Troubleshooting
How to reset the master

# Resetting Kubernetes on the master means starting over from kubeadm init. It rarely cleans up everything and tends to cause problems afterwards, so use it with care.

kubeadm reset

Joining a worker to a new master

A worker node that has already been used may fail to join a new master with errors like:
[kubelet-check] Initial timeout of 40s passed.
timed out waiting for the condition
error uploading crisocket

# The fix is to reset it. Resetting a worker is straightforward and rarely causes problems.

kubeadm reset
systemctl daemon-reload
systemctl restart kubelet
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
# will reset iptables

To show a role name for the workers on the master, set a label:

kubectl label nodes node2 node-role.kubernetes.io/worker=worker
[root@node1 ~]# kubectl get nodes
NAME    STATUS   ROLES                  AGE    VERSION
node1   Ready    control-plane,master   90m    v1.21.2
node2   Ready    worker                 83m    v1.21.2
node3   Ready    <none>                 6m2s   v1.21.2
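
A label is removed with a trailing dash on the key, in case you want to undo or rename it:

kubectl label nodes node2 node-role.kubernetes.io/worker-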

A simple example
A Pod is the smallest and simplest unit you can create and deploy in Kubernetes. It wraps the application container (or several containers), storage, a dedicated network IP, and the policy options that control how the containers run.

Running a single container in a Pod

In the cluster we just set up, create an nginx container from nginx-svc.yaml:

apiVersion: v1
kind: Pod
metadata:
  name: nginx-test
  labels:
    app: web
spec:
  containers:
  - name: front-end
    image: nginx:1.7.9
    ports:
    - containerPort: 80

Run the commands:

**# Create the Pod**
kubectl create -f ./nginx-svc.yaml

**# List Pods**
kubectl get po

**# Show Pod details**
kubectl describe po nginx-test

**# Open a shell inside the Pod (container)**
kubectl exec -it nginx-test /bin/bash
# Newer versions use this form, with -- and a space before the command
kubectl exec -it nginx-test -- /bin/bash
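
When you are done experimenting, delete the Pod the same way it was created:

kubectl delete -f ./nginx-svc.yaml
# or: kubectl delete pod nginx-test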

Running multiple containers in one Pod

Configuration file nginx-redis-svc.yaml:

apiVersion: v1
kind: Pod
metadata:
  name: rss-site
  labels:
    app: rss-web
spec:
  containers:
    - name: front-nginx
      image: nginx:1.7.9
      ports:
        - containerPort: 80
    - name: rss-reader
      image: redis
      ports:
        - containerPort: 88

Run the commands:

kubectl create -f ./nginx-redis-svc.yaml
kubectl get po
kubectl describe po rss-site 
kubectl exec -it rss-site -c front-nginx /bin/bash 
kubectl exec -it rss-site -c rss-reader /bin/bash
# Newer versions use this form
kubectl exec -it rss-site -c front-nginx -- /bin/bash 
kubectl exec -it rss-site -c rss-reader -- /bin/bash

One point worth highlighting: the Pod contains two containers, and they share a single root pause container:
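You can see this on whichever node the Pod landed on; with the Docker runtime the container names follow the k8s_<container>_<pod>_... pattern, so something like:

docker ps | grep rss-site
# expect three entries: k8s_front-nginx_..., k8s_rss-reader_... and one shared k8s_POD_... pause container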

Install the web dashboard

kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml

For a dev/test environment, generate a self-signed SSL certificate so the web UI login works:

openssl genrsa -out server.key 1024
openssl req -new -key server.key -out server.csr
openssl x509 -req -in server.csr -out server.crt -signkey server.key -days 3650

Create a login account and token
Admin account:

vim dashboard-adminuser.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
Bind the cluster access role:
vim dashboard-clusterRoleBinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
kubectl apply -f dashboard-adminuser.yaml
kubectl apply -f dashboard-clusterRoleBinding.yaml

Retrieve the login token:

kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/admin-user -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"

Start a proxy to reach the dashboard:

kubectl proxy --accept-hosts='^.*$' --accept-paths='^/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/'

Starting to serve on 127.0.0.1:8001
Dashboard URL:
http://127.0.0.1:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/

The default port is 8001 and the address is only reachable from the host itself. You can put nginx in front of it as a reverse proxy, attach the certificate generated above, and access it as https://192.168.1.65:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/
Without HTTPS the token login on the dashboard will not work.

Reference: https://www.cnblogs.com/wenyang321/p/14050893.html
