1. Prerequisites

Server distribution version

[root@localhost ~]# cat /etc/redhat-release
CentOS Linux release 7.7.1908 (Core)

Server kernel version

[root@localhost ~]# cat /proc/version
Linux version 3.10.0-1062.el7.x86_64 (mockbuild@kbuilder.bsys.centos.org) (gcc version 4.8.5 20150623 (Red Hat 4.8.5-36) (GCC) ) #1 SMP Wed Aug 7 18:08:02 UTC 2019
Hostname            IP address        Role           CPU                Memory  Disk  Description
Kubernetes-Base     192.168.17.110    Base image     2 cores/2 threads  2G      20G   Kubernetes base image
Kubernetes-Master   192.168.17.120    Master node    2 cores/2 threads  2G      20G   Kubernetes master node
Kubernetes-Slave1   192.168.17.121    Worker node 1  2 cores/2 threads  2G      20G   Kubernetes worker node
Kubernetes-Slave2   192.168.17.122    Worker node 2  2 cores/2 threads  2G      20G   Kubernetes worker node
Kubernetes-Volumes  192.168.17.130    Volume server  2 cores/2 threads  2G      20G   Kubernetes volume server

1.1 Disable swap

vim /etc/fstab

Comment out the swap entry (see the sketch below):

image-20200601172907834
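A minimal command-line sketch of the same change, assuming the swap entry in /etc/fstab contains the word "swap" (check the file before running):

swapoff -a                              # turn swap off immediately
sed -i '/ swap / s/^/#/' /etc/fstab     # comment out the swap entry so it stays off after reboot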

Run the following command:

echo vm.swappiness=0 >> /etc/sysctl.conf

Reboot the server:

reboot

Check memory:

free -h

image-20200601175544996

1.2 Disable the firewall

# Check status
systemctl status firewalld.service
# Stop the firewall
systemctl stop firewalld.service
# Disable the firewall from starting at boot
systemctl disable firewalld.service

1.3 Install Docker

First download the Docker offline package (docker-18.06.1-ce.tar.gz).

#!/bin/bash

# One-click Docker install

# Create : 2019-12-13
# Update : 2019-12-13
# @Author : wuduoqiang

# Name of the software to install
software=docker
# Package to install
version=docker-18.06.1-ce.tar.gz
# Directory name after extraction
folder=docker-18.06.1-ce

# Check whether the service is already running
systemctl status $software &>/dev/null
if [ $? -eq 0 ];then
    echo "$software is already running!"
    exit
fi

# Check whether the command already exists
if command -v $software > /dev/null 2>&1;
then
    echo "$software is already installed!"
    $software -v
    exit
fi

# Check whether the RPM package is installed
if rpm -q $software &>/dev/null;
then
    echo "$software is already installed!"
    $software -v
    exit
fi

# Start the installation
echo "Extracting the $software package!"
tar -zxvf $version
echo "Configuring $software!"
sudo cp $folder/* /usr/bin/
sudo mv /usr/bin/docker.service /etc/systemd/system/
echo "Reloading systemd configuration!"
systemctl daemon-reload
echo "Starting the $software service!"
systemctl start $software
echo "Enabling start at boot!"
systemctl enable docker.service
echo "$software service status:"
systemctl status $software
echo "Installed docker version:"
$software -v
echo "Installed docker-compose version:"
docker-compose -v
sudo rm -rf $folder

Uninstall Docker

#!/bin/bash

# One-click Docker uninstall

# Create : 2019-12-13
# Update : 2019-12-13
# @Author : wuduoqiang

# Software to uninstall
software=docker

function undocker(){
    echo "禁止启动"
    systemctl enable docker.service
    echo "停止服务"
    systemctl stop $software
    echo "移除文件"
    cd /usr/bin/
    sudo rm docker docker-containerd docker-containerd-ctr docker-containerd-shim dockerd docker-init docker-proxy docker-runc docker-compose
    cd /etc/systemd/system/
    sudo rm docker.service
    echo "卸载成功"
    exit
}
# Check whether the service is running
systemctl status $software &>/dev/null
if [ $? -eq 0 ];then
    undocker
fi

# Check whether the command exists
if command -v $software > /dev/null 2>&1;
then
    undocker
fi

# Check whether the RPM package is installed
if rpm -q $software &>/dev/null;
then
    undocker
fi

echo "docker is not installed!"
exit

Configure a Docker registry mirror

# Edit the configuration file
vim /etc/docker/daemon.json
{
  "registry-mirrors": ["https://v8o5rqy6.mirror.aliyuncs.com"]
}
# Reload the systemd daemon
sudo systemctl daemon-reload
# Restart the Docker service
sudo systemctl restart docker

1.4 Configure time synchronization

Check the system's current time zone:

[root@localhost ~]# timedatectl
      Local time: 三 2020-06-24 15:13:36 EDT
  Universal time: 三 2020-06-24 19:13:36 UTC
        RTC time: 三 2020-06-24 19:13:36
       Time zone: America/New_York (EDT, -0400)
     NTP enabled: yes
NTP synchronized: yes
 RTC in local TZ: no
      DST active: yes
 Last DST change: DST began at
                  日 2020-03-08 01:59:59 EST
                  日 2020-03-08 03:00:00 EDT
 Next DST change: DST ends (the clock jumps one hour backwards) at
                  日 2020-11-01 01:59:59 EDT
                  日 2020-11-01 01:00:00 EST

Set the system time zone:

timedatectl set-timezone Asia/Shanghai

If the time is off, synchronize it with an NTP server:

yum install -y ntpdate
ntpdate us.pool.ntp.org

Write the system time to the hardware clock:

hwclock --systohc

Verify that the time has been updated:

[root@localhost ~]# date
2020年 06月 25日 星期四 03:17:29 CST

1.5 Change the hostname

# Check the hostname
hostnamectl status
# Set the hostname
hostnamectl set-hostname kubernetes-volumes
# Update /etc/hosts
cat >> /etc/hosts << EOF
192.168.17.130 kubernetes-volumes
EOF
# Reboot the server
reboot

1.6 Install the required Kubernetes tools

Install the three required Kubernetes tools: kubeadm, kubelet, and kubectl.

# Configure the yum repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Install
yum install -y kubelet-1.17.3 kubeadm-1.17.3 kubectl-1.17.3
systemctl enable kubelet && systemctl start kubelet

1.7 Disable SELinux

# Disable temporarily, no reboot required
setenforce 0
# Check the current mode
getenforce
# Disable permanently, reboot required
vim /etc/selinux/config
SELINUX=disabled
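As a one-liner alternative to editing the file by hand (a sketch, assuming the line currently reads SELINUX=enforcing):

sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
grep ^SELINUX= /etc/selinux/config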

2. Cluster installation

kubeadm is the Kubernetes cluster installation tool and can set up a cluster quickly. Installing Kubernetes essentially means installing its component images, and kubeadm bundles the basic images needed to run Kubernetes. Due to network restrictions in mainland China, these images cannot be pulled directly; switching to the image repository provided by Aliyun solves the problem.

2.1 Create the working directory

cd /usr/local/
mkdir kubernetes
cd kubernetes
mkdir cluster
cd cluster

2.2 Export the default configuration

kubeadm config print init-defaults --kubeconfig ClusterConfiguration > kubeadm.yml

2.3 Modify the configuration

Because the Aliyun mirror has not synced the latest Kubernetes release, a lower version is installed and configured here.

[root@kubernetes-master cluster]# vim kubeadm.yml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # Change to the master node's IP
  advertiseAddress: 192.168.17.120
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: kubernetes-master
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
# Change the image repository to the Aliyun mirror
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
# Set the Kubernetes version
kubernetesVersion: v1.17.3
networking:
  dnsDomain: cluster.local
  # Configure the Pod subnet so containers can communicate with each other
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}

2.4 List the required images

[root@kubernetes-master cluster]# kubeadm config images list --config kubeadm.yml
W0626 03:10:56.230847   26581 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
registry.aliyuncs.com/google_containers/kube-apiserver:v1.17.3
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.17.3
registry.aliyuncs.com/google_containers/kube-scheduler:v1.17.3
registry.aliyuncs.com/google_containers/kube-proxy:v1.17.3
registry.aliyuncs.com/google_containers/pause:3.2
registry.aliyuncs.com/google_containers/etcd:3.4.3-0
registry.aliyuncs.com/google_containers/coredns:1.6.7

2.5 Pull the required images

[root@kubernetes-master cluster]# kubeadm config images pull --config kubeadm.yml
W0625 04:33:27.592961   39998 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.17.3
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.17.3
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.17.3
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.17.3
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.2
[config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.4.3-0
[config/images] Pulled registry.aliyuncs.com/google_containers/coredns:1.6.7

image-20200629011301871

2.6 Initialize the master node

The --upload-certs flag automatically distributes the certificate files when additional control-plane nodes join later (see the sketch after the init output).

[root@kubernetes-master cluster]# echo "1" >/proc/sys/net/bridge/bridge-nf-call-iptables
[root@kubernetes-master cluster]# kubeadm init --config=kubeadm.yml --upload-certs | tee kubeadm-init.log
W0628 12:45:05.657664   13332 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0628 12:45:05.657757   13332 validation.go:28] Cannot validate kubelet config - no validator is available
[init] Using Kubernetes version: v1.17.3
[preflight] Running pre-flight checks
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.17.120]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [kubernetes-master localhost] and IPs [192.168.17.120 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [kubernetes-master localhost] and IPs [192.168.17.120 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
W0628 12:45:11.883443   13332 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-scheduler"
W0628 12:45:11.884310   13332 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 16.505067 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.17" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
25a71c899a0d60d04385107b52ea33bd5bcba5bdcea522a471714e65fe5bedd1
[mark-control-plane] Marking the node kubernetes-master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node kubernetes-master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.17.120:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:b6999b8f3545a3e8620b2cf11fc12cc10c7785219efd16c0a9e1b577f9838bcd
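For reference, a second control-plane node would join using the certificate key stored by --upload-certs; this is only a sketch, and in practice the token, hash, and key must be taken from your own kubeadm-init.log:

kubeadm join 192.168.17.120:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:b6999b8f3545a3e8620b2cf11fc12cc10c7785219efd16c0a9e1b577f9838bcd \
    --control-plane --certificate-key 25a71c899a0d60d04385107b52ea33bd5bcba5bdcea522a471714e65fe5bedd1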

2.7 Configure kubectl on the master node

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

2.8 Join the worker nodes

[root@kubernetes-base ~]# echo "1" >/proc/sys/net/bridge/bridge-nf-call-iptables
[root@kubernetes-base ~]# kubeadm join 192.168.17.120:6443 --token abcdef.0123456789abcdef     --discovery-token-ca-cert-hash sha256:b6999b8f3545a3e8620b2cf11fc12cc10c7785219efd16c0a9e1b577f9838bcd
W0628 14:17:31.888233    5683 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.17" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

2.9 Verify on the master node

kubectl get nodes

image-20200628142800472

2.10 Verify on a worker node

mkdir -p $HOME/.kube
scp root@192.168.17.120:/etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get nodes

image-20200628143654122

2.11 Delete a node from the master

kubectl delete node <NAME>
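A node is usually drained before deletion so its Pods are evicted first (a sketch; the flag names below are the ones used by kubectl 1.17):

kubectl drain <NAME> --ignore-daemonsets --delete-local-data
kubectl delete node <NAME>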

3. Install the network plugin

3.1 Download Calico

wget https://docs.projectcalico.org/manifests/calico.yaml

3.2 Configure Calico

Change 192.168.0.0/16 to 10.244.0.0/16, matching the podSubnet configured earlier (see the sketch below).

vim calico.yaml

image-20200702150050362
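A command-line sketch of the same edit, assuming the pool CIDR appears in calico.yaml as the (possibly commented-out) CALICO_IPV4POOL_CIDR value 192.168.0.0/16:

sed -i 's|192.168.0.0/16|10.244.0.0/16|g' calico.yaml
grep -n "10.244.0.0/16" calico.yaml   # confirm the change (uncomment CALICO_IPV4POOL_CIDR if needed)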

kubectl apply -f calico.yaml

The output is as follows:

configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created

3.3 Verify the installation

The installation succeeded once the Calico pods are in the Running state:

watch kubectl get pods --all-namespaces

image-20200702150308777

The installation succeeded once the nodes are in the Ready state:

kubectl get nodes

image-20200702150447405

4. Deploy containers

4.1 Start containers

# Use kubectl to create two Nginx Pods listening on port 80
# (a Pod is the smallest unit in which Kubernetes runs containers)
kubectl run nginx --image=nginx --replicas=2 --port=80
# Output:
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created
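As the deprecation notice suggests, on newer kubectl versions the same result can be achieved without kubectl run (a sketch):

kubectl create deployment nginx --image=nginx
kubectl scale deployment nginx --replicas=2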

4.2 Check Pod status

[root@kubernetes-master kubernetes]# kubectl get pods
NAME                     READY   STATUS    RESTARTS   AGE
nginx-5578584966-27r4v   1/1     Running   1          27h
nginx-5578584966-9qqcc   1/1     Running   1          27h
[root@kubernetes-master kubernetes]#

4.3 Check the deployment

[root@kubernetes-master kubernetes]# kubectl get deployment
NAME    READY   UP-TO-DATE   AVAILABLE   AGE
nginx   2/2     2            2           27h
[root@kubernetes-master kubernetes]#

4.4 Expose the service

# Expose the service with the LoadBalancer type so users can access it
kubectl expose deployment nginx --port=80 --type=LoadBalancer
# Output:
service/nginx exposed

4.5 Check the exposed service

kubectl get services

image-20200702152919798

4.6 Check the service details

kubectl describe service nginx

image-20200702153055737

4.7 Verify access

http://192.168.17.120:30874/

http://192.168.17.121:30874/

image-20200702153406038

image-20200702153349854
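The NodePort (30874 above) is assigned randomly by Kubernetes; a sketch of how to look it up and test from the command line:

kubectl get service nginx -o jsonpath='{.spec.ports[0].nodePort}'
curl http://192.168.17.120:30874/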

4.8 Stop the service

# Delete the deployment
kubectl delete deployment nginx
# Output:
deployment.apps "nginx" deleted
# Delete the exposed service
kubectl delete service nginx
# Output:
service "nginx" deleted

5. Resource configuration files

Starting containers with the run command is cumbersome. Docker solved this with Compose; Kubernetes solves it with kubectl apply, which can quickly create cluster resource objects from a configuration file.

5.1 Create the configuration file

vim nginx.yaml
# API version
apiVersion: apps/v1
# Resource type, e.g. Pod/ReplicationController/Deployment/Service/Ingress
kind: Deployment
metadata:
  # Name of this Deployment
  name: nginx-app
spec:
  selector:
    matchLabels:
      # Label of the containers; the Service selector must match this
      app: nginx
  # Number of replicas to deploy
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx
    spec:
      # Container configuration; it is an array, so multiple containers can be defined
      containers:
      # Container name
      - name: nginx
        # Container image
        image: nginx:1.17
        # Pull the image only if it is not present locally
        imagePullPolicy: IfNotPresent
        ports:
        # Pod port
        - containerPort: 80
# Expose the service
# Do not forget the three dashes here
---
# API version
apiVersion: v1
# Resource type, e.g. Pod/ReplicationController/Deployment/Service/Ingress
kind: Service
# Metadata
metadata:
  # Name of this Service
  name: nginx-http
spec:
  # Exposed ports
  ports:
    # Port exposed by the Service
    - port: 80
      # Pod port; traffic on the Service port is forwarded to this port
      targetPort: 80
  # Service type
  type: LoadBalancer
  # Label selector
  selector:
    # Must match the Deployment's label above
    app: nginx

Image pull policy notes

Three ImagePullPolicy values are supported:

  • Always: always pull the image, whether or not it exists locally
  • Never: never pull the image, whether or not it exists locally
  • IfNotPresent: pull the image only if it does not exist locally

Notes:

  • The default is IfNotPresent, but images tagged :latest default to Always
  • When pulling, Docker verifies the image digest; if it has not changed, the image data is not downloaded again
  • Avoid the :latest tag in production; in development it is handy for automatically pulling the newest image

5.2 Deploy the service

# Deploy
kubectl apply -f nginx.yaml
# Delete
kubectl delete -f nginx.yaml

5.3 Verify it took effect

[root@kubernetes-slave1 ~]# kubectl get pods
NAME                         READY   STATUS    RESTARTS   AGE
nginx-app-798dcc9989-bhrrj   1/1     Running   0          3m31s
nginx-app-798dcc9989-xqwsg   1/1     Running   0          3m31s
[root@kubernetes-slave1 ~]# 

5.4 Check the deployment

[root@kubernetes-slave1 ~]# kubectl get deployment
NAME        READY   UP-TO-DATE   AVAILABLE   AGE
nginx-app   2/2     2            2           4m59s

5.5 Check the exposed service

[root@kubernetes-slave1 ~]# kubectl get service
NAME         TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP      10.96.0.1       <none>        443/TCP        4d4h
nginx-http   LoadBalancer   10.103.226.63   <pending>     80:31167/TCP   6m51s
[root@kubernetes-slave1 ~]# 

5.6 Check the service details

[root@kubernetes-slave1 ~]# kubectl describe service nginx-http
Name:                     nginx-http
Namespace:                default
Labels:                   <none>
Annotations:              kubectl.kubernetes.io/last-applied-configuration:
                            {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"name":"nginx-http","namespace":"default"},"spec":{"ports":[{"port":80,"t...
Selector:                 app=nginx
Type:                     LoadBalancer
IP:                       10.103.226.63
Port:                     <unset>  80/TCP
TargetPort:               80/TCP
NodePort:                 <unset>  31167/TCP
Endpoints:                10.244.17.5:80,10.244.17.6:80
Session Affinity:         None
External Traffic Policy:  Cluster
Events:                   <none>

6. Install Ingress

6.1 Get the configuration file

https://github.com/kubernetes/ingress-nginx/blob/nginx-0.30.0/deploy/static/mandatory.yaml
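The manifest can be downloaded with wget using the corresponding raw URL (a sketch derived from the GitHub link above):

wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.30.0/deploy/static/mandatory.yaml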

6.2 Modify the configuration file

Add hostNetwork: true below serviceAccountName: nginx-ingress-serviceaccount.

# hostNetwork: true enables host-network mode, exposing the Nginx service on port 80
hostNetwork: true

image-20200702174743267
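A sketch of the relevant fragment of mandatory.yaml after the edit (indentation must match the Deployment's pod spec), plus a quick check:

#     spec:
#       hostNetwork: true
#       serviceAccountName: nginx-ingress-serviceaccount
grep -n "hostNetwork" mandatory.yaml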

6.3 Deploy the service

[root@kubernetes-master ingress]# kubectl apply -f mandatory.yaml
namespace/ingress-nginx created
configmap/nginx-configuration created
configmap/tcp-services created
configmap/udp-services created
serviceaccount/nginx-ingress-serviceaccount created
clusterrole.rbac.authorization.k8s.io/nginx-ingress-clusterrole created
role.rbac.authorization.k8s.io/nginx-ingress-role created
rolebinding.rbac.authorization.k8s.io/nginx-ingress-role-nisa-binding created
clusterrolebinding.rbac.authorization.k8s.io/nginx-ingress-clusterrole-nisa-binding created
deployment.apps/nginx-ingress-controller created
limitrange/ingress-nginx created

Check the deployment status:

[root@kubernetes-master ingress]# kubectl get pods -n ingress-nginx -o wide
NAME                                        READY   STATUS    RESTARTS   AGE     IP               NODE                NOMINATED NODE   READINESS GATES
nginx-ingress-controller-6ffc8fdf96-8rvp2   1/1     Running   0          2m18s   192.168.17.121   kubernetes-slave1   <none>           <none>
[root@kubernetes-master ingress]#

6.4 Deploy Tomcat

Deploy Tomcat so it is reachable only inside the cluster, then route to it through the reverse-proxy feature provided by Ingress. Create a resource configuration file named tomcat.yaml.

vim tomcat.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tomcat-app
spec:
  selector:
    matchLabels:
      # Label of the containers; the Service selector must match this
      app: tomcat
  replicas: 2
  template:
    metadata:
      labels:
        app: tomcat
    spec:
      containers:
      - name: tomcat
        image: tomcat:8.5.43
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: tomcat-http
spec:
  ports:
    - port: 8080
      targetPort: 8080
  # ClusterIP, NodePort, LoadBalancer
  type: ClusterIP
  selector:
    app: tomcat

Deploy:

kubectl apply -f tomcat.yaml

6.5 Reverse proxy

Reverse-proxy Tomcat through Ingress.

Create a resource configuration file named ingress.yaml:

apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: nginx-web
  annotations:
    # Specify the type of Ingress Controller
    kubernetes.io/ingress.class: "nginx"
    # Allow regular expressions in the rules' path
    nginx.ingress.kubernetes.io/use-regex: "true"
    # Connection timeout, default 5s
    nginx.ingress.kubernetes.io/proxy-connect-timeout: "600"
    # Backend send timeout, default 60s
    nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
    # Backend read timeout, default 60s
    nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
    # Maximum client upload size, default 20m
    nginx.ingress.kubernetes.io/proxy-body-size: "10m"
    # URL rewrite
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  # Routing rules
  rules:
  # Host name; must be a domain name, change it to your own
  - host: www.wuduoqiang.com
    http:
      paths:
      - path:
        backend:
          # Name of the backend Service
          serviceName: tomcat-http
          # Port of the backend Service
          servicePort: 8080

Deploy:

kubectl apply -f ingress.yaml

Check:

[root@kubernetes-master config]# kubectl get ingress
NAME        HOSTS                ADDRESS   PORTS   AGE
nginx-web   www.wuduoqiang.com             80      53s
[root@kubernetes-master config]#

Check the Ingress:

kubectl get ingress
kubectl get pods -n ingress-nginx -o wide

image-20200703113922582

Access the Ingress

# Edit the hosts file
vim /etc/hosts
# Modify it as follows
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.17.110 kubernetes-base
192.168.17.120 kubernetes-master
192.168.17.121 kubernetes-slave1
192.168.17.121 www.wuduoqiang.com
curl -v http://192.168.17.121 -H 'host: www.wuduoqiang.com'
[root@kubernetes-master config]# curl -v http://192.168.17.121 -H 'host: www.wuduoqiang.com'
* About to connect() to 192.168.17.121 port 80 (#0)
*   Trying 192.168.17.121...
* Connected to 192.168.17.121 (192.168.17.121) port 80 (#0)
> GET / HTTP/1.1
> User-Agent: curl/7.29.0
> Accept: */*
> host: www.wuduoqiang.com
> 
< HTTP/1.1 200 
< Server: nginx/1.17.8
< Date: Fri, 03 Jul 2020 06:20:57 GMT
< Content-Type: text/html;charset=UTF-8
< Transfer-Encoding: chunked
< Connection: keep-alive
< Vary: Accept-Encoding

7. Volumes

7.1 Install the NFS server

mkdir -p /usr/local/kubernetes/volumes
chmod a+rw /usr/local/kubernetes/volumes

ubuntu

apt-get install -y nfs-kernel-server

centos

yum -y install nfs-utils rpcbind

7.2 Configure the export directory

vim /etc/exports

Add the following line to the file:

/usr/local/kubernetes/volumes *(rw,sync,no_subtree_check,no_root_squash)
  • /usr/local/kubernetes/volumes: the directory exported to clients
  • *: any IP address may access the share
  • rw: read-write access
  • sync: writes are committed synchronously
  • no_subtree_check: do not check parent-directory permissions when the exported directory is a subdirectory
  • no_root_squash: a client connecting as root also gets root privileges on the exported directory
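Once the NFS service is running (started in the next step), the export table can be reloaded and checked without a restart (a sketch):

exportfs -ra                # re-export everything in /etc/exports
showmount -e localhost      # list the directories exported by this server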

7.3 Start the NFS server

ubuntu

/etc/init.d/nfs-kernel-server restart
service nfs-kernel-server start

chkconfig rpcbind on
chkconfig nfs on
# or
systemctl enable nfs
systemctl enable rpcbind

centos

# Start the services
service rpcbind start
service nfs start
# or
systemctl start rpcbind
systemctl start nfs
# Enable at boot
systemctl enable rpcbind
systemctl enable nfs

7.4 Install the NFS client

ubuntu

apt-get install -y nfs-common

centos

yum -y install nfs-utils rpcbind

Create the NFS client mount directory:

mkdir -p /usr/local/kubernetes/volumes-mount

Restart the service for the changes to take effect:

systemctl restart nfs

Mount the NFS server's /usr/local/kubernetes/volumes directory onto the NFS client's /usr/local/kubernetes/volumes-mount directory:

mount 192.168.17.130:/usr/local/kubernetes/volumes /usr/local/kubernetes/volumes-mount
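To make the mount survive reboots, an entry can be added to /etc/fstab on the client (a sketch; adjust mount options as needed):

echo "192.168.17.130:/usr/local/kubernetes/volumes /usr/local/kubernetes/volumes-mount nfs defaults 0 0" >> /etc/fstab
mount -a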

Check whether the mount succeeded:

df -h

image-20200704053044161

7.5 Verify it works

Test by creating a file:

touch /usr/local/kubernetes/volumes-mount/a.txt
cd /usr/local/kubernetes/volumes

If a.txt appears under /usr/local/kubernetes/volumes, the mount works.

image-20200704053549475

7.6 Unmount the NFS client

# Do not run this from inside the mount directory, or it will fail
umount /usr/local/kubernetes/volumes-mount

7.7 PV and PVC

Persistent Volume (PV) and Persistent Volume Claim (PVC).

A PV is a cluster resource. A PVC is a request for that resource and also acts as a claim check on it. The interaction between PVs and PVCs follows the lifecycle below.

  • Provisioning: the cluster administrator creates a set of PVs. They carry the real storage resources made available to cluster users and can be consumed through the Kubernetes API.
  • Binding: a user creates a PVC specifying capacity and access mode. The master watches for new PVCs, looks for a matching PV, and binds the two together. The user gets a resource that satisfies the request, and may receive more than was asked for. If no suitable volume exists, the claim stays unbound until a matching PV appears. For example, a cluster stocked with many 50G volumes cannot satisfy a 100G claim, even if the total capacity is sufficient, until a 100G PV is added.
  • Using: a Pod uses the claim as a volume. The cluster finds the bound PV through the PVC and mounts it into the Pod. For volumes that support multiple access modes, the user can specify the desired mode when using the PVC. Once a user owns a bound PVC, the bound PV belongs to that user; Pods access the PV by referencing the PVC in their volume definitions.
  • Releasing: when the user is done with the volume, the PVC object can be deleted through the API, and a new claim can be made later. After the PVC is deleted, the volume is considered "released", but it is not yet available to other PVCs: the previous claimant's data remains on the volume and is handled according to the reclaim policy.
  • Reclaiming: the PV's reclaim policy tells the cluster what to do with the volume after it has been released. Currently three policies exist: Retain, Recycle, and Delete. Retain allows the resource to be claimed again manually. Delete removes both the PV and the backing storage (AWS EBS, GCE PD, or Cinder volume) where the plugin supports it. Recycle, where supported, performs a basic scrub (rm -rf /thevolume/*) so the volume can be claimed again.

7.8 Define a PV

Persistent volumes are implemented as plugins; the currently supported plugins include:

  • GCEPersistentDisk
  • AWSElasticBlockStore
  • NFS (the option used here)
  • iSCSI
  • RBD (Ceph Block Device)
  • Glusterfs
  • HostPath (for single-node testing only)
  • Local persistent volumes

Create the configuration file nfs-pv-mysql.yaml:

apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv-mysql
spec:
  # Capacity
  capacity:
    storage: 5Gi
  # Access modes
  accessModes:
    # The volume can be mounted read-write by many nodes at once
    - ReadWriteMany
  # Reclaim policy; Recycle performs a basic scrub (rm -rf /thevolume/*)
  persistentVolumeReclaimPolicy: Recycle
  nfs:
    # Path configured on the NFS server
    path: "/usr/local/kubernetes/volumes"
    # NFS server address
    server: 192.168.17.130
    readOnly: false
# Deploy
kubectl apply -f nfs-pv-mysql.yaml
# Delete
kubectl delete -f nfs-pv-mysql.yaml
# Check
kubectl get pv

Output:

image-20200706100217669

7.9 Define a PVC

Create the configuration file nfs-pvc-mysql-xqdmy.yaml:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  # Named after the database it will serve
  name: nfs-pvc-mysql-xqdmy
spec:
  accessModes:
  # Must use the same access mode as the PV
  - ReadWriteMany
  # Request resources as needed
  resources:
     requests:
       # Allocate 1G for the xqdmy database
       storage: 1Gi
# Deploy
kubectl apply -f nfs-pvc-mysql-xqdmy.yaml
# Delete
kubectl delete -f nfs-pvc-mysql-xqdmy.yaml
# Check
kubectl get pvc

Output:

image-20200706101204568

7.10 Deploy MySQL

Make sure the NFS client is installed on every node: yum -y install nfs-utils rpcbind

# Install the packages
yum -y install nfs-utils rpcbind
# Start the services
systemctl start nfs
systemctl start rpcbind
# Enable at boot
systemctl enable nfs
systemctl enable rpcbind

Create the configuration file mysql-xqdmy.yaml:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql-xqdmy
spec:
  selector:
    matchLabels:
      app: mysql-xqdmy
  replicas: 1
  template:
    metadata:
      labels:
        app: mysql-xqdmy
    spec:
      containers:
        - name: mysql-xqdmy
          image: mysql:8.0.16
          # Pull the image only if it is not present locally
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 3306
          # Equivalent to environment in a Docker Compose file
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123456"
          # Mount path inside the container
          volumeMounts:
            - name: nfs-vol-xqdmy
              mountPath: /var/lib/mysql
      volumes:
        # Mount the persistent volume claim
        - name: nfs-vol-xqdmy
          persistentVolumeClaim:
            claimName: nfs-pvc-mysql-xqdmy

---
apiVersion: v1
kind: Service
metadata:
  name: mysql-xqdmy
spec:
  ports:
    - port: 3306
      targetPort: 3306
  type: LoadBalancer
  selector:
    app: mysql-xqdmy
# Deploy
kubectl apply -f mysql-xqdmy.yaml
# Delete
kubectl delete -f mysql-xqdmy.yaml
# Check
kubectl get pods
kubectl get service

image-20200706110044891

Deployment succeeded.

image-20200706110143064

On the volume server you can see the MySQL data has arrived.

image-20200706110317279

8. Externalized configuration

8.1 Overview

A ConfigMap is the Kubernetes resource object for storing configuration; its contents are stored in etcd. It can hold individual properties as well as entire configuration files or JSON binary objects. The ConfigMap API provides a way to inject configuration data into containers while keeping the mechanism transparent to them. Configuration should be decoupled from the image contents to keep containerized applications portable.
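Besides writing a manifest, a ConfigMap can also be created directly with kubectl; a quick sketch with hypothetical names (demo-config, app.properties):

kubectl create configmap demo-config --from-literal=log.level=debug
kubectl create configmap demo-file-config --from-file=app.properties
kubectl get configmap demo-config -o yaml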

8.2 Usage

Configure MySQL with a ConfigMap by modifying the mysql-xqdmy.yaml configuration file:

apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql-xqdmy-config
data:
  # Key-value data
  mysqld.cnf: |
    [client]
    port=3306
    [mysql]
    no-auto-rehash
    [mysqld]
    skip-host-cache
    skip-name-resolve
    default-authentication-plugin=mysql_native_password
    character-set-server=utf8mb4
    collation-server=utf8mb4_general_ci
    explicit_defaults_for_timestamp=true
    lower_case_table_names=1

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql-xqdmy
spec:
  selector:
    matchLabels:
      app: mysql-xqdmy
  replicas: 1
  template:
    metadata:
      labels:
        app: mysql-xqdmy
    spec:
      containers:
        - name: mysql-xqdmy
          image: mysql:8.0.16
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 3306
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123456"
          volumeMounts:
            # Mount the MySQL configuration directory as a volume
            - name: cm-vol-xqdmy
              mountPath: /etc/mysql/conf.d
            - name: nfs-vol-xqdmy
              mountPath: /var/lib/mysql
      volumes:
        # Mount the ConfigMap contents into the volume as files
        - name: cm-vol-xqdmy
          configMap:
            name: mysql-xqdmy-config
            items:
                # Key in the ConfigMap
              - key: mysqld.cnf
                # The value matching that key is written to a file named mysqld.cnf
                path: mysqld.cnf
        - name: nfs-vol-xqdmy
          persistentVolumeClaim:
            claimName: nfs-pvc-mysql-xqdmy

---
apiVersion: v1
kind: Service
metadata:
  name: mysql-xqdmy
spec:
  ports:
    - port: 3306
      targetPort: 3306
  type: LoadBalancer
  selector:
    app: mysql-xqdmy

8.3 Redeploy MySQL

Redeploy the MySQL service deployed earlier:

# Delete the old service
kubectl delete -f mysql-xqdmy.yaml
# On the volume server, delete the old data
rm -rf /usr/local/kubernetes/volumes
# Start the new service
kubectl apply -f mysql-xqdmy.yaml
# Check the ConfigMap
kubectl get cm
kubectl describe cm <ConfigMap Name>

image-20200706144856680

# Check the container logs
kubectl get pods
kubectl logs -f <MySQL PodName>

image-20200706145511940

# Output:
Initializing database
2020-07-06T06:33:37.024065Z 0 [Warning] [MY-011070] [Server] 'Disabling symbolic links using --skip-symbolic-links (or equivalent) is the default. Consider not using this option as it' is deprecated and will be removed in a future release.
2020-07-06T06:33:37.024159Z 0 [System] [MY-013169] [Server] /usr/sbin/mysqld (mysqld 8.0.16) initializing of server in progress as process 31
2020-07-06T06:33:40.980436Z 5 [Warning] [MY-010453] [Server] root@localhost is created with an empty password ! Please consider switching off the --initialize-insecure option.
2020-07-06T06:33:42.652147Z 0 [System] [MY-013170] [Server] /usr/sbin/mysqld (mysqld 8.0.16) initializing of server has completed
Database initialized
MySQL init process in progress...
MySQL init process in progress...
2020-07-06T06:33:44.820140Z 0 [Warning] [MY-011070] [Server] 'Disabling symbolic links using --skip-symbolic-links (or equivalent) is the default. Consider not using this option as it' is deprecated and will be removed in a future release.
2020-07-06T06:33:44.820246Z 0 [System] [MY-010116] [Server] /usr/sbin/mysqld (mysqld 8.0.16) starting as process 82
2020-07-06T06:33:46.225091Z 0 [Warning] [MY-010068] [Server] CA certificate ca.pem is self signed.
2020-07-06T06:33:46.228875Z 0 [Warning] [MY-011810] [Server] Insecure configuration for --pid-file: Location '/var/run/mysqld' in the path is accessible to all OS users. Consider choosing a different directory.
2020-07-06T06:33:46.272034Z 0 [System] [MY-010931] [Server] /usr/sbin/mysqld: ready for connections. Version: '8.0.16'  socket: '/var/run/mysqld/mysqld.sock'  port: 0  MySQL Community Server - GPL.
2020-07-06T06:33:46.353602Z 0 [System] [MY-011323] [Server] X Plugin ready for connections. Socket: '/var/run/mysqld/mysqlx.sock'
Warning: Unable to load '/usr/share/zoneinfo/iso3166.tab' as time zone. Skipping it.
Warning: Unable to load '/usr/share/zoneinfo/leap-seconds.list' as time zone. Skipping it.
Warning: Unable to load '/usr/share/zoneinfo/zone.tab' as time zone. Skipping it.
Warning: Unable to load '/usr/share/zoneinfo/zone1970.tab' as time zone. Skipping it.

2020-07-06T06:33:52.849753Z 0 [System] [MY-010910] [Server] /usr/sbin/mysqld: Shutdown complete (mysqld 8.0.16)  MySQL Community Server - GPL.

MySQL init process done. Ready for start up.

2020-07-06T06:33:53.284028Z 0 [Warning] [MY-011070] [Server] 'Disabling symbolic links using --skip-symbolic-links (or equivalent) is the default. Consider not using this option as it' is deprecated and will be removed in a future release.
2020-07-06T06:33:53.284821Z 0 [System] [MY-010116] [Server] /usr/sbin/mysqld (mysqld 8.0.16) starting as process 1
2020-07-06T06:33:54.372072Z 0 [Warning] [MY-010068] [Server] CA certificate ca.pem is self signed.
2020-07-06T06:33:54.374312Z 0 [Warning] [MY-011810] [Server] Insecure configuration for --pid-file: Location '/var/run/mysqld' in the path is accessible to all OS users. Consider choosing a different directory.
2020-07-06T06:33:54.423374Z 0 [System] [MY-010931] [Server] /usr/sbin/mysqld: ready for connections. Version: '8.0.16'  socket: '/var/run/mysqld/mysqld.sock'  port: 3306  MySQL Community Server - GPL.
2020-07-06T06:33:54.546887Z 0 [System] [MY-011323] [Server] X Plugin ready for connections. Socket: '/var/run/mysqld/mysqlx.sock' bind-address: '::' port: 33060

Enter the container interactively:

kubectl get pods
kubectl exec -it <MySQL PodName> /bin/bash
whereis mysql
cd /etc/mysql/conf.d
# The ConfigMap configured above has taken effect
cat mysqld.cnf

image-20200706150337658

Check the exposed services:

kubectl get services -owide

image-20200706150527786

image-20200706150652963

9. Install Kuboard

9.1 Installation command

kubectl apply -f https://kuboard.cn/install-script/kuboard.yaml

Check the installation

Kuboard should be in the Running state:

Every 2.0s: kubectl get pods --all-namespaces                                                                                                                                                                                     Fri Jul  3 14:35:21 2020

NAMESPACE       NAME                                        READY   STATUS    RESTARTS   AGE
default         tomcat-app-6566544768-bpwnq                 1/1     Running   0          3h12m
default         tomcat-app-6566544768-mvdgk                 1/1     Running   0          3h12m
ingress-nginx   nginx-ingress-controller-6ffc8fdf96-8rvp2   1/1     Running   2          20h
kube-system     calico-kube-controllers-69cb4d4df7-sl2z5    1/1     Running   3          2d3h
kube-system     calico-node-7h968                           1/1     Running   3          2d3h
kube-system     calico-node-w95kh                           1/1     Running   4          2d3h
kube-system     coredns-9d85f5447-2fj7g                     1/1     Running   3          5d1h
kube-system     coredns-9d85f5447-rl29k                     1/1     Running   3          5d1h
kube-system     etcd-kubernetes-master                      1/1     Running   3          5d1h
kube-system     kube-apiserver-kubernetes-master            1/1     Running   4          5d1h
kube-system     kube-controller-manager-kubernetes-master   1/1     Running   3          5d1h
kube-system     kube-proxy-bn9kg                            1/1     Running   3          5d1h
kube-system     kube-proxy-lzlqq                            1/1     Running   5          5d
kube-system     kube-scheduler-kubernetes-master            1/1     Running   6          5d1h
kube-system     kuboard-5ffbc8466d-xqpf6                    1/1     Running   0          6m42s

9.2 Get the token

kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep kuboard-user | awk '{print $1}')
[root@kubernetes-master config]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep kuboard-user | awk '{print $1}')
Name:         kuboard-user-token-rnt47
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: kuboard-user
              kubernetes.io/service-account.uid: 4263b000-e88e-48a5-a66c-4ad96fe2bf7d

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1025 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6InZJWjdNY1M2elVpMS1fdHU5OGhRYW1ieVZpdnJoZDBTb0xvSTZlMWlCRUkifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJvYXJkLXVzZXItdG9rZW4tcm50NDciLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoia3Vib2FyZC11c2VyIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiNDI2M2IwMDAtZTg4ZS00OGE1LWE2NmMtNGFkOTZmZTJiZjdkIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmt1Ym9hcmQtdXNlciJ9.lKb9RyRfgltye2oah1daV0QAr9tmXP08-PGO-fnGinwbviSPjYRqZ2gm6zypScKBV_gVP9wk0oUYbhDWBE6BARqY0MJ2nMsz7ibYIvaMwnW9E_aj4ZijlX7tyYPpcFd9vAvZfY3dppWIF_2V7qdyFo-BhaW7QJwpXQ_6YMzu95m2UtuyKIDHEIlvVaCzlSvACxtoc2RDbu7lfJcWxxceQ0Ps2-uc6K1ZcX1ooYTBcgxhkU1v90c_qjdTD8cgfUT0XMPPFjuWtfHqTXNOl9PR9DAVqAoTBsbrlNSa0P0uBcwYEkmhLW3qw8yyOI9rjQpCuvOoTSQGSOX6XqRK8SzO-w

9.3 Log in to Kuboard

Visit port 32567 on any node in the cluster, e.g. http://192.168.17.121:32567/, and log in with the token.

image-20200703143715307

10. Common errors

1. Initialization error

If the error below appears, install kubeadm, kubelet, and kubectl versions that match kubernetesVersion.

[root@kubernetes-master cluster]# kubeadm init --config=kubeadm.yml --upload-certs | tee kubeadm-init.log
W0626 02:53:20.702638    8014 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[init] Using Kubernetes version: v1.17.3
[preflight] Running pre-flight checks
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
        [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error execution phase preflight: [preflight] Some fatal errors occurred:
        [ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables contents are not set to 1
        [ERROR KubeletVersion]: the kubelet version is higher than the control plane version. This is not a supported version skew and may lead to a malfunctional cluster. Kubelet version: "1.18.4" Control plane version: "1.17.3"
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher

Check the installed versions; because the Aliyun mirror has not synced the latest Kubernetes, a lower version must be installed.

[root@kubernetes-master cluster]# yum list installed | grep kubeadm
kubeadm.x86_64                       1.18.4-1                        @kubernetes
[root@kubernetes-master cluster]# yum list installed | grep kubectl
kubectl.x86_64                       1.18.4-1                        @kubernetes
[root@kubernetes-master cluster]# yum list installed | grep kubelet
kubelet.x86_64                       1.18.4-1                        @kubernetes

Solution:

yum remove -y kubeadm kubelet kubectl
yum install -y kubelet-1.17.3 kubeadm-1.17.3 kubectl-1.17.3

2. Master node installation error

Note: if the installed Kubernetes version does not match the downloaded image versions, you will get a "timed out waiting for the condition" error. If initialization fails midway or you want to change the configuration, run kubeadm reset to reset it and then initialize again.

kubeadm reset
sudo systemctl daemon-reload
sudo systemctl restart docker

3. Initialization warning

[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
error execution phase preflight: [preflight] Some fatal errors occurred:

Solution:

[root@kubernetes-master bin]# docker info | grep Cgroup
WARNING: bridge-nf-call-iptables is disabled
WARNING: bridge-nf-call-ip6tables is disabled
Cgroup Driver: cgroupfs
[root@kubernetes-master bin]# vim /etc/systemd/system/docker.service
[root@kubernetes-master bin]# sudo systemctl daemon-reload
[root@kubernetes-master bin]# sudo systemctl restart docker
[root@kubernetes-master bin]# docker info | grep Cgroup
WARNING: bridge-nf-call-iptables is disabled
WARNING: bridge-nf-call-ip6tables is disabled
Cgroup Driver: cgroupfs
# Fix: change the ExecStart line in /etc/systemd/system/docker.service to the following, then reload and restart Docker
ExecStart=/usr/bin/dockerd --exec-opt native.cgroupdriver=systemd
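An alternative fix (a sketch) is to set the cgroup driver in /etc/docker/daemon.json instead of editing the unit file; note this overwrites any existing daemon.json, so the registry mirror configured earlier is kept here:

cat > /etc/docker/daemon.json << 'EOF'
{
  "registry-mirrors": ["https://v8o5rqy6.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
sudo systemctl daemon-reload && sudo systemctl restart docker
docker info | grep -i cgroup    # should now report: Cgroup Driver: systemd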

4. Initialization error

[root@kubernetes-master cluster]# kubeadm init --config=kubeadm.yml --upload-certs | tee kubeadm-init.log
W0626 04:07:43.418877   33326 validation.go:28] Cannot validate kubelet config - no validator is available
W0626 04:07:43.418947   33326 validation.go:28] Cannot validate kube-proxy config - no validator is available
[init] Using Kubernetes version: v1.17.3
[preflight] Running pre-flight checks
error execution phase preflight: [preflight] Some fatal errors occurred:
        [ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables contents are not set to 1
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher
[root@kubernetes-master cluster]# echo "1" >/proc/sys/net/bridge/bridge-nf-call-iptables

5. Too few CPU cores

[root@kubernetes-master cluster]# kubeadm init --config=kubeadm.yml --upload-certs | tee kubeadm-init.log
W0628 01:00:05.813039   43086 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0628 01:00:05.813116   43086 validation.go:28] Cannot validate kubelet config - no validator is available
[init] Using Kubernetes version: v1.17.3
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.17.120]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [kubernetes-master localhost] and IPs [192.168.17.120 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [kubernetes-master localhost] and IPs [192.168.17.120 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
W0628 01:00:12.089086   43086 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-scheduler"
W0628 01:00:12.090968   43086 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.

Unfortunately, an error has occurred:
        error execution phase wait-control-plane: couldn't initialize a Kubernetes cluster
To see the stack trace of this error execute with --v=5 or higher
timed out waiting for the condition

This error is likely caused by:
        - The kubelet is not running
        - The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)

If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
        - 'systemctl status kubelet'
        - 'journalctl -xeu kubelet'

Additionally, a control plane component may have crashed or exited when started by the container runtime.
To troubleshoot, list all containers using your preferred container runtimes CLI, e.g. docker.
Here is one example how you may list all Kubernetes containers running in docker:
        - 'docker ps -a | grep kube | grep -v pause'
        Once you have found the failing container, you can inspect its logs with:
        - 'docker logs CONTAINERID'

Solution:

The master node needs at least 2 CPU cores.