Binary Deployment of Kubernetes

192.168.26.105
192.168.26.115   #master
192.168.26.125

192.168.26.135
192.168.26.145   #node
192.168.26.155

192.168.26.165
192.168.26.175   #etcd
192.168.26.185

192.168.26.195   #harbor

192.168.26.205   #haproxy/keepalived
192.168.26.215   #haproxy/keepalived

Basic configuration

vim /etc/hosts #recommended: configure the hosts file on every node

192.168.26.105 k8s-master-1 k8s.master-1.com
192.168.26.115 k8s-master-2 k8s.master-2.com
192.168.26.125 k8s-master-3 k8s.master-3.com

192.168.26.135 k8s-node-1 k8s.node-1.com
192.168.26.145 k8s-node-2 k8s.node-2.com
192.168.26.155 k8s-node-3 k8s.node-3.com

192.168.26.165 k8s-etcd-1 k8s.etcd-1.com
192.168.26.175 k8s-etcd-2 k8s.etcd-2.com
192.168.26.185 k8s-etcd-3 k8s.etcd-3.com

192.168.26.195 k8s-harbor k8s.harbor.com

192.168.26.205 k8s-haproxy-1 k8s.haproxy-1.com
192.168.26.215 k8s-haproxy-2 k8s.haproxy-2.com

Base environment preparation

  • Host name configuration, IP settings, system parameter tuning, plus the load balancer and Harbor deployments the cluster relies on
//disable the swap partition
swapoff -a

//sync the time
ntpdate time1.aliyun.com && hwclock -w
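
The change above only disables swap for the current boot. A small optional sketch (assuming swap is listed in /etc/fstab) to keep it disabled and verify:

//comment out any swap entry so it stays off after a reboot
sed -ri '/^[^#].*\sswap\s/s/^/#/' /etc/fstab

//verify that no swap is active
free -h | grep -i swap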

System configuration

  • Host name and other per-host system settings

Highly available load balancing

Configure haproxy/keepalived

- 192.168.26.205   #haproxy/keepalived

# apt install keepalived haproxy -y

# find / -name keepalived.conf*
/usr/share/doc/keepalived/samples/keepalived.conf.vrrp

# cp /usr/share/doc/keepalived/samples/keepalived.conf.vrrp /etc/keepalived/keepalived.conf

# vim /etc/keepalived/keepalived.conf
root@ubuntu-suosuoli-server:/etc/keepalived# cat keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state MASTER #initial state
    interface eth0 #network interface
    garp_master_delay 10
    smtp_alert
    virtual_router_id 88
    priority 100 #priority
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.26.248 dev eth0 label eth0:1 #vip
        192.168.26.249 dev eth0 label eth0:2 #vip
    }
}

# vim /etc/haproxy/haproxy.cfg

...omitted...

listen stats #status page
    mode http
    bind 0.0.0.0:9999
    stats enable
    log global
    stats uri    /haproxy_status
    stats auth   bokebi:123456

listen k8s_api_node_6443
    bind 192.168.26.248:6443 #vip
    mode tcp
    #balance leastconn
    server 192.168.26.105 192.168.26.105:6443 check inter 2000 fall 3 rise 5 #k8s-master-1
    #server 192.168.26.115 192.168.26.115:6443 check inter 2000 fall 3 rise 5 #k8s-master-2
    #server 192.168.26.125 192.168.26.125:6443 check inter 2000 fall 3 rise 5 #k8s-master-3

# systemctl restart keepalived.service haproxy.service
- 192.168.26.215   #haproxy/keepalived

# apt install keepalived haproxy -y

# find / -name keepalived.conf*
/usr/share/doc/keepalived/samples/keepalived.conf.vrrp

# cp /usr/share/doc/keepalived/samples/keepalived.conf.vrrp /etc/keepalived/keepalived.conf

# vim /etc/keepalived/keepalived.conf
root@ubuntu-suosuoli-server:/etc/keepalived# cat keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state MASTER #initial state
    interface eth0 #network interface
    garp_master_delay 10
    smtp_alert
    virtual_router_id 88
    priority 80 #priority (lower than the first node)
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.26.248 dev eth0 label eth0:1 #vip
        192.168.26.249 dev eth0 label eth0:2 #vip
    }
}

# vim /etc/haproxy/haproxy.cfg

...omitted...

listen stats #status page
    mode http
    bind 0.0.0.0:9999
    stats enable
    log global
    stats uri    /haproxy_status
    stats auth   bokebi:123456

listen k8s_api_node_6443
    bind 192.168.26.248:6443 #vip
    mode tcp
    #balance leastconn
    server 192.168.26.105 192.168.26.105:6443 check inter 2000 fall 3 rise 5 #k8s-master-1
    #server 192.168.26.115 192.168.26.115:6443 check inter 2000 fall 3 rise 5 #k8s-master-2
    #server 192.168.26.125 192.168.26.125:6443 check inter 2000 fall 3 rise 5 #k8s-master-3

# systemctl restart keepalived.service haproxy.service
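
A quick check after the restart (a verification sketch, not part of the deployment): the VIP should be up on the keepalived MASTER, and haproxy should be listening on the API and stats ports.

# ip addr show eth0 | grep 192.168.26.248 #the VIP appears on whichever node currently holds the MASTER role
# ss -tnlp | grep -E '6443|9999' #haproxy listening on the VIP:6443 and on the stats port 9999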

Configure Harbor

- 192.168.26.195   #harbor

# apt-get install docker-compose 

# cd /usr/local/src && pwd
/usr/local/src

# vim install-docker.sh #script that installs a specific docker version
#!/bin/bash

sudo apt-get update

sudo apt-get -y install apt-transport-https ca-certificates curl software-properties-common

curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -

sudo add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"

sudo apt-get -y update

sudo apt-get -y install docker-ce=5:18.09.9~3-0~ubuntu-bionic  docker-ce-cli=5:18.09.9~3-0~ubuntu-bionic
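
Run the script and confirm the pinned version (a small sketch; the versions are the ones defined in the script above):

# bash install-docker.sh
# docker --version #should report Docker version 18.09.9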


  • Add a hosts entry on the local workstation
C:\Windows\System32\drivers\etc\hosts

192.168.26.195   k8s.harbor.com

Ctrl + S #save the file

#after the steps above, the Harbor web UI can be reached at k8s.harbor.com

Harbor over HTTPS

  • Internal images will all be kept on the internal Harbor server instead of being downloaded from the Internet.
# cd harbor && pwd
/usr/local/src/harbor

# mkdir certs #create the certificate storage directory

# cd certs && pwd
/usr/local/src/harbor/certs

# openssl genrsa -out /usr/local/src/harbor/certs/harbor-ca.key #generate the private key

# openssl req -x509 -new -nodes -key /usr/local/src/harbor/certs/harbor-ca.key -subj "/CN=k8s.harbor.com" -days 7120 -out /usr/local/src/harbor/certs/harbor-ca.crt
Can't load /root/.rnd into RNG
140302059827648:error:2406F079:random number generator:RAND_load_file:Cannot open file:../crypto/rand/randfile.c:88:Filename=/root/.rnd #error

# touch /root/.rnd

# openssl req -x509 -new -nodes -key /usr/local/src/harbor/certs/harbor-ca.key -subj "/CN=k8s.harbor.com" -days 7120 -out /usr/local/src/harbor/certs/harbor-ca.crt #sign again

# cd .. && pwd
/usr/local/src/harbor

# vim harbor.cfg #edit the harbor.cfg configuration file

hostname = k8s.harbor.com

ui_url_protocol = https

ssl_cert = /usr/local/src/harbor/certs/harbor-ca.crt
ssl_cert_key = /usr/local/src/harbor/certs/harbor-ca.key

harbor_admin_password = 123456

# ./install.sh #install harbor

# systemctl daemon-reload

# systemctl restart docker.service
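
A quick sanity check of the certificate and the HTTPS endpoint (a sketch, assuming the paths used above):

# openssl x509 -in /usr/local/src/harbor/certs/harbor-ca.crt -noout -subject -dates #confirm the CN and validity period
# docker-compose ps #run inside /usr/local/src/harbor; all Harbor containers should be Up
# curl -k -I https://k8s.harbor.com #the Harbor UI should answer over HTTPS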
  • Sync the .crt certificate to the clients
- 192.168.26.195 #harbor

# pwd #confirm the working directory
/usr/local/src/harbor/certs

# for i in 0 1 2 3 4 5 ; do ssh 192.168.26.1${i}5 mkdir /etc/docker/certs.d/k8s.harbor.com -p ; done #create the certificate directory on the master and node hosts

# for i in 0 1 2 3 4 5 ; do scp harbor-ca.crt 192.168.26.1${i}5:/etc/docker/certs.d/k8s.harbor.com/ ; done #copy the .crt certificate to the master and node hosts

# cp /etc/docker/certs.d/k8s.harbor.com/harbor-ca.crt /opt/ #keep a copy of the certificate for the later script
  • Sync the /etc/hosts file to the clients
- 192.168.26.195 #harbor
# pwd #confirm the working directory
/etc

# cat hosts #hosts entries to distribute
192.168.26.105 k8s-master-1 k8s.master-1.com
192.168.26.115 k8s-master-2 k8s.master-2.com
192.168.26.125 k8s-master-3 k8s.master-3.com

192.168.26.135 k8s-node-1 k8s.node-1.com
192.168.26.145 k8s-node-2 k8s.node-2.com
192.168.26.155 k8s-node-3 k8s.node-3.com

192.168.26.165 k8s-etcd-1 k8s.etcd-1.com
192.168.26.175 k8s-etcd-2 k8s.etcd-2.com
192.168.26.185 k8s-etcd-3 k8s.etcd-3.com

192.168.26.195 k8s-harbor k8s.harbor.com #the key entry: resolves the Harbor address

192.168.26.205 k8s-haproxy-1 k8s.haproxy-1.com
192.168.26.215 k8s-haproxy-2 k8s.haproxy-2.com

# for i in 0 1 2 3 4 5 ; do scp hosts 192.168.26.1${i}5:`pwd` ; done #distribute the hosts file
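
Optionally verify the distribution before moving on (a sketch using the same host list): each node should resolve the Harbor name and already hold the certificate.

# for i in 0 1 2 3 4 5 ; do ssh 192.168.26.1${i}5 "getent hosts k8s.harbor.com && ls /etc/docker/certs.d/k8s.harbor.com/" ; done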
  • Test logging in to Harbor
root@k8s-master-1:~# docker login k8s.harbor.com
Username: admin
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded
  • Test pushing an image to Harbor
root@k8s-master-1:~# docker images
REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
alpine              latest              a187dde48cd2        11 days ago         5.6MB
root@k8s-master-1:~# docker tag alpine k8s.harbor.com/library/alpine:v1
root@k8s-master-1:~# docker push k8s.harbor.com/library/alpine:v1
The push refers to repository [k8s.harbor.com/library/alpine]
beee9f30bc1f: Pushed 
v1: digest: sha256:cb8a924afdf0229ef7515d9e5b3024e23b3eb03ddbba287f4a19c6ac90b8d221 size: 528
  • Test pulling the image back down
root@k8s-node-1:~# docker login k8s.harbor.com
Username: admin
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded
root@k8s-node-1:~# docker pull k8s.harbor.com/library/alpine:v1
v1: Pulling from library/alpine
aad63a933944: Pull complete 
Digest: sha256:cb8a924afdf0229ef7515d9e5b3024e23b3eb03ddbba287f4a19c6ac90b8d221
Status: Downloaded newer image for k8s.harbor.com/library/alpine:v1
k8s.harbor.com/library/alpine:v1

Ansible deployment

Base environment preparation

//update the package index
# apt-get update

//install python2.7
# for i in 0 1 2 3 4 5 6 7 8 ; do ssh 192.168.26.1${i}5 apt install python2.7 -y ;done

//link python to the installed python2.7
# for i in 0 1 2 3 4 5 6 7 8 ; do ssh 192.168.26.1${i}5 ln -s /usr/bin/python2.7 /usr/bin/python ; done
  • Install ansible on the control nodes
# for i in 0 1 2 ; do ssh 192.168.26.1${i}5 apt install ansible -y ; done
  • Configure passwordless ssh login from the ansible control node
- 192.168.26.105 #master

# ssh-keygen #generate a key pair

# apt-get install sshpass -y #sshpass is used to push the public key to each k8s server

# pwd
/usr/local/src

# cat scp-crt.sh #the public-key distribution script

#!/bin/bash
#target host list
IP="
192.168.26.105
192.168.26.115
192.168.26.125
192.168.26.135
192.168.26.145
192.168.26.155
192.168.26.165
192.168.26.175
192.168.26.185
"
for node in ${IP};do
    sshpass -p 123  ssh-copy-id ${node} -o StrictHostKeyChecking=no
    if [ $? -eq 0 ];then
        echo "${node} copy succeeded"
    else
        echo "${node} copy failed"
    fi
done

# bash scp-crt.sh #run the script
- 192.168.26.105 #master

# pwd
/usr/local/src

# cat scp-docker.sh #script that syncs the docker certificate and related files

#!/bin/bash
#target host list
IP="
192.168.26.105
192.168.26.115
192.168.26.125
192.168.26.135
192.168.26.145
192.168.26.155
192.168.26.165
192.168.26.175
192.168.26.185
"
for node in ${IP};do
    sshpass -p 123 ssh-copy-id ${node} -o StrictHostKeyChecking=no
    if [ $? -eq 0 ];then
        echo "${node} key copied"
        echo "${node} key copied, starting environment initialization....."
            ssh ${node} "mkdir /etc/docker/certs.d/k8s.harbor.com -p"
            echo "Harbor certificate directory created!"
            scp /etc/docker/certs.d/k8s.harbor.com/harbor-ca.crt ${node}:/etc/docker/certs.d/k8s.harbor.com/harbor-ca.crt
            echo "Harbor certificate copied!"
            scp /etc/hosts ${node}:/etc/hosts
            echo "hosts file copied"
            scp -r /root/.docker ${node}:/root/
            echo "Harbor credential file copied!"
            scp -r /etc/resolv.conf ${node}:/etc/
    else
            echo "${node} key copy failed"
    fi
done

# bash scp-docker.sh #run the script
  • Test that the other nodes can now be reached over ssh without a password, for example with the loop below
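
A minimal check (sketch): each hostname should print without a password prompt.

# for i in 0 1 2 3 4 5 6 7 8 ; do ssh -o BatchMode=yes 192.168.26.1${i}5 hostname ; done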

Clone the project

# export release=2.2.0 #set the kubeasz release version to download

# curl -C- -fLO --retry 3 https://github.com/easzlab/kubeasz/releases/download/${release}/easzup #download the easzup helper script

# vim easzup #edit the download script
17  export DOCKER_VER=19.03.8 #change the docker version (line 17 of easzup)

# chmod +x ./easzup #make it executable

# ./easzup -D #download everything with the helper script (this takes roughly ten to twenty minutes)
  • Download completed successfully


Prepare the hosts file

  • After the download completes, inspect the directory contents
root@k8s-master-1:/usr/local/src# cd /etc/ansible/
root@k8s-master-1:/etc/ansible# ll
total 132
drwxrwxr-x  11 root root  4096 Feb  1 10:55 ./
drwxr-xr-x 101 root root  4096 Apr  4 21:18 ../
-rw-rw-r--   1 root root   395 Feb  1 10:35 01.prepare.yml
-rw-rw-r--   1 root root    58 Feb  1 10:35 02.etcd.yml
-rw-rw-r--   1 root root   149 Feb  1 10:35 03.containerd.yml
-rw-rw-r--   1 root root   137 Feb  1 10:35 03.docker.yml
-rw-rw-r--   1 root root   470 Feb  1 10:35 04.kube-master.yml
-rw-rw-r--   1 root root   140 Feb  1 10:35 05.kube-node.yml
-rw-rw-r--   1 root root   408 Feb  1 10:35 06.network.yml
-rw-rw-r--   1 root root    77 Feb  1 10:35 07.cluster-addon.yml
-rw-rw-r--   1 root root  3686 Feb  1 10:35 11.harbor.yml
-rw-rw-r--   1 root root   431 Feb  1 10:35 22.upgrade.yml
-rw-rw-r--   1 root root  1975 Feb  1 10:35 23.backup.yml
-rw-rw-r--   1 root root   113 Feb  1 10:35 24.restore.yml
-rw-rw-r--   1 root root  1752 Feb  1 10:35 90.setup.yml
-rw-rw-r--   1 root root  1127 Feb  1 10:35 91.start.yml
-rw-rw-r--   1 root root  1120 Feb  1 10:35 92.stop.yml
-rw-rw-r--   1 root root   337 Feb  1 10:35 99.clean.yml
-rw-rw-r--   1 root root 10283 Feb  1 10:35 ansible.cfg
drwxrwxr-x   3 root root  4096 Apr  4 21:25 bin/
drwxrwxr-x   2 root root  4096 Feb  1 10:55 dockerfiles/
drwxrwxr-x   8 root root  4096 Feb  1 10:55 docs/
drwxrwxr-x   4 root root  4096 Apr  5 00:33 down/
drwxrwxr-x   2 root root  4096 Feb  1 10:55 example/
-rw-rw-r--   1 root root   414 Feb  1 10:35 .gitignore
drwxrwxr-x  14 root root  4096 Feb  1 10:55 manifests/
drwxrwxr-x   2 root root  4096 Feb  1 10:55 pics/
-rw-rw-r--   1 root root  5607 Feb  1 10:35 README.md
drwxrwxr-x  23 root root  4096 Feb  1 10:55 roles/
drwxrwxr-x   2 root root  4096 Feb  1 10:55 tools/
  • Copy the template configuration file (the ansible inventory) and edit it; the result is shown below
# 'etcd' cluster should have odd member(s) (1,3,5,...)
# variable 'NODE_NAME' is the distinct name of a member in 'etcd' cluster
[etcd]
192.168.26.165 NODE_NAME=etcd1
192.168.26.175 NODE_NAME=etcd2
192.168.26.185 NODE_NAME=etcd3

# master node(s) #only 2 masters for now; adding a master is demonstrated later
[kube-master]
192.168.26.105
192.168.26.115

# work node(s) #only 2 nodes for now; adding a node is demonstrated later
[kube-node]
192.168.26.135
192.168.26.145

# [optional] harbor server, a private docker registry
# 'NEW_INSTALL': 'yes' to install a harbor server; 'no' to integrate with an existing one
# 'SELF_SIGNED_CERT': 'no' means you need to put certificate files named harbor.pem and harbor-key.pem in the 'down' directory

# NEW_INSTALL: 'yes' installs a new harbor, 'no' uses an existing harbor server
# if no domain name is used, you can set HARBOR_DOMAIN=""
[harbor]
#192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no SELF_SIGNED_CERT=yes

# [optional] loadbalance for accessing k8s from outside
# [optional] external load balancer, e.g. for forwarding services exposed via NodePort
[ex-lb]
192.168.26.205 LB_ROLE=backup EX_APISERVER_VIP=192.168.26.248 EX_APISERVER_PORT=6443 #VIP, VIP port and HA host
192.168.26.215 LB_ROLE=backup EX_APISERVER_VIP=192.168.26.248 EX_APISERVER_PORT=6443 #VIP, VIP port and HA host
#192.168.1.6 LB_ROLE=backup EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
#192.168.1.7 LB_ROLE=master EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443

# [optional] ntp server for the cluster
[chrony]
#192.168.1.1 #time server

[all:vars]
# --------- Main Variables ---------------
# Cluster container-runtime supported: docker, containerd
CONTAINER_RUNTIME="docker" #container runtime

# Network plugins supported: calico, flannel, kube-router, cilium, kube-ovn
# cluster network plugin; calico, flannel, kube-router and cilium are currently supported
CLUSTER_NETWORK="flannel" #network plugin

# Service proxy mode of kube-proxy: 'iptables' or 'ipvs'
PROXY_MODE="ipvs" #proxy mode

# K8S Service CIDR, not overlap with node(host) networking
# Service CIDR; must not overlap any existing internal network
SERVICE_CIDR="172.20.0.0/16" #service network

# Cluster CIDR (Pod CIDR), not overlap with node(host) networking
# Pod CIDR; must not overlap any existing internal network
CLUSTER_CIDR="10.10.0.0/16" #pod network

# NodePort Range
NODE_PORT_RANGE="30000-60000" #NodePort port range

# Cluster DNS Domain
CLUSTER_DNS_DOMAIN="bokebi.local." #custom domain suffix for services

# -------- Additional Variables (don't change the default value right now) ---
# Binaries Directory
bin_dir="/usr/bin" #where the binaries are placed

# CA and other components cert/key Directory
ca_dir="/etc/kubernetes/ssl" #certificate path

# Deploy Directory (kubeasz workspace)
# deployment directory, i.e. the ansible working directory; do not change
base_dir="/etc/ansible" #kubeasz working directory

Test that ansible can reach all hosts

root@k8s-master-1:/etc/ansible# ansible all -m ping
192.168.26.205 | UNREACHABLE! => {  #HA-1, this failure is expected
    "changed": false, 
    "msg": "SSH Error: data could not be sent to remote host \"192.168.26.205\". Make sure this host can be reached over ssh", 
    "unreachable": true
}
192.168.26.215 | UNREACHABLE! => {  #HA-2, this failure is expected
    "changed": false, 
    "msg": "SSH Error: data could not be sent to remote host \"192.168.26.215\". Make sure this host can be reached over ssh", 
    "unreachable": true
}
192.168.26.145 | SUCCESS => {
    "changed": false, 
    "ping": "pong"
}
192.168.26.135 | SUCCESS => {
    "changed": false, 
    "ping": "pong"
}
192.168.26.105 | SUCCESS => {
    "changed": false, 
    "ping": "pong"
}
192.168.26.175 | SUCCESS => {
    "changed": false, 
    "ping": "pong"
}
192.168.26.185 | SUCCESS => {
    "changed": false, 
    "ping": "pong"
}
192.168.26.165 | SUCCESS => {
    "changed": false, 
    "ping": "pong"
}
192.168.26.115 | SUCCESS => {
    "changed": false, 
    "ping": "pong"
}

Environment initialization: deploying 01.prepare.yml

root@k8s-master-1:/etc/ansible# vim 01.prepare.yml 
# [optional] to synchronize system time of nodes with 'chrony' 
- hosts:
  - kube-master
  - kube-node
  - etcd
#  - ex-lb #external load balancing commented out
#  - chrony #time synchronization commented out
  roles:
  - { role: chrony, when: "groups['chrony']|length > 0" }

# to create CA, kubeconfig, kube-proxy.kubeconfig etc.
- hosts: localhost
  roles:
  - deploy

# prepare tasks for all nodes
- hosts:
  - kube-master
  - kube-node
  - etcd
  roles:
  - prepare
root@k8s-master-1:/etc/ansible# ansible-playbook 01.prepare.yml #run the playbook
  • An error occurs


  • Fix the error
root@k8s-master-1:/etc/ansible# apt install python-pip -y 
  • Run ansible-playbook 01.prepare.yml again


Deploying the etcd cluster: 02.etcd.yml

root@k8s-master-1:/etc/ansible# ansible-playbook 02.etcd.yml  #run the playbook


Check the etcd nodes

  • On the etcd nodes, check that the service came up; k8s-etcd-1 is used as the example here
root@k8s-etcd-1:~# ps -ef | grep etcd
root      23332      1  1 01:31 ?        00:00:03 /usr/bin/etcd --name=etcd1 --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem --peer-cert-file=/etc/etcd/ssl/etcd.pem --peer-key-file=/etc/etcd/ssl/etcd-key.pem --trusted-ca-file=/etc/kubernetes/ssl/ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem --initial-advertise-peer-urls=https://192.168.26.165:2380 --listen-peer-urls=https://192.168.26.165:2380 --listen-client-urls=https://192.168.26.165:2379,http://127.0.0.1:2379 --advertise-client-urls=https://192.168.26.165:2379 --initial-cluster-token=etcd-cluster-0 --initial-cluster=etcd1=https://192.168.26.165:2380,etcd2=https://192.168.26.175:2380,etcd3=https://192.168.26.185:2380 --initial-cluster-state=new --data-dir=/var/lib/etcd
root      23353  19333  0 01:34 pts/0    00:00:00 grep --color=auto etcd
  • Verify the etcd service against every etcd server
root@k8s-etcd-1:~# export NODE_IPS="192.168.26.165 192.168.26.175 192.168.26.185"
root@k8s-etcd-1:~# echo $NODE_IPS
192.168.26.165 192.168.26.175 192.168.26.185
root@k8s-etcd-1:~# for ip in ${NODE_IPS} ; do ETCDCTL_API=3 /usr/bin/etcdctl   --endpoints=https://${ip}:2379   --cacert=/etc/kubernetes/ssl/ca.pem   --cert=/etc/etcd/ssl/etcd.pem   --key=/etc/etcd/ssl/etcd-key.pem   endpoint health ; done
https://192.168.26.165:2379 is healthy: successfully committed proposal: took = 7.076204ms
https://192.168.26.175:2379 is healthy: successfully committed proposal: took = 7.687458ms
https://192.168.26.185:2379 is healthy: successfully committed proposal: took = 8.340643ms
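
For a little more detail, the same loop with `endpoint status` shows the leader, DB size and raft term (a sketch reusing the certificate paths above):

root@k8s-etcd-1:~# for ip in ${NODE_IPS} ; do ETCDCTL_API=3 /usr/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --write-out=table endpoint status ; done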

Deploying Docker: 03.docker.yml

  • The startup script path can optionally be changed, but docker was already installed on the ansible control node beforehand, so the playbook does not strictly need to be re-run there
root@k8s-etcd-1:~# ansible-playbook 03.docker.yml
root@k8s-etcd-1:~# docker version
Client: Docker Engine - Community
 Version:           19.03.5
 API version:       1.40
 Go version:        go1.12.12
 Git commit:        633a0ea838
 Built:             Wed Mar 11 01:15:55 2020
 OS/Arch:           linux/amd64
 Experimental:      false

Server: Docker Engine - Community
 Engine:
  Version:          19.03.5
  API version:      1.40 (minimum version 1.12)
  Go version:       go1.12.12
  Git commit:       633a0ea838
  Built:            Wed Mar 11 01:15:55 2020
  OS/Arch:          linux/amd64
  Experimental:     false
 containerd:
  Version:          v1.2.10
  GitCommit:        b34a5c8af56e510852c35414db4c1f4fa6172339
 runc:
  Version:          1.0.0-rc8+dev
  GitCommit:        3e425f80a8c931f88e6d94a8c831b9d5aa481657
 docker-init:
  Version:          0.18.0
  GitCommit:        fec3683
  • The version above is 19.03.5, which is neither the latest nor the version we intended to install. It can be changed by copying the matching docker binaries into /etc/ansible/bin/
root@k8s-master-1:/etc/ansible# cd down/
root@k8s-master-1:/etc/ansible/down# ll
total 792420
drwxrwxr-x  4 root     root          4096 Apr  5 00:33 ./
drwxrwxr-x 12 root     root          4096 Apr  5 01:30 ../
-rw-------  1 root     root     203646464 Apr  4 23:56 calico_v3.4.4.tar
-rw-------  1 root     root      40974848 Apr  5 00:01 coredns_1.6.6.tar
-rw-------  1 root     root     126891520 Apr  5 00:07 dashboard_v2.0.0-rc3.tar
drwxrwxr-x  2 stevenux stevenux      4096 Apr  5 00:33 docker/
-rw-r--r--  1 root     root      63252595 Apr  5 00:33 docker-19.03.8.tgz #the docker tarball downloaded earlier
-rw-rw-r--  1 root     root          1737 Feb  1 10:35 download.sh
-rw-------  1 root     root      55390720 Apr  5 00:08 flannel_v0.11.0-amd64.tar
-rw-------  1 root     root     152580096 Apr  5 00:18 kubeasz_2.2.0.tar
-rw-------  1 root     root      40129536 Apr  5 00:09 metrics-scraper_v1.0.3.tar
-rw-------  1 root     root      41199616 Apr  5 00:16 metrics-server_v0.3.6.tar
drwxr-xr-x  2 root     root          4096 Oct 19 22:56 packages/
-rw-------  1 root     root        754176 Apr  5 00:16 pause_3.1.tar
-rw-------  1 root     root      86573568 Apr  5 00:18 traefik_v1.7.20.tar
root@k8s-master-1:/etc/ansible/down# tar xvf docker-19.03.8.tgz 
docker/
docker/containerd
docker/docker
docker/ctr
docker/dockerd
docker/runc
docker/docker-proxy
docker/docker-init
docker/containerd-shim
root@k8s-master-1:/etc/ansible/down# cp ./docker/* /etc/ansible/bin/  #copy every file extracted into `docker/` to `/etc/ansible/bin/`
root@k8s-master-1:/etc/ansible# cd /etc/ansible
root@k8s-master-1:/etc/ansible# ./bin/docker version
Client: Docker Engine - Community
 Version:           19.03.5
 API version:       1.40
 Go version:        go1.12.17
 Git commit:        afacb8b7f0
 Built:             Wed Mar 11 01:22:56 2020
 OS/Arch:           linux/amd64
 Experimental:      false
  • Run ansible-playbook 03.docker.yml again


  • Check whether the docker version installed on the nodes was updated
root@k8s-node-1:~# docker version
Client: Docker Engine - Community
 Version:           19.03.8
 API version:       1.40
 Go version:        go1.12.17
 Git commit:        afacb8b7f0
 Built:             Wed Mar 11 01:22:56 2020
 OS/Arch:           linux/amd64
 Experimental:      false

Server: Docker Engine - Community
 Engine:
  Version:          19.03.8
  API version:      1.40 (minimum version 1.12)
  Go version:       go1.12.17
  Git commit:       afacb8b7f0
  Built:            Wed Mar 11 01:30:32 2020
  OS/Arch:          linux/amd64
  Experimental:     false
 containerd:
  Version:          v1.2.13
  GitCommit:        7ad184331fa3e55e52b890ea95e65ba581ae3429
 runc:
  Version:          1.0.0-rc10
  GitCommit:        dc9208a3303feef5b3839f4323d9beb36df0a9dd
 docker-init:
  Version:          0.18.0
  GitCommit:        fec3683

Deploying the masters: 04.kube-master.yml

root@k8s-master-1:~# cd /etc/ansible/
root@k8s-master-1:/etc/ansible# ansible-playbook 04.kube-master.yml


  • Check the master status
root@k8s-master-1:/etc/ansible# kubectl get node
NAME             STATUS                     ROLES    AGE     VERSION
192.168.26.105   Ready,SchedulingDisabled   master   2m29s   v1.17.2
192.168.26.115   Ready,SchedulingDisabled   master   2m32s   v1.17.2

Deploying the nodes: 05.kube-node.yml

  • The node hosts must have docker installed
  • Check whether the image referenced in the playbook role can actually be pulled
root@k8s-master-1:/etc/ansible# cat 05.kube-node.yml 
# to set up 'kube-node' nodes
- hosts: kube-node
  roles:
  - { role: kube-node, when: "inventory_hostname not in groups['kube-master']" } 
  • Create the project (base-images) in the Harbor registry


root@k8s-master-1:/etc/ansible# docker pull mirrorgooglecontainers/pause-amd64:3.1

root@k8s-master-1:/etc/ansible# docker tag mirrorgooglecontainers/pause-amd64:3.1 k8s.harbor.com/base-images/pause-amd64:3.1 #tag the image (the path must match SANDBOX_IMAGE below)

root@k8s-master-1:/etc/ansible# docker push k8s.harbor.com/base-images/pause-amd64:3.1 #push the image
  • Verify the uploaded image in the Harbor web UI


  • Change the image address to the custom download path on the local Harbor
root@k8s-master-1:/etc/ansible# vim roles/kube-node/defaults/main.yml

# base container image
SANDBOX_IMAGE: "k8s.harbor.com/base-images/pause-amd64:3.1" #local harbor image path
#SANDBOX_IMAGE: "mirrorgooglecontainers/pause-amd64:3.1"
#SANDBOX_IMAGE: "registry.access.redhat.com/rhel7/pod-infrastructure:latest"
root@k8s-master-1:/etc/ansible# ansible-playbook 05.kube-node.yml


  • Check that the node hosts joined the cluster
root@k8s-master-1:/etc/ansible# kubectl get node
NAME             STATUS                     ROLES    AGE     VERSION
192.168.26.105   Ready,SchedulingDisabled   master   6h14m   v1.17.2
192.168.26.115   Ready,SchedulingDisabled   master   6h14m   v1.17.2
192.168.26.135   Ready                      node     56s     v1.17.2
192.168.26.145   Ready                      node     56s     v1.17.2

How the node-local haproxy works

  • While the role runs, the haproxy component is installed on every node
root@k8s-node-1:~# cat /etc/haproxy/haproxy.cfg #inspect the haproxy configuration on the node
global
        log /dev/log    local1 warning
        chroot /var/lib/haproxy
        user haproxy
        group haproxy
        daemon
        nbproc 1

defaults
        log     global
        timeout connect 5s
        timeout client  10m
        timeout server  10m

listen kube-master
        bind 127.0.0.1:6443 #listens on port 6443 of the loopback interface
        mode tcp
        option tcplog
        option dontlognull
        option dontlog-normal
        balance roundrobin 
        server 192.168.26.105 192.168.26.105:6443 check inter 10s fall 2 rise 2 weight 1 #forwards to the cluster's master hosts
        server 192.168.26.115 192.168.26.115:6443 check inter 10s fall 2 rise 2 weight 1
  • Clients on the node access 127.0.0.1:6443, and the node-local haproxy then balances the requests across the backend masters; a quick verification sketch follows
root@k8s-node-1:~# cat /etc/kubernetes/kubelet.kubeconfig
server: https://127.0.0.1:6443 #the configured apiserver address (line 5 of the file)
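
Both ends of that path can be checked directly on the node (verification commands only, a sketch):

root@k8s-node-1:~# grep 'server:' /etc/kubernetes/kubelet.kubeconfig #the kubelet points at the local proxy
root@k8s-node-1:~# ss -tnlp | grep 6443 #haproxy should be listening on 127.0.0.1:6443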

Deploying the network service: 06.network.yml

root@k8s-master-1:/etc/ansible# ansible-playbook 06.network.yml


Test the network plugin

  • Create pods to use for a connectivity check
root@k8s-master-1:/etc/ansible# kubectl run net-test  --image=alpine --replicas=4  sleep 360000
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/net-test created
root@k8s-master-1:/etc/ansible# kubectl get pod -o wide
NAME                        READY   STATUS    RESTARTS   AGE   IP          NODE             NOMINATED NODE   READINESS GATES
net-test-6c94768685-65mnb   1/1     Running   0          54s   10.10.3.6   192.168.26.135   <none>           <none>
net-test-6c94768685-h57sh   1/1     Running   0          54s   10.10.2.4   192.168.26.145   <none>           <none>
net-test-6c94768685-qq9bx   1/1     Running   0          54s   10.10.3.5   192.168.26.135   <none>           <none>
net-test-6c94768685-v749g   1/1     Running   0          54s   10.10.2.5   192.168.26.145   <none>           <none>
root@k8s-master-1:/etc/ansible# kubectl exec -it net-test-6c94768685-65mnb sh #exec into any one of the pods
/ # ifconfig
eth0      Link encap:Ethernet  HWaddr 8A:F7:3C:3F:AA:BA  
          inet addr:10.10.3.6  Bcast:0.0.0.0  Mask:255.255.255.0
          UP BROADCAST RUNNING MULTICAST  MTU:1450  Metric:1
          RX packets:12 errors:0 dropped:0 overruns:0 frame:0
          TX packets:1 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:928 (928.0 B)  TX bytes:42 (42.0 B)

lo        Link encap:Local Loopback  
          inet addr:127.0.0.1  Mask:255.0.0.0
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

/ # ping 10.10.3.5 #test pod-to-pod connectivity inside the cluster
PING 10.10.3.5 (10.10.3.5): 56 data bytes
64 bytes from 10.10.3.5: seq=0 ttl=64 time=0.100 ms
64 bytes from 10.10.3.5: seq=1 ttl=64 time=0.074 ms
64 bytes from 10.10.3.5: seq=4 ttl=64 time=0.090 ms
^C
--- 10.10.3.5 ping statistics ---
5 packets transmitted, 5 packets received, 0% packet loss
round-trip min/avg/max = 0.059/0.076/0.100 ms
/ # ping 223.6.6.6 #test connectivity to the outside network
PING 223.6.6.6 (223.6.6.6): 56 data bytes
64 bytes from 223.6.6.6: seq=3 ttl=127 time=8.529 ms
64 bytes from 223.6.6.6: seq=4 ttl=127 time=8.245 ms
64 bytes from 223.6.6.6: seq=5 ttl=127 time=9.297 ms
^C
--- 223.6.6.6 ping statistics ---
6 packets transmitted, 3 packets received, 50% packet loss
round-trip min/avg/max = 8.245/8.690/9.297 ms
/ # ping www.baidu.com #fails because DNS has not been configured yet
ping: bad address 'www.baidu.com'

Adding master and node hosts to the binary-installed cluster

Add a master node

  • First make sure the new master node can be reached over passwordless ssh
root@k8s-master-1:~# ssh 192.168.26.125

...omitted...

Last login: Sun Apr  5 23:04:23 2020 from 192.168.26.105
root@k8s-master-3:~# 
  • Check the help of the easzctl tool used to add nodes
root@k8s-master-1:~# easzctl --help
Usage: easzctl COMMAND [args]

Cluster-wide operation:
    checkout        To switch to context <clustername>, or create it if not existed
    destroy     To destroy the current cluster, '--purge' to also delete the context
    list        To list all of clusters managed
    setup       To setup a cluster using the current context
    start-aio       To quickly setup an all-in-one cluster for testing (like minikube)

In-cluster operation:
    add-etcd        To add a etcd-node to the etcd cluster
    add-master      To add a kube-master(master node) to the k8s cluster
    add-node        To add a kube-node(work node) to the k8s cluster
    del-etcd        To delete a etcd-node from the etcd cluster
    del-master      To delete a kube-master from the k8s cluster
    del-node        To delete a kube-node from the k8s cluster
    upgrade     To upgrade the k8s cluster

Extra operation:
    basic-auth      To enable/disable basic-auth for apiserver

Use "easzctl help <command>" for more information about a given command.
  • Add the master node
root@k8s-master-1:~# easzctl add-master 192.168.26.125

easzctl del-master 192.168.26.xxx #delete a master node
  • The master node was added successfully


  • Verify
root@k8s-master-1:~# kubectl get node
NAME             STATUS                     ROLES    AGE     VERSION
192.168.26.105   Ready,SchedulingDisabled   master   21h     v1.17.2
192.168.26.115   Ready,SchedulingDisabled   master   21h     v1.17.2
192.168.26.125   Ready,SchedulingDisabled   master   8m38s   v1.17.2
192.168.26.135   Ready                      node     14h     v1.17.2
192.168.26.145   Ready                      node     14h     v1.17.2
  • On a node host, verify that the new master was picked up
root@k8s-node-1:~# vim /etc/haproxy/haproxy.cfg
global
        log /dev/log    local1 warning
        chroot /var/lib/haproxy
        user haproxy
        group haproxy
        daemon
        nbproc 1

defaults
        log     global
        timeout connect 5s
        timeout client  10m
        timeout server  10m

listen kube-master
        bind 127.0.0.1:6443
        mode tcp
        option tcplog
        option dontlognull
        option dontlog-normal
        balance roundrobin
        server 192.168.26.125 192.168.26.125:6443 check inter 10s fall 2 rise 2 weight 1 #the newly added master node
        server 192.168.26.105 192.168.26.105:6443 check inter 10s fall 2 rise 2 weight 1
        server 192.168.26.115 192.168.26.115:6443 check inter 10s fall 2 rise 2 weight 1

Add a node

  • First make sure the new node can be reached over passwordless ssh
root@k8s-master-1:~# ssh 192.168.26.155

...omitted...

Last login: Sun Apr  5 20:42:45 2020 from 192.168.26.200
root@k8s-node-3:~# 
  • Add the node
root@k8s-master-1:~# easzctl add-node 192.168.26.155
easzctl del-node 192.168.26.xxx #delete a node


  • Verify
root@k8s-master-1:~# kubectl get node
NAME             STATUS                     ROLES    AGE   VERSION
192.168.26.105   Ready,SchedulingDisabled   master   21h   v1.17.2
192.168.26.115   Ready,SchedulingDisabled   master   21h   v1.17.2
192.168.26.125   Ready,SchedulingDisabled   master   16m   v1.17.2
192.168.26.135   Ready                      node     14h   v1.17.2
192.168.26.145   Ready                      node     14h   v1.17.2
192.168.26.155   Ready                      node     93s   v1.17.2

Upgrading the version of the binary-installed cluster

root@k8s-master-1:/usr/local/src# ll
total 458248
drwxr-xr-x  2 root root      4096 Apr  6 01:40 ./
drwxr-xr-x 10 root root      4096 Dec 30 23:20 ../
-rwxr-xr-x  1 root root     12965 Apr  5 02:01 easzup*
-rw-r--r--  1 root root       463 Apr  4 17:52 install-docker.sh
-rw-r--r--  1 root root  13105487 Apr  6 01:32 kubernetes-client-linux-amd64.tar.gz
-rw-r--r--  1 root root  96338064 Apr  6 01:32 kubernetes-node-linux-amd64.tar.gz
-rw-r--r--  1 root root 359299390 Apr  6 01:35 kubernetes-server-linux-amd64.tar.gz
-rw-r--r--  1 root root    462528 Apr  6 01:31 kubernetes.tar.gz
-rw-r--r--  1 root root       337 Apr  4 20:35 scp.sh
  • Unpack
root@k8s-master-1:/usr/local/src# tar xf kubernetes-server-linux-amd64.tar.gz #contains essentially every binary we need
  • Inspect the binaries we need
root@k8s-master-1:/usr/local/src# cd kubernetes/server/bin
root@k8s-master-1:/usr/local/src/kubernetes/server/bin# ll #directory holding the binaries
total 1073732
drwxr-xr-x 2 root root      4096 Mar 13 05:43 ./
drwxr-xr-x 3 root root      4096 Mar 13 05:38 ../
-rwxr-xr-x 1 root root  46960640 Mar 13 05:43 apiextensions-apiserver*
-rwxr-xr-x 1 root root  39346176 Mar 13 05:43 kubeadm*
-rwxr-xr-x 1 root root 118722560 Mar 13 05:43 kube-apiserver*
-rw-r--r-- 1 root root         8 Mar 13 05:39 kube-apiserver.docker_tag
-rw------- 1 root root 172620800 Mar 13 05:39 kube-apiserver.tar
-rwxr-xr-x 1 root root 108613632 Mar 13 05:43 kube-controller-manager*
-rw-r--r-- 1 root root         8 Mar 13 05:39 kube-controller-manager.docker_tag
-rw------- 1 root root 162511872 Mar 13 05:39 kube-controller-manager.tar
-rwxr-xr-x 1 root root  43499520 Mar 13 05:43 kubectl*
-rwxr-xr-x 1 root root 111630104 Mar 13 05:43 kubelet*
-rwxr-xr-x 1 root root  37806080 Mar 13 05:43 kube-proxy*
-rw-r--r-- 1 root root         8 Mar 13 05:39 kube-proxy.docker_tag
-rw------- 1 root root 117974016 Mar 13 05:39 kube-proxy.tar
-rwxr-xr-x 1 root root  42098688 Mar 13 05:43 kube-scheduler*
-rw-r--r-- 1 root root         8 Mar 13 05:39 kube-scheduler.docker_tag
-rw------- 1 root root  95996928 Mar 13 05:39 kube-scheduler.tar
-rwxr-xr-x 1 root root   1687552 Mar 13 05:43 mounter*
  • Create a directory for the new version and a backup directory for the current version
root@k8s-master-1:/usr/local/src/kubernetes/server/bin# mkdir /opt/k8s-1.17.4 #new-version directory
root@k8s-master-1:/usr/local/src/kubernetes/server/bin# mkdir /opt/k8s-1.17.2 #backup directory for the current version
  • Copy the current binaries (backup) and the new binaries
root@k8s-master-1:/usr/local/src/kubernetes/server/bin# cd /etc/ansible/bin/
root@k8s-master-1:/etc/ansible/bin# cp kubectl kubelet kube-proxy kube-apiserver  kube-controller-manager kube-scheduler /opt/k8s-1.17.2/ #back up the current version
root@k8s-master-1:/etc/ansible/bin# cd /usr/local/src/kubernetes/server/bin
root@k8s-master-1:/usr/local/src/kubernetes/server/bin# cp kubectl kubelet kube-proxy kube-apiserver  kube-controller-manager kube-scheduler /opt/k8s-1.17.4/ #stage the new version
root@k8s-master-1:/usr/local/src/kubernetes/server/bin# ll /opt/k8s-1.17.2/
total 451316
drwxr-xr-x 2 root root      4096 Apr  6 01:52 ./
drwxr-xr-x 6 root root      4096 Apr  6 01:49 ../
-rwxr-xr-x 1 root root 118632448 Apr  6 01:52 kube-apiserver*
-rwxr-xr-x 1 root root 108548096 Apr  6 01:52 kube-controller-manager*
-rwxr-xr-x 1 root root  43491328 Apr  6 01:52 kubectl*
-rwxr-xr-x 1 root root 111568408 Apr  6 01:52 kubelet*
-rwxr-xr-x 1 root root  37801984 Apr  6 01:52 kube-proxy*
-rwxr-xr-x 1 root root  42094592 Apr  6 01:52 kube-scheduler*
root@k8s-master-1:/usr/local/src/kubernetes/server/bin# ll /opt/k8s-1.17.4/
total 451544
drwxr-xr-x 2 root root      4096 Apr  6 01:52 ./
drwxr-xr-x 6 root root      4096 Apr  6 01:49 ../
-rwxr-xr-x 1 root root 118722560 Apr  6 01:52 kube-apiserver*
-rwxr-xr-x 1 root root 108613632 Apr  6 01:52 kube-controller-manager*
-rwxr-xr-x 1 root root  43499520 Apr  6 01:52 kubectl*
-rwxr-xr-x 1 root root 111630104 Apr  6 01:52 kubelet*
-rwxr-xr-x 1 root root  37806080 Apr  6 01:52 kube-proxy*
-rwxr-xr-x 1 root root  42098688 Apr  6 01:52 kube-scheduler*
  • Stop the related services before copying the binaries
systemctl stop kube-apiserver kube-proxy kube-controller-manager kubelet kube-scheduler #master nodes

systemctl stop kube-proxy kubelet #node hosts

Upgrade one node first, as a test

root@k8s-node-1:~# kubelet --version
Kubernetes v1.17.2
root@k8s-node-1:~# kube-proxy --version
Kubernetes v1.17.2
root@k8s-node-1:~# systemctl stop kube-proxy kubelet #stop the services first, then copy the binaries
root@k8s-master-1:~# cd /opt/k8s-1.17.4/
root@k8s-master-1:/opt/k8s-1.17.4# scp kubelet kube-proxy 192.168.26.135:/usr/bin #copy the new binaries to the node
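
With the binaries in place, start the services on the node again before checking the version (an assumed follow-up step matching the units stopped above, shown as a sketch):

root@k8s-node-1:~# systemctl start kubelet kube-proxy
root@k8s-node-1:~# systemctl status kubelet kube-proxy | grep Active #both should be active (running)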
  • Verify the version after the upgrade
root@k8s-master-1:~# kubectl get node
NAME             STATUS                        ROLES    AGE    VERSION
192.168.26.105   NotReady,SchedulingDisabled   master   23h    v1.17.2
192.168.26.115   Ready,SchedulingDisabled      master   23h    v1.17.2
192.168.26.125   Ready,SchedulingDisabled      master   150m   v1.17.2
192.168.26.135   Ready                         node     17h    v1.17.4 #upgraded successfully
192.168.26.145   Ready                         node     17h    v1.17.2
192.168.26.155   Ready                         node     134m   v1.17.2

root@k8s-node-1:~# kubelet --version
Kubernetes v1.17.4
root@k8s-node-1:~# kube-proxy --version
Kubernetes v1.17.4

Upgrade the whole cluster

root@k8s-master-1:~# systemctl stop kube-apiserver kube-proxy kube-controller-manager kubelet kube-scheduler
root@k8s-master-2:~# systemctl stop kube-apiserver kube-proxy kube-controller-manager kubelet kube-scheduler
root@k8s-master-3:~# systemctl stop kube-apiserver kube-proxy kube-controller-manager kubelet kube-scheduler

root@k8s-node-1:~# systemctl stop kube-proxy kubelet
root@k8s-node-2:~# systemctl stop kube-proxy kubelet
root@k8s-node-3:~# systemctl stop kube-proxy kubelet
  • Replace the binaries with the new version on the control node
root@k8s-master-1:/opt/k8s-1.17.4# cd 
root@k8s-master-1:~# cd /opt/k8s-1.17.4/ #directory with the new binaries
root@k8s-master-1:/opt/k8s-1.17.4# cp ./* /etc/ansible/bin/ #copy the staged binaries into the ansible bin directory
root@k8s-master-1:/opt/k8s-1.17.4# cd /etc/ansible/bin/ #enter the ansible bin directory
root@k8s-master-1:/etc/ansible/bin# ./kubelet --version #verify the version of the staged binaries
Kubernetes v1.17.4
root@k8s-master-1:/etc/ansible/bin# easzctl upgrade #run the cluster upgrade


  • Verify the master and node versions in the cluster
root@k8s-master-1:/etc/ansible/bin# kubectl get node
NAME             STATUS                     ROLES    AGE    VERSION
192.168.26.105   Ready,SchedulingDisabled   master   23h    v1.17.4
192.168.26.115   Ready,SchedulingDisabled   master   23h    v1.17.4
192.168.26.125   Ready,SchedulingDisabled   master   165m   v1.17.4
192.168.26.135   Ready                      node     17h    v1.17.4
192.168.26.145   Ready                      node     17h    v1.17.4
192.168.26.155   Ready                      node     149m   v1.17.4
root@k8s-master-1:/etc/ansible# kubectl exec -it net-test-6c94768685-65mnb sh #exec into any one of the pods
/ # ifconfig
eth0      Link encap:Ethernet  HWaddr 8A:F7:3C:3F:AA:BA  
          inet addr:10.10.3.6  Bcast:0.0.0.0  Mask:255.255.255.0
          UP BROADCAST RUNNING MULTICAST  MTU:1450  Metric:1
          RX packets:12 errors:0 dropped:0 overruns:0 frame:0
          TX packets:1 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:928 (928.0 B)  TX bytes:42 (42.0 B)

lo        Link encap:Local Loopback  
          inet addr:127.0.0.1  Mask:255.0.0.0
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

/ # ping 10.10.3.5 #test pod-to-pod connectivity inside the cluster
PING 10.10.3.5 (10.10.3.5): 56 data bytes
64 bytes from 10.10.3.5: seq=0 ttl=64 time=0.100 ms
64 bytes from 10.10.3.5: seq=1 ttl=64 time=0.074 ms
64 bytes from 10.10.3.5: seq=4 ttl=64 time=0.090 ms
^C
--- 10.10.3.5 ping statistics ---
5 packets transmitted, 5 packets received, 0% packet loss
round-trip min/avg/max = 0.059/0.076/0.100 ms
/ # ping 223.6.6.6 #test connectivity to the outside network
PING 223.6.6.6 (223.6.6.6): 56 data bytes
64 bytes from 223.6.6.6: seq=3 ttl=127 time=8.529 ms
64 bytes from 223.6.6.6: seq=4 ttl=127 time=8.245 ms
64 bytes from 223.6.6.6: seq=5 ttl=127 time=9.297 ms
^C
--- 223.6.6.6 ping statistics ---
6 packets transmitted, 3 packets received, 50% packet loss
round-trip min/avg/max = 8.245/8.690/9.297 ms
/ # ping www.baidu.com #fails because DNS has not been configured yet
ping: bad address 'www.baidu.com'
/ # cat /etc/resolv.conf 
nameserver 172.20.0.2 #the DNS address assigned by default is the second address of the service network
search default.svc.bokebi.local. svc.bokebi.local. bokebi.local.
options ndots:5
  • Where the service network is defined
root@k8s-master-1:~# vim /etc/ansible/hosts 

...omitted...
# K8S Service CIDR, not overlap with node(host) networking
SERVICE_CIDR="172.20.0.0/16" #the configured service CIDR
  • Where the pause base container image is defined
root@k8s-master-1:~# vim /etc/ansible/roles/kube-node/defaults/main.yml

...omitted...
# base container image
SANDBOX_IMAGE: "k8s.harbor.com/base-images/pause-amd64:3.1" #local harbor path
#SANDBOX_IMAGE: "mirrorgooglecontainers/pause-amd64:3.1" #default upstream address
#SANDBOX_IMAGE: "registry.access.redhat.com/rhel7/pod-infrastructure:latest"
...omitted...

k8s application environment

dashboard

  • Deploy dashboard, the Kubernetes web management UI

Steps

  • The add-on manifests bundled with the kubeasz project
root@k8s-master-1:/etc/ansible/manifests# ll
total 56
drwxrwxr-x 14 root root 4096 Feb  1 10:55 ./
drwxrwxr-x 12 root root 4096 Apr  6 13:36 ../
drwxrwxr-x  4 root root 4096 Feb  1 10:55 dashboard/
drwxrwxr-x  7 root root 4096 Feb  1 10:55 efk/
drwxrwxr-x  3 root root 4096 Feb  1 10:55 es-cluster/
drwxrwxr-x  5 root root 4096 Feb  1 10:55 heapster/
drwxrwxr-x  4 root root 4096 Feb  1 10:55 ingress/
drwxrwxr-x  3 root root 4096 Feb  1 10:55 jenkins/
drwxrwxr-x  3 root root 4096 Feb  1 10:55 mariadb-cluster/
drwxrwxr-x  2 root root 4096 Feb  1 10:55 metrics-server/
drwxrwxr-x  2 root root 4096 Feb  1 10:55 mysql-cluster/
drwxrwxr-x  4 root root 4096 Feb  1 10:55 prometheus/
drwxrwxr-x  3 root root 4096 Feb  1 10:55 redis-cluster/
drwxrwxr-x  2 root root 4096 Feb  1 10:55 storage/


root@k8s-master-1:/etc/ansible# cd manifests/dashboard
root@k8s-master-1:/etc/ansible/manifests/dashboard# mkdir dashboard-v2.0.0-rc7
root@k8s-master-1:/etc/ansible/manifests/dashboard# cd dashboard-v2.0.0-rc7/
root@k8s-master-1:/etc/ansible/manifests/dashboard/dashboard-v2.0.0-rc7#ll
total 20
drwxr-xr-x 2 root root 4096 Apr  6 13:59 ./
drwxrwxr-x 5 root root 4096 Apr  6 13:59 ../
-rw-r--r-- 1 root root  374 Mar 28 22:44 admin-user.yml
-rw-r--r-- 1 root root 7591 Apr  6 13:59 dashboard-2.0.0-rc7.yml

root@k8s-master-1:/etc/ansible/manifests/dashboard/dashboard-v2.0.0-rc7# docker pull kubernetesui/dashboard:v2.0.0-rc7 #pull the image in advance
root@k8s-master-1:/etc/ansible/manifests/dashboard/dashboard-v2.0.0-rc7# docker tag kubernetesui/dashboard:v2.0.0-rc7 k8s.harbor.com/base-images/dashboard:v2.0.0-rc7 #tag the image
root@k8s-master-1:/etc/ansible/manifests/dashboard/dashboard-v2.0.0-rc7# docker push k8s.harbor.com/base-images/dashboard:v2.0.0-rc7 #push it to the local harbor

root@k8s-master-1:/etc/ansible/manifests/dashboard/dashboard-v2.0.0-rc7# docker pull kubernetesui/metrics-scraper:v1.0.4
root@k8s-master-1:/etc/ansible/manifests/dashboard/dashboard-v2.0.0-rc7# docker tag kubernetesui/metrics-scraper:v1.0.4 k8s.harbor.com/base-images/metrics-scraper:v1.0.4
root@k8s-master-1:/etc/ansible/manifests/dashboard/dashboard-v2.0.0-rc7# docker push k8s.harbor.com/base-images/metrics-scraper:v1.0.4

root@k8s-master-1:/etc/ansible/manifests/dashboard/dashboard-v2.0.0-rc7# vim dashboard-2.0.0-rc7.yml #change the image paths in the yaml to the local harbor addresses

...omitted...
    spec:
      containers:
        - name: kubernetes-dashboard
          image: k8s.harbor.com/base-images/dashboard:v2.0.0-rc7
          imagePullPolicy: Always
          ports:
...omitted...
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: k8s.harbor.com/base-images/metrics-scraper:v1.0.4
          ports:
...omitted...
root@k8s-master-1:/etc/ansible/manifests/dashboard/dashboard-v2.0.0-rc7# kubectl apply -f . #apply every manifest in this directory
root@k8s-master-1:/etc/ansible/manifests/dashboard/dashboard-v2.0.0-rc7# kubectl get pod -A
NAMESPACE              NAME                                         READY   STATUS    RESTARTS   AGE
default                net-test-6c94768685-65mnb                    1/1     Running   1          15h
default                net-test-6c94768685-h57sh                    1/1     Running   1          15h
default                net-test-6c94768685-qq9bx                    1/1     Running   1          15h
default                net-test-6c94768685-v749g                    1/1     Running   1          15h
kube-system            kube-flannel-ds-amd64-4xbv4                  1/1     Running   1          14h
kube-system            kube-flannel-ds-amd64-grbkg                  1/1     Running   1          17h
kube-system            kube-flannel-ds-amd64-gx78s                  1/1     Running   1          17h
kube-system            kube-flannel-ds-amd64-r2lv8                  1/1     Running   1          17h
kube-system            kube-flannel-ds-amd64-rchw9                  1/1     Running   1          14h
kube-system            kube-flannel-ds-amd64-srvh4                  1/1     Running   1          17h
kubernetes-dashboard   dashboard-metrics-scraper-64dd9c76f9-h7f6j   1/1     Running   0          13s
kubernetes-dashboard   kubernetes-dashboard-7b8d67d746-gvnfs        1/1     Running   0          13s
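
To reach the UI from a browser, look up the NodePort that the dashboard Service received (a sketch; the actual port depends on the manifest):

root@k8s-master-1:~# kubectl get svc -n kubernetes-dashboard
#then open https://<any-node-ip>:<NodePort> in the browser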

Logging in to the dashboard with a token


  • Get the token used for access
root@k8s-master-1:/etc/ansible/manifests/dashboard/dashboard-v2.0.0-rc7# kubectl get secrets -A | grep admin
kubernetes-dashboard   admin-user-token-twpm2                           kubernetes.io/service-account-token   3      21m
root@k8s-master-1:/etc/ansible/manifests/dashboard/dashboard-v2.0.0-rc7# kubectl describe secrets admin-user-token-twpm2 -n kubernetes-dashboard
Name:         admin-user-token-twpm2
Namespace:    kubernetes-dashboard
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: d53880b1-19c7-41dd-b77b-841edea8fd78

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1350 bytes
namespace:  20 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IkhJYkhnOGU3VDRJdDJRSjRxUHlZdHFUdURrbnJ4ei1ZY056X1p2V2RZVm8ifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXR3cG0yIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkNTM4ODBiMS0xOWM3LTQxZGQtYjc3Yi04NDFlZGVhOGZkNzgiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.JvK1M5T7w5YSWxLsJcz3ZmT8xKuNQF9oPVXxKbAyhDEM6zriB6BF55Uy-vLQl97fro1OuNT4hyHLZjTB1gy6nKBnPAn_v9YcMk_qScUQM-gFNB9eU1eRHM5v7OL5pRXLSNnbvUY3gfQP3aU2vSTC3uriuu5Wsv89u0uZexc2YfPj03Ejpah1ToT-FhDVaMvqKU_cRYbXDWVewyXcTZVl0yeF5Wg37XbtzPZ98-Tl_SdpIlrFJOmAZmQvX8OxnO4xVhqWOh7_sZMSuLI_Zn21xka12K5ZcP89HYLbFO_cNXTeh4BYC3KpH_FxDBrgDYjfUogZmAIbVoHu64MKcoAPxQ


Logging in with a kubeconfig file

//find the admin-user secret name
kubectl get secret -A | grep admin-user

//get and copy the token value
kubectl describe secret admin-user-token-mwfkw -n kubernetes-dashboard

//confirm the working directory
pwd
/root/.kube

//create the kubeconfig file
cp config ./kubeconfig

//edit the kubeconfig file
vim kubeconfig

//paste the token under users -> user in the kubeconfig file (YAML format, so indent with spaces)
users:
- name: kubernetes-admin
  user:
    client-certificate-data: LS0tLS1C ...omitted...
    client-key-data: LS0tLS1CRUdJ ...omitted...
    token: eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLW13Zmt3Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJjZmZhYmI3Ni1iODZmLTQ4OGEtOWE2MC1lNTk4OWU0NTk5YmUiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.KlNQqvVbNR0doZu9rggWBNISCx_CBTAro0zYkcVA528TExYayQWTQF6F8hF2QbY6eYvclVgiYTgYAHsuQu-TR0WMXBrUuoVXL51ARD0nMYUZouNzGGalEw5bYVBes1H5OefXqHKKsMkV1njd87aoTnGOH57_-v88v6hG-UIjI12FBVHXHctZ63-qCYTRSOyHPpbZQI8PAl0huYLIocaCttup19uC4UHdfU4dazFMCWGCXR6l_SH8kzi_Ybes95ERZ5v8OBmh-A9x6IvYG0O4aH-Oc0vO7mVtHmcVRvn2UGNNqjgCaIIJpAyfIp_ypTadkSXHyTrX9guqz2djEHN8vg

#save the finished kubeconfig file to a path of your choice on Windows
#so the Dashboard web UI can later be accessed with this kubeconfig file
  • Open the dashboard web UI in a browser


  • Select the kubeconfig file stored at your chosen path on Windows


  • The page shown after a successful login


Set the token session lifetime

root@k8s-master-1:/etc/ansible/manifests/dashboard/dashboard-v2.0.0-rc7# pwd
/etc/ansible/manifests/dashboard/dashboard-v2.0.0-rc7
root@k8s-master-1:/etc/ansible/manifests/dashboard/dashboard-v2.0.0-rc7# vim dashboard-2.0.0-rc7.yml 

...omitted...
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            - --token-ttl=3600  #1 hour, in seconds (at most 2 hours is recommended, so a web session left open for a long time cannot easily be misused)
...omitted...
root@k8s-master-1:/etc/ansible/manifests/dashboard/dashboard-v2.0.0-rc7# kubectl delete -f .
root@k8s-master-1:/etc/ansible/manifests/dashboard/dashboard-v2.0.0-rc7# kubectl apply -f .

DNS service

  • The two commonly used DNS add-ons are kube-dns and CoreDNS; they resolve the service names in the k8s cluster to their corresponding IP addresses.

Deploy kube-dns

kube-dns: resolves service name domains (client-facing, so give it a fairly generous memory limit)
dns-dnsmasq: provides DNS caching to lower the kubedns load and improve performance (client-facing, so give it a fairly generous memory limit)
dns-sidecar: periodically checks the health of kubedns and dnsmasq
root@k8s-master-1:/etc/ansible/manifests# ll
total 56 #add-on directory; keep frequently used add-ons here so the whole /etc/ansible directory can later be archived as a backup
drwxrwxr-x 14 root root 4096 Feb  1 10:55 ./
drwxrwxr-x 12 root root 4096 Apr  6 13:36 ../
drwxrwxr-x  5 root root 4096 Apr  6 13:59 dashboard/
drwxrwxr-x  7 root root 4096 Feb  1 10:55 efk/
drwxrwxr-x  3 root root 4096 Feb  1 10:55 es-cluster/
drwxrwxr-x  5 root root 4096 Feb  1 10:55 heapster/
drwxrwxr-x  4 root root 4096 Feb  1 10:55 ingress/
drwxrwxr-x  3 root root 4096 Feb  1 10:55 jenkins/
drwxrwxr-x  3 root root 4096 Feb  1 10:55 mariadb-cluster/
drwxrwxr-x  2 root root 4096 Feb  1 10:55 metrics-server/
drwxrwxr-x  2 root root 4096 Feb  1 10:55 mysql-cluster/
drwxrwxr-x  4 root root 4096 Feb  1 10:55 prometheus/
drwxrwxr-x  3 root root 4096 Feb  1 10:55 redis-cluster/
drwxrwxr-x  2 root root 4096 Feb  1 10:55 storage/

root@k8s-master-1:/etc/ansible/manifests# mkdir dns
root@k8s-master-1:/etc/ansible/manifests# cd dns/
root@k8s-master-1:/etc/ansible/manifests/dns# ll
root@k8s-master-1:/etc/ansible/manifests/dns# tree
.
├── CoreDNS
│   ├── deployment-master.zip
│   └── magedu-coredns.yaml
└── kube-dns
    ├── busybox-online.tar.gz
    ├── busybox.yaml
    ├── heapster
    │   ├── grafana.yaml
    │   ├── heapster-amd64_v1.5.1.tar
    │   ├── heapster-grafana-amd64-v4.4.3.tar
    │   ├── heapster-influxdb-amd64_v1.3.3.tar
    │   ├── heapster.yaml
    │   └── influxdb.yaml
    ├── k8s-dns-dnsmasq-nanny-amd64_1.14.13.tar.gz #image required by kube-dns
    ├── k8s-dns-kube-dns-amd64_1.14.13.tar.gz #image required by kube-dns
    ├── k8s-dns-sidecar-amd64_1.14.13.tar.gz #image required by kube-dns
    ├── kube-dns-bokebi.yaml  #the main manifest
    └── kube-dns.yaml #the copied template

3 directories, 16 files
  • Where the dns manifests come from
root@k8s-master-1:/usr/local/src# ll
total 458248
drwxr-xr-x  2 root root      4096 Apr  6 17:28 ./
drwxr-xr-x 10 root root      4096 Dec 30 23:20 ../
-rwxr-xr-x  1 root root     12965 Apr  5 02:01 easzup*
-rw-r--r--  1 root root       463 Apr  4 17:52 install-docker.sh
-rw-r--r--  1 root root  13105487 Apr  6 01:32 kubernetes-client-linux-amd64.tar.gz
-rw-r--r--  1 root root  96338064 Apr  6 01:32 kubernetes-node-linux-amd64.tar.gz
-rw-r--r--  1 root root 359299390 Apr  6 01:35 kubernetes-server-linux-amd64.tar.gz
-rw-r--r--  1 root root    462528 Apr  6 01:31 kubernetes.tar.gz
-rw-r--r--  1 root root       337 Apr  4 20:35 scp.sh
root@k8s-master-1:/usr/local/src# tar xf kubernetes-client-linux-amd64.tar.gz
root@k8s-master-1:/usr/local/src# tar xf kubernetes-node-linux-amd64.tar.gz
root@k8s-master-1:/usr/local/src# tar xf kubernetes-server-linux-amd64.tar.gz
root@k8s-master-1:/usr/local/src# tar xf kubernetes.tar.gz

root@k8s-master-1:/usr/local/src# cd kubernetes/cluster/addons
root@k8s-master-1:/usr/local/src/kubernetes/cluster/addons# ll #the upstream add-on directory
total 92
drwxr-xr-x 21 root root 4096 Mar 13 05:48 ./
drwxr-xr-x 11 root root 4096 Mar 13 05:48 ../
drwxr-xr-x  2 root root 4096 Mar 13 05:48 addon-manager/
-rw-r--r--  1 root root  765 Mar 13 05:48 BUILD
drwxr-xr-x  3 root root 4096 Mar 13 05:48 calico-policy-controller/
drwxr-xr-x  3 root root 4096 Mar 13 05:48 cluster-loadbalancing/
drwxr-xr-x  7 root root 4096 Mar 13 05:48 cluster-monitoring/
drwxr-xr-x  2 root root 4096 Mar 13 05:48 dashboard/
drwxr-xr-x  3 root root 4096 Mar 13 05:48 device-plugins/
drwxr-xr-x  5 root root 4096 Mar 13 05:48 dns/ #the upstream dns add-on directory
drwxr-xr-x  2 root root 4096 Mar 13 05:48 dns-horizontal-autoscaler/
drwxr-xr-x  5 root root 4096 Mar 13 05:48 fluentd-elasticsearch/
drwxr-xr-x  4 root root 4096 Mar 13 05:48 fluentd-gcp/
drwxr-xr-x  3 root root 4096 Mar 13 05:48 ip-masq-agent/
drwxr-xr-x  2 root root 4096 Mar 13 05:48 kube-proxy/
drwxr-xr-x  3 root root 4096 Mar 13 05:48 metadata-agent/
drwxr-xr-x  3 root root 4096 Mar 13 05:48 metadata-proxy/
drwxr-xr-x  2 root root 4096 Mar 13 05:48 metrics-server/
drwxr-xr-x  5 root root 4096 Mar 13 05:48 node-problem-detector/
drwxr-xr-x  8 root root 4096 Mar 13 05:48 rbac/
-rw-r--r--  1 root root 1763 Mar 13 05:48 README.md
drwxr-xr-x  8 root root 4096 Mar 13 05:48 storage-class/
drwxr-xr-x  4 root root 4096 Mar 13 05:48 volumesnapshots/
root@k8s-master-1:/usr/local/src/kubernetes/cluster/addons# cd dns/kube-dns/
root@k8s-master-1:/usr/local/src/kubernetes/cluster/addons/dns/kube-dns# ll #kube-dns.yaml.base in this directory is the template
total 48
drwxr-xr-x 2 root root 4096 Apr  6 18:07 ./
drwxr-xr-x 5 root root 4096 Mar 13 05:48 ../
-rw-r--r-- 1 root root 6852 Apr  6 18:02 kube-dns.yaml.base
-rw-r--r-- 1 root root 6932 Mar 13 05:48 kube-dns.yaml.in
-rw-r--r-- 1 root root 6845 Mar 13 05:48 kube-dns.yaml.sed
-rw-r--r-- 1 root root 1077 Mar 13 05:48 Makefile
-rw-r--r-- 1 root root 1954 Mar 13 05:48 README.md
-rw-r--r-- 1 root root  376 Mar 13 05:48 transforms2salt.sed
-rw-r--r-- 1 root root  319 Mar 13 05:48 transforms2sed.sed
root@k8s-master-1:/usr/local/src/kubernetes/cluster/addons/dns/kube-dns# cp -a kube-dns.yaml.base /etc/ansible/manifests/dns/kube-dns/kube-dns-bokebi.yaml
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# kubectl get pod -o wide
NAME                        READY   STATUS    RESTARTS   AGE   IP          NODE             NOMINATED NODE   READINESS GATES
net-test-6c94768685-65mnb   1/1     Running   1          18h   10.10.3.7   192.168.26.135   <none>           <none>
net-test-6c94768685-h57sh   1/1     Running   1          18h   10.10.2.6   192.168.26.145   <none>           <none>
net-test-6c94768685-qq9bx   1/1     Running   1          18h   10.10.3.8   192.168.26.135   <none>           <none>
net-test-6c94768685-v749g   1/1     Running   1          18h   10.10.2.7   192.168.26.145   <none>           <none>
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# kubectl exec -it net-test-6c94768685-65mnb sh
/ # cat /etc/resolv.conf 
nameserver 172.20.0.2 #the DNS address assigned by default is the second address of the service network
search default.svc.bokebi.local. svc.bokebi.local. bokebi.local.
options ndots:5
vim kube-dns-bokebi.yaml

...omitted...
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 172.20.0.2 #must match the DNS address assigned to the pods by default
...omitted...
          limits:
            memory: 512Mi #maximum memory this pod may use
          requests:
            cpu: 100m #minimum cpu required
            memory: 70Mi #minimum memory required
...omitted...
        args:
        - --domain=bokebi.local. #service domain suffix; the trailing dot marks the end and must not be dropped
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2

        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --no-negcache
        - --dns-loop-detect
        - --log-facility=-
        - --server=/bokebi.local/127.0.0.1#10053 (if the company has an internal DNS server, this is where queries for a given domain suffix can be forwarded to that server's address and port)
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
...省略...
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.bokebi.local,5,SRV
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.bokebi.local,5,SRV

# For now only the DNS settings have been changed; the three image paths below are updated in a later step
      - name: kubedns # resolves service-name DNS records
        image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13

      - name: dnsmasq # DNS cache in front of kubedns, reducing its load and improving performance
        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13

      - name: sidecar # periodically checks the health of kubedns and dnsmasq
        image: k8s.gcr.io/k8s-dns-sidecar:1.14.13
# Replace every occurrence of __PILLAR__DNS__DOMAIN__ with our own domain, bokebi.local (e.g. with sed, as sketched below)
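A minimal way to do that replacement with sed (a sketch; it assumes the copied template still contains the upstream __PILLAR__DNS__DOMAIN__ and __PILLAR__DNS__SERVER__ placeholders rather than already-edited values):

sed -i -e 's/__PILLAR__DNS__DOMAIN__/bokebi.local/g' \
       -e 's/__PILLAR__DNS__SERVER__/172.20.0.2/g' kube-dns-bokebi.yaml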
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# ll # the pre-downloaded images have been uploaded here
total 137012
drwxr-xr-x 3 root root     4096 Apr  6 18:07 ./
drwxr-xr-x 4 root root     4096 Apr  6 17:24 ../
-rw-r--r-- 1 root root 41687040 Apr  6 16:54 k8s-dns-dnsmasq-nanny-amd64_1.14.13.tar.gz
-rw-r--r-- 1 root root 51441152 Apr  6 16:55 k8s-dns-kube-dns-amd64_1.14.13.tar.gz
-rw-r--r-- 1 root root 43140608 Apr  6 16:57 k8s-dns-sidecar-amd64_1.14.13.tar.gz

- Import the images
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# docker load -i k8s-dns-dnsmasq-nanny-amd64_1.14.13.tar.gz 
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# docker load -i k8s-dns-kube-dns-amd64_1.14.13.tar.gz 
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# docker load -i k8s-dns-sidecar-amd64_1.14.13.tar.gz 

- Tag the images
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# docker tag gcr.io/google-containers/k8s-dns-sidecar-amd64:1.14.13 k8s.harbor.com/base-images/k8s-dns-sidecar-amd64:1.14.13
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# docker tag gcr.io/google-containers/k8s-dns-kube-dns-amd64:1.14.13 k8s.harbor.com/base-images/k8s-dns-kube-dns-amd64:1.14.13
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# docker tag gcr.io/google-containers/k8s-dns-dnsmasq-nanny-amd64:1.14.13 k8s.harbor.com/base-images/k8s-dns-dnsmasq-nanny-amd64:1.14.13

- Push the images to Harbor
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# docker push k8s.harbor.com/base-images/k8s-dns-sidecar-amd64:1.14.13
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# docker push k8s.harbor.com/base-images/k8s-dns-kube-dns-amd64:1.14.13
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# docker push k8s.harbor.com/base-images/k8s-dns-dnsmasq-nanny-amd64:1.14.13
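Optionally, as a sanity check (a sketch, using the image name pushed above), confirm that a node which will schedule kube-dns can actually pull from the local Harbor before deploying:

docker pull k8s.harbor.com/base-images/k8s-dns-kube-dns-amd64:1.14.13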

- Update the image paths in kube-dns-bokebi.yaml
      - name: kubedns # resolves service-name DNS records
        image: k8s.harbor.com/base-images/k8s-dns-kube-dns-amd64:1.14.13

      - name: dnsmasq # DNS cache in front of kubedns, reducing its load and improving performance
        image: k8s.harbor.com/base-images/k8s-dns-dnsmasq-nanny-amd64:1.14.13

      - name: sidecar # periodically checks the health of kubedns and dnsmasq
        image: k8s.harbor.com/base-images/k8s-dns-sidecar-amd64:1.14.13
  • Apply the manifest
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# kubectl apply -f kube-dns-bokebi.yaml 
service/kube-dns created
serviceaccount/kube-dns created
configmap/kube-dns created
deployment.apps/kube-dns created
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# kubectl get pod -n kube-system 
NAME                          READY   STATUS    RESTARTS   AGE
kube-dns-c5c7f884d-2f2pl      3/3     Running   0          2m45s # all 3 containers are up
kube-flannel-ds-amd64-4xbv4   1/1     Running   1          18h
kube-flannel-ds-amd64-grbkg   1/1     Running   1          21h
kube-flannel-ds-amd64-gx78s   1/1     Running   1          21h
kube-flannel-ds-amd64-r2lv8   1/1     Running   1          21h
kube-flannel-ds-amd64-rchw9   1/1     Running   1          19h
kube-flannel-ds-amd64-srvh4   1/1     Running   1          21h
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# kubectl get pod -A -n default
NAMESPACE              NAME                                         READY   STATUS    RESTARTS   AGE
default                net-test-6c94768685-65mnb                    1/1     Running   1          19h
default                net-test-6c94768685-h57sh                    1/1     Running   1          19h
default                net-test-6c94768685-qq9bx                    1/1     Running   1          19h
default                net-test-6c94768685-v749g                    1/1     Running   1          19h
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# kubectl exec -it net-test-6c94768685-65mnb sh
/ # ping www.baidu.com # external domain names now resolve from inside the pod
PING www.baidu.com (61.135.169.121): 56 data bytes
64 bytes from 61.135.169.121: seq=0 ttl=127 time=7.766 ms
64 bytes from 61.135.169.121: seq=1 ttl=127 time=11.605 ms
64 bytes from 61.135.169.121: seq=2 ttl=127 time=9.405 ms
^C
--- www.baidu.com ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 7.766/9.592/11.605 ms
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# kubectl get svc -A
NAMESPACE              NAME                        TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)         AGE
default                kubernetes                  ClusterIP   172.20.0.1     <none>        443/TCP         40h
kube-system            kube-dns                    ClusterIP   172.20.0.2     <none>        53/UDP,53/TCP   18m
kubernetes-dashboard   dashboard-metrics-scraper   ClusterIP   172.20.62.6    <none>        8000/TCP        3h55m
kubernetes-dashboard   kubernetes-dashboard        NodePort    172.20.28.87   <none>        443:30002/TCP   3h55m
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# kubectl get pod -A -n default
NAMESPACE              NAME                                         READY   STATUS    RESTARTS   AGE
default                net-test-6c94768685-65mnb                    1/1     Running   1          19h
default                net-test-6c94768685-h57sh                    1/1     Running   1          19h
default                net-test-6c94768685-qq9bx                    1/1     Running   1          19h
default                net-test-6c94768685-v749g                    1/1     Running   1          19h
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# kubectl exec -it net-test-6c94768685-65mnb sh
/ # ping kubernetes # a service in the same namespace can be reached by its short name
PING kubernetes (172.20.0.1): 56 data bytes
64 bytes from 172.20.0.1: seq=0 ttl=64 time=0.041 ms
64 bytes from 172.20.0.1: seq=1 ttl=64 time=0.058 ms
64 bytes from 172.20.0.1: seq=2 ttl=64 time=0.054 ms
^C
--- kubernetes ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 0.041/0.051/0.058 ms
/ # ping dashboard-metrics-scraper # a service in another namespace cannot be reached by its short name
ping: bad address 'dashboard-metrics-scraper'
/ # ping dashboard-metrics-scraper.kubernetes-dashboard.svc.bokebi.local # must use the full form <service-name>.<namespace>.svc.bokebi.local (the service domain suffix we defined in the hosts file)
PING dashboard-metrics-scraper.kubernetes-dashboard.svc.bokebi.local (172.20.62.6): 56 data bytes
64 bytes from 172.20.62.6: seq=0 ttl=64 time=0.307 ms
64 bytes from 172.20.62.6: seq=1 ttl=64 time=0.079 ms
64 bytes from 172.20.62.6: seq=2 ttl=64 time=0.124 ms
^C
--- dashboard-metrics-scraper.kubernetes-dashboard.svc.bokebi.local ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 0.079/0.170/0.307 ms

Using busybox for name resolution

  • Import the busybox image and push it to the local Harbor
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# docker load -i busybox-online.tar.gz
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# docker images
quay.io/prometheus/busybox                               latest              747e1d7f6665        2 years ago         2.59MB

root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# docker tag quay.io/prometheus/busybox:latest k8s.harbor.com/base-images/quay.io/prometheus/busybox:latest
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# docker push k8s.harbor.com/base-images/quay.io/prometheus/busybox:latest
  • Write the busybox .yaml manifest (a sketch follows) and use it to resolve names in other namespaces
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# vim busybox.yaml
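A minimal sketch of what busybox.yaml might contain, matching the image pushed above (the pod name busybox matches the later output; the sleep command is an assumption used to keep the pod running so we can exec into it):

apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: k8s.harbor.com/base-images/quay.io/prometheus/busybox:latest
    imagePullPolicy: IfNotPresent
    command: ["sleep", "3600"]   # keep the pod alive for interactive DNS tests
  restartPolicy: Always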
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# kubectl apply -f busybox.yaml 
pod/busybox created
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# kubectl get pod
NAME                        READY   STATUS    RESTARTS   AGE
busybox                     1/1     Running   0          6s
net-test-6c94768685-65mnb   1/1     Running   1          21h
net-test-6c94768685-h57sh   1/1     Running   1          21h
net-test-6c94768685-qq9bx   1/1     Running   1          21h
net-test-6c94768685-v749g   1/1     Running   1          21h
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# kubectl exec busybox nslookup dashboard-metrics-scraper.kubernetes-dashboard.svc.bokebi.local # use busybox to resolve a service in another namespace
Server:    172.20.0.2
Address 1: 172.20.0.2 kube-dns.kube-system.svc.bokebi.local

Name:      dashboard-metrics-scraper.kubernetes-dashboard.svc.bokebi.local
Address 1: 172.20.62.6 dashboard-metrics-scraper.kubernetes-dashboard.svc.bokebi.local

Deploying coredns

root@k8s-master-1:/etc/ansible/manifests/dns# git clone https://github.com/coredns/deployment.git # clone the GitHub project
root@k8s-master-1:/etc/ansible/manifests/dns# ll
total 20
drwxr-xr-x  5 root root 4096 Apr  6 21:02 ./
drwxrwxr-x 15 root root 4096 Apr  6 18:00 ../
drwxr-xr-x  2 root root 4096 Apr  6 21:07 CoreDNS/
drwxr-xr-x  9 root root 4096 Apr  6 21:02 deployment/
drwxr-xr-x  3 root root 4096 Apr  6 20:43 kube-dns/
root@k8s-master-1:/etc/ansible/manifests/dns# cd deployment/kubernetes/
root@k8s-master-1:/etc/ansible/manifests/dns/deployment/kubernetes# ll
total 68
drwxr-xr-x 4 root root 4096 Apr  6 21:04 ./
drwxr-xr-x 9 root root 4096 Apr  6 21:02 ../
-rw-r--r-- 1 root root 4232 Apr  6 21:02 CoreDNS-k8s_version.md
-rw-r--r-- 1 root root 4197 Apr  6 21:02 coredns.yaml.sed
drwxr-xr-x 2 root root 4096 Apr  6 21:02 corefile-tool/
-rwxr-xr-x 1 root root 3789 Apr  6 21:02 deploy.sh* # script that generates the manifest from the template
-rw-r--r-- 1 root root 4985 Apr  6 21:02 FAQs.md
drwxr-xr-x 2 root root 4096 Apr  6 21:02 migration/
-rw-r--r-- 1 root root 2706 Apr  6 21:02 README.md
-rwxr-xr-x 1 root root 1336 Apr  6 21:02 rollback.sh*
-rw-r--r-- 1 root root 7159 Apr  6 21:02 Scaling_CoreDNS.md
-rw-r--r-- 1 root root 7911 Apr  6 21:02 Upgrading_CoreDNS.md
root@k8s-master-1:/etc/ansible/manifests/dns/deployment/kubernetes# ./deploy.sh  > /etc/ansible/manifests/dns/CoreDNS/coredns-bokebi.yaml # use the script to generate the .yaml manifest into the target directory

Deploying coredns this way presumes kube-dns is already running in the cluster, since deploy.sh picks up the cluster DNS IP (and any existing kube-dns ConfigMap) from the live cluster; the flag-based form is sketched below.
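If the cluster domain and DNS IP are to be given explicitly instead (or kube-dns is not present), deploy.sh also accepts them as flags; a sketch, assuming the -d/-i options documented in the coredns/deployment repository:

./deploy.sh -d bokebi.local -i 172.20.0.2 > /etc/ansible/manifests/dns/CoreDNS/coredns-bokebi.yaml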
root@k8s-master-1:/etc/ansible/manifests/dns/deployment/kubernetes# cd /etc/ansible/manifests/dns/CoreDNS/ # change into the coredns deployment directory
root@k8s-master-1:/etc/ansible/manifests/dns/CoreDNS# ll
total 56
drwxr-xr-x 2 root root  4096 Apr  6 21:07 ./
drwxr-xr-x 5 root root  4096 Apr  6 21:02 ../
-rw-r--r-- 1 root root  4176 Apr  6 21:06 coredns-bokebi.yaml
-rw-r--r-- 1 root root 29986 Apr  6 16:57 deployment-master.zip
  • Edit the generated coredns deployment .yaml file
...omitted...
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes bokebi.local in-addr.arpa ip6.arpa { # change the service domain suffix here
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . 223.6.6.6
        #forward . /etc/resolv.conf # names the cluster cannot resolve itself are forwarded to the server(s) given here; a per-zone forwarding example follows this excerpt
        cache 30
        loop
        reload
        loadbalance
    }
...omitted...
      - name: coredns
        #image: coredns/coredns:1.6.7 # default upstream image
        image: k8s.harbor.com/base-images/coredns/coredns:1.6.9 # image pushed to the local Harbor
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 512Mi # maximum memory this pod may use
...omitted...
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 172.20.0.2 # IP taken over from the existing kube-dns service
...omitted...
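For the scenario mentioned earlier, where queries for an internal corporate zone should go to a dedicated DNS server, the same Corefile can carry an additional server block for just that zone; a sketch in which company.local and 10.0.0.10 are hypothetical placeholders:

    company.local:53 {
        errors
        cache 30
        forward . 10.0.0.10   # internal DNS server handles this zone only
    }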
root@k8s-master-1:~# docker pull coredns/coredns:1.6.9
root@k8s-master-1:~# docker tag coredns/coredns:1.6.9 k8s.harbor.com/base-images/coredns/coredns:1.6.9
root@k8s-master-1:~# docker push k8s.harbor.com/base-images/coredns/coredns:1.6.9
  • Delete the kube-dns deployment
root@k8s-master-1:~# cd /etc/ansible/manifests/dns/kube-dns/
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# kubectl delete -f kube-dns-bokebi.yaml
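Before applying coredns it may be worth confirming the kube-dns objects are really gone, since coredns re-uses the 172.20.0.2 clusterIP (a sketch):

kubectl get svc,pod -n kube-system | grep kube-dns   # should return nothing once deletion completes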
  • Deploy coredns
root@k8s-master-1:/etc/ansible/manifests/dns/kube-dns# cd /etc/ansible/manifests/dns/CoreDNS/
root@k8s-master-1:/etc/ansible/manifests/dns/CoreDNS# kubectl apply -f coredns-bokebi.yaml
  • Verify the coredns deployment
root@k8s-master-1:/etc/ansible/manifests/dns/CoreDNS# kubectl get pod -n kube-system 
NAME                          READY   STATUS    RESTARTS   AGE
coredns-759cc998f9-59874      1/1     Running   0          2m35s # coredns is now running
kube-flannel-ds-amd64-4xbv4   1/1     Running   1          21h
kube-flannel-ds-amd64-grbkg   1/1     Running   1          24h
kube-flannel-ds-amd64-gx78s   1/1     Running   1          24h
kube-flannel-ds-amd64-r2lv8   1/1     Running   1          24h
kube-flannel-ds-amd64-rchw9   1/1     Running   1          22h
kube-flannel-ds-amd64-srvh4   1/1     Running   1          24h
root@k8s-master-1:/etc/ansible/manifests/dns/CoreDNS# kubectl get pod -n default
NAME                        READY   STATUS    RESTARTS   AGE
busybox                     1/1     Running   1          88m
net-test-6c94768685-65mnb   1/1     Running   1          23h
net-test-6c94768685-h57sh   1/1     Running   1          23h
net-test-6c94768685-qq9bx   1/1     Running   1          23h
net-test-6c94768685-v749g   1/1     Running   1          23h
root@k8s-master-1:/etc/ansible/manifests/dns/CoreDNS# kubectl exec -it net-test-6c94768685-qq9bx sh
/ # ping www.baidu.com
PING www.baidu.com (61.135.169.121): 56 data bytes
64 bytes from 61.135.169.121: seq=0 ttl=127 time=7.970 ms
64 bytes from 61.135.169.121: seq=1 ttl=127 time=8.712 ms
64 bytes from 61.135.169.121: seq=2 ttl=127 time=10.392 ms
^C
--- www.baidu.com ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 7.970/9.024/10.392 ms

Using busybox for name resolution

  • coredns is the recommended choice: it is structurally simpler and, under the same workload, performs better than kube-dns
root@k8s-master-1:/etc/ansible/manifests/dns/CoreDNS# kubectl exec busybox nslookup dashboard-metrics-scraper.kubernetes-dashboard.svc.bokebi.local
Server:    172.20.0.2
Address 1: 172.20.0.2 kube-dns.kube-system.svc.bokebi.local

Name:      dashboard-metrics-scraper.kubernetes-dashboard.svc.bokebi.local
Address 1: 172.20.62.6 dashboard-metrics-scraper.kubernetes-dashboard.svc.bokebi.local

Key point: decide on the Kubernetes network components before the cluster is deployed. Trying to swap the network plugin after the cluster is up usually ends with an incomplete replacement, or with no way to replace it at all.
