
Binary Installation of Kubernetes (k8s) v1.27.3, IPv4/IPv6 Dual Stack, Deployable Without Internet Access

https://github.com/cby-chen/Kubernetes Open source takes effort; please give the project a star, thank you!

Introduction

A binary, highly available Kubernetes (k8s) installation and deployment with IPv4+IPv6 dual-stack support.

I use IPv6 so the cluster can be accessed over the public network, which is why static IPv6 addresses are configured.

If you have no IPv6 environment, or do not want to use IPv6, simply do not configure IPv6 addresses on the hosts.

Not configuring IPv6 does not affect the later steps; the cluster still supports IPv6, which leaves room for future expansion.

If you do not want IPv6, just leave IPv6 off the NICs; do not delete or modify the IPv6-related configuration, otherwise problems will occur.

It is strongly recommended to read the documentation on GitHub !!!

If problems are found, the documentation on GitHub will be updated, and documentation for new versions will be published as soon as possible !!!

Manual (step-by-step) project address: https://github.com/cby-chen/Kubernetes

1. Environment

Hostname      IP Address       Role             Software
--            192.168.1.60     Internet node    Downloads all required installation packages
Master01      192.168.0.31     master node      kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubelet, kube-proxy, nfs-client, haproxy, keepalived, nginx
Master02      192.168.0.32     master node      kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubelet, kube-proxy, nfs-client, haproxy, keepalived, nginx
Master03      192.168.0.33     master node      kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubelet, kube-proxy, nfs-client, haproxy, keepalived, nginx
Node01        192.168.0.34     node             kubelet, kube-proxy, nfs-client, nginx
Node02        192.168.0.35     node             kubelet, kube-proxy, nfs-client, nginx
--            192.168.0.36     VIP

Network segments

Physical hosts: 192.168.0.0/24

service: 10.96.0.0/12

pod: 172.16.0.0/12

The installation packages have already been bundled: https://github.com/cby-chen/Kubernetes/releases/download/v1.27.3/kubernetes-v1.27.3.tar

1.1. Basic OS Environment Configuration for k8s

1.2. Configure IP Addresses

# Note!
# If the VMs were cloned, the NIC UUIDs will be duplicated
# Duplicated UUIDs must be regenerated
# With a duplicated UUID the host cannot obtain an IPv6 address
# 
# List the current connections and their UUIDs:
# nmcli con show
# Delete the connection whose UUID needs to be changed:
# nmcli con delete uuid <old UUID>
# Regenerate the UUID:
# nmcli con add type ethernet ifname <interface name> con-name <new name>
# Bring the connection back up:
# nmcli con up <new name>

# Change the NIC UUIDs
ssh root@192.168.0.31 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0"
ssh root@192.168.0.32 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0"
ssh root@192.168.0.33 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0"
ssh root@192.168.0.34 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0"
ssh root@192.168.0.35 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0"

# Configure static IPv4 addresses
ssh root@192.168.0.31 "nmcli con mod eth0 ipv4.addresses 192.168.0.31/24; nmcli con mod eth0 ipv4.gateway  192.168.0.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0"
ssh root@192.168.0.32 "nmcli con mod eth0 ipv4.addresses 192.168.0.32/24; nmcli con mod eth0 ipv4.gateway  192.168.0.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0"
ssh root@192.168.0.33 "nmcli con mod eth0 ipv4.addresses 192.168.0.33/24; nmcli con mod eth0 ipv4.gateway  192.168.0.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0"
ssh root@192.168.0.34 "nmcli con mod eth0 ipv4.addresses 192.168.0.34/24; nmcli con mod eth0 ipv4.gateway  192.168.0.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0"
ssh root@192.168.0.35 "nmcli con mod eth0 ipv4.addresses 192.168.0.35/24; nmcli con mod eth0 ipv4.gateway  192.168.0.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0"


# If you have no IPv6, simply skip this step
ssh root@192.168.0.31 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::10; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0"
ssh root@192.168.0.32 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::20; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0"
ssh root@192.168.0.33 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::30; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0"
ssh root@192.168.0.34 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::40; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0"
ssh root@192.168.0.35 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::50; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0"


# Check the NIC configuration
# nmcli device show eth0
# nmcli con show eth0
[root@localhost ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0 
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=none
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=no
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=eth0
UUID=424fd260-c480-4899-97e6-6fc9722031e8
DEVICE=eth0
ONBOOT=yes
IPADDR=192.168.0.31
PREFIX=24
GATEWAY=192.168.0.1
DNS1=8.8.8.8
IPV6ADDR=fc00:43f4:1eea:1::10/128
IPV6_DEFAULTGW=fc00:43f4:1eea:1::1
DNS2=2400:3200::1
[root@localhost ~]#

1.3. Set Hostnames

hostnamectl set-hostname k8s-master01
hostnamectl set-hostname k8s-master02
hostnamectl set-hostname k8s-master03
hostnamectl set-hostname k8s-node01
hostnamectl set-hostname k8s-node02
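
Optionally, if root SSH access to the hosts is already set up, the hostnames can also be set remotely from one machine; a minimal sketch (the IPs follow the table above):

ssh root@192.168.0.31 "hostnamectl set-hostname k8s-master01"
ssh root@192.168.0.32 "hostnamectl set-hostname k8s-master02"
ssh root@192.168.0.33 "hostnamectl set-hostname k8s-master03"
ssh root@192.168.0.34 "hostnamectl set-hostname k8s-node01"
ssh root@192.168.0.35 "hostnamectl set-hostname k8s-node02"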

1.4. Configure the Package Repositories

# Mirror instructions for other distributions
# https://mirrors.tuna.tsinghua.edu.cn/help/

# For Ubuntu
sed -i 's/cn.archive.ubuntu.com/mirrors.ustc.edu.cn/g' /etc/apt/sources.list

# For CentOS 7
sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \
         -e 's|^#baseurl=http://mirror.centos.org/centos|baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos|g' \
         -i.bak \
         /etc/yum.repos.d/CentOS-*.repo

# For CentOS 8
sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \
         -e 's|^#baseurl=http://mirror.centos.org/$contentdir|baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos|g' \
         -i.bak \
         /etc/yum.repos.d/CentOS-*.repo

# For a private repository
sed -e 's|^mirrorlist=|#mirrorlist=|g' -e 's|^#baseurl=http://mirror.centos.org/\$contentdir|baseurl=http://192.168.1.123/centos|g' -i.bak  /etc/yum.repos.d/CentOS-*.repo

1.5. Install Essential Tools

# For Ubuntu
apt update && apt upgrade -y && apt install -y wget psmisc vim net-tools nfs-kernel-server telnet lvm2 git tar curl

# For CentOS 7
yum update -y && yum -y install  wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git tar curl

# For CentOS 8
yum update -y && yum -y install wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git network-scripts tar curl

1.5.1 Download the Files Needed for Offline Installation (Optional)

Install an identical OS on an internet-connected server and use it to download the required packages.

CentOS 7
# Install the necessary tools
yum -y install createrepo yum-utils wget epel*

# Download the full set of packages with all dependencies
repotrack createrepo wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git tar curl gcc keepalived haproxy bash-completion chrony sshpass ipvsadm ipset sysstat conntrack libseccomp

# Remove the downloaded libseccomp RPM
rm -rf libseccomp-*.rpm

# Download a newer libseccomp
wget http://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/libseccomp-2.5.1-1.el8.x86_64.rpm

# Generate the yum repository metadata
createrepo -u -d /data/centos7/

# Copy the packages to the intranet machines
scp -r /data/centos7/ root@192.168.0.31:
scp -r /data/centos7/ root@192.168.0.32:
scp -r /data/centos7/ root@192.168.0.33:
scp -r /data/centos7/ root@192.168.0.34:
scp -r /data/centos7/ root@192.168.0.35:

# Create the repo configuration file on the intranet machines
rm -rf /etc/yum.repos.d/*
cat > /etc/yum.repos.d/123.repo  << EOF 
[cby]
name=CentOS-\$releasever - Media
baseurl=file:///root/centos7/
gpgcheck=0
enabled=1
EOF

# Install the downloaded packages
yum clean all
yum makecache
yum install /root/centos7/* --skip-broken -y

#### Note #####
# If yum still does not work after installation, run the following again
rm -rf /etc/yum.repos.d/*
cat > /etc/yum.repos.d/123.repo  << EOF 
[cby]
name=CentOS-\$releasever - Media
baseurl=file:///root/centos7/
gpgcheck=0
enabled=1
EOF
yum clean all
yum makecache
yum install /root/centos7/* --skip-broken -y

#### Note #####
# Install chrony and libseccomp
# yum install /root/centos7/libseccomp-2.5.1*.rpm -y
# yum install /root/centos7/chrony-*.rpm -y
CentOS 8
# Install the necessary tools
yum -y install createrepo yum-utils wget epel*

# Download the full set of packages with all dependencies
repotrack wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git network-scripts tar curl gcc keepalived haproxy bash-completion chrony sshpass ipvsadm ipset sysstat conntrack libseccomp

# Generate the yum repository metadata
createrepo -u -d /data/centos8/

# Copy the packages to the intranet machines
scp -r centos8/ root@192.168.0.31:
scp -r centos8/ root@192.168.0.32:
scp -r centos8/ root@192.168.0.33:
scp -r centos8/ root@192.168.0.34:
scp -r centos8/ root@192.168.0.35:

# Create the repo configuration file on the intranet machines
rm -rf /etc/yum.repos.d/*
cat > /etc/yum.repos.d/123.repo  << EOF 
[cby]
name=CentOS-\$releasever - Media
baseurl=file:///root/centos8/
gpgcheck=0
enabled=1
EOF

# Install the downloaded packages
yum clean all
yum makecache
yum install /root/centos8/* --skip-broken -y

#### Note #####
# If yum still does not work after installation, run the following again
rm -rf /etc/yum.repos.d/*
cat > /etc/yum.repos.d/123.repo  << EOF 
[cby]
name=CentOS-\$releasever - Media
baseurl=file:///root/centos8/
gpgcheck=0
enabled=1
EOF
yum clean all
yum makecache
yum install /root/centos8/* --skip-broken -y
Ubuntu: Download Packages and Dependencies
#!/bin/bash

logfile=123.log
ret=""
function getDepends()
{
   echo "fileName is" $1>>$logfile
   # use tr to del < >
   ret=`apt-cache depends $1|grep Depends |cut -d: -f2 |tr -d "<>"`
   echo $ret|tee  -a $logfile
}
# Packages whose dependency chains need to be fetched
libs="wget psmisc vim net-tools nfs-kernel-server telnet lvm2 git tar curl gcc keepalived haproxy bash-completion chrony sshpass ipvsadm ipset sysstat conntrack libseccomp"

# download dependencies of libs, 3 levels deep
i=0
while [ $i -lt 3 ] ;
do
    let i++
    echo $i
    # download libs
    newlist=" "
    for j in $libs
    do
        added="$(getDepends $j)"
        newlist="$newlist $added"
        apt install $added --reinstall -d -y
    done

    libs=$newlist
done

# Build the local repository metadata
apt install dpkg-dev
sudo cp /var/cache/apt/archives/*.deb /data/ubuntu/ -r
dpkg-scanpackages . /dev/null | gzip > /data/ubuntu/Packages.gz

# Copy the packages to the intranet machines
scp -r ubuntu/ root@192.168.0.31:
scp -r ubuntu/ root@192.168.0.32:
scp -r ubuntu/ root@192.168.0.33:
scp -r ubuntu/ root@192.168.0.34:
scp -r ubuntu/ root@192.168.0.35:

# Configure the apt source on the intranet machines
vim /etc/apt/sources.list
cat /etc/apt/sources.list
deb file:/root/ ubuntu/

# Install the deb packages
apt install ./*.deb

1.6. Selectively Download the Required Tools

#!/bin/bash

# Release pages for checking versions:
# 
# https://github.com/containernetworking/plugins/releases/
# https://github.com/containerd/containerd/releases/
# https://github.com/kubernetes-sigs/cri-tools/releases/
# https://github.com/Mirantis/cri-dockerd/releases/
# https://github.com/etcd-io/etcd/releases/
# https://github.com/cloudflare/cfssl/releases/
# https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
# https://download.docker.com/linux/static/stable/x86_64/
# https://github.com/opencontainers/runc/releases/
# https://mirrors.tuna.tsinghua.edu.cn/elrepo/kernel/el7/x86_64/RPMS/
# https://github.com/helm/helm/tags
# http://nginx.org/download/

# Version numbers
cni_plugins_version='v1.3.0'
cri_containerd_cni_version='1.7.2'
crictl_version='v1.27.0'
cri_dockerd_version='0.3.3'
etcd_version='v3.5.9'
cfssl_version='1.6.4'
kubernetes_server_version='1.27.3'
docker_version='24.0.2'
runc_version='1.1.7'
kernel_version='5.4.248'
helm_version='3.12.1'
nginx_version='1.25.1'

# URLs 
base_url='https://ghproxy.com/https://github.com'
kernel_url="http://mirrors.tuna.tsinghua.edu.cn/elrepo/kernel/el7/x86_64/RPMS/kernel-lt-${kernel_version}-1.el7.elrepo.x86_64.rpm"
runc_url="${base_url}/opencontainers/runc/releases/download/v${runc_version}/runc.amd64"
docker_url="https://download.docker.com/linux/static/stable/x86_64/docker-${docker_version}.tgz"
cni_plugins_url="${base_url}/containernetworking/plugins/releases/download/${cni_plugins_version}/cni-plugins-linux-amd64-${cni_plugins_version}.tgz"
cri_containerd_cni_url="${base_url}/containerd/containerd/releases/download/v${cri_containerd_cni_version}/cri-containerd-cni-${cri_containerd_cni_version}-linux-amd64.tar.gz"
crictl_url="${base_url}/kubernetes-sigs/cri-tools/releases/download/${crictl_version}/crictl-${crictl_version}-linux-amd64.tar.gz"
cri_dockerd_url="${base_url}/Mirantis/cri-dockerd/releases/download/v${cri_dockerd_version}/cri-dockerd-${cri_dockerd_version}.amd64.tgz"
etcd_url="${base_url}/etcd-io/etcd/releases/download/${etcd_version}/etcd-${etcd_version}-linux-amd64.tar.gz"
cfssl_url="${base_url}/cloudflare/cfssl/releases/download/v${cfssl_version}/cfssl_${cfssl_version}_linux_amd64"
cfssljson_url="${base_url}/cloudflare/cfssl/releases/download/v${cfssl_version}/cfssljson_${cfssl_version}_linux_amd64"
helm_url="https://files.m.daocloud.io/get.helm.sh/helm-v${helm_version}-linux-amd64.tar.gz"
kubernetes_server_url="https://dl.k8s.io/v${kubernetes_server_version}/kubernetes-server-linux-amd64.tar.gz"
nginx_url="http://nginx.org/download/nginx-${nginx_version}.tar.gz"

# Download packages
packages=(
  $kernel_url
  $runc_url
  $docker_url
  $cni_plugins_url
  $cri_containerd_cni_url
  $crictl_url
  $cri_dockerd_url
  $etcd_url
  $cfssl_url
  $cfssljson_url
  $helm_url
  $kubernetes_server_url
  $nginx_url
)

for package_url in "${packages[@]}"; do
  filename=$(basename "$package_url")
  if wget -cq --progress=bar:force:noscroll -nc "$package_url"; then
    echo "Downloaded $filename"
  else
    echo "Failed to download $filename"
    exit 1
  fi
done

1.7. Disable the Firewall

# Skip on Ubuntu; run on CentOS
systemctl disable --now firewalld

1.8. Disable SELinux

# Skip on Ubuntu; run on CentOS
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config

1.9. Disable Swap

sed -ri 's/.*swap.*/#&/' /etc/fstab
swapoff -a && sysctl -w vm.swappiness=0

cat /etc/fstab
# /dev/mapper/centos-swap swap                    swap    defaults        0 0
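
Optionally verify that swap is fully disabled; swapon should print nothing and free should show 0B of swap:

swapon --show
free -h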

1.10. Network Configuration (choose one of the two methods)

# Skip on Ubuntu; run on CentOS

# Method 1
# systemctl disable --now NetworkManager
# systemctl start network && systemctl enable network

# Method 2
cat > /etc/NetworkManager/conf.d/calico.conf << EOF 
[keyfile]
unmanaged-devices=interface-name:cali*;interface-name:tunl*
EOF
systemctl restart NetworkManager

1.11. Configure Time Synchronization

# Server side
# apt install chrony -y
yum install chrony -y
cat > /etc/chrony.conf << EOF 
pool ntp.aliyun.com iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow 192.168.0.0/24
local stratum 10
keyfile /etc/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
EOF

systemctl restart chronyd ; systemctl enable chronyd

# Client side
# apt install chrony -y
yum install chrony -y
cat > /etc/chrony.conf << EOF 
pool 192.168.0.31 iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
keyfile /etc/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
EOF

systemctl restart chronyd ; systemctl enable chronyd

# Verify from a client
chronyc sources -v

1.12. Configure ulimit

ulimit -SHn 65535
cat >> /etc/security/limits.conf <<EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF

1.13. Configure Passwordless SSH Login

# apt install -y sshpass
yum install -y sshpass
ssh-keygen -f /root/.ssh/id_rsa -P ''
export IP="192.168.0.31 192.168.0.32 192.168.0.33 192.168.0.34 192.168.0.35"
export SSHPASS=123123
for HOST in $IP;do
     sshpass -e ssh-copy-id -o StrictHostKeyChecking=no $HOST
done
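
Optionally verify the passwordless login; each command should print the remote hostname without asking for a password (a quick check reusing the $IP list above):

for HOST in $IP;do ssh -o BatchMode=yes $HOST hostname; done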

1.14. Add and Enable the ELRepo Source

# Skip on Ubuntu; run on CentOS

# Configure the source for RHEL 8 or CentOS 8
yum install https://www.elrepo.org/elrepo-release-8.el8.elrepo.noarch.rpm -y 
sed -i "s@mirrorlist@#mirrorlist@g" /etc/yum.repos.d/elrepo.repo 
sed -i "s@elrepo.org/linux@mirrors.tuna.tsinghua.edu.cn/elrepo@g" /etc/yum.repos.d/elrepo.repo 

# Install ELRepo for RHEL 7, SL 7, or CentOS 7 
yum install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm -y 
sed -i "s@mirrorlist@#mirrorlist@g" /etc/yum.repos.d/elrepo.repo 
sed -i "s@elrepo.org/linux@mirrors.tuna.tsinghua.edu.cn/elrepo@g" /etc/yum.repos.d/elrepo.repo 

# List the available packages
yum  --disablerepo="*"  --enablerepo="elrepo-kernel"  list  available

1.15. Upgrade the Kernel to 4.18 or Above

# Skip on Ubuntu; run on CentOS

# Install the latest kernel
# kernel-ml (mainline) is used here; install kernel-lt instead if you want the long-term maintenance version
yum -y --enablerepo=elrepo-kernel  install  kernel-ml

# Check which kernels are installed
rpm -qa | grep kernel

# Check the default kernel
grubby --default-kernel

# If the default is not the newest kernel, set it with the following command
grubby --set-default $(ls /boot/vmlinuz-* | grep elrepo)

# Reboot for the change to take effect
reboot

# Combined command for CentOS 8:
yum install https://www.elrepo.org/elrepo-release-8.el8.elrepo.noarch.rpm -y ; sed -i "s@mirrorlist@#mirrorlist@g" /etc/yum.repos.d/elrepo.repo ; sed -i "s@elrepo.org/linux@mirrors.tuna.tsinghua.edu.cn/elrepo@g" /etc/yum.repos.d/elrepo.repo ; yum  --disablerepo="*"  --enablerepo="elrepo-kernel"  list  available -y ; yum  --enablerepo=elrepo-kernel  install kernel-lt -y ; grubby --default-kernel ; reboot 

# Combined command for CentOS 7:
yum install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm -y ; sed -i "s@mirrorlist@#mirrorlist@g" /etc/yum.repos.d/elrepo.repo ; sed -i "s@elrepo.org/linux@mirrors.tuna.tsinghua.edu.cn/elrepo@g" /etc/yum.repos.d/elrepo.repo ; yum  --disablerepo="*"  --enablerepo="elrepo-kernel"  list  available -y ; yum  --enablerepo=elrepo-kernel  install  kernel-lt -y ; grubby --set-default $(ls /boot/vmlinuz-* | grep elrepo) ; grubby --default-kernel ; reboot 

# Offline version 
yum install -y /root/cby/kernel-lt-*-1.el7.elrepo.x86_64.rpm ; grubby --set-default $(ls /boot/vmlinuz-* | grep elrepo) ; grubby --default-kernel ; reboot

1.16. Install ipvsadm

# Offline installation on CentOS 7
# yum install /root/centos7/ipset-*.el7.x86_64.rpm /root/centos7/lm_sensors-libs-*.el7.x86_64.rpm  /root/centos7/ipset-libs-*.el7.x86_64.rpm /root/centos7/sysstat-*.el7_9.x86_64.rpm  /root/centos7/ipvsadm-*.el7.x86_64.rpm  -y

# For Ubuntu
# apt install ipvsadm ipset sysstat conntrack -y

# For CentOS
yum install ipvsadm ipset sysstat conntrack libseccomp -y
cat >> /etc/modules-load.d/ipvs.conf <<EOF 
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF

systemctl restart systemd-modules-load.service

lsmod | grep -e ip_vs -e nf_conntrack
ip_vs_sh               16384  0
ip_vs_wrr              16384  0
ip_vs_rr               16384  0
ip_vs                 180224  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          176128  1 ip_vs
nf_defrag_ipv6         24576  2 nf_conntrack,ip_vs
nf_defrag_ipv4         16384  1 nf_conntrack
libcrc32c              16384  3 nf_conntrack,xfs,ip_vs

1.17. Tune Kernel Parameters

cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384

net.ipv6.conf.all.disable_ipv6 = 0
net.ipv6.conf.default.disable_ipv6 = 0
net.ipv6.conf.lo.disable_ipv6 = 0
net.ipv6.conf.all.forwarding = 1
EOF

sysctl --system
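
Optionally spot-check that the key parameters took effect (the net.bridge.* values only appear after the br_netfilter module is loaded in section 2.1.2):

sysctl net.ipv4.ip_forward net.ipv6.conf.all.forwarding
sysctl net.bridge.bridge-nf-call-iptables 2>/dev/null || echo "br_netfilter not loaded yet"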

1.18. Configure Local hosts Resolution on All Nodes

cat > /etc/hosts <<EOF
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6


192.168.0.31 k8s-master01
192.168.0.32 k8s-master02
192.168.0.33 k8s-master03
192.168.0.34 k8s-node01
192.168.0.35 k8s-node02
192.168.0.36 lb-vip
EOF
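
Optionally confirm that every name resolves locally:

for h in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02 lb-vip; do getent hosts $h; done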

2. Install Basic k8s Components

Note: choose either 2.1 or 2.2, not both.

2.1. Install containerd as the Runtime (Recommended)

# https://github.com/containernetworking/plugins/releases/
# wget https://ghproxy.com/https://github.com/containernetworking/plugins/releases/download/v1.3.0/cni-plugins-linux-amd64-v1.3.0.tgz

cd cby/

# Create the directories required by the CNI plugins
mkdir -p /etc/cni/net.d /opt/cni/bin 
# Extract the CNI plugin binaries
tar xf cni-plugins-linux-amd64-v*.tgz -C /opt/cni/bin/

# https://github.com/containerd/containerd/releases/
# wget https://ghproxy.com/https://github.com/containerd/containerd/releases/download/v1.7.2/cri-containerd-cni-1.7.2-linux-amd64.tar.gz

# Extract
tar -xzf cri-containerd-cni-*-linux-amd64.tar.gz -C /

# Create the systemd service file
cat > /etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target

[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
TasksMax=infinity
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
EOF

2.1.1 Configure the Kernel Modules Required by containerd

cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

2.1.2 Load the Modules

systemctl restart systemd-modules-load.service

2.1.3 Configure the Kernel Parameters Required by containerd

cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

# Apply the kernel parameters

sysctl --system

2.1.4 Create the containerd Configuration File

# Generate the default configuration file
mkdir -p /etc/containerd
containerd config default | tee /etc/containerd/config.toml

# Modify the containerd configuration file
sed -i "s#SystemdCgroup\ \=\ false#SystemdCgroup\ \=\ true#g" /etc/containerd/config.toml
cat /etc/containerd/config.toml | grep SystemdCgroup

sed -i "s#registry.k8s.io#m.daocloud.io/registry.k8s.io#g" /etc/containerd/config.toml
cat /etc/containerd/config.toml | grep sandbox_image

sed -i "s#config_path\ \=\ \"\"#config_path\ \=\ \"/etc/containerd/certs.d\"#g" /etc/containerd/config.toml
cat /etc/containerd/config.toml | grep certs.d

mkdir /etc/containerd/certs.d/docker.io -pv

# Configure a registry mirror
cat > /etc/containerd/certs.d/docker.io/hosts.toml << EOF
server = "https://docker.io"
[host."https://hub-mirror.c.163.com"]
  capabilities = ["pull", "resolve"]
EOF

2.1.5 Start containerd and Enable It at Boot

systemctl daemon-reload
systemctl enable --now containerd
systemctl restart containerd

2.1.6 Configure the Runtime Endpoint for the crictl Client

# wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.24.2/crictl-v1.24.2-linux-amd64.tar.gz

# Extract
tar xf crictl-v*-linux-amd64.tar.gz -C /usr/bin/
# Generate the configuration file
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF

# Test
systemctl restart  containerd
crictl info
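
As an extra smoke test, a small public image can be pulled through containerd (this assumes outbound network access; busybox is only an example):

crictl pull docker.io/library/busybox:latest
crictl images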

2.2 Install Docker as the Runtime

2.2.1 Install Docker

# Binary package download URL: https://download.docker.com/linux/static/stable/x86_64/
# wget https://download.docker.com/linux/static/stable/x86_64/docker-24.0.2.tgz

# Extract
tar xf docker-*.tgz 
# Copy the binaries
cp docker/* /usr/bin/
# Create the containerd service file and start it
cat >/etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target

[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=1048576
TasksMax=infinity
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
EOF

# Enable at boot
systemctl enable --now containerd.service

# Prepare the docker service file
cat > /etc/systemd/system/docker.service <<EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service

[Service]
Type=notify
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
OOMScoreAdjust=-500

[Install]
WantedBy=multi-user.target
EOF


# Prepare the docker socket file
cat > /etc/systemd/system/docker.socket <<EOF
[Unit]
Description=Docker Socket for the API

[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target
EOF


# Create the docker group
groupadd docker
# Start docker
systemctl enable --now docker.socket  && systemctl enable --now docker.service
# Verify
docker info

# Configure registry mirrors
mkdir /etc/docker/ -pv
cat >/etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
    "https://docker.m.daocloud.io",
    "https://docker.mirrors.ustc.edu.cn",
    "http://hub-mirror.c.163.com"
  ],
  "max-concurrent-downloads": 10,
  "log-driver": "json-file",
  "log-level": "warn",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
    },
  "data-root": "/var/lib/docker"
}
EOF
systemctl daemon-reload 
systemctl stop docker
systemctl restart docker

2.2.2 Install cri-dockerd

# Kubernetes 1.24 and later no longer support Docker directly, so cri-dockerd is installed
# Download cri-dockerd 
# wget  https://ghproxy.com/https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.3/cri-dockerd-0.3.3.amd64.tgz

# Extract cri-dockerd
tar xvf cri-dockerd-*.amd64.tgz 
cp -r cri-dockerd/  /usr/bin/
chmod +x /usr/bin/cri-dockerd/cri-dockerd

# Write the service unit file
cat >  /usr/lib/systemd/system/cri-docker.service <<EOF
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket

[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.7
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always

StartLimitBurst=3
StartLimitInterval=60s

LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

# Write the socket unit file
cat > /usr/lib/systemd/system/cri-docker.socket <<EOF
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service

[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target
EOF

# Start cri-docker
systemctl daemon-reload 
systemctl enable cri-docker --now
systemctl restart cri-docker
systemctl status cri-docker

2.3. Download and Install k8s and etcd (on master01 only)

2.3.1 Extract the k8s Packages

# Download the packages
# wget https://dl.k8s.io/v1.27.3/kubernetes-server-linux-amd64.tar.gz
# wget https://github.com/etcd-io/etcd/releases/download/v3.5.9/etcd-v3.5.9-linux-amd64.tar.gz

# Extract the k8s files
cd cby
tar -xf kubernetes-server-linux-amd64.tar.gz  --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}

# Extract the etcd files
tar -xf etcd*.tar.gz && mv etcd-*/etcd /usr/local/bin/ && mv etcd-*/etcdctl /usr/local/bin/

# Check the contents of /usr/local/bin
ls /usr/local/bin/
containerd               crictl       etcdctl                  kube-proxy
containerd-shim          critest      kube-apiserver           kube-scheduler
containerd-shim-runc-v1  ctd-decoder  kube-controller-manager
containerd-shim-runc-v2  ctr          kubectl
containerd-stress        etcd         kubelet

2.3.2 Check Versions

[root@k8s-master01 ~]#  kubelet --version
Kubernetes v1.27.3
[root@k8s-master01 ~]# etcdctl version
etcdctl version: 3.5.9
API version: 3.5
[root@k8s-master01 ~]#

2.3.3 Distribute the Components to the Other k8s Nodes

Master='k8s-master02 k8s-master03'
Work='k8s-node01 k8s-node02'

# Copy the master components
for NODE in $Master; do echo $NODE; scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} $NODE:/usr/local/bin/; scp /usr/local/bin/etcd* $NODE:/usr/local/bin/; done

# Copy the worker components
for NODE in $Work; do     scp /usr/local/bin/kube{let,-proxy} $NODE:/usr/local/bin/ ; done

# Run on all nodes
mkdir -p /opt/cni/bin

2.4. Create the Certificate-Related Files

# See the GitHub repository, or fetch the pre-built package, from:
https://github.com/cby-chen/Kubernetes/
https://github.com/cby-chen/Kubernetes/tags
https://github.com/cby-chen/Kubernetes/releases/download/v1.27.3/kubernetes-v1.27.3.tar

3. Generate the Certificates

# Download the certificate generation tools on master01
# wget "https://ghproxy.com/https://github.com/cloudflare/cfssl/releases/download/v1.6.4/cfssl_1.6.4_linux_amd64" -O /usr/local/bin/cfssl
# wget "https://ghproxy.com/https://github.com/cloudflare/cfssl/releases/download/v1.6.4/cfssljson_1.6.4_linux_amd64" -O /usr/local/bin/cfssljson

# They are also included in the bundled package
cp cfssl_*_linux_amd64 /usr/local/bin/cfssl
cp cfssljson_*_linux_amd64 /usr/local/bin/cfssljson

# Make them executable
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson

3.1. Generate etcd Certificates

Unless otherwise noted, the following operations are performed on all master nodes.

3.1.1 Create the Certificate Directory on All Master Nodes

mkdir /etc/etcd/ssl -p

3.1.2 Generate the etcd Certificates on master01

cd pki
# Generate the etcd certificate and key (if you expect to scale out later, reserve a few extra IPs in the hostname list)
# If you have no IPv6, the IPv6 addresses can be removed or kept 
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca
cfssl gencert \
   -ca=/etc/etcd/ssl/etcd-ca.pem \
   -ca-key=/etc/etcd/ssl/etcd-ca-key.pem \
   -config=ca-config.json \
   -hostname=127.0.0.1,k8s-master01,k8s-master02,k8s-master03,192.168.0.31,192.168.0.32,192.168.0.33,fc00:43f4:1eea:1::10,fc00:43f4:1eea:1::20,fc00:43f4:1eea:1::30,::1 \
   -profile=kubernetes \
   etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd

3.1.3 Copy the Certificates to the Other Nodes

Master='k8s-master02 k8s-master03'
for NODE in $Master; do ssh $NODE "mkdir -p /etc/etcd/ssl"; for FILE in etcd-ca-key.pem  etcd-ca.pem  etcd-key.pem  etcd.pem; do scp /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}; done; done

3.2. Generate the k8s Certificates

Unless otherwise noted, the following operations are performed on all master nodes.

3.2.1 Create the Certificate Directory on All k8s Nodes

mkdir -p /etc/kubernetes/pki

3.2.2 Generate the k8s Certificates on master01

cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca

# Generate a root certificate; extra IPs are listed as reserved addresses for adding nodes later
# 10.96.0.1 is the first address of the service CIDR and needs to be computed (see the sketch after this command); 192.168.0.36 is the high-availability VIP
# If you have no IPv6, the IPv6 addresses can be removed or kept 

cfssl gencert   \
-ca=/etc/kubernetes/pki/ca.pem   \
-ca-key=/etc/kubernetes/pki/ca-key.pem   \
-config=ca-config.json   \
-hostname=10.96.0.1,192.168.0.36,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,x.oiox.cn,k.oiox.cn,l.oiox.cn,o.oiox.cn,192.168.0.31,192.168.0.32,192.168.0.33,192.168.0.34,192.168.0.35,192.168.0.36,192.168.0.37,192.168.0.38,192.168.0.39,192.168.1.70,fc00:43f4:1eea:1::10,fc00:43f4:1eea:1::20,fc00:43f4:1eea:1::30,fc00:43f4:1eea:1::40,fc00:43f4:1eea:1::50,fc00:43f4:1eea:1::60,fc00:43f4:1eea:1::70,fc00:43f4:1eea:1::80,fc00:43f4:1eea:1::90,fc00:43f4:1eea:1::100,::1   \
-profile=kubernetes   apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver
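
The "first address of the service CIDR" mentioned above can be computed instead of guessed; a minimal sketch, assuming python3 is available on the host:

python3 -c 'import ipaddress; print(ipaddress.ip_network("10.96.0.0/12")[1])'
# prints 10.96.0.1, the ClusterIP the kubernetes service will use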

3.2.3 Generate the apiserver Aggregation Certificates

cfssl gencert   -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca 

# A warning is printed; it can be ignored

cfssl gencert  \
-ca=/etc/kubernetes/pki/front-proxy-ca.pem   \
-ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem   \
-config=ca-config.json   \
-profile=kubernetes   front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client

3.2.4 Generate the controller-manager Certificates

In "5. High-Availability Configuration", choose which HA scheme to use.
If you use haproxy + keepalived, the server is --server=https://192.168.0.36:9443
If you use the nginx scheme, the server is --server=https://127.0.0.1:8443

cfssl gencert \
   -ca=/etc/kubernetes/pki/ca.pem \
   -ca-key=/etc/kubernetes/pki/ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager

# Set the cluster entry

# In "5. High-Availability Configuration", choose which HA scheme to use
# If you use haproxy + keepalived, use `--server=https://192.168.0.36:9443`
# If you use the nginx scheme, use `--server=https://127.0.0.1:8443`

kubectl config set-cluster kubernetes \
     --certificate-authority=/etc/kubernetes/pki/ca.pem \
     --embed-certs=true \
     --server=https://127.0.0.1:8443 \
     --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

# Set a context entry

kubectl config set-context system:kube-controller-manager@kubernetes \
    --cluster=kubernetes \
    --user=system:kube-controller-manager \
    --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

# Set a user entry

kubectl config set-credentials system:kube-controller-manager \
     --client-certificate=/etc/kubernetes/pki/controller-manager.pem \
     --client-key=/etc/kubernetes/pki/controller-manager-key.pem \
     --embed-certs=true \
     --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

# Set the default context

kubectl config use-context system:kube-controller-manager@kubernetes \
     --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

cfssl gencert \
   -ca=/etc/kubernetes/pki/ca.pem \
   -ca-key=/etc/kubernetes/pki/ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler

# In "5. High-Availability Configuration", choose which HA scheme to use
# If you use haproxy + keepalived, use `--server=https://192.168.0.36:9443`
# If you use the nginx scheme, use `--server=https://127.0.0.1:8443`

kubectl config set-cluster kubernetes \
     --certificate-authority=/etc/kubernetes/pki/ca.pem \
     --embed-certs=true \
     --server=https://127.0.0.1:8443 \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

kubectl config set-credentials system:kube-scheduler \
     --client-certificate=/etc/kubernetes/pki/scheduler.pem \
     --client-key=/etc/kubernetes/pki/scheduler-key.pem \
     --embed-certs=true \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

kubectl config set-context system:kube-scheduler@kubernetes \
     --cluster=kubernetes \
     --user=system:kube-scheduler \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

kubectl config use-context system:kube-scheduler@kubernetes \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

cfssl gencert \
   -ca=/etc/kubernetes/pki/ca.pem \
   -ca-key=/etc/kubernetes/pki/ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin

# In "5. High-Availability Configuration", choose which HA scheme to use
# If you use haproxy + keepalived, use `--server=https://192.168.0.36:9443`
# If you use the nginx scheme, use `--server=https://127.0.0.1:8443`

kubectl config set-cluster kubernetes     \
  --certificate-authority=/etc/kubernetes/pki/ca.pem     \
  --embed-certs=true     \
  --server=https://127.0.0.1:8443     \
  --kubeconfig=/etc/kubernetes/admin.kubeconfig

kubectl config set-credentials kubernetes-admin  \
  --client-certificate=/etc/kubernetes/pki/admin.pem     \
  --client-key=/etc/kubernetes/pki/admin-key.pem     \
  --embed-certs=true     \
  --kubeconfig=/etc/kubernetes/admin.kubeconfig

kubectl config set-context kubernetes-admin@kubernetes    \
  --cluster=kubernetes     \
  --user=kubernetes-admin     \
  --kubeconfig=/etc/kubernetes/admin.kubeconfig

kubectl config use-context kubernetes-admin@kubernetes  --kubeconfig=/etc/kubernetes/admin.kubeconfig

3.2.5 Create the kube-proxy Certificates

In "5. High-Availability Configuration", choose which HA scheme to use.
If you use haproxy + keepalived, the server is --server=https://192.168.0.36:9443
If you use the nginx scheme, the server is --server=https://127.0.0.1:8443

cfssl gencert \
   -ca=/etc/kubernetes/pki/ca.pem \
   -ca-key=/etc/kubernetes/pki/ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   kube-proxy-csr.json | cfssljson -bare /etc/kubernetes/pki/kube-proxy

# In "5. High-Availability Configuration", choose which HA scheme to use
# If you use haproxy + keepalived, use `--server=https://192.168.0.36:9443`
# If you use the nginx scheme, use `--server=https://127.0.0.1:8443`

kubectl config set-cluster kubernetes     \
  --certificate-authority=/etc/kubernetes/pki/ca.pem     \
  --embed-certs=true     \
  --server=https://127.0.0.1:8443     \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy  \
  --client-certificate=/etc/kubernetes/pki/kube-proxy.pem     \
  --client-key=/etc/kubernetes/pki/kube-proxy-key.pem     \
  --embed-certs=true     \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

kubectl config set-context kube-proxy@kubernetes    \
  --cluster=kubernetes     \
  --user=kube-proxy     \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

kubectl config use-context kube-proxy@kubernetes  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

3.2.6 Create the ServiceAccount Key and Secret

openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub

3.2.7 Send the Certificates to the Other Master Nodes

# Create the directory on the other nodes
# mkdir  /etc/kubernetes/pki/ -p

for NODE in k8s-master02 k8s-master03; do  for FILE in $(ls /etc/kubernetes/pki | grep -v etcd); do  scp /etc/kubernetes/pki/${FILE} $NODE:/etc/kubernetes/pki/${FILE}; done;  for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig; do  scp /etc/kubernetes/${FILE} $NODE:/etc/kubernetes/${FILE}; done; done

3.2.8 Check the Certificates

ls /etc/kubernetes/pki/
admin.csr          controller-manager.csr      kube-proxy.csr
admin-key.pem      controller-manager-key.pem  kube-proxy-key.pem
admin.pem          controller-manager.pem      kube-proxy.pem
apiserver.csr      front-proxy-ca.csr          sa.key
apiserver-key.pem  front-proxy-ca-key.pem      sa.pub
apiserver.pem      front-proxy-ca.pem          scheduler.csr
ca.csr             front-proxy-client.csr      scheduler-key.pem
ca-key.pem         front-proxy-client-key.pem  scheduler.pem
ca.pem             front-proxy-client.pem

# 26 files in total is correct
ls /etc/kubernetes/pki/ |wc -l
26
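
You can also inspect the SANs embedded in the apiserver certificate to confirm the reserved IPs were picked up (openssl is assumed to be installed):

openssl x509 -in /etc/kubernetes/pki/apiserver.pem -noout -text | grep -A1 "Subject Alternative Name"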

4. Configure the k8s System Components

4.1. etcd Configuration

4.1.1 master01 Configuration

# To use IPv6, simply replace the IPv4 addresses with IPv6 addresses
cat > /etc/etcd/etcd.config.yml << EOF 
name: 'k8s-master01'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.0.31:2380'
listen-client-urls: 'https://192.168.0.31:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.0.31:2380'
advertise-client-urls: 'https://192.168.0.31:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.0.31:2380,k8s-master02=https://192.168.0.32:2380,k8s-master03=https://192.168.0.33:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF

4.1.2 master02 Configuration

# To use IPv6, simply replace the IPv4 addresses with IPv6 addresses
cat > /etc/etcd/etcd.config.yml << EOF 
name: 'k8s-master02'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.0.32:2380'
listen-client-urls: 'https://192.168.0.32:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.0.32:2380'
advertise-client-urls: 'https://192.168.0.32:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.0.31:2380,k8s-master02=https://192.168.0.32:2380,k8s-master03=https://192.168.0.33:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF

4.1.3 master03 Configuration

# To use IPv6, simply replace the IPv4 addresses with IPv6 addresses
cat > /etc/etcd/etcd.config.yml << EOF 
name: 'k8s-master03'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.0.33:2380'
listen-client-urls: 'https://192.168.0.33:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.0.33:2380'
advertise-client-urls: 'https://192.168.0.33:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.0.31:2380,k8s-master02=https://192.168.0.32:2380,k8s-master03=https://192.168.0.33:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF

4.2. Create the Service (on all master nodes)

4.2.1 Create etcd.service and Start It

cat > /usr/lib/systemd/system/etcd.service << EOF

[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target

[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Alias=etcd3.service

EOF

4.2.2 Create the etcd Certificate Directory

mkdir /etc/kubernetes/pki/etcd
ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
systemctl daemon-reload
systemctl enable --now etcd

4.2.3 Check etcd Status

# To use IPv6, simply replace the IPv4 addresses with IPv6 addresses
export ETCDCTL_API=3
etcdctl --endpoints="192.168.0.33:2379,192.168.0.32:2379,192.168.0.31:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem  endpoint status --write-out=table
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|     ENDPOINT      |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| 192.168.0.33:2379 | 6ae2196f75cd6d95 |   3.5.9 |   20 kB |     false |      false |         2 |          9 |                  9 |        |
| 192.168.0.32:2379 | 46cbf93f7713a252 |   3.5.9 |   20 kB |     false |      false |         2 |          9 |                  9 |        |
| 192.168.0.31:2379 | ec6051ffc7487dd7 |   3.5.9 |   20 kB |      true |      false |         2 |          9 |                  9 |        |
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
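
An additional health check can be run with the same endpoints and certificates:

etcdctl --endpoints="192.168.0.33:2379,192.168.0.32:2379,192.168.0.31:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint health --write-out=table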

5. High-Availability Configuration (performed on the master servers)

Note: choose either 5.1 or 5.2.

You may also deploy both schemes to cover access from inside and outside the cluster, for example:
the NGINX scheme in 5.1 provides high availability inside the cluster, while
the haproxy + keepalived scheme in 5.2 provides access from outside the cluster.

In "3.2. Generate the k8s Certificates":

if you use the nginx scheme, the server is --server=https://127.0.0.1:8443
if you use haproxy + keepalived, the server is --server=https://192.168.0.36:9443

5.1 NGINX High-Availability Scheme

5.1.1 Compile NGINX

# Install the build environment
yum install gcc -y

# Download and extract the nginx source
# wget http://nginx.org/download/nginx-1.25.1.tar.gz
tar xvf nginx-*.tar.gz
cd nginx-*

# Compile
./configure --with-stream --without-http --without-http_uwsgi_module --without-http_scgi_module --without-http_fastcgi_module
make && make install 

# Copy the compiled nginx to the other nodes
node='k8s-master02 k8s-master03 k8s-node01 k8s-node02'
for NODE in $node; do scp -r /usr/local/nginx/ $NODE:/usr/local/nginx/; done

5.1.2 Write the Startup Configuration

Run this on all hosts.

# Write the nginx configuration file
cat > /usr/local/nginx/conf/kube-nginx.conf <<EOF
worker_processes 1;
events {
    worker_connections  1024;
}
stream {
    upstream backend {
        least_conn;
        hash \$remote_addr consistent;
        server 192.168.0.31:6443        max_fails=3 fail_timeout=30s;
        server 192.168.0.32:6443        max_fails=3 fail_timeout=30s;
        server 192.168.0.33:6443        max_fails=3 fail_timeout=30s;
    }
    server {
        listen 127.0.0.1:8443;
        proxy_connect_timeout 1s;
        proxy_pass backend;
    }
}
EOF

# Write the systemd unit file
cat > /etc/systemd/system/kube-nginx.service <<EOF
[Unit]
Description=kube-apiserver nginx proxy
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=forking
ExecStartPre=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/kube-nginx.conf -p /usr/local/nginx -t
ExecStart=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/kube-nginx.conf -p /usr/local/nginx
ExecReload=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/kube-nginx.conf -p /usr/local/nginx -s reload
PrivateTmp=true
Restart=always
RestartSec=5
StartLimitInterval=0
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# Enable at boot
systemctl enable --now  kube-nginx 
systemctl restart kube-nginx
systemctl status kube-nginx
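
At this stage the apiserver is not running yet, so only check that nginx is listening on the local port (a quick sanity check using ss from iproute2):

ss -lntp | grep 8443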

5.2 keepalived and haproxy High-Availability Scheme

5.2.1 Install the keepalived and haproxy Services

systemctl disable --now firewalld

setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config

yum -y install keepalived haproxy

5.2.2 Modify the haproxy Configuration (identical on all nodes)

# cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak

cat >/etc/haproxy/haproxy.cfg<<"EOF"
global
 maxconn 2000
 ulimit-n 16384
 log 127.0.0.1 local0 err
 stats timeout 30s

defaults
 log global
 mode http
 option httplog
 timeout connect 5000
 timeout client 50000
 timeout server 50000
 timeout http-request 15s
 timeout http-keep-alive 15s


frontend monitor-in
 bind *:33305
 mode http
 option httplog
 monitor-uri /monitor

frontend k8s-master
 bind 0.0.0.0:9443
 bind 127.0.0.1:9443
 mode tcp
 option tcplog
 tcp-request inspect-delay 5s
 default_backend k8s-master


backend k8s-master
 mode tcp
 option tcplog
 option tcp-check
 balance roundrobin
 default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
 server  k8s-master01  192.168.0.31:6443 check
 server  k8s-master02  192.168.0.32:6443 check
 server  k8s-master03  192.168.0.33:6443 check
EOF

5.2.3 Configure keepalived on Master01 (MASTER node)

#cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak

cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5 
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state MASTER
    # Note the NIC name
    interface eth0 
    mcast_src_ip 192.168.0.31
    virtual_router_id 51
    priority 100
    nopreempt
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.0.36
    }
    track_script {
      chk_apiserver 
} }

EOF

5.2.4 Configure keepalived on Master02 (BACKUP node)

# cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak

cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5 
    weight -5
    fall 2
    rise 1

}
vrrp_instance VI_1 {
    state BACKUP
    # Note the NIC name
    interface eth0
    mcast_src_ip 192.168.0.32
    virtual_router_id 51
    priority 80
    nopreempt
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.0.36
    }
    track_script {
      chk_apiserver 
} }

EOF

5.2.5 Configure keepalived on Master03 (BACKUP node)

# cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak

cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5 
    weight -5
    fall 2
    rise 1

}
vrrp_instance VI_1 {
    state BACKUP
    # Note the NIC name
    interface eth0
    mcast_src_ip 192.168.0.33
    virtual_router_id 51
    priority 50
    nopreempt
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.0.36
    }
    track_script {
      chk_apiserver 
} }

EOF

5.2.6 Configure the Health-Check Script (on the LB hosts)

cat >  /etc/keepalived/check_apiserver.sh << EOF
#!/bin/bash

err=0
for k in \$(seq 1 3)
do
    check_code=\$(pgrep haproxy)
    if [[ \$check_code == "" ]]; then
        err=\$(expr \$err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ \$err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
EOF

# Make the script executable

chmod +x /etc/keepalived/check_apiserver.sh

5.2.7 Start the Services

systemctl daemon-reload
systemctl enable --now haproxy
systemctl enable --now keepalived

5.2.8 Test High Availability

# The VIP should respond to ping

[root@k8s-node02 ~]# ping 192.168.0.36

# The VIP should be reachable via telnet

[root@k8s-node02 ~]# telnet 192.168.0.36 9443

# Shut down the active node and check whether the VIP fails over to a backup node
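
To see which node currently holds the VIP, check the interface on each master (eth0 is assumed, matching the keepalived configuration):

ip addr show eth0 | grep 192.168.0.36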

6. k8s Component Configuration

Create the following directories on all k8s nodes:

mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes

6.1. Create the apiserver Service (all master nodes)

6.1.1 master01 Configuration

cat > /usr/lib/systemd/system/kube-apiserver.service << EOF

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
      --v=2  \\
      --allow-privileged=true  \\
      --bind-address=0.0.0.0  \\
      --secure-port=6443  \\
      --advertise-address=192.168.0.31 \\
      --service-cluster-ip-range=10.96.0.0/12,fd00::/108  \\
      --service-node-port-range=30000-32767  \\
      --etcd-servers=https://192.168.0.31:2379,https://192.168.0.32:2379,https://192.168.0.33:2379 \\
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \\
      --etcd-certfile=/etc/etcd/ssl/etcd.pem  \\
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \\
      --client-ca-file=/etc/kubernetes/pki/ca.pem  \\
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \\
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \\
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \\
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \\
      --service-account-key-file=/etc/kubernetes/pki/sa.pub  \\
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \\
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \\
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \\
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \\
      --authorization-mode=Node,RBAC  \\
      --enable-bootstrap-token-auth=true  \\
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \\
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \\
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \\
      --requestheader-allowed-names=aggregator  \\
      --requestheader-group-headers=X-Remote-Group  \\
      --requestheader-extra-headers-prefix=X-Remote-Extra-  \\
      --requestheader-username-headers=X-Remote-User \\
      --enable-aggregator-routing=true
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target

EOF

6.1.2 master02 Configuration

cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
      --v=2  \\
      --allow-privileged=true  \\
      --bind-address=0.0.0.0  \\
      --secure-port=6443  \\
      --advertise-address=192.168.0.32 \\
      --service-cluster-ip-range=10.96.0.0/12,fd00::/108  \\
      --service-node-port-range=30000-32767  \\
      --etcd-servers=https://192.168.0.31:2379,https://192.168.0.32:2379,https://192.168.0.33:2379 \\
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \\
      --etcd-certfile=/etc/etcd/ssl/etcd.pem  \\
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \\
      --client-ca-file=/etc/kubernetes/pki/ca.pem  \\
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \\
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \\
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \\
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \\
      --service-account-key-file=/etc/kubernetes/pki/sa.pub  \\
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \\
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \\
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \\
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \\
      --authorization-mode=Node,RBAC  \\
      --enable-bootstrap-token-auth=true  \\
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \\
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \\
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \\
      --requestheader-allowed-names=aggregator  \\
      --requestheader-group-headers=X-Remote-Group  \\
      --requestheader-extra-headers-prefix=X-Remote-Extra-  \\
      --requestheader-username-headers=X-Remote-User \\
      --enable-aggregator-routing=true

Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target

EOF

6.1.3 master03 Configuration

cat > /usr/lib/systemd/system/kube-apiserver.service  << EOF

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
      --v=2  \\
      --allow-privileged=true  \\
      --bind-address=0.0.0.0  \\
      --secure-port=6443  \\
      --advertise-address=192.168.0.33 \\
      --service-cluster-ip-range=10.96.0.0/12,fd00::/108  \\
      --service-node-port-range=30000-32767  \\
      --etcd-servers=https://192.168.0.31:2379,https://192.168.0.32:2379,https://192.168.0.33:2379 \\
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \\
      --etcd-certfile=/etc/etcd/ssl/etcd.pem  \\
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \\
      --client-ca-file=/etc/kubernetes/pki/ca.pem  \\
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \\
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \\
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \\
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \\
      --service-account-key-file=/etc/kubernetes/pki/sa.pub  \\
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \\
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \\
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \\
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \\
      --authorization-mode=Node,RBAC  \\
      --enable-bootstrap-token-auth=true  \\
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \\
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \\
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \\
      --requestheader-allowed-names=aggregator  \\
      --requestheader-group-headers=X-Remote-Group  \\
      --requestheader-extra-headers-prefix=X-Remote-Extra-  \\
      --requestheader-username-headers=X-Remote-User \\
      --enable-aggregator-routing=true

Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target

EOF

6.1.4 Start the apiserver (all master nodes)

systemctl daemon-reload
systemctl enable --now kube-apiserver
systemctl restart kube-apiserver
systemctl status kube-apiserver
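
A quick liveness check against the local apiserver; /healthz is readable without client certificates by default, and -k skips TLS verification of the self-signed chain:

curl -k https://127.0.0.1:6443/healthz
# expected output: ok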

6.2. Configure the kube-controller-manager Service

# Configure on all master nodes; the configuration is identical
# 172.16.0.0/12 is the pod CIDR; adjust it to your own network as needed

cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF

[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \\
      --v=2 \\
      --bind-address=0.0.0.0 \\
      --root-ca-file=/etc/kubernetes/pki/ca.pem \\
      --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \\
      --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \\
      --service-account-private-key-file=/etc/kubernetes/pki/sa.key \\
      --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \\
      --leader-elect=true \\
      --use-service-account-credentials=true \\
      --node-monitor-grace-period=40s \\
      --node-monitor-period=5s \\
      --controllers=*,bootstrapsigner,tokencleaner \\
      --allocate-node-cidrs=true \\
      --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\
      --cluster-cidr=172.16.0.0/12,fc00::/48 \\
      --node-cidr-mask-size-ipv4=24 \\
      --node-cidr-mask-size-ipv6=120 \\
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

EOF

6.2.1 Start kube-controller-manager and check its status

systemctl daemon-reload
systemctl enable --now kube-controller-manager
systemctl restart kube-controller-manager
systemctl status kube-controller-manager

6.3. Configure the kube-scheduler service

6.3.1 Configure on all master nodes (identical configuration)

cat > /usr/lib/systemd/system/kube-scheduler.service << EOF

[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-scheduler \\
      --v=2 \\
      --bind-address=0.0.0.0 \\
      --leader-elect=true \\
      --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

EOF

6.3.2 Start the service and check its status

systemctl daemon-reload
systemctl enable --now kube-scheduler
systemctl restart kube-scheduler
systemctl status kube-scheduler

7. TLS Bootstrapping configuration

7.1 Configure on master01

# Match this to the high-availability scheme chosen in section 5
# If you used haproxy + keepalived, set `--server=https://192.168.0.36:8443`
# If you used the nginx scheme, set `--server=https://127.0.0.1:8443`

cd bootstrap

kubectl config set-cluster kubernetes     \
--certificate-authority=/etc/kubernetes/pki/ca.pem     \
--embed-certs=true     --server=https://127.0.0.1:8443     \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

kubectl config set-credentials tls-bootstrap-token-user     \
--token=c8ad9c.2e4d610cf3e7426e \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

kubectl config set-context tls-bootstrap-token-user@kubernetes     \
--cluster=kubernetes     \
--user=tls-bootstrap-token-user     \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

kubectl config use-context tls-bootstrap-token-user@kubernetes     \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

# The token is defined in bootstrap.secret.yaml; if you want to change it, edit that file
mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
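
The token embedded above (c8ad9c.2e4d610cf3e7426e) must match the token-id and token-secret defined in bootstrap.secret.yaml. A minimal cross-check, assuming bootstrap.secret.yaml is in the current bootstrap directory:

# token-id / token-secret as defined in the secret manifest
grep -E "token-id|token-secret" bootstrap.secret.yaml

# token embedded in the generated kubeconfig; the two must agree
grep "token:" /etc/kubernetes/bootstrap-kubelet.kubeconfig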

7.2 Check the cluster status; continue only if everything is healthy

kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE                         ERROR
scheduler            Healthy   ok                              
controller-manager   Healthy   ok                              
etcd-0               Healthy   {"health":"true","reason":""}   
etcd-2               Healthy   {"health":"true","reason":""}   
etcd-1               Healthy   {"health":"true","reason":""} 

# Be sure to run this step, do not forget it!!!
kubectl create -f bootstrap.secret.yaml
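
A quick check that the bootstrap token secret was actually created (the manifest places it in the kube-system namespace):

kubectl get secrets -n kube-system | grep bootstrap-token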

8. Node configuration

8.1. Copy the certificates from master01 to the node machines

cd /etc/kubernetes/

for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do ssh $NODE mkdir -p /etc/kubernetes/pki; for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig kube-proxy.kubeconfig; do scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}; done; done
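
A quick sanity check that the files landed on every node (same node list as above), shown as a simple sketch:

for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do echo "=== $NODE ==="; ssh $NODE "ls /etc/kubernetes/pki/ /etc/kubernetes/*.kubeconfig"; done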

8.2. kubelet configuration

Note: 8.2.1 and 8.2.2 must correspond to the runtime chosen in 2.1 and 2.2 above
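
To confirm which runtime socket is actually present on a node before choosing 8.2.1 or 8.2.2, a quick check (socket paths are the defaults used in this document; adjust if yours differ):

# containerd socket
ls -l /run/containerd/containerd.sock 2>/dev/null
# cri-dockerd socket (only present when Docker + cri-dockerd is used)
ls -l /run/cri-dockerd.sock 2>/dev/null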

8.2.1 When using Docker as the runtime

cat > /usr/lib/systemd/system/kubelet.service << EOF

[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes

[Service]
ExecStart=/usr/local/bin/kubelet \\
    --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig  \\
    --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
    --config=/etc/kubernetes/kubelet-conf.yml \\
    --container-runtime-endpoint=unix:///run/cri-dockerd.sock  \\
    --node-labels=node.kubernetes.io/node=

[Install]
WantedBy=multi-user.target
EOF

8.2.2 When using containerd as the runtime (recommended)

mkdir -p /var/lib/kubelet /var/log/kubernetes /etc/systemd/system/kubelet.service.d /etc/kubernetes/manifests/

# Configure the kubelet service on all k8s nodes
cat > /usr/lib/systemd/system/kubelet.service << EOF

[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service

[Service]
ExecStart=/usr/local/bin/kubelet \\
    --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig  \\
    --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
    --config=/etc/kubernetes/kubelet-conf.yml \\
    --container-runtime-endpoint=unix:///run/containerd/containerd.sock  \\
    --node-labels=node.kubernetes.io/node=

[Install]
WantedBy=multi-user.target
EOF

8.2.3 Create the kubelet configuration file on all k8s nodes

cat > /etc/kubernetes/kubelet-conf.yml <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF

8.2.4 Start kubelet

systemctl daemon-reload
systemctl enable --now kubelet
systemctl restart kubelet
systemctl status kubelet
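
If a node never registers or stays NotReady, the usual first checks are the kubelet logs on that node and the bootstrap CSRs on master01 (standard kubectl/journalctl commands, shown here as a sketch):

# kubelet logs on the problem node
journalctl -u kubelet -f

# certificate signing requests created by TLS bootstrapping (run on master01)
kubectl get csr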

8.2.5 Check the cluster

[root@k8s-master01 ~]# kubectl  get node
NAME           STATUS     ROLES    AGE   VERSION
k8s-master01   Ready    <none>   18s   v1.27.3
k8s-master02   Ready    <none>   16s   v1.27.3
k8s-master03   Ready    <none>   16s   v1.27.3
k8s-node01     Ready    <none>   14s   v1.27.3
k8s-node02     Ready    <none>   14s   v1.27.3
[root@k8s-master01 ~]#

8.2.6 Check the container runtime

# When containerd is the runtime:
[root@k8s-master01 ~]# kubectl describe node | grep Runtime
  Container Runtime Version:  containerd://1.7.2
  Container Runtime Version:  containerd://1.7.2
  Container Runtime Version:  containerd://1.7.2
  Container Runtime Version:  containerd://1.7.2
  Container Runtime Version:  containerd://1.7.2

# When Docker (cri-dockerd) is the runtime:
[root@k8s-master01 ~]# kubectl describe node | grep Runtime
  Container Runtime Version:  docker://24.0.2
  Container Runtime Version:  docker://24.0.2
  Container Runtime Version:  docker://24.0.2
  Container Runtime Version:  docker://24.0.2
  Container Runtime Version:  docker://24.0.2

8.3. kube-proxy configuration

8.3.1 Send the kubeconfig to the other nodes

for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig; done

8.3.2 Add the kube-proxy service file on all k8s nodes

cat >  /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-proxy \\
  --config=/etc/kubernetes/kube-proxy.yaml \\
  --v=2

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

EOF

8.3.3 Add the kube-proxy configuration on all k8s nodes

cat > /etc/kubernetes/kube-proxy.yaml << EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 172.16.0.0/12,fc00::/48
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms

EOF

8.3.4 Start kube-proxy

systemctl daemon-reload
systemctl restart kube-proxy
systemctl enable --now kube-proxy
systemctl status kube-proxy
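
To verify kube-proxy really came up in ipvs mode, the metrics endpoint configured above (metricsBindAddress 127.0.0.1:10249) can be queried and the programmed virtual servers listed; ipvsadm is a separate package, and the /proxyMode endpoint location is assumed from common kube-proxy usage:

# should print: ipvs
curl -s 127.0.0.1:10249/proxyMode

# list the virtual servers kube-proxy programmed (requires the ipvsadm package)
ipvsadm -Ln | head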

9. Install the network plugin

Note: choose either 9.1 or 9.2, not both. It is recommended to take a snapshot at this point before proceeding, so that you can roll back if something goes wrong later.

**On CentOS 7 you must upgrade libseccomp, otherwise the network plugin cannot be installed**

# https://github.com/opencontainers/runc/releases
# Upgrade runc
# wget https://ghproxy.com/https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64

install -m 755 runc.amd64 /usr/local/sbin/runc
cp -p /usr/local/sbin/runc  /usr/local/bin/runc
cp -p /usr/local/sbin/runc  /usr/bin/runc

# Download a libseccomp package newer than 2.4
yum -y install http://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/libseccomp-2.5.1-1.el8.x86_64.rpm
# Tsinghua mirror
yum -y install https://mirrors.tuna.tsinghua.edu.cn/centos/8-stream/BaseOS/x86_64/os/Packages/libseccomp-2.5.1-1.el8.x86_64.rpm

# Check the current version
[root@k8s-master-1 ~]# rpm -qa | grep libseccomp
libseccomp-2.5.1-1.el8.x86_64

9.1 Install Calico

9.1.1 Change the Calico CIDRs

wget https://mirrors.chenby.cn/https://github.com/projectcalico/calico/blob/master/manifests/calico-typha.yaml

cp calico-typha.yaml calico.yaml
cp calico-typha.yaml calico-ipv6.yaml

vim calico.yaml
# In the calico-config ConfigMap
    "ipam": {
        "type": "calico-ipam",
    },
    - name: IP
      value: "autodetect"

    - name: CALICO_IPV4POOL_CIDR
      value: "172.16.0.0/12"

# vim calico-ipv6.yaml
# In the calico-config ConfigMap
    "ipam": {
        "type": "calico-ipam",
        "assign_ipv4": "true",
        "assign_ipv6": "true"
    },
    - name: IP
      value: "autodetect"

    - name: IP6
      value: "autodetect"

    - name: CALICO_IPV4POOL_CIDR
      value: "172.16.0.0/12"

    - name: CALICO_IPV6POOL_CIDR
      value: "fc00::/48"

    - name: FELIX_IPV6SUPPORT
      value: "true"


# If the images cannot be pulled from docker.io, a domestic mirror can be used
sed -i "s#docker.io/calico/#m.daocloud.io/docker.io/calico/#g" calico.yaml 
sed -i "s#docker.io/calico/#m.daocloud.io/docker.io/calico/#g" calico-ipv6.yaml


# If you do not have public IPv6 locally, use calico.yaml
kubectl apply -f calico.yaml

# If you have public IPv6 locally, use calico-ipv6.yaml
# kubectl apply -f calico-ipv6.yaml
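
Optionally, instead of polling the pod list by hand, you can wait on the rollouts (the DaemonSet/Deployment names below are the ones used by the upstream calico-typha manifest; adjust if your copy differs):

kubectl -n kube-system rollout status daemonset/calico-node --timeout=10m
kubectl -n kube-system rollout status deployment/calico-kube-controllers --timeout=10m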

9.1.2 Check the container status

# Calico initialization is slow; be patient, it takes roughly ten minutes
[root@k8s-master01 ~]# kubectl  get pod -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-6747f75cdc-fbvvc   1/1     Running   0          61s
kube-system   calico-node-fs7hl                          1/1     Running   0          61s
kube-system   calico-node-jqz58                          1/1     Running   0          61s
kube-system   calico-node-khjlg                          1/1     Running   0          61s
kube-system   calico-node-wmf8q                          1/1     Running   0          61s
kube-system   calico-node-xc6gn                          1/1     Running   0          61s
kube-system   calico-typha-6cdc4b4fbc-57snb              1/1     Running   0          61s

9.2 Install Cilium

9.2.1 Install Helm

# [root@k8s-master01 ~]# curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
# [root@k8s-master01 ~]# chmod 700 get_helm.sh
# [root@k8s-master01 ~]# ./get_helm.sh

wget https://files.m.daocloud.io/get.helm.sh/helm-v3.12.1-linux-amd64.tar.gz
tar xvf helm-*-linux-amd64.tar.gz
cp linux-amd64/helm /usr/local/bin/

9.2.2 Install Cilium

# Add the chart repo
helm repo add cilium https://helm.cilium.io

# Switch the images to a domestic mirror
helm pull cilium/cilium
tar xvf cilium-*.tgz
cd cilium/
sed -i "s#quay.io/#m.daocloud.io/quay.io/#g" values.yaml

# Install with default parameters (run from the directory containing the unpacked cilium/ chart)
cd ..
helm install cilium ./cilium/ -n kube-system

# Enable IPv6
# helm install cilium cilium/cilium --namespace kube-system --set ipv6.enabled=true

# Enable routing information (Hubble) and the monitoring plugins
# helm install cilium cilium/cilium --namespace kube-system --set hubble.relay.enabled=true --set hubble.ui.enabled=true --set prometheus.enabled=true --set operator.prometheus.enabled=true --set hubble.enabled=true --set hubble.metrics.enabled="{dns,drop,tcp,flow,port-distribution,icmp,http}"

9.2.3 Check

[root@k8s-master01 ~]# kubectl  get pod -A | grep cil
kube-system   cilium-gmr6c                       1/1     Running       0             5m3s
kube-system   cilium-kzgdj                       1/1     Running       0             5m3s
kube-system   cilium-operator-69b677f97c-6pw4k   1/1     Running       0             5m3s
kube-system   cilium-operator-69b677f97c-xzzdk   1/1     Running       0             5m3s
kube-system   cilium-q2rnr                       1/1     Running       0             5m3s
kube-system   cilium-smx5v                       1/1     Running       0             5m3s
kube-system   cilium-tdjq4                       1/1     Running       0             5m3s
[root@k8s-master01 ~]#

9.2.4 Download the dedicated monitoring dashboards

If monitoring was not enabled during installation, this step can be skipped

[root@k8s-master01 yaml]# wget https://mirrors.chenby.cn/https://raw.githubusercontent.com/cilium/cilium/1.12.1/examples/kubernetes/addons/prometheus/monitoring-example.yaml

[root@k8s-master01 yaml]# sed -i "s#docker.io/#m.daocloud.io/docker.io/#g" monitoring-example.yaml

[root@k8s-master01 yaml]# kubectl  apply -f monitoring-example.yaml
namespace/cilium-monitoring created
serviceaccount/prometheus-k8s created
configmap/grafana-config created
configmap/grafana-cilium-dashboard created
configmap/grafana-cilium-operator-dashboard created
configmap/grafana-hubble-dashboard created
configmap/prometheus created
clusterrole.rbac.authorization.k8s.io/prometheus created
clusterrolebinding.rbac.authorization.k8s.io/prometheus created
service/grafana created
service/prometheus created
deployment.apps/grafana created
deployment.apps/prometheus created
[root@k8s-master01 yaml]#

9.2.5 Download and deploy the connectivity test

Note: the connectivity test can only complete after CoreDNS has been installed

[root@k8s-master01 yaml]# wget https://mirrors.chenby.cn/https://raw.githubusercontent.com/cilium/cilium/master/examples/kubernetes/connectivity-check/connectivity-check.yaml

[root@k8s-master01 yaml]# sed -i "s#google.com#baidu.cn#g" connectivity-check.yaml

sed -i "s#quay.io/#m.daocloud.io/quay.io/#g" connectivity-check.yaml

[root@k8s-master01 yaml]# kubectl  apply -f connectivity-check.yaml
deployment.apps/echo-a created
deployment.apps/echo-b created
deployment.apps/echo-b-host created
deployment.apps/pod-to-a created
deployment.apps/pod-to-external-1111 created
deployment.apps/pod-to-a-denied-cnp created
deployment.apps/pod-to-a-allowed-cnp created
deployment.apps/pod-to-external-fqdn-allow-google-cnp created
deployment.apps/pod-to-b-multi-node-clusterip created
deployment.apps/pod-to-b-multi-node-headless created
deployment.apps/host-to-b-multi-node-clusterip created
deployment.apps/host-to-b-multi-node-headless created
deployment.apps/pod-to-b-multi-node-nodeport created
deployment.apps/pod-to-b-intra-node-nodeport created
service/echo-a created
service/echo-b created
service/echo-b-headless created
service/echo-b-host-headless created
ciliumnetworkpolicy.cilium.io/pod-to-a-denied-cnp created
ciliumnetworkpolicy.cilium.io/pod-to-a-allowed-cnp created
ciliumnetworkpolicy.cilium.io/pod-to-external-fqdn-allow-google-cnp created
[root@k8s-master01 yaml]#

9.2.6 Check the pods

[root@k8s-master01 yaml]# kubectl  get pod -A
NAMESPACE           NAME                                                     READY   STATUS    RESTARTS      AGE
cilium-monitoring   grafana-59957b9549-6zzqh                                 1/1     Running   0             10m
cilium-monitoring   prometheus-7c8c9684bb-4v9cl                              1/1     Running   0             10m
default             chenby-75b5d7fbfb-7zjsr                                  1/1     Running   0             27h
default             chenby-75b5d7fbfb-hbvr8                                  1/1     Running   0             27h
default             chenby-75b5d7fbfb-ppbzg                                  1/1     Running   0             27h
default             echo-a-6799dff547-pnx6w                                  1/1     Running   0             10m
default             echo-b-fc47b659c-4bdg9                                   1/1     Running   0             10m
default             echo-b-host-67fcfd59b7-28r9s                             1/1     Running   0             10m
default             host-to-b-multi-node-clusterip-69c57975d6-z4j2z          1/1     Running   0             10m
default             host-to-b-multi-node-headless-865899f7bb-frrmc           1/1     Running   0             10m
default             pod-to-a-allowed-cnp-5f9d7d4b9d-hcd8x                    1/1     Running   0             10m
default             pod-to-a-denied-cnp-65cc5ff97b-2rzb8                     1/1     Running   0             10m
default             pod-to-a-dfc64f564-p7xcn                                 1/1     Running   0             10m
default             pod-to-b-intra-node-nodeport-677868746b-trk2l            1/1     Running   0             10m
default             pod-to-b-multi-node-clusterip-76bbbc677b-knfq2           1/1     Running   0             10m
default             pod-to-b-multi-node-headless-698c6579fd-mmvd7            1/1     Running   0             10m
default             pod-to-b-multi-node-nodeport-5dc4b8cfd6-8dxmz            1/1     Running   0             10m
default             pod-to-external-1111-8459965778-pjt9b                    1/1     Running   0             10m
default             pod-to-external-fqdn-allow-google-cnp-64df9fb89b-l9l4q   1/1     Running   0             10m
kube-system         cilium-7rfj6                                             1/1     Running   0             56s
kube-system         cilium-d4cch                                             1/1     Running   0             56s
kube-system         cilium-h5x8r                                             1/1     Running   0             56s
kube-system         cilium-operator-5dbddb6dbf-flpl5                         1/1     Running   0             56s
kube-system         cilium-operator-5dbddb6dbf-gcznc                         1/1     Running   0             56s
kube-system         cilium-t2xlz                                             1/1     Running   0             56s
kube-system         cilium-z65z7                                             1/1     Running   0             56s
kube-system         coredns-665475b9f8-jkqn8                                 1/1     Running   1 (36h ago)   36h
kube-system         hubble-relay-59d8575-9pl9z                               1/1     Running   0             56s
kube-system         hubble-ui-64d4995d57-nsv9j                               2/2     Running   0             56s
kube-system         metrics-server-776f58c94b-c6zgs                          1/1     Running   1 (36h ago)   37h
[root@k8s-master01 yaml]#

9.2.7 Change the services to NodePort

If monitoring was not enabled during installation, this step can be skipped

[root@k8s-master01 yaml]# kubectl  edit svc  -n kube-system hubble-ui
service/hubble-ui edited
[root@k8s-master01 yaml]#
[root@k8s-master01 yaml]# kubectl  edit svc  -n cilium-monitoring grafana
service/grafana edited
[root@k8s-master01 yaml]#
[root@k8s-master01 yaml]# kubectl  edit svc  -n cilium-monitoring prometheus
service/prometheus edited
[root@k8s-master01 yaml]#

type: NodePort

9.2.8 Check the ports

If monitoring was not enabled during installation, this step can be skipped

[root@k8s-master01 yaml]# kubectl get svc -A | grep monit
cilium-monitoring   grafana                NodePort    10.100.250.17    <none>        3000:30707/TCP           15m
cilium-monitoring   prometheus             NodePort    10.100.131.243   <none>        9090:31155/TCP           15m
[root@k8s-master01 yaml]#
[root@k8s-master01 yaml]# kubectl get svc -A | grep hubble
kube-system         hubble-metrics         ClusterIP   None             <none>        9965/TCP                 5m12s
kube-system         hubble-peer            ClusterIP   10.100.150.29    <none>        443/TCP                  5m12s
kube-system         hubble-relay           ClusterIP   10.109.251.34    <none>        80/TCP                   5m12s
kube-system         hubble-ui              NodePort    10.102.253.59    <none>        80:31219/TCP             5m12s
[root@k8s-master01 yaml]#

9.2.9 Access

If monitoring was not enabled during installation, this step can be skipped

http://192.168.0.31:30707
http://192.168.0.31:31155
http://192.168.0.31:31219

10. Install CoreDNS

10.1 The following steps are performed only on master01

10.1.1 Modify the files

# Download the chart tgz
helm repo add coredns https://coredns.github.io/helm
helm pull coredns/coredns
tar xvf coredns-*.tgz
cd coredns/

# Modify the IP address
vim values.yaml
cat values.yaml | grep clusterIP:
clusterIP: "10.96.0.10"

# Example
---
service:
# clusterIP: ""
# clusterIPs: []
# loadBalancerIP: ""
# externalIPs: []
# externalTrafficPolicy: ""
# ipFamilyPolicy: ""
  # The name of the Service
  # If not set, a name is generated using the fullname template
  clusterIP: "10.96.0.10"
  name: ""
  annotations: {}
---

# Switch to a domestic mirror (the Docker mirror is optional)
sed -i "s#coredns/#m.daocloud.io/docker.io/coredns/#g" values.yaml
sed -i "s#registry.k8s.io/#m.daocloud.io/registry.k8s.io/#g" values.yaml

# Install with default parameters (run from the directory containing the unpacked coredns/ chart)
cd ..
helm install coredns ./coredns/ -n kube-system
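
The clusterIP set in values.yaml must match the clusterDNS address (10.96.0.10) configured in kubelet-conf.yml earlier. A quick cross-check after the install; the service name coredns-coredns comes from the chart's naming, as also seen in section 12.3:

helm list -n kube-system
kubectl -n kube-system get svc coredns-coredns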

11. Install Metrics Server

11.1 The following steps are performed only on master01

11.1.1 Install metrics-server

In newer versions of Kubernetes, system resource collection is handled by metrics-server, which gathers memory, disk, CPU, and network usage for nodes and Pods.

# Single-instance version
wget https://mirrors.chenby.cn/https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
# High-availability version
wget https://mirrors.chenby.cn/https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/high-availability.yaml


# Modify the configuration
vim components.yaml
vim high-availability.yaml

---
# 1
defaultArgs:
  - --cert-dir=/tmp
  - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
  - --kubelet-use-node-status-port
  - --metric-resolution=15s
  - --kubelet-insecure-tls
  - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
  - --requestheader-username-headers=X-Remote-User
  - --requestheader-group-headers=X-Remote-Group
  - --requestheader-extra-headers-prefix=X-Remote-Extra-

# 2
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
        - name: ca-ssl
          mountPath: /etc/kubernetes/pki

# 3
      volumes:
      - emptyDir: {}
        name: tmp-dir
      - name: ca-ssl
        hostPath:
          path: /etc/kubernetes/pki
---


# Switch to a domestic mirror (the Docker mirror is optional)
sed -i "s#registry.k8s.io/#m.daocloud.io/registry.k8s.io/#g" *.yaml

# Choose one of the two
kubectl apply -f components.yaml
# kubectl apply -f high-availability.yaml
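
Besides kubectl top, the aggregated API registration can be checked directly; if it stays unavailable, re-check the front-proxy certificate flags added above (v1beta1.metrics.k8s.io is the APIService name used by the upstream manifest):

kubectl get apiservice v1beta1.metrics.k8s.io
kubectl -n kube-system get pods | grep metrics-server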

11.1.2 Wait a moment, then check the status

kubectl  top node
NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
k8s-master01   197m         4%     1497Mi          39%       
k8s-master02   152m         3%     1315Mi          34%       
k8s-master03   112m         2%     1274Mi          33%       
k8s-node01     142m         3%     777Mi           20%       
k8s-node02     71m          1%     682Mi           17%

12. Cluster verification

12.1 Deploy a pod resource

cat<<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: docker.io/library/busybox:1.28
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF

# Check
kubectl  get pod
NAME      READY   STATUS    RESTARTS   AGE
busybox   1/1     Running   0          17s

12.2 Use a pod to resolve the kubernetes service in the default namespace

# Check the service name
kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   17h

# Resolve it
kubectl exec  busybox -n default -- nslookup kubernetes
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local

12.3 Test whether cross-namespace resolution works

# Check which services exist
kubectl  get svc -A
NAMESPACE     NAME              TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
default       kubernetes        ClusterIP   10.96.0.1       <none>        443/TCP         76m
kube-system   calico-typha      ClusterIP   10.105.100.82   <none>        5473/TCP        35m
kube-system   coredns-coredns   ClusterIP   10.96.0.10      <none>        53/UDP,53/TCP   8m14s
kube-system   metrics-server    ClusterIP   10.105.60.31    <none>        443/TCP         109s

# Resolve it
kubectl exec  busybox -n default -- nslookup coredns-coredns.kube-system
Server:    10.96.0.10
Address 1: 10.96.0.10 coredns-coredns.kube-system.svc.cluster.local

Name:      coredns-coredns.kube-system
Address 1: 10.96.0.10 coredns-coredns.kube-system.svc.cluster.local
[root@k8s-master01 metrics-server]#

12.4 Every node must be able to reach the kubernetes svc on port 443 and the kube-dns service on port 53

telnet 10.96.0.1 443
Trying 10.96.0.1...
Connected to 10.96.0.1.
Escape character is '^]'.

 telnet 10.96.0.10 53
Trying 10.96.0.10...
Connected to 10.96.0.10.
Escape character is '^]'.

curl 10.96.0.10:53
curl: (52) Empty reply from server

12.5 Pods must be able to reach each other

kubectl get po -owide
NAME      READY   STATUS    RESTARTS   AGE   IP              NODE         NOMINATED NODE   READINESS GATES
busybox   1/1     Running   0          17m   172.27.14.193   k8s-node02   <none>           <none>

kubectl get po -n kube-system -owide
NAME                                       READY   STATUS    RESTARTS   AGE     IP               NODE           NOMINATED NODE   READINESS GATES
calico-kube-controllers-76754ff848-pw4xg   1/1     Running   0          38m     172.25.244.193   k8s-master01   <none>           <none>
calico-node-97m55                          1/1     Running   0          38m     192.168.0.34     k8s-node01     <none>           <none>
calico-node-hlz7j                          1/1     Running   0          38m     192.168.0.32     k8s-master02   <none>           <none>
calico-node-jtlck                          1/1     Running   0          38m     192.168.0.33     k8s-master03   <none>           <none>
calico-node-lxfkf                          1/1     Running   0          38m     192.168.0.35     k8s-node02     <none>           <none>
calico-node-t667x                          1/1     Running   0          38m     192.168.0.31     k8s-master01   <none>           <none>
calico-typha-59d75c5dd4-gbhfp              1/1     Running   0          38m     192.168.0.35     k8s-node02     <none>           <none>
coredns-coredns-c5c6d4d9b-bd829            1/1     Running   0          10m     172.25.92.65     k8s-master02   <none>           <none>
metrics-server-7c8b55c754-w7q8v            1/1     Running   0          3m56s   172.17.125.3     k8s-node01     <none>           <none>

# Exec into busybox and ping pods on other nodes

kubectl exec -ti busybox -- sh
/ # ping 192.168.0.34
PING 192.168.0.34 (192.168.0.34): 56 data bytes
64 bytes from 192.168.0.34: seq=0 ttl=63 time=0.358 ms
64 bytes from 192.168.0.34: seq=1 ttl=63 time=0.668 ms
64 bytes from 192.168.0.34: seq=2 ttl=63 time=0.637 ms
64 bytes from 192.168.0.34: seq=3 ttl=63 time=0.624 ms
64 bytes from 192.168.0.34: seq=4 ttl=63 time=0.907 ms

# If this succeeds, the pod can communicate across namespaces and across hosts

12.6 Create three replicas and verify they are spread across different nodes (delete them when done)

cat > deployments.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80

EOF

kubectl  apply -f deployments.yaml 
deployment.apps/nginx-deployment created

kubectl  get pod 
NAME                               READY   STATUS    RESTARTS   AGE
busybox                            1/1     Running   0          6m25s
nginx-deployment-9456bbbf9-4bmvk   1/1     Running   0          8s
nginx-deployment-9456bbbf9-9rcdk   1/1     Running   0          8s
nginx-deployment-9456bbbf9-dqv8s   1/1     Running   0          8s

# Delete nginx

[root@k8s-master01 ~]# kubectl delete -f deployments.yaml

13. Install the dashboard

helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
helm install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --namespace kube-system

13.1 Change the dashboard service to NodePort (skip if it already is)

kubectl edit svc kubernetes-dashboard -n kube-system

  type: NodePort

13.2 Check the port number

kubectl get svc kubernetes-dashboard -n kube-system
NAME                   TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   10.108.120.110   <none>        443:30034/TCP   34s

13.3 Create a token

cat > dashboard-user.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF

kubectl  apply -f dashboard-user.yaml

# Create the token
kubectl -n kube-system create token admin-user
eyJhbGciOiJSUzI1NiIsImtpZCI6ImtHTXRwbS1IR3NabHR5WDhYTUhUX1Rnekt4M1pzNFNNM3NwLXdkSlh3T2MifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjg3ODc1MjIyLCJpYXQiOjE2ODc4NzE2MjIsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiZjZiMzYzYzEtZjE1Ni00YTBhLTk5MzUtYmZmN2YzZWJlNTU2In19LCJuYmYiOjE2ODc4NzE2MjIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTphZG1pbi11c2VyIn0.uNIwe8tzA7IjdBWiCroZxT7OGw9IiCdPT0R1E1G5k965tVH9spVxz6PFvWLwNl6QnjhvseDUAbz0yBIJ3v42nsp1EYZeKXMYxfPGqgZ_7EQ4xYh-zEEoHLtdVVo20beCVtzTzEV_0doUehV_GLDt1es794OI7s4SlxYOtc1MMg50VUr4jkUvfuDPqHSMh2cirnTJXL9TX_3K-30W4c_fN2TCxWoWpwa4G-5oCORx8j9FLejTldHDFB_Z4TNhirNQLpi05C6OT43HiVxrsD6fgvPUQatUznCedb48RWTjCk8nY0CTsZ3VR6Vby4MOrlHf57asMFfe6lSTIcDSj0lV1g
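
The token created this way is short-lived by default; for testing, a longer lifetime can be requested with the --duration flag of kubectl create token (convenience only, not recommended for production):

# request a 24-hour token instead of the default lifetime
kubectl -n kube-system create token admin-user --duration=24h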

13.4 Log in to the dashboard

https://192.168.0.31:30034/

14. Install ingress

14.1 Deploy

wget https://mirrors.chenby.cn/https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/cloud/deploy.yaml

# Switch to a domestic mirror (the Docker mirror is optional)
sed -i "s#registry.k8s.io/#m.daocloud.io/registry.k8s.io/#g" *.yaml

cat > backend.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: default-http-backend
  labels:
    app.kubernetes.io/name: default-http-backend
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: default-http-backend
  template:
    metadata:
      labels:
        app.kubernetes.io/name: default-http-backend
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: default-http-backend
        image: registry.cn-hangzhou.aliyuncs.com/chenby/defaultbackend-amd64:1.5 
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        ports:
        - containerPort: 8080
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
  name: default-http-backend
  namespace: kube-system
  labels:
    app.kubernetes.io/name: default-http-backend
spec:
  ports:
  - port: 80
    targetPort: 8080
  selector:
    app.kubernetes.io/name: default-http-backend
EOF

kubectl  apply -f deploy.yaml 
kubectl  apply -f backend.yaml 


cat > ingress-demo-app.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-server
spec:
  replicas: 2
  selector:
    matchLabels:
      app: hello-server
  template:
    metadata:
      labels:
        app: hello-server
    spec:
      containers:
      - name: hello-server
        image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/hello-server
        ports:
        - containerPort: 9000
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-demo
  name: nginx-demo
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-demo
  template:
    metadata:
      labels:
        app: nginx-demo
    spec:
      containers:
      - image: nginx
        name: nginx
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx-demo
  name: nginx-demo
spec:
  selector:
    app: nginx-demo
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 80
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: hello-server
  name: hello-server
spec:
  selector:
    app: hello-server
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 9000
---
apiVersion: networking.k8s.io/v1
kind: Ingress  
metadata:
  name: ingress-host-bar
spec:
  ingressClassName: nginx
  rules:
  - host: "hello.chenby.cn"
    http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: hello-server
            port:
              number: 8000
  - host: "demo.chenby.cn"
    http:
      paths:
      - pathType: Prefix
        path: "/nginx"  
        backend:
          service:
            name: nginx-demo
            port:
              number: 8000
EOF

# After the above resources have been created, run:
kubectl  apply -f ingress-demo-app.yaml 

kubectl  get ingress
NAME               CLASS   HOSTS                            ADDRESS     PORTS   AGE
ingress-host-bar   nginx   hello.chenby.cn,demo.chenby.cn   192.168.0.32   80      7s

14.2 Check the ingress ports

# Change the service type to NodePort
kubectl edit svc -n ingress-nginx   ingress-nginx-controller

type: NodePort

[root@hello ~/yaml]# kubectl  get svc -A | grep ingress
ingress-nginx          ingress-nginx-controller             NodePort    10.104.231.36    <none>        80:32636/TCP,443:30579/TCP   104s
ingress-nginx          ingress-nginx-controller-admission   ClusterIP   10.101.85.88     <none>        443/TCP                      105s
[root@hello ~/yaml]#
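
With the controller exposed as a NodePort, both hosts can be tested without DNS by passing a Host header; the 32636 port below is taken from the example output above, so substitute your actual HTTP NodePort and any node IP:

curl -H "Host: hello.chenby.cn" http://192.168.0.31:32636
curl -H "Host: demo.chenby.cn" http://192.168.0.31:32636/nginx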

15. IPv6 test

# Deploy the application

cat<<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: chenby
spec:
  replicas: 3
  selector:
    matchLabels:
      app: chenby
  template:
    metadata:
      labels:
        app: chenby
    spec:
      containers:
      - name: chenby
        image: docker.io/library/nginx
        resources:
          limits:
            memory: "128Mi"
            cpu: "500m"
        ports:
        - containerPort: 80

---
apiVersion: v1
kind: Service
metadata:
  name: chenby
spec:
  ipFamilyPolicy: PreferDualStack
  ipFamilies:
  - IPv6
  - IPv4
  type: NodePort
  selector:
    app: chenby
  ports:
  - port: 80
    targetPort: 80
EOF


# Check the port
[root@k8s-master01 ~]# kubectl  get svc
NAME           TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
chenby         NodePort    fd00::a29c       <none>        80:30779/TCP   5s
[root@k8s-master01 ~]# 

# Access over the internal network
[root@localhost yaml]# curl -I http://[fd00::a29c]
HTTP/1.1 200 OK
Server: nginx/1.21.6
Date: Thu, 05 May 2022 10:20:35 GMT
Content-Type: text/html
Content-Length: 615
Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
Connection: keep-alive
ETag: "61f01158-267"
Accept-Ranges: bytes

[root@localhost yaml]# curl -I http://192.168.0.31:30779
HTTP/1.1 200 OK
Server: nginx/1.21.6
Date: Thu, 05 May 2022 10:20:59 GMT
Content-Type: text/html
Content-Length: 615
Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
Connection: keep-alive
ETag: "61f01158-267"
Accept-Ranges: bytes

[root@localhost yaml]# 

# Access over the public network
[root@localhost yaml]# curl -I http://[2409:8a10:9e18:9020::10]:30779
HTTP/1.1 200 OK
Server: nginx/1.21.6
Date: Thu, 05 May 2022 10:20:54 GMT
Content-Type: text/html
Content-Length: 615
Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
Connection: keep-alive
ETag: "61f01158-267"
Accept-Ranges: bytes

16. Install command-line auto-completion

yum install bash-completion -y
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc

Appendix

# The DaoCloud registry can be used as an image accelerator; the replacement rules are as follows
cr.l5d.io/  ===> m.daocloud.io/cr.l5d.io/
docker.elastic.co/  ===> m.daocloud.io/docker.elastic.co/
docker.io/  ===> m.daocloud.io/docker.io/
gcr.io/  ===> m.daocloud.io/gcr.io/
ghcr.io/  ===> m.daocloud.io/ghcr.io/
k8s.gcr.io/  ===> m.daocloud.io/k8s.gcr.io/
mcr.microsoft.com/  ===> m.daocloud.io/mcr.microsoft.com/
nvcr.io/  ===> m.daocloud.io/nvcr.io/
quay.io/  ===> m.daocloud.io/quay.io/
registry.jujucharms.com/  ===> m.daocloud.io/registry.jujucharms.com/
registry.k8s.io/  ===> m.daocloud.io/registry.k8s.io/
registry.opensource.zalan.do/  ===> m.daocloud.io/registry.opensource.zalan.do/
rocks.canonical.com/  ===> m.daocloud.io/rocks.canonical.com/




# Check the image versions yourself; they change constantly and this document cannot stay up to date in real time

# docker pull the images

docker pull registry.cn-hangzhou.aliyuncs.com/chenby/cni:master 
docker pull registry.cn-hangzhou.aliyuncs.com/chenby/node:master
docker pull registry.cn-hangzhou.aliyuncs.com/chenby/kube-controllers:master
docker pull registry.cn-hangzhou.aliyuncs.com/chenby/typha:master
docker pull registry.cn-hangzhou.aliyuncs.com/chenby/coredns:v1.10.0
docker pull registry.cn-hangzhou.aliyuncs.com/chenby/pause:3.6
docker pull registry.cn-hangzhou.aliyuncs.com/chenby/metrics-server:v0.5.2
docker pull kubernetesui/dashboard:v2.7.0
docker pull kubernetesui/metrics-scraper:v1.0.8
docker pull quay.io/cilium/cilium:v1.12.6
docker pull quay.io/cilium/certgen:v0.1.8
docker pull quay.io/cilium/hubble-relay:v1.12.6
docker pull quay.io/cilium/hubble-ui-backend:v0.9.2
docker pull quay.io/cilium/hubble-ui:v0.9.2
docker pull quay.io/cilium/cilium-etcd-operator:v2.0.7
docker pull quay.io/cilium/operator:v1.12.6
docker pull quay.io/cilium/clustermesh-apiserver:v1.12.6
docker pull quay.io/coreos/etcd:v3.5.4
docker pull quay.io/cilium/startup-script:d69851597ea019af980891a4628fb36b7880ec26

# docker save the images
docker save registry.cn-hangzhou.aliyuncs.com/chenby/cni:master -o cni.tar 
docker save registry.cn-hangzhou.aliyuncs.com/chenby/node:master -o node.tar 
docker save registry.cn-hangzhou.aliyuncs.com/chenby/typha:master -o typha.tar 
docker save registry.cn-hangzhou.aliyuncs.com/chenby/kube-controllers:master -o kube-controllers.tar 
docker save registry.cn-hangzhou.aliyuncs.com/chenby/coredns:v1.10.0 -o coredns.tar 
docker save registry.cn-hangzhou.aliyuncs.com/chenby/pause:3.6 -o pause.tar 
docker save registry.cn-hangzhou.aliyuncs.com/chenby/metrics-server:v0.5.2 -o metrics-server.tar 
docker save kubernetesui/dashboard:v2.7.0 -o dashboard.tar 
docker save kubernetesui/metrics-scraper:v1.0.8 -o metrics-scraper.tar 
docker save quay.io/cilium/cilium:v1.12.6 -o cilium.tar 
docker save quay.io/cilium/certgen:v0.1.8 -o certgen.tar 
docker save quay.io/cilium/hubble-relay:v1.12.6 -o hubble-relay.tar 
docker save quay.io/cilium/hubble-ui-backend:v0.9.2 -o hubble-ui-backend.tar 
docker save quay.io/cilium/hubble-ui:v0.9.2 -o hubble-ui.tar 
docker save quay.io/cilium/cilium-etcd-operator:v2.0.7 -o cilium-etcd-operator.tar 
docker save quay.io/cilium/operator:v1.12.6 -o operator.tar 
docker save quay.io/cilium/clustermesh-apiserver:v1.12.6 -o clustermesh-apiserver.tar 
docker save quay.io/coreos/etcd:v3.5.4 -o etcd.tar 
docker save quay.io/cilium/startup-script:d69851597ea019af980891a4628fb36b7880ec26 -o startup-script.tar 

# Transfer to each node
for NODE in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02; do scp -r images/  $NODE:/root/ ; done

# Create the namespace
ctr ns create k8s.io

# Import the images
ctr --namespace k8s.io image import images/cni.tar
ctr --namespace k8s.io image import images/node.tar
ctr --namespace k8s.io image import images/typha.tar
ctr --namespace k8s.io image import images/kube-controllers.tar 
ctr --namespace k8s.io image import images/coredns.tar 
ctr --namespace k8s.io image import images/pause.tar 
ctr --namespace k8s.io image import images/metrics-server.tar 
ctr --namespace k8s.io image import images/dashboard.tar 
ctr --namespace k8s.io image import images/metrics-scraper.tar 
ctr --namespace k8s.io image import images/cilium.tar 
ctr --namespace k8s.io image import images/certgen.tar 
ctr --namespace k8s.io image import images/hubble-relay.tar 
ctr --namespace k8s.io image import images/hubble-ui-backend.tar 
ctr --namespace k8s.io image import images/hubble-ui.tar 
ctr --namespace k8s.io image import images/cilium-etcd-operator.tar 
ctr --namespace k8s.io image import images/operator.tar 
ctr --namespace k8s.io image import images/clustermesh-apiserver.tar 
ctr --namespace k8s.io image import images/etcd.tar 
ctr --namespace k8s.io image import images/startup-script.tar 
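
Equivalently, the whole directory can be imported in one loop, which avoids missing a tarball (a small sketch, run from the same directory as the commands above):

for tar in images/*.tar; do ctr --namespace k8s.io image import "$tar"; done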

# After pulling the chart tarball and extracting it
helm pull cilium/cilium

# Check the image versions
root@hello:~/cilium# cat values.yaml| grep tag: -C1
  repository: "quay.io/cilium/cilium"
  tag: "v1.12.6"
  pullPolicy: "IfNotPresent"
--
    repository: "quay.io/cilium/certgen"
    tag: "v0.1.8@sha256:4a456552a5f192992a6edcec2febb1c54870d665173a33dc7d876129b199ddbd"
    pullPolicy: "IfNotPresent"
--
      repository: "quay.io/cilium/hubble-relay"
      tag: "v1.12.6"
       # hubble-relay-digest
--
        repository: "quay.io/cilium/hubble-ui-backend"
        tag: "v0.9.2@sha256:a3ac4d5b87889c9f7cc6323e86d3126b0d382933bd64f44382a92778b0cde5d7"
        pullPolicy: "IfNotPresent"
--
        repository: "quay.io/cilium/hubble-ui"
        tag: "v0.9.2@sha256:d3596efc94a41c6b772b9afe6fe47c17417658956e04c3e2a28d293f2670663e"
        pullPolicy: "IfNotPresent"
--
    repository: "quay.io/cilium/cilium-etcd-operator"
    tag: "v2.0.7@sha256:04b8327f7f992693c2cb483b999041ed8f92efc8e14f2a5f3ab95574a65ea2dc"
    pullPolicy: "IfNotPresent"
--
    repository: "quay.io/cilium/operator"
    tag: "v1.12.6"
    # operator-generic-digest
--
    repository: "quay.io/cilium/startup-script"
    tag: "d69851597ea019af980891a4628fb36b7880ec26"
    pullPolicy: "IfNotPresent"
--
    repository: "quay.io/cilium/cilium"
    tag: "v1.12.6"
    # cilium-digest
--
      repository: "quay.io/cilium/clustermesh-apiserver"
      tag: "v1.12.6"
      # clustermesh-apiserver-digest
--
        repository: "quay.io/coreos/etcd"
        tag: "v3.5.4@sha256:795d8660c48c439a7c3764c2330ed9222ab5db5bb524d8d0607cac76f7ba82a3"
        pullPolicy: "IfNotPresent"

About

https://www.oiox.cn/

https://www.oiox.cn/index.php/start-page.html

CSDN, GitHub, Zhihu, OSChina, SegmentFault, Juejin, Jianshu, Huawei Cloud, Alibaba Cloud, Tencent Cloud, Bilibili, Toutiao, Sina Weibo, and my personal blog

Search for 《小陈运维》 anywhere on the web

Articles are primarily published on the WeChat official account 《Linux运维交流社区》