k8s 1.26 (using containerd)
1. Disable the firewall
systemctl disable --now firewalld.service
2. Disable SELinux
setenforce 0 #takes effect immediately, without a reboot
vim /etc/selinux/config
SELINUX=disabled
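Alternatively, persist the change without opening an editor; a minimal sketch, assuming the stock /etc/selinux/config layout:
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config #rewrite the SELINUX= line in place
getenforce #prints Permissive after setenforce 0; Disabled after the next reboot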
3. Disable all swap partitions
Note: ideally, don't create a swap partition at all when installing the OS.
swapoff -a #disable all active swap devices
free -h
              total        used        free      shared  buff/cache   available
Mem:           1.8G        280M        1.2G        9.6M        286M        1.4G
Swap:            0B          0B          0B
vim /etc/fstab #disable swap permanently by deleting or commenting out the swap mount entry in /etc/fstab
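To comment out the swap entry without opening an editor, a hedged one-liner (the pattern assumes a standard fstab swap line; verify the file afterwards):
sed -ri 's/^([^#].*[[:space:]]swap[[:space:]].*)$/#\1/' /etc/fstab #prefix active swap lines with #
grep swap /etc/fstab #confirm the entry is now commented out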
4. Set the hostnames
cat >> /etc/hosts <<EOF
192.168.9.20 k8s01
192.168.9.21 k8s02
192.168.9.22 k8s03
192.168.9.33 vip
EOF
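The hostnames themselves still need to be set on each node; a minimal sketch using the names from the hosts entries above:
hostnamectl set-hostname k8s01 #run on 192.168.9.20; use k8s02 and k8s03 on the other two nodes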
date #check the time zone and current time
timedatectl set-timezone Asia/Shanghai #if the time zone is wrong, set it to Asia/Shanghai
yum -y install ntp #install the NTP service
systemctl start ntpd #start the ntpd service, or use a cron job such as: */5 * * * * /usr/sbin/ntpdate -u
systemctl enable ntpd
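To confirm time is actually syncing, a quick check (ntpq ships with the ntp package):
ntpq -p #the peer marked with * is the currently selected sync source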
5. Create kernel module and sysctl config files
cat << EOF > /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
Run the following to load the modules immediately:
modprobe overlay
modprobe br_netfilter
Create the /etc/sysctl.d/99-kubernetes-cri.conf config file:
cat << EOF > /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
user.max_user_namespaces=28633
EOF
Run the following to apply the settings:
sysctl -p /etc/sysctl.d/99-kubernetes-cri.conf
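A quick sanity check that the modules loaded and the settings took effect:
lsmod | grep -e overlay -e br_netfilter #both modules should be listed
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward #both should print 1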
6. Prerequisites for enabling ipvs
Running kube-proxy in ipvs mode requires the following kernel modules to be loaded:
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
Create and run the following script:
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
The /etc/sysconfig/modules/ipvs.modules file created above ensures the required modules are loaded automatically after a node reboot. Use lsmod | grep -e ip_vs -e nf_conntrack_ipv4 to verify that the kernel modules loaded correctly.
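Note: on kernels 4.19 and later, nf_conntrack_ipv4 was merged into nf_conntrack, so on newer systems replace the last modprobe line (and the lsmod filter) accordingly:
modprobe -- nf_conntrack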
7. Install containerd
(Run this on every node: since k8s 1.24, the default CRI is containerd; the CRI is the container-runtime plugin.)
The containerd official site
The official containerd install guide covers both a release-package install and a plain yum/apt-get install.
Download the containerd binary package:
wget https://github.com/containerd/containerd/releases/download/v1.6.14/containerd-1.6.14-linux-amd64.tar.gz #very slow! Consider downloading it in a browser, copying it to /usr/local, then extracting
tar Cxzvf /usr/local containerd-1.6.14-linux-amd64.tar.gz #extracts into /usr/local/bin
bin/ #extracted into /usr/local/bin
bin/containerd-shim-runc-v2 #these 6 executables are the containerd commands from the tarball
bin/containerd-shim
bin/ctr
bin/containerd-shim-runc-v1
bin/containerd
bin/containerd-stress
wget https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
mv containerd.service /usr/lib/systemd/system/
systemctl daemon-reload && systemctl enable --now containerd
systemctl status containerd #containerd should now be in the running state
curl -LO https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64 && \
install -m 755 runc.amd64 /usr/local/sbin/runc #containerd relies on runc to actually run containers
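The official binary-install guide also installs the CNI plugins next to containerd and runc; a minimal sketch, with the plugin version (v1.1.1) being an assumption:
wget https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz
mkdir -p /opt/cni/bin
tar Cxzvf /opt/cni/bin cni-plugins-linux-amd64-v1.1.1.tgz #the plugin binaries land in /opt/cni/bin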
YUM install (alternative)
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git -y
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo
yum install containerd.io cri-tools -y #the docker-ce repo packages containerd as containerd.io; crictl is provided by cri-tools
Adjust containerd's configuration, because containerd pulls images from the k8s registry by default:
mkdir -p /etc/containerd #directory for containerd's config file
containerd config default | sudo tee /etc/containerd/config.toml #dump the default config to a file
vim /etc/containerd/config.toml #edit the config file
[plugins."io.containerd.grpc.v1.cri"]
  ..................................
  sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9" #search for sandbox_image and change the default k8s.gcr.io/pause:3.6 to "registry.aliyuncs.com/google_containers/pause:3.9"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  ..........................
  SystemdCgroup = true #search for SystemdCgroup and change false to true
[plugins."io.containerd.grpc.v1.cri".registry]
  config_path = "/etc/containerd/certs.d" #search for config_path and set the registry-mirror directory (created below)
mkdir /etc/containerd/certs.d/docker.io -pv
cat > /etc/containerd/certs.d/docker.io/hosts.toml << EOF
server = "https://docker.io"

[host."https://b9pmyelo.mirror.aliyuncs.com"]
  capabilities = ["pull", "resolve"]
EOF
systemctl enable --now containerd
systemctl restart containerd #restart so the edited config.toml takes effect
systemctl status containerd
ctr i pull docker.io/library/nginx:alpine #if the image pulls cleanly, containerd is working
ctr images ls #list images
ctr c create --net-host docker.io/library/nginx:alpine nginx #create a container
ctr task start -d nginx #start the container; if this works, containerd is fine
ctr containers ls #list containers
ctr tasks kill -s SIGKILL nginx #kill the container's task
ctr containers rm nginx #delete the container
cat <<EOF> /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
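With crictl now pointed at containerd's socket, a quick check:
crictl info #runtime status; the RuntimeReady condition should be true
crictl images #list images through the CRI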
8. Install kubeadm, kubelet and kubectl
YUM method
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubelet kubeadm kubectl
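yum install without a version pulls the newest packages; to pin to the 1.26.0 release assumed by the config below (adjust the patch version as needed):
yum install -y kubelet-1.26.0 kubeadm-1.26.0 kubectl-1.26.0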
Initialize the cluster with kubeadm init
systemctl enable kubelet.service
Running kubeadm config print init-defaults --component-configs KubeletConfiguration prints the default configuration used for cluster initialization:
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.9.33 #replace with the master node IP
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: node
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.k8s.io #change this source to the Aliyun mirror: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.26.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
logging:
  flushFrequency: 0
  options:
    json:
      infoBufferSize: "0"
  verbosity: 0
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
resolvConf: /run/systemd/resolve/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s
As the defaults show, imageRepository customizes where the images required for cluster initialization are pulled from. Based on these defaults, derive the kubeadm.yaml configuration file for this kubeadm init run:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.9.33 #master node IP
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  taints:
  - effect: PreferNoSchedule
    key: node-role.kubernetes.io/master #since 1.25 the control-plane taint key is node-role.kubernetes.io/control-plane; adjust if needed
---
apiVersion: kubeadm.k8s.io/v1beta3 #kubeadm 1.26 removed the v1beta2 API; use v1beta3 here as well
kind: ClusterConfiguration
kubernetesVersion: 1.26.0
imageRepository: registry.aliyuncs.com/google_containers #the Aliyun mirror
networking:
  podSubnet: 10.244.0.0/16
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
kubeadm config images pull --config kubeadm.yaml #pre-pull the required images
kubeadm init --config kubeadm.yaml #initialize the control plane
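If the init succeeds, configure kubectl for the current user as the kubeadm output instructs:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config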
Install the Calico network plugin via the tigera-operator helm chart:
wget https://github.com/projectcalico/calico/releases/download/v3.24.5/tigera-operator-v3.24.5.tgz
helm show values tigera-operator-v3.24.5.tgz #inspect the chart's default values
Create a values.yaml that disables the chart's apiServer component:
apiServer:
  enabled: false
helm install calico tigera-operator-v3.24.5.tgz -n kube-system --create-namespace -f values.yaml
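Watch the operator bring Calico up (namespace names assume the tigera-operator chart defaults):
kubectl get pods -n tigera-operator
kubectl get pods -n calico-system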
Install calicoctl as a kubectl plugin:
cd /usr/local/bin
curl -L -o kubectl-calico "https://github.com/projectcalico/calicoctl/releases/download/v3.21.5/calicoctl-linux-amd64"
chmod +x kubectl-calico
kubectl calico -h #verify kubectl picks up the plugin
Join worker nodes with the command printed by kubeadm init:
kubeadm join 192.168.9.33:6443 --token 8mcp5p.kfpnqr46av1df0bz \
--discovery-token-ca-cert-hash sha256:007f26ca26f70cf2bd33f38b702921e107bbd1656998619e1cba99840d0c9a30
kubeadm token create --print-join-command #regenerate a join command once the token expires
kubeadm init phase upload-certs --upload-certs #upload the control-plane certs and print the certificate key
To join an additional control-plane node, add --control-plane and the certificate key:
kubeadm join 192.168.9.33:6443 --token 8mcp5p.kfpnqr46av1df0bz --discovery-token-ca-cert-hash sha256:007f26ca26f70cf2bd33f38b702921e107bbd1656998619e1cba99840d0c9a30 --control-plane --certificate-key 1d9848b3590b6c1616c013ebd10edbf0aa0f2e0998559b7dbf481aaffc68a897
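Finally, verify from the master that every node has joined and gone Ready:
kubectl get nodes -o wide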