Anolis OS 8.9 Kubernetes Hands-On

Anolis OS 8.9 Kubernetes Deployment

# System ISO image download
https://mirrors.openanolis.cn/anolis/8.9/isos/GA/x86_64/AnolisOS-8.9-x86_64-minimal.iso
# Package repository
https://mirrors.aliyun.com/anolis/8.9/AppStream/x86_64/os/Packages

# Disable SELinux and firewalld
setenforce 0
sed -i 's/^SELINUX=.*$/SELINUX=disabled/' /etc/selinux/config
systemctl stop firewalld
systemctl disable firewalld
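# kubelet (by default) refuses to start with swap enabled, and this guide never turns it off; a minimal sketch:
swapoff -a
sed -ri 's/.*\sswap\s.*/#&/' /etc/fstab   # comment out swap entries so it stays off after reboot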

# Reset the distro's per-user nproc drop-in so the limits.conf settings below take effect
test -f /etc/security/limits.d/20-nproc.conf && rm -f /etc/security/limits.d/20-nproc.conf && touch /etc/security/limits.d/20-nproc.conf

cp /etc/security/limits.conf /etc/security/limits.conf.bak
>/etc/security/limits.conf
cat >> /etc/security/limits.conf <<EOF
root        soft        nofile        1048576
root        hard        nofile        1048576
root        soft        stack         10240
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
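# These limits apply to new login sessions; a quick check after re-login:
ulimit -n   # expect 1048576 for root
ulimit -u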

cp /etc/sysctl.conf  /etc/sysctl.conf.bak
cat >> /etc/sysctl.conf <<EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-arptables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches = 1048576
fs.file-max = 5000000
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 32768
vm.swappiness = 0
net.ipv4.tcp_syncookies = 0
fs.inotify.max_user_instances = 16384
net.ipv4.conf.all.rp_filter = 1
net.ipv4.neigh.default.gc_thresh1 = 80000
net.ipv4.neigh.default.gc_thresh2 = 90000
net.ipv4.neigh.default.gc_thresh3 = 100000
EOF
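# The net.bridge.* keys above only exist once br_netfilter is loaded; load it (plus overlay) persistently, then apply everything:
cat <<EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe overlay && modprobe br_netfilter
sysctl --system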

# Install ipvsadm and related tools on all nodes
yum install ipvsadm ipset sysstat conntrack libseccomp -y

modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack

cat <<EOF | sudo tee /etc/modules-load.d/ipvs.conf 
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
## Newer kernels only
#ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
## On kernels below 4.19 use nf_conntrack_ipv4 instead
nf_conntrack
# nf_conntrack_ipv4
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
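# Verify the list parses and the modules load:
systemctl restart systemd-modules-load.service
lsmod | grep -E 'ip_vs|nf_conntrack'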

# nginx 1.22.1
https://mirrors.aliyun.com/anolis/8.9/AppStream/x86_64/os/Packages/nginx-filesystem-1.22.1-1.0.2.module+an8.9.0+11165+32bf18ca.noarch.rpm
https://mirrors.aliyun.com/anolis/8.9/AppStream/x86_64/os/Packages/nginx-1.22.1-1.0.2.module+an8.9.0+11165+32bf18ca.x86_64.rpm
https://mirrors.aliyun.com/anolis/8.9/AppStream/x86_64/os/Packages/nginx-all-modules-1.22.1-1.0.2.module+an8.9.0+11165+32bf18ca.noarch.rpm
https://mirrors.aliyun.com/anolis/8.9/AppStream/x86_64/os/Packages/nginx-mod-devel-1.22.1-1.0.2.module+an8.9.0+11165+32bf18ca.x86_64.rpm
https://mirrors.aliyun.com/anolis/8.9/AppStream/x86_64/os/Packages/nginx-mod-http-image-filter-1.22.1-1.0.2.module+an8.9.0+11165+32bf18ca.x86_64.rpm
https://mirrors.aliyun.com/anolis/8.9/AppStream/x86_64/os/Packages/nginx-mod-http-perl-1.22.1-1.0.2.module+an8.9.0+11165+32bf18ca.x86_64.rpm
https://mirrors.aliyun.com/anolis/8.9/AppStream/x86_64/os/Packages/nginx-mod-http-xslt-filter-1.22.1-1.0.2.module+an8.9.0+11165+32bf18ca.x86_64.rpm
https://mirrors.aliyun.com/anolis/8.9/AppStream/x86_64/os/Packages/nginx-mod-mail-1.22.1-1.0.2.module+an8.9.0+11165+32bf18ca.x86_64.rpm
https://mirrors.aliyun.com/anolis/8.9/AppStream/x86_64/os/Packages/nginx-mod-stream-1.22.1-1.0.2.module+an8.9.0+11165+32bf18ca.x86_64.rpm
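# A sketch for fetching and installing the RPMs above in one pass (nginx-rpms.txt holding the URL list is a name chosen here):
wget -i nginx-rpms.txt -P /tmp/nginx-rpms
dnf install -y /tmp/nginx-rpms/*.rpm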

# Alternatively, the upstream nginx repos (save as /etc/yum.repos.d/nginx.repo):
[nginx-stable]
name=nginx stable repo
baseurl=https://mirrors.nwafu.edu.cn/nginx/centos/$releasever/$basearch/
gpgcheck=1
enabled=1
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true

[nginx-mainline]
name=nginx mainline repo
baseurl=https://mirrors.nwafu.edu.cn/nginx/mainline/centos/$releasever/$basearch/
gpgcheck=1
enabled=0
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true

sudo dnf install epel-release
yum install bash-com* vim wget net-tools lrzsz unzip telnet lsof nethogs iftop htop nc

sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# In the downloaded repo file, replace download.docker.com with mirrors.nwafu.edu.cn/docker-ce

yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo 

sudo yum remove docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-engine

# If installing containerd reports conflicting packages, follow the hint and append --allowerasing to replace them:
# yum -y install containerd --allowerasing

sudo yum install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
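# The guide only restarts Docker later; enable and start it here:
systemctl enable --now docker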

cat > /etc/docker/daemon.json << EOF
{
    "registry-mirrors": [
       "https://0r5ybm0h.mirror.aliyuncs.com",
       "https://wli8urvv.mirror.aliyuncs.com"
     ],
    "exec-opts": ["native.cgroupdriver=systemd"],
    "log-driver": "json-file",
    "log-opts": {
        "max-size": "300m",
        "max-file":"5"
     },
    "max-concurrent-downloads": 10,
    "max-concurrent-uploads": 5,
    "live-restore": true,
    "storage-driver": "overlay2",
    "data-root": "/var/lib/docker"
}
EOF

mkdir -p /etc/systemd/system/docker.service.d
cat > /etc/systemd/system/docker.service.d/limit-nofile.conf <<EOF
[Service]
LimitNOFILE=1048576
EOF

cat > /etc/systemd/system/docker.service.d/http-proxy.conf <<EOF
[Service]
Environment="HTTP_PROXY=http://192.168.9.20:1081"
Environment="HTTPS_PROXY=http://192.168.9.20:1081"
Environment="NO_PROXY=localhost,127.0.0.1,192.168.9.0/24,10.96.0.0/12,10.244.0.0/16,172.16.0.0/16,172.17.0.0/16,172.18.0.0/16,172.19.0.0/16,172.20.0.0/16,*.vimll.com,*.mirror.aliyuncs.com,*.163.com,*.edu.cn,dockerhub.mirrors.nwafu.edu.cn"
EOF

## Container proxy
# For containers that need a proxy at run time, configure ~/.docker/config.json. The settings below only take effect on Docker 17.07 and later.
{
 "proxies":
 {
   "default":
   {
     "httpProxy": "http://proxy.example.com:8080",
     "httpsProxy": "http://proxy.example.com:8080",
     "noProxy": "localhost,127.0.0.1,.example.com"
   }
 }
}

systemctl daemon-reload && systemctl restart docker 
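# Quick check that daemon.json and the drop-ins took effect:
systemctl show docker --property=Environment   # should list the HTTP(S)_PROXY values
docker info | grep -iE 'cgroup driver|storage driver'   # expect systemd / overlay2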

# 安装 cri-docker
dnf install https://home.vimll.com:9999/download/k8s/cri-dockerd-0.3.14-3.el8.x86_64.rpm
vim /usr/lib/systemd/system/cri-docker.service
# Change the ExecStart line to:
ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9
systemctl enable --now cri-docker
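# Confirm cri-dockerd is up and listening on its socket:
systemctl status cri-docker --no-pager
ls -l /run/cri-dockerd.sock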

# mysql (the el8 release package fits Anolis 8; the commented line is the EL7 alternative)
sudo dnf install https://repo.mysql.com/mysql80-community-release-el8-1.noarch.rpm
# sudo dnf install https://repo.mysql.com/mysql57-community-release-el7-8.noarch.rpm

dnf install mysql
mysql -h192.168.9.6 -P33306 -upython -pPyThonFlask@123 flask < /nfs/tang/backup/flask.sql

sudo dnf install -y nfs-utils
mount -t nfs 192.168.9.6:/mnt/mypool/nfs /nfs
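# Persist the NFS mount across reboots (same server and export as above):
echo '192.168.9.6:/mnt/mypool/nfs /nfs nfs defaults,_netdev 0 0' >> /etc/fstab
mount -a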

dnf install git gcc make
https://github.com/rofl0r/proxychains-ng

./configure --prefix=/usr --sysconfdir=/etc
make && make install
make install-config
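# Example use, assuming this guide's proxy host is added to the [ProxyList] section of /etc/proxychains.conf (the port is an assumption):
#   http 192.168.9.20 1081
proxychains4 curl -I https://www.google.com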

https://github.com/XTLS/Xray-install/blob/main/install-release.sh
https://home.vimll.com:9999/download/install-release.sh
wget https://home.vimll.com:9999/download/xray/install-release.sh
https://github.com/XTLS/Xray-core/releases/download/v1.6.6-2/Xray-linux-64.zip
https://github.com/XTLS/Xray-core/releases/download/v1.6.6-2/Xray-linux-64.zip.dgst

bash -c "$(cat install-release.sh)" @ help
bash -c "$(cat install-release.sh)" @ remove --purge

bash -c "$(cat install-release.sh)" @ install --version v1.6.6-2

systemctl enable --now xray

cat <<EOF | tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/rpm/
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl
EOF

# Official repository
# This will overwrite any existing configuration in /etc/yum.repos.d/kubernetes.repo
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.30/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.30/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF

dnf install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
cat >/etc/sysconfig/kubelet<<EOF
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9"
EOF
systemctl enable --now kubelet
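# kubelet will crash-loop until kubeadm init runs; that is expected. Check the installed versions:
kubeadm version -o short
kubelet --version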

kubeadm config print
kubeadm config images list
kubeadm config images pull
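# With cri-dockerd and the Aliyun mirror, pass both explicitly (otherwise kubeadm targets registry.k8s.io and may pick the wrong CRI socket):
kubeadm config images pull \
  --image-repository registry.aliyuncs.com/google_containers \
  --cri-socket unix:///var/run/cri-dockerd.sock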

# Pull images through a proxy server
dnf install docker-ce rsync
export DOCKER_HOST=unix:///run/docker.sock

## kubeadm with 100-year certificates
git clone --depth=1 -b v1.30.3 https://github.com/kubernetes/kubernetes.git
cd kubernetes
K8S_IMG="registry.k8s.io/build-image/kube-cross:$(cat ./build/build-image/cross/VERSION)"
docker pull $K8S_IMG

## In NewSelfSignedCACert, change: NotAfter:              now.Add(duration365d * 100).UTC(),
vim ./staging/src/k8s.io/client-go/util/cert/cert.go
## Change: CertificateValidity = time.Hour * 24 * 365 * 99
vim ./cmd/kubeadm/app/constants/constants.go
## Build kubeadm:
cd build/ && ./run.sh make kubeadm
## Print the version of the freshly built kubeadm:
./_output/dockerized/bin/linux/amd64/kubeadm version
kubeadm certs check-expiration
kubeadm certs renew all

wget https://home.vimll.com:9999/download/k8s/kubeadm-v.1.30-3-99years

kubeadm config print init-defaults > kubeadm-vimll.yaml
# Edit the defaults; the result:
# cat kubeadm-vimll.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.9.20
  bindPort: 6443
nodeRegistration:
  #criSocket: unix:///var/run/containerd/containerd.sock
  # Changed to the cri-dockerd socket
  criSocket: unix:///var/run/cri-dockerd.sock
  imagePullPolicy: IfNotPresent
  # Change to this master's hostname
  name: Vimller-AnolisOS
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
#imageRepository: registry.k8s.io
# Changed to a mirror registry for faster pulls
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.30.3
# With multiple masters this must point at the load-balanced API address; here it is this master node
controlPlaneEndpoint: "Vimller-AnolisOS:6443"
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  # Add the pod CIDR
  podSubnet: 10.244.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
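# Pre-pull the control-plane images with the same config to surface registry problems before init:
kubeadm config images pull --config kubeadm-vimll.yaml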

yum install iproute-tc -y
kubeadm init --config=kubeadm-vimll.yaml
kubeadm reset --cri-socket unix:///var/run/cri-dockerd.sock
ipvsadm --clear

crictl --runtime-endpoint unix:///run/cri-dockerd.sock ps -a | grep kube | grep -v pause
crictl --runtime-endpoint unix:///run/cri-dockerd.sock logs CONTAINERID
journalctl -xeu kubelet

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
source ~/.bashrc

# With cri-dockerd, append --cri-socket unix:///var/run/cri-dockerd.sock if kubeadm reports multiple CRI endpoints
kubeadm join 192.168.9.20:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:0a2903145497bb7c2fc0977da00271de8d1d9fbc69326c81bbea3241c7931577 \
        --control-plane

kubeadm join 192.168.9.20:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:0a2903145497bb7c2fc0977da00271de8d1d9fbc69326c81bbea3241c7931577

kubeadm token create --print-join-command   # generate a new token if the old one has expired

kubeadm init phase upload-certs --upload-certs     # masters joining the control plane need the resulting --certificate-key
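# If the CA cert hash from the join command is lost, recompute it on a master (the standard openssl recipe):
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt \
  | openssl rsa -pubin -outform der 2>/dev/null \
  | openssl dgst -sha256 -hex | sed 's/^.* //'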

# Make the master node schedulable (remove the control-plane taint)
kubectl  taint node vimller-anolisos node-role.kubernetes.io/control-plane-

kubeadm certs check-expiration
kubeadm certs renew xxx
kubeadm certs renew all

# Renew certificates via the Kubernetes certificates API
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
controllerManager:
  extraArgs:
    cluster-signing-cert-file: /etc/kubernetes/pki/ca.crt
    cluster-signing-key-file: /etc/kubernetes/pki/ca.key

kubectl get csr
kubectl certificate approve <CSR-name>

# Install the Calico CNI on the master node
https://github.com/projectcalico/calico/blob/v3.28.1/manifests/tigera-operator.yaml

cat > ippool.yaml << EOF
apiVersion: crd.projectcalico.org/v1
kind: IPPool
metadata:
  name: default-ipv4-ippool
spec:
  cidr: 10.244.0.0/16
  ipipMode: Always
  natOutgoing: true
EOF

cat > custom-resources.yaml << EOF 
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    ipPools:
    - name: default-ipv4-ippool
      blockSize: 26
      cidr: 10.244.0.0/16
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()
---
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}

EOF

kubectl create -f tigera-operator.yaml
kubectl api-versions|grep calico

### Calico image pulls
# The default Calico manifests reference images on docker.io, which is slow or unreachable from China. Since a Docker registry mirror was configured above, drop the docker.io prefix so images come through the domestic mirror.

# Override the images
cat > imageset.yaml << EOF
apiVersion: operator.tigera.io/v1
kind: ImageSet
metadata:
  name: calico-v3.28.1
spec:
  images:
    - image: 'calico/apiserver'
      digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
    - image: 'calico/cni'
      digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
    - image: 'calico/kube-controllers'
      digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
    - image: 'calico/node'
      digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
    - image: 'calico/typha'
      digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
    - image: 'calico/pod2daemon-flexvol'
      digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
    - image: 'calico/node-windows'
      digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
    - image: 'tigera/operator'
      digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
    - image: 'tigera/key-cert-provisioner'
      digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
EOF

cat > /etc/NetworkManager/conf.d/calico.conf << EOF
[keyfile]
unmanaged-devices=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico;interface-name:vxlan-v6.calico;interface-name:wireguard.cali;interface-name:wg-v6.cali
EOF

kubectl apply -f custom-resources.yaml
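# Watch the operator bring Calico up; every tigerastatus component should become Available:
kubectl get tigerastatus
kubectl get pods -n calico-system -o wide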

## Alternative: kube-flannel
# Download kube-flannel.yml
wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml

# Set Network to the pod CIDR chosen at kubeadm init time
vim kube-flannel.yml
 ...
 net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
 ...
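# Then apply the manifest and watch the DaemonSet roll out:
kubectl apply -f kube-flannel.yml
kubectl get pods -n kube-flannel -w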

## calico.yaml (manifest install, without the operator)
https://github.com/projectcalico/calico/blob/v3.28.1/manifests/calico.yaml
# Around line 4551, edit the following:
            - name: CALICO_IPV4POOL_CIDR
              value: "10.244.0.0/16"    # the pod CIDR configured for this cluster
            - name: IP_AUTODETECTION_METHOD
              value: "interface=ens33"  # host NIC; add this entry by hand, the stock manifest lacks it

# Around line 4521, edit the following:
            # Enable IPIP
            - name: CALICO_IPV4POOL_IPIP
              value: "Never"    # default is Always (IPIP mode); Never selects BGP mode, which performs better

# Cleanup
kubectl delete -f tigera-operator.yaml --grace-period=0 --force
kubectl delete -f custom-resources.yaml --grace-period=0 --force

# List all resources whose names contain calico|tigera:
kubectl get all --all-namespaces | egrep "calico|tigera"

# List all namespaced API resources whose names contain calico|tigera:
kubectl api-resources --verbs=list --namespaced -o name | egrep "calico|tigera"

# List all non-namespaced API resources whose names contain calico|tigera:
kubectl api-resources --verbs=list -o name | egrep "calico|tigera"
# Inspect the calico-node serviceaccount; check its finalizers
# and the conditions under status to locate the fault
kubectl get serviceaccounts calico-node -n calico-system -o yaml

# Remove the CNI configuration files
rm -rf /etc/cni/net.d/

# Flush Calico (bird) routes
ip route flush proto bird

# Delete Calico-related interfaces
ip link list | grep cali | awk '{print $2}' | cut -c 1-15 | xargs -I {} ip link delete {}

# Unload the ipip module
modprobe -r ipip

# Strip Calico chains and rules from iptables and restore the rest
iptables-save | grep -vi cali | iptables-restore

# Clear ipvsadm rules
ipvsadm -C

# Force-finalize a namespace stuck in Terminating (empty spec.finalizers in tmp.json first):
kubectl get ns calico-system -o json > tmp.json
kubectl proxy &
curl -k -H "Content-Type: application/json" -X PUT --data-binary @tmp.json http://127.0.0.1:8001/api/v1/namespaces/calico-system/finalize

########## Using Cilium
export http_proxy=http://192.168.9.20:1081;export https_proxy=http://192.168.9.20:1081;
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
sh get_helm.sh

helm completion bash > /etc/bash_completion.d/helm

helm repo add stable https://charts.helm.sh/stable
helm repo add gitlab https://charts.gitlab.io/
helm repo add elastic https://helm.elastic.co
helm repo add pingcap https://charts.pingcap.org/
helm repo add emqx https://repos.emqx.io/charts
helm repo add apache https://pulsar.apache.org/charts
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo add helm-openldap https://jp-gouin.github.io/helm-openldap/
helm repo add istio https://istio-release.storage.googleapis.com/charts
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo add metrics-server https://kubernetes-sigs.github.io/metrics-server/
helm repo add jenkins https://charts.jenkins.io
helm repo add gitlab-jh https://charts.gitlab.cn
helm repo add percona https://percona.github.io/percona-helm-charts/
helm repo add azure http://mirror.azure.cn/kubernetes/charts
helm repo add cilium https://helm.cilium.io/

helm repo update
helm search repo cilium
helm  pull cilium/cilium
tar xf cilium-1.16.0.tgz

## Replace the kube-proxy component
kubeadm init --skip-phases=addon/kube-proxy --config=kubeadm-vimll.yaml
kubectl -n kube-system delete ds kube-proxy
kubectl -n kube-system delete cm kube-proxy
iptables-save | grep -v KUBE | iptables-restore

helm install cilium ./cilium \
   --namespace kube-system \
   --set ipam.operator.clusterPoolIPv4PodCIDRList="10.244.0.0/16" \
   --set operator.replicas=1 \
   --set k8sServiceHost=192.168.9.20 \
   --set k8sServicePort=6443 \
   --set hubble.relay.enabled=true \
   --set hubble.ui.enabled=true \
   --set autoDirectNodeRoutes=true \
   --set ipv4NativeRoutingCIDR=10.244.0.0/16 \
   --set kubeProxyReplacement=true \
   --set routingMode=native \
   --set enableIPv4Masquerade=true \
   --set bpf.masquerade=true \
   --set loadBalancer.mode=dsr \
   --set loadBalancer.acceleration=best-effort \
   --set l2announcements.enabled=true \
   --set rollOutCiliumPods=true \
   --set operator.rollOutPods=true

kubectl exec -it -n kube-system cilium-822s8 -- cilium status | grep KubeProxyReplacement

# L2 announcements: a LoadBalancer IP pool plus an announcement policy
apiVersion: cilium.io/v2alpha1
kind: CiliumLoadBalancerIPPool
metadata:
  name: data-plane
spec:
  blocks:
    - start: "192.168.9.90"
      stop: "192.168.9.100"
---
apiVersion: cilium.io/v2alpha1
kind: CiliumL2AnnouncementPolicy
metadata:
  name: default-policy
spec:
  # interfaces:
  # - ^enp\ds\d+ # no need to set this explicitly
  # externalIPs: true # the cluster has no external IPs
  loadBalancerIPs: true
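# Save the two resources above (cilium-l2.yaml is a name chosen here), apply, and verify:
kubectl apply -f cilium-l2.yaml
kubectl get ciliumloadbalancerippools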

## Throughput test
cat > iperf3.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: iperf3
  labels:
    app: iperf3
spec:
  minReadySeconds: 10
  replicas: 2
  revisionHistoryLimit: 5
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
    type: RollingUpdate
  selector:
    matchLabels:
      app: iperf3
  template:
    metadata:
      labels:
        app: iperf3
    spec:
      containers:
      - name: iperf3
        image: clearlinux/iperf:3
        command: ['/bin/sh', '-c', 'sleep 1d']
        ports:
        - containerPort: 5201
EOF
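# Deploy the test pair first:
kubectl apply -f iperf3.yaml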

kubectl get po -o wide
NAME                      READY   STATUS    RESTARTS   AGE   IP             NODE               
iperf3-566bccf8df-hbsv9   1/1     Running   0          45s   10.244.0.114   vimller-anolisos
iperf3-566bccf8df-jzz8k   1/1     Running   0          45s   10.244.0.214   vimller-anolisos

kubectl exec -it iperf3-566bccf8df-hbsv9 -- iperf3 -s
kubectl exec -it iperf3-566bccf8df-jzz8k -- iperf3 -c 10.244.0.114  -f M

kubectl delete -f iperf3.yaml

kubectl exec -it -n kube-system cilium-cq4sd -- cilium status
kubectl exec -it -n kube-system cilium-cq4sd -- cilium service list

helm pull metrics-server/metrics-server

# values.yaml overrides for the chart:
defaultArgs:
  - --secure-port=10250
  - --cert-dir=/tmp
  - --kubelet-use-node-status-port
  - --metric-resolution=15s
  - --kubelet-insecure-tls
  - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
  - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
  - --requestheader-username-headers=X-Remote-User
  - --requestheader-group-headers=X-Remote-Group
  - --requestheader-extra-headers-prefix=X-Remote-Extra-

helm upgrade --install metrics-server ./metrics-server --set image.repository=d.vimll.com:9888/root/public/metrics-server/metrics-server -n kube-system

# Then mount the cluster CA into the Deployment (added via kubectl edit below):
        - name: ca-ssl
          mountPath: /etc/kubernetes/pki
      volumes:
      - name: ca-ssl
        hostPath:
          path: /etc/kubernetes/pki

kubectl edit deployments.apps -n kube-system metrics-server
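# After the edit rolls out, the metrics API should answer:
kubectl -n kube-system rollout status deploy/metrics-server
kubectl top nodes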