# 🟩 Basic configuration
```bash
# Rocky Linux network configuration
nmcli con mod ens18 ipv4.addresses 172.10.10.185/24 && nmcli con down ens18 && nmcli con up ens18
# /etc/hosts configuration
cat >> /etc/hosts << EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
172.10.10.180 lb-vip
172.10.10.181 k8s-master01
172.10.10.182 k8s-master02
172.10.10.183 k8s-master03
172.10.10.184 k8s-node01
172.10.10.185 k8s-node02
EOF
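# Optional sanity check: spot-check that the new entries resolve locally
getent hosts k8s-master01 lb-vip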
# Set the hostname (replace xx with this node's name from the table above)
hostnamectl set-hostname xx
# SELinux
# Disable permanently
sed -ri 's#(SELINUX=)enforcing#\1disabled#g' /etc/selinux/config
# Disable temporarily
setenforce 0
# Check status
getenforce
# Disable swap
# Temporarily
swapoff -a
# Permanently (comment out swap entries in /etc/fstab)
sed -i 's/.*swap.*/#&/' /etc/fstab
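# Optional: verify swap is fully off (swapon prints nothing)
swapon --show
free -h | grep -i Swap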
# Disable the firewall
systemctl disable --now firewalld.service
systemctl status firewalld.service
# Switch to the Aliyun yum mirror
sed -e 's|^mirrorlist=|#mirrorlist=|g' \
-e 's|^#baseurl=http://dl.rockylinux.org/$contentdir|baseurl=https://mirrors.aliyun.com/rockylinux|g' \
-i.bak \
/etc/yum.repos.d/[Rr]ocky*.repo
# Refresh the metadata cache
dnf makecache
# Passwordless SSH login
ssh-keygen -f /root/.ssh/id_rsa -P '' # generate an SSH key pair
export IP="172.10.10.185" # space-separated list of hosts to receive the key
export SSHPASS=test4399
for HOST in $IP; do
  sshpass -e ssh-copy-id -o StrictHostKeyChecking=no $HOST
done
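# Optional: confirm passwordless login works before the scp/ssh steps below
for HOST in $IP; do ssh -o BatchMode=yes $HOST hostname; done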
# Time synchronization
# Install chrony
dnf install chrony -y
# Append the Aliyun NTP servers to /etc/chrony.conf (equivalent to editing it by hand)
cat >> /etc/chrony.conf << EOF
pool ntp1.aliyun.com iburst
pool ntp2.aliyun.com iburst
pool cn.pool.ntp.org iburst
EOF
# Enable on boot and start
systemctl enable --now chronyd
# Verify
chronyc sources
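# Optional: confirm the clock is actually synchronizing
chronyc tracking | grep -E 'Leap status|System time'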
# Kernel parameters
# k8s sysctl settings
cat >> /etc/sysctl.d/k8s.conf << EOF
# Kernel parameter tuning
vm.swappiness=0
# Make bridged traffic pass through iptables/netfilter
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
# Load the bridge-filter modules on boot
cat <<EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
# Apply all sysctl settings
sysctl --system
# Verify
lsmod | grep br_netfilter
# Output like the following indicates success
# br_netfilter 32768 0
# bridge 303104 1 br_netfilter
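# Optional: confirm the sysctl values took effect
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward vm.swappiness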
# Configure IPVS
# Install ipset and ipvsadm
dnf install ipset ipvsadm -y
# Write the modules to load into a modules-load.d file
cat <<EOF | sudo tee /etc/modules-load.d/ipvs.conf
overlay
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
modprobe overlay
modprobe ip_vs && modprobe ip_vs_rr && modprobe ip_vs_wrr && modprobe ip_vs_sh && modprobe nf_conntrack
# Check that the modules loaded (nf_conntrack_ipv4 is merged into nf_conntrack on current kernels)
lsmod | grep -e ip_vs -e nf_conntrack
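# Note: ipvsadm output stays empty until kube-proxy (configured later) starts in ipvs mode
ipvsadm -Ln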
```

# 🟩 Docker installation and configuration

```bash
# Install and configure Docker
# Add the Aliyun Docker repository
dnf config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Point the repo at the mirror
sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
# Refresh repo metadata
dnf makecache
# Install the latest version
dnf install docker-ce -y
# Daemon config (mkdir in case the package did not create /etc/docker)
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": [
  ],
  "max-concurrent-downloads": 10,
  "log-driver": "json-file",
  "log-level": "warn",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
  },
  "data-root": "/var/lib/docker",
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
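# Optional: confirm Docker is up and picked up the systemd cgroup driver
docker info 2>/dev/null | grep -i 'cgroup driver'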
# cri-dockerd runtime (CRI shim for Docker)
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.20/cri-dockerd-0.3.20.amd64.tgz
tar -zxvf cri-dockerd-0.3.20.amd64.tgz
# Copy into place and make executable
cp cri-dockerd/cri-dockerd /usr/bin/ && chmod +x /usr/bin/cri-dockerd
# systemd unit cri-docker.service
cat <<"EOF" > /usr/lib/systemd/system/cri-docker.service
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.k8s.io/pause:3.10.1
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
# cri-dockerd socket unit
cat <<"EOF" > /usr/lib/systemd/system/cri-docker.socket
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service
[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF
systemctl daemon-reload && systemctl enable cri-docker
systemctl start cri-docker
# Check that it is running
systemctl is-active cri-docker # "active" means it started correctly
# If it fails to start, inspect the logs:
# journalctl -u cri-docker
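# Optional: the CRI socket that the kubelet will point at should now exist
ls -l /run/cri-dockerd.sock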
```

# 🟩 Nginx

```bash
# Rocky Linux
# Install and configure nginx as a local apiserver load balancer
# Official nginx yum repo
sudo tee /etc/yum.repos.d/nginx.repo << 'EOF'
[nginx-stable]
name=nginx stable repo
baseurl=http://nginx.org/packages/centos/$releasever/$basearch/
gpgcheck=1
enabled=1
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true
EOF
# Install
yum remove nginx -y
yum install nginx -y
# Write the nginx config
echo > /etc/nginx/nginx.conf
cat <<"EOF" > /etc/nginx/nginx.conf
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
include /usr/share/nginx/modules/*.conf;
events {
worker_connections 1024;
}
# 四层负载均衡,为两台Master apiserver组件提供负载均衡
stream {
log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
access_log /var/log/nginx/k8s-access.log main;
upstream k8s-apiserver {
server 172.10.10.181:6443; # master1 apiserver
server 172.10.10.182:6443; # master2 apiserver
server 172.10.10.183:6443; # master3 apiserver
}
server {
listen 127.0.0.1:8443;
proxy_connect_timeout 1s;
proxy_pass k8s-apiserver;
}
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
server {
listen 8000 default_server;
server_name _;
location / {
}
}
}
EOF
# Start nginx
systemctl start nginx && systemctl enable nginx
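# Optional: nginx should now be listening on the stream proxy port (8443) and 8000
ss -lntp | grep -E ':8443|:8000'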
```

# 🟩 Copy files

```bash
# Copy from a master node to the new node
# Copy certificates and kubeconfigs
for NODE in 172.10.10.185; do
  ssh $NODE mkdir -p /etc/kubernetes/pki
  for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig kube-proxy.kubeconfig; do
    scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}
  done
done
# ca.pem 100% 1363 265.5KB/s 00:00
# ca-key.pem 100% 1679 365.8KB/s 00:00
# front-proxy-ca.pem 100% 1094 236.7KB/s 00:00
# bootstrap-kubelet.kubeconfig 100% 2232 1.2MB/s 00:00
# kube-proxy.kubeconfig
# Copy the Kubernetes binaries
[root@k8s-master01 ~]# scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} 172.10.10.185:/usr/local/bin/
# kubelet 100% 78MB 363.2MB/s 00:00
# kubectl 100% 57MB 301.9MB/s 00:00
# kube-apiserver 100% 93MB 174.5MB/s 00:00
# kube-controller-manager 100% 87MB 455.3MB/s 00:00
# kube-scheduler 100% 66MB 354.9MB/s 00:00
# kube-proxy 100% 67MB 385.8MB/s 00:00
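# Optional: confirm the binaries arrived intact and execute
ssh 172.10.10.185 /usr/local/bin/kubelet --version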
```

# 🟩 kubelet service

```bash
# Using Docker (via cri-dockerd) as the runtime
# Create the kubelet systemd unit
cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=network-online.target firewalld.service cri-docker.service docker.socket containerd.service
Wants=network-online.target
Requires=cri-docker.socket containerd.service
[Service]
ExecStart=/usr/local/bin/kubelet \\
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \\
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
--config=/etc/kubernetes/kubelet-conf.yml \\
--container-runtime-endpoint=unix:///run/cri-dockerd.sock \\
--node-labels=node.kubernetes.io/node=
[Install]
WantedBy=multi-user.target
EOF
# Create the kubelet config file
cat > /etc/kubernetes/kubelet-conf.yml <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
  - 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
  - pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF
systemctl daemon-reload && systemctl enable --now kubelet.service
systemctl restart kubelet.service
systemctl status kubelet.service
#journalctl -n 100 -u kubelet.service --no-pager
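# Optional: if the node never appears, check for pending bootstrap CSRs on a master
kubectl get csr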
#Check node status
#Nodes typically become Ready after a few minutes
kubectl get node
#NAME STATUS ROLES AGE VERSION
#k8s-master01 Ready <none> 19h v1.33.6
#k8s-master02 Ready <none> 19h v1.33.6
#k8s-master03 Ready <none> 19h v1.33.6
#k8s-node02 Ready <none> 3m6s v1.33.6
#localhost.localdomain Ready <none> 12h v1.33.6
```

# 🟩 kube-proxy service

```bash
# systemd unit file
cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-proxy \\
--config=/etc/kubernetes/kube-proxy.yaml \\
--cluster-cidr=172.16.0.0/12 \\
--v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
# kube-proxy configuration
cat > /etc/kubernetes/kube-proxy.yaml << EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 172.16.0.0/12
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF
systemctl daemon-reload && systemctl enable --now kube-proxy.service
systemctl restart kube-proxy.service
systemctl status kube-proxy.service
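# Optional: confirm kube-proxy is serving in ipvs mode (the metrics endpoint reports the proxy mode)
curl -s 127.0.0.1:10249/proxyMode
ipvsadm -Ln | head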
```

# 🟩 Verification

```bash
[root@k8s-master01 ~]# kubectl get pod -A -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
calico-apiserver calico-apiserver-6f745ff798-7zjrh 1/1 Running 0 18h 172.18.195.1 k8s-master03 <none> <none>
calico-apiserver calico-apiserver-6f745ff798-wtmwt 1/1 Running 0 18h 172.18.195.5 k8s-master03 <none> <none>
calico-system calico-kube-controllers-8b4974bfc-lrrw2 1/1 Running 0 18h 172.18.195.4 k8s-master03 <none> <none>
calico-system calico-node-2txhz 1/1 Running 0 18h 172.10.10.183 k8s-master03 <none> <none>
calico-system calico-node-5vsx6 1/1 Running 0 18h 172.10.10.182 k8s-master02 <none> <none>
calico-system calico-node-hf4hb 1/1 Running 0 18h 172.10.10.181 k8s-master01 <none> <none>
calico-system calico-node-rk2jd 1/1 Running 0 12h 172.10.10.184 localhost.localdomain <none> <none>
calico-system calico-node-wsppx 1/1 Running 0 7m48s 172.10.10.185 k8s-node02 <none> <none>
calico-system calico-typha-7f75f4579d-ncn4j 1/1 Running 0 7m42s 172.10.10.185 k8s-node02 <none> <none>
calico-system calico-typha-7f75f4579d-ptdwp 1/1 Running 0 18h 172.10.10.182 k8s-master02 <none> <none>
calico-system calico-typha-7f75f4579d-vdhx2 1/1 Running 0 18h 172.10.10.181 k8s-master01 <none> <none>
calico-system csi-node-driver-c2fsn 2/2 Running 0 12h 172.22.102.129 localhost.localdomain <none> <none>
calico-system csi-node-driver-glbhb 2/2 Running 0 7m48s 172.27.14.193 k8s-node02 <none> <none>
calico-system csi-node-driver-hlrlp 2/2 Running 0 18h 172.25.92.65 k8s-master02 <none> <none>
calico-system csi-node-driver-xr626 2/2 Running 0 18h 172.18.195.2 k8s-master03 <none> <none>
calico-system csi-node-driver-z58nf 2/2 Running 0 18h 172.25.244.193 k8s-master01 <none> <none>
calico-system goldmane-85c8f6d476-r79q7 1/1 Running 0 18h 172.18.195.3 k8s-master03 <none> <none>
calico-system whisker-798c9b6d7c-jdqv4 2/2 Running 0 18h 172.25.244.194 k8s-master01 <none> <none>
default busybox 1/1 Running 17 (31m ago) 17h 172.25.92.69 k8s-master02 <none> <none>
default nginx-deployment-6f6666c89d-9dpc4 1/1 Running 0 12h 172.18.195.8 k8s-master03 <none> <none>
default nginx-deployment-6f6666c89d-mgcbs 1/1 Running 0 12h 172.25.244.199 k8s-master01 <none> <none>
default nginx-deployment-6f6666c89d-srhg6 1/1 Running 0 12h 172.22.102.130 localhost.localdomain <none> <none>
default nginx-deployment-6f6666c89d-w9lqm 1/1 Running 0 12h 172.25.92.72 k8s-master02 <none> <none>
kube-system coredns-79dd457468-7tlsx 1/1 Running 0 17h 172.25.92.66 k8s-master02 <none> <none>
kube-system kubernetes-dashboard-api-5d789cb7b4-wfgkw 1/1 Running 0 17h 172.18.195.7 k8s-master03 <none> <none>
kube-system kubernetes-dashboard-auth-6557659cbf-rrbzd 1/1 Running 0 17h 172.25.244.197 k8s-master01 <none> <none>
kube-system kubernetes-dashboard-kong-76f95967c6-r49zd 1/1 Running 0 17h 172.25.244.196 k8s-master01 <none> <none>
kube-system kubernetes-dashboard-metrics-scraper-74d8cb664-99cm7 1/1 Running 0 17h 172.25.244.198 k8s-master01 <none> <none>
kube-system kubernetes-dashboard-web-7d564d6ddf-l6lzx 1/1 Running 0 17h 172.25.92.71 k8s-master02 <none> <none>
kube-system metrics-server-6b4c6778fb-drqfx 1/1 Running 0 17h 172.25.92.68 k8s-master02 <none> <none>
tigera-operator tigera-operator-755d956888-hzpsh 1/1 Running 0 18h 172.10.10.183 k8s-master03 <none> <none>
[root@k8s-master01 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready <none> 19h v1.33.6
k8s-master02 Ready <none> 19h v1.33.6
k8s-master03 Ready <none> 19h v1.33.6
k8s-node02 Ready <none> 7m50s v1.33.6
localhost.localdomain Ready <none> 12h v1.33.6
```