Kubernetes (k8s) binary installation

2025-12-13 13:43:19
Reference: https://github.com/cby-chen/Kubernetes/tree/main/doc
Architecture:
    3-node etcd cluster, 3 masters + 2 worker nodes; etcd shares hosts with the masters
    High availability via an nginx upstream: every kubeconfig points at https://127.0.0.1:8443, which a local nginx stream proxy forwards to the three apiservers
    Docker as the container runtime (via cri-dockerd)

🟩 Downloads
```bash 
#cfssl (certificate tooling)
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.5/cfssl-certinfo_1.6.5_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.5/cfssljson_1.6.5_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.5/cfssl_1.6.5_linux_amd64

#etcd
wget https://github.com/etcd-io/etcd/releases/download/v3.6.6/etcd-v3.6.6-linux-amd64.tar.gz

#kubernetes https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.33.md
wget https://dl.k8s.io/v1.33.6/kubernetes-server-linux-amd64.tar.gz
    #https://dl.k8s.io/v1.34.2/kubernetes-server-linux-amd64.tar.gz
#wget https://dl.k8s.io/v1.33.6/kubernetes-client-linux-amd64.tar.gz
#wget https://dl.k8s.io/v1.33.6/kubernetes-node-linux-amd64.tar.gz

#container runtime: cri-dockerd
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.20/cri-dockerd-0.3.20.amd64.tgz
```

🟩 Network configuration

# Internet access through a soft router
    The point of the soft router is to avoid the mirror/registry switching steps below and to simplify installation:
    only the system network config needs changing, with the gateway and DNS both pointed at the soft router's IP (see the nmcli example below).
    Installing the immortalWRT soft router:
        omitted

#Rocky Linux 9: change the IP address
nmcli con mod ens18 ipv4.addresses 172.10.10.185/24 && nmcli con down ens18 && nmcli con up ens18
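
A hedged companion to the line above for the soft-router setup: pointing the default gateway and DNS at the router as well. The router address 172.10.10.1 is an assumption; substitute your immortalWRT IP.
```bash
# Assumed soft-router address; replace with the actual immortalWRT IP
nmcli con mod ens18 ipv4.gateway 172.10.10.1 ipv4.dns 172.10.10.1 ipv4.method manual
nmcli con down ens18 && nmcli con up ens18
```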

🟩 Base configuration


#/etc/hosts entries
cat >> /etc/hosts << EOF
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

172.10.10.180 lb-vip
172.10.10.181 k8s-master01
172.10.10.182 k8s-master02
172.10.10.183 k8s-master03
172.10.10.184 k8s-node01
172.10.10.185 k8s-node02
EOF

#Set the hostname
hostnamectl set-hostname xx
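
If the SSH trust configured below is already in place, a small sketch for setting every hostname in one pass (the name/IP mapping is taken from the /etc/hosts block above; adjust as needed):
```bash
# Assumes root SSH access to all nodes; names/IPs match /etc/hosts above
declare -A NODES=(
  [172.10.10.181]=k8s-master01
  [172.10.10.182]=k8s-master02
  [172.10.10.183]=k8s-master03
  [172.10.10.184]=k8s-node01
  [172.10.10.185]=k8s-node02
)
for ip in "${!NODES[@]}"; do
  ssh "$ip" "hostnamectl set-hostname ${NODES[$ip]}"
done
```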

# SELinux
    #Disable permanently
        sed -ri 's#(SELINUX=)enforcing#\1disabled#g' /etc/selinux/config
    #Disable temporarily
        setenforce 0
    #Check status
        getenforce

#Disable swap
    # Disable temporarily
    swapoff -a
    # Disable permanently
    sed -i 's/.*swap.*/#&/' /etc/fstab

#Firewall
systemctl disable --now firewalld.service
systemctl status firewalld.service

#Switch the yum repos to the Aliyun mirror
sed -e 's|^mirrorlist=|#mirrorlist=|g' \
    -e 's|^#baseurl=http://dl.rockylinux.org/$contentdir|baseurl=https://mirrors.aliyun.com/rockylinux|g' \
    -i.bak \
    /etc/yum.repos.d/[Rr]ocky*.repo

# Refresh the metadata cache
dnf makecache

#Install common tools
yum -y install wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git tar curl sshpass ipvsadm ipset sysstat conntrack libseccomp 

#Passwordless SSH between nodes (optional)
ssh-keygen -f /root/.ssh/id_rsa -P '' #generate an SSH key pair
export IP="172.10.10.182 172.10.10.183 172.10.10.184 172.10.10.185"
export SSHPASS=123456
for HOST in $IP;do
     sshpass -e ssh-copy-id -o StrictHostKeyChecking=no $HOST
done

#Time synchronization
# Install chrony
dnf install chrony -y
# Edit the config file (vim /etc/chrony.conf) and add:
pool ntp1.aliyun.com iburst
pool ntp2.aliyun.com iburst
pool cn.pool.ntp.org iburst
# Enable at boot
systemctl enable --now chronyd
# Test
chronyc sources

#Configure ulimit
ulimit -SHn 65535
cat >> /etc/security/limits.conf <<EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF

#Kernel parameters for Kubernetes
cat >> /etc/sysctl.d/k8s.conf << EOF
#kernel tuning
vm.swappiness=0
#make bridged traffic pass through iptables/netfilter
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

# Load the overlay and bridge-filter modules
cat <<EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

modprobe overlay
modprobe br_netfilter

# Reload sysctl settings
sysctl --system
# Verify
lsmod | grep br_netfilter
    # output similar to the following means success
    # br_netfilter           32768  0
    # bridge                303104  1 br_netfilter

#Configure IPVS
# Install ipset and ipvsadm
dnf install ipset ipvsadm -y
# Write the modules to load into a modules-load.d file

cat >> /etc/modules-load.d/ipvs.conf <<EOF 
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF

modprobe overlay
modprobe ip_vs && modprobe ip_vs_rr && modprobe ip_vs_wrr && modprobe ip_vs_sh && modprobe nf_conntrack

systemctl restart systemd-modules-load.service

#Check that the modules loaded
lsmod | grep -e ip_vs -e nf_conntrack
lsmod | grep -e ip_vs -e nf_conntrack_ipv4   # older kernels only; on Rocky 9 nf_conntrack_ipv4 is merged into nf_conntrack

#Create directories
mkdir -p /etc/cni/net.d
mkdir -p /etc/containerd/certs.d/docker.io
mkdir -p /etc/docker/
mkdir -p /etc/etcd/ssl
mkdir -p /etc/kubernetes/manifests/
mkdir -p /etc/kubernetes/pki/etcd
mkdir -p /etc/systemd/system/containerd.service.d
mkdir -p /etc/systemd/system/kubelet.service.d
mkdir -p /opt/cni/bin
mkdir -p /root/.kube
mkdir -p /var/lib/kubelet
mkdir -p /var/log/kubernetes

#cfssl
mv cfssl_1.6.5_linux_amd64 /usr/local/bin/cfssl
mv cfssljson_1.6.5_linux_amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_1.6.5_linux_amd64 /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo

🟩 Docker installation and configuration

#Install and configure Docker as the runtime
# Add the Aliyun docker-ce repo
dnf config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Point the repo at the mirror
sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
# Refresh repo metadata
dnf makecache 

# Install the latest version
dnf install docker-ce -y

cat > /etc/docker/daemon.json <<EOF
{
 "registry-mirrors": [
  ],
  "max-concurrent-downloads": 10,
  "log-driver": "json-file",
  "log-level": "warn",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
    },
  "data-root": "/var/lib/docker",
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

systemctl daemon-reload && systemctl restart docker && systemctl enable docker

#🟢 Container runtime: cri-dockerd
tar -zxvf cri-dockerd-0.3.20.amd64.tgz
cp cri-dockerd/cri-dockerd /usr/bin/  &&  chmod +x   /usr/bin/cri-dockerd

#🟢 systemd unit: cri-docker.service
cat <<"EOF" > /usr/lib/systemd/system/cri-docker.service
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=k8s.gcr.io/pause:3.10.1
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF

#🟢 cri-docker socket unit
cat <<"EOF" > /usr/lib/systemd/system/cri-docker.socket
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service
[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF

systemctl daemon-reload && systemctl enable cri-docker

systemctl start cri-docker
# Check that it started
systemctl is-active cri-docker # "active" means it is running normally

# If startup fails, inspect the logs:
# journalctl -u cri-docker

🟩 etcd cluster installation

#Binary installation of a three-node etcd cluster
#Certificates are generated on master01 and then distributed to the other nodes

tar -zxvf etcd-v3.6.6-linux-amd64.tar.gz 
cp -ar etcd-v3.6.6-linux-amd64/etcd* /usr/local/bin/

etcdctl version

#🟢 Write the config files needed to generate the certificates
cat > ca-config.json << EOF 
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
EOF

cat > etcd-ca-csr.json  << EOF 
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF

cat > etcd-csr.json << EOF 
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ]
}
EOF

mkdir /etc/etcd/ssl -p

cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca

cfssl gencert \
   -ca=/etc/etcd/ssl/etcd-ca.pem \
   -ca-key=/etc/etcd/ssl/etcd-ca-key.pem \
   -config=ca-config.json \
   -hostname=127.0.0.1,k8s-master01,k8s-master02,k8s-master03,172.10.10.181,172.10.10.182,172.10.10.183 \
   -profile=kubernetes \
   etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd

#🟢 Copy the certificates to the other etcd nodes (see the sketch below)
    pass
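
A minimal sketch of that distribution step, assuming the same /etc/etcd/ssl layout and root SSH access to master02/master03:
```bash
# Copy the etcd CA and server cert/key to the other masters
for NODE in k8s-master02 k8s-master03; do
  ssh "$NODE" "mkdir -p /etc/etcd/ssl"
  scp /etc/etcd/ssl/etcd-ca.pem /etc/etcd/ssl/etcd-ca-key.pem \
      /etc/etcd/ssl/etcd.pem /etc/etcd/ssl/etcd-key.pem \
      "$NODE":/etc/etcd/ssl/
done
```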

#🟢 master01 configuration
cat > /etc/etcd/etcd.config.yml << EOF 
name: 'k8s-master01'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://172.10.10.181:2380'
listen-client-urls: 'https://172.10.10.181:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://172.10.10.181:2380'
advertise-client-urls: 'https://172.10.10.181:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://172.10.10.181:2380,k8s-master02=https://172.10.10.182:2380,k8s-master03=https://172.10.10.183:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF

#🟢 master02 configuration (see the sketch below)
    pass
#🟢 master03 configuration
    pass
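
Only a handful of fields differ between the three etcd configs: `name` plus the node's own IP in the listen/advertise URLs; `initial-cluster` and the token stay identical. A hedged sketch that derives master02's file from master01's, assuming master01's /etc/etcd/etcd.config.yml has already been copied over:
```bash
# Run on k8s-master02; for master03 use k8s-master03 / 172.10.10.183 instead
sed -i \
  -e "s/^name: .*/name: 'k8s-master02'/" \
  -e '/^listen-peer-urls/s/172.10.10.181/172.10.10.182/' \
  -e '/^listen-client-urls/s/172.10.10.181/172.10.10.182/' \
  -e '/^initial-advertise-peer-urls/s/172.10.10.181/172.10.10.182/' \
  -e '/^advertise-client-urls/s/172.10.10.181/172.10.10.182/' \
  /etc/etcd/etcd.config.yml
```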

#🟢 Create the etcd systemd service (on all master nodes)
cat > /usr/lib/systemd/system/etcd.service << EOF

[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target

[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Alias=etcd3.service

EOF

mkdir /etc/kubernetes/pki/etcd && ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/

systemctl daemon-reload && systemctl enable --now etcd.service 
systemctl restart etcd.service 
sleep 3 && systemctl status etcd.service

#🟢 Verify
ETCDCTL_API=3 && /usr/local/bin/etcdctl --write-out=table --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem --endpoints=https://172.10.10.181:2379,https://172.10.10.182:2379,https://172.10.10.183:2379   endpoint status
   # - --endpoints: the etcd cluster member addresses and ports to connect to
   # - --cacert: path to the CA certificate (/etc/kubernetes/pki/etcd/etcd-ca.pem) used to validate the etcd server certificates
   # - --cert: path to the client certificate (/etc/kubernetes/pki/etcd/etcd.pem) used to authenticate to the etcd servers
   # - --key: path to the private key (/etc/kubernetes/pki/etcd/etcd-key.pem) paired with the client certificate
   # - endpoint status: subcommand that reports the status of each cluster member
   # - --write-out: output format; here, a table

# Optional: inspect the cluster visually with etcd-workbench https://github.com/tzfun/etcd-workbench
# (a Chinese UI language is available)

🟩 nginx installation and configuration

#🟢 Official nginx yum repo
sudo tee /etc/yum.repos.d/nginx.repo << 'EOF'
[nginx-stable]
name=nginx stable repo
baseurl=http://nginx.org/packages/centos/$releasever/$basearch/
gpgcheck=1
enabled=1
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true
EOF

#🟢 Install
yum remove nginx -y
yum install nginx  -y

#🟢 Configure
echo > /etc/nginx/nginx.conf

cat <<"EOF" > /etc/nginx/nginx.conf
  user nginx;
  worker_processes auto;
  error_log /var/log/nginx/error.log;
  pid /run/nginx.pid;

  include /usr/share/nginx/modules/*.conf;

  events {
      worker_connections 1024;
  }

  # Layer-4 load balancing for the three master apiserver instances
  stream {

      log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';

      access_log  /var/log/nginx/k8s-access.log  main;

      upstream k8s-apiserver {
         server 172.10.10.181:6443;   # master1 apiserver
         server 172.10.10.182:6443;   # master2 apiserver
         server 172.10.10.183:6443;   # master3 apiserver
      }

      server {
        listen 127.0.0.1:8443;
        proxy_connect_timeout 1s;
        proxy_pass k8s-apiserver;
      }
  }

  http {
      log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                        '$status $body_bytes_sent "$http_referer" '
                        '"$http_user_agent" "$http_x_forwarded_for"';

      access_log  /var/log/nginx/access.log  main;

      sendfile            on;
      tcp_nopush          on;
      tcp_nodelay         on;
      keepalive_timeout   65;
      types_hash_max_size 2048;

      include             /etc/nginx/mime.types;
      default_type        application/octet-stream;

      server {
          listen       8000 default_server;
          server_name  _;

          location / {
          }
      }
  }

EOF

systemctl enable nginx && nginx -t && systemctl restart nginx 

🟩 Kubernetes certificates

#Generate the Kubernetes certificates on master01

mkdir -p /etc/kubernetes/pki

#🟢 Certificate config files
#ca-csr.json is the CSR (Certificate Signing Request) config used to generate the Kubernetes root CA (Certificate Authority)
cat > ca-csr.json   << EOF 
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF
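
The root CA itself has to be generated from ca-csr.json before it can sign anything below; this mirrors the etcd CA step above and produces the ca.pem / ca-key.pem paths used by every later cfssl gencert call:
```bash
# Generate the Kubernetes root CA (ca.pem / ca-key.pem)
cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca
```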

#apiserver-csr.json is the CSR config used to generate the kube-apiserver server certificate.
cat > apiserver-csr.json << EOF 
{
  "CN": "kube-apiserver",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

#Using the CA cert/key generated above plus apiserver-csr.json and ca-config.json, CFSSL
#issues the kube-apiserver TLS server certificate and key (apiserver.pem, apiserver-key.pem).
#The hostname list contains every IP and name the API server may be reached by.
#10.96.0.1 is the first address of the service CIDR; 172.10.10.180 is reserved as a future HA VIP.
cfssl gencert   \
-ca=/etc/kubernetes/pki/ca.pem   \
-ca-key=/etc/kubernetes/pki/ca-key.pem   \
-config=ca-config.json   \
-hostname=10.96.0.1,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,x.oiox.cn,z.oiox.cn,172.10.10.180,172.10.10.181,172.10.10.182,172.10.10.183,172.10.10.184,172.10.10.185  \
-profile=kubernetes   apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver

#🟢 Generate the apiserver aggregation (front-proxy) certificates

#front-proxy-ca-csr.json is the CSR config for the front-proxy root CA, used by the aggregation layer for authentication
cat > front-proxy-ca-csr.json  << EOF 
{
  "CN": "kubernetes",
  "key": {
     "algo": "rsa",
     "size": 2048
  },
  "ca": {
    "expiry": "876000h"
  }
}
EOF

#Initialize the front-proxy root CA and generate its certificate and key (front-proxy-ca.pem, front-proxy-ca-key.pem).
cfssl gencert   -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca 

#front-proxy-client-csr.json is the CSR config for the front-proxy client certificate, which the API server uses when calling aggregated API services
cat > front-proxy-client-csr.json  << EOF 
{
  "CN": "front-proxy-client",
  "key": {
     "algo": "rsa",
     "size": 2048
  }
}
EOF

#Using the front-proxy CA cert/key plus front-proxy-client-csr.json and ca-config.json, generate the front-proxy client certificate and key (front-proxy-client.pem, front-proxy-client-key.pem).
cfssl gencert  \
-ca=/etc/kubernetes/pki/front-proxy-ca.pem   \
-ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem   \
-config=ca-config.json   \
-profile=kubernetes   front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client

#🟢 Generate the controller-manager certificate
#manager-csr.json is the CSR config for the kube-controller-manager client certificate. The CN (Common Name) and O (Organization) fields are what RBAC authorization keys on
cat > manager-csr.json << EOF 
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

#Using the Kubernetes root CA cert/key plus manager-csr.json and ca-config.json, generate the kube-controller-manager client TLS certificate and key (controller-manager.pem, controller-manager-key.pem).
cfssl gencert \
   -ca=/etc/kubernetes/pki/ca.pem \
   -ca-key=/etc/kubernetes/pki/ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager

#In controller-manager.kubeconfig, define a cluster named kubernetes with the CA certificate and API server address, embedding the certificate data.
kubectl config set-cluster kubernetes \
     --certificate-authority=/etc/kubernetes/pki/ca.pem \
     --embed-certs=true \
     --server=https://127.0.0.1:8443 \
     --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig  

#In controller-manager.kubeconfig, create a context named system:kube-controller-manager@kubernetes linking the cluster and user defined here
kubectl config set-context system:kube-controller-manager@kubernetes \
    --cluster=kubernetes \
    --user=system:kube-controller-manager \
    --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig     

#In controller-manager.kubeconfig, define a user named system:kube-controller-manager with its client certificate and key, embedding the certificate data.
kubectl config set-credentials system:kube-controller-manager \
   --client-certificate=/etc/kubernetes/pki/controller-manager.pem \
   --client-key=/etc/kubernetes/pki/controller-manager-key.pem \
   --embed-certs=true \
   --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

#Set the current context of controller-manager.kubeconfig to system:kube-controller-manager@kubernetes so kube-controller-manager knows how to reach and authenticate to the API server.
kubectl config use-context system:kube-controller-manager@kubernetes \
     --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

#🟢 Generate the kube-scheduler certificate
#scheduler-csr.json is the CSR config for the kube-scheduler client certificate. CN and O are again used for RBAC authorization.
cat > scheduler-csr.json << EOF 
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

#Using the Kubernetes root CA cert/key plus scheduler-csr.json and ca-config.json, generate the kube-scheduler client TLS certificate and key (scheduler.pem, scheduler-key.pem).
cfssl gencert \
   -ca=/etc/kubernetes/pki/ca.pem \
   -ca-key=/etc/kubernetes/pki/ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler

#In scheduler.kubeconfig, define a cluster named kubernetes
kubectl config set-cluster kubernetes \
     --certificate-authority=/etc/kubernetes/pki/ca.pem \
     --embed-certs=true \
     --server=https://127.0.0.1:8443 \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

#In scheduler.kubeconfig, define a user named system:kube-scheduler.
kubectl config set-credentials system:kube-scheduler \
     --client-certificate=/etc/kubernetes/pki/scheduler.pem \
     --client-key=/etc/kubernetes/pki/scheduler-key.pem \
     --embed-certs=true \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

#In scheduler.kubeconfig, create a context named system:kube-scheduler@kubernetes
kubectl config set-context system:kube-scheduler@kubernetes \
     --cluster=kubernetes \
     --user=system:kube-scheduler \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig     

#Set the current context of scheduler.kubeconfig so kube-scheduler can connect and authenticate.
kubectl config use-context system:kube-scheduler@kubernetes \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

#🟢 Generate the admin certificate
#admin-csr.json is the CSR config for the administrator (admin) client certificate. O: system:masters is a special group with cluster-wide admin privileges
cat > admin-csr.json << EOF 
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:masters",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

#Using the Kubernetes root CA cert/key plus admin-csr.json and ca-config.json, generate the admin client TLS certificate and key (admin.pem, admin-key.pem).
cfssl gencert \
   -ca=/etc/kubernetes/pki/ca.pem \
   -ca-key=/etc/kubernetes/pki/ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin

#In admin.kubeconfig, define a cluster named kubernetes
kubectl config set-cluster kubernetes     \
  --certificate-authority=/etc/kubernetes/pki/ca.pem     \
  --embed-certs=true     \
  --server=https://127.0.0.1:8443     \
  --kubeconfig=/etc/kubernetes/admin.kubeconfig

#In admin.kubeconfig, define a user named kubernetes-admin
kubectl config set-credentials kubernetes-admin  \
  --client-certificate=/etc/kubernetes/pki/admin.pem     \
  --client-key=/etc/kubernetes/pki/admin-key.pem     \
  --embed-certs=true     \
  --kubeconfig=/etc/kubernetes/admin.kubeconfig  

#In admin.kubeconfig, create a context named kubernetes-admin@kubernetes.
kubectl config set-context kubernetes-admin@kubernetes    \
  --cluster=kubernetes     \
  --user=kubernetes-admin     \
  --kubeconfig=/etc/kubernetes/admin.kubeconfig  

#Set the current context of admin.kubeconfig so administrators can manage the cluster with kubectl.
kubectl config use-context kubernetes-admin@kubernetes  --kubeconfig=/etc/kubernetes/admin.kubeconfig  

#🟢 Create the kube-proxy certificate
#kube-proxy-csr.json is the CSR config for the kube-proxy client certificate. CN and O are used for kube-proxy's RBAC identity
cat > kube-proxy-csr.json  << EOF 
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-proxy",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

#Using the Kubernetes root CA cert/key plus kube-proxy-csr.json and ca-config.json,
#generate the kube-proxy client TLS certificate and key (kube-proxy.pem, kube-proxy-key.pem).
cfssl gencert \
   -ca=/etc/kubernetes/pki/ca.pem \
   -ca-key=/etc/kubernetes/pki/ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   kube-proxy-csr.json | cfssljson -bare /etc/kubernetes/pki/kube-proxy

#In kube-proxy.kubeconfig, define a cluster named kubernetes
kubectl config set-cluster kubernetes     \
  --certificate-authority=/etc/kubernetes/pki/ca.pem     \
  --embed-certs=true     \
  --server=https://127.0.0.1:8443     \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

#In kube-proxy.kubeconfig, define a user named kube-proxy
kubectl config set-credentials kube-proxy  \
  --client-certificate=/etc/kubernetes/pki/kube-proxy.pem     \
  --client-key=/etc/kubernetes/pki/kube-proxy-key.pem     \
  --embed-certs=true     \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig     

#In kube-proxy.kubeconfig, create a context named kube-proxy@kubernetes.
kubectl config set-context kube-proxy@kubernetes    \
  --cluster=kubernetes     \
  --user=kube-proxy     \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig  

#Set the current context of kube-proxy.kubeconfig so kube-proxy can connect and authenticate.
kubectl config use-context kube-proxy@kubernetes  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig  

#🟢 Create the ServiceAccount signing key pair

#Generate a 2048-bit RSA private key used to sign ServiceAccount tokens.
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048

#Extract the matching public key from sa.key into sa.pub; the API server uses it to verify ServiceAccount tokens
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub

#🟢 Send the certificates to the other master nodes
#Create the directory on the other nodes first:
# mkdir  /etc/kubernetes/pki/ -p

#This loop copies all non-etcd PKI certs/keys plus the key kubeconfig files to the other master nodes for high availability
for NODE in k8s-master02 k8s-master03; do \
  for FILE in $(ls /etc/kubernetes/pki | grep -v etcd); do \
    scp /etc/kubernetes/pki/${FILE} $NODE:/etc/kubernetes/pki/${FILE}; \
  done; \
  for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig; do \
    scp /etc/kubernetes/${FILE} $NODE:/etc/kubernetes/${FILE}; \
  done; \
done

ls /etc/kubernetes/pki/ |wc -l
  #26

🟩 Kubernetes component configuration

#🟢 Create the kube-apiserver service
#All master nodes; only --advertise-address differs per node (see the note below)
#master01 configuration

tar -xf kubernetes-server-linux-amd64.tar.gz --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}

kubelet --version

mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes

cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
      --v=2  \\
      --allow-privileged=true  \\
      --bind-address=0.0.0.0  \\
      --secure-port=6443  \\
      --advertise-address=172.10.10.181 \\
      --service-cluster-ip-range=10.96.0.0/12 \\
      --service-node-port-range=30000-32767  \\
      --etcd-servers=https://172.10.10.181:2379,https://172.10.10.182:2379,https://172.10.10.183:2379 \\
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \\
      --etcd-certfile=/etc/etcd/ssl/etcd.pem  \\
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \\
      --client-ca-file=/etc/kubernetes/pki/ca.pem  \\
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \\
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \\
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \\
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \\
      --service-account-key-file=/etc/kubernetes/pki/sa.pub  \\
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \\
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \\
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \\
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \\
      --authorization-mode=Node,RBAC  \\
      --enable-bootstrap-token-auth=true  \\
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \\
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \\
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \\
      --requestheader-allowed-names=aggregator  \\
      --requestheader-group-headers=X-Remote-Group  \\
      --requestheader-extra-headers-prefix=X-Remote-Extra-  \\
      --requestheader-username-headers=X-Remote-User \\
      --enable-aggregator-routing=true
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target

EOF

systemctl daemon-reload && systemctl enable --now kube-apiserver.service
systemctl restart kube-apiserver.service
systemctl status kube-apiserver.service
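
For master02 and master03 the unit is identical except for --advertise-address; a hedged sketch, assuming the same unit file has been written or copied to those nodes first:
```bash
# On k8s-master02 (use 172.10.10.183 on k8s-master03)
sed -i 's/--advertise-address=172.10.10.181/--advertise-address=172.10.10.182/' \
  /usr/lib/systemd/system/kube-apiserver.service
systemctl daemon-reload && systemctl enable --now kube-apiserver.service
```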

#🟢 Create the kube-controller-manager service
# All master nodes, identical configuration
# 172.16.0.0/12 is the pod CIDR; adjust to your own network plan

cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \\
      --v=2 \\
      --bind-address=0.0.0.0 \\
      --root-ca-file=/etc/kubernetes/pki/ca.pem \\
      --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \\
      --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \\
      --service-account-private-key-file=/etc/kubernetes/pki/sa.key \\
      --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \\
      --leader-elect=true \\
      --use-service-account-credentials=true \\
      --node-monitor-grace-period=40s \\
      --node-monitor-period=5s \\
      --controllers=*,bootstrapsigner,tokencleaner \\
      --allocate-node-cidrs=true \\
      --service-cluster-ip-range=10.96.0.0/12 \\
      --cluster-cidr=172.16.0.0/12 \\
      --node-cidr-mask-size-ipv4=24 \\
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload && systemctl enable --now kube-controller-manager.service
systemctl restart kube-controller-manager.service
sleep 6 && systemctl status kube-controller-manager.service

#🟢 Create the kube-scheduler service
# All master nodes, identical configuration

cat > /usr/lib/systemd/system/kube-scheduler.service << EOF

[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-scheduler \\
      --v=2 \\
      --bind-address=0.0.0.0 \\
      --leader-elect=true \\
      --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

EOF

systemctl daemon-reload && systemctl enable --now kube-scheduler.service
systemctl restart kube-scheduler.service
sleep 6 && systemctl status kube-scheduler.service

🟩 TLS bootstrapping

#🟢 Generate bootstrap-kubelet.kubeconfig

# Define a cluster named "kubernetes" with the CA certificate and the API server address/port, embedding them into bootstrap-kubelet.kubeconfig.
# kubelet uses this kubeconfig to authenticate to the API server during TLS bootstrapping
kubectl config set-cluster kubernetes     \
--certificate-authority=/etc/kubernetes/pki/ca.pem     \
--embed-certs=true     \
--server=https://127.0.0.1:8443     \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

# Generate a random token (token-id.token-secret format)
echo "$(head -c 6 /dev/urandom | md5sum | head -c 6)"."$(head -c 16 /dev/urandom | md5sum | head -c 16)"

# Define a user named tls-bootstrap-token-user carrying the bootstrap token in bootstrap-kubelet.kubeconfig.
kubectl config set-credentials tls-bootstrap-token-user     \
--token=c8ad9c.2e4d610cf3e7426e \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

# Create a context named tls-bootstrap-token-user@kubernetes tying the kubernetes cluster to the tls-bootstrap-token-user credentials
kubectl config set-context tls-bootstrap-token-user@kubernetes     \
--cluster=kubernetes     \
--user=tls-bootstrap-token-user     \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

# kubectl commands run against this kubeconfig will now use that context to talk to the cluster.
kubectl config use-context tls-bootstrap-token-user@kubernetes     \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

# The token also lives in bootstrap.secret.yaml below; if you change it, change it there as well (see the sketch below)
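
A hedged sketch of generating a fresh token and keeping the kubeconfig and the Secret in sync (the variable names are mine; c8ad9c.2e4d610cf3e7426e above and below is just the post's example value):
```bash
# Generate token-id.token-secret and reuse both parts in the two places that must match
TOKEN_ID=$(head -c 6 /dev/urandom | md5sum | head -c 6)
TOKEN_SECRET=$(head -c 16 /dev/urandom | md5sum | head -c 16)

kubectl config set-credentials tls-bootstrap-token-user \
  --token="${TOKEN_ID}.${TOKEN_SECRET}" \
  --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

# Then, in bootstrap.secret.yaml, set:
#   metadata.name: bootstrap-token-${TOKEN_ID}
#   token-id:      ${TOKEN_ID}
#   token-secret:  ${TOKEN_SECRET}
```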

mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config  # or generate it manually:

#Generate the admin.conf kubeconfig used by kubectl
      APISERVER="https://127.0.0.1:8443"  # or https://127.0.0.1:6443 to talk to the local apiserver directly (masters only)

      kubectl config set-cluster kubernetes \
        --certificate-authority=/etc/kubernetes/pki/ca.pem \
        --embed-certs=true \
        --server=${APISERVER} \
        --kubeconfig=admin.conf

      kubectl config set-credentials kubernetes-admin \
        --client-certificate=/etc/kubernetes/pki/admin.pem \
        --client-key=/etc/kubernetes/pki/admin-key.pem \
        --embed-certs=true \
        --kubeconfig=admin.conf

      kubectl config set-context kubernetes-admin@kubernetes \
        --cluster=kubernetes \
        --user=kubernetes-admin \
        --kubeconfig=admin.conf

      kubectl config use-context kubernetes-admin@kubernetes --kubeconfig=admin.conf

      mkdir -p ~/.kube && cp admin.conf ~/.kube/config

#Check cluster status
kubectl get cs
      # Warning: v1 ComponentStatus is deprecated in v1.19+
      # NAME                 STATUS    MESSAGE   ERROR
      # controller-manager   Healthy   ok        
      # scheduler            Healthy   ok        
      # etcd-0               Healthy   ok

# Create the bootstrap token and RBAC objects
cat > bootstrap.secret.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-c8ad9c
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: c8ad9c
  token-secret: 2e4d610cf3e7426e
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups:  system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kube-apiserver
EOF

kubectl create -f bootstrap.secret.yaml

🟩 Copy files to the worker nodes

#Copy from master01 to the remaining nodes

#Copy the certificates and kubeconfigs
for NODE in 172.10.10.185; do \
    ssh $NODE mkdir -p /etc/kubernetes/pki; \
    for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem \
                bootstrap-kubelet.kubeconfig kube-proxy.kubeconfig; do \
        scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}; \
    done; \
done
    # output:
    # ca.pem                    100% 1363   265.5KB/s   00:00    
    # ca-key.pem                100% 1679   365.8KB/s   00:00    
    # front-proxy-ca.pem        100% 1094   236.7KB/s   00:00    
    # bootstrap-kubelet.kubeconfig 100% 2232     1.2MB/s   00:00    
    # kube-proxy.kubeconfig

#Copy the binaries
scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} 172.10.10.185:/usr/local/bin/
    #worker nodes do not need kube-apiserver, kube-controller-manager or kube-scheduler (see the sketch below)
    # kubelet                   100%   78MB 363.2MB/s   00:00    
    # kubectl                   100%   57MB 301.9MB/s   00:00    
    # kube-apiserver            100%   93MB 174.5MB/s   00:00    
    # kube-controller-manager   100%   87MB 455.3MB/s   00:00    
    # kube-scheduler            100%   66MB 354.9MB/s   00:00    
    # kube-proxy                100%   67MB 385.8MB/s   00:00  
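
A hedged variant that copies only what the workers actually need, looping over both worker IPs from the hosts file:
```bash
# Only kubelet, kubectl and kube-proxy are required on worker nodes
for NODE in 172.10.10.184 172.10.10.185; do
  scp /usr/local/bin/kube{let,ctl,-proxy} "$NODE":/usr/local/bin/
done
```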

🟩 Configure the kubelet service on every node

#Configure on all nodes, using Docker (via cri-dockerd) as the runtime

#Create the kubelet service
cat > /usr/lib/systemd/system/kubelet.service << EOF

[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=network-online.target firewalld.service cri-docker.service docker.socket containerd.service
Wants=network-online.target
Requires=cri-docker.socket containerd.service

[Service]
ExecStart=/usr/local/bin/kubelet \\
    --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig  \\
    --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
    --config=/etc/kubernetes/kubelet-conf.yml \\
    --container-runtime-endpoint=unix:///run/cri-dockerd.sock  \\
    --node-labels=node.kubernetes.io/node= 

[Install]
WantedBy=multi-user.target
EOF

#Create the kubelet configuration file
cat > /etc/kubernetes/kubelet-conf.yml <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF

systemctl daemon-reload && systemctl enable --now kubelet.service
systemctl restart kubelet.service
systemctl status kubelet.service

#journalctl -n 100 -u kubelet.service --no-pager

#Check node status
#Nodes become Ready after a few minutes
kubectl  get node
    #NAME                    STATUS   ROLES    AGE    VERSION
    #k8s-master01            Ready    <none>   19h    v1.33.6
    #k8s-master02            Ready    <none>   19h    v1.33.6
    #k8s-master03            Ready    <none>   19h    v1.33.6
    #k8s-node02              Ready    <none>   3m6s   v1.33.6
    #localhost.localdomain   Ready    <none>   12h    v1.33.6   (a node whose hostname was never set)

🟩 Configure the kube-proxy service on every node

#systemd unit
cat >  /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-proxy \\
  --config=/etc/kubernetes/kube-proxy.yaml \\
  --cluster-cidr=172.16.0.0/12 \\
  --v=2
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

EOF

#kube-proxy configuration
cat > /etc/kubernetes/kube-proxy.yaml << EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 172.16.0.0/12
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF

systemctl daemon-reload && systemctl enable --now kube-proxy.service
systemctl restart kube-proxy.service
systemctl status kube-proxy.service

🟩 Check cluster status

kubectl get pod -A -o wide
kubectl get node

#Check the container runtime
kubectl describe node | grep Runtime

#Troubleshooting
journalctl -n 100 --no-pager -u kube-proxy.service

🟩 Install the network plugin: Calico

# Run on master01 only

# Install the Tigera operator
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.30.3/manifests/tigera-operator.yaml

# Download the custom resources manifest
curl https://raw.githubusercontent.com/projectcalico/calico/v3.30.3/manifests/custom-resources.yaml -O

# Edit the IP pool to match the pod CIDR
vim custom-resources.yaml
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    ipPools:
    - name: default-ipv4-ippool
      blockSize: 26
      cidr: 172.16.0.0/12
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()

# Apply the installation
kubectl create -f custom-resources.yaml

# Install the calicoctl client
curl -L https://github.com/projectcalico/calico/releases/download/v3.30.3/calicoctl-linux-amd64 -o calicoctl

# Make it executable
chmod +x ./calicoctl

# List cluster nodes
./calicoctl get nodes
# Node status
./calicoctl node status
# IP pools
./calicoctl get ipPool
./calicoctl get ipPool -o yaml

🟩 Install CoreDNS

#Install helm
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
sh get_helm.sh

# Pull the chart tgz
helm repo add coredns https://coredns.github.io/helm
helm pull coredns/coredns
tar xvf coredns-*.tgz
cd coredns/

# Set the cluster DNS IP
vim values.yaml
cat values.yaml | grep clusterIP:
clusterIP: "10.96.0.10"

# example values.yaml excerpt
---
service:
# clusterIP: ""
# clusterIPs: []
# loadBalancerIP: ""
# externalIPs: []
# externalTrafficPolicy: ""
# ipFamilyPolicy: ""
  # The name of the Service
  # If not set, a name is generated using the fullname template
  clusterIP: "10.96.0.10"
  name: ""
  annotations: {}
---

# Optionally switch to a China mirror; unnecessary if traffic already goes through the soft router
#sed -i "s#registry.k8s.io/#k8s.m.daocloud.io/#g" values.yaml

# Install with default values (run from the directory that contains ./coredns/, i.e. cd .. first if still inside the chart directory)
helm install coredns ./coredns/ -n kube-system

kubectl get pod -A

🟩 Install Metrics Server

# Download
wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml -O metrics-server.yaml 

# Edit the manifest
# Without these changes the liveness/readiness probes fail
vim metrics-server.yaml 
    ---
        # 1
            - args:
                - --cert-dir=/tmp
                - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
                - --kubelet-use-node-status-port
                - --metric-resolution=15s
                - --kubelet-insecure-tls
                - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
                - --requestheader-username-headers=X-Remote-User
                - --requestheader-group-headers=X-Remote-Group
                - --requestheader-extra-headers-prefix=X-Remote-Extra-
        # 2
                volumeMounts:
                - mountPath: /tmp
                  name: tmp-dir
                - name: ca-ssl
                  mountPath: /etc/kubernetes/pki
        # 3
              volumes:
              - emptyDir: {}
                name: tmp-dir
              - name: ca-ssl
                hostPath:
                  path: /etc/kubernetes/pki
    ---

# Optionally switch the image registry to a China mirror
#sed -i "s#registry.k8s.io/#k8s.m.daocloud.io/#g" metrics-server.yaml

# Deploy
kubectl apply -f metrics-server.yaml 

# Test
kubectl top node

🟩 Smoke tests

## Deploy a busybox pod
cat<<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: docker.m.daocloud.io/library/busybox:1.28
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF
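
Once the pod is Running, a quick way to confirm that CoreDNS (10.96.0.10) and service routing work; the kubernetes Service always exists in the default namespace:
```bash
kubectl get pod busybox -o wide
# Resolve the in-cluster kubernetes service through CoreDNS
kubectl exec busybox -- nslookup kubernetes.default.svc.cluster.local
```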

## Deploy an nginx Deployment
cat nginx-deployment.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 4
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80
        resources:
          requests:
            memory: "64Mi"
            cpu: "50m"
          limits:
            memory: "128Mi"
            cpu: "100m"
        # liveness probe
        livenessProbe:
          httpGet:
            path: /
            port: 80
          initialDelaySeconds: 5
          periodSeconds: 10
        # readiness probe
        readinessProbe:
          httpGet:
            path: /
            port: 80
          initialDelaySeconds: 3
          periodSeconds: 5
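
To exercise the manifest, a hedged apply-and-check sequence; the NodePort Service created here is my addition for testing, not part of the original manifest, and 172.10.10.184 is just one of the worker IPs from the hosts file:
```bash
kubectl apply -f nginx-deployment.yaml
kubectl get pods -l app=nginx -o wide

# Expose the Deployment and curl it from outside the cluster (NodePort is assigned by Kubernetes)
kubectl expose deployment nginx-deployment --port=80 --type=NodePort
kubectl get svc nginx-deployment
curl -I http://172.10.10.184:$(kubectl get svc nginx-deployment -o jsonpath='{.spec.ports[0].nodePort}')
```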

🟩 Install the Kubernetes Dashboard

# Add the repo
helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/ 
# Pull and unpack the chart locally (same flow as CoreDNS above), then install with default values
helm pull kubernetes-dashboard/kubernetes-dashboard && tar xvf kubernetes-dashboard-*.tgz
helm upgrade --install kubernetes-dashboard ./kubernetes-dashboard/ --create-namespace --namespace kube-system

## Change the Service type to NodePort (or use the patch below)
kubectl edit svc  -n kube-system kubernetes-dashboard-kong-proxy
    # type: NodePort
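
If you prefer a non-interactive change over kubectl edit, a one-line patch does the same thing:
```bash
kubectl -n kube-system patch svc kubernetes-dashboard-kong-proxy \
  -p '{"spec": {"type": "NodePort"}}'
```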

kubectl get svc -A | grep NodePort
kubectl get svc kubernetes-dashboard-kong-proxy -n kube-system

## Create a login token
cat > dashboard-user.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF

kubectl  apply -f dashboard-user.yaml

kubectl -n kube-system create token admin-user

#Long-lived token
cat > dashboard-user-token.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
  name: admin-user
  namespace: kube-system
  annotations:
    kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token
EOF

kubectl apply -f dashboard-user-token.yaml

# Read the token
kubectl get secret admin-user -n kube-system -o jsonpath='{.data.token}' | base64 -d

#Port-forward so other machines on the LAN can reach the dashboard in a browser
kubectl -n kube-system port-forward --address 0.0.0.0 svc/kubernetes-dashboard-kong-proxy 58443:443
# then browse to https://<host-ip>:58443