kube-proxy
kubernetes-server-linux-amd64.tar.gz (all the related binaries can be found in here!)
flannel
[root@localhost ~]# uname -a
Linux localhost.localdomain 4.18.0-80.11.2.el8_0.x86_64 #1 SMP Tue Sep 24 11:32:19 UTC 2019 x86_64 x86_64 x86_64 GNU/Linux
[root@localhost ~]# cat /etc/redhat-release
CentOS Linux release 8.0.1905 (Core)
hostnamectl set-hostname k8s-node01
...
vi /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.2.201 k8s-master01
192.168.2.202 k8s-master02
192.168.2.203 k8s-master03
192.168.2.11 k8s-node01
192.168.2.12 k8s-node02
yum install -y wget vim yum-utils net-tools tar chrony curl jq ipvsadm ipset conntrack iptables sysstat libseccomp
# Disable the firewall and flush all firewall rules
systemctl disable --now firewalld
iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat
iptables -P FORWARD ACCEPT
# Disable dnsmasq, otherwise Docker containers may fail to resolve domain names!
systemctl disable --now dnsmasq
# Disable SELinux ---> SELINUX=disabled requires a reboot to take full effect!
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
# Disable swap ---> comments out the swap line; requires a reboot to take full effect!
swapoff -a && sed -i '/ swap / s/^\(.*\)$/# \1/g' /etc/fstab
timedatectl set-timezone Asia/Shanghai
timedatectl set-local-rtc 0
systemctl enable chronyd && systemctl restart chronyd
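A quick check that chrony is actually synchronizing (chronyc ships with the chrony package installed above):
# Verify time synchronization
chronyc sources
timedatectl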
# Load the br_netfilter module first
modprobe br_netfilter
cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max = 6553500
net.nf_conntrack_max = 6553500
net.ipv4.tcp_max_tw_buckets = 4096
EOF
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf
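To confirm the settings took effect, read them back (both should report 1):
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables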
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# Run the script and verify the modules are loaded!
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
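Note: /etc/sysconfig/modules/*.modules is a legacy RHEL mechanism and may not be executed automatically at boot on CentOS 8. If the modules need to persist across reboots, systemd's modules-load.d is an alternative (a sketch with the same module list):
cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF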
# Pre-create directories on every machine:
mkdir -p /opt/k8s/{bin,cert}
mkdir -p /opt/flanneld/{bin,cert}
mkdir -p /opt/docker/{bin,cert}
mkdir -p /opt/lib/{kubelet,kube-proxy}
mkdir -p /root/.kube/
# Add environment variables on every node:
sh -c "echo 'PATH=/opt/k8s/bin:/opt/flanneld/bin:/opt/docker/bin:$PATH:$HOME/bin:$JAVA_HOME/bin' >> /etc/profile.d/k8s.sh"
source /etc/profile.d/k8s.sh
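The distribution scripts below copy files with scp/ssh as root, so the master needs passwordless SSH to every node first; a minimal sketch (run once on k8s-master01, IPs as in the hosts file above):
[root@k8s-master01 ~]# ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
[root@k8s-master01 ~]# for ip in 192.168.2.11 192.168.2.12; do ssh-copy-id root@${ip}; done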
# Create the following script on the master and run it
[root@k8s-master01 ~]# vi /opt/k8s/script/scp_node_init.sh
# Takes the node IPs as arguments, however many there are
NODE_IPS=("$@")
for node_ip in ${NODE_IPS[@]};do
echo ">>> ${node_ip}"
# Copy the CA certificates
scp /opt/k8s/cert/ca*.pem root@${node_ip}:/opt/k8s/cert/
# Copy the flanneld certificates
scp /opt/flanneld/cert/* root@${node_ip}:/opt/flanneld/cert/
# Copy the flannel binary and mk-docker-opts.sh
scp /opt/flanneld/bin/{flanneld,mk-docker-opts.sh} root@${node_ip}:/opt/flanneld/bin/
# Copy the flanneld.service unit file
scp /etc/systemd/system/flanneld.service root@${node_ip}:/etc/systemd/system/
# Copy the kubectl config file
scp /root/.kube/config root@${node_ip}:/root/.kube/
# Copy the binaries the nodes need
scp /root/kubernetes/server/bin/{kubectl,kubelet,kube-proxy} root@${node_ip}:/opt/k8s/bin/
done
# Mind the arguments; pass one IP per node:
[root@k8s-master01 ~]# bash /opt/k8s/script/scp_node_init.sh 192.168.2.11 192.168.2.12
[root@k8s-node01 ~]# wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
# Extract, then place flanneld and mk-docker-opts.sh under /opt/flanneld/bin
[root@k8s-node01 ~]# tar -xvf flannel-v0.11.0-linux-amd64.tar.gz
[root@k8s-node01 ~]# cp flanneld mk-docker-opts.sh /opt/flanneld/bin/
[root@k8s-node01 ~]# vi /opt/flanneld/flanneld.service.template
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service
[Service]
Type=notify
ExecStart=/opt/flanneld/bin/flanneld -etcd-cafile=/opt/k8s/cert/ca.pem -etcd-certfile=/opt/flanneld/cert/flanneld.pem -etcd-keyfile=/opt/flanneld/cert/flanneld-key.pem -etcd-endpoints=https://192.168.2.201:2379,https://192.168.2.202:2379,https://192.168.2.203:2379 -etcd-prefix=/atomic.io/network -iface=eth0
ExecStartPost=/opt/flanneld/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=on-failure
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
# Install the unit file, then start, enable at boot, and check status
[root@k8s-node01 ~]# cp /opt/flanneld/flanneld.service.template /etc/systemd/system/flanneld.service
[root@k8s-node01 ~]# systemctl daemon-reload && systemctl enable flanneld && systemctl restart flanneld && systemctl status flanneld
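Once flanneld is running, mk-docker-opts.sh (the ExecStartPost above) writes the docker options for the allocated subnet; checking that file is a quick sanity test. The etcdctl query is a sketch that assumes the etcdctl v2 client is present on the node:
# Options handed to docker for the flannel subnet
[root@k8s-node01 ~]# cat /run/flannel/docker
# Optional: read the flannel network config straight from etcd
[root@k8s-node01 ~]# ETCDCTL_API=2 etcdctl --endpoints=https://192.168.2.201:2379 --ca-file=/opt/k8s/cert/ca.pem --cert-file=/opt/flanneld/cert/flanneld.pem --key-file=/opt/flanneld/cert/flanneld-key.pem get /atomic.io/network/config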
[root@k8s-node01 ~]# wget https://download.docker.com/linux/static/stable/x86_64/docker-19.03.4.tgz
[root@k8s-node01 ~]# tar -xvf docker-19.03.4.tgz
[root@k8s-node01 ~]# cp ~/docker/* /opt/docker/bin/
[root@k8s-node01 ~]# vi /opt/docker/docker.service.template
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.io
[Service]
Environment="PATH=/opt/docker/bin:/bin:/sbin:/usr/bin:/usr/sbin"
EnvironmentFile=-/run/flannel/docker
ExecStart=/opt/docker/bin/dockerd --log-level=error $DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP $MAINPID
Restart=on-failure
RestartSec=5
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
# dockerd reads /etc/docker/daemon.json by default (no --config-file is passed above)
[root@k8s-node01 ~]# mkdir -p /etc/docker && cat > /etc/docker/daemon.json <<EOF
{
"registry-mirrors": ["http://registry.aliyuncs.com/google_containers","https://hub-mirror.c.163.com"],
"max-concurrent-downloads": 20
}
EOF
# Install the unit file (note the file location!)
[root@k8s-node01 ~]# cp /opt/docker/docker.service.template /etc/systemd/system/docker.service
# Start, enable at boot, and check the service status
[root@k8s-node01 ~]# systemctl daemon-reload && systemctl enable docker && systemctl restart docker && systemctl status docker | grep Active
# Check the flannel.1 interface and the docker0 bridge
[root@k8s-node01 ~]# /usr/sbin/ip addr show flannel.1 && /usr/sbin/ip addr show docker0
3: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
link/ether 06:c3:76:09:95:c2 brd ff:ff:ff:ff:ff:ff
inet 10.30.65.0/32 scope global flannel.1
valid_lft forever preferred_lft forever
inet6 fe80::4c3:76ff:fe09:95c2/64 scope link
valid_lft forever preferred_lft forever
4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:76:65:08:06 brd ff:ff:ff:ff:ff:ff
inet 10.30.65.1/24 brd 10.30.65.255 scope global docker0
valid_lft forever preferred_lft forever
kubernetes-server-linux-amd64.tar.gz # I'm in here, right here! Where to find it? Figure that out yourself! It contains the kubectl tool!!!
[root@k8s-master01 ~]# cp kubernetes/server/bin/kubeadm /opt/k8s/bin/
[root@k8s-master01 ~]# vi /opt/k8s/script/bootstrap_kubeconfig.sh
# Takes the node names as arguments
NODE_NAMES=("$@")
for node_name in ${NODE_NAMES[@]};do
echo ">>> ${node_name}"
# Create a bootstrap token with kubeadm, against the cluster in /root/.kube/config
export BOOTSTRAP_TOKEN=$(kubeadm token create --description kubelet-bootstrap-token --groups system:bootstrappers:${node_name} --kubeconfig /root/.kube/config)
# Set cluster parameters
kubectl config set-cluster kubernetes --certificate-authority=/opt/k8s/cert/ca.pem --embed-certs=true --server=https://192.168.2.210:8443 --kubeconfig=/opt/k8s/kubelet/kubelet-bootstrap-${node_name}.kubeconfig
# Set client authentication parameters
kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=/opt/k8s/kubelet/kubelet-bootstrap-${node_name}.kubeconfig
# Set context parameters
kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=/opt/k8s/kubelet/kubelet-bootstrap-${node_name}.kubeconfig
# Switch to the default context
kubectl config use-context default --kubeconfig=/opt/k8s/kubelet/kubelet-bootstrap-${node_name}.kubeconfig
done
[root@k8s-master01 ~]# bash /opt/k8s/script/bootstrap_kubeconfig.sh k8s-node01 k8s-node02
[root@k8s-master01 ~]# kubeadm token list --kubeconfig ~/.kube/config
TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS
3atb7y.gsq4t23paxjutx2n 23h 2019-11-16T01:37:09+08:00 authentication,signing kubelet-bootstrap-token system:bootstrappers:k8s-node02
54ixvq.ote8az8qffgq3lug 23h 2019-11-16T01:37:09+08:00 authentication,signing kubelet-bootstrap-token system:bootstrappers:k8s-node01
The tokens created here are valid for 1 day; once expired they can no longer be used, and they will be cleaned up by kube-controller-manager's tokencleaner (if that controller is enabled).
[root@k8s-master01 ~]# kubectl get secrets -n kube-system
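Each bootstrap token is stored as a secret named bootstrap-token-<token-id> in kube-system, so a single token can be inspected directly (token id taken from the list above):
[root@k8s-master01 ~]# kubectl -n kube-system describe secret bootstrap-token-3atb7y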
[root@k8s-master01 ~]# vi /opt/k8s/kubelet/kubelet.config.json.template
{
  "kind": "KubeletConfiguration",
  "apiVersion": "kubelet.config.k8s.io/v1beta1",
  "authentication": {
    "x509": {
      "clientCAFile": "/opt/k8s/cert/ca.pem"
    },
    "webhook": {
      "enabled": true,
      "cacheTTL": "2m0s"
    },
    "anonymous": {
      "enabled": false
    }
  },
  "authorization": {
    "mode": "Webhook",
    "webhook": {
      "cacheAuthorizedTTL": "5m0s",
      "cacheUnauthorizedTTL": "30s"
    }
  },
  "address": "##NODE_IP##",
  "port": 10250,
  "readOnlyPort": 0,
  "cgroupDriver": "cgroupfs",
  "hairpinMode": "promiscuous-bridge",
  "serializeImagePulls": false,
  "featureGates": {
    "RotateKubeletClientCertificate": true,
    "RotateKubeletServerCertificate": true
  },
  "clusterDomain": "cluster.local",
  "clusterDNS": ["10.90.0.2"]
}
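The template must still be valid JSON after the ##NODE_IP## substitution, so it is worth sanity-checking a rendered copy with jq (installed with the base packages earlier); jq exits non-zero on a parse error:
[root@k8s-master01 ~]# sed -e "s/##NODE_IP##/192.168.2.11/" /opt/k8s/kubelet/kubelet.config.json.template | jq .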
[root@k8s-master01 ~]# vi /opt/k8s/kubelet/kubelet.service.template
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/opt/lib/kubelet
ExecStart=/opt/k8s/bin/kubelet --bootstrap-kubeconfig=/opt/k8s/kubelet-bootstrap.kubeconfig --cert-dir=/opt/k8s/cert/ --kubeconfig=/opt/k8s/kubelet.kubeconfig --config=/opt/k8s/kubelet.config.json --hostname-override=##NODE_NAME## --pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest --alsologtostderr=true --logtostderr=false --log-dir=/var/log/kubernetes --v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
When kube-apiserver receives the CSR request, it authenticates the token it carries (the token created earlier with kubeadm); once authentication passes, it sets the request's user to system:bootstrap:<token-id> and its group to system:bootstrappers. This process is called Bootstrap Token Auth.
# Run this on the k8s-master01 node!
[root@k8s-master01 ~]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers
# Inspect the kubelet-bootstrap binding
[root@k8s-master01 ~]# kubectl describe clusterrolebinding kubelet-bootstrap
# Delete the kubelet-bootstrap binding
[root@k8s-master01 ~]# kubectl delete clusterrolebinding kubelet-bootstrap
[root@k8s-master01 ~]# vi /opt/k8s/script/kubelet_service.sh
# Adjust these variables as needed; the two arrays must match one-to-one, no more, no fewer!!
NODE_IPS=("192.168.2.11" "192.168.2.12")
NODE_NAMES=("k8s-node01" "k8s-node02")
# Distribute the kubelet-bootstrap kubeconfig files
for node_name in ${NODE_NAMES[@]};do
echo ">>> ${node_name}"
scp /opt/k8s/kubelet/kubelet-bootstrap-${node_name}.kubeconfig root@${node_name}:/opt/k8s/kubelet-bootstrap.kubeconfig
done
# Distribute kubelet.config.json
for node_ip in ${NODE_IPS[@]};do
echo ">>> ${node_ip}"
sed -e "s/##NODE_IP##/${node_ip}/" /opt/k8s/kubelet/kubelet.config.json.template > /opt/k8s/kubelet/kubelet.config-${node_ip}.json
scp /opt/k8s/kubelet/kubelet.config-${node_ip}.json root@${node_ip}:/opt/k8s/kubelet.config.json
done
# Distribute the kubelet systemd unit files
for node_name in ${NODE_NAMES[@]};do
echo ">>> ${node_name}"
sed -e "s/##NODE_NAME##/${node_name}/" /opt/k8s/kubelet/kubelet.service.template > /opt/k8s/kubelet/kubelet-${node_name}.service
scp /opt/k8s/kubelet/kubelet-${node_name}.service root@${node_name}:/etc/systemd/system/kubelet.service
done
# Start and check the kubelet service
for node_ip in ${NODE_IPS[@]};do
ssh root@${node_ip} "mkdir -p /opt/lib/kubelet"
ssh root@${node_ip} "mkdir -p /var/log/kubernetes"
ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kubelet && systemctl restart kubelet"
ssh root@${node_ip} "systemctl status kubelet | grep Active"
done
[root@k8s-master01 ~]# bash /opt/k8s/script/kubelet_service.sh
# List the CSRs:
[root@k8s-master01 ~]# kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-DzSCvZQM86B7X8wkZV6mK8TQCOBBtVg1RMUohpx2P2c 25s system:bootstrap:3atb7y Pending
node-csr-Rctd6tMgFECldiqhkP2ZOirO_VIBACu0foTJxK7Skf4 63s system:bootstrap:54ixvq Pending
# Manually approve a CSR:
[root@k8s-master01 ~]# kubectl certificate approve node-csr-ADMwXCLrvlOo0Hoal7ttm3E9Ova1QOtRciO66Pd4Wqc
# Check the approve result:
[root@k8s-master01 ~]# kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-ADMwXCLrvlOo0Hoal7ttm3E9Ova1QOtRciO66Pd4Wqc 24m system:bootstrap:n9t3z1 Approved,Issued
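Approving CSRs one at a time gets tedious with more nodes; a convenience one-liner (a sketch -- it approves every listed CSR, so only use it when all pending requests are expected):
[root@k8s-master01 ~]# kubectl get csr -o name | xargs kubectl certificate approve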
[root@k8s-master01 ~]# cat > /opt/k8s/csr-crb.yaml <<EOF
# Approve all CSRs for the group "system:bootstrappers"
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: auto-approve-csrs-for-group
subjects:
- kind: Group
  name: system:bootstrappers
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
  apiGroup: rbac.authorization.k8s.io
---
# To let a node of the group "system:nodes" renew its own credentials
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-client-cert-renewal
subjects:
- kind: Group
  name: system:nodes
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
  apiGroup: rbac.authorization.k8s.io
---
# A ClusterRole which instructs the CSR approver to approve a node requesting a
# serving cert matching its client cert.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: approve-node-server-renewal-csr
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/selfnodeserver"]
  verbs: ["create"]
---
# To let a node of the group "system:nodes" renew its own server credentials
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-server-cert-renewal
subjects:
- kind: Group
  name: system:nodes
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: approve-node-server-renewal-csr
  apiGroup: rbac.authorization.k8s.io
EOF
[root@k8s-master01 ~]# kubectl apply -f /opt/k8s/csr-crb.yaml
clusterrolebinding.rbac.authorization.k8s.io/auto-approve-csrs-for-group created
clusterrolebinding.rbac.authorization.k8s.io/node-client-cert-renewal created
clusterrole.rbac.authorization.k8s.io/approve-node-server-renewal-csr created
clusterrolebinding.rbac.authorization.k8s.io/node-server-cert-renewal created
[root@k8s-master01 ~]# kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-DzSCvZQM86B7X8wkZV6mK8TQCOBBtVg1RMUohpx2P2c 3m40s system:bootstrap:3atb7y Approved,Issued
node-csr-Rctd6tMgFECldiqhkP2ZOirO_VIBACu0foTJxK7Skf4 4m18s system:bootstrap:54ixvq Approved,Issued
# The nodes are now Ready
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-node01 Ready <none> 7s v1.16.2
k8s-node02 Ready <none> 5s v1.16.2
# kube-controller-manager has generated a kubeconfig file and key pair for each node: (*** stored on the node! ***)
[root@k8s-node01 ~]# ll /opt/k8s/kubelet.kubeconfig
-rw------- 1 root root 2299 Nov 14 02:19 /opt/k8s/kubelet.kubeconfig
[root@k8s-node01 ~]# ll /opt/k8s/cert/ | grep kubelet
-rw------- 1 root root 1273 Nov 14 02:19 kubelet-client-2019-11-14-02-19-31.pem
lrwxrwxrwx 1 root root 60 Nov 14 02:19 kubelet-client-current.pem -> /opt/k8s/cert/kubelet-client-2019-11-14-02-19-31.pem
-rw-r--r-- 1 root root 2185 Nov 14 00:43 kubelet.crt
-rw------- 1 root root 1675 Nov 14 00:43 kubelet.key
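To see which identity the rotated client certificate carries and when it expires, decode it with openssl:
[root@k8s-node01 ~]# openssl x509 -in /opt/k8s/cert/kubelet-client-current.pem -noout -subject -dates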
[root@k8s-node02 ~]# ss -nutlp |grep kubelet
tcp LISTEN 0 128 127.0.0.1:33313 0.0.0.0:* users:(("kubelet",pid=3416,fd=15))
tcp LISTEN 0 128 127.0.0.1:10248 0.0.0.0:* users:(("kubelet",pid=3416,fd=34))
tcp LISTEN 0 128 192.168.2.12:10250 0.0.0.0:* users:(("kubelet",pid=3416,fd=31))
[root@k8s-node01 ~]# kubectl describe clusterrole system:kubelet-api-admin
Name: system:kubelet-api-admin
Labels: kubernetes.io/bootstrapping=rbac-defaults
Annotations: rbac.authorization.kubernetes.io/autoupdate: true
PolicyRule:
Resources Non-Resource URLs Resource Names Verbs
--------- ----------------- -------------- -----
nodes/log [] [] [*]
nodes/metrics [] [] [*]
nodes/proxy [] [] [*]
nodes/spec [] [] [*]
nodes/stats [] [] [*]
nodes [] [] [get list watch proxy]
1. kubelet is configured with the authentication parameters shown in kubelet.config.json above (x509 / webhook / anonymous):
2. When kubelet receives a request, it authenticates it by verifying the client certificate against clientCAFile, or by checking whether the bearer token is valid; if both fail, the request is rejected with Unauthorized:
3. Once authenticated, kubelet sends a SubjectAccessReview to kube-apiserver to ask whether the user/group behind the certificate or token has RBAC permission on the requested resource (see the sketch after this list);
4. bearer token authentication and authorization:
(placeholder!!!)
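As a concrete check of this webhook authorization path, a client certificate signed by the cluster CA can call the kubelet API once its user is bound to system:kubelet-api-admin; a sketch, assuming the admin certificate pair from the master setup lives at the paths shown:
# Bind the cert's user to the kubelet API role (the user name "admin" is an assumption)
[root@k8s-master01 ~]# kubectl create clusterrolebinding kubelet-api-admin --clusterrole=system:kubelet-api-admin --user=admin
# Call the kubelet on node02 directly (port 10250, per the ss output above)
[root@k8s-master01 ~]# curl -s --cacert /opt/k8s/cert/ca.pem --cert /opt/k8s/cert/admin.pem --key /opt/k8s/cert/admin-key.pem https://192.168.2.12:10250/metrics | head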
[root@k8s-master01 ~]# cat > /opt/k8s/cert/kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "steams"
    }
  ]
}
EOF
[root@k8s-master01 ~]# cfssl gencert -ca=/opt/k8s/cert/ca.pem -ca-key=/opt/k8s/cert/ca-key.pem -config=/opt/k8s/cert/ca-config.json -profile=kubernetes /opt/k8s/cert/kube-proxy-csr.json | cfssljson -bare /opt/k8s/cert/kube-proxy
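The CN matters here: the built-in system:node-proxier ClusterRoleBinding already grants the user system:kube-proxy the permissions kube-proxy needs, so no extra RBAC binding is required. The generated certificate's subject can be verified with openssl:
[root@k8s-master01 ~]# openssl x509 -in /opt/k8s/cert/kube-proxy.pem -noout -subject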
[root@k8s-master01 ~]# kubectl config set-cluster kubernetes --certificate-authority=/opt/k8s/cert/ca.pem --embed-certs=true --server=https://192.168.2.210:8443 --kubeconfig=/opt/k8s/kube-proxy/kube-proxy.kubeconfig
[root@k8s-master01 ~]# kubectl config set-credentials kube-proxy --client-certificate=/opt/k8s/cert/kube-proxy.pem --client-key=/opt/k8s/cert/kube-proxy-key.pem --embed-certs=true --kubeconfig=/opt/k8s/kube-proxy/kube-proxy.kubeconfig
[root@k8s-master01 ~]# kubectl config set-context kube-proxy@kubernetes --cluster=kubernetes --user=kube-proxy --kubeconfig=/opt/k8s/kube-proxy/kube-proxy.kubeconfig
[root@k8s-master01 ~]# kubectl config use-context kube-proxy@kubernetes --kubeconfig=/opt/k8s/kube-proxy/kube-proxy.kubeconfig
[root@k8s-master01 ~]# cat >/opt/k8s/kube-proxy/kube-proxy.config.yaml.template <<EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: ##NODE_IP##
clientConnection:
  kubeconfig: /opt/k8s/kube-proxy.kubeconfig
clusterCIDR: 10.96.0.0/16
healthzBindAddress: ##NODE_IP##:10256
hostnameOverride: ##NODE_NAME##
kind: KubeProxyConfiguration
metricsBindAddress: ##NODE_IP##:10249
mode: "ipvs"
EOF
[root@k8s-master01 ~]# vi /opt/k8s/kube-proxy/kube-proxy.service.template
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
WorkingDirectory=/opt/lib/kube-proxy
ExecStart=/opt/k8s/bin/kube-proxy \
  --config=/opt/k8s/kube-proxy.config.yaml \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
[root@k8s-master01 ~]# vi /opt/k8s/script/kube-proxy_service.sh
# Adjust these variables as needed; the two arrays must match one-to-one!
NODE_IPS=("192.168.2.11" "192.168.2.12")
NODE_NAMES=("k8s-node01" "k8s-node02")
# The loop bound follows the array length, so it adapts to the node count
for (( i=0; i < ${#NODE_NAMES[@]}; i++ ));do
echo ">>> ${NODE_NAMES[i]}"
sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" -e "s/##NODE_IP##/${NODE_IPS[i]}/" /opt/k8s/kube-proxy/kube-proxy.config.yaml.template > /opt/k8s/kube-proxy/kube-proxy-${NODE_NAMES[i]}.config.yaml
scp /opt/k8s/kube-proxy/kube-proxy-${NODE_NAMES[i]}.config.yaml root@${NODE_NAMES[i]}:/opt/k8s/kube-proxy.config.yaml
done
for node_ip in ${NODE_IPS[@]};do
echo ">>> ${node_ip}"
scp /opt/k8s/kube-proxy/kube-proxy.kubeconfig root@${node_ip}:/opt/k8s/
scp /opt/k8s/kube-proxy/kube-proxy.service.template root@${node_ip}:/etc/systemd/system/kube-proxy.service
ssh root@${node_ip} "mkdir -p /opt/lib/kube-proxy"
ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kube-proxy && systemctl restart kube-proxy"
ssh root@${node_ip} "systemctl status kube-proxy|grep Active"
done
[root@k8s-master01 ~]# bash /opt/k8s/script/kube-proxy_service.sh
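With mode "ipvs" set in the config above, kube-proxy programs IPVS virtual servers rather than iptables chains; ipvsadm (installed with the base packages) lists them:
# Run on a node, or from the master over ssh
[root@k8s-master01 ~]# ssh root@192.168.2.11 "ipvsadm -ln"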
Source: https://www.cnblogs.com/colman/p/11875118.html