集群方案: - 发行版:CentOS 7.6 - 容器运行时: 19.03.2 - 内核:3.10.0-957.27.2.el7.x86_64 - 版本:Kubernetes: 1.15.3 - 网络方案: Calico - kube-proxy mode: IPVS - master高可用方案:HAProxy keepalived LVS - DNS插件: CoreDNS - metrics插件:metrics-server
Kubernetes集群搭建
Host Name | Role | IP |
---|---|---|
ha-master0 | ha-master0 | 192.168.1.166 |
ha-master1 | ha-master1 | 192.168.1.167 |
ha-master2 | ha-master2 | 192.168.1.168 |
ha-node0 | ha-node0 | 192.168.1.169 |
ha-node0 | ha-node0 | 192.168.1.169 |
ha-node1 | ha-node1 | 192.168.1.170 |
1、下载安装包(基于能够访问外网的服务器下载相应安装包)
# Configure the yum cache: cachedir sets the cache path, keepcache=1 keeps
# downloaded RPMs after installation so they can be reused on offline nodes.
cat /etc/yum.conf
[main]
cachedir=/home/yum
keepcache=1
...
# Install ifconfig (net-tools)
yum install -y net-tools
# One-shot time-sync client
yum install -y ntpdate
# Install docker (19.03.x recommended, matching the cluster spec above;
# the original note said "19.8.06", which is not a real Docker release)
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
yum makecache fast
## List available Docker versions
yum list docker-ce --showduplicates | sort -r
## Install a specific version, e.g. docker-ce-19.03.2
sudo yum install docker-ce-<VERSION_STRING>
# lrzsz lets XShell upload/download files via the rz/sz commands
# (fix: the original read "yum intall", a typo that makes yum fail)
yum install lrzsz -y
# Install keepalived and haproxy (plus socat and ipvsadm for IPVS mode)
yum install -y socat keepalived ipvsadm haproxy
# Kubernetes repo (Aliyun mirror). NOTE: the continuation line of the
# multi-valued gpgkey option must be indented, otherwise yum's INI parser
# treats it as a malformed key and rejects the repo file.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubelet kubeadm kubectl ebtables
# Other tools (-y added for consistency with the non-interactive installs above)
yum install -y wget
...
...
2、节点系统配置
- 关闭SELinux、防火墙
# Stop and disable firewalld so kube-proxy/CNI traffic is not filtered.
systemctl stop firewalld
systemctl disable firewalld
# Switch SELinux to permissive now, and disable it permanently on reboot.
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
- 关闭系统的Swap(Kubernetes 1.8开始要求)
# Turn off swap immediately (required by Kubernetes since 1.8).
swapoff -a
# Back up fstab, then strip the swap entries so swap stays off after reboot.
yes | cp /etc/fstab /etc/fstab_bak
cat /etc/fstab_bak |grep -v swap > /etc/fstab
- 配置L2网桥在转发包时会被iptables的FORWARD规则所过滤,该配置被CNI插件需要,更多信息请参考Network Plugin Requirements
# Allow bridged traffic to be filtered by iptables FORWARD rules -- required
# by CNI plugins (see "Network Plugin Requirements") -- and minimise swapping.
# Fix: the original used the fragile `echo """..."""` idiom, which relies on
# adjacent empty strings and writes stray blank lines; a heredoc is explicit.
# NOTE(review): this overwrites /etc/sysctl.conf wholesale; the k8s.conf
# drop-in written later repeats these keys, but a file under /etc/sysctl.d/
# would be the safer pattern -- confirm nothing else relies on sysctl.conf.
cat <<EOF > /etc/sysctl.conf
vm.swappiness = 0
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl -p
如果出错执行,modprobe br_netfilter
- 同步时间
# One-shot clock sync; for continuous sync consider chronyd/ntpd instead.
ntpdate -u ntp.api.bz
- 重启系统,确认内核版本后,开启IPVS(如果未升级内核,去掉ip_vs_fo)
# Confirm the running kernel version before loading IPVS modules.
uname -a
# Generate the boot-time loader for the IPVS modules kube-proxy needs.
# The heredoc delimiter is unquoted, so every `$` that must survive into the
# generated script is escaped. Fix: the original left `$?` unescaped, so it
# expanded at WRITE time and the generated guard was a constant test
# (e.g. `if [ 0 -eq 0 ]`) -- the modinfo check never actually ran.
# Testing the command directly removes the need for `$?` altogether.
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
  # Only load modules that exist for the running kernel.
  if /sbin/modinfo -F filename "\${kernel_module}" > /dev/null 2>&1; then
    /sbin/modprobe "\${kernel_module}"
  fi
done
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
执行sysctl -p报错可执行modprobe br_netfilter.
- 所有机器需要设定/etc/sysctl.d/k8s.conf的系统参数
# https://github.com/moby/moby/issues/31208
# ipvsadm -l --timout
# 修复ipvs模式下长连接timeout问题 小于900即可
# Kernel parameters for every node, as a /etc/sysctl.d/ drop-in (persists
# across reboots). The tcp_keepalive_* values (< 900s) work around IPVS
# dropping idle long-lived connections -- see moby/moby#31208 linked above.
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches=89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
net.bridge.bridge-nf-call-arptables = 1
vm.swappiness = 0
vm.overcommit_memory=1
vm.panic_on_oom=0
EOF
# Apply every *.conf under /etc/sysctl.d (and /etc/sysctl.conf).
sysctl --system
- 设置开机启动
# Start docker. Docker sets the iptables FORWARD policy to DROP, which
# breaks pod-to-pod traffic across nodes; the ExecStartPost resets it.
# NOTE(review): inserting at line 13 assumes the stock docker.service
# layout -- verify the unit file first; a systemd drop-in override
# (/etc/systemd/system/docker.service.d/) would be more robust.
sed -i "13i ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT" /usr/lib/systemd/system/docker.service
systemctl daemon-reload
systemctl enable docker
systemctl start docker
# Enable kubelet at boot (it will crash-loop until kubeadm provides a config)
systemctl enable kubelet
systemctl enable keepalived
systemctl enable haproxy
- 设置免密登录
# 1. Press Enter three times to generate the key pair with defaults
ssh-keygen
# 2. Copy the public key to each of the other nodes for password-less SSH
ssh-copy-id -i ~/.ssh/id_rsa.pub username@192.168.x.xxx
3、keepalived+haproxy配置
# 所有节点执行
# Generate the cluster topology file consumed by keepalived-haproxy.sh.
# Fix: the original `echo """..."""` idiom added stray blank lines and is
# fragile quoting; a quoted heredoc keeps every line literal (no expansion).
cat > ./cluster-info <<'EOF'
CP0_IP=192.168.1.166
CP1_IP=192.168.1.167
CP2_IP=192.168.1.168
VIP=192.168.1.165
NET_IF=ens160
CIDR=10.244.0.0/16
EOF
# NOTE(review): piping a remote script straight into bash executes
# unreviewed code -- download and inspect it first if auditing is required.
bash -c "$(curl -fsSL https://raw.githubusercontent.com/hnbcao/kubeadm-ha-master/v1.14.0/keepalived-haproxy.sh)"
4、部署HA Master
1)、kubeadm reset
# Destructive: wipes any previous kubeadm state and all generated
# certificates on this node.
kubeadm reset -f
rm -rf /etc/kubernetes/pki/
2)、初始化master0的kubelet
# Write the kubeadm configuration. Fixes vs. the original:
#  - a quoted heredoc replaces `echo """..."""`, which stripped the inner
#    quotes around the controlPlaneEndpoint value and lost the YAML
#    indentation (certSANs/podSubnet at column 0 is invalid YAML);
#  - `maxPods` is a kubelet setting, not a ClusterConfiguration field, so
#    it moves into a KubeletConfiguration document; `networkPlugin` is not
#    a valid config key at all and is dropped.
cat > /etc/kubernetes/kubeadm-config.yaml <<'EOF'
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.15.3
controlPlaneEndpoint: "192.168.1.165:8443"
imageRepository: registry.aliyuncs.com/google_containers
apiServer:
  certSANs:
  - 192.168.1.166
  - 192.168.1.167
  - 192.168.1.168
  - 192.168.1.165
networking:
  # This CIDR is a Calico default. Substitute or remove for your CNI provider.
  podSubnet: 10.244.0.0/16
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
maxPods: 100
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
EOF
# Fix: point --config at the file actually written above; the original used
# the relative path `kubeadm-config.yaml`, which does not exist in $PWD.
kubeadm init --config=/etc/kubernetes/kubeadm-config.yaml --upload-certs
# Set up kubectl for the current user.
mkdir -p "$HOME/.kube"
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
3)、初始化master1,master2和node节点
master节点执行先前由 kubeadm init 在第一个节点上输出的 join 命令
# Join as an additional control-plane node. The token, CA cert hash, and
# certificate key below are examples -- use the values printed by
# `kubeadm init` on the first master (the certificate key expires after 2h;
# regenerate with `kubeadm init phase upload-certs --upload-certs`).
sudo kubeadm join 192.168.1.165:8443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 --control-plane --certificate-key f8902e114ef118304e561c3ecd4d0b543adc226b7a07f675f56564185ffe0c07
# Set up kubectl for the current user on this master.
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
node 节点执行先前由 kubeadm init 在第一个 master 节点上输出的(不带 --control-plane 的)join 命令
# Join as a worker node (same example token/hash as above, without --control-plane).
sudo kubeadm join 192.168.1.165:8443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866
4)、安装cni插件
# Install the Calico CNI plugin (v3.8 manifest matches Kubernetes 1.15).
kubectl apply -f https://docs.projectcalico.org/v3.8/manifests/calico.yaml
5) 检查集群状态
所有 Pod 处于 "Running" 状态,则集群基本安装完毕
# List all pods in every namespace to verify cluster health.
kubectl get pods --all-namespaces
参考:
1.https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/
2.https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/