栏目分类:
子分类:
返回
名师互学网用户登录
快速导航关闭
当前搜索
当前分类
子分类
实用工具
热门搜索
名师互学网 > IT > 系统运维 > 运维 > Linux

傻瓜式搭建高可用kubernetes

Linux 更新时间: 发布时间: IT归档 最新发布 模块sitemap 名妆网 法律咨询 聚返吧 英语巴士网 伯小乐 网商动力

傻瓜式搭建高可用kubernetes

高可用kubernetes搭建11.26

环境ubuntu18.04 kubernetes版本1.18.3

设置k8s和docker源
# Append the Aliyun Kubernetes apt repository.
# (The original "cat <>/etc/apt/sources.list" was a scraping artifact: "<>"
# opens the file read/write and blocks on stdin — the intended form is a
# heredoc appended to sources.list. Ubuntu 18.04 still uses the
# "kubernetes-xenial" channel; that is the upstream repo name, not a typo.)
cat <<EOF >>/etc/apt/sources.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
# Import the GPG signing keys for the Docker and Kubernetes repositories.
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | apt-key add -
curl -fsSL https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -

docker国外源

deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable

安装一些依赖

# Prerequisites for fetching packages over HTTPS and managing repositories.
apt-get -y install apt-transport-https ca-certificates curl software-properties-common
# Refresh package lists so the newly added Kubernetes repo is visible —
# without this, the pinned 1.18.3-00 packages below cannot be resolved.
apt-get update
# Pin kubeadm/kubelet/kubectl to the same release the cluster will run.
apt-get install -y kubeadm=1.18.3-00 kubelet=1.18.3-00 kubectl=1.18.3-00

docker 源

# Register the Aliyun Docker CE repository for this Ubuntu release
# ($(lsb_release -cs) resolves to "bionic" on 18.04).
add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
apt update
# -y keeps the install non-interactive so the whole setup can run unattended.
apt-get install -y docker-ce docker-ce-cli containerd.io
# Enable IPv4 forwarding immediately (kube-proxy and the CNI need it).
# NOTE(review): this does not survive a reboot — persist it with a sysctl
# drop-in (net.ipv4.ip_forward = 1) as well.
echo 1 > /proc/sys/net/ipv4/ip_forward

非常重要!!!

# Let iptables see bridged traffic — required by kube-proxy and most CNI
# plugins. (The original "cat <  /etc/sysctl.d/k8s.conf" was a scraping
# artifact: "<" reads the file; the intended form writes it via a heredoc.)
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# Apply all sysctl config files system-wide, including the one just written.
sysctl --system

时间同步

免密钥登入

关闭swap

配置docker

每个机器都要执行一遍

cat /etc/systemd/system/multi-user.target.wants/docker.service

# Console transcript: allow Docker to pull from the private HTTP registry at
# 10.1.30.55:80 (Docker refuses plain-HTTP registries unless listed here).
root@master3:~# cat /etc/docker/daemon.json 
{"insecure-registries":["10.1.30.55:80"]}
# Reload unit files, restart Docker to pick up daemon.json, enable at boot.
root@master3:~# systemctl daemon-reload && systemctl restart docker && systemctl enable docker
安装 keepalived

在要作为 master 的节点上修改 keepalived 配置文件:

修改:

interface # 网卡名

virtual_ipaddress # 你要设置的虚拟ip

real_server # master节点ip

如下

master1:

# keepalived configuration for master1 (highest priority -> preferred VIP holder).
global_defs {
   router_id LVS_DEVEL
}
# VRRP instance: floats the virtual IP 10.1.28.100 between the three masters.
# All nodes use state BACKUP + nopreempt so a recovered node does not steal
# the VIP back; priority (100 > 50 > 30) decides the initial election.
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens33
    virtual_router_id 80
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        10.1.28.100
    }
}
# LVS virtual server: load-balances the kube-apiserver port 6443 across the
# three masters using direct routing (DR).
virtual_server 10.1.28.100 6443 {
    delay_loop 6
    # NOTE(review): "loadbalance" is not a documented lb_algo value
    # (expected rr/wrr/lc/wlc/lblc/sh/dh) — verify against keepalived.conf(5).
    lb_algo loadbalance
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    # Health-check each apiserver over TLS at /healthz; expect HTTP 200.
    real_server 10.1.28.135 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.1.28.21 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.1.28.63 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

master2:

# keepalived configuration for master2 (priority 50 — middle of the three).
global_defs {
   router_id LVS_DEVEL
}
# VRRP instance: same VIP and router-id as the other masters; BACKUP +
# nopreempt means a recovered node will not steal the VIP back.
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens33
    virtual_router_id 80
    priority 50
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        10.1.28.100
    }
}
# LVS virtual server: balance apiserver port 6443 across the three masters.
virtual_server 10.1.28.100 6443 {
    delay_loop 6
    # NOTE(review): "loadbalance" is not a documented lb_algo value
    # (expected rr/wrr/lc/wlc/lblc/sh/dh) — verify against keepalived.conf(5).
    lb_algo loadbalance
    # Fixed: "lb_kind DR" and "net_mask" had been fused onto one line here,
    # which keepalived cannot parse; split to match master1/master3.
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    # Health-check each apiserver over TLS at /healthz; expect HTTP 200.
    real_server 10.1.28.135 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.1.28.21 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.1.28.63 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

master3:

# keepalived configuration for master3 (priority 30 — lowest of the three).
global_defs {
   router_id LVS_DEVEL
}
# VRRP instance: same VIP/router-id as the other masters; BACKUP + nopreempt
# prevents a recovered node from taking the VIP back after failover.
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens33
    virtual_router_id 80
    priority 30
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        10.1.28.100
    }
}
# LVS virtual server: balance apiserver port 6443 across the three masters.
virtual_server 10.1.28.100 6443 {
    delay_loop 6
    # NOTE(review): "loadbalance" is not a documented lb_algo value
    # (expected rr/wrr/lc/wlc/lblc/sh/dh) — verify against keepalived.conf(5).
    lb_algo loadbalance
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    # Health-check each apiserver over TLS at /healthz; expect HTTP 200.
    real_server 10.1.28.135 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.1.28.21 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.1.28.63 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

然后按顺序依次启动: 执行以下

systemctl enable keepalived && systemctl start keepalived && systemctl status keepalived

执行完可以在master1 上 ip a 看一下,会多一个虚拟ip

准备开始初始化k8s

编写初始化文件

需要修改的地方:

kubernetesVersion # 写kubeadm 组件的 版本

controlPlaneEndpoint # 写虚拟ip 加端口

certSANs # 写master节点的ip

vim kubeadm-config.yaml

# kubeadm init configuration for a 1.18.3 HA control plane.
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.18.3
# The keepalived virtual IP + apiserver port — all kubeconfigs and joining
# nodes talk to the cluster through this endpoint.
controlPlaneEndpoint: 10.1.28.100:6443
# imageRepository: registry.aliyuncs.com/google_containers  如果有镜像可以不写
apiServer:
 # Extra SANs for the apiserver certificate: the three master IPs plus the VIP,
 # so the cert is valid no matter which address a client connects through.
 certSANs:
 - 10.1.28.135
 - 10.1.28.21
 - 10.1.28.63
 - 10.1.28.100
networking:
 # Must match the flannel default pod CIDR (10.244.0.0/16).
 podSubnet: 10.244.0.0/16
---
# Run kube-proxy in IPVS mode instead of the iptables default.
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind:  KubeProxyConfiguration
mode: ipvs

kubeadm init --config kubeadm-config.yaml # 开始初始化

成功之后执行:

# Make kubectl usable for the current user by installing the admin kubeconfig.
mkdir -p "$HOME/.kube"
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
# Hand ownership of the copied file to the invoking user.
# (The original line was corrupted by the web scrape — "( i d − u )" with a
# Unicode minus; this is the canonical kubeadm post-init command.)
sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"

flannel:

# Install the flannel CNI pod network (uses the 10.244.0.0/16 podSubnet).
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/documentation/kube-flannel.yml
# Copy the cluster CA, service-account keys, front-proxy CA and etcd CA to
# the next control-plane node so its "kubeadm join --control-plane" can reuse
# them instead of generating a divergent set.
scp /etc/kubernetes/pki/ca.crt node1:/etc/kubernetes/pki/ 
scp /etc/kubernetes/pki/ca.key node1:/etc/kubernetes/pki/ 
scp /etc/kubernetes/pki/sa.key node1:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.pub node1:/etc/kubernetes/pki/ 
scp /etc/kubernetes/pki/front-proxy-ca.crt node1:/etc/kubernetes/pki/ 
scp /etc/kubernetes/pki/front-proxy-ca.key node1:/etc/kubernetes/pki/ 
scp /etc/kubernetes/pki/etcd/ca.key node1:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/pki/etcd/ca.crt node1:/etc/kubernetes/pki/etcd/

另外几台 master 使用 kubeadm init 输出中带 `--control-plane` 参数的那条 join 命令加入集群


获取镜像脚本:

# Core control-plane images required by kubeadm v1.18.3.
images=(
    kube-apiserver:v1.18.3
    kube-controller-manager:v1.18.3
    kube-scheduler:v1.18.3
    kube-proxy:v1.18.3
    pause:3.2
    etcd:3.4.3
    coredns:1.6.7
)

# Pull each image from the Aliyun mirror and retag it under the k8s.gcr.io
# name kubeadm expects (gcr.io is unreachable from mainland China).
# Fixed: the array expansion and $imageName were unquoted (ShellCheck
# SC2068/SC2086); the registry prefix is hoisted out of the loop body.
registry=registry.cn-hangzhou.aliyuncs.com/google_containers
for imageName in "${images[@]}"; do
    docker pull "${registry}/${imageName}"
    docker tag "${registry}/${imageName}" "k8s.gcr.io/${imageName}"
done
转载请注明:文章转载自 www.mshxw.com
本文地址:https://www.mshxw.com/it/599323.html
我们一直用心在做
关于我们 文章归档 网站地图 联系我们

版权所有 (c)2021-2022 MSHXW.COM

ICP备案号:晋ICP备2021003244-6号