master节点192.168.2.90
node节点192.168.2.91、192.168.2.92
在master节点和每个node节点都分别执行以下命令
更新yum：
yum -y update
关闭SELINUX：
vim /etc/selinux/config
将SELINUX=enforcing修改为SELINUX=disabled
禁用防火墙：
systemctl stop firewalld
systemctl disable firewalld
安装master节点
在master节点运行以下命令
yum install kubernetes-master etcd -y
安装node节点
在每个node节点都分别运行以下命令
yum install kubernetes-node flannel docker etcd -y
配置master节点ETCD
vim /etc/etcd/etcd.conf
修改ETCD_LISTEN_PEER_URLS ETCD_LISTEN_CLIENT_URLS ETCD_NAME ETCD_INITIAL_ADVERTISE_PEER_URLS ETCD_ADVERTISE_CLIENT_URLS ETCD_INITIAL_CLUSTER
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="http://192.168.2.90:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.2.90:2379,http://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
ETCD_NAME="etcd0"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.2.90:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.2.90:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="etcd0=http://192.168.2.90:2380,etcd1=http://192.168.2.91:2380,etcd2=http://192.168.2.92:2380"
#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
配置node1节点ETCD
vim /etc/etcd/etcd.conf
修改ETCD_LISTEN_PEER_URLS ETCD_LISTEN_CLIENT_URLS ETCD_NAME ETCD_INITIAL_ADVERTISE_PEER_URLS ETCD_ADVERTISE_CLIENT_URLS ETCD_INITIAL_CLUSTER
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="http://192.168.2.91:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.2.91:2379,http://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
ETCD_NAME="etcd1"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.2.91:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.2.91:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="etcd0=http://192.168.2.90:2380,etcd1=http://192.168.2.91:2380,etcd2=http://192.168.2.92:2380"
#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
配置node2节点ETCD
vim /etc/etcd/etcd.conf
修改ETCD_LISTEN_PEER_URLS ETCD_LISTEN_CLIENT_URLS ETCD_NAME ETCD_INITIAL_ADVERTISE_PEER_URLS ETCD_ADVERTISE_CLIENT_URLS ETCD_INITIAL_CLUSTER
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="http://192.168.2.92:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.2.92:2379,http://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
ETCD_NAME="etcd2"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.2.92:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.2.92:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="etcd0=http://192.168.2.90:2380,etcd1=http://192.168.2.91:2380,etcd2=http://192.168.2.92:2380"
#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
master节点和node节点分别运行
systemctl start etcd
systemctl enable etcd
systemctl status etcd
如下图所示说明启动成功
vim /etc/kubernetes/apiserver
###
# kubernetes system config
#
# The following values are used to configure the kube-apiserver
#

# The address on the local server to listen to.
KUBE_API_ADDRESS="--address=0.0.0.0"

# The port on the local server to listen on.
KUBE_API_PORT="--port=8080"

# Port minions listen on
KUBELET_PORT="--kubelet-port=10250"

# Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.2.90:2379,http://192.168.2.91:2379,http://192.168.2.92:2379"

# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"

# default admission control policies
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"

# Add your own!
KUBE_API_ARGS=""
systemctl start kube-apiserver.service
systemctl start kube-controller-manager.service
systemctl start kube-scheduler.service
systemctl enable kube-apiserver.service
systemctl enable kube-controller-manager.service
systemctl enable kube-scheduler.service
systemctl status kube-apiserver.service
systemctl status kube-controller-manager.service
systemctl status kube-scheduler.service
如下图所示说明启动成功
vim /etc/kubernetes/config
###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
#   kube-apiserver.service
#   kube-controller-manager.service
#   kube-scheduler.service
#   kubelet.service
#   kube-proxy.service

# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"

# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"

# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=false"

# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=http://192.168.2.90:8080"
vim /etc/kubernetes/kubelet
###
# kubernetes kubelet (minion) config

# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address=0.0.0.0"

# The port for the info server to serve on
KUBELET_PORT="--port=10250"

# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override=192.168.2.91"

# location of the api-server
KUBELET_API_SERVER="--api-servers=http://192.168.2.90:8080"

# pod infrastructure container
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"

# Add your own!
KUBELET_ARGS=""
vim /etc/kubernetes/proxy
###
# kubernetes proxy config

# default config should be adequate

# Add your own!
KUBE_PROXY_ARGS="--bind-address=0.0.0.0"
systemctl start kube-proxy.service
systemctl start kubelet.service
systemctl enable kube-proxy.service
systemctl enable kubelet.service
systemctl status kube-proxy.service
systemctl status kubelet.service
node2采用相同的配置过程，把node1配置中的192.168.2.91替换为192.168.2.92即可，其他不变
配置网络
在master节点运行
etcdctl mk /atomic.io/network/config '{"Network":"172.17.0.0/16", "SubnetMin":"172.17.1.0", "SubnetMax":"172.17.254.0"}'
在node节点配置
vim /etc/sysconfig/flanneld
# Flanneld configuration options

# etcd url location.  Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="http://192.168.2.90:2379,http://192.168.2.91:2379,http://192.168.2.92:2379"

# etcd config key.  This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/atomic.io/network"

# Any additional options that you want to pass
FLANNEL_OPTIONS="--iface=ens192"
systemctl start flanneld
systemctl enable flanneld
systemctl status flanneld
如下图所示说明启动成功
在node节点设置
vim /etc/docker/daemon.json
{
"registry-mirrors": [
"http://hub-mirror.c.163.com",
"https://docker.mirrors.ustc.edu.cn",
"https://registry.docker-cn.com"
]
}
systemctl restart docker
验证
在master节点执行kubectl get node,如下图所示说明启动成功
若node节点拉取pod-infrastructure镜像时报缺少/etc/rhsm/ca/redhat-uep.pem证书的错误，在每个node节点执行以下命令即可解决
openssl s_client -showcerts -servername registry.access.redhat.com -connect registry.access.redhat.com:443 </dev/null 2>/dev/null | openssl x509 -text > /etc/rhsm/ca/redhat-uep.pem



