Install docker-ce

#yum install -y docker-ce
# Pin a specific version; it needs to match the kubeadm setup below
yum install -y docker-ce-18.*

# Archive the yum package cache (for offline installation later)
ls -al /var/cache/yum/x86_64/7/*/packages/
tar -zcvf k8s.tar.gz /var/cache/yum/x86_64/7/*/packages/

# Unpack on the target machine
cd kubeadm1.18/
tar -zxvf k8s.tar.gz -C /
# docker-ce-18.09.9 is fairly old and needs to be installed manually from the cached RPMs
cd /var/cache/yum/x86_64/7/docker-ce-stable/packages/
yum install -y *.rpm
cd -

# Change the Docker cgroup driver from "cgroupfs" to "systemd"
[root@localhost ~]# docker info |grep "Cgroup Driver"
Cgroup Driver: cgroupfs

#vi /usr/lib/systemd/system/docker.service
#ExecStart=/usr/bin/dockerd --exec-opt native.cgroupdriver=systemd
sed -i 's/\(^ExecStart=.*$\)/\1 --exec-opt native.cgroupdriver=systemd/' /usr/lib/systemd/system/docker.service

# Start Docker
systemctl daemon-reload
systemctl restart docker
systemctl enable docker
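
# Quick verification that the cgroup driver change took effect (output may vary slightly by Docker version):
docker info | grep "Cgroup Driver"
# Cgroup Driver: systemd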

Deploy Kubernetes

# Configure the Kubernetes yum repository (Tsinghua mirror)
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=kubernetes
baseurl=https://mirrors.tuna.tsinghua.edu.cn/kubernetes/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=0
EOF

# Or use the Aliyun mirror instead (this overwrites the repo file above)
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF

# Install kubeadm; the kubelet, kubectl and kubernetes-cni binaries are installed automatically along with it.
yum install -y kubeadm
# Or pin specific versions
yum install -y kubeadm-1.18.18 kubelet-1.18.18 kubectl-1.18.18 kubernetes-cni-0.8.7

# Enable and start kubelet
systemctl enable kubelet.service
systemctl restart kubelet.service
systemctl status kubelet.service

Pre-configuration

# Update /etc/hosts
cat >> /etc/hosts << EOF
192.168.237.130 master
EOF

# Set the hostname (master on the master node, worker01 on the worker)
hostnamectl set-hostname master
hostnamectl set-hostname worker01

# Disable the firewall
# [WARNING Firewalld]: firewalld is active, please ensure ports [6443 10250] are open or your cluster may not function correctly
systemctl stop firewalld
systemctl disable firewalld
systemctl status firewalld

# Disable SELinux
setenforce 0
sed -i 's/^\(SELINUX=\).*/\1disabled/' /etc/sysconfig/selinux

# Disable swap
swapoff -a
# Comment out the swap entry in /etc/fstab
sed -i 's/\(^\/dev.*swap.*\)/# \1/' /etc/fstab
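
# Optional check: the Swap line should now show all zeros
free -m | grep -i swap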

# Make sure the br_netfilter module is loaded; this can be checked by running lsmod | grep br_netfilter.
[root@localhost kubeadm]# lsmod | grep br_netfilter
br_netfilter           22256  0
bridge                151336  1 br_netfilter
[root@localhost kubeadm]#
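
# If the module is not listed, it can be loaded manually and made persistent across reboots
# (a minimal sketch; the file name k8s.conf is arbitrary):
modprobe br_netfilter
echo "br_netfilter" > /etc/modules-load.d/k8s.conf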

# For iptables on the Linux node to correctly see bridged traffic, net.bridge.bridge-nf-call-iptables must be set to 1 in the sysctl configuration.
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sudo sysctl --system
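
# Confirm the settings are active (both should report 1):
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables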

Pull the images

# List the image versions required by the installed Kubernetes version:
kubeadm config images list
[root@localhost ~]# kubeadm config images list
k8s.gcr.io/kube-apiserver:v1.18.18
k8s.gcr.io/kube-controller-manager:v1.18.18
k8s.gcr.io/kube-scheduler:v1.18.18
k8s.gcr.io/kube-proxy:v1.18.18
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.3-0
k8s.gcr.io/coredns:1.6.7
[root@localhost ~]#

# Pull the images from the Aliyun mirror registry
kubeadm config images pull --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers

# pullimages.sh re-tags the mirrored images with the k8s.gcr.io names kubeadm expects
vim pullimages.sh
#!/bin/bash
# List local images and strip the mirror registry prefix to get the bare image:tag names
images=$(docker images | awk '{if (NR>1){print $1":"$2}}' | sed 's#registry.cn-hangzhou.aliyuncs.com/google_containers/##')
for imageName in ${images[@]} ; do
    #docker pull anjia0532/google-containers.$imageName
    # Re-tag with the official name, then remove the mirror-registry tag
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
    docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
done

sh pullimages.sh


[root@localhost ~]# docker images
REPOSITORY                                 TAG                 IMAGE ID            CREATED             SIZE
rook/ceph                                  master              0cd4862e3c5e        8 days ago          1.19 GB
weaveworks/weave-npc                       2.8.1               7f92d556d4ff        3 months ago        39.3 MB
weaveworks/weave-kube                      2.8.1               df29c0a4002c        3 months ago        89 MB
k8s.gcr.io/kubernetes-dashboard-amd64      v1.10.0             0dab2435c100        2 years ago         122 MB
k8s.gcr.io/kube-proxy-amd64                v1.11.1             d5c25579d0ff        2 years ago         97.8 MB
k8s.gcr.io/kube-scheduler-amd64            v1.11.1             272b3a60cd68        2 years ago         56.8 MB
k8s.gcr.io/kube-controller-manager-amd64   v1.11.1             52096ee87d0e        2 years ago         155 MB
k8s.gcr.io/kube-apiserver-amd64            v1.11.1             816332bd9d11        2 years ago         187 MB
k8s.gcr.io/coredns                         1.1.3               b3b94275d97c        2 years ago         45.6 MB
k8s.gcr.io/etcd-amd64                      3.2.18              b8df3b177be2        3 years ago         219 MB
k8s.gcr.io/pause                           3.1                 da86e6ba6ca1        3 years ago         742 kB
[root@localhost ~]#

## Move the images to another machine
# Save all the images (this includes the images needed by the later steps)
# The directories are assumed to sit inside a k8s_img/ working directory so that the tar command below matches
mkdir k8s.gcr.io kubernetesui calico
docker images | grep -v REPOSITORY | awk '{print "docker save "$1":"$2" -o "$1$2".img"}'
# Run the commands printed by the previous line, i.e. (or pipe them straight to a shell, as shown after this list):
docker save k8s.gcr.io/kube-proxy:v1.18.18 -o k8s.gcr.io/kube-proxyv1.18.18.img
docker save k8s.gcr.io/kube-apiserver:v1.18.18 -o k8s.gcr.io/kube-apiserverv1.18.18.img
docker save k8s.gcr.io/kube-scheduler:v1.18.18 -o k8s.gcr.io/kube-schedulerv1.18.18.img
docker save k8s.gcr.io/kube-controller-manager:v1.18.18 -o k8s.gcr.io/kube-controller-managerv1.18.18.img
docker save kubernetesui/dashboard:v2.0.3 -o kubernetesui/dashboardv2.0.3.img
docker save calico/node:v3.8.9 -o calico/nodev3.8.9.img
docker save calico/pod2daemon-flexvol:v3.8.9 -o calico/pod2daemon-flexvolv3.8.9.img
docker save calico/cni:v3.8.9 -o calico/cniv3.8.9.img
docker save calico/kube-controllers:v3.8.9 -o calico/kube-controllersv3.8.9.img
docker save kubernetesui/metrics-scraper:v1.0.4 -o kubernetesui/metrics-scraperv1.0.4.img
docker save k8s.gcr.io/pause:3.2 -o k8s.gcr.io/pause3.2.img
docker save k8s.gcr.io/coredns:1.6.7 -o k8s.gcr.io/coredns1.6.7.img
docker save k8s.gcr.io/etcd:3.4.3-0 -o k8s.gcr.io/etcd3.4.3-0.img
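
# Alternative sketch: generate and run the save commands in one go
# (assumes a sub-directory exists for each image repository prefix, as created above)
docker images | grep -v REPOSITORY | awk '{print "docker save "$1":"$2" -o "$1$2".img"}' | bash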
# Compress the saved images
tar -zcvf k8s_img.tar.gz k8s_img

# Copy k8s_img.tar.gz to the other machine and unpack the images
tar -xf k8s_img.tar.gz
cd k8s_img
# Load all images
for i in $(find . -name "*.img"); do echo $i; docker load -i $i; done
docker images
cd ..

Configure the master node

# Print the default kubeadm configuration (only available in newer versions)
#kubeadm config print init-defaults > kubeadm-defaults.yaml

# Deploy the Kubernetes master
kubeadm init
# Pin the version, otherwise kubeadm goes online and installs a newer release instead of 1.18.18
kubeadm init --kubernetes-version 1.18.18

This completes the deployment of the Kubernetes master, which takes only a few minutes. When it finishes, the output looks like this:

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.xxx.xxx:6443 --token xxxxxx.xxxxxxxxxxxxxxxx \
    --discovery-token-ca-cert-hash sha256:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

Configure kubectl authentication with the apiserver

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# This kubeadm join command is what adds more worker nodes to this master node.
# We will need it shortly when deploying the worker nodes, so record it somewhere safe.
kubeadm join 192.168.237.130:6443 --token o3un07.inwoeflle50h10a5 \
    --discovery-token-ca-cert-hash sha256:6ac163fcce39971cbe59b65ff26dbeab9edf798889e767d421ea36d2d9f1f5e4
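
# The bootstrap token expires after 24 hours by default. If it has expired or the command
# was lost, a fresh join command can be printed on the master:
kubeadm token create --print-join-command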

# Check component health
kubectl get cs
[root@master ~]# kubectl get cs
NAME                 STATUS      MESSAGE                                                                                     ERROR
controller-manager   Unhealthy   Get http://127.0.0.1:10252/healthz: dial tcp 127.0.0.1:10252: connect: connection refused
scheduler            Unhealthy   Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused
etcd-0               Healthy     {"health":"true"}

This happens because /etc/kubernetes/manifests/{kube-controller-manager.yaml,kube-scheduler.yaml} set the default port to 0; commenting that line out fixes it:  #  - --port=0

sed -i 's/\(^ *- --port=0$\)/#\1/' /etc/kubernetes/manifests/kube-controller-manager.yaml
sed -i 's/\(^ *- --port=0$\)/#\1/' /etc/kubernetes/manifests/kube-scheduler.yaml
systemctl restart kubelet.service
kubectl get cs
[root@master ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true"}
scheduler            Healthy   ok

# Check node status
kubectl get nodes
[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES    AGE   VERSION
master   NotReady   master   24m   v1.18.18


# Inspect the node object's details, status and events
kubectl describe node master
Conditions:
  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----             ------  -----------------                 ------------------                ------                       -------
  MemoryPressure   False   Sat, 01 May 2021 05:41:33 +0800   Sat, 01 May 2021 05:34:48 +0800   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure     False   Sat, 01 May 2021 05:41:33 +0800   Sat, 01 May 2021 05:34:48 +0800   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure      False   Sat, 01 May 2021 05:41:33 +0800   Sat, 01 May 2021 05:34:48 +0800   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready            False   Sat, 01 May 2021 05:41:33 +0800   Sat, 01 May 2021 05:34:48 +0800   KubeletNotReady              runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized
Addresses:
  InternalIP:  192.168.237.130
  Hostname:    master

# Check the status of the system pods on this node
kubectl get pods -n kube-system
[root@master ~]# kubectl get pods -n kube-system
NAME                                      READY   STATUS    RESTARTS   AGE
calico-kube-controllers-75d555c48-vqq5l   1/1     Running   0          7m27s
calico-node-869tt                         1/1     Running   0          7m27s
coredns-66bff467f8-6lwrk                  1/1     Running   0          21m
coredns-66bff467f8-92m76                  1/1     Running   0          21m
etcd-master                               1/1     Running   0          21m
kube-apiserver-master                     1/1     Running   0          21m
kube-controller-manager-master            1/1     Running   0          18m
kube-proxy-fltb6                          1/1     Running   0          21m
kube-scheduler-master                     1/1     Running   0          16m

Deploy the network plugin

# Calico installation (online alternatives, commented out; a local manifest is applied below)
#kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
#kubectl apply -f https://docs.projectcalico.org/v3.8/manifests/calico.yaml
#kubectl apply -f https://docs.projectcalico.org/v3.18/manifests/calico.yaml


kubectl apply -f calico/calico3.8.yaml

[root@master ~]# kubectl get pods -n kube-system
NAME                                      READY   STATUS    RESTARTS   AGE
calico-kube-controllers-75d555c48-cltpp   1/1     Running   0          56s
calico-node-ckqgn                         1/1     Running   0          56s
coredns-66bff467f8-45jmp                  1/1     Running   0          5m57s
coredns-66bff467f8-48867                  1/1     Running   0          5m57s
etcd-master                               1/1     Running   0          6m6s
kube-apiserver-master                     1/1     Running   0          6m6s
kube-controller-manager-master            1/1     Running   0          5m41s
kube-proxy-76f55                          1/1     Running   0          5m57s
kube-scheduler-master                     1/1     Running   0          5m41s


# Flannel installation (alternative to Calico)
#kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

kubectl apply -f flannel/kube-flannel.yml

The Taint/Toleration mechanism

# Check the Taints field on the master node
[root@master ~]# kubectl describe node master
Name:               master
Roles:              master
Taints:             node.kubernetes.io/not-ready:NoExecute
                    node-role.kubernetes.io/master:NoSchedule
                    node.kubernetes.io/not-ready:NoSchedule
# Adjust the scheduling policy so the master can also run pods (remove the master taint)
kubectl taint nodes --all node-role.kubernetes.io/master-
[root@master ~]# kubectl taint nodes --all node-role.kubernetes.io/master-
node/master untainted

# Check the master's Taints field again
[root@master ~]# kubectl describe node master
Name:               master
Roles:              master
Taints:             node.kubernetes.io/not-ready:NoExecute
                    node.kubernetes.io/not-ready:NoSchedule

Configure the worker nodes

# Join the master node (use the kubeadm join command recorded earlier)
kubeadm join xxx

# Copy the /etc/kubernetes/admin.conf file from the master node to the same path on the worker:
# Run on the worker
scp root@master:/etc/kubernetes/admin.conf /etc/kubernetes/admin.conf

# Configure the environment variable
echo -e "\nexport KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source ~/.bash_profile

# Check node status
kubectl get nodes
# Without KUBECONFIG set, kubectl on the worker cannot reach the apiserver:
[root@worker01 ~]# kubectl get nodes
The connection to the server localhost:8080 was refused - did you specify the right host or port?
[root@worker01 ~]#
# Once KUBECONFIG points at admin.conf, the cluster is visible from the worker:
[root@worker136 ~]# kubectl get nodes
NAME                    STATUS   ROLES    AGE     VERSION
master                  Ready    master   7m6s    v1.18.18
worker136               Ready    <none>   2m22s   v1.18.18
[root@worker136 ~]#

Deploy the Dashboard visualization plugin

# If the Dashboard is deployed after the worker nodes have joined, it may end up running on a worker node

# Since version 1.7, a freshly deployed Dashboard can by default only be accessed locally through a proxy.
#kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.3/aio/deploy/recommended.yaml

#kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml
# Edit kubernetes-dashboard.yaml to allow remote access
vi kubernetes-dashboard.yaml
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  # Set the Service type to NodePort
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      # Expose the service on a node port; NodePorts must be in the 30000-32767 range by default
      nodePort: 30001
  selector:
    k8s-app: kubernetes-dashboard

# Deploy the Dashboard
kubectl apply -f dashboard/recommended.yaml

# Check the status of the Dashboard pods
kubectl get pods --all-namespaces
[root@master ~]# kubectl get pods -n kubernetes-dashboard
NAME                                         READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-6b4884c9d5-vktss   1/1     Running   0          76s
kubernetes-dashboard-6bcdf44897-pm9sk        1/1     Running   0          76s

# With the NodePort Service above, the Dashboard is reachable from outside the cluster.

# Command to retrieve a login token
kubectl -n kube-system describe $(kubectl -n kube-system get secret -n kube-system -o name | grep namespace) | grep token:
[root@master ~]# kubectl -n kube-system describe $(kubectl -n kube-system get secret -n kube-system -o name | grep namespace) | grep token:
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IkVabU8wUVpsVTZqMHFYTE0ySlc3Y3IwTFc1Rml2Q05QS180VFlWdzJISmsifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJuYW1lc3BhY2UtY29udHJvbGxlci10b2tlbi13cnpiOSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJuYW1lc3BhY2UtY29udHJvbGxlciIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjU4MjJjMDdhLWM0YzctNDRiZS1iMTNhLTljNjllMWMwNmUwNyIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTpuYW1lc3BhY2UtY29udHJvbGxlciJ9.bPJ5DCn-7qJiojdzTWBhOSrFXpzmbWo-fQteqJikWArs7zXo9sSAxXoK32sg2GPnDV1qsuCN_0cj3KszzCgdKbEQGSgNqSO5HaygFfVwxOtbkHp7eeZ4IP16M6d-QZ82-J7KFJKW-npDeyRAVI62ReIZYztk9U7kJTi6yYIekQ6Kr1C_4km_Q-KEUEWfvFhUkqMrUvmCmJM9H_qT2JONuEm1iyWaWl6mrqXLhmHl-m9Z4TQ48wfcAX8aPvDI8w51L0wG8-ZpcQd7EyO2fS-qnYt_fsWD82Ag4kzYE65OmWb1ronJTcfZLKdO6QkAgXy7Er7pQw81-J2fi33Id5WIbQ
[root@master ~]#
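
# The token above belongs to the namespace-controller service account and may not have enough
# permissions to view every resource in the Dashboard. A common alternative (a sketch only; the
# account name dashboard-admin is arbitrary) is to create a dedicated admin service account:
kubectl create serviceaccount dashboard-admin -n kubernetes-dashboard
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:dashboard-admin
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep dashboard-admin | awk '{print $1}') | grep token: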

# Access the Dashboard
# Use a node's IP plus the nodePort configured above.
https://<node-ip>:<node-port>
https://192.168.237.130:30001
# You can log in with the token above,
# or with a kubeconfig file: copy $HOME/.kube/config and append [    token: xxx] at the end of the file
cp $HOME/.kube/config $HOME/config
echo "    "$(kubectl -n kube-system describe $(kubectl -n kube-system get secret -n kube-system -o name | grep namespace) | grep token:) >> $HOME/config

Deploy the container storage plugin



Deploy Ingress