Install docker-ce

#yum install -y docker-ce
# To pin a specific version (needed to match kubeadm):
#yum install -y docker-ce-17.03.3.ce

# Package the yum-cached RPMs
ls -al /var/cache/yum/x86_64/7/*/packages/
tar -zcvf k8s.tar.gz /var/cache/yum/x86_64/7/*/packages/
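# Note: yum only keeps downloaded RPMs under /var/cache/yum when keepcache is
# enabled; if the packages/ directories above are empty, enable it before
# installing (a sketch, assuming the default CentOS 7 /etc/yum.conf):
#sed -i 's/^keepcache=0/keepcache=1/' /etc/yum.conf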

# Unpack
cd kubeadm1.11/
tar -zxvf k8s.tar.gz -C /
# docker-ce-17.03.3.ce is too old; install it manually from the cached RPMs
cd /var/cache/yum/x86_64/7/docker-ce-stable/packages/
yum install -y *.rpm
cd -
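# Verify the installed version (expected to show 17.03.3.ce):
rpm -q docker-ce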

# Change the Docker cgroup driver from "cgroupfs" to "systemd"
[root@localhost ~]# docker info |grep "Cgroup Driver"
Cgroup Driver: cgroupfs

#vi /usr/lib/systemd/system/docker.service
#ExecStart=/usr/bin/dockerd --exec-opt native.cgroupdriver=systemd
sed -i 's/\(^ExecStart=.*$\)/\1 --exec-opt native.cgroupdriver=systemd/' /usr/lib/systemd/system/docker.service

# Start Docker
systemctl daemon-reload
systemctl restart docker
systemctl enable docker
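# Verify the driver switch took effect (expected output: "Cgroup Driver: systemd"):
docker info | grep "Cgroup Driver"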

Deploy Kubernetes

# Configure the Kubernetes yum repository (Tsinghua mirror)
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=kubernetes
baseurl=https://mirrors.tuna.tsinghua.edu.cn/kubernetes/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=0
EOF

# Alternative: Aliyun mirror (this overwrites the repo file written above; use one or the other)
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF

# Install kubeadm; the kubelet, kubectl and kubernetes-cni binaries are installed automatically along with it.
yum install -y kubeadm
# To pin specific versions:
yum install -y kubeadm-1.11.1 kubelet-1.11.1 kubectl-1.11.1 kubernetes-cni-0.6.0

# Enable and start kubelet
systemctl enable kubelet.service
systemctl restart kubelet.service
systemctl status kubelet.service
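# Note: kubelet will keep restarting until kubeadm init/join writes its config;
# its logs can be checked with: journalctl -xeu kubelet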

Pre-configuration

# Add hosts entries
cat >> /etc/hosts << EOF
192.168.237.130 master
EOF

# Set the hostname (run the matching command on the master and on each worker)
hostnamectl set-hostname master
hostnamectl set-hostname worker01

# Disable the firewall
# [WARNING Firewalld]: firewalld is active; make sure ports [6443 10250] are open, or your cluster may not work correctly
systemctl stop firewalld
systemctl disable firewalld
systemctl status firewalld

# Disable SELinux
setenforce 0
sed -i 's/^\(SELINUX=\).*/\1disabled/' /etc/sysconfig/selinux
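# Verify (expected "Permissive" now; "Disabled" only takes effect after a reboot):
getenforce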

# Disable swap
swapoff -a
# Comment out the swap entry in /etc/fstab (the pattern below assumes a /dev... device path)
sed -i 's/\(^\/dev.*swap.*\)/# \1/' /etc/fstab
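# Verify swap is off (the Swap line should show 0 total):
free -m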

# Make sure the br_netfilter module is loaded; this can be checked by running lsmod | grep br_netfilter.
[root@localhost kubeadm]# lsmod | grep br_netfilter
br_netfilter           22256  0
bridge                151336  1 br_netfilter
[root@localhost kubeadm]#

# For iptables on the Linux node to see bridged traffic correctly, net.bridge.bridge-nf-call-iptables must be set to 1 in the sysctl config.
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sudo sysctl --system
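# (Optional) also load br_netfilter on boot so the sysctl settings apply after a
# restart (a sketch; the file name is arbitrary):
#modprobe br_netfilter   # if it is not already loaded
cat > /etc/modules-load.d/k8s-br_netfilter.conf << EOF
br_netfilter
EOF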

## Run kube-proxy in IPVS mode

# Install ipvsadm
yum install -y ipvsadm

# Load the IPVS kernel modules
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4
lsmod | grep ip_vs

# Load the IPVS modules on boot
cat >> /etc/modules-load.d/k8s-ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF

# Enable IP forwarding
cat >> /etc/sysctl.conf << EOF
net.ipv4.ip_forward = 1
EOF
sysctl -p

# Enable IPVS: edit the kube-proxy ConfigMap in kube-system and set the mode to ipvs
kubectl edit configmap kube-proxy -n kube-system
mode: "ipvs"
# mode is empty by default, which means iptables mode; change it to ipvs
# scheduler is empty by default, which means the round-robin (rr) load-balancing algorithm
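# After editing, the relevant fragment of config.conf should look roughly like
# this (a sketch; surrounding fields omitted):
#    ipvs:
#      scheduler: ""      # empty means the default round-robin (rr) algorithm
#    mode: "ipvs"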

# Delete all kube-proxy pods so they restart with the new mode
kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'

# Check the kube-proxy pod logs; the line "Using ipvs Proxier" confirms IPVS is in use
kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl logs "$1" -n kube-system")}' | grep "Using ipvs Proxier"

# Check the network interfaces: kube-proxy creates a dummy interface on the host (named kube-ipvs0) and assigns the Service VIPs to it as IP addresses
[root@master ~]# ip a
5: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default 
    link/ether 4a:ce:1d:89:0f:c6 brd ff:ff:ff:ff:ff:ff
    inet 10.96.0.1/32 brd 10.96.0.1 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.96.0.10/32 brd 10.96.0.10 scope global kube-ipvs0
       valid_lft forever preferred_lft forever

# The same configuration can be seen with ipvsadm
ipvsadm -ln
[root@master kubeadm1.11]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.96.0.1:443 rr
  -> 192.168.237.130:6443         Masq    1      0          0         
TCP  10.96.0.10:53 rr
  -> 10.32.0.1:53                 Masq    1      0          0         
  -> 10.32.0.6:53                 Masq    1      0          0         
UDP  10.96.0.10:53 rr
  -> 10.32.0.1:53                 Masq    1      0          0         
  -> 10.32.0.6:53                 Masq    1      0          0

Pull the images

# List the image versions required by the installed Kubernetes version:
kubeadm config images list
[root@localhost ~]# kubeadm config images list
k8s.gcr.io/kube-apiserver-amd64:v1.11.10
k8s.gcr.io/kube-controller-manager-amd64:v1.11.10
k8s.gcr.io/kube-scheduler-amd64:v1.11.10
k8s.gcr.io/kube-proxy-amd64:v1.11.10
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd-amd64:3.2.18
k8s.gcr.io/coredns:1.1.3
[root@localhost ~]#

Because of network restrictions in mainland China, the Kubernetes images hosted on Google's registry (gcr.io) cannot be pulled directly. A community maintainer syncs the gcr.io images daily to https://github.com/anjia0532/gcr.io_mirror, so images from gcr.io can be pulled by running a script like the following.
# Note: the mirror stopped being updated after 2019
vim pullimages.sh
#!/bin/bash
images=(kube-proxy-amd64:v1.11.1 kube-scheduler-amd64:v1.11.1 kube-controller-manager-amd64:v1.11.1
        kube-apiserver-amd64:v1.11.1 etcd-amd64:3.2.18 coredns:1.1.3 pause:3.1)
for imageName in ${images[@]} ; do
  docker pull anjia0532/google-containers.$imageName
  docker tag anjia0532/google-containers.$imageName k8s.gcr.io/$imageName
  docker rmi anjia0532/google-containers.$imageName
done

sh pullimages.sh


[root@localhost ~]# docker images
REPOSITORY                                 TAG                 IMAGE ID            CREATED             SIZE
rook/ceph                                  master              0cd4862e3c5e        About an hour ago   1.19 GB
weaveworks/weave-npc                       2.8.1               7f92d556d4ff        2 months ago        39.3 MB
weaveworks/weave-kube                      2.8.1               df29c0a4002c        2 months ago        89 MB
k8s.gcr.io/kubernetes-dashboard-amd64      v1.10.0             0dab2435c100        2 years ago         122 MB
k8s.gcr.io/kube-proxy-amd64                v1.11.1             d5c25579d0ff        2 years ago         97.8 MB
k8s.gcr.io/kube-controller-manager-amd64   v1.11.1             52096ee87d0e        2 years ago         155 MB
k8s.gcr.io/kube-scheduler-amd64            v1.11.1             272b3a60cd68        2 years ago         56.8 MB
k8s.gcr.io/kube-apiserver-amd64            v1.11.1             816332bd9d11        2 years ago         187 MB
k8s.gcr.io/coredns                         1.1.3               b3b94275d97c        2 years ago         45.6 MB
k8s.gcr.io/etcd-amd64                      3.2.18              b8df3b177be2        3 years ago         219 MB
k8s.gcr.io/pause                           3.1                 da86e6ba6ca1        3 years ago         742 kB
[root@localhost ~]#

## Move the images to another machine
# Save all images, including those used in later steps (run inside the k8s_img/ working directory that is archived below)
mkdir k8s.gcr.io  rook  weaveworks
docker images | grep -v REPOSITORY | awk '{print "docker save "$1":"$2" -o "$1$2".img"}'
# Run the commands printed by the previous line, i.e.:
docker save rook/ceph:master -o rook/cephmaster.img
docker save weaveworks/weave-npc:2.8.1 -o weaveworks/weave-npc2.8.1.img
docker save weaveworks/weave-kube:2.8.1 -o weaveworks/weave-kube2.8.1.img
docker save k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.0 -o k8s.gcr.io/kubernetes-dashboard-amd64v1.10.0.img
docker save k8s.gcr.io/kube-proxy-amd64:v1.11.1 -o k8s.gcr.io/kube-proxy-amd64v1.11.1.img
docker save k8s.gcr.io/kube-controller-manager-amd64:v1.11.1 -o k8s.gcr.io/kube-controller-manager-amd64v1.11.1.img
docker save k8s.gcr.io/kube-apiserver-amd64:v1.11.1 -o k8s.gcr.io/kube-apiserver-amd64v1.11.1.img
docker save k8s.gcr.io/kube-scheduler-amd64:v1.11.1 -o k8s.gcr.io/kube-scheduler-amd64v1.11.1.img
docker save k8s.gcr.io/coredns:1.1.3 -o k8s.gcr.io/coredns1.1.3.img
docker save k8s.gcr.io/etcd-amd64:3.2.18 -o k8s.gcr.io/etcd-amd643.2.18.img
docker save k8s.gcr.io/pause:3.1 -o k8s.gcr.io/pause3.1.img
# Compress the image directory
tar -zcvf k8s_img.tar.gz k8s_img

# Copy k8s_img.tar.gz to the other machine and unpack it
tar -xf k8s_img.tar.gz
cd k8s_img
# Load all images
for i in $(find . -name '*.img');do echo $i;docker load -i $i; done
docker images
cd ..

Configure the master node

# Write a YAML configuration file for kubeadm
vi kubeadm.yaml
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
controllerManagerExtraArgs:
  horizontal-pod-autoscaler-use-rest-clients: "true"
  horizontal-pod-autoscaler-sync-period: "10s"
  node-monitor-grace-period: "10s"
apiServerExtraArgs:
  runtime-config: "api/all=true"
kubernetesVersion: "v1.11.1"

# Deploy the Kubernetes master
kubeadm init --config kubeadm.yaml
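# If init fails partway, the node can be cleaned up and init re-run
# (note: kubeadm reset wipes the kubeadm state on this node):
#kubeadm reset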

This completes the deployment of the Kubernetes master; the whole process only takes a few minutes. When it finishes, the output looks like this:

Your Kubernetes master has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of machines by running the following on each node
as root:

  kubeadm join 192.168.xxx.xxx:6443 --token xxxxxx.xxxxxxxxxxxxxxxx --discovery-token-ca-cert-hash sha256:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

Configure kubectl authentication with the apiserver

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# This kubeadm join command is what adds more worker nodes to this master.
# It will be needed shortly when deploying the worker nodes, so record it somewhere.
kubeadm join 192.168.237.130:6443 --token p6q3rb.beohpezlpkw6n8i5 --discovery-token-ca-cert-hash sha256:104d0386f8d6329ef47980fa5d9cd9c18bb2dbd20107fe9589402c3451a3b412
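# If the token above has expired (tokens are valid for 24h by default), a new
# join command can be generated on the master:
#kubeadm token create --print-join-command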

# Check component health
kubectl get cs
[root@localhost ~]# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health": "true"}

# Check node status
kubectl get nodes
[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES     AGE       VERSION
master   NotReady   master    56m       v1.11.1

# Inspect the Node object's details, conditions and events
kubectl describe node master
Conditions:
  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----             ------  -----------------                 ------------------                ------                       -------
  OutOfDisk        False   Wed, 21 Apr 2021 23:59:07 -0400   Wed, 21 Apr 2021 23:55:44 -0400   KubeletHasSufficientDisk     kubelet has sufficient disk space available
  MemoryPressure   False   Wed, 21 Apr 2021 23:59:07 -0400   Wed, 21 Apr 2021 23:55:44 -0400   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure     False   Wed, 21 Apr 2021 23:59:07 -0400   Wed, 21 Apr 2021 23:55:44 -0400   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure      False   Wed, 21 Apr 2021 23:59:07 -0400   Wed, 21 Apr 2021 23:55:44 -0400   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready            False   Wed, 21 Apr 2021 23:59:07 -0400   Wed, 21 Apr 2021 23:55:44 -0400   KubeletNotReady              runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized
Addresses:
  InternalIP:  192.168.237.133
  Hostname:    master

# Check the status of the system pods on this node
kubectl get pods -n kube-system
[root@master ~]# kubectl get pods -n kube-system
NAME                             READY     STATUS     RESTARTS   AGE
coredns-78fcdf6894-kxt2n         0/1       Pending    0          12m
coredns-78fcdf6894-xcnxc         0/1       Pending    0          12m
etcd-master                      0/1       Pending    0          4s
kube-apiserver-master            0/1       Pending    0          4s
kube-controller-manager-master   0/1       Pending    0          4s
kube-proxy-h4hxh                 1/1       Pending    0          12m
kube-scheduler-master            0/1       Pending    0          4s

Deploy the network plugin

# This URL is no longer reachable
#kubectl apply -f https://git.io/weave-kube-1.6
# Reference: https://www.cnblogs.com/Irving/p/9818440.html
# Use one of these two URLs instead
#kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
#curl "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" > cloud.weave.works.yaml
#kubectl apply -f https://github.com/weaveworks/weave/releases/download/v2.5.0/weave-daemonset-k8s-1.8.yaml
kubectl apply -f weave-kube/cloud.weave.works.yaml

[root@localhost ~]#  kubectl get pods -n kube-system
NAME                                            READY     STATUS    RESTARTS   AGE
coredns-78fcdf6894-2wpnm                        1/1       Running   0          20m
coredns-78fcdf6894-7vzgs                        1/1       Running   0          20m
etcd-localhost.localdomain                      1/1       Running   0          19m
kube-apiserver-localhost.localdomain            1/1       Running   0          19m
kube-controller-manager-localhost.localdomain   1/1       Running   0          19m
kube-proxy-t5wkg                                1/1       Running   0          20m
kube-scheduler-localhost.localdomain            1/1       Running   0          19m
weave-net-9m9hw                                 2/2       Running   1          1m      <--- this one

The Taint/Toleration mechanism

# Check the Taints field on the master node
[root@master ~]# kubectl describe node master
Name:               master
Roles:              master
Taints:             node-role.kubernetes.io/master:NoSchedule

# Adjust the policy so the master can also run pods (remove the master taint)
kubectl taint nodes --all node-role.kubernetes.io/master-
[root@master ~]# kubectl taint nodes --all node-role.kubernetes.io/master-
node/master untainted

# Check the Taints field on the master node again
[root@master ~]# kubectl describe node master
Name:               master
Roles:              master
Taints:             <none>

Configure the worker nodes

# Join the master node (use the kubeadm join command recorded earlier)
kubeadm join xxx

# Copy /etc/kubernetes/admin.conf from the master node to the same path on the worker:
# Run on the worker
scp root@master:/etc/kubernetes/admin.conf /etc/kubernetes/admin.conf

# Configure the environment variable
echo -e "\nexport KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source ~/.bash_profile

# Check node status
kubectl get nodes
[root@worker01 ~]# kubectl get nodes
The connection to the server localhost:8080 was refused - did you specify the right host or port?
[root@worker01 ~]#
[root@worker132 ~]# kubectl get nodes
NAME        STATUS     ROLES     AGE       VERSION
master      NotReady   master    8m        v1.11.1
worker132   NotReady   <none>    53s       v1.11.1
[root@worker132 ~]#

Deploy the Dashboard visualization plugin

# With worker nodes already configured, the Dashboard may end up scheduled on a worker when it is deployed

# Pull the image
docker pull anjia0532/google-containers.kubernetes-dashboard-amd64:v1.10.0
docker tag  anjia0532/google-containers.kubernetes-dashboard-amd64:v1.10.0   k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.0
docker rmi  anjia0532/google-containers.kubernetes-dashboard-amd64:v1.10.0

# Since version 1.7, the Dashboard can by default only be accessed locally via kubectl proxy once deployed.
#kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml
#https://github.com/kubernetes/dashboard/archive/refs/tags/v1.10.0.tar.gz
# Edit kubernetes-dashboard.yaml to allow remote access
vi kubernetes-dashboard.yaml
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  # Added: set the Service type to NodePort
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      # Added: the port exposed on the node (VM); NodePorts must be in the default 30000-32767 range
      nodePort: 30001
  selector:
    k8s-app: kubernetes-dashboard

# Deploy the Dashboard visualization plugin
kubectl apply -f dashboard/kubernetes-dashboard.yaml

# Check the Dashboard pod status
kubectl get pods -n kube-system
[root@localhost ~]# kubectl get pods -n kube-system
NAME                                   READY     STATUS    RESTARTS   AGE
coredns-78fcdf6894-kxt2n               1/1       Running   0          1h
coredns-78fcdf6894-xcnxc               1/1       Running   0          1h
etcd-master                            1/1       Running   0          1h
kube-apiserver-master                  1/1       Running   0          1h
kube-controller-manager-master         1/1       Running   0          1h
kube-proxy-h4hxh                       1/1       Running   0          1h
kube-scheduler-master                  1/1       Running   0          1h
kubernetes-dashboard-767dc7d4d-s9vvl   1/1       Running   0          8s    <--- this one
weave-net-5srrs                        2/2       Running   1          1h
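# Confirm the NodePort Service is in place (expected to show 443:30001/TCP):
kubectl get svc -n kube-system | grep kubernetes-dashboard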

# Allow external access: not needed here, since the port is already exposed through the Service nodePort
#nohup  kubectl proxy --address='0.0.0.0'  --accept-hosts='^*$'  --disable-filter=true &

# Command to get a login token
kubectl -n kube-system describe $(kubectl -n kube-system get secret -n kube-system -o name | grep namespace) | grep token:
[root@master ~]# kubectl -n kube-system describe $(kubectl -n kube-system get secret -n kube-system -o name | grep namespace) | grep token:
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJuYW1lc3BhY2UtY29udHJvbGxlci10b2tlbi1xcGJyeCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJuYW1lc3BhY2UtY29udHJvbGxlciIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjYxOTkwOWExLWE3NGEtMTFlYi04NTZkLTAwMGMyOWYxMzRiZCIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTpuYW1lc3BhY2UtY29udHJvbGxlciJ9.SI0QAd4b56LdW0Y8lcPEiG_7VJzzZUN08y8VkLcJGNX_TUJhpNFKFxk33-UHpzJHu4oHPL6hyhdXHLK0CRI3T5spAeBL5H8w8JMlHOEX3Og6t5z9r59-gkKO_Svp1rCvFg2QIK7VLbz1EWc5rwCuEY9MHTUYqCuCfQF4SHVVWF5tWbr4jEOLY6PqtQADt7_hBI_pjJKKvisR79RbuJNYh8LfxwyNuxME6SGPckJ58rukbKZarNtene0xuHBSOj6bSr65Z08Wm5r5crNmNQgveJAqMOGhOh4fKuzpImZo5oUhY07Zm2jCCNEF0BE-h4CqFDXeeTNkQiH5MoBFvgwi1w
[root@master ~]#

# Access the Dashboard
# It is reachable via a node IP plus the nodePort configured above.
https://<node-ip>:<node-port>
https://192.168.237.130:30001
# Log in with the token above,
# or with a kubeconfig file: make a copy of $HOME/.kube/config and append [    token: xxx] at the end
cp $HOME/.kube/config $HOME/config
echo "    "$(kubectl -n kube-system describe $(kubectl -n kube-system get secret -n kube-system -o name | grep namespace) | grep token:) >> $HOME/config

Deploy the container storage plugin

#CentOS Linux release 7.6.1810 (Core)
#Kubernetes v1.14.1
# Deploying Rook by following its docs produces errors
# Fixes:
#Error from server (NotFound): error when creating "operator.yaml": namespaces "rook-ceph" not found
# apply common.yaml first, then run the remaining applies
#kubectl apply -f https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/ceph/common.yaml
#
#error: unable to recognize "cluster.yaml": no matches for kind "CephCluster" in version "ceph.rook.io/v1"
# In later Rook versions the CRDs were split out and have to be installed separately
#kubectl apply -f https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/ceph/crds.yaml
#
#https://rook.github.io/docs/rook/master/ceph-quickstart.html
# Rook supports Kubernetes v1.11 or newer.         file:///rook-master.zip
# Important: on K8s 1.15 or earlier, a different set of Rook CRDs must be created; use the crds.yaml found in the pre-k8s-1.16 subfolder of the example manifests.
#kubectl apply -f https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/pre-k8s-1.16/crds.yaml
#
#kubectl apply -f https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/ceph/operator.yaml
#kubectl apply -f https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/ceph/cluster.yaml


# Deploy the container storage plugin
kubectl apply -f ceph/common.yaml
#kubectl apply -f ceph/crds.yaml
kubectl apply -f ceph/pre-k8s-1.16/crds.yaml
kubectl apply -f ceph/operator.yaml
kubectl apply -f ceph/cluster.yaml

# Check the Rook pods
kubectl get pods -n rook-ceph-system
kubectl get pods -n rook-ceph

[root@localhost ~]# kubectl get pods -n rook-ceph-system
No resources found.
[root@localhost ~]#
[root@localhost ~]# kubectl get pods -n rook-ceph
NAME                                  READY     STATUS    RESTARTS   AGE
rook-ceph-operator-7f5854bddc-4txgm   1/1       Running   7          27m
[root@localhost ~]#

Deploy Ingress

# Deploy the Nginx Ingress Controller
#https://github.com/kubernetes/ingress-nginx
#ingress-nginx-nginx-0.20.0.tar.gz\ingress-nginx-nginx-0.20.0\deploy\mandatory.yaml
kubectl apply -f ingress-nginx/mandatory.yaml

# Expose the Nginx service managed by the Nginx Ingress Controller
#ingress-nginx-nginx-0.20.0.tar.gz\ingress-nginx-nginx-0.20.0\deploy\provider\baremetal\service-nodeport.yaml
kubectl apply -f ingress-nginx/service-nodeport.yaml

# Record this Service's access endpoint
kubectl get svc -n ingress-nginx
[root@master kubeadm1.11]# kubectl get svc -n ingress-nginx
NAME                   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE
default-http-backend   ClusterIP   10.101.233.24   <none>        80/TCP                       2m
ingress-nginx          NodePort    10.96.104.197   <none>        80:32194/TCP,443:30787/TCP   1m

# For convenience later on, set the access endpoint as environment variables:
IC_IP=192.168.237.130 # the address of any node in the cluster
IC_HTTPS_PORT=30787 # the HTTPS NodePort


# All the example files for this "cafe" Ingress are at https://github.com/resouer/kubernetes-ingress/tree/master/examples/complete-example
# kubernetes-ingress-master.zip\kubernetes-ingress-master\examples\complete-example\cafe.yaml

# Deploy the application pods and their corresponding Services
kubectl apply -f ingress-nginx/cafe.yaml

# Create the SSL certificate (tls.crt) and key (tls.key) the Ingress needs; they are defined in a Secret object
kubectl apply -f ingress-nginx/cafe-secret.yaml

# Create the Ingress object defined at the start of the article
kubectl apply -f ingress-nginx/cafe-ingress.yaml

# Inspect the Ingress object
kubectl get ingress
kubectl describe ingress cafe-ingress

curl --resolve cafe.example.com:$IC_HTTPS_PORT:$IC_IP https://cafe.example.com:$IC_HTTPS_PORT/tea --insecure
[root@master kubeadm1.11]# curl --resolve cafe.example.com:$IC_HTTPS_PORT:$IC_IP https://cafe.example.com:$IC_HTTPS_PORT/tea --insecure
Server address: 10.32.0.11:80
Server name: tea-7d57856c44-mbl7r
Date: 12/May/2021:16:42:32 +0000
URI: /tea
Request ID: e34e90245ec84c7b3f7b9820aa388100

curl --resolve cafe.example.com:$IC_HTTPS_PORT:$IC_IP https://cafe.example.com:$IC_HTTPS_PORT/coffee --insecure
[root@master kubeadm1.11]# curl --resolve cafe.example.com:$IC_HTTPS_PORT:$IC_IP https://cafe.example.com:$IC_HTTPS_PORT/coffee --insecure
Server address: 10.32.0.14:80
Server name: coffee-7dbb5795f6-dqzcn
Date: 12/May/2021:16:43:11 +0000
URI: /coffee
Request ID: ab1b6d89280d654957f7136c1a294d79