
Deploying Kubernetes 1.16.0 from binaries

  • Author: zccc
  • Source: network (originally published 2020-06-23 on 51CTO by juestnow, 系统运维 column)

Environment

Operating system: CentOS Linux release 7.7.1908 (Core)
Kernel version: 3.10.0-1062.1.1.el7.x86_64
Workstation: Ubuntu 19.04 on Windows 10
ETCD deployment IP: 192.168.30.50
ETCD version: v3.4.1
kube-apiserver, kube-scheduler, kube-controller-manager deployment IP: 192.168.30.52
kubelet deployment IPs: 192.168.30.52, 192.168.30.51
flannel version: v0.11.0
cni version: v0.8.2
kubernetes version: 1.16.0
Local working directory: /root/work
Remote server working directory: /apps/<service name>
kubernetes service CIDR: 10.66.0.0/16
Pod CIDR: 10.67.0.0/16
Preparation
# Create the working directory
mkdir /root/work
cd /root/work
# Download the binary packages
wget https://storage.googleapis.com/kubernetes-release/release/v1.16.0/kubernetes-server-linux-amd64.tar.gz
wget https://github.com/etcd-io/etcd/releases/download/v3.4.1/etcd-v3.4.1-linux-amd64.tar.gz
wget https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz
# Unpack the archives
tar -xvf kubernetes-server-linux-amd64.tar.gz
tar -xvf etcd-v3.4.1-linux-amd64.tar.gz
# The cni-plugins archive has no top-level folder, so create a cni directory first
mkdir cni
cd cni
mv ../cni-plugins-linux-amd64-v0.8.2.tgz ./
tar -xvf cni-plugins-linux-amd64-v0.8.2.tgz
# Remove unneeded files and build a directory layout that is easy to distribute
rm cni-plugins-linux-amd64-v0.8.2.tgz
cd ../etcd-v3.4.1-linux-amd64
rm -rf Documentation  README-etcdctl.md  README.md  READMEv2-etcdctl.md
mkdir -p {bin,ssl,conf,data}
mv etcd* ./bin
cd ../kubernetes/server/bin/
rm -rf *.tar
rm -rf *_tag
rm -rf apiextensions-apiserver  hyperkube  kubeadm mounter
# Back up the old kubectl
mv /bin/kubectl /bin/kubectl1.14
cp kubectl /bin/kubectl
ETCD deployment
# Return to the top-level working directory /root/work
cd /root/work
# Create the ssl config file directory
mkdir -p cfssl/
# Create the CA config JSON
cat << EOF | tee ./cfssl/ca-config.json
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
EOF
# Create the etcd CA CSR config
mkdir -p ./cfssl/etcd
cat << EOF | tee ./cfssl/etcd/etcd-ca-csr.json
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF
# Generate the ETCD CA certificate and private key
mkdir -p ./cfssl/pki/etcd
cfssl gencert -initca ./cfssl/etcd/etcd-ca-csr.json | \
cfssljson -bare ./cfssl/pki/etcd/etcd-ca
# Create the ETCD server certificate
export ETCD_SERVER_IPS=" \
    \"192.168.30.50\" \
" && \
export ETCD_SERVER_HOSTNAMES=" \
    \"etcd\" \
" && \
cat << EOF | tee ./cfssl/etcd/etcd_server.json
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    ${ETCD_SERVER_IPS},
    ${ETCD_SERVER_HOSTNAMES}
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF
# Generate the ETCD server certificate and private key
cfssl gencert \
-ca=./cfssl/pki/etcd/etcd-ca.pem \
-ca-key=./cfssl/pki/etcd/etcd-ca-key.pem \
-config=./cfssl/ca-config.json \
-profile=kubernetes \
./cfssl/etcd/etcd_server.json | \
cfssljson -bare ./cfssl/pki/etcd/etcd_server

# Create the ETCD member certificate
export ETCD_MEMBER_1_IP=" \
    \"192.168.30.50\" \
" && \
export ETCD_MEMBER_1_HOSTNAMES="etcd" \
 && \
cat << EOF | tee ./cfssl/etcd/${ETCD_MEMBER_1_HOSTNAMES}.json
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    ${ETCD_MEMBER_1_IP},
    "${ETCD_MEMBER_1_HOSTNAMES}"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF
##### Generate the ETCD member 1 certificate and private key
cfssl gencert \
    -ca=./cfssl/pki/etcd/etcd-ca.pem \
    -ca-key=./cfssl/pki/etcd/etcd-ca-key.pem \
    -config=./cfssl/ca-config.json \
    -profile=kubernetes \
    ./cfssl/etcd/${ETCD_MEMBER_1_HOSTNAMES}.json | \
    cfssljson -bare ./cfssl/pki/etcd/etcd_member_${ETCD_MEMBER_1_HOSTNAMES}

# Create the ETCD client config file
cat << EOF | tee ./cfssl/etcd/etcd_client.json
{
  "CN": "client",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF
# Generate the ETCD client certificate and private key

cfssl gencert \
-ca=./cfssl/pki/etcd/etcd-ca.pem \
-ca-key=./cfssl/pki/etcd/etcd-ca-key.pem \
-config=./cfssl/ca-config.json \
-profile=kubernetes \
./cfssl/etcd/etcd_client.json | \
cfssljson -bare ./cfssl/pki/etcd/etcd_client
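All etcd certificates are now in place. As a quick sanity check (a minimal sketch, not part of the original procedure), verify that a certificate chains to the etcd CA and carries the expected SANs:
# the server cert must be signed by the etcd CA
openssl verify -CAfile ./cfssl/pki/etcd/etcd-ca.pem ./cfssl/pki/etcd/etcd_server.pem
# the SAN list should include 127.0.0.1, 192.168.30.50 and etcd
openssl x509 -in ./cfssl/pki/etcd/etcd_server.pem -noout -text | grep -A1 'Subject Alternative Name'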
# Copy the certificates into the etcd distribution directory
cp -pdr ./cfssl/pki/etcd/*  ./etcd-v3.4.1-linux-amd64/ssl
# Create the etcd startup options file
cd ./etcd-v3.4.1-linux-amd64/conf
vi etcd
ETCD_OPTS="--name=etcd \
           --data-dir=/apps/etcd/data/default.etcd \
           --listen-peer-urls=https://192.168.30.50:2380 \
           --listen-client-urls=https://192.168.30.50:2379,https://127.0.0.1:2379 \
           --advertise-client-urls=https://192.168.30.50:2379 \
           --initial-advertise-peer-urls=https://192.168.30.50:2380 \
           --initial-cluster=etcd=https://192.168.30.50:2380 \
           --initial-cluster-token=node4=etcd=https://192.168.30.50:2380 \
           --initial-cluster-state=new \
           --heartbeat-interval=6000 \
           --election-timeout=30000 \
           --snapshot-count=5000 \
           --auto-compaction-retention=1 \
           --max-request-bytes=33554432 \
           --quota-backend-bytes=17179869184 \
           --trusted-ca-file=/apps/etcd/ssl/etcd-ca.pem \
           --cert-file=/apps/etcd/ssl/etcd_server.pem \
           --key-file=/apps/etcd/ssl/etcd_server-key.pem \
           --peer-cert-file=/apps/etcd/ssl/etcd_member_etcd.pem \
           --peer-key-file=/apps/etcd/ssl/etcd_member_etcd-key.pem \
           --peer-client-cert-auth \
           --peer-trusted-ca-file=/apps/etcd/ssl/etcd-ca.pem"
# Create the directory on the remote server
ssh 192.168.30.50 mkdir -p /apps/etcd
# Distribute etcd to the remote server
# back to etcd-v3.4.1-linux-amd64
cd ../
scp -r * 192.168.30.50:/apps/etcd
# Create the etcd account on the remote server
ssh 192.168.30.50 useradd etcd -s /sbin/nologin -M
# Give the etcd account ownership of the remote etcd directory
ssh 192.168.30.50 chown -R etcd.etcd /apps/etcd
# Create etcd.service
vi etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
User=etcd
Group=etcd
EnvironmentFile=-/apps/etcd/conf/etcd
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /apps/etcd/bin/etcd $ETCD_OPTS"
Restart=on-failure
[Install]
WantedBy=multi-user.target
# Distribute etcd.service to the remote server
scp etcd.service 192.168.30.50:/usr/lib/systemd/system/etcd.service
# Start etcd
ssh 192.168.30.50 systemctl start etcd
# Check that it started successfully
ssh 192.168.30.50  systemctl status etcd
[root@]~/work/etcd-v3.4.1-linux-amd64]#ssh 192.168.30.50  systemctl status etcd
● etcd.service - Etcd Server
   Loaded: loaded (/usr/lib/systemd/system/etcd.service; enabled; vendor preset: disabled)
   Active: active (running) since Thu 2019-09-19 16:54:34 CST; 17h ago
 Main PID: 9321 (etcd)
   CGroup: /system.slice/etcd.service
           └─9321 /apps/etcd/bin/etcd --name=etcd --data-dir=/apps/etcd/data/default.etcd --listen-peer-urls=https://192.168.30.50:2380 --listen-client-urls=https://192.168.30.50:2379,https://127.0.0.1:2379 --advertise-client-urls=https://192.168.30.50:2379 --initial-advertise-peer-urls=https://192.168.30.50:2380 --initial-cluster=etcd=https://192.168.30.50:2380 --initial-cluster-token=node4=etcd=https://192.168.30.50:2380 --initial-cluster-state=new --heartbeat-interval=6000 --election-timeout=30000 --snapshot-count=5000 --auto-compaction-retention=1 --max-request-bytes=33554432 --quota-backend-bytes=17179869184 --trusted-ca-file=/apps/etcd/ssl/etcd-ca.pem --cert-file=/apps/etcd/ssl/etcd_server.pem --key-file=/apps/etcd/ssl/etcd_server-key.pem --peer-cert-file=/apps/etcd/ssl/etcd_member_etcd.pem --peer-key-file=/apps/etcd/ssl/etcd_member_etcd-key.pem --peer-client-cert-auth --peer-trusted-ca-file=/apps/etcd/ssl/etcd-ca.pem

Sep 20 10:10:02 etcd etcd[9321]: store.index: compact 79182
Sep 20 10:10:02 etcd etcd[9321]: finished scheduled compaction at 79182 (took 1.966939ms)
# Enable start on boot
ssh 192.168.30.50 systemctl enable etcd
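Optionally, verify the member is healthy with etcdctl (a minimal sketch, not from the original; run it on 192.168.30.50, reusing the client certificate generated above):
# etcd v3.4 speaks the v3 API; point etcdctl at the TLS endpoint
ETCDCTL_API=3 /apps/etcd/bin/etcdctl \
    --endpoints=https://192.168.30.50:2379 \
    --cacert=/apps/etcd/ssl/etcd-ca.pem \
    --cert=/apps/etcd/ssl/etcd_client.pem \
    --key=/apps/etcd/ssl/etcd_client-key.pem \
    endpoint health
# expected: https://192.168.30.50:2379 is healthy: successfully committed proposal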
Kubernetes certificate preparation
# Create the kube-apiserver CA CSR config
mkdir -p ./cfssl/k8s
cat << EOF | tee ./cfssl/k8s/k8s-ca-csr.json
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF
# Generate the Kubernetes CA certificate and private key

mkdir -p ./cfssl/pki/k8s
cfssl gencert -initca ./cfssl/k8s/k8s-ca-csr.json | \
cfssljson -bare ./cfssl/pki/k8s/k8s-ca
# Create the Kubernetes API server certificate config file
export K8S_APISERVER_VIP=" \
    \"192.168.30.52\" \
" && \
export K8S_APISERVER_SERVICE_CLUSTER_IP="10.66.0.1" && \
export K8S_APISERVER_HOSTNAME="api.k8s.cluster.local" && \
export K8S_CLUSTER_DOMAIN_SHORTNAME="cluster" && \
export K8S_CLUSTER_DOMAIN_FULLNAME="cluster.local" && \
cat << EOF | tee ./cfssl/k8s/k8s_apiserver.json
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    ${K8S_APISERVER_VIP},
    "${K8S_APISERVER_SERVICE_CLUSTER_IP}",
    "${K8S_APISERVER_HOSTNAME}",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.${K8S_CLUSTER_DOMAIN_SHORTNAME}",
    "kubernetes.default.svc.${K8S_CLUSTER_DOMAIN_FULLNAME}"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF
# Generate the Kubernetes API server certificate and private key

cfssl gencert \
-ca=./cfssl/pki/k8s/k8s-ca.pem \
-ca-key=./cfssl/pki/k8s/k8s-ca-key.pem \
-config=./cfssl/ca-config.json \
-profile=kubernetes \
./cfssl/k8s/k8s_apiserver.json | \
cfssljson -bare ./cfssl/pki/k8s/k8s_server
# Create the Kubernetes aggregator (webhook) certificate config file
cat << EOF | tee ./cfssl/k8s/aggregator.json
{
  "CN": "aggregator",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF
# Generate the Kubernetes aggregator (webhook) certificate
cfssl gencert \
    -ca=./cfssl/pki/k8s/k8s-ca.pem \
    -ca-key=./cfssl/pki/k8s/k8s-ca-key.pem \
    -config=./cfssl/ca-config.json \
    -profile=kubernetes \
    ./cfssl/k8s/aggregator.json | \
    cfssljson -bare ./cfssl/pki/k8s/aggregator
# Create the Kubernetes admin certificate config file
cat << EOF | tee ./cfssl/k8s/k8s_apiserver_admin.json
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "system:masters",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF
# Generate the Kubernetes admin certificate
cfssl gencert -ca=./cfssl/pki/k8s/k8s-ca.pem \
    -ca-key=./cfssl/pki/k8s/k8s-ca-key.pem \
    -config=./cfssl/ca-config.json \
    -profile=kubernetes \
    ./cfssl/k8s/k8s_apiserver_admin.json | \
    cfssljson -bare ./cfssl/pki/k8s/k8s_apiserver_admin
# Create the kube-scheduler certificate config file
cat << EOF | tee ./cfssl/k8s/k8s_scheduler.json
{
  "CN": "system:kube-scheduler",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

# Generate the Kubernetes scheduler certificate and private key
cfssl gencert \
    -ca=./cfssl/pki/k8s/k8s-ca.pem \
    -ca-key=./cfssl/pki/k8s/k8s-ca-key.pem \
    -config=./cfssl/ca-config.json \
    -profile=kubernetes \
    ./cfssl/k8s/k8s_scheduler.json | \
    cfssljson -bare ./cfssl/pki/k8s/k8s_scheduler
# Create the kube-controller-manager certificate config file
cat << EOF | tee ./cfssl/k8s/k8s_controller_manager.json
{
  "CN": "system:kube-controller-manager",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

## Generate the Kubernetes controller manager certificate and private key
cfssl gencert \
    -ca=./cfssl/pki/k8s/k8s-ca.pem \
    -ca-key=./cfssl/pki/k8s/k8s-ca-key.pem \
    -config=./cfssl/ca-config.json \
    -profile=kubernetes \
    ./cfssl/k8s/k8s_controller_manager.json | \
    cfssljson -bare ./cfssl/pki/k8s/k8s_controller_manager
# Create the flannel certificate config
cat << EOF | tee ./cfssl/k8s/flannel.json
{
  "CN": "flannel",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "system:masters",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

## Generate the flannel certificate and private key
cfssl gencert \
        -ca=./cfssl/pki/k8s/k8s-ca.pem \
        -ca-key=./cfssl/pki/k8s/k8s-ca-key.pem \
        -config=./cfssl/ca-config.json \
        -profile=kubernetes \
         ./cfssl/k8s/flannel.json | \
         cfssljson -bare ./cfssl/pki/k8s/flannel
# Create the kube-proxy certificate config
cat << EOF | tee ./cfssl/k8s/kube-proxy.json
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "system:masters",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF
## Generate the kube-proxy certificate and private key
cfssl gencert \
        -ca=./cfssl/pki/k8s/k8s-ca.pem \
        -ca-key=./cfssl/pki/k8s/k8s-ca-key.pem \
        -config=./cfssl/ca-config.json \
        -profile=kubernetes \
         ./cfssl/k8s/kube-proxy.json | \
         cfssljson -bare ./cfssl/pki/k8s/kube-proxy
# Create the kubernetes-dashboard certificate config
cat << EOF | tee ./cfssl/k8s/dashboard.json
{
  "CN": "dashboard",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF
##### Generate the kubernetes-dashboard certificate
cfssl gencert \
        -ca=./cfssl/pki/k8s/k8s-ca.pem \
        -ca-key=./cfssl/pki/k8s/k8s-ca-key.pem \
        -config=./cfssl/ca-config.json \
        -profile=kubernetes \
        ./cfssl/k8s/dashboard.json | \
        cfssljson -bare ./cfssl/pki/k8s/dashboard
# Create the metrics-server certificate config
cat << EOF | tee ./cfssl/k8s/metrics-server.json
{
  "CN": "metrics-server",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF
# Generate the metrics-server certificate
cfssl gencert -ca=./cfssl/pki/k8s/k8s-ca.pem \
    -ca-key=./cfssl/pki/k8s/k8s-ca-key.pem \
    -config=./cfssl/ca-config.json \
    -profile=kubernetes ./cfssl/k8s/metrics-server.json | \
        cfssljson -bare ./cfssl/pki/k8s/metrics-server
Create the Kubernetes kubeconfig files
Set the environment variable
export KUBE_APISERVER="https://192.168.30.52:5443"
# Create the admin kubeconfig
# Set cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=./cfssl/pki/k8s/k8s-ca.pem \
--embed-certs=true  \
--server=${KUBE_APISERVER} \
--kubeconfig=admin.kubeconfig
# Set client credentials
kubectl config set-credentials admin \
 --client-certificate=./cfssl/pki/k8s/k8s_apiserver_admin.pem \
 --client-key=./cfssl/pki/k8s/k8s_apiserver_admin-key.pem \
 --embed-certs=true \
 --kubeconfig=admin.kubeconfig
# Set context parameters
kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=admin \
--namespace=kube-system \
--kubeconfig=admin.kubeconfig
# Set the default context
kubectl config use-context kubernetes --kubeconfig=admin.kubeconfig
# Create the kube-scheduler kubeconfig
# Set cluster parameters
kubectl config set-cluster kubernetes \
    --certificate-authority=./cfssl/pki/k8s/k8s-ca.pem \
    --embed-certs=true \
    --server=${KUBE_APISERVER} \
    --kubeconfig=kube_scheduler.kubeconfig
# Set client credentials
kubectl config set-credentials system:kube-scheduler \
    --client-certificate=./cfssl/pki/k8s/k8s_scheduler.pem \
    --embed-certs=true \
    --client-key=./cfssl/pki/k8s/k8s_scheduler-key.pem \
    --kubeconfig=kube_scheduler.kubeconfig
# Set context parameters
kubectl config set-context kubernetes \
    --cluster=kubernetes \
    --user=system:kube-scheduler \
    --kubeconfig=kube_scheduler.kubeconfig
# Set the default context
kubectl config use-context kubernetes --kubeconfig=kube_scheduler.kubeconfig
# Create the kube-controller-manager kubeconfig
# Set cluster parameters
kubectl config set-cluster kubernetes \
   --certificate-authority=./cfssl/pki/k8s/k8s-ca.pem \
   --embed-certs=true \
   --server=${KUBE_APISERVER} \
   --kubeconfig=kube_controller_manager.kubeconfig
# Set client credentials
kubectl config set-credentials system:kube-controller-manager \
   --client-certificate=./cfssl/pki/k8s/k8s_controller_manager.pem \
   --embed-certs=true \
   --client-key=./cfssl/pki/k8s/k8s_controller_manager-key.pem \
   --kubeconfig=kube_controller_manager.kubeconfig
# Set context parameters
kubectl config set-context kubernetes \
   --cluster=kubernetes \
   --user=system:kube-controller-manager \
   --kubeconfig=kube_controller_manager.kubeconfig
# Set the default context
kubectl config use-context kubernetes --kubeconfig=kube_controller_manager.kubeconfig
# Create the bootstrap kubeconfig
# Generate a token
export TOKEN_ID=$(head -c 6 /dev/urandom | md5sum | head -c 6)
export TOKEN_SECRET=$(head -c 16 /dev/urandom | md5sum | head -c 16)
export BOOTSTRAP_TOKEN=${TOKEN_ID}.${TOKEN_SECRET}
# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=./cfssl/pki/k8s/k8s-ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig
# Set client credentials
kubectl config set-credentials system:bootstrap:${TOKEN_ID} \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig
# Set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=system:bootstrap:${TOKEN_ID} \
  --kubeconfig=bootstrap.kubeconfig
# Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
# Create the flannel kubeconfig
# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=./cfssl/pki/k8s/k8s-ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kubeconfig.conf
# Set client credentials
kubectl config set-credentials flannel \
  --client-certificate=./cfssl/pki/k8s/flannel.pem \
  --client-key=./cfssl/pki/k8s/flannel-key.pem \
  --embed-certs=true \
  --kubeconfig=kubeconfig.conf
# Set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=flannel \
  --kubeconfig=kubeconfig.conf
# Set the default context
kubectl config use-context default --kubeconfig=kubeconfig.conf
# Create the kube-proxy kubeconfig
# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=./cfssl/pki/k8s/k8s-ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig
# Set client credentials
kubectl config set-credentials system:kube-proxy \
  --client-certificate=./cfssl/pki/k8s/kube-proxy.pem \
  --client-key=./cfssl/pki/k8s/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
# Set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=system:kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig
# Set the default context
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
# All the kubeconfigs Kubernetes needs are now created
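As a quick sanity check (not part of the original write-up), confirm a generated kubeconfig embeds the cluster, user and context you expect:
# embedded certificates show up as DATA+OMITTED
kubectl config view --kubeconfig=admin.kubeconfig
kubectl config get-contexts --kubeconfig=admin.kubeconfig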
Create the Kubernetes startup option files
cd /root/work/kubernetes/server
# Create the config directories
# Layout: conf holds startup option files, config holds other configuration, log holds runtime logs, kubelet-plugins holds plugins
mkdir conf config log  kubelet-plugins
# Create the startup option files
cd conf
# kube-apiserver options
vi kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=false \
        --bind-address=192.168.30.52 \
        --advertise-address=192.168.30.52 \
        --secure-port=5443 \
        --insecure-port=0 \
        --service-cluster-ip-range=10.66.0.0/16 \
        --service-node-port-range=30000-65000 \
        --etcd-cafile=/apps/kubernetes/ssl/etcd/etcd-ca.pem \
        --etcd-certfile=/apps/kubernetes/ssl/etcd/etcd_client.pem \
        --etcd-keyfile=/apps/kubernetes/ssl/etcd/etcd_client-key.pem \
        --etcd-prefix=/registry \
        --etcd-servers=https://192.168.30.50:2379 \
        --client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
        --tls-cert-file=/apps/kubernetes/ssl/k8s/k8s_server.pem \
        --tls-private-key-file=/apps/kubernetes/ssl/k8s/k8s_server-key.pem \
        --kubelet-client-certificate=/apps/kubernetes/ssl/k8s/k8s_server.pem \
        --kubelet-client-key=/apps/kubernetes/ssl/k8s/k8s_server-key.pem \
        --service-account-key-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
        --requestheader-client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
        --proxy-client-cert-file=/apps/kubernetes/ssl/k8s/aggregator.pem \
        --proxy-client-key-file=/apps/kubernetes/ssl/k8s/aggregator-key.pem \
        --requestheader-allowed-names=aggregator \
        --requestheader-group-headers=X-Remote-Group \
        --requestheader-extra-headers-prefix=X-Remote-Extra- \
        --requestheader-username-headers=X-Remote-User \
        --enable-aggregator-routing=true \
        --anonymous-auth=false \
        --allow-privileged=true \
        --experimental-encryption-provider-config=/apps/kubernetes/config/encryption-config.yaml \
        --enable-admission-plugins=DefaultStorageClass,DefaultTolerationSeconds,LimitRanger,NamespaceExists,NamespaceLifecycle,NodeRestriction,OwnerReferencesPermissionEnforcement,PodNodeSelector,PersistentVolumeClaimResize,PodPreset,PodTolerationRestriction,ResourceQuota,ServiceAccount,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook \
        --disable-admission-plugins=DenyEscalatingExec,ExtendedResourceToleration,ImagePolicyWebhook,LimitPodHardAntiAffinityTopology,NamespaceAutoProvision,Priority,EventRateLimit,PodSecurityPolicy \
        --cors-allowed-origins=.* \
        --enable-swagger-ui \
        --runtime-config=api/all=true \
        --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
        --authorization-mode=Node,RBAC \
        --apiserver-count=1 \
        --audit-log-maxage=30 \
        --audit-log-maxbackup=3 \
        --audit-log-maxsize=100 \
        --kubelet-https \
        --event-ttl=1h \
        --feature-gates=RotateKubeletServerCertificate=true,RotateKubeletClientCertificate=true \
        --enable-bootstrap-token-auth=true \
        --audit-log-path=/apps/kubernetes/log/api-server-audit.log \
        --alsologtostderr=true \
        --log-dir=/apps/kubernetes/log \
        --v=2 \
        --endpoint-reconciler-type=lease \
        --max-mutating-requests-inflight=100 \
        --max-requests-inflight=500 \
        --target-ram-mb=6000"
# Create the kube-controller-manager options file
vi kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \
 --leader-elect=true \
 --address=0.0.0.0 \
 --service-cluster-ip-range=10.66.0.0/16 \
 --cluster-cidr=10.67.0.0/16 \
 --node-cidr-mask-size=24 \
 --cluster-name=kubernetes \
 --allocate-node-cidrs=true \
 --kubeconfig=/apps/kubernetes/config/kube_controller_manager.kubeconfig \
 --authentication-kubeconfig=/apps/kubernetes/config/kube_controller_manager.kubeconfig \
 --authorization-kubeconfig=/apps/kubernetes/config/kube_controller_manager.kubeconfig \
 --use-service-account-credentials=true \
 --client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
 --requestheader-client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
 --node-monitor-grace-period=40s \
 --node-monitor-period=5s \
 --pod-eviction-timeout=5m0s \
 --terminated-pod-gc-threshold=50 \
 --alsologtostderr=true \
 --cluster-signing-cert-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
 --cluster-signing-key-file=/apps/kubernetes/ssl/k8s/k8s-ca-key.pem  \
 --deployment-controller-sync-period=10s \
 --experimental-cluster-signing-duration=86700h0m0s \
 --enable-garbage-collector=true \
 --root-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
 --service-account-private-key-file=/apps/kubernetes/ssl/k8s/k8s-ca-key.pem \
 --feature-gates=RotateKubeletServerCertificate=true,RotateKubeletClientCertificate=true \
 --controllers=*,bootstrapsigner,tokencleaner \
 --horizontal-pod-autoscaler-use-rest-clients=true \
 --horizontal-pod-autoscaler-sync-period=10s \
 --flex-volume-plugin-dir=/apps/kubernetes/kubelet-plugins/volume \
 --tls-cert-file=/apps/kubernetes/ssl/k8s/k8s_controller_manager.pem \
 --tls-private-key-file=/apps/kubernetes/ssl/k8s/k8s_controller_manager-key.pem \
 --kube-api-qps=100 \
 --kube-api-burst=100 \
 --log-dir=/apps/kubernetes/log \
 --v=2"
# Create the kube-scheduler options file
vi kube-scheduler
KUBE_SCHEDULER_OPTS=" \
                   --logtostderr=false \
                   --address=0.0.0.0 \
                   --leader-elect=true \
                   --kubeconfig=/apps/kubernetes/config/kube_scheduler.kubeconfig \
                   --authentication-kubeconfig=/apps/kubernetes/config/kube_scheduler.kubeconfig \
                   --authorization-kubeconfig=/apps/kubernetes/config/kube_scheduler.kubeconfig \
                   --alsologtostderr=true \
                   --kube-api-qps=100 \
                   --kube-api-burst=100 \
                   --log-dir=/apps/kubernetes/log \
                   --v=2"
# Create the kubelet options file
# Note: --address, --node-ip and --healthz-bind-address MUST be changed to each node's own IP,
# and --hostname-override to that node's hostname. --cluster-dns is the cluster DNS IP and
# --cluster-domain the cluster domain. Do not set node-role.kubernetes.io labels via
# --node-labels: that is no longer allowed and makes the kubelet error out.
vi kubelet
KUBELET_OPTS="--bootstrap-kubeconfig=/apps/kubernetes/conf/bootstrap.kubeconfig \
              --fail-swap-on=false \
              --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/apps/cni/bin \
              --kubeconfig=/apps/kubernetes/conf/kubelet.kubeconfig \
              --address=192.168.30.52 \
              --node-ip=192.168.30.52 \
              --hostname-override=master \
              --cluster-dns=10.66.0.2 \
              --cluster-domain=cluster.local \
              --authorization-mode=Webhook \
              --authentication-token-webhook=true \
              --client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
              --rotate-certificates=true \
              --cgroup-driver=cgroupfs \
              --healthz-port=10248 \
              --healthz-bind-address=192.168.30.52 \
              --cert-dir=/apps/kubernetes/ssl \
              --feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \
              --node-labels=node.kubernetes.io/k8s-node=true \
              --serialize-image-pulls=false \
              --enforce-node-allocatable=pods,kube-reserved,system-reserved \
              --pod-manifest-path=/apps/work/kubernetes/manifests \
              --runtime-cgroups=/systemd/system.slice/kubelet.service \
              --kubelet-cgroups=/systemd/system.slice/kubelet.service \
              --kube-reserved-cgroup=/systemd/system.slice/kubelet.service \
              --system-reserved-cgroup=/systemd/system.slice \
              --root-dir=/apps/work/kubernetes/kubelet \
              --log-dir=/apps/kubernetes/log \
              --alsologtostderr=true \
              --logtostderr=false \
              --anonymous-auth=true \
              --image-gc-high-threshold=70 \
              --image-gc-low-threshold=50 \
              --kube-reserved=cpu=500m,memory=512Mi,ephemeral-storage=1Gi \
              --system-reserved=cpu=1000m,memory=1024Mi,ephemeral-storage=1Gi \
              --eviction-hard=memory.available<500Mi,nodefs.available<10% \
              --sync-frequency=30s \
              --resolv-conf=/etc/resolv.conf \
              --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 \
              --image-pull-progress-deadline=30s \
              --v=2 \
              --event-burst=30 \
              --event-qps=15 \
              --kube-api-burst=30 \
              --kube-api-qps=15 \
              --max-pods=200 \
              --pods-per-core=10 \
              --read-only-port=0 \
              --allowed-unsafe-sysctls 'kernel.msg*,kernel.shm*,kernel.sem,fs.mqueue.*,net.*' \
              --volume-plugin-dir=/apps/kubernetes/kubelet-plugins/volume"
# Create the kube-proxy options file (--cluster-cidr is the pod CIDR)
vi  kube-proxy
KUBE_PROXY_OPTS="--logtostderr=false \
--v=2 \
--feature-gates=SupportIPVSProxyMode=true \
--masquerade-all=true \
--proxy-mode=ipvs \
--ipvs-min-sync-period=5s \
--ipvs-sync-period=5s \
--ipvs-scheduler=rr \
--cluster-cidr=10.67.0.0/16 \
--log-dir=/apps/kubernetes/log \
--kubeconfig=/apps/kubernetes/conf/kube-proxy.kubeconfig"
# Put the remaining kube-apiserver configuration in the config directory
cd ../config
# Create encryption-config.yaml
export ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
cat > encryption-config.yaml <<EOF
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: ${ENCRYPTION_KEY}
      - identity: {}
EOF
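Once the apiserver runs with this file, secrets are encrypted at rest. You can verify that later from the etcd host (a sketch, not from the original; it assumes a secret named mysecret exists in the default namespace and reuses the etcdctl flags shown earlier):
# the stored value should begin with k8s:enc:aescbc:v1:key1 instead of plain JSON
ETCDCTL_API=3 /apps/etcd/bin/etcdctl \
    --endpoints=https://192.168.30.50:2379 \
    --cacert=/apps/etcd/ssl/etcd-ca.pem \
    --cert=/apps/etcd/ssl/etcd_client.pem \
    --key=/apps/etcd/ssl/etcd_client-key.pem \
    get /registry/secrets/default/mysecret | hexdump -C | head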
# Copy the kubeconfigs into config
cp -r ../../../kube_scheduler.kubeconfig ../../../kube_controller_manager.kubeconfig ./
# Copy bootstrap.kubeconfig and kube-proxy.kubeconfig into conf; every node needs them
cd ../conf
cp -r ../../../bootstrap.kubeconfig ../../../kube-proxy.kubeconfig ./
# Create the systemd service files
cd  /root/work/kubernetes/server
# kube-apiserver service file
vi kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
Type=notify
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity

EnvironmentFile=-/apps/kubernetes/conf/kube-apiserver
ExecStart=/apps/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
User=k8s
[Install]
WantedBy=multi-user.target
# kube-controller-manager service file
vi kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
EnvironmentFile=-/apps/kubernetes/conf/kube-controller-manager
ExecStart=/apps/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5
User=k8s

[Install]
WantedBy=multi-user.target
# kube-scheduler service file
vi  kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity

EnvironmentFile=-/apps/kubernetes/conf/kube-scheduler
ExecStart=/apps/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5
User=k8s

[Install]
WantedBy=multi-user.target
# kubelet service file
vi kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
EnvironmentFile=-/apps/kubernetes/conf/kubelet
ExecStart=/apps/kubernetes/bin/kubelet $KUBELET_OPTS
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
# kube-proxy service file
vi kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
EnvironmentFile=-/apps/kubernetes/conf/kube-proxy
ExecStart=/apps/kubernetes/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
# Copy the certificates to /root/work/kubernetes/server/ssl
mkdir ssl
cd ssl
cp -pdr /root/work/cfssl/pki/k8s ./
# Copy the etcd certificates
mkdir etcd
cp -pdr /root/work/cfssl/pki/etcd/etcd_client* ./etcd/
cp -pdr /root/work/cfssl/pki/etcd/etcd-ca.pem ./etcd/
Distribute the files and start the Kubernetes server components
cd /root/work/kubernetes/server
# Create the remote directory
ssh 192.168.30.52 mkdir -p /apps/kubernetes
scp -r bin conf config ssl 192.168.30.52:/apps/kubernetes
# Distribute the service files
scp -r *.service 192.168.30.52:/usr/lib/systemd/system/
# Create the k8s user
ssh 192.168.30.52 useradd k8s -s /sbin/nologin -M
# Give the k8s user ownership of /apps/kubernetes
ssh 192.168.30.52 chown -R k8s.k8s /apps/kubernetes
# Start kube-apiserver, kube-controller-manager and kube-scheduler
# Start kube-apiserver
ssh 192.168.30.52 systemctl start kube-apiserver
# Enable start on boot
ssh 192.168.30.52 systemctl enable kube-apiserver
# Check status
ssh 192.168.30.52 systemctl status kube-apiserver
# Verify the API server came up
# Back up the old config file
mv  ~/.kube/config  ~/.kube/config.old
# Copy the kubeconfig into ~/.kube
cp ~/work/admin.kubeconfig ~/.kube/config
# Verify kube-apiserver responds
kubectl cluster-info
[root@]~/work]#kubectl cluster-info
Kubernetes master is running at https://192.168.30.52:5443
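The health endpoint can also be queried directly; since anonymous auth is disabled, present the admin client certificate (a minimal sketch using the files generated earlier):
# the apiserver should answer "ok" on /healthz
curl --cacert ./cfssl/pki/k8s/k8s-ca.pem \
     --cert ./cfssl/pki/k8s/k8s_apiserver_admin.pem \
     --key ./cfssl/pki/k8s/k8s_apiserver_admin-key.pem \
     https://192.168.30.52:5443/healthz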
# Start kube-controller-manager
ssh 192.168.30.52 systemctl start kube-controller-manager
# Enable start on boot
ssh 192.168.30.52 systemctl enable kube-controller-manager
# Check status
ssh 192.168.30.52 systemctl status kube-controller-manager
# Start kube-scheduler
ssh 192.168.30.52 systemctl start kube-scheduler
# Enable start on boot
ssh 192.168.30.52 systemctl enable kube-scheduler
# Check status
ssh 192.168.30.52 systemctl status kube-scheduler
# Verify the control-plane components
[root@]~/work]#kubectl get cs # the v1.16 kubectl only prints
NAME                 AGE
controller-manager   <unknown>
scheduler            <unknown>
etcd-0               <unknown>
# an older kubectl (built from kubernetes-1.14.4) still shows the status columns
[root@]~/work]#../kubernetes-1.14.4/_output/bin/kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health":"true"}
# Authorize kube-controller-manager, kubelet and kube-scheduler to access the kube-apiserver
Grant permissions on the Kubernetes API
kubectl create clusterrolebinding controller-node-clusterrolebing --clusterrole=system:kube-controller-manager  --user=system:kube-controller-manager
kubectl create clusterrolebinding scheduler-node-clusterrolebing  --clusterrole=system:kube-scheduler --user=system:kube-scheduler
kubectl create clusterrolebinding controller-manager:system:auth-delegator --user system:kube-controller-manager --clusterrole system:auth-delegator
Grant the kubernetes certificate access to the kubelet API
kubectl create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin --clusterrole cluster-admin
kubectl create clusterrolebinding kubelet-node-clusterbinding --clusterrole=system:node --group=system:nodes
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes
Configure and start the node components
192.168.30.52 is used as the example node; other nodes follow the same steps
Distribute cni
cd /root/work
scp -r cni 192.168.30.52:/apps/
# Create the cni config directory
ssh 192.168.30.52 mkdir -p  /etc/cni/net.d
# Install docker
# Create /etc/docker/daemon.json
ssh 192.168.30.52 mkdir -p /etc/docker
vi daemon.json
{
    "max-concurrent-downloads": 20,
    "data-root": "/apps/docker",
    "exec-root": "/apps/docker",
    "log-driver": "json-file",
    "bridge": "none",
    "oom-score-adjust": -1000,
    "debug": false,
    "log-opts": {
        "max-size": "100M",
        "max-file": "10"
    },
    "default-ulimits": {
        "nofile": {
            "Name": "nofile",
            "Hard": 1024000,
            "Soft": 1024000
        },
        "nproc": {
            "Name": "nproc",
            "Hard": 1024000,
            "Soft": 1024000
        },
        "core": {
            "Name": "core",
            "Hard": -1,
            "Soft": -1
        }
    }
}
scp -r daemon.json 192.168.30.52:/etc/docker

ssh 192.168.30.52 sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
ssh 192.168.30.52 yum install -y  epel-release
ssh 192.168.30.52 yum install -y   yum-utils  ipvsadm  telnet  wget  net-tools  conntrack  ipset  jq  iptables  curl  sysstat  libseccomp  socat  nfs-utils  fuse  fuse-devel 
# Install docker dependencies
ssh 192.168.30.52 yum install -y    python-pip python-devel yum-utils device-mapper-persistent-data lvm2
# Install docker
ssh 192.168.30.52 yum install -y docker-ce
# Reload the service configuration
ssh 192.168.30.52 systemctl daemon-reload
# Restart docker
ssh 192.168.30.52 systemctl restart docker
# Enable start on boot
ssh 192.168.30.52 systemctl enable docker
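To confirm docker picked up daemon.json (a quick check, not part of the original steps):
# the data root and log driver should match the file we distributed
ssh 192.168.30.52 docker info | grep -E 'Docker Root Dir|Logging Driver'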
# bootstrap secret 
cat << EOF | tee bootstrap.secret.yaml
apiVersion: v1
kind: Secret
metadata:
  # Name MUST be of form bootstrap-token-<token id>
  name: bootstrap-token-${TOKEN_ID}
  namespace: kube-system

# Type MUST be 'bootstrap.kubernetes.io/token'
type: bootstrap.kubernetes.io/token
stringData:
  # Human readable description. Optional.
  description: The default bootstrap token generated by 'kubelet'.

  # Token ID and secret. Required.
  token-id: ${TOKEN_ID}
  token-secret: ${TOKEN_SECRET}

  # Allowed usages.
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"

  # Extra groups to authenticate the token as. Must start with system:bootstrappers:
  auth-extra-groups: system:bootstrappers:worker,system:bootstrappers:ingress
---
# A ClusterRole which instructs the CSR approver to approve a node requesting a
# serving cert matching its client cert.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeserver
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/selfnodeserver"]
  verbs: ["create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kubernetes-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kubernetes
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kubernetes-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes
EOF

# Create the resources
kubectl create -f bootstrap.secret.yaml
### List the generated token
kubeadm token list
# Allow users in the system:bootstrappers group to create CSRs
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers
# Auto-approve the initial TLS bootstrapping CSRs from the system:bootstrappers group
kubectl create clusterrolebinding node-client-auto-approve-csr --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient --group=system:bootstrappers
# Auto-approve CSRs from the system:nodes group renewing the kubelet client certificate used to talk to the apiserver
kubectl create clusterrolebinding node-client-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient --group=system:nodes

# Auto-approve CSRs from the system:nodes group renewing the kubelet's port-10250 serving certificate
kubectl create clusterrolebinding node-server-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeserver --group=system:nodes
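After a kubelet starts bootstrapping, you should see its requests being approved automatically under these bindings (an illustrative check; request names differ per cluster):
# client CSRs are approved immediately; server CSRs appear once the kubelet requests a serving cert
kubectl get csr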
# Create the kubernetes working directories
ssh 192.168.30.52 mkdir -p /apps/work/kubernetes/{manifests,kubelet}
# Start kubelet
ssh 192.168.30.52 systemctl start kubelet
# Enable start on boot
ssh 192.168.30.52 systemctl enable kubelet
# Check status
ssh 192.168.30.52 systemctl status kubelet
# Start kube-proxy
ssh 192.168.30.52 systemctl start kube-proxy
# Enable start on boot
ssh 192.168.30.52 systemctl enable kube-proxy
# Check status
ssh 192.168.30.52 systemctl status kube-proxy
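Since kube-proxy runs in IPVS mode, the generated service rules can be inspected with ipvsadm, which was installed earlier (a sketch; the virtual servers and backends depend on your services):
ssh 192.168.30.52 ipvsadm -Ln
# e.g. the kubernetes service 10.66.0.1:443 should appear as a virtual server with rr scheduling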
[root@]~/work]#kubectl get node
NAME      STATUS     ROLES    AGE    VERSION
master    NotReady   <none>   140m   v1.16.0
master2   NotReady   <none>   34m    v1.16.0
# The nodes stay NotReady because no CNI plugin is ready yet
flannel deployment
# Create the flannel configmap from the kubeconfig
kubectl create configmap kube-proxy --from-file=kubeconfig.conf
# Create the yaml
vi kube-flannel.yml
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  # cniVersion must be set; without it the kubelet stays NotReady
  cni-conf.json: |
     {
       "name": "cni0",
       "cniVersion": "0.3.1",
       "plugins": [
         {
           "type": "flannel",
           "delegate": {
             "hairpinMode": true,
             "isDefaultGateway": true
           }
         },
         {
           "type": "portmap",
           "capabilities": {
             "portMappings": true
           }
         }
       ]
     }
  # remember to match the pod CIDR
  net-conf.json: |
    {
      "Network": "10.67.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.11.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        - --healthz-port=10244
        - --kubeconfig-file=/var/lib/flannel/kubeconfig
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10244
          initialDelaySeconds: 10
          periodSeconds: 3
        resources:
          requests:
            cpu: 100m
            memory: 50Mi
          limits:
            cpu: 100m
            memory: 50Mi
        securityContext:
          privileged: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: kubeconfig
          mountPath: /var/lib/flannel
          readOnly: true
      volumes:
        - name: run
          hostPath:
            path: /run
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
        - name: kubeconfig
          configMap:
            name: kube-proxy
            items:
            - key: kubeconfig.conf
              path: kubeconfig
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
# Create the flannel resources
kubectl apply -f kube-flannel.yml
# Check pod status
[root@]~/work]#kubectl get pod| grep flannel
kube-flannel-ds-amd64-6bpf7       1/1     Running   0          3h57m
kube-flannel-ds-amd64-6sxz2       1/1     Running   0          3h58m
# Check node status
[root@]~/work]#kubectl get node
NAME      STATUS   ROLES    AGE   VERSION
master    Ready    <none>   18h   v1.16.0
master2   Ready    <none>   16h   v1.16.0
# Both nodes are Ready now and cni assigns IPs normally
[root@master2 ~]# ip a | grep cni
7: cni0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default qlen 1000
    inet 10.67.2.1/24 brd 10.67.2.255 scope global cni0
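With flannel up, the overlay network can be smoke-tested by pinging a pod on another node (illustrative; 10.67.2.2 is a hypothetical pod IP, take a real one from kubectl get pod -o wide):
# from master, ping a pod IP that flannel allocated on master2
ping -c 3 10.67.2.2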
CoreDNS deployment
vi coredns.yaml
# __MACHINE_GENERATED_WARNING__

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
      addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            upstream /etc/resolv.conf
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: CoreDNS
spec:
  # replicas: not specified here:
  # 1. So that the Addon Manager does not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: CriticalAddonsOnly
          operator: Exists
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - name: coredns
        image: coredns/coredns
        imagePullPolicy: Always
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: CoreDNS
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.66.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
# Create the CoreDNS resources
kubectl apply -f coredns.yaml
# Verify DNS
[root@master net.d]# dig @10.66.0.2 www.baidu.com

; <<>> DiG 9.11.4-P2-RedHat-9.11.4-9.P2.el7 <<>> @10.66.0.2 www.baidu.com
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 31727
;; flags: qr rd ra; QUERY: 1, ANSWER: 3, AUTHORITY: 4, ADDITIONAL: 5

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;www.baidu.com.                 IN      A

;; ANSWER SECTION:
www.baidu.com.          30      IN      CNAME   www.a.shifen.com.
www.a.shifen.com.       30      IN      A       14.215.177.38
www.a.shifen.com.       30      IN      A       14.215.177.39

;; AUTHORITY SECTION:
shifen.com.             30      IN      NS      ns3.baidu.com.
shifen.com.             30      IN      NS      ns4.baidu.com.
shifen.com.             30      IN      NS      ns2.baidu.com.
shifen.com.             30      IN      NS      dns.baidu.com.

;; ADDITIONAL SECTION:
ns3.baidu.com.          30      IN      A       112.80.248.64
ns2.baidu.com.          30      IN      A       220.181.33.31
ns4.baidu.com.          30      IN      A       14.215.178.80
dns.baidu.com.          30      IN      A       202.108.22.220

;; Query time: 3 msec
;; SERVER: 10.66.0.2#53(10.66.0.2)
;; WHEN: Fri Sep 20 13:07:01 CST 2019
;; MSG SIZE  rcvd: 413
The response is normal.
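Cluster-internal names can be checked from inside a pod as well (a sketch, not from the original; the busybox image and pod name are illustrative):
# kubernetes.default should resolve to the service IP 10.66.0.1
kubectl run dns-test --image=busybox:1.28 --restart=Never --rm -it -- nslookup kubernetes.default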
Create a traefik ingress with HTTPS enabled
# base64-encode the certificate and key
cat tls.crt | base64 | tr -d '\n'
cat tls.key | base64 | tr -d '\n'
# Create the traefik secret
vi traefik-secret.yaml
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: tls-cert
  name: tls-cert
type: Opaque
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUdYVENDQlVXZ0F3SUJBZ0lTQTBCSy82MUwvZVNHb2FjNmFjRnZLaDVOTUEwR0NTcUdTSWIzRFFFQkN3VUEKTUVveEN6QUpCZ05WQkFZVEFsVlRNUll3RkFZRFZRUUtFdzFNWlhRbmN5QkZibU55ZVhCME1TTXdJUVlEVlFRRApFeHBNWlhRbmN5QkZibU55ZVhCMElFRjFkR2h3Y21sMGVTQllNekFlRncweE9UQTVNRFl4TlRNNE1EVmFGdzB4Ck9URXlNRFV4TlRNNE1EVmFNQll4RkRBU0JnTlZCQU1UQzIxa1pHZGhiV1V1WTI5dE1JSUNJakFOQmdrcWhraUcKOXcwQkFRRUZBQU9DQWc4QU1JSUNDZ0tDQWdFQW9mSVdOdTE4YUp1T3Jzd0JjZE9lODN0dWpXZ2dpUXl0VVYxQwpqNVhYbzNjQTM1L2ZxQXNGVHpJRGNwUmxhTGJ6SHd1d1psOWNSKzJuRENaUzI4VlhZaXcrSkQvQXpna3FzTHFJCjZ3YlFhcHNCa1lYUzRuT1UrZzhSMVgwcm52ckpickE1eHFJSWJKM002ajVLTXZ4RktvMEV3YXNBY2NiYlVGOW4KMHQ2RzNreG4zWW1Sek5HeHh2bXZ4V2prNWNkSWMza0MyT1VuRktGOG5XemJab2JiNk9PUnZSaElEWW5YdjkxdgoyMUYwQnZ0Q21GY0FEaDRqZXUrLzNKVDVLcEJkdkFHOHI3aU1wbkhKaFU1alhqTXlPRytMbkcvcnJuRzJGaXpHCmx1UHQwKzRlK0ZRSXFZY1BUM1cyTUF2ZDlzQTNEMThsUW82M00vZlMyYjNIYVNidFY0b1pmNS9zTzJNeEVPVnoKVEd1M0NxYk40TkcrZE8ycXoxYWxMQmlGZlVjNEdmUVpYRmlLaDFzazl3Qm5zeWhqYUZmdUx6bHRxMDg3STJLYQorVlRaUzFQSlJFbGduM3UwY1FmaENjelF5ZTJ3Vjl6RE9lVmUxeTBjLzZ0RWJhNllCeGR2ZGcwOFpKL0QwYTBLCnJvWlVJMW5Rc2RKeE8rQ3N1OURLYjROZzJCYnZkWVpHVWJrSCtSUDU0UUdrS1VnYnVxNVIwbXI0U1I2VUwrRE4KZjNxem81a3ZiMXVRWXFpaDZYUFVDVUVPOTNOU1Y2MTNUSUVOTUpyYjVhbGRLUkhPZlpWL201QThlUy9ibFFYcgpOV3FCRy9OL2RtckZjMmcyNGJEY3d5OXIzL3FkNy9MTWxmMVRVdzJGczR3M2x2VHJFanlwWEZhQ3BRRGxkc0xJCkYwcWVKVnNDQXdFQUFhT0NBbTh4Z2dKck1BNEdBMVVkRHdFQi93UUVBd0lGb0RBZEJnTlZIU1VFRmpBVUJnZ3IKQmdFRkJRY0RBUVlJS3dZQkJRVUhBd0l3REFZRFZSMFRBUUgvQkFJd0FEQWRCZ05WSFE0RUZnUVVHUUNXOGNFbgpaNWhVWjBDa004QW03Wjh7NGJNd0h4WURWUjBqQkJnd0ZvQVVxRXBxWXdSOTNicm0wVG0zcGtWbDcvT283S0V3CmJ3WUlLd1lCQlFVSEFRRUVZekJoTUM0R0NDc0dBUVVGQnpBQmhpSm9kSFJ3T2k4dmIyTnpjQzVwYm5RdGVETXUKYkdWMGMyVnVZM0o1Y0hRdWIzSm5NQzhHQ0NzR0FRVUZCekFDaGlOb2RIUndPaTh3WTJWeWRDNXBiblF0ZURNdQpiR1YwYzJWdVkzSjVjSFF1YjNKbkx6QWxCZ05WSFJFRUhqQWNnZzBxTG0xa1pHZGhiV1V1WTI5dGdndHRaR1JuCllXMWxMbU52YlRCTUJnTlZIU0FFUlRCRE1BZ0dCbWVCREFFQ0FUQTNCZ3NyQmdFRUFZTGZFd0VCQVRBb01DWUcKQ0NzR0FRVUZCd0lCRmhwb2RIUndPaTh3WTNCekxteGxkSE5sYm1OeWVYQjBMbTl5WnpDQ0FRUUdDaXNHQVFRQgoxbmtDQkFJRWdmVUVnZklBOEFCM0FPSnBTNjRtNk9sQUNlaUdHN1k3ZzlRKzUvNTBpUHVranlpVEFaM2Q4ZHYrCkFBQUJiUWR3b2dNQUFBUURBRWd3UmdJaEFLWldRaVVPZkZDcGdjT0JPZ0xoTjFBQjgycHg3bUR2QXYxUnRKVmoKQU0zNEFpRUFtQWpPY012WTQ2Y0VwT2lKbW4vKzB4bnZsTmR0TlNoNExvWHJaUW9sUnJZQWRRQXBQRkdXVk1nNQpaYnFxVVB4WUI5UzNiNzlZZWlseTNLVEREUFRsUlVmMGVBQUFBVzBIY0tJbEFBQUVBd0JHTUVRQ0lEeGhFMThpCm14MjBySFFHS2RpYzVCVnQ3bFBiTzBRNy9KdGI3bkVvR1grSEFpQnRDTWxXbGxlMStNV3JrUXBKbXBaTHE3bWYKWXEyZjZXc2k1QVpmQmZFRndqQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFXUS8ycG8wcHRXWXJkbk5ndkZIbgpMK2RyclBDT2xpUXNuaFJWajdiTlhFOGNWb0l6TmU3VGRjazJINE5CUTZUZkZicmkvdHdubkFXRThzCDNPNHVWClV1bVM1Y2FGYmFPdnJIa3ZLVTNUVGhLODNqcmpFZ1N6cEo0d3k2MUlkNGhPZ0FYODVpd2REUEhvL0o0YXkzVDEKanpyMGduY0x0N1R0Tjd3dzJ5Z1RZSXBPTTBVVWtjd05GUGZZYmFRYzVqVjdvcU1raGlMNUtiSGpYVDdRcXR4YwprY3J2VXZMdERDTTQvMGpWN01FNnd4enhCQ1N1ekZWTlVlSEVVS0dDci9qRHRXV0hFZ25JNEZ5MGhQT0F0RlZzCmpDVDhWSTVYMUVmeExTRUdONkxob2NoOHl1akJYWTVNSGlVUDc5REtHaXkzaXJTZ2xtU3BVZXpMSzkwdDVzb3MKd2c9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCgotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJRWtqQ0NBM3FnQXdJQkFnSVFDZ0ZCUWdBQUFWT0ZjMm9MaGV5bkNEQU5CZ2txaGtpRzl3MEJBUXNGQURBLwpNU1F3SWdZRFZRUUtFeHRFYVdkcGRHRnNJRk5wWjI1aGRIVnlaU0JVY25WemRDQkRieTR4RnpBVkJnTlZCQU1UCkRrUlRWQ0JTYjI5MElFTkJJRmd6TUI0WERURTJNRE14TnpFMk5EQTBObG9YRFRJeE1ETXhOekUyTkRBME5sb3cKU2pFTE1Ba0dBMVVFQmhNQ1ZWTXhGakFVQmdOVkJBb1REVXhsZENkeklFVnVZM0o1Y0hReEl6QWhCZ05WQkFNVApHa3hsZENkeklFVnVZM0o1Y0hRZ1FYVjBhRzl5YVhSNUlGZ3pNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUFuTk1NOEZybExrZTNjbDAzZzdO
b1l6RHExelVtR1NYaHZiNDE4WENTTDdlNFMwRUYKcTZtZU5RaFk3TEVxeEdpSEM2UGpkZVRtODZkaWNicDVnV0FmMTVHYW4vUFFlR2R4eUdrT2xaSFAvdWFaNldBOApTTXgreWsxM0VpU2RSeHRhNjduc0hqY0FISnlzZTZjRjZzNUs2NzFCNVRhWXVjdjliVHlXYU44aktrS1FESVowClo4aC9wWnE0VW1FVUV6OWw2WUtIeTl2NkRsYjJob256aFQrWGhxK3czQnJ2YXcyVkZuM0VLNkJsc3BrRU5uV0EKYTZ4Szh5dVFTWGd2b3BaUEtpQWxLUVRHZE1EUU1jMlBNVGlWRnJxb003aEQ4YkVmd3pCL29ua3hFejB0TnZqagovUEl6YXJrNU1jV3Z4STBOSFdRV002cjZoQ20yMUF2QTJIM0Rrd0lEQVFBQm80SUJmVENDQVhrd0VnWURWUjBUCkFRSC9CQWd3QmdFQi93SUJBREFPQmdOVkhROEJBZjhFQkFNQ0FZWXdmd1lJS3dZQkJRVUhBUUVFY3pCeE1ESUcKQ0NzR0FRVUZCekFCaGlab2RIUndPaTh3YVhOeVp5NTBjblZ6ZEdsa0xtOWpjM0F1YVdSbGJuUnlkWE4wTG1OdgpiVEE3QmdnckJnRUZCUWN3QW9ZdmFIUjBjRG92TDJGd2NITXVhV1JsYm5SeWRYTjBMbU52YlM5eWIyOTBjeTlrCmMzUnliMjkwWTJGNE15NXdOMk13SHdZRFZSMGpCQmd3Rm9BVXhLZXhwSHNzY2ZyYjRVdVFkZi9FRldDRmlSQXcKVkFZRFZSMGdCRTB3U3pBSUJnWm5nUXdCQWdFd1B3WUxLd1lCQkFHQzN4TUJBUUV3TURBdUJnZ3JCZ0VGQlFjQwpBUllpYUhSMGNEb3ZMMk53Y3k1eWIyOTBMWGd4TG14bGRITmxibU55ZVhCMExtOXlaekE4QmdOVkhSOEVOVEF6Ck1ER2dMNkF0aGl0b2RIUndPaTh3WTNKc0xtbGtaVzUwY25WemRDNWpiMjB2UkZOVVVrOVBWRU5CV0RORFVrd3UKWTNKc01CMEdBMVVkRGdRV0JCU29TbXBqQkgzZHV1YlJPYmVtUldYdjg2anNvVEFOQmdrcWhraUc5dzBCQVFzRgpBQU9DQVFFQTNUUFhFZk5qV0RqZEdCWDdDVlcrZGxhNWNFaWxhVWNuZThJa0NKTHhXaDlLRWlrM0pIUlJIR0pvCnVNMlZjR2ZsOTZTOFRpaFJ6WnZvcm9lZDZ0aTZXcUVCbXR6dzNXb2RhdGcrVnlPZXBoNEVZcHIvMXdYS3R4OC8Kd0FwSXZKU3d0bVZpNE1GVTVhTXFyU0RFNmVhNzNNajJ0Y015bzVqTWQ2am1lV1VISzhzby9qb1dVb0hPVWd3dQpYNFBvMVFZeiszZHN6a0RxTXA0ZmtseEJ3WFJzVzEwS1h7UE1UWitzT1BBdmV5eGluZG1qa1c4bEd5K1FzUmxHClBmWitHNlo2aDdtamVtMFkraVdsa1ljVjRQSVdMMWl3Qmk4c2FDYkdTNWpOMnA4TStYK1E3VU5LRWtST2IzTjYKS09xa3FtNTdUSDJIM2VESkFrU25oNi9ETkZ1MFFnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
  tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS0FJQkFBS0NBZ0VBb2ZJV051MThhSnVPcnN3QmNkT2U4M3R1aldnZ2lReXRVVjFDajVYWG8zY0EzNS9mCnFBc0ZUeklEY3BSbGFMYnpId3V3Wmw5Y1IrMm5EQ1pTMjhWWFlpdytKRC9BemdrcXNMcUk2d2JRYXBzQmtZWFMKNG5PVStnOFIxWDBybnZySmJyQTV4cUlJYkozTTZqNUtNdnhGS28wRXdhc0FjY2JiVUY5bjB0Nkcza3huM1ltUgp6Tkd4eHVtdnhXams1Y2RJYzNrQzJPVW5GS0Y4bld6YlpvYmI2T09SdlJoSURZblh3OTF2MjFGMEJ2dENtRmNBCkRoNGpldSsvM0pUNUtwQmR2QUc4cjdpTXBuSEpoVTVqWGpNeU9HK0xuRy9ycm5HMkZpekdsdVB0MCs0ZStGUUkKcVljUFQzVzJNQXZkOXNBM0QxOGxRbzYzTS9mUzJiM0hhU2J0VjRvWmY1L3NPMk14RU9WelRHdTNDcWJONE5HKwpkTzJxejFhbExCaUZmVWM0R2ZRWlhGaUtoMXNrOXdCbnN5aGphRmZ1THpsdHEwODdJMkthK1ZUWlMxUEpSRWxnCm4zdTBjUWZoQ2N6UXllMndWOXpET2VWZTF5MGMvNnRFYmE2WUJ4ZHZkZzA4WkovRDBhMEtyb1pVSTFuUXNkSngKTytDc3U5REtiNE5nMkJidmRZWkdVYmtIK1JQNTRRR2tLVWdidXE1UjBtcjRTUjZVTCtETmYzcXpvNWt2YjF1UQpZcWloNlhQVUNVRU85M05TVjYxM1RJRU5NSnJiNWFsZEtSSE9mWlYvbTVBOGVTL2JsUVhyTldxQkcvTi9kbXJGCmMyZzI0YkRjd3k5cjMvcWQ3L0xNbGYxVFV3MkZzNHczbHZUckVqeXBYRmFDcFFEbGRzTElGMHFlSlZzQ0F3RUEKQVFLQ0FnQXY5Zk13UnpzTisrdlF4cWd5M3JwM1gzbkpOU3BWakVTVUVTdVNQSTFGWXd3R0xtSGRjWTRiK3pMYwpMeWl0VDJsSEszNE5nM1pmOHZrQzl5S1k1YVBRZGt2ZERtaDZYR3FoTmswd1ZhOUpzeWhPd2JSSHpuVXpiVjBaCnZkMDZVd2x1MTQvMHpLMzBCUFBYOTZTZjN1aFpCclIrNnJiUisxT2VSUE1KbDArWDdFYmliRWlhd1F1R1hsVHAKQVB5eE5FaTNzZ0h2M0VhcnJIdXNYNzNHYW5BY1U3RW9zRlUrZFRGSktEcGxXSVVsUUNwajFYZzF0aVZKMWxFYQo4Wit0UkY0T1BQRjFsUkZLaGU1cHBXSjJWbkVzRjVUZ09xRXc0NHBLbk80Zlo5ZGFhVzRRbTBxSmNtOU5XQTRoCndwSDA3czRmcGt6eG5qU1JsbmFDZDlyandGeVBsSkJzUXNhVlFFNzlpQzJZMTRnTk9KQ0xyMXRKSEQ2ODN3bW4KS3ZNOHZpOTdHTmIybXZHeWNtZnloNVpzTFBpTWNqOFFER3VWZU53dlNESXpybnhqVkZlc0liTWt5UlZRem9IVApTTHRQbXdVR3lwRHVrMDhaZytsT0lYOC85K3lqMER3MDRqenllTVptYlFVdkd2N2lNWjFUaHdaRHF1YkJXV3J4CmtYTmJwTG9BMGxrcHh5bjdGam9Ya20zM2ZKQURjd2xWSS82WFNrSm1FaFVlZmZnaFFSMGNyVGphQVd1Qkx2Qk0KT0s5aEEzT3RTN2F0S2FDb1lvSmRrYkpHQTdWdytNNzA4NEJOTGhxM1Fyckg4S3M3Z05pdC9NN3lxSnU1alBaZgo2SE1seHNyWU9NVUhuVlk4VDkwN0Q3cS9ORUNnRThzODhnZzAyQ3JNWTFqanE4UnBpUUtDQVFFQTE2UHJaMUEwClNISS83akdmS3BETkJzQ0xrVUFxRERKSzQ0dFdJYmJBUXFhRTN1eDh4bkFlU2NjSHozbS9ScEpPSGtteHZTZlgKbTJ1Wk8veGtNTWhYK2lwOHdFOHZibzR1enVNYitTSXE3bWpialJkK1JJczJ5NHJsZVQ2NGVjRWc4R2pZckExZgpiSEI0MmhQclVTcXpxUVIwOTZocm1Lb1diU0RDZDZwOUVNeWVzT3IwTjdtQmJYVVZPazJxZGtYRlZWbHBlUDdpClFxWGdRUUI0bHgzLzJJdlpBMlhJUXlQdGJ0RWVRbmgyQ3FNM2NDMzR0VEVjZ244K0VwNG9SWmkwTTBHaUY3bXgKOTEvZHY2THZlNTR5K1pON1lXd1NFQ09ubzd5bDlvTlBZVnVGMGRiMjh0elppMThCeHJTQ2JESE1XbExvUzhWNgpXTEo0OGlSODJDYkc1d0tDQVFFQXdFRjM4KzYyeDhDU2x0blZZNlJaN0J0NEdiNEJqVWhWYXZ0NFkxUGFlbXFNCjFidFVnR2JyUnBoNHFUSEFTckUwUUZLeVZKYnlCUkJyRHIxWHU4WWRSVXQzZC92VzlIR1dPd1BKdTN2M3pLbHMKQ2xsZnpFY3J5L1l2aHAzSzlEcGR6OE1icHdueW5xcGV6b0xMNlJpL3JnK0hyTzBueXd1RSt0T2xYVFo2eUtadApHWVdTSVBWaG00NUJkc2ZxUzhnYjVvbjA0bHh4bnhxVnJvN0c0TUR6cmVEYlFhaGdyS3VuRWxwajZ4eW1PVWpBCkdCZDR3QUVrUExxNUUrRWcreDY4TkRLVTYwK29ybFhLWVhDQm5HSFZOQ3BVcmswVXkrcHFZZmFEN3VuR2VzaHMKSEwra3lXbXl5a3ErTmNKbnRXMFNSNy9sU1IvZUFhVEZyVzZVaXV0RGJRS0NBUUVBemhRYU9PNmVPSW51N016QgpScVdCT3EyeDg4cjFKQmpBRnZzbkFpc3JTOGJsZmtGVTdXREdvVTB5K3FWb0ZhSm1RMjI4RFlCUS9YZnp4aTdxCjlPL1JuQU1VbTVoUlJQOWVYbHNPZGFXZ2o1em9ETXRoNFZHRnVUbHhHZERGN1oyU3hBMysyMVlnVm5xYUZCY3IKTUxOMVpOWWNqajJITGl1R0tSNUFtcW4wd2FRN0YrcENJQ3NKTkxqSzQ2QXJnc0lrMXU4TzdCSHgyeTI0eFlZVQp1SjV6emRmQU9nNEFONkhURzY5L2twaWFmb29DeGhNNDlyZ0xmZTdxUEZLbk8vTzJhckdUbmNiWi9BWEMzb3h4Ci81dHRMYlF6R2lSMGtyWHdWSHRKdys4elltQmIzL0RtcWF4RHZueTZMdEo5UGJiTmk1aGw1VnZCRTVqa0dzeWgKL3RQNEN3S0NBUUJ2R1dZb0lKcWZkRGxCMHovdEJOeXlCRzJ5OG9vVEN1blJtT0JKQmZ3TEllZWcyMUJKb3kveQo2OGxPZk9HU1NEVFp0dkEyMGNPcUNZTFVVYmFSWERzdUFCNVp4NzdBSTZPZEZ1Tk01S2FlTG9td3NWVWF4MFlYCjUzd3ZYcUFaNG1DejN4dnJ1MlBwTEtyOHk3
anFTdEw1MHgra1hxZlFQaWZxaXNQVXlkYktmT0l2RFhFVWVyaWQKRytmWXJFNUkzS3JDM3BZVStUWmJ1eEVrZm4yUEEvSE5XVk5hN2VKdjVnSDJLU1gwaCtuRzBMT3hPRjhmRlluTApUbHdGa09OdU9xU254Vk1wYUM4aUQ1R1VIVi9JN3dBMTFRQjZlVEM3Wmd0ejhQRHM3MHN6U1A2dzNrNXIxaGpyCnJhV2RpMnBDL1hUQzRiR3VRQ3dhNXcwVTNBSWJCVGxCQW9JQkFEc1RONGhvclVHNWw3MXhLZk5ibVBTbDZ6RlIKYTJ4d2U2VVZPOVZzMFpHeEdLWWJSN1VuVDBDL1FqUiswS2JsbE9leDdFY3cyMklCcmFFVzBGbXpuVnoyUW9FNwpMUE5COXhyTTFEeE56UjZEbFBUeERMcEFGWVlUcm40SWY1cjFVdVdpc2lMdmd6T2xGTlVITnN5UFJIZWNGblhUCnNhTk9JWkgrQTJ5KzF3QWdpSFZIS2JPRGRHeVFQVlQ0TXFFWkJaY2pQcmRBekNKcnloSHlYdHBqRjFSdlFEYTMKTVM3U3JVTGM4djJGQWJ1VG1QZ2R1ZHBKd1Q4dENCa2VRKzZ4YmJWN3YrZzBEMG5EWFNIZFVwNXFyUzcrTnhtVwp4NWV4UHo1VENhYXcxSnkzWjRmT1MzMTV6eHJGdmRHTmhWRXhMMzRlUVlzOHRYN0N0VWxuWkNray9zYz0KLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0K
# Create the traefik RBAC resources
vi traefik-rbac.yaml
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik
  namespace: kube-system
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - endpoints
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
    - extensions
    resources:
    - ingresses/status
    verbs:
    - update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik
subjects:
- kind: ServiceAccount
  name: traefik
  namespace: kube-system
# Create traefik-daemonset-https
vi traefik-daemonset-https.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik
  namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: traefik
  namespace: kube-system
  labels:
    k8s-app: traefik
spec:
  selector:
    matchLabels:
      k8s-app: traefik
  template:
    metadata:
      labels:
        k8s-app: traefik
        name: traefik
    spec:
      serviceAccountName: traefik
      terminationGracePeriodSeconds: 60
      volumes:
      - name: ssl
        secret:
          secretName: tls-cert
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet      
      containers:
      - image: traefik:v1.7.16
        name: traefik
        imagePullPolicy: Always
        volumeMounts:
        - mountPath: /certs         
          name: ssl
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: https
          containerPort: 443
          hostPort: 443
        - name: admin
          containerPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --web
        - --api.dashboard
        - --logLevel=INFO
        - --web.metrics
        - --metrics.prometheus
        - --web.metrics.prometheus
        - --kubernetes
        - --traefiklog
        - --traefiklog.format=json
        - --accesslog
        - --accesslog.format=json
        - --accessLog.fields.headers.defaultMode=redact
        - --insecureskipverify=true
        - --defaultentrypoints=http,https
        - --entrypoints=Name:https Address::443 TLS
        - --entrypoints=Name:http Address::80      
      #nodeSelector:
      #  ingress: yes
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/ingress
        operator: Equal
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1

---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: traefik
  name: traefik
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik
  clusterIP: None
  ports:
    - protocol: TCP
      port: 80
      name: http
    - protocol: TCP
      port: 443
      name: https
    - protocol: TCP
      port: 8080
      name: admin
  type: ClusterIP
# Create the traefik-dashboard Ingress
vi traefik-dashboard.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: traefik-dashboard
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/frontend-entry-points: http,https
spec:
  rules:
  - host: trae.xxxx.com
    http:
      paths:
        - backend:
            serviceName: traefik
            servicePort: 8080
  tls:
   - secretName: tls-cert

# Create the traefik resources
kubectl apply -f .
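A quick check that the DaemonSet rolled out; the k8s-app=traefik label comes from the manifests above, and one pod per schedulable node is expected:
# confirm traefik pods are Running on each node
kubectl -n kube-system get ds traefik
kubectl -n kube-system get pods -l k8s-app=traefik -o wide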
# Add a DNS record for the dashboard host, then open:
http://trae.xxxx.com
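If no DNS record exists yet, a temporary hosts entry pointing at any node running traefik is enough for a smoke test; the node IP below is only an example:
# example only: substitute a real traefik node IP
echo "192.168.30.51 trae.xxxx.com" >> /etc/hosts
curl -s -o /dev/null -w "%{http_code}\n" http://trae.xxxx.com/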
kubernetes-dashboard deployment
# base64-encode the dashboard certificate and key
cat dashboard.pem | base64 | tr -d '\n'
cat dashboard-key.pem | base64 | tr -d '\n'
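Alternatively, kubectl can render the same Secret manifest and spare the manual base64 step; a sketch assuming dashboard.pem and dashboard-key.pem sit in the current directory:
# prints an equivalent Secret manifest; --dry-run means nothing is created
kubectl -n kubernetes-dashboard create secret generic kubernetes-dashboard-certs \
  --from-file=dashboard.crt=dashboard.pem \
  --from-file=dashboard.key=dashboard-key.pem \
  --dry-run -o yaml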
vi kubernetes-dashboard.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque
data:
  dashboard.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBeFlFV0MxbGlqcnFzNW5vcHBxTXF0YzZSY0pnSWFJSGhGemZZUWhRQm5pK0Vjam8vCkRTUkYvY3BUOFlkTTg2MVpEV1lSN1FEelFLNmJUTmRLWXJJYmpVWHJpRFVFU01EUW13Y1VteTMzWjFpeXR6K0wKUUVmTVFvWVNReGVIY2RqUHp3bUhFS0todk9vNmxQTHNFWkMwQ3ZCamw2VHlERjhuSDEzby9kRlRVbGJhWUlGaQpPeGVIWkxMMTZKbmNLK3RVaW9ncjdLekFKMUkxTjdwOVQ1blZ5YU9PbWNCVEFnU3RJM0ZwSzdMZG1zaVU0ZEZ0CkpSSFZ0eTh6Y3dCSU9wWnhqV29mM2ROVkRrVUFsYjVtV2psU0RaQ2lhYmFYQi91NmJ0R0k3RlY2cENaUzdDVG4KeWlpUFlFSXRPSGRCT0VycGpKZWQ0bHQ5K2MvNDE3UTRIaiswdndJREFRQUJBb0lCQVFDK1daSWdjQTZRRnhScQpzVlNST1BNQjlFdXlJNlQrN0NZL2xXQUZGM2tUdHlKRVlTVEJpck0yVFprbjBFbjNGSndlVU1CNEZwRmJScTJBCm1vSWpxeHJveG5taGRjOWlPd3NTVHZtcU1kd2ZLNXBiQ0pBeDdNRE5ZS0FiTDRNbjAxazlaaVpaZnhTNG1WcksKa1hHNTRDZlYzeWR0VU5qRDJiVkFBdWQ2TVJQSDV5QWJTVktsMG9ONkRCaFV4MlYyWEo0WnRUVHE0b3R6VGYxZwp3SjNJeVFjSXl3czE2V3dkeHpuYStqVmpOYU5OQ3ZCT1BMbm9TeXZBQXZGRG9UYmUrMG1tcnZLVmlSeDBDT1FzCkUwNjFtNHY2eUExL3locndkT1BDYXN6SkpjWlYzOThJTzFKb2QxUHk3OU9aT1FpY1FEOGhwQmxqb0FSQ2JlY3QKRFFPcG5CR0JBb0dCQVBhYlJSSGpPTkxIQ25JZWlFQU1EYXNwQXo2RGxRNkQvdWNNdzROdkVPRVNVa3dvQ0p4cApwK1hJeVVzT1B1d2swTzVCcHJRcHZjdGYyWXlLZTFtR25iVUpmUVNWNGpLdWpqb0M0OWhOWk9lSE8zd0xMcnNXCkl1SU1Qeko0TjhxSzl0dUpDQ3BVYUZFVzRiN1R2OGsyK1pJWHJwN3hzNklDd01EUnpTaW9wY0hCQW9HQkFNMEgKQVl1bmdzY3hTM2JnZ05idU5sQ3lIOHBLZFVPbi95cU9IQUdYcG9vZmJUbXJiUUlWN0ZOVSszUTlYc2ErVVE0QwpUbVdFbzhabVhrL3lIV2FDVWxpRkN0ckRhTzNUZVhvb2pia1JyaDcxakFXN0pjVDRVZ1ZwcG1RakFVUW8vOWtVCmxHMUNpOTFZZy94dlV5dHlYM1BnZHJ6SnU2aWNsM1pVZ1h4dzNoWi9Bb0dBZENmY2w3bFVLWXZSTXNHSTRjb0wKb2lRMlAvclFlYjdZa05IbFFZSk9EQVdLT0E3ZlIzVkl2U1lmRWpoS2tRWWlWeWNiTTE4NTQ1SnBNUmFGVlR6ZwpDY2JIV1NLVUlkVXdic2l2czFGNUJza2V6cVdoeEVOLytNTlYvUnE5QkswQjY1UVhBWUV5aFlkbW0zQzN0RG90CndZOWdFOE83SGNONE1ScGhMUmFLeE1FQ2dZRUFoS2E5eHorUUM1VEhRSmlzZzJNSVhWbUIyLzRrdEt0akdvTnIKZDFSSStpQ3ZLSnJUSW9CUXNQSFE1em8xc2R5ODBKV0paNEZUL1MrS1lhdENmbXBmSU1xalpUcjlEcksrYTkwRgpKUEpkZDhaaTIrcGoyM2JXaW8zNmk5dGlIRmx5ZjE4alVUVzNESFVTb0NiZTVzTlBJc2ZkeXZPeXFMcjMvQ1ZjCnlaOU1jYjBDZ1lBMVp2RVM3bU42Nm10T2JpSlR3a3hhaTVvS2tHbDdHTDJkZXJFUmxsc1YrNWRCSVY4dG5DTnAKT2tjMFlMbHV2TEg4cG4zd2VCNzg5dUFCQjNXYmNKcHg0L2NIRm9oZDNhdlR0RThRVjJod0tNS2RKQVBvTHNoMgprK2lEUWd1dmFxSzNmL1RYUW43bWU3dWFqSDk3SXZldXJtWWsvVmRJY0dicnd1SVRzd0FEYWc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
  dashboard.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ5ekNDQXQrZ0F3SUJBZ0lVUWRIVXdKS1JYc1ZRb2VYS1JDTjd0eVcwWU04d0RRWUpLb1pJaHZjTkFRRUwKQlFBd2JqRUxNQWtHQTFVRUJoTUNRMDR4RWpBUUJnTlZCQWdUQ1VkMVlXNW5SRzl1WnpFU01CQUdBMVVFQnhNSgpSM1ZoYm1kYWFHOTFNUkF3RGdZRFZRUUtFd2R0WkdSbllXMWxNUkF3RGdZRFZRUUxFd2R0WkdSbllXMWxNUk13CkVRWURWUVFERXdwcmRXSmxjbTVsZEdWek1CNFhEVEU1TURjd05ERXhNVE13TUZvWERUSTVNRGN3TVRFeE1UTXcKTUZvd2JURUxNQWtHQTFVRUJoTUNRMDR4RWpBUUJnTlZCQWdUQ1VkMVlXNW5SRzl1WnpFU01CQUdBMVVFQnhNSgpSM1ZoYm1kYWFHOTFNUkF3RGdZRFZRUUtFd2R0WkdSbllXMWxNUkF3RGdZRFZRUUxFd2R0WkdSbllXMWxNUkl3CkVBWURWUVFERXdsa1lYTm9ZbTloY21Rd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUIKQVFERmdSWUxXV0tPdXF6bWVpbW1veXExenBGd21BaG9nZUVYTjloQ0ZBR2VMNFJ5T2o4TkpFWDl5bFB4aDB6egpyVmtOWmhIdEFQTkFycHRNMTBwaXNodU5SZXVJTlFSSXdOQ2JCeFNiTGZkbldMSzNQNHRBUjh5Q2hoSkRGNGR4CjJNL1BDWWNRb3FHODZqcVU4dXdSa0xRSzhHT1hwUElNWHljZlhlajkwVk5TVnRwZ2dXSTdGNGRrc3ZYb21kd3IKNjFTS2lDdnNyTUFuVWpVM3VuMVBtZFhKbzQ2WndGTUNCSzBqY1drcnN0MmF5SlRoMFcwbEVkVzNMekp6QUVnNgpsbkdOYWgvZDAxVU9SUUNWdm1aYU9WSU5rS0pwdHBjSCs3cHUwWWpzVlhxa0psTHNKT2ZLS0k5Z1FpMDRkMEU0ClN1bU1sNTNpVzMzNXovalh0RGdlUDdTL0FnTUJBQUdqZ1kwd2dZb3dEZ1lEVlIwUEFRSC9CQVFEQWdXZ01CMEcKQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjBHQTFVZApEZ1FXQkJURTl6cWx4dkErRXMrbE8zWlFEMlhubGFHRFpqQWZCZ05WSFNNRUdEQVdnQlJ4NEtjQVJjYWtSL2J4Cm13b1RCZURzK3hBb2FUQUxCZ05WSFJFRUJEQUNnZ0F3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUJnWHZwTEMKQjIybXlQaURlZnhsWGNZRzAvY0R2RXlYcTlENWtKTnBxKzFZQ0EvMlp2RDIyN1Q5VjY3aHVyTlA3T2FvSG95Tgo0MHpkR3lZTGRNV3pyZTQwVksxdC84N3pDTENzamt1ZXRCRWEwNVRqUTJhbDRhSzJ6TXl5MkJLWEpYbjlvdkhzCjJwNndvL001eklEOXl2OEhyRkZqWHM3NitTUTFzNXpOdUxuaDBET0Z1SktiZUZxSUJyNmZRbXlsb0l1VURtZjYKcGtQYkJyRnJpNHFGS0lDcVZKRCt3Z01zRFBiclVMZXF5NWlBVjNqRzJKMFgxOE4zdklCeUFwdWhZbjNudlV0TwpLREVIWkFJcFpjRWdqQ2ZLVDNyaERLL3JLN0VFZkxLcGlCdGJya3pFbjVWV3FQUFJEK3ZPU2VySldETDl1K0xyCmhEazlvZ084cmNqQzZGdz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: dashboard-tls-cert
  namespace: kubernetes-dashboard
type: Opaque
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUdYVENDQlVXZ0F3SUJBZ0lTQTBCSy82MUwvZVNHb2FjNmFjRnZLaDVOTUEwR0NTcUdTSWIzRFFFQkN3VUEKTUVveEN6QUpCZ05WQkFZVEFsVlRNUll3RkFZRFZRUUtFdzFNWlhRbmN5QkZibU55ZVhCME1TTXdJUVlEVlFRRApFeHBNWlhRbmN5QkZibU55ZVhCMElFRjFkR2h3Y21sMGVTQllNekFlRncweE9UQTVNRFl4TlRNNE1EVmFGdzB4Ck9URXlNRFV4TlRNNE1EVmFNQll4RkRBU0JnTlZCQU1UQzIxa1pHZGhiV1V1WTI5dE1JSUNJakFOQmdrcWhraUcKOXcwQkFRRUZBQU9DQWc4QU1JSUNDZ0tDQWdFQW9mSVdOdTE4YUp1T3Jzd0JjZE9lODN0dWpXZ2dpUXl0VVYxQwpqNVhYbzNjQTM1L2ZxQXNGVHpJRGNwUmxhTGJ6SHd1d1psOWNSKzJuRENaUzI4VlhZaXcrSkQvQXpna3FzTHFJCjZ3YlFhcHNCa1lYUzRuT1UrZzhSMVgwcm52ckpickE1eHFJSWJKM002ajVLTXZ4RktvMEV3YXNBY2NiYlVGOW4KMHQ2RzNreG4zWW1Sek5HeHh2bXZ4V2prNWNkSWMza0MyT1VuRktGOG5XemJab2JiNk9PUnZSaElEWW5YdjkxdgoyMUYwQnZ0Q21GY0FEaDRqZXUrLzNKVDVLcEJkdkFHOHI3aU1wbkhKaFU1alhqTXlPRytMbkcvcnJuRzJGaXpHCmx1UHQwKzRlK0ZRSXFZY1BUM1cyTUF2ZDlzQTNEMThsUW82M00vZlMyYjNIYVNidFY0b1pmNS9zTzJNeEVPVnoKVEd1M0NxYk40TkcrZE8ycXoxYWxMQmlGZlVjNEdmUVpYRmlLaDFzazl3Qm5zeWhqYUZmdUx6bHRxMDg3STJLYQorVlRaUzFQSlJFbGduM3UwY1FmaENjelF5ZTJ3Vjl6RE9lVmUxeTBjLzZ0RWJhNllCeGR2ZGcwOFpKL0QwYTBLCnJvWlVJMW5Rc2RKeE8rQ3N1OURLYjROZzJCYnZkWVpHVWJrSCtSUDU0UUdrS1VnYnVxNVIwbXI0U1I2VUwrRE4KZjNxem81a3ZiMXVRWXFpaDZYUFVDVUVPOTNOU1Y2MTNUSUVOTUpyYjVhbGRLUkhPZlpWL201QThlUy9ibFFYcgpOV3FCRy9OL2RtckZjMmcyNGJEY3d5OXIzL3FkNy9MTWxmMVRVdzJGczR3M2x2VHJFanlwWEZhQ3BRRGxkc0xJCkYwcWVKVnNDQXdFQUFhT0NBbTh4Z2dKck1BNEdBMVVkRHdFQi93UUVBd0lGb0RBZEJnTlZIU1VFRmpBVUJnZ3IKQmdFRkJRY0RBUVlJS3dZQkJRVUhBd0l3REFZRFZSMFRBUUgvQkFJd0FEQWRCZ05WSFE0RUZnUVVHUUNXOGNFbgpaNWhVWjBDa004QW03Wjh7NGJNd0h4WURWUjBqQkJnd0ZvQVVxRXBxWXdSOTNicm0wVG0zcGtWbDcvT283S0V3CmJ3WUlLd1lCQlFVSEFRRUVZekJoTUM0R0NDc0dBUVVGQnpBQmhpSm9kSFJ3T2k4dmIyTnpjQzVwYm5RdGVETXUKYkdWMGMyVnVZM0o1Y0hRdWIzSm5NQzhHQ0NzR0FRVUZCekFDaGlOb2RIUndPaTh3WTJWeWRDNXBiblF0ZURNdQpiR1YwYzJWdVkzSjVjSFF1YjNKbkx6QWxCZ05WSFJFRUhqQWNnZzBxTG0xa1pHZGhiV1V1WTI5dGdndHRaR1JuCllXMWxMbU52YlRCTUJnTlZIU0FFUlRCRE1BZ0dCbWVCREFFQ0FUQTNCZ3NyQmdFRUFZTGZFd0VCQVRBb01DWUcKQ0NzR0FRVUZCd0lCRmhwb2RIUndPaTh3WTNCekxteGxkSE5sYm1OeWVYQjBMbTl5WnpDQ0FRUUdDaXNHQVFRQgoxbmtDQkFJRWdmVUVnZklBOEFCM0FPSnBTNjRtNk9sQUNlaUdHN1k3ZzlRKzUvNTBpUHVranlpVEFaM2Q4ZHYrCkFBQUJiUWR3b2dNQUFBUURBRWd3UmdJaEFLWldRaVVPZkZDcGdjT0JPZ0xoTjFBQjgycHg3bUR2QXYxUnRKVmoKQU0zNEFpRUFtQWpPY012WTQ2Y0VwT2lKbW4vKzB4bnZsTmR0TlNoNExvWHJaUW9sUnJZQWRRQXBQRkdXVk1nNQpaYnFxVVB4WUI5UzNiNzlZZWlseTNLVEREUFRsUlVmMGVBQUFBVzBIY0tJbEFBQUVBd0JHTUVRQ0lEeGhFMThpCm14MjBySFFHS2RpYzVCVnQ3bFBiTzBRNy9KdGI3bkVvR1grSEFpQnRDTWxXbGxlMStNV3JrUXBKbXBaTHE3bWYKWXEyZjZXc2k1QVpmQmZFRndqQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFXUS8ycG8wcHRXWXJkbk5ndkZIbgpMK2RyclBDT2xpUXNuaFJWajdiTlhFOGNWb0l6TmU3VGRjazJINE5CUTZUZkZicmkvdHdubkFXRThzcDNPNHVWClV1bVM1Y2FGYmFPdnJIa3ZLVTNUVGhLODNqcmpFZ1N6cEo0d3k2MUlkNGhPZ0FYODVpd2REUEhvL0o0YXkzVDEKanpyMGduY0x0N1R0Tjd3dzJ5Z1RZSXBPTTBVVWtjd05GUGZZYmFRYzVqVjdvcU1raGlMNUtiSGpYVDdRcXR4YwprY3J2VXZMdERDTTQvMGpWN01FNnd4enhCQ1N1ekZWTlVlSEVVS0dDci9qRHRXV0hFZ25JNEZ5MGhQT0F0RlZzCmpDVDhWSTVYMUVmeExTRUdONkxob2NoOHl1akJYWTVNSGlVUDc5REtHaXkzaXJTZ2xtU3BVZXpMSzkwdDVzb3MKd2c9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCgotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJRWtqQ0NBM3FnQXdJQkFnSVFDZ0ZCUWdBQUFWT0ZjMm9MaGV5bkNEQU5CZ2txaGtpRzl3MEJBUXNGQURBLwpNU1F3SWdZRFZRUUtFeHRFYVdkcGRHRnNJRk5wWjI1aGRIVnlaU0JVY25WemRDQkRieTR4RnpBVkJnTlZCQU1UCkRrUlRWQ0JTYjI5MElFTkJJRmd6TUI0WERURTJNRE14TnpFMk5EQTBObG9YRFRJeE1ETXhOekUyTkRBME5sb3cKU2pFTE1Ba0dBMVVFQmhNQ1ZWTXhGakFVQmdOVkJBb1REVXhsZENkeklFVnVZM0o1Y0hReEl6QWhCZ05WQkFNVApHa3hsZENkeklFVnVZM0o1Y0hRZ1FYVjBhRzl5YVhSNUlGZ3pNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUFuTk1NOEZybExrZTNjbDAzZzdO
b1l6RHExelVtR1NYaHZiNDE4WENTTDdlNFMwRUYKcTZtZU5RaFk3TEVxeEdpSEM2UGpkZVRtODZkaWNicDVnV0FmMTVHYW4vUFFlR2R4eUdrT2xaSFAvdWFaNldBOApTTXgreWsxM0VpU2RSeHRhNjduc0hqY0FISnlzZTZjRjZzNUs2NzFCNVRhWXVjdjliVHlXYU44aktrS1FESVowClo4aC9wWnE0VW1FVUV6OWw2WUtIeTl2NkRsYjJob256aFQrWGhxK3czQnJ2YXcyVkZuM0VLNkJsc3BrRU5uV0EKYTZ4Szh5dVFTWGd2b3BaUEtpQWxLUVRHZE1EUU1jMlBNVGlWRnJxb003aEQ4YkVmd3pCL29ua3hFejB0TnZqagovUEl6YXJrNU1jV3Z4STBOSFdRV002cjZoQ20yMUF2QTJIM0Rrd0lEQVFBQm80SUJmVENDQVhrd0VnWURWUjBUCkFRSC9CQWd3QmdFQi93SUJBREFPQmdOVkhROEJBZjhFQkFNQ0FZWXdmd1lJS3dZQkJRVUhBUUVFY3pCeE1ESUcKQ0NzR0FRVUZCekFCaGlab2RIUndPaTh3YVhOeVp5NTBjblZ6ZEdsa0xtOWpjM0F1YVdSbGJuUnlkWE4wTG1OdgpiVEE3QmdnckJnRUZCUWN3QW9ZdmFIUjBjRG92TDJGd2NITXVhV1JsYm5SeWRYTjBMbU52YlM5eWIyOTBjeTlrCmMzUnliMjkwWTJGNE15NXdOMk13SHdZRFZSMGpCQmd3Rm9BVXhLZXhwSHNzY2ZyYjRVdVFkZi9FRldDRmlSQXcKVkFZRFZSMGdCRTB3U3pBSUJnWm5nUXdCQWdFd1B3WUxLd1lCQkFHQzN4TUJBUUV3TURBdUJnZ3JCZ0VGQlFjQwpBUllpYUhSMGNEb3ZMMk53Y3k1eWIyOTBMWGd4TG14bGRITmxibU55ZVhCMExtOXlaekE4QmdOVkhSOEVOVEF6Ck1ER2dMNkF0aGl0b2RIUndPaTh3WTNKc0xtbGtaVzUwY25WemRDNWpiMjB2UkZOVVVrOVBWRU5CV0RORFVrd3UKWTNKc01CMEdBMVVkRGdRV0JCU29TbXBqQkgzZHV1YlJPYmVtUldYdjg2anNvVEFOQmdrcWhraUc5dzBCQVFzRgpBQU9DQVFFQTNUUFhFZk5qV0RqZEdCWDdDVlcrZGxhNWNFaWxhVWNuZThJa0NKTHhXaDlLRWlrM0pIUlJIR0pvCnVNMlZjR2ZsOTZTOFRpaFJ6WnZvcm9lZDZ0aTZXcUVCbXR6dzNXb2RhdGcrVnlPZXBoNEVZcHIvMXdYS3R4OC8Kd0FwSXZKU3d0bVZpNE1GVTVhTXFyU0RFNmVhNzNNajJ0Y015bzVqTWQ2am1lV1VISzhzby9qb1dVb0hPVWd3dQpYNFBvMVFZeiszZHN6a0RxTXA0ZmtseEJ3WFJzVzEwS1h7UE1UWitzT1BBdmV5eGluZG1qa1c4bEd5K1FzUmxHClBmWitHNlo2aDdtamVtMFkraVdsa1ljVjRQSVdMMWl3Qmk4c2FDYkdTNWpOMnA4TStYK1E3VU5LRWtST2IzTjYKS09xa3FtNTdUSDJIM2VESkFrU25oNi9ETkZ1MFFnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
  tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS0FJQkFBS0NBZ0VBb2ZJV051MThhSnVPcnN3QmNkT2U4M3R1aldnZ2lReXRVVjFDajVYWG8zY0EzNS9mCnFBc0ZUeklEY3BSbGFMYnpId3V3Wmw5Y1IrMm5EQ1pTMjhWWFlpdytKRC9BemdrcXNMcUk2d2JRYXBzQmtZWFMKNG5PVStnOFIxWDBybnZySmJyQTV4cUlJYkozTTZqNUtNdnhGS28wRXdhc0FjY2JiVUY5bjB0Nkcza3huM1ltUgp6Tkd4eHVtdnhXams1Y2RJYzNrQzJPVW5GS0Y4bld6YlpvYmI2T09SdlJoSURZblh3OTF2MjFGMEJ2dENtRmNBCkRoNGpldSsvM0pUNUtwQmR2QUc4cjdpTXBuSEpoVTVqWGpNeU9HK0xuRy9ycm5HMkZpekdsdVB0MCs0ZStGUUkKcVljUFQzVzJNQXZkOXNBM0QxOGxRbzYzTS9mUzJiM0hhU2J0VjRvWmY1L3NPMk14RU9WelRHdTNDcWJONE5HKwpkTzJxejFhbExCaUZmVWM0R2ZRWlhGaUtoMXNrOXdCbnN5aGphRmZ1THpsdHEwODdJMkthK1ZUWlMxUEpSRWxnCm4zdTBjUWZoQ2N6UXllMndWOXpET2VWZTF5MGMvNnRFYmE2WUJ4ZHZkZzA4WkovRDBhMEtyb1pVSTFuUXNkSngKTytDc3U5REtiNE5nMkJidmRZWkdVYmtIK1JQNTRRR2tLVWdidXE1UjBtcjRTUjZVTCtETmYzcXpvNWt2YjF1UQpZcWloNlhQVUNVRU85M05TVjYxM1RJRU5NSnJiNWFsZEtSSE9mWlYvbTVBOGVTL2JsUVhyTldxQkcvTi9kbXJGCmMyZzI0YkRjd3k5cjMvcWQ3L0xNbGYxVFV3MkZzNHczbHZUckVqeXBYRmFDcFFEbGRzTElGMHFlSlZzQ0F3RUEKQVFLQ0FnQXY5Zk13UnpzTisrdlF4cWd5M3JwM1gzbkpOU3BWakVTVUVTdVNQSTFGWXd3R0xtSGRjWTRiK3pMYwpMeWl0VDJsSEszNE5nM1pmOHZrQzl5S1k1YVBRZGt2ZERtaDZYR3FoTmswd1ZhOUpzeWhPd2JSSHpuVXpiVjBaCnZkMDZVd2x1MTQvMHpLMzBCUFBYOTZTZjN1aFpCclIrNnJiUisxT2VSUE1KbDArWDdFYmliRWlhd1F1R1hsVHAKQVB5eE5FaTNzZ0h2M0VhcnJIdXNYNzNHYW5BY1U3RW9zRlUrZFRGSktEcGxXSVVsUUNwajFYZzF0aVZKMWxFYQo4Wit0UkY0T1BQRjFsUkZLaGU1cHBXSjJWbkVzRjVUZ09xRXc0NHBLbk80Zlo5ZGFhVzRRbTBxSmNtOU5XQTRoCndwSDA3czRmcGt6eG5qU1JsbmFDZDlyandGeVBsSkJzUXNhVlFFNzlpQzJZMTRnTk9KQ0xyMXRKSEQ2ODN3bW4KS3ZNOHZpOTdHTmIybXZHeWNtZnloNVpzTFBpTWNqOFFER3VWZU53dlNESXpybnhqVkZlc0liTWt5UlZRem9IVApTTHRQbXdVR3lwRHVrMDhaZytsT0lYOC85K3lqMER3MDRqenllTVptYlFVdkd2N2lNWjFUaHdaRHF1YkJXV3J4CmtYTmJwTG9BMGxrcHh5bjdGam9Ya20zM2ZKQURjd2xWSS82WFNrSm1FaFVlZmZnaFFSMGNyVGphQVd1Qkx2Qk0KT0s5aEEzT3RTN2F0S2FDb1lvSmRrYkpHQTdWdytNNzA4NEJOTGhxM1Fyckg4S3M3Z05pdC9NN3lxSnU1alBaZgo2SE1seHNyWU9NVUhuVlk4VDkwN0Q3cS9ORUNnRThzODhnZzAyQ3JNWTFqanE4UnBpUUtDQVFFQTE2UHJaMUEwClNISS83akdmS3BETkJzQ0xrVUFxRERKSzQ0dFdJYmJBUXFhRTN1eDh4bkFlU2NjSHozbS9ScEpPSGtteHZTZlgKbTJ1Wk8veGtNTWhYK2lwOHdFOHZibzR1enVNYitTSXE3bWpialJkK1JJczJ5NHJsZVQ2NGVjRWc4R2pZckExZgpiSEI0MmhQclVTcXpxUVIwOTZocm1Lb1diU0RDZDZwOUVNeWVzT3IwTjdtQmJYVVZPazJxZGtYRlZWbHBlUDdpClFxWGdRUUI0bHgzLzJJdlpBMlhJUXlQdGJ0RWVRbmgyQ3FNM2NDMzR0VEVjZ244K0VwNG9SWmkwTTBHaUY3bXgKOTEvZHY2THZlNTR5K1pON1lXd1NFQ09ubzd5bDlvTlBZVnVGMGRiMjh0elppMThCeHJTQ2JESE1XbExvUzhWNgpXTEo0OGlSODJDYkc1d0tDQVFFQXdFRjM4KzYyeDhDU2x0blZZNlJaN0J0NEdiNEJqVWhWYXZ0NFkxUGFlbXFNCjFidFVnR2JyUnBoNHFUSEFTckUwUUZLeVZKYnlCUkJyRHIxWHU4WWRSVXQzZC92VzlIR1dPd1BKdTN2M3pLbHMKQ2xsZnpFY3J5L1l2aHAzSzlEcGR6OE1icHdueW5xcGV6b0xMNlJpL3JnK0hyTzBueXd1RSt0T2xYVFo2eUtadApHWVdTSVBWaG00NUJkc2ZxUzhnYjVvbjA0bHh4bnhxVnJvN0c0TUR6cmVEYlFhaGdyS3VuRWxwajZ4eW1PVWpBCkdCZDR3QUVrUExxNUUrRWcreDY4TkRLVTYwK29ybFhLWVhDQm5HSFZOQ3BVcmswVXkrcHFZZmFEN3VuR2VzaHMKSEwra3lXbXl5a3ErTmNKbnRXMFNSNy9sU1IvZUFhVEZyVzZVaXV0RGJRS0NBUUVBemhRYU9PNmVPSW51N016QgpScVdCT3EyeDg4cjFKQmpBRnZzbkFpc3JTOGJsZmtGVTdXREdvVTB5K3FWb0ZhSm1RMjI4RFlCUS9YZnp4aTdxCjlPL1JuQU1VbTVoUlJQOWVYbHNPZGFXZ2o1em9ETXRoNFZHRnVUbHhHZERGN1oyU3hBMysyMVlnVm5xYUZCY3IKTUxOMVpOWWNqajJITGl1R0tSNUFtcW4wd2FRN0YrcENJQ3NKTkxqSzQ2QXJnc0lrMXU4TzdCSHgyeTI0eFlZVQp1SjV6emRmQU9nNEFONkhURzY5L2twaWFmb29DeGhNNDlyZ0xmZTdxUEZLbk8vTzJhckdUbmNiWi9BWEMzb3h4Ci81dHRMYlF6R2lSMGtyWHdWSHRKdys4elltQmIzL0RtcWF4RHZueTZMdEo5UGJiTmk1aGw1VnZCRTVqa0dzeWgKL3RQNEN3S0NBUUJ2R1dZb0lKcWZkRGxCMHovdEJOeXlCRzJ5OG9vVEN1blJtT0JKQmZ3TEllZWcyMUJKb3kveQo2OGxPZk9HU1NEVFp0dkEyMGNPcUNZTFVVYmFSWERzdUFCNVp4NzdBSTZPZEZ1Tk01S2FlTG9td3NWVWF4MFlYCjUzd3ZYcUFaNG1DejN4dnJ1MlBwTEtyOHk3
anFTdEw1MHgra1hxZlFQaWZxaXNQVXlkYktmT0l2RFhFVWVyaWQKRytmWXJFNUkzS3JDM3BZVStUWmJ1eEVrZm4yUEEvSE5XVk5hN2VKdjVnSDJLU1gwaCtuRzBMT3hPRjhmRlluTApUbHdGa09OdU9xU254Vk1wYUM4aUQ1R1VIVi9JN3dBMTFRQjZlVEM3Wmd0ejhQRHM3MHN6U1A2dzNrNXIxaGpyCnJhV2RpMnBDL1hUQzRiR3VRQ3dhNXcwVTNBSWJCVGxCQW9JQkFEc1RONGhvclVHNWw3MXhLZk5ibVBTbDZ6RlIKYTJ4d2U2VVZPOVZzMFpHeEdLWWJSN1VuVDBDL1FqUiswS2JsbE9leDdFY3cyMklCcmFFVzBGbXpuVnoyUW9FNwpMUE5COXhyTTFEeE56UjZEbFBUeERMcEFGWVlUcm40SWY1cjFVdVdpc2lMdmd6T2xGTlVITnN5UFJIZWNGblhUCnNhTk9JWkgrQTJ5KzF3QWdpSFZIS2JPRGRHeVFQVlQ0TXFFWkJaY2pQcmRBekNKcnloSHlYdHBqRjFSdlFEYTMKTVM3U3JVTGM4djJGQWJ1VG1QZ2R1ZHBKd1Q4dENCa2VRKzZ4YmJWN3YrZzBEMG5EWFNIZFVwNXFyUzcrTnhtVwp4NWV4UHo1VENhYXcxSnkzWjRmT1MzMTV6eHJGdmRHTmhWRXhMMzRlUVlzOHRYN0N0VWxuWkNray9zYz0KLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0K
---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.0-beta4
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            - --token-ttl=43200
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.1
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/frontend-entry-points: http,https
    traefik.ingress.kubernetes.io/redirect-entry-point: https
spec:
  rules:
  - host: csdd.xxxx.com
    http:
      paths:
        - backend:
            serviceName: kubernetes-dashboard
            servicePort: 443
  tls:
   - secretName: dashboard-tls-cert
# Create the kubernetes-dashboard resources
kubectl apply -f kubernetes-dashboard.yaml
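Confirm the dashboard components started before generating a login token:
# both the dashboard and metrics-scraper pods should reach Running
kubectl -n kubernetes-dashboard get pods,svc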
# Create a kubernetes-dashboard login token
# Generate the ServiceAccount
kubectl create sa dashboard-admin -n kube-system
# Grant the token cluster-admin access
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
# Get the token secret name
ADMIN_SECRET=$(kubectl get secrets -n kube-system | grep dashboard-admin | awk '{print $1}')
# Get the token value used by dashboard.kubeconfig
DASHBOARD_LOGIN_TOKEN=$(kubectl describe secret -n kube-system ${ADMIN_SECRET} | grep -E '^token' | awk '{print $2}')
echo ${DASHBOARD_LOGIN_TOKEN}
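The same token can be extracted in one step with jsonpath, which is less fragile than parsing describe output:
# decode the token straight from the secret
kubectl -n kube-system get secret ${ADMIN_SECRET} -o jsonpath='{.data.token}' | base64 -d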
# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=dashboard.kubeconfig

# Set client credentials, using the token created above
kubectl config set-credentials dashboard_user \
  --token=${DASHBOARD_LOGIN_TOKEN} \
  --kubeconfig=dashboard.kubeconfig

# Set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=dashboard_user \
  --kubeconfig=dashboard.kubeconfig

# Set the default context
kubectl config use-context default --kubeconfig=dashboard.kubeconfig
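A quick sanity check that the embedded token authenticates; dashboard-admin was bound to cluster-admin above, so listing pods should succeed:
kubectl --kubeconfig=dashboard.kubeconfig get pods -A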
# Bind the hostname (via DNS or a hosts entry), then open:
https://csdd.xxxx.com/#/overview?namespace=default
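Without DNS, a hosts entry pointing at any traefik node also works; the IP below is only an example:
echo "192.168.30.51 csdd.xxxx.com" >> /etc/hosts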
# kubernetes-dashboard relies on metrics to display CPU and memory usage, so metrics-server must be deployed as well
metrics-server deployment
# Create metrics-server-secrets.yaml
# base64-encode the metrics-server certificate and key
cat metrics-server.pem | base64 | tr -d '\n'
cat metrics-server-key.pem | base64 | tr -d '\n'
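As with the dashboard certificates, kubectl can render this Secret directly; a sketch assuming both PEM files are in the current directory (the resulting data keys match the manifest below):
# prints an equivalent Secret manifest; --dry-run means nothing is created
kubectl -n kube-system create secret generic metrics-server-certs \
  --from-file=metrics-server.pem \
  --from-file=metrics-server-key.pem \
  --dry-run -o yaml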
vi metrics-server-secrets.yaml
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-certs
  namespace: kube-system
type: Opaque
data:
  metrics-server.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ3VENDQXRXZ0F3SUJBZ0lVZmdGSjJSUTF6Y20ydndjazFQc1NzTXJtNnJJd0RRWUpLb1pJaHZjTkFRRUwKQlFBd2JqRUxNQWtHQTFVRUJoTUNRMDR4RWpBUUJnTlZCQWdUQ1VkMVlXNW5SRzl1WnpFU01CQUdBMVVFQnhNSgpSM1ZoYm1kYWFHOTFNUkF3RGdZRFZRUUtFd2RqYkhWemRHVnlNUkF3RGdZRFZRUUxFd2RqYkhWemRHVnlNUk13CkVRWURWUVFERXdwcmRXSmxjbTVsZEdWek1CNFhEVEU1TURreU1EQXhOREV3TUZvWERUSTVNRGt4TnpBeE5ERXcKTUZvd2NqRUxNQWtHQTFVRUJoTUNRMDR4RWpBUUJnTlZCQWdUQ1VkMVlXNW5SRzl1WnpFU01CQUdBMVVFQnhNSgpSM1ZoYm1kYWFHOTFNUkF3RGdZRFZRUUtFd2RqYkhWemRHVnlNUkF3RGdZRFZRUUxFd2RqYkhWemRHVnlNUmN3CkZRWURWUVFERXc1dFpYUnlhV056TFhObGNuWmxjakNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0MKQVFvQ2dnRUJBTzIvZ1RTUERMOU94RUhmQTBtdmpFYWtLMUtrWmoxSDlMYXV0dFRQbS8xYlR5T2pSMFNxemFsZgpCSFJESlNkME5DOGlyZGRDcGtZWUVROVJBWWFIWldIb2ZLbDJ6eWpPSXZqc2JrVWJ0T2N4Q0R6dlNZcDBkaXRECnd4NGpuR2hmSGhrUUw1TWQwaEFQTm5rLzdkakxsQ0c2azlBN00wUHB6dEZZNWVOSWJmZG8wMFRMb2c1VjBIb3YKUTZ4TDRGQWwzbGhSRy9nRHYrdjBkSy9NVUJ3eWtaK2E1WlJJRk5hNE5sS2lac29QdVZ2WlpRek10VFJWLzBXbwp1NmlJZnVkcTB4eWk1dWIxSXFnbml0Wm5TN09ZeDBXWW0xV2VEZlIwbis1TU5oR3hTQ3c4bEM1QkVLU3I2a252Ckp6M0tHci95NzFrU2RsTWhHeVFxT09Ld2pkTmowQThDQXdFQUFhTi9NSDB3RGdZRFZSMFBBUUgvQkFRREFnV2cKTUIwR0ExVWRKUVFXTUJRR0NDc0dBUVVGQndNQkJnZ3JCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUIwRwpBMVVkRGdRV0JCVHBBUm5VVmNOS1kzYWFOS0NNZXlNZnZ4aXFPREFmQmdOVkhTTUVHREFXZ0JRWkx0RG9rcm55CnJ4WUhEb2FZSlZQQVZKUDg0ekFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBT3AwWTg1ZjRCSXBscXQ2K3FDMFMKbm9DR2RiVVoyNUNpQ1hEUDBFTWNuQ1QzZzdiWTNGaUpSZDhkbGg1aVNncTRxUlR6N1hDeWhMb1VNVmJTRVVlZApvaEI2Vk90eGJ1azErKzhob2diUUtIQVZlbERjS1MyOGttclZHRXdsSGpjNG1pRUxIbHZ6K0xUYUZzdlYxMFlzCjROdmNMTEE3SVE5N3hMcFNvZURVek5LU2dzU3RQUmVSTW5DR1BMdE1MMFcxRHpjOHNFRWJsTm9sbEFYRWdKMU4KVkxmbHJxbWdxaTcyQmZHOTBFbEtmbVFMV3QwR0lKU0wwdmF2a0kxL0Z0NGFuV2RVT2Zqa3JmYzBpUnhxUVBVNQphRWN5cjF6MzhnZmR6c3ZtV1VuVnpMS0pNeGtPckV3NGZBWHJudmRFSm92YlhJeU9Rd2pqQjhNaklsWER2ZjRvCnN3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
  metrics-server-key.pem: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBN2IrQk5JOE12MDdFUWQ4RFNhK01ScVFyVXFSbVBVZjB0cTYyMU0rYi9WdFBJNk5IClJLck5xVjhFZEVNbEozUTBMeUt0MTBLbVJoZ1JEMUVCaG9kbFllaDhxWGJQS000aStPeHVSUnUwNXpFSVBPOUoKaW5SMkswUERIaU9jYUY4ZUdSQXZreDNTRUE4MmVUL3QyTXVVSWJxVDBEc3pRK25PMFZqbDQwaHQ5MmpUUk11aQpEbFhRZWk5RHJFdmdVQ1hlV0ZFYitBTy82L1Iwcjh5UUhES1JuNXJsbEVnVTFyZzJVcUpteWcrNVc5bGxETXkxCk5GWC9SYWk3cUloKzUyclRIS0xtNXZVaXFDZUsxbWRMczVqSFJaaWJWWjROOUhTZjdrdzJFYkZJTER5VUxrRVEKcEt2cVNlOG5QY29hdi9MdldSSjJVeUViSkNvNDRyQ04wMlBRRHdJREFRQUJBb0lCQVFDV09RVW84cUo1VnduSApIV1QwY0VuUWNQYzIxczRMTnFZM3NCbXlTaVFrYUVlUEd5SnpEd0c0WFdOeEd1UWxFOVhOV3JwQlk4bXdUSkNxCi91Slo0TDk4cCt2dElEY3hiMTdGcm83V2QvVk1oN3pPMDl2QjhtaWdXY2EyQ29aUHBKcGQ5ODQzeFFYd1E4eUYKdkpGTEJRZHFjSHZwZlI2ZGNPVFBmcjV6YUZhamxvN3l4eWc5QTRpQ2xSRXUwNVAwNytTeWNiNndUbGJjVFlOeAo4ZU5DaUs5QTVCZXlYZVNmeEJJYmh4bHcrZ2kwdmV4RUxEOTJFWWZjVXNzZjlzT2sxZUhJVmpJNGZmQndodk5kCjhhbkVNRXJEd3pCc0ZGT2Z3Mkdya3hWSndoMXVEMkl4c253b3hjZ0RxWFI5dy9Wais3emhKZ2ZiWXFpT0FUVmUKQTZOeExHb3hBb0dCQVBhUk1ENHFqOGxwcEhnR3FjZWI0aHpaWkI4cG9maS9aUTIvNUl0aHlQV2FHMDlSRFkxZAowd1Z5dXFWb0RwalFWOENGU2U2RzZlQzNXSDRGVy9IMU9wKzVjSjBDT1B1MFJWMUV0eDlaL0I1U1Z2K3lQWU5RCm12Tm9qeWF2Mi9ZbnpuZDBGVkYrckJhT0lDZ0pxUVMvcnA0OGhmelVaWWsrNFlVVUdkeVBHUS85QW9HQkFQYlgKOGRKZjllcTgwN2lGTW9GTlFuODR4L1pUdVJ4N21obEJwaUhOa0tIUGZ1cHBCMFlMRDJQOWxhYkFGaWtZblY2SgpCQjZ1LzdMTm1mOFZHUjNVOEFiak0rZW43VkdVMkExcXIydTZ5ejRESWRTM040czRlVUw5QmZkYWQ4cGVkZis5Cks5SmQ3MmNSaFM2UUxYcmttZXZFOUpNYTF1VWpzeU1RaFQvK1g1LzdBb0dBWkt5ZGZSU1Z4eEJhZGlPS1dSVTkKK3JlTW1PaS8yTGdWUThyeFB6UDdBTVVlbDRFcHZtbnJ5cEt3d082KzN3aGFmQ0l3TUxObmRUaUhhbFUzMkpCZgprbTMrSEMyWEpMYlRoNlNSL0x3YUpDdE1tSFNuaHlGM1V5R0RLYkd1WjFDVGpkU1pDOEJqOVlXc2ZZeU1OWU1xCmdqT0dKZGgzYU5XQzhYcG1vTmJRemVVQ2dZRUF6M1VlWUZrV0xYc1YxZmJjUTUvVFMybEZaZGxuc25DUFNycksKRFk3ZkI1K0VZeTV5Vm9QbEkzeDAwZmlPcDJ0d2w0dEFVeWx3N2EydXg1dkx5QzYyckpNM2hISzJHZUttMGwvZgpud01XM2I5MEozcjB5NlZqQk5IeXViam5CTVh3RmtpL0U4YXU5a2piVGc4T3FrS0d1b2lGcFR6aGJ5Tlo0eFozClp4azY5UkVDZ1lFQW1qTVdrM0JlSy95ZVkrLytCcFVQVzRteWtlOXdHQnNIeUpPa0wyMllXT3lJYnJBMTdMY1QKZUFzNmtvRitMTjY3Q01UQk45Qm5zQnpYTURsQ3RhMjlCcDkyNWZraEpNU0VMajhWNUIwRmNVQXg2dlVEY2o4RAoxQlJBRWNpZjN5eFExd1ZkTUJnaFdEUkduZzFKREtRVXUwaWY1WkNDWkFyWnpCek95YUhreUE4PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
# resource-reader.yaml
vi resource-reader.yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
# metrics-server-service.yaml
vi metrics-server-service.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: Metrics-server
spec:
  selector:
    k8s-app: metrics-server
  ports:
  - port: 443
    protocol: TCP
    targetPort: 443
# metrics-apiservice.yaml
vi metrics-apiservice.yaml
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100

# auth-reader.yaml
vi auth-reader.yaml
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system

#  auth-delegator.yaml
vi auth-delegator.yaml
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
# aggregated-metrics-reader.yaml
vi aggregated-metrics-reader.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:aggregated-metrics-reader
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["metrics.k8s.io"]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]

# metrics-server-deployment.yaml
vi metrics-server-deployment.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      tolerations:
        - effect: NoSchedule
          key: node.kubernetes.io/unschedulable
          operator: Exists
        - key: NoSchedule
          operator: Exists
          effect: NoSchedule
      volumes:
      # mount in tmp so we can safely use from-scratch images and/or read-only containers
      - name: tmp-dir
        emptyDir: {}
      - name: metrics-server-certs
        secret:
          secretName: metrics-server-certs
      containers:
      - name: metrics-server
        image: juestnow/metrics-server-amd64:v0.3.4
        imagePullPolicy: Always
        command:
        - /metrics-server
        - --tls-cert-file=/certs/metrics-server.pem
        - --tls-private-key-file=/certs/metrics-server-key.pem
        - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP
        - --kubelet-insecure-tls
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp
        - name: metrics-server-certs
          mountPath: /certs
# Create the metrics-server resources
kubectl apply -f .
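Metrics take a minute or two to appear after the first start; these checks confirm the Deployment rolled out and the aggregated API registered (AVAILABLE should show True):
kubectl -n kube-system rollout status deploy/metrics-server
kubectl get apiservice v1beta1.metrics.k8s.io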
# Verify metrics-server
[root@master ~/work]# kubectl top node
NAME      CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
master    162m         6%     957Mi           16%
master2   155m         2%     591Mi           4%
[root@master ~/work]# kubectl top pods -A
NAMESPACE              NAME                                         CPU(cores)   MEMORY(bytes)
kube-system            coredns-9d5b6bdb6-phcwt                      6m           11Mi
kube-system            kube-flannel-ds-amd64-6bpf7                  3m           11Mi
kube-system            kube-flannel-ds-amd64-6sxz2                  4m           12Mi
kube-system            metrics-server-668c6bb96b-z7nfl              1m           14Mi
kube-system            traefik-zkhd2                                8m           23Mi
kube-system            traefik-zs6h3                                5m           21Mi
kubernetes-dashboard   dashboard-metrics-scraper-566cddb686-sft6k   7m           12Mi
kubernetes-dashboard   kubernetes-dashboard-6cd89cd7df-nlkzj        18m          29Mi
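The aggregated metrics API can also be queried directly, which helps separate metrics-server problems from dashboard problems:
# raw node metrics straight from the aggregation layer
kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes"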


