
Kubernetes Binary Installation

I. Basic Optimization

1. Configure passwordless SSH
# Generate a key pair
ssh-keygen
# Distribute the public key to every node
ssh-copy-id  -i /root/.ssh/id_rsa.pub root@172.16.1.


2. Upgrade the kernel
# Install the kernel-ml RPMs (download them from the elrepo mirror first so the files are available locally)
yum install -y kernel-ml{,-devel}-5.8.3-1.el7.elrepo.x86_64.rpm

cat /boot/grub2/grub.cfg |grep menuentry
# Set the new kernel as the default
grub2-set-default "CentOS Linux (5.8.3-1.el7.elrepo.x86_64) 7 (Core)"
# Verify the change
grub2-editenv list
reboot


3. Install dependencies
 yum install wget expect vim net-tools ntp bash-completion ipvsadm ipset jq iptables conntrack sysstat libseccomp -y


4. Disable swap
swapoff -a
# To disable it permanently, comment out the swap entry in /etc/fstab (vim /etc/fstab)
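If you prefer not to edit the file by hand, a hedged one-liner that comments out any active swap entry in /etc/fstab:

# Comment out every uncommented line that mentions swap so it is not mounted at boot
sed -ri '/^[^#].*swap/s/^/#/' /etc/fstab
# Verify
grep swap /etc/fstab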

5. Set the hostname (run on each node with its own name)
hostnamectl set-hostname 


6. Configure name resolution (/etc/hosts)
cat >> /etc/hosts << EOF
172.16.1.81 k8s-master-1
172.16.1.82 k8s-master-2
172.16.1.83 k8s-master-3
172.16.1.84 k8s-node-1
172.16.1.85 k8s-node-2
172.16.1.86 proxy
EOF


7. Time synchronization
yum install -y ntpdate
# Add the following entry to root's crontab (verify with crontab -l)
*/5 * * * * ntpdate ntp.aliyun.com &> /dev/null
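To install that entry non-interactively instead of editing with crontab -e, something like the following should work (it appends to the existing crontab, if any):

(crontab -l 2>/dev/null; echo "*/5 * * * * ntpdate ntp.aliyun.com &> /dev/null") | crontab -
# Verify
crontab -l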


8. Tune kernel parameters
cat > /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF

sysctl -p /etc/sysctl.d/kubernetes.conf


9. Load the IPVS kernel modules
cat > /etc/sysconfig/modules/k8s.modules <<EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack   # nf_conntrack_ipv4 was merged into nf_conntrack on kernels >= 4.19 (we upgraded to 5.8)
modprobe -- ip_tables
modprobe -- ip_set
modprobe -- xt_set
modprobe -- ipt_set
modprobe -- ipt_rpfilter
modprobe -- ipt_REJECT
modprobe -- ipip
EOF

# Verify the modules are loaded; if nothing shows up, try rebooting.
chmod 755 /etc/sysconfig/modules/k8s.modules && bash /etc/sysconfig/modules/k8s.modules && lsmod | grep -e ip_vs -e nf_conntrack


10. Install Docker
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache fast
yum -y install docker-ce
systemctl start docker


11. Configure Docker registry mirrors and switch the cgroup driver
cat > /etc/docker/daemon.json <<EOF
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "registry-mirrors": [
        "https://1nj0zren.mirror.aliyuncs.com",
        "https://kfwkfulq.mirror.aliyuncs.com",
        "https://2lqq34jg.mirror.aliyuncs.com",
        "https://pee6w651.mirror.aliyuncs.com",
        "http://hub-mirror.c.163.com",
        "https://docker.mirrors.ustc.edu.cn",
        "http://f1361db2.m.daocloud.io",
        "https://registry.docker-cn.com"
    ]
}
EOF
systemctl restart docker
systemctl enable docker
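A quick sanity check that the cgroup driver change took effect (docker info reports the active driver):

docker info --format '{{.CgroupDriver}}'
# Expected output: systemd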

II. SSL Certificates

# Encryption basics
    Asymmetric encryption: encryption and decryption using a public/private key pair
    One-way hashing: can only produce a digest, cannot be reversed (e.g. MD5)
PKI (Public Key Infrastructure)
A complete PKI consists of the following parts:
    1. End entities (certificate applicants)
    2. Registration Authority (RA)
    3. Certificate Authority (CA)  # the key component here
    4. Certificate Revocation List (CRL)
    5. Certificate repository
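As a side illustration of the two concepts above (not part of the deployment), openssl and md5sum can demonstrate an asymmetric key pair and a one-way digest:

# Asymmetric encryption: generate an RSA private key and derive its public key
openssl genrsa -out demo-key.pem 2048
openssl rsa -in demo-key.pem -pubout -out demo-pub.pem

# One-way hashing: the digest cannot be reversed back into the original input
echo "hello" | md5sum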

1. Create the Certificate Authority (CA) on the master node

  • Download the certificate tooling (cfssl) and make it executable system-wide
# Download
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64

# Make the binaries executable
chmod +x cfssljson_linux-amd64
chmod +x cfssl_linux-amd64

# Move them to /usr/local/bin
mv cfssljson_linux-amd64 cfssljson
mv cfssl_linux-amd64 cfssl
mv cfssljson cfssl /usr/local/bin

2. Create the root certificate

  • The CA is responsible for approving, issuing, archiving, and revoking digital certificates. Certificates issued by the CA carry the CA's digital signature, so nobody other than the CA can modify them without detection.
  • The root certificate is the basis of the trust relationship between the CA and its users: a user's certificate is only valid if it chains up to a trusted root certificate.
  • A certificate essentially contains three parts: the subject's information, the subject's public key, and the certificate signature.

1) Create the signing configuration file

mkdir -p /root/cert/ca
cd /root/cert/ca

cat > ca-config.json <<EOF
{
  "signing": {
    "default": {  
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
         "expiry": "8760h"
      }
    }
  }
}
EOF

2) Create the root CA certificate signing request (CSR) file

cat > ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names":[{
    "C": "CN",
    "ST": "ShangHai",
    "L": "ShangHai"
  }]
}
EOF

3) Generate the certificate from the CSR

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

# gencert: generate a new key and signed certificate

# -initca: initialize a new CA certificate
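To confirm the root CA was generated as expected (ca.pem, ca-key.pem and ca.csr should now exist in /root/cert/ca), the certificate can be inspected with openssl; for a self-signed root, issuer and subject are identical:

ls ca.pem ca-key.pem ca.csr
openssl x509 -in ca.pem -noout -issuer -subject -dates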

III. Deploy the etcd Cluster

  • etcd is a Raft-based distributed key-value store developed by the CoreOS team, commonly used for service discovery, shared configuration, and concurrency control (leader election, distributed locks, and so on). Kubernetes uses etcd to store its state and data.

etcd node plan

etcd name IP
etcd-01 172.16.1.81
etcd-02 172.16.1.82
etcd-03 172.16.1.83

1. Create the etcd certificate signing request file

mkdir -p /root/cert/etcd
cd /root/cert/etcd

cat > etcd-csr.json << EOF
{
    "CN": "etcd",
    "hosts": [
    "127.0.0.1",
    "172.16.1.81",
    "172.16.1.82",
    "172.16.1.83",
    "172.16.1.84",
    "172.16.1.85",
    "172.16.1.86"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
          "C": "CN",
          "ST": "ShangHai",
          "L": "ShangHai"
        }
    ]
}
EOF

2. Generate the certificate from the CSR

cfssl gencert -ca=/root/cert/ca/ca.pem -ca-key=/root/cert/ca/ca-key.pem -config=/root/cert/ca/ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd

# gencert: generate a new key and signed certificate
# -ca: the CA certificate to sign with
# -ca-key: the CA private key
# -config: the signing configuration JSON file
# -profile: which profile section of the config to use when generating the certificate

3. Distribute the certificates to the etcd servers

for ip in k8s-master-1 k8s-master-2 k8s-master-3
do
  ssh root@${ip} "mkdir -pv /etc/etcd/ssl"
  scp ../ca/ca*.pem  root@${ip}:/etc/etcd/ssl
  scp ./etcd*.pem  root@${ip}:/etc/etcd/ssl
done

4. Deploy etcd on each node

 wget https://mirrors.huaweicloud.com/etcd/v3.3.24/etcd-v3.3.24-linux-amd64.tar.gz

version="v3.3.24"

tar xf etcd-${version}-linux-amd64.tar.gz
chmod +x etcd-${version}-linux-amd64/etcd*

for i in k8s-master-1 k8s-master-2 k8s-master-3
do
scp etcd-${version}-linux-amd64/etcd* root@$i:/usr/local/bin/
done

5. Manage etcd with systemd

ETCD_NAME=`hostname`
INTERNAL_IP=`hostname -i`
INITIAL_CLUSTER=k8s-master-1=https://172.16.1.81:2380,k8s-master-2=https://172.16.1.82:2380,k8s-master-3=https://172.16.1.83:2380

cat << EOF | sudo tee /usr/lib/systemd/system/etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos

[Service]
ExecStart=/usr/local/bin/etcd \\
  --name ${ETCD_NAME} \\
  --cert-file=/etc/etcd/ssl/etcd.pem \\
  --key-file=/etc/etcd/ssl/etcd-key.pem \\
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \\
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \\
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-client-cert-auth \\
  --client-cert-auth \\
  --initial-advertise-peer-urls https://${INTERNAL_IP}:2380 \\
  --listen-peer-urls https://${INTERNAL_IP}:2380 \\
  --listen-client-urls https://${INTERNAL_IP}:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls https://${INTERNAL_IP}:2379 \\
  --initial-cluster-token etcd-cluster \\
  --initial-cluster ${INITIAL_CLUSTER} \\
  --initial-cluster-state new \\
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF


systemctl daemon-reload

6. Start and test

# Start
systemctl start etcd

# Test

# List the cluster members (v3 API)
ETCDCTL_API=3 etcdctl member list --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://172.16.1.81:2379

# Test 1: cluster health (v2 API)
etcdctl --ca-file=/etc/etcd/ssl/ca.pem --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem --endpoints="https://172.16.1.81:2379,https://172.16.1.82:2379,https://172.16.1.83:2379" cluster-health
Result:
member 18273711b3029818 is healthy: got healthy result from https://172.16.1.52:2379
member 6525a0f42816b4cc is healthy: got healthy result from https://172.16.1.54:2379
member f42951486b449d48 is healthy: got healthy result from https://172.16.1.53:2379
cluster is healthy

# Test 2: endpoint status (v3 API)
ETCDCTL_API=3 etcdctl \
--cacert=/etc/etcd/ssl/ca.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://172.16.1.81:2379,https://172.16.1.82:2379,https://172.16.1.83:2379" \
endpoint status --write-out='table'
Result: # if no member shows IS LEADER = true, restart all etcd nodes
+--------------------------+------------------+---------+---------+-----------+-----------+------------+
|         ENDPOINT         |        ID        | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX |
+--------------------------+------------------+---------+---------+-----------+-----------+------------+
| https://172.16.1.52:2379 | 18273711b3029818 |   3.3.5 |   20 kB |     false |        10 |         15 |
| https://172.16.1.53:2379 | f42951486b449d48 |   3.3.5 |   20 kB |     false |        10 |         15 |
| https://172.16.1.54:2379 | 6525a0f42816b4cc |   3.3.5 |   20 kB |      true |        10 |         15 |
+--------------------------+------------------+---------+---------+-----------+-----------+------------+


# Related ports
[root@k8s-master-1 ~]# netstat -lntup
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 172.16.1.52:2379        0.0.0.0:*               LISTEN      2906/etcd           
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      2906/etcd           
tcp        0      0 172.16.1.52:2380        0.0.0.0:*               LISTEN      2906/etcd

2380: peer port, used for traffic from the other etcd members
2379: client port, used for traffic from the API server (and other clients)
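Beyond the health checks above, a simple write/read round-trip confirms the cluster is actually replicating data; a sketch using the certificate paths from this deployment:

# Write a key through one member ...
ETCDCTL_API=3 etcdctl --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://172.16.1.81:2379 put /test/hello world
# ... read it back through another, then clean up
ETCDCTL_API=3 etcdctl --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://172.16.1.82:2379 get /test/hello
ETCDCTL_API=3 etcdctl --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://172.16.1.81:2379 del /test/hello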

7. etcd configuration file (optional)

mkdir -pv /etc/kubernetes/conf/etcd
cd /etc/kubernetes/conf/etcd
cat > etcd.conf <<EOF
#[Member] settings for this node
ETCD_NAME="etcd-1"  # node name
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"  # data directory; defaults to the current directory if unset
ETCD_LISTEN_PEER_URLS="https://$(hostname -i):2380"  # peer listen URL (other etcd members)
ETCD_LISTEN_CLIENT_URLS="https://$(hostname -i):2379"  # client listen URL (API server)
#[Clustering] cluster settings
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://$(hostname -i):2380"  # peer URL advertised to the cluster
ETCD_ADVERTISE_CLIENT_URLS="https://$(hostname -i):2379"   # client URL advertised to the cluster
ETCD_INITIAL_CLUSTER="etcd-1=https://172.16.1.81:2380,etcd-2=https://172.16.1.82:2380,etcd-3=https://172.16.1.83:2380"  # all members of the cluster
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"  # cluster token
ETCD_INITIAL_CLUSTER_STATE="new"  # cluster state: "new" for a new cluster, "existing" when joining an existing one
EOF

IV. Create Certificates for All Components on the Master Node

Node name IP
k8s-master-1 172.16.1.81
k8s-master-2 172.16.1.82
k8s-master-3 172.16.1.83
k8s-node-1 172.16.1.84
k8s-node-2 172.16.1.85
  • The VIP (load balancer) part is omitted here because it makes the deployment considerably more complex; refer to the instructor's notes if you need to add it.
  • You can also simulate the VIP with: ifconfig eth1:1 172.16.1.86

1. Create the master root certificate

mkdir -p /root/cert/master
cd /root/cert/master

cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

cat > ca-csr.json << EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF

# Generate
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

2. Create the kube-apiserver certificate

cat > server-csr.json << EOF
{
    "CN": "kubernetes",
    "hosts": [
        "127.0.0.1",
        "172.16.1.81",
        "172.16.1.82",
        "172.16.1.83",
        "172.16.1.86",
        "10.96.0.1",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "ShangHai",
            "ST": "ShangHai"
        }
    ]
}
EOF

# hosts: the localhost address + the master node IPs + the etcd node IPs + the load-balancer VIP (172.16.1.86) + the first usable address of the service CIDR (10.96.0.1) + the default Kubernetes service names

# Generate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
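After generating server.pem, it is worth double-checking that all expected addresses made it into the certificate's SAN list (plain openssl, nothing specific to this setup):

openssl x509 -in server.pem -noout -text | grep -A 1 'Subject Alternative Name'
# Should list 127.0.0.1, 172.16.1.81-83, 172.16.1.86, 10.96.0.1 and the kubernetes.* DNS names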

3. Create the kube-controller-manager certificate

cat > kube-controller-manager-csr.json << EOF
{
    "CN": "system:kube-controller-manager",
    "hosts": [
        "127.0.0.1",
        "172.16.1.81"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "system:kube-controller-manager",
            "OU": "System"
        }
    ]
}
EOF

# Generate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager

4. Create the kube-scheduler certificate

cat > kube-scheduler-csr.json << EOF
{
    "CN": "system:kube-scheduler",
    "hosts": [
        "127.0.0.1",
        "172.16.1.81"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "system:kube-scheduler",
            "OU": "System"
        }
    ]
}
EOF

# Generate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler

5. Create the kube-proxy certificate

cat > kube-proxy-csr.json << EOF
{
    "CN":"system:kube-proxy",
    "hosts":[],
    "key":{
        "algo":"rsa",
        "size":2048
    },
    "names":[
        {
            "C":"CN",
            "L":"BeiJing",
            "ST":"BeiJing",
            "O":"system:kube-proxy",
            "OU":"System"
        }
    ]
}
EOF

# Generate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

6. Create the cluster admin (operator) certificate

  • To let the cluster client tools access the cluster securely, create a client certificate for them that carries full cluster privileges (via the system:masters group).
cat > admin-csr.json << EOF
{
    "CN":"admin",
    "key":{
        "algo":"rsa",
        "size":2048
    },
    "names":[
        {
            "C":"CN",
            "L":"BeiJing",
            "ST":"BeiJing",
            "O":"system:masters",
            "OU":"System"
        }
    ]
}
EOF

# Generate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
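A quick check that the admin certificate carries the system:masters group, which is what grants it full cluster privileges:

openssl x509 -in admin.pem -noout -subject
# Expected: the subject contains O = system:masters and CN = admin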

7. Distribute certificates to the master nodes

mkdir -pv /etc/kubernetes/ssl

cp -p ./*.pem /etc/kubernetes/ssl

for i in k8s-master-2 k8s-master-3; do  
  ssh root@$i "mkdir -pv /etc/kubernetes/ssl"
  scp /etc/kubernetes/ssl/* root@$i:/etc/kubernetes/ssl
done

8. Distribute certificates to the worker nodes

for i in k8s-node-1 k8s-node-2; do  
  ssh root@$i "mkdir -pv /etc/kubernetes/ssl"
  scp -pr ./{ca*.pem,admin*pem,kube-proxy*pem} root@$i:/etc/kubernetes/ssl
done

9. Distribute certificates to the VIP node (optional)

ssh root@kubernetes-master-vip "mkdir -pv /etc/kubernetes/ssl"

scp admin*pem root@kubernetes-master-vip:/etc/kubernetes/ssl

V. Deploy the Master Nodes

1. Download the components

# Download the server package
wget https://dl.k8s.io/v1.19.0/kubernetes-server-linux-amd64.tar.gz

# Download the client package
wget https://dl.k8s.io/v1.19.0/kubernetes-client-linux-amd64.tar.gz

# Download the node package
wget https://dl.k8s.io/v1.19.0/kubernetes-node-linux-amd64.tar.gz

# If the downloads fail, use the method below instead
docker pull registry.cn-hangzhou.aliyuncs.com/k8sos/k8s:v1.18.8.1

# Then copy the binaries out of the container:
# run a container from the image, find the path of the binaries inside it, then in another window use docker cp with that path to copy them to the local filesystem (see the sketch below).
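A rough sketch of that copy step. The container name k8s-bin and the in-image path /kubernetes/server/bin are assumptions; list the image's filesystem first and use whatever path it actually contains:

# Create (but do not start) a container from the image; the trailing "sh" is only a placeholder command
docker create --name k8s-bin registry.cn-hangzhou.aliyuncs.com/k8sos/k8s:v1.18.8.1 sh
# Find where the binaries live inside the image
docker export k8s-bin | tar -t | grep -E 'kube-apiserver|kubelet'
# Copy them out (hypothetical path) and remove the container
docker cp k8s-bin:/kubernetes/server/bin ./kube-bin
docker rm k8s-bin

Using docker create plus docker cp avoids having to run a shell inside the image, which may not ship one.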

2. Distribute the master components

# Upload
rz kube.zip
# Make executable
chmod +x kube-apiserver  kube-controller-manager  kubectl  kubelet  kube-proxy  kube-scheduler
# Distribute
for i in k8s-master-1 k8s-master-2 k8s-master-3;  do   scp kube-apiserver kube-controller-manager kube-scheduler kubectl  root@$i:/usr/local/bin/; done

3. Configure TLS bootstrapping

  • TLS bootstrapping is a mechanism that simplifies setting up mutually authenticated TLS between the kubelet and the apiserver. Once TLS authentication is enabled in the cluster, every node's kubelet must use a valid certificate signed by the apiserver's CA to talk to the apiserver; signing a certificate by hand for every node would be tedious and error-prone, and would make the cluster fragile.
  • With TLS bootstrapping, the kubelet on each node first connects to the apiserver as a predefined low-privilege user, then requests a certificate from the apiserver, which signs and issues it to the node dynamically, automating certificate signing.
mkdir -p  /etc/kubernetes/cfg/
cd  /etc/kubernetes/cfg/

# Generate a random value and assign it to a variable
TLS_BOOTSTRAPPING_TOKEN=`head -c 16 /dev/urandom | od -An -t x | tr -d ' '`

# Generate token.csv
cat > token.csv << EOF
${TLS_BOOTSTRAPPING_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
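Optionally confirm the token format; the first field should be a 32-character hex string (16 random bytes):

cat token.csv
awk -F, '{print length($1)}' token.csv
# Expected: 32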

4. Create the kubeconfig files

# The commands below can also be written into a single script and executed together.

# Define the cluster apiserver address
export KUBE_APISERVER="https://172.16.1.81:6443"

token_id=$(cut -d',' -f1 token.csv)

# Create kubelet-bootstrap.kubeconfig
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kubelet-bootstrap.kubeconfig

kubectl config set-credentials "kubelet-bootstrap" \
  --token=$token_id \
  --kubeconfig=kubelet-bootstrap.kubeconfig                          

kubectl config set-context default \
  --cluster=kubernetes \
  --user="kubelet-bootstrap" \
  --kubeconfig=kubelet-bootstrap.kubeconfig

kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig


# Create the controller-manager kubeconfig
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-controller-manager.kubeconfig

kubectl config set-credentials "kube-controller-manager" \
  --client-certificate=/etc/kubernetes/ssl/kube-controller-manager.pem \
  --client-key=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-controller-manager.kubeconfig    

kubectl config set-context default \
  --cluster=kubernetes \
  --user="kube-controller-manager" \
  --kubeconfig=kube-controller-manager.kubeconfig

kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig

# Create the scheduler kubeconfig
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-scheduler.kubeconfig

kubectl config set-credentials "kube-scheduler" \
  --client-certificate=/etc/kubernetes/ssl/kube-scheduler.pem \
  --client-key=/etc/kubernetes/ssl/kube-scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-scheduler.kubeconfig   

kubectl config set-context default \
  --cluster=kubernetes \
  --user="kube-scheduler" \
  --kubeconfig=kube-scheduler.kubeconfig

kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig


# Create the kube-proxy kubeconfig
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials "kube-proxy" \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes \
  --user="kube-proxy" \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig



# Create the admin kubeconfig
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=admin.kubeconfig

kubectl config set-credentials "admin" \
  --client-certificate=/etc/kubernetes/ssl/admin.pem \
  --client-key=/etc/kubernetes/ssl/admin-key.pem \
  --embed-certs=true \
  --kubeconfig=admin.kubeconfig 

kubectl config set-context default \
  --cluster=kubernetes \
  --user="admin" \
  --kubeconfig=admin.kubeconfig

kubectl config use-context default --kubeconfig=admin.kubeconfig

5. Distribute the kubeconfig files to each node

# Distribute to the masters
for i in k8s-master-1 k8s-master-2 k8s-master-3; 
do 
  ssh root@$i "mkdir -p  /etc/kubernetes/cfg"; 
  scp token.csv kube-scheduler.kubeconfig kube-controller-manager.kubeconfig admin.kubeconfig kube-proxy.kubeconfig kubelet-bootstrap.kubeconfig root@$i:/etc/kubernetes/cfg; 
done

# Distribute to the nodes
for i in k8s-node-1 k8s-node-2; 
do     
  ssh root@$i "mkdir -p  /etc/kubernetes/cfg";     
  scp kube-proxy.kubeconfig kubelet-bootstrap.kubeconfig root@$i:/etc/kubernetes/cfg; 
done

6. Deploy and start kube-apiserver

# Generate the API server configuration file (on each master node)
KUBE_APISERVER_IP=`hostname -i`

cat > /etc/kubernetes/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--advertise-address=${KUBE_APISERVER_IP} \\
--default-not-ready-toleration-seconds=360 \\
--default-unreachable-toleration-seconds=360 \\
--max-mutating-requests-inflight=2000 \\
--max-requests-inflight=4000 \\
--default-watch-cache-size=200 \\
--delete-collection-workers=2 \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.96.0.0/16 \\
--service-node-port-range=10-52767 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/etc/kubernetes/cfg/token.csv \\
--kubelet-client-certificate=/etc/kubernetes/ssl/server.pem \\
--kubelet-client-key=/etc/kubernetes/ssl/server-key.pem \\
--tls-cert-file=/etc/kubernetes/ssl/server.pem  \\
--tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \\
--client-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/log/kubernetes/k8s-audit.log \\
--etcd-servers=https://172.16.1.81:2379,https://172.16.1.82:2379,https://172.16.1.83:2379 \\
--etcd-cafile=/etc/etcd/ssl/ca.pem \\
--etcd-certfile=/etc/etcd/ssl/etcd.pem \\
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem"
EOF

# Create the systemd unit
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=10
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# Start
mkdir -p /var/log/kubernetes/
systemctl daemon-reload
systemctl enable --now kube-apiserver

7. Deploy and start kube-controller-manager

# Generate the controller-manager configuration file (on each master node)
cat > /etc/kubernetes/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--leader-elect=true \\
--cluster-name=kubernetes \\
--bind-address=127.0.0.1 \\
--cluster-cidr=10.244.0.0/12 \\
--service-cluster-ip-range=10.96.0.0/16 \\
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  \\
--root-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--kubeconfig=/etc/kubernetes/cfg/kube-controller-manager.kubeconfig \\
--tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \\
--tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s \\
--controllers=*,bootstrapsigner,tokencleaner \\
--use-service-account-credentials=true \\
--node-monitor-grace-period=10s \\
--horizontal-pod-autoscaler-use-rest-clients=true"
EOF

# Create the systemd unit
cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

# Start
systemctl daemon-reload
systemctl enable --now kube-controller-manager.service

8. Deploy and start kube-scheduler

# Generate the kube-scheduler configuration file (on each master node)
cat > /etc/kubernetes/cfg/kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--kubeconfig=/etc/kubernetes/cfg/kube-scheduler.kubeconfig \\
--leader-elect=true \\
--master=http://127.0.0.1:8080 \\
--bind-address=127.0.0.1 "
EOF

# Create the systemd unit
cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

# Start
systemctl daemon-reload
systemctl enable --now kube-scheduler.service

VI. Deploy the Node Components

1. Distribute the binaries

# Distribute the node components
cd
for i in k8s-master-1  k8s-node-1 k8s-node-2; do scp kubelet kube-proxy root@$i:/usr/local/bin/; done

2. Deploy and start kubelet

# Generate the kubelet configuration file (on every node)
KUBE_HOSTNAME=`hostname`
local_ip=`hostname -i`

cat > /etc/kubernetes/cfg/kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--hostname-override=${KUBE_HOSTNAME} \\
--container-runtime=docker \\
--kubeconfig=/etc/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/etc/kubernetes/cfg/kubelet-bootstrap.kubeconfig \\
--config=/etc/kubernetes/cfg/kubelet-config.yml \\
--cert-dir=/etc/kubernetes/ssl \\
--image-pull-progress-deadline=15m \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/k8sos/pause:3.2"
EOF

# Create the kubelet-config.yml configuration file
cat > /etc/kubernetes/cfg/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${local_ip}
port: 10250
readOnlyPort: 10255
cgroupDriver: systemd
clusterDNS:
- 10.96.0.2
clusterDomain: cluster.local 
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/ssl/ca.pem 
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF

# Create the systemd unit
cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kubelet.conf
ExecStart=/usr/local/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# Start
systemctl daemon-reload
systemctl enable --now kubelet

3. Deploy and start kube-proxy

KUBE_HOSTNAME=`hostname`
local_ip=`hostname -i`

# Generate the kube-proxy configuration file (on every node)
cat > /etc/kubernetes/cfg/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--config=/etc/kubernetes/cfg/kube-proxy-config.yml"
EOF

cat > /etc/kubernetes/cfg/kube-proxy-config.yml << EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: ${local_ip}
healthzBindAddress: ${local_ip}:10256
metricsBindAddress: ${local_ip}:10249
clientConnection:
  burst: 200
  kubeconfig: /etc/kubernetes/cfg/kube-proxy.kubeconfig
  qps: 100
hostnameOverride: ${KUBE_HOSTNAME}
clusterCIDR: 10.96.0.0/16
enableProfiling: true
mode: "ipvs"
kubeProxyIPTablesConfiguration:
  masqueradeAll: false
kubeProxyIPVSConfiguration:
  scheduler: rr
  excludeCIDRs: []
EOF


# Create the systemd unit
cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-proxy.conf
ExecStart=/usr/local/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# Start
systemctl daemon-reload
systemctl enable --now kube-proxy

VII. Bring Up the Cluster

1. Join the kubelets to the cluster

# Run on the primary master: create the kubelet-bootstrap cluster role binding
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

# View the kubelets' requests to join the cluster
kubectl get csr
# Approve them
kubectl certificate approve `kubectl get csr | grep "Pending" | awk '{print $1}'`
# List nodes
kubectl get node
# Inspect the kubelet certificate files
ll /etc/kubernetes/ssl/

lrwxrwxrwx 1 root root   58 Sep 14 09:36 kubelet-client-current.pem -> /etc/kubernetes/ssl/kubelet-client-2020-09-14-09-36-32.pem

2. Cluster configuration

# Label each node in the cluster (Kubernetes manages nodes via labels)
1. Master nodes
kubectl label nodes k8s-master-1 node-role.kubernetes.io/master=k8s-master-1
kubectl label nodes k8s-master-2 node-role.kubernetes.io/master=k8s-master-2
kubectl label nodes k8s-master-3 node-role.kubernetes.io/master=k8s-master-3
2. Worker nodes
kubectl label nodes k8s-node-1 node-role.kubernetes.io/node=k8s-node-1
kubectl label nodes k8s-node-2 node-role.kubernetes.io/node=k8s-node-2

# Taint the master nodes so that they are not schedulable (no regular pods run on them)
kubectl taint nodes k8s-master-1 node-role.kubernetes.io/master=k8s-master-1:NoSchedule --overwrite
kubectl taint nodes k8s-master-2 node-role.kubernetes.io/master=k8s-master-2:NoSchedule --overwrite
kubectl taint nodes k8s-master-3 node-role.kubernetes.io/master=k8s-master-3:NoSchedule --overwrite

VIII. Deploy the Network Plugin

Kubernetes defines a network model but delegates its implementation to network plugins. The main job of a CNI plugin is to let pod resources communicate across hosts (a quick verification sketch follows the list below). Common CNI plugins:

  • Flannel
  • Calico
  • Canal
  • Contiv
  • OpenContrail
  • NSX-T
  • Kube-router
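Once flanneld is running (deployed in the steps below), a minimal check on any node that the overlay is in place; the exact subnet values will differ per node:

# Environment file produced by flanneld/mk-docker-opts.sh and consumed by Docker
cat /run/flannel/subnet.env
# VXLAN interface created by the flannel vxlan backend
ip -d link show flannel.1
# docker0 should sit inside the subnet flannel assigned to this node
ip addr show docker0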

1. Deploy flanneld

# Distribute the binaries
for i in k8s-master-1 k8s-node-2 k8s-node-1; do scp flanneld mk-docker-opts.sh  root@$i:/usr/local/bin; done

# Write the flannel network configuration into etcd
etcdctl \
--ca-file=/etc/etcd/ssl/ca.pem \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://172.16.1.81:2379,https://172.16.1.84:2379,https://172.16.1.85:2379" \
mk /coreos.com/network/config '{"Network":"10.244.0.0/12", "SubnetLen": 21, "Backend": {"Type": "vxlan", "DirectRouting": true}}'


# Verify with get
etcdctl \
--ca-file=/etc/etcd/ssl/ca.pem \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://172.16.1.81:2379,https://172.16.1.82:2379,https://172.16.1.83:2379" \
 get /coreos.com/network/config

{"Network":"10.244.0.0/12", "SubnetLen": 21, "Backend": {"Type": "vxlan", "DirectRouting": true}}

# Copy the etcd certificates from the master to the worker nodes (flanneld needs them to reach the etcd cluster on startup)
for i in k8s-node-1 k8s-node-2;do 
ssh root@$i "mkdir -pv /etc/etcd/ssl"
scp -p /etc/etcd/ssl/*.pem root@$i:/etc/etcd/ssl    
done

# Create the systemd unit
cat > /usr/lib/systemd/system/flanneld.service << EOF
[Unit]
Description=Flanneld address
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service

[Service]
Type=notify
ExecStart=/usr/local/bin/flanneld \\
  -etcd-cafile=/etc/etcd/ssl/ca.pem \\
  -etcd-certfile=/etc/etcd/ssl/etcd.pem \\
  -etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
  -etcd-endpoints=https://172.16.1.81:2379,https://172.16.1.82:2379,https://172.16.1.83:2379 \\
  -etcd-prefix=/coreos.com/network \\
  -ip-masq
ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=always
RestartSec=5
StartLimitInterval=0
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF

2. Modify Docker's systemd unit so that flanneld's network options take over Docker

sed -i '/ExecStart/s/\(.*\)/#\1/' /usr/lib/systemd/system/docker.service
sed -i '/ExecReload/a ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock' /usr/lib/systemd/system/docker.service
sed -i '/ExecReload/a EnvironmentFile=-/run/flannel/subnet.env' /usr/lib/systemd/system/docker.service

# Distribute
for ip in k8s-master-2 k8s-master-3 k8s-node-2 k8s-node-1;do scp /usr/lib/systemd/system/docker.service root@${ip}:/usr/lib/systemd/system; done

3. Restart Docker and start flanneld

for i in k8s-master-1  k8s-node-2 k8s-node-1;do
echo ">>>  $i"
ssh root@$i "systemctl daemon-reload"
ssh root@$i "systemctl restart docker"
ssh root@$i "systemctl start flanneld"
ssh root@$i "systemctl restart docker"
done

IX. Deploy the CoreDNS Add-on

  • CoreDNS resolves Service names for pods inside the cluster; Kubernetes relies on CoreDNS for service discovery.
# Download the deployment manifests
git clone https://github.com/coredns/deployment.git

# Confirm the cluster DNS IP
CLUSTER_DNS_IP="10.96.0.2"

# Grant cluster permissions (bind cluster-admin to the kubernetes user)
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=kubernetes

# Replace the CoreDNS image with registry.cn-hangzhou.aliyuncs.com/k8sos/coredns:1.7.0
cd deployment/kubernetes/

sed -i 's#coredns/coredns#registry.cn-hangzhou.aliyuncs.com/k8sos/coredns#g' coredns.yaml.sed

# Deploy CoreDNS
./deploy.sh -i 10.96.0.2 -s | kubectl apply -f -
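A quick way to verify DNS is to resolve the kubernetes service from a throwaway pod (busybox:1.28 is used here because nslookup is unreliable in newer busybox images):

kubectl run dns-test --rm -it --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default
# Expected: the query goes to 10.96.0.2 and resolves kubernetes.default to 10.96.0.1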

X. Test the Cluster

# Create an nginx deployment and expose it with a NodePort service
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=NodePort

# Check

kubectl get pod -o wide -w

kubectl get svc -o wide

# Access
curl 10.0.0.81:(the NodePort assigned to the service)
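The NodePort can also be read directly from the service object instead of copying it from the svc listing; a hedged example against the nginx service created above (any node IP works):

NODE_PORT=$(kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}')
curl http://10.0.0.81:${NODE_PORT}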