Deploying a highly available K8S cluster with kubeadm (1) + flannel

Prerequisites for deploying a Kubernetes cluster with kubeadm

◼ Linux hosts capable of running Kubernetes, e.g. Debian, RedHat and their derivatives
◼ At least 2 GB of RAM and 2 or more CPUs per host
◼ Unimpeded network connectivity between all hosts
◼ A unique hostname, MAC address and product_uuid per host, with hostnames resolvable
◼ The ports used by Kubernetes opened, or iptables disabled altogether
◼ The swap device disabled on every host
◼ Time synchronized across all hosts

Preparing the deployment host environment

[root@ubuntu2004 ~]#apt -y install ansible
[root@ubuntu2004 ~]#mkdir /data/ansible -p
[root@ubuntu2004 ~]#cp /etc/ansible/ansible.cfg /data/ansible/
[root@ubuntu2004 ~]#cd /data/ansible/

[root@ubuntu2004 ansible]#vim ansible.cfg 
[defaults]
inventory      = /data/ansible/inventory
roles_path    = /data/ansible/roles
host_key_checking = False
remote_user = root

[privilege_escalation]
become=True
become_method=sudo
become_user=root
become_ask_pass=False

[root@ubuntu2004 ansible]#vim inventory
[Master]
10.0.0.201
10.0.0.202
10.0.0.203
[Node]
10.0.0.204
10.0.0.205
10.0.0.206

[Ansible]
10.0.0.207

[root@ubuntu2004 ansible]#bash ssh
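
The "ssh" file executed above is not listed in this article; it is presumably a local helper script that pushes the control node's SSH public key to every managed host so the later ad-hoc commands and playbooks can log in without a password. A hypothetical way to do the same with Ansible itself (assuming the key pair already exists as /root/.ssh/id_rsa.pub and that the first run authenticates by password, e.g. ansible-playbook push-key.yml -k with sshpass installed) is:

---
# push the controller's public key into root's authorized_keys on every host
- name: distribute ssh public key
  hosts: all
  tasks:
    - name: install the controller public key
      authorized_key:
        user: root
        key: "{{ lookup('file', '/root/.ssh/id_rsa.pub') }}"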

[root@ubuntu2004 ansible]#ansible all -m ping 
10.0.0.202 | SUCCESS => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/bin/python3"
    },
    "changed": false,
    "ping": "pong"
}
10.0.0.201 | SUCCESS => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/bin/python3"
    },
    "changed": false,
    "ping": "pong"
}
10.0.0.204 | SUCCESS => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/bin/python3"
    },
    "changed": false,
    "ping": "pong"
}
10.0.0.203 | SUCCESS => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/bin/python3"
    },
    "changed": false,
    "ping": "pong"
}
10.0.0.205 | SUCCESS => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/bin/python3"
    },
    "changed": false,
    "ping": "pong"
}
10.0.0.206 | SUCCESS => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/bin/python3"
    },
    "changed": false,
    "ping": "pong"
}
10.0.0.207 | SUCCESS => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/bin/python3"
    },
    "changed": false,
    "ping": "pong"
}
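
With connectivity confirmed, the per-host uniqueness prerequisites listed earlier (hostname, MAC address, product_uuid) can be spot-checked before going further. The playbook below is only a sketch, not part of the original procedure; the product_uuid fact comes from DMI and is only gathered when running with root privileges:

---
# print the facts that must be unique on every host
- name: check uniqueness prerequisites
  hosts: all
  tasks:
    - name: show hostname, MAC address and product_uuid
      debug:
        msg: "{{ ansible_hostname }} {{ ansible_default_ipv4.macaddress }} {{ ansible_product_uuid }}"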

[root@ubuntu2004 ansible]#vim adhoc.sh 
#!/bin/bash
# 
#********************************************************************
#Author:            shuhong
#QQ:                985347841
#Date:              2022-10-03
#FileName:          adhoc.sh
#URL:               hhhhh
#Description:       The test script
#Copyright (C):     2022 All rights reserved
#********************************************************************
ansible 10.0.0.201 -m hostname -a 'name=k8s-Master-01'
ansible 10.0.0.202 -m hostname -a 'name=k8s-Master-02'
ansible 10.0.0.203 -m hostname -a 'name=k8s-Master-03'
ansible 10.0.0.204 -m hostname -a 'name=k8s-Node-01'
ansible 10.0.0.205 -m hostname -a 'name=k8s-Node-02'
ansible 10.0.0.206 -m hostname -a 'name=k8s-Node-03'
ansible 10.0.0.207 -m hostname -a 'name=k8s-Ansible'


[root@ubuntu2004 ansible]#bash adhoc.sh 
10.0.0.201 | CHANGED => {
    "ansible_facts": {
        "ansible_domain": "networksolutions.com",
        "ansible_fqdn": "underconstruction.networksolutions.com",
        "ansible_hostname": "k8s-Master-01",
        "ansible_nodename": "k8s-Master-01",
        "discovered_interpreter_python": "/usr/bin/python3"
    },
    "changed": true,
    "name": "k8s-Master-01"
}
10.0.0.202 | CHANGED => {
    "ansible_facts": {
        "ansible_domain": "networksolutions.com",
        "ansible_fqdn": "underconstruction.networksolutions.com",
        "ansible_hostname": "k8s-Master-02",
        "ansible_nodename": "k8s-Master-02",
        "discovered_interpreter_python": "/usr/bin/python3"
    },
    "changed": true,
    "name": "k8s-Master-02"
}
10.0.0.203 | CHANGED => {
    "ansible_facts": {
        "ansible_domain": "networksolutions.com",
        "ansible_fqdn": "underconstruction.networksolutions.com",
        "ansible_hostname": "k8s-Master-03",
        "ansible_nodename": "k8s-Master-03",
        "discovered_interpreter_python": "/usr/bin/python3"
    },
    "changed": true,
    "name": "k8s-Master-03"
}
10.0.0.204 | CHANGED => {
    "ansible_facts": {
        "ansible_domain": "networksolutions.com",
        "ansible_fqdn": "underconstruction.networksolutions.com",
        "ansible_hostname": "k8s-Node-01",
        "ansible_nodename": "k8s-Node-01",
        "discovered_interpreter_python": "/usr/bin/python3"
    },
    "changed": true,
    "name": "k8s-Node-01"
}
10.0.0.205 | CHANGED => {
    "ansible_facts": {
        "ansible_domain": "networksolutions.com",
        "ansible_fqdn": "underconstruction.networksolutions.com",
        "ansible_hostname": "k8s-Node-02",
        "ansible_nodename": "k8s-Node-02",
        "discovered_interpreter_python": "/usr/bin/python3"
    },
    "changed": true,
    "name": "k8s-Node-02"
}
10.0.0.206 | CHANGED => {
    "ansible_facts": {
        "ansible_domain": "networksolutions.com",
        "ansible_fqdn": "underconstruction.networksolutions.com",
        "ansible_hostname": "k8s-Node-03",
        "ansible_nodename": "k8s-Node-03",
        "discovered_interpreter_python": "/usr/bin/python3"
    },
    "changed": true,
    "name": "k8s-Node-03"
}
10.0.0.207 | CHANGED => {
    "ansible_facts": {
        "ansible_domain": "networksolutions.com",
        "ansible_fqdn": "underconstruction.networksolutions.com",
        "ansible_hostname": "k8s-Ansible",
        "ansible_nodename": "k8s-Ansible",
        "discovered_interpreter_python": "/usr/bin/python3"
    },
    "changed": true,
    "name": "k8s-Ansible"
}
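
The chrony.yml playbook used for time synchronization is not listed in this article. Judging from the single apt task visible in the run below, a minimal sketch (an assumption, not the author's actual file) would be:

---
# install chrony so all hosts keep their clocks in sync
- name: chrony
  hosts: all
  tasks:
    - name: apt
      apt:
        name: chrony
        state: present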

[root@k8s-Ansible ansible]#ansible-playbook chrony.yml 

PLAY [chrony] ************************************************************************************************************************************

TASK [Gathering Facts] ***************************************************************************************************************************
ok: [10.0.0.205]
ok: [10.0.0.201]
ok: [10.0.0.202]
ok: [10.0.0.204]
ok: [10.0.0.203]
ok: [10.0.0.207]
ok: [10.0.0.206]

TASK [apt] ***************************************************************************************************************************************
changed: [10.0.0.201]
changed: [10.0.0.203]
changed: [10.0.0.204]
changed: [10.0.0.202]
changed: [10.0.0.205]
ok: [10.0.0.207]
changed: [10.0.0.206]

PLAY RECAP ***************************************************************************************************************************************
10.0.0.201                 : ok=2    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.202                 : ok=2    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.203                 : ok=2    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.204                 : ok=2    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.205                 : ok=2    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.206                 : ok=2    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.207                 : ok=2    changed=0    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   

[root@k8s-Ansible ansible]#vim swap.yml
---
- name: swap
  hosts: all
  tasks:
    - name: shell
      shell: swapoff -a
    - name: replace
      replace:
        path: /etc/fstab
        regexp: "/swap.img"
        replace: "#/swap.img"

[root@k8s-Ansible ansible]#ansible-playbook swap.yml 
...

PLAY RECAP *****************************************************************************************************************************************
10.0.0.201                 : ok=3    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.202                 : ok=3    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.203                 : ok=3    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.204                 : ok=3    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.205                 : ok=3    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.206                 : ok=3    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.207                 : ok=3    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
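
An optional follow-up check (not in the original text) that swap really is off on every host can use the ansible_swaptotal_mb fact:

---
# fail on any host that still reports swap space
- name: verify swap is disabled
  hosts: all
  tasks:
    - name: assert no swap
      assert:
        that:
          - ansible_swaptotal_mb == 0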

[root@k8s-Ansible ansible]#cat files/hosts 
127.0.0.1 localhost
127.0.1.1 ubuntu2004

# The following lines are desirable for IPv6 capable hosts
::1     ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters

10.0.0.201 k8s-Master-01 kubeapi.shuhong.com
10.0.0.202 k8s-Master-02
10.0.0.203 k8s-Master-03
10.0.0.204 k8s-Node-01
10.0.0.205 k8s-Node-02
10.0.0.206 k8s-Node-03
10.0.0.207 k8s-Ansible

[root@k8s-Ansible ansible]#vim hosts.yml
--- 
- name:  host
  hosts: all
  tasks:
    - copy:
        src: hosts
        dest: /etc/hosts

[root@k8s-Ansible ansible]#ansible-playbook hosts.yml 

PLAY [host] **************************************************************************************************************************************************************************************

TASK [Gathering Facts] ***************************************************************************************************************************************************************************
ok: [10.0.0.205]
ok: [10.0.0.204]
ok: [10.0.0.201]
ok: [10.0.0.203]
ok: [10.0.0.202]
ok: [10.0.0.207]
ok: [10.0.0.206]

TASK [copy] **************************************************************************************************************************************************************************************
changed: [10.0.0.201]
changed: [10.0.0.202]
changed: [10.0.0.204]
changed: [10.0.0.203]
changed: [10.0.0.205]
changed: [10.0.0.206]
changed: [10.0.0.207]

PLAY RECAP ***************************************************************************************************************************************************************************************
10.0.0.201                 : ok=2    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.202                 : ok=2    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.203                 : ok=2    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.204                 : ok=2    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.205                 : ok=2    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.206                 : ok=2    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.207                 : ok=2    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   

Installing Docker

[root@k8s-Ansible ansible]#cat docker.yml 
---
- name: install docker
  hosts: all
  tasks: 
    - name: apt
      apt: 
        name: 
          - apt-transport-https 
          - ca-certificates 
          - curl 
          - software-properties-common
        state: present
    - name: shell
      shell: curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | apt-key add -
    - name: shell 
      shell: add-apt-repository "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
    - name: docker-ce
      apt: 
        update_cache: yes
        name: docker-ce
        state: present
    - name: copy
      copy:
        src: daemon.json
        dest: /etc/docker/daemon.json
    - name: service
      service: 
        daemon_reload: yes
        name: docker
        state: restarted
        enabled: yes
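
The daemon.json pushed by the copy task above is not shown in this article. A typical minimal example is given below; the mirror URL and the systemd cgroup driver are illustrative assumptions, not the author's actual settings:

{
  "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {"max-size": "100m"}
}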


[root@k8s-Ansible ansible]#ansible-playbook docker.yml 

PLAY [install docker] *****************************************************************************************************************************

TASK [Gathering Facts] ****************************************************************************************************************************
ok: [10.0.0.204]
ok: [10.0.0.205]
ok: [10.0.0.203]
ok: [10.0.0.201]
ok: [10.0.0.207]
ok: [10.0.0.202]
ok: [10.0.0.206]

TASK [apt] ****************************************************************************************************************************************
ok: [10.0.0.201]
ok: [10.0.0.205]
ok: [10.0.0.203]
ok: [10.0.0.202]
ok: [10.0.0.204]
ok: [10.0.0.206]
ok: [10.0.0.207]

TASK [shell] **************************************************************************************************************************************
[WARNING]: Consider using the get_url or uri module rather than running 'curl'.  If you need to use command because get_url or uri is insufficient
you can add 'warn: false' to this command task or set 'command_warnings=False' in ansible.cfg to get rid of this message.
changed: [10.0.0.205]
changed: [10.0.0.204]
changed: [10.0.0.203]
changed: [10.0.0.202]
changed: [10.0.0.201]
changed: [10.0.0.207]
changed: [10.0.0.206]

TASK [shell] **************************************************************************************************************************************
changed: [10.0.0.204]
changed: [10.0.0.205]
changed: [10.0.0.201]
changed: [10.0.0.202]
changed: [10.0.0.203]
changed: [10.0.0.206]
changed: [10.0.0.207]

TASK [docker-ce] **********************************************************************************************************************************
ok: [10.0.0.205]
ok: [10.0.0.203]
ok: [10.0.0.204]
ok: [10.0.0.202]
ok: [10.0.0.201]
ok: [10.0.0.206]
ok: [10.0.0.207]

TASK [copy] ***************************************************************************************************************************************
ok: [10.0.0.202]
ok: [10.0.0.204]
ok: [10.0.0.203]
ok: [10.0.0.201]
ok: [10.0.0.205]
ok: [10.0.0.206]
ok: [10.0.0.207]

TASK [service] ************************************************************************************************************************************
changed: [10.0.0.204]
changed: [10.0.0.205]
changed: [10.0.0.202]
changed: [10.0.0.203]
changed: [10.0.0.201]
changed: [10.0.0.207]
changed: [10.0.0.206]

PLAY RECAP ****************************************************************************************************************************************
10.0.0.201                 : ok=7    changed=3    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.202                 : ok=7    changed=3    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.203                 : ok=7    changed=3    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.204                 : ok=7    changed=3    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.205                 : ok=7    changed=3    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.206                 : ok=7    changed=3    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.207                 : ok=7    changed=3    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   

Installing cri-dockerd

https://github.com/Mirantis/cri-dockerd
[root@k8s-Ansible files]#ll
total 18732
drwxr-xr-x 2 root root     4096 Nov  7 10:31 ./
drwxr-xr-x 3 root root     4096 Nov  7 10:27 ../
-rw-r--r-- 1 root root 19170592 Nov  7 10:31 cri-dockerd_0.2.6.3-0.ubuntu-focal_amd64.deb

[root@k8s-Ansible ansible]#vim cri-docker.yml
---
- name: install cridocker
  hosts: all
  tasks:
    - name: copy
      copy:
        src: cri-dockerd_0.2.6.3-0.ubuntu-focal_amd64.deb
        dest: /root/cri-dockerd_0.2.6.3-0.ubuntu-focal_amd64.deb
    - name: shell        
      shell: apt -y install /root/cri-dockerd_0.2.6.3-0.ubuntu-focal_amd64.deb
                                                                                            
[root@k8s-Ansible ansible]#ansible-playbook cri-docker.yml 

PLAY [install cridocker] ***************************************************************************************************************************

TASK [Gathering Facts] *****************************************************************************************************************************
ok: [10.0.0.204]
ok: [10.0.0.203]
ok: [10.0.0.201]
ok: [10.0.0.202]
ok: [10.0.0.205]
ok: [10.0.0.207]
ok: [10.0.0.206]

TASK [copy] ****************************************************************************************************************************************
ok: [10.0.0.204]
ok: [10.0.0.202]
ok: [10.0.0.203]
ok: [10.0.0.205]
ok: [10.0.0.201]
ok: [10.0.0.206]
ok: [10.0.0.207]

TASK [shell] ***************************************************************************************************************************************
changed: [10.0.0.203]
changed: [10.0.0.201]
changed: [10.0.0.202]
changed: [10.0.0.205]
changed: [10.0.0.204]
changed: [10.0.0.207]
changed: [10.0.0.206]

PLAY RECAP *****************************************************************************************************************************************
10.0.0.201                 : ok=3    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.202                 : ok=3    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.203                 : ok=3    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.204                 : ok=3    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.205                 : ok=3    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.206                 : ok=3    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.207                 : ok=3    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   

[root@k8s-Ansible ansible]#ss -ntlp
State       Recv-Q      Send-Q             Local Address:Port              Peer Address:Port      Process                                           
LISTEN      0           4096               127.0.0.53%lo:53                     0.0.0.0:*          users:(("systemd-resolve",pid=47442,fd=13))      
LISTEN      0           128                      0.0.0.0:22                     0.0.0.0:*          users:(("sshd",pid=766,fd=3))                    
LISTEN      0           128                         [::]:22                        [::]:*          users:(("sshd",pid=766,fd=4))                    
LISTEN      0           4096                           *:42393                        *:*          users:(("cri-dockerd",pid=59839,fd=3)) 

Installing kubelet, kubeadm, and kubectl

https://mirrors.huaweicloud.com/home
[root@k8s-Ansible ansible]#cat <<EOF > /etc/apt/sources.list.d/kubernetes.list 
> deb https://repo.huaweicloud.com/kubernetes/apt/ kubernetes-xenial main
> EOF
[root@k8s-Ansible ansible]#ll /etc/apt/sources.list.d/kubernetes.list 
-rw-r--r-- 1 root root 72 Nov  7 10:48 /etc/apt/sources.list.d/kubernetes.list
[root@k8s-Ansible ansible]#cat /etc/apt/sources.list.d/kubernetes.list
deb https://repo.huaweicloud.com/kubernetes/apt/ kubernetes-xenial main
[root@k8s-Ansible ansible]#cp /etc/apt/sources.list.d/kubernetes.list files/


[root@k8s-Ansible ansible]#vim kubelet-kubeadm-kubectl.yml 
---
- name: kubelet-kubeadm-kubectl
  hosts: all
  tasks:
    - name: copy
      copy:
        src: kubernetes.list
        dest: /etc/apt/sources.list.d/kubernetes.list
    - name: shell
      shell: curl -s https://repo.huaweicloud.com/kubernetes/apt/doc/apt-key.gpg | sudo apt-key add -
    - name: apt
      apt:
        update_cache: yes
        name:
          - kubeadm
          - kubelet
          - kubectl
        state: latest
    - name: service
      service:
        name: kubelet
        enabled: yes
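
The shell+curl step here (and the similar one in docker.yml) is what triggers the "Consider using the get_url or uri module" warnings seen later in the run output; an equivalent, more idiomatic variant using the apt_key and apt_repository modules would be (a sketch of an alternative, not what was actually run):

    - name: add kubernetes apt key
      apt_key:
        url: https://repo.huaweicloud.com/kubernetes/apt/doc/apt-key.gpg
        state: present
    - name: add kubernetes apt repository
      apt_repository:
        repo: "deb https://repo.huaweicloud.com/kubernetes/apt/ kubernetes-xenial main"
        state: present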

[root@k8s-Ansible ansible]#ansible-playbook kubelet-kubeadm-kubectl.yml 

PLAY [kubelet-kubeadm-kubectl] *********************************************************************************************************************

TASK [Gathering Facts] *****************************************************************************************************************************
ok: [10.0.0.201]
ok: [10.0.0.203]
ok: [10.0.0.202]
ok: [10.0.0.204]
ok: [10.0.0.206]
ok: [10.0.0.205]
ok: [10.0.0.207]

TASK [copy] ****************************************************************************************************************************************
ok: [10.0.0.205]
ok: [10.0.0.204]
ok: [10.0.0.202]
ok: [10.0.0.201]
ok: [10.0.0.203]
ok: [10.0.0.206]
ok: [10.0.0.207]

TASK [shell] ***************************************************************************************************************************************
[WARNING]: Consider using the get_url or uri module rather than running 'curl'.  If you need to use command because get_url or uri is insufficient
you can add 'warn: false' to this command task or set 'command_warnings=False' in ansible.cfg to get rid of this message.
changed: [10.0.0.205]
changed: [10.0.0.204]
changed: [10.0.0.201]
changed: [10.0.0.203]
changed: [10.0.0.202]
changed: [10.0.0.207]
changed: [10.0.0.206]

TASK [apt] *****************************************************************************************************************************************
ok: [10.0.0.205]
ok: [10.0.0.201]
ok: [10.0.0.202]
ok: [10.0.0.203]
ok: [10.0.0.204]
ok: [10.0.0.206]
ok: [10.0.0.207]

TASK [service] *************************************************************************************************************************************
ok: [10.0.0.204]
ok: [10.0.0.201]
ok: [10.0.0.202]
ok: [10.0.0.203]
ok: [10.0.0.205]
ok: [10.0.0.206]
ok: [10.0.0.207]

PLAY RECAP *****************************************************************************************************************************************
10.0.0.201                 : ok=5    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.202                 : ok=5    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.203                 : ok=5    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.204                 : ok=5    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.205                 : ok=5    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.206                 : ok=5    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.207                 : ok=5    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   


[root@k8s-Master-01 ~]#apt list |egrep kube

WARNING: apt does not have a stable CLI interface. Use with caution in scripts.

cri-tools/kubernetes-xenial,now 1.25.0-00 amd64 [installed,automatic]
docker-engine/kubernetes-xenial 1.11.2-0~xenial amd64
golang-github-kubernetes-gengo-dev/focal 0.0~git20170531.0.c79c13d-1 all
kubeadm/kubernetes-xenial,now 1.25.3-00 amd64 [installed]
kubectl/kubernetes-xenial,now 1.25.3-00 amd64 [installed]
kubelet/kubernetes-xenial,now 1.25.3-00 amd64 [installed]
kubernetes-cni/kubernetes-xenial,now 1.1.1-00 amd64 [installed,automatic]
kubernetes/focal 1.0 all
kubetail/focal 1.6.5-2 all
python3-kubernetes/focal 7.0.0~a1-2 all
ruby-kubeclient/focal 4.6.0-1 all
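
Because the playbook installs kubeadm/kubelet/kubectl with state: latest, a later apt upgrade could move them to a newer version unexpectedly. An optional extra task (a sketch, not part of the original playbook) can hold the packages at the installed 1.25.3 version:

    - name: hold the kube packages at the installed version
      dpkg_selections:
        name: "{{ item }}"
        selection: hold
      loop:
        - kubeadm
        - kubelet
        - kubectl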

Integrating kubelet with cri-dockerd

Configure cri-dockerd so that it can correctly load the CNI plugin. Edit the /usr/lib/systemd/system/cri-docker.service file and make sure the ExecStart value in its [Service] section looks like the following.

ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni --cni-bin-dir=/opt/cni/bin --cni-cache-dir=/var/lib/cni/cache --cni-conf-dir=/etc/cni/net.d
The parameters that need to be added (their values must correspond to the actual paths of the CNI plugin deployed on the system):

--network-plugin: the type of network plugin interface to use; CNI is used here;

--cni-bin-dir: the directory searched for CNI plugin binaries;

--cni-cache-dir: the cache directory used by the CNI plugin;

--cni-conf-dir: the directory from which the CNI plugin configuration files are loaded;

After the changes are made, reload systemd and restart the cri-docker.service unit.

~# systemctl daemon-reload && systemctl restart cri-docker.service
[root@k8s-Ansible ansible]#scp /usr/lib/systemd/system/cri-docker.service files/

[root@k8s-Ansible ansible]#cat files/cri-docker.service 
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket

[Service]
Type=notify
#ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd://
ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni --cni-bin-dir=/opt/cni/bin --cni-cache-dir=/var/lib/cni/cache --cni-conf-dir=/etc/cni/net.d
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always

# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3

# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s

# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target


[root@k8s-Ansible ansible]#vim config.yml
---
- name: config
  hosts: all
  tasks:
    - name: copy
      copy:
        src: cri-docker.service
        dest: /usr/lib/systemd/system/cri-docker.service
    - name: service
      service:
        daemon_reload: yes
        name: cri-docker.service
        state: restarted
        enabled: yes

[root@k8s-Ansible ansible]#ansible-playbook config.yml 

PLAY [config] ************************************************************************************************************************************************************************************

TASK [Gathering Facts] ***************************************************************************************************************************************************************************
ok: [10.0.0.205]
ok: [10.0.0.201]
ok: [10.0.0.203]
ok: [10.0.0.204]
ok: [10.0.0.202]
ok: [10.0.0.206]
ok: [10.0.0.207]

TASK [copy] **************************************************************************************************************************************************************************************
changed: [10.0.0.204]
changed: [10.0.0.203]
changed: [10.0.0.202]
changed: [10.0.0.201]
changed: [10.0.0.205]
changed: [10.0.0.206]
ok: [10.0.0.207]

TASK [service] ***********************************************************************************************************************************************************************************
changed: [10.0.0.204]
changed: [10.0.0.201]
changed: [10.0.0.202]
changed: [10.0.0.203]
changed: [10.0.0.205]
changed: [10.0.0.206]
changed: [10.0.0.207]

PLAY RECAP ***************************************************************************************************************************************************************************************
10.0.0.201                 : ok=3    changed=2    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.202                 : ok=3    changed=2    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.203                 : ok=3    changed=2    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.204                 : ok=3    changed=2    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.205                 : ok=3    changed=2    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.206                 : ok=3    changed=2    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.207                 : ok=3    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   

Configuring kubelet

Configure kubelet by pointing it at the Unix socket that cri-dockerd opens locally, which defaults to "/run/cri-dockerd.sock". Edit the file /etc/sysconfig/kubelet and add the parameter shown below.

Note: if the /etc/sysconfig directory does not exist, create it first.

KUBELET_KUBEADM_ARGS="--container-runtime=remote --container-runtime-endpoint=/run/cri-dockerd.sock"
This configuration can also be skipped entirely; instead, the "--cri-socket unix:///run/cri-dockerd.sock" option can be passed directly to the kubeadm commands used later.
[root@k8s-Ansible ansible]#cat kubelet_conf.yml 
---
- name: configkubelet
  hosts: all
  tasks: 
    - name: dir
      file: 
         path: /etc/sysconfig
         state: directory
    - name: copy
      copy: 
        src: kubelet
        dest: /etc/sysconfig/kubelet
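
The files/kubelet file copied by this playbook is expected to contain exactly the single line given in the instructions above:

KUBELET_KUBEADM_ARGS="--container-runtime=remote --container-runtime-endpoint=/run/cri-dockerd.sock"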


[root@k8s-Ansible ansible]#ansible-playbook kubelet_conf.yml 

PLAY [configkubelet] ***************************************************************************************************************************************************************************

TASK [Gathering Facts] *************************************************************************************************************************************************************************
ok: [10.0.0.205]
ok: [10.0.0.203]
ok: [10.0.0.204]
ok: [10.0.0.201]
ok: [10.0.0.206]
ok: [10.0.0.207]
ok: [10.0.0.202]

TASK [dir] *************************************************************************************************************************************************************************************
changed: [10.0.0.201]
changed: [10.0.0.204]
changed: [10.0.0.205]
changed: [10.0.0.202]
changed: [10.0.0.203]
changed: [10.0.0.206]
changed: [10.0.0.207]

TASK [copy] ************************************************************************************************************************************************************************************
changed: [10.0.0.202]
changed: [10.0.0.201]
changed: [10.0.0.205]
changed: [10.0.0.204]
changed: [10.0.0.203]
changed: [10.0.0.206]
changed: [10.0.0.207]

PLAY RECAP *************************************************************************************************************************************************************************************
10.0.0.201                 : ok=3    changed=2    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.202                 : ok=3    changed=2    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.203                 : ok=3    changed=2    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.204                 : ok=3    changed=2    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.205                 : ok=3    changed=2    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.206                 : ok=3    changed=2    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.207                 : ok=3    changed=2    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   

Initializing the first master node

This step starts building the master nodes of the Kubernetes cluster; once it is done, the worker nodes simply join the cluster. Note that on a kubeadm-deployed cluster, the core components kube-apiserver, kube-controller-manager, kube-scheduler and etcd all run as static Pods, and the images they depend on are pulled by default from the k8s.gcr.io registry, which cannot be reached directly from here. There are two common workarounds, listed below; this deployment uses the second, simpler one (a domestic mirror registry).

  • use a proxy service that can reach the registry;
  • use a domestic mirror registry, e.g. registry.aliyuncs.com/google_containers.

Initialize the master node (perform the following on k8s-Master-01)

Before running the initialization command, first run the command below to pull the required images separately, and only then run kubeadm init, so the image download process can be observed.
~# kubeadm config images list
The command above lists image information similar to the following.
[root@k8s-Master-01 ~]#kubeadm config images list
registry.k8s.io/kube-apiserver:v1.25.3
registry.k8s.io/kube-controller-manager:v1.25.3
registry.k8s.io/kube-scheduler:v1.25.3
registry.k8s.io/kube-proxy:v1.25.3
registry.k8s.io/pause:3.8
registry.k8s.io/etcd:3.5.4-0
registry.k8s.io/coredns/coredns:v1.9.3

[root@k8s-Master-01 ~]#kubeadm config images pull --cri-socket unix:///run/cri-dockerd.sock
#Note: if the images cannot be downloaded, add the following option to pull them from the Aliyun mirror instead: --image-repository=registry.aliyuncs.com/google_containers
[root@k8s-Master-01 ~]#kubeadm config images pull --cri-socket unix:///run/cri-dockerd.sock --image-repository=registry.aliyuncs.com/google_containers
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.25.3
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.25.3
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.25.3
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.25.3
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.8
[config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.5.4-0
[config/images] Pulled registry.aliyuncs.com/google_containers/coredns:v1.9.3


The master node can then be initialized. kubeadm init supports two initialization styles: passing the key deployment settings as command-line options, or using a dedicated YAML configuration file. The latter lets users customize every deployment parameter and is the more flexible and convenient of the two. Both approaches are shown below; the second is recommended.

Initialization method 1

Run the following command to initialize the k8s-Master-01 node:

  ~# kubeadm init \        
        --control-plane-endpoint="kubeapi.shuhong.com" \
        --kubernetes-version=v1.25.3 \
        --pod-network-cidr=10.244.0.0/16 \
        --service-cidr=10.96.0.0/12 \
        --token-ttl=0 \
        --cri-socket unix:///run/cri-dockerd.sock \
        --upload-certs
A brief description of the options used in the command:

--image-repository: the image registry to pull from; for this Kubernetes version the default is registry.k8s.io;

--kubernetes-version: the version of the Kubernetes components; it must match the version of the installed kubelet package;

--control-plane-endpoint: the fixed access endpoint of the control plane, either an IP address or a DNS name; it is used as the API Server address in the kubeconfig files of cluster administrators and cluster components; it may be omitted in a single-control-plane deployment;

--pod-network-cidr: the address range of the Pod network in CIDR notation; the Flannel network plugin defaults to 10.244.0.0/16, Project Calico to 192.168.0.0/16;

--service-cidr: the address range of the Service network in CIDR notation, 10.96.0.0/12 by default; usually only Flannel-like network plugins require it to be set manually;

--apiserver-advertise-address: the IP address the apiserver advertises to the other components; it should normally be the master node's IP used for intra-cluster communication; 0.0.0.0 means all available addresses on the node;

--token-ttl: the lifetime of the shared bootstrap token, 24 hours by default; 0 means it never expires. To keep a token leaked through insecure storage from endangering the cluster, setting an expiration time is recommended. If the option is not set and the token has already expired when further nodes need to join, a new token and join command can be generated with:

kubeadm token create --print-join-command
Tip: when gcr.io cannot be reached, the "--image-repository=registry.aliyuncs.com/google_containers" option can be added to the command above to pull the images from a domestic mirror instead;

Note: if the nodes have not had swap disabled, the extra option "--ignore-preflight-errors=Swap" is also required so that kubeadm ignores that error;

[root@k8s-Master-01 ~]#kubeadm init  --control-plane-endpoint="kubeapi.shuhong.com" --kubernetes-version=v1.25.3 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12  --token-ttl=0 --cri-socket unix:///run/cri-dockerd.sock --upload-certs --image-repository=registry.aliyuncs.com/google_containers
[init] Using Kubernetes version: v1.25.3
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master-01 kubeapi.shuhong.com kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.201]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master-01 localhost] and IPs [10.0.0.201 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master-01 localhost] and IPs [10.0.0.201 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 8.503608 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
98e9805ecf2099ccc45cd294999cd8dc272162756fe4d6cff9c51aaed149ce00
[mark-control-plane] Marking the node k8s-master-01 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master-01 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: ip0thm.05rwqv97bmhu8aez
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join kubeapi.shuhong.com:6443 --token ip0thm.05rwqv97bmhu8aez \
	--discovery-token-ca-cert-hash sha256:f1b04cf0b9f413ab72a65f80790e8b7ee86dd7d62a23b23d2756279d230cc6cf \
	--control-plane --certificate-key 98e9805ecf2099ccc45cd294999cd8dc272162756fe4d6cff9c51aaed149ce00

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join kubeapi.shuhong.com:6443 --token ip0thm.05rwqv97bmhu8aez \
	--discovery-token-ca-cert-hash sha256:f1b04cf0b9f413ab72a65f80790e8b7ee86dd7d62a23b23d2756279d230cc6cf 


[root@k8s-Master-01 ~]#mkdir -p .kube
[root@k8s-Master-01 ~]#cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

Initialization method 2

kubeadm can also load its settings from a configuration file, which allows richer deployment options to be customized. The built-in default initialization configuration can be printed with

kubeadm config print init-defaults
The example configuration below uses the output of that command as its skeleton; it explicitly sets the kube-proxy mode to ipvs, and the registry used for the system images can be changed by modifying the imageRepository value.

apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
kind: InitConfiguration
localAPIEndpoint:
  # the IP address of the first control-plane node being initialized;
  advertiseAddress: 172.29.1.1
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///run/cri-dockerd.sock
  imagePullPolicy: IfNotPresent
  # the hostname of the first control-plane node;
  name: k8s-master01.magedu.com
  taints: 
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
  - effect: NoSchedule
    key: node-role.kubernetes.io/control-plane
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
# the access endpoint of the control plane; here it is mapped to the kubeapi.magedu.com domain name;
controlPlaneEndpoint: "kubeapi.magedu.com:6443"
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.24.3
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
# the proxy mode kube-proxy uses for Services; the default is iptables;
mode: "ipvs"
Save the above into a configuration file, e.g. kubeadm-config.yaml, and then run the following command to get an initial cluster configuration similar to the first method, except that the Service proxy mode is now set to ipvs.

  ~# kubeadm init --config kubeadm-config.yaml --upload-certs

Steps after initialization completes
For a new Kubernetes user, whichever of the two methods is used, record the follow-up steps printed at the end of the command output, including the final kubeadm join commands. Below is an example of that output; it lists the remaining operations.

# Below is the message printed after the first control-plane node has been initialized successfully, plus the follow-up steps
Your Kubernetes control-plane has initialized successfully!

# To finish the initialization, the administrator still has to perform a few necessary steps manually
To start using your cluster, you need to run the following as a regular user:

# Step 1: set up the kubeconfig file the Kubernetes cluster administrator uses to authenticate to the cluster
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Instead of the above, the KUBECONFIG environment variable can also point kubectl and other tools at the kubeconfig to use by default;
Alternatively, if you are the root user, you can run:

export KUBECONFIG=/etc/kubernetes/admin.conf

# Step 2: deploy a network plugin for the Kubernetes cluster; which plugin to use is up to the administrator;
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

# Step 3: add additional control-plane nodes to the cluster; this article performs that step in a later section.
You can now join any number of the control-plane node running the following command on each as root:

# Run a command similar to the following as root on the other control-plane nodes where kubeadm has been deployed;
# the hash values differ from one deployment to another; this step can only be run on other control-plane nodes;
# Tip: when docker-ce is used as the container runtime together with cri-dockerd, the command below
#      usually needs the extra option "--cri-socket unix:///run/cri-dockerd.sock";
  kubeadm join kubeapi.magedu.com:6443 --token emvhys.9d7623w48vgm99qz \
        --discovery-token-ca-cert-hash sha256:f13e30d459bf18fa4415f30822a09fe95ab84b213d6dc77d29beb0542bed4cee \
        --control-plane --certificate-key e1996b2c9bf3e60fb75a622c9245539c6d82904fd2dd89f12c6efe459edd0c5b

# Because "--upload-certs" was used with "kubeadm init", the certificates needed when adding the other masters were uploaded automatically during initialization;
# for security reasons they are deleted automatically after two hours;
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

# Step 4: add worker nodes to the cluster
Then you can join any number of worker nodes by running the following on each as root:

# Run a command similar to the following as root on every worker node where kubeadm has been deployed;
# Tip: when docker-ce is used as the container runtime together with cri-dockerd, the command below
#      usually needs the extra option "--cri-socket unix:///run/cri-dockerd.sock";
kubeadm join kubeapi.magedu.com:6443 --token emvhys.9d7623w48vgm99qz \
        --discovery-token-ca-cert-hash sha256:f13e30d459bf18fa4415f30822a09fe95ab84b213d6dc77d29beb0542bed4cee
The complete reference for the kubeadm init command is in the official documentation at https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/.

Setting up kubectl
kubectl is the command-line client of kube-apiserver; it implements almost every management operation apart from system deployment and is one of the commands Kubernetes administrators use most. kubectl must be authenticated and authorized by the API server before it can perform management operations. A kubeadm-deployed cluster generates an administrator-privileged authentication configuration file, /etc/kubernetes/admin.conf, which kubectl loads from the default path "$HOME/.kube/config"; a different location can also be given with the --kubeconfig option.

Copy the administrator authentication configuration file into the home directory of the target user (here the current user, root):

~# mkdir ~/.kube

~# cp /etc/kubernetes/admin.conf  ~/.kube/config

Deploying a network plugin

The Pod network in Kubernetes relies on third-party plugins, of which there are dozens; well-known ones include flannel, calico, canal and kube-router, and a simple, easy-to-use implementation is the flannel project originally provided by CoreOS. The commands below deploy flannel onto the Kubernetes cluster:

https://github.com/flannel-io/flannel/releases/tag/v0.20.1
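
Before applying the manifest, it is worth confirming that the Network value in its kube-flannel-cfg ConfigMap matches the --pod-network-cidr passed to kubeadm init (10.244.0.0/16 here). The relevant fragment of kube-flannel.yml typically looks like this:

  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
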
[root@k8s-Ansible ansible]#cat flanneld.yml 
---
- name: flanneld
  hosts: all
  tasks:
    - name: dir
      file:
        path: /opt/bin
        state: directory
    - name: copy
      copy:
        src: flanneld-amd64
        dest: /opt/bin/flanneld
    - name: shell
      shell: chmod +x /opt/bin/flanneld

[root@k8s-Ansible ansible]#ansible-playbook flanneld.yml 

PLAY [flanneld] *************************************************************************************************************************

TASK [Gathering Facts] ******************************************************************************************************************
ok: [10.0.0.202]
ok: [10.0.0.204]
ok: [10.0.0.203]
ok: [10.0.0.201]
ok: [10.0.0.205]
ok: [10.0.0.206]
ok: [10.0.0.207]

TASK [dir] ******************************************************************************************************************************
ok: [10.0.0.204]
ok: [10.0.0.203]
ok: [10.0.0.205]
ok: [10.0.0.202]
ok: [10.0.0.201]
ok: [10.0.0.206]
ok: [10.0.0.207]

TASK [copy] *****************************************************************************************************************************
ok: [10.0.0.205]
ok: [10.0.0.202]
ok: [10.0.0.203]
ok: [10.0.0.204]
ok: [10.0.0.201]
ok: [10.0.0.206]
ok: [10.0.0.207]

TASK [shell] ****************************************************************************************************************************
[WARNING]: Consider using the file module with mode rather than running 'chmod'.  If you need to use command because file is
insufficient you can add 'warn: false' to this command task or set 'command_warnings=False' in ansible.cfg to get rid of this message.
changed: [10.0.0.203]
changed: [10.0.0.205]
changed: [10.0.0.202]
changed: [10.0.0.204]
changed: [10.0.0.201]
changed: [10.0.0.206]
changed: [10.0.0.207]

PLAY RECAP ******************************************************************************************************************************
10.0.0.201                 : ok=4    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.202                 : ok=4    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.203                 : ok=4    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.204                 : ok=4    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.205                 : ok=4    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.206                 : ok=4    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.0.0.207                 : ok=4    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
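
As the warning in the output above suggests, the separate chmod shell task could be folded into the copy task by setting mode; an equivalent variant (not what was actually run) would be:

    - name: copy
      copy:
        src: flanneld-amd64
        dest: /opt/bin/flanneld
        mode: "0755"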

Run on the master node:
kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
[root@k8s-Master-01 ~]#kubectl apply -f kube-flannel.yml
namespace/kube-flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created

[root@k8s-Master-01 ~]#kubectl get pods -n kube-flannel 
NAME                    READY   STATUS    RESTARTS   AGE
kube-flannel-ds-jft6d   1/1     Running   0          94s

Verify that the master node is ready

[root@k8s-Master-01 ~]#kubectl get nodes
NAME            STATUS   ROLES           AGE   VERSION
k8s-master-01   Ready    control-plane   26m   v1.25.3

Adding nodes to the cluster

#Copy the join command printed during initialization to the other nodes and run it there, additionally appending --cri-socket unix:///run/cri-dockerd.sock

kubeadm join kubeapi.shuhong.com:6443 --token w0116z.0sdrjhz4ub7zxsr8 \
	--discovery-token-ca-cert-hash sha256:b9ba4ae7b90130d217bdc40e529e15bbb1d38c2dec2ece6af2d45f1ab7f7307a \
	--control-plane --certificate-key 447af045ceaf878f0332d96878f1a0c7d39fc68920ff41c755a995c99d967550 --cri-socket unix:///run/cri-dockerd.sock

 [root@k8s-Master-02 ~]#kubeadm join kubeapi.shuhong.com:6443 --token ip0thm.05rwqv97bmhu8aez \
> --discovery-token-ca-cert-hash sha256:f1b04cf0b9f413ab72a65f80790e8b7ee86dd7d62a23b23d2756279d230cc6cf \
> --control-plane --certificate-key 98e9805ecf2099ccc45cd294999cd8dc272162756fe4d6cff9c51aaed149ce00  --cri-socket unix:///run/cri-dockerd.sock
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[preflight] Running pre-flight checks before initializing the new control plane instance
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[download-certs] Downloading the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master-02 localhost] and IPs [10.0.0.202 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master-02 localhost] and IPs [10.0.0.202 127.0.0.1 ::1]
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master-02 kubeapi.shuhong.com kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.202]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[certs] Using the existing "sa" key
[kubeconfig] Generating kubeconfig files
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[check-etcd] Checking that the etcd cluster is healthy
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[etcd] Announced new etcd member joining to the existing etcd cluster
[etcd] Creating static Pod manifest for "etcd"
[etcd] Waiting for the new etcd member to join the cluster. This can take up to 40s
The 'update-status' phase is deprecated and will be removed in a future release. Currently it performs no operation
[mark-control-plane] Marking the node k8s-master-02 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master-02 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]

This node has joined the cluster and a new control plane instance was created:

* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
* A new etcd member was added to the local/stacked etcd cluster.

To start administering your cluster from this node, you need to run the following as a regular user:

	mkdir -p $HOME/.kube
	sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	sudo chown $(id -u):$(id -g) $HOME/.kube/config

Run 'kubectl get nodes' to see this node join the cluster.

[root@k8s-Master-03 ~]#kubeadm join kubeapi.shuhong.com:6443 --token ip0thm.05rwqv97bmhu8aez \
> --discovery-token-ca-cert-hash sha256:f1b04cf0b9f413ab72a65f80790e8b7ee86dd7d62a23b23d2756279d230cc6cf \
> --control-plane --certificate-key 98e9805ecf2099ccc45cd294999cd8dc272162756fe4d6cff9c51aaed149ce00   --cri-socket unix:///run/cri-dockerd.sock
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[preflight] Running pre-flight checks before initializing the new control plane instance
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[download-certs] Downloading the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master-03 localhost] and IPs [10.0.0.203 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master-03 localhost] and IPs [10.0.0.203 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master-03 kubeapi.shuhong.com kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.203]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[certs] Using the existing "sa" key
[kubeconfig] Generating kubeconfig files
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[check-etcd] Checking that the etcd cluster is healthy
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[etcd] Announced new etcd member joining to the existing etcd cluster
[etcd] Creating static Pod manifest for "etcd"
[etcd] Waiting for the new etcd member to join the cluster. This can take up to 40s
The 'update-status' phase is deprecated and will be removed in a future release. Currently it performs no operation
[mark-control-plane] Marking the node k8s-master-03 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master-03 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]

This node has joined the cluster and a new control plane instance was created:

* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
* A new etcd member was added to the local/stacked etcd cluster.

To start administering your cluster from this node, you need to run the following as a regular user:

	mkdir -p $HOME/.kube
	sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	sudo chown $(id -u):$(id -g) $HOME/.kube/config

Run 'kubectl get nodes' to see this node join the cluster.

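# Optional: instead of copying admin.conf into ~/.kube, the root user can point kubectl at it directly for the current shell (a convenience sketch, not part of the join output above)
export KUBECONFIG=/etc/kubernetes/admin.conf
kubectl get nodes
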
# The worker nodes all perform the same join operation; note that the worker join command is printed in the last few lines of the kubeadm init output, and it also needs --cri-socket unix:///run/cri-dockerd.sock appended
kubeadm join kubeapi.shuhong.com:6443 --token w0116z.0sdrjhz4ub7zxsr8 \
	--discovery-token-ca-cert-hash sha256:b9ba4ae7b90130d217bdc40e529e15bbb1d38c2dec2ece6af2d45f1ab7f7307a  --cri-socket unix:///run/cri-dockerd.sock
 
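# If the bootstrap token above has already expired (the default TTL is 24h), a fresh worker join command can be printed from any control-plane node; append --cri-socket unix:///run/cri-dockerd.sock as before. An illustrative tip, not part of the original run:
kubeadm token create --print-join-command
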
[root@k8s-Node-01 ~]#kubeadm join kubeapi.shuhong.com:6443 --token w0116z.0sdrjhz4ub7zxsr8 \
> --discovery-token-ca-cert-hash sha256:b9ba4ae7b90130d217bdc40e529e15bbb1d38c2dec2ece6af2d45f1ab7f7307a  --cri-socket unix:///run/cri-dockerd.sock
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@k8s-Node-02 ~]#kubeadm join kubeapi.shuhong.com:6443 --token w0116z.0sdrjhz4ub7zxsr8 \
> --discovery-token-ca-cert-hash sha256:b9ba4ae7b90130d217bdc40e529e15bbb1d38c2dec2ece6af2d45f1ab7f7307a  --cri-socket unix:///run/cri-dockerd.sock
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@k8s-Node-03 ~]#kubeadm join kubeapi.shuhong.com:6443 --token w0116z.0sdrjhz4ub7zxsr8 \
> --discovery-token-ca-cert-hash sha256:b9ba4ae7b90130d217bdc40e529e15bbb1d38c2dec2ece6af2d45f1ab7f7307a  --cri-socket unix:///run/cri-dockerd.sock
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.


[root@k8s-Master-01 ~]#kubectl get nodes 
NAME            STATUS   ROLES           AGE     VERSION
k8s-master-01   Ready    control-plane   4m40s   v1.25.3
k8s-master-02   Ready    control-plane   2m3s    v1.25.3
k8s-master-03   Ready    control-plane   73s     v1.25.3
k8s-node-01     Ready    <none>          48s     v1.25.3
k8s-node-02     Ready    <none>          25s     v1.25.3
k8s-node-03     Ready    <none>          6s      v1.25.3
[root@k8s-Master-01 ~]#kubectl get pods -n kube-flannel
NAME                    READY   STATUS    RESTARTS   AGE
kube-flannel-ds-g4blp   1/1     Running   0          22s
kube-flannel-ds-m9rvp   1/1     Running   0          88s
kube-flannel-ds-mhhhp   1/1     Running   0          2m19s
kube-flannel-ds-pbl29   1/1     Running   0          3m28s
kube-flannel-ds-s97b8   1/1     Running   0          41s
kube-flannel-ds-xgstl   1/1     Running   0          64s
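# Optional check (not in the original transcript): every node should have been assigned a podCIDR out of flannel's 10.244.0.0/16 network, matching the Pod IPs seen below
kubectl get nodes -o custom-columns='NAME:.metadata.name,PODCIDR:.spec.podCIDR'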

Example: run nginx with three replicas

[root@k8s-Master-01 ~]#kubectl create deployment nginx --image nginx:alpine --replicas=3
deployment.apps/nginx created
[root@k8s-Master-01 ~]#kubectl get pods -owide 
NAME                     READY   STATUS    RESTARTS   AGE   IP           NODE          NOMINATED NODE   READINESS GATES
nginx-55f494c486-2rrht   1/1     Running   0          62s   10.244.3.2   k8s-node-01   <none>           <none>
nginx-55f494c486-hd7xn   1/1     Running   0          62s   10.244.4.2   k8s-node-02   <none>           <none>
nginx-55f494c486-hwl5v   1/1     Running   0          62s   10.244.5.2   k8s-node-03   <none>           <none>
[root@k8s-Master-01 ~]#kubectl create service nodeport nginx --tcp=80:80
service/nginx created
[root@k8s-Master-01 ~]#kubectl get service
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP        6m52s
nginx        NodePort    10.107.59.225   <none>        80:31658/TCP   6s

[root@k8s-Master-01 ~]#curl 10.107.59.225
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>

[root@k8s-Ansible ~]#curl 10.0.0.201:31658
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>

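The two imperative kubectl create commands above can also be expressed declaratively. A minimal manifest sketch (file name and labels are illustrative, not taken from the cluster):

# nginx-demo.yaml -- declarative equivalent of the nginx Deployment and NodePort Service created above
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:alpine
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
  - port: 80
    targetPort: 80

Applying it with kubectl apply -f nginx-demo.yaml would produce the same Deployment and Service as the commands above.
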
Deploy WordPress

[root@k8s-Master-01 ~]#mkdir  /data
[root@k8s-Master-01 ~]#cd  /data
[root@k8s-Master-01 data]#git clone https://github.com/iKubernetes/learning-k8s.git
[root@k8s-Master-01 data]#cd learning-k8s/wordpress/
[root@k8s-Master-01 wordpress]#ls
mysql  mysql-ephemeral  nginx  README.md  wordpress  wordpress-apache-ephemeral

[root@k8s-Master-01 wordpress]#kubectl apply -f mysql-ephemeral/
[root@k8s-Master-01 wordpress]#kubectl apply -f wordpress-apache-ephemeral/
[root@k8s-Master-01 wordpress]#kubectl get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP        28h
mysql        ClusterIP   10.110.29.22     <none>        3306/TCP       26m
nginx        NodePort    10.107.59.225    <none>        80:31658/TCP   28h
wordpress    NodePort    10.105.168.178   <none>        80:30616/TCP   24m
[root@k8s-Master-01 wordpress]#kubectl get pods
NAME                         READY   STATUS    RESTARTS   AGE
mysql-787575d954-l99pl       1/1     Running   0          27m
nginx-55f494c486-2rrht       1/1     Running   0          28h
nginx-55f494c486-hd7xn       1/1     Running   0          28h
nginx-55f494c486-hwl5v       1/1     Running   0          28h
wordpress-6c854887c8-s6f9g   1/1     Running   0          24m
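# The wordpress Service is exposed on NodePort 30616 (see the kubectl get svc output above), so it should answer on any node's IP; an illustrative check from the Ansible host:
curl -I http://10.0.0.201:30616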