k8s v1.20.10 1 master & 2 node binary deployment guide


Experimental environment

Host network information and component information

K8S cluster role    IP              Host name       Installed components
master              192.168.0.10    k8s-master-1    apiserver, controller-manager, scheduler, etcd, docker, kubectl, kubelet, kube-proxy, calico, coredns, metric-server
node                192.168.0.11    k8s-node-1      kubelet, kube-proxy, docker, calico
node                192.168.0.12    k8s-node-2      kubelet, kube-proxy, docker, calico

Note: normally the master node only handles the control plane and does not run kube-proxy, calico, coredns or metric-server. To save resources, the master node also acts as a worker node here.

# System version
	CentOS 7.9 (4.19.12-1.el7.elrepo.x86_64)

# Configuration
	4 GB memory / 2 vCPU / 70 GB disk, virtualization enabled, NAT network mode
	
# Component version
	k8s-server&k8s-node(apiserver,kubectl,kube-scheduler,kube-proxy) 1.20.10
	etcd 3.5.0
	pause: v3.6
	calico/node: v3.20.1
	calico/pod2daemon-flexvol: v3.20.1
	calico/cni: v3.20.1
	coredns/coredns: v1.7.0
	docker: 20.10.8
	metric-server: v0.4.1

# network
	service: 10.0.0.0/24
	pod: 10.70.2.0/24

Host certificate information

There are three CA authorities: one for the apiserver, one for etcd, and one for the API aggregation layer (sharing a CA with the apiserver may cause conflicts, so a separate CA is used here). They are: kube-apiserver-ca, etcd-ca, and front-proxy-ca.

Host initialization

Configure host name

# master-1
	hostnamectl set-hostname k8s-master-1 && bash

# node-1
	hostnamectl set-hostname k8s-node-1 && bash

# node-2
	hostnamectl set-hostname k8s-node-2 && bash

Configure hosts file

# master,node
cat >> /etc/hosts <<EOF
192.168.0.10 k8s-master-1
192.168.0.11 k8s-node-1
192.168.0.12 k8s-node-2
EOF

Passwordless SSH login

# master	
	ssh-keygen -t rsa
	ssh-copy-id -i .ssh/id_rsa.pub root@k8s-node-1
	ssh-copy-id -i .ssh/id_rsa.pub root@k8s-node-2

Turn off firewall

# master,node

# Turn off firewall
	systemctl disable firewalld --now

# Close selinux
	sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
	sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
	setenforce 0

Close swap partition

# master,node
	swapoff -a && sysctl -w vm.swappiness=0
	sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab

Configure yum source

# master,node
	curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
	yum install -y yum-utils device-mapper-persistent-data lvm2
	yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
	sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo

# Install base dependency package
	yum install -y yum-utils device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel wget vim ncurses-devel autoconf automake zlib-devel  python-devel epel-release openssh-server socat  ipvsadm conntrack ntpdate

Configure synchronization time

# master,node
# Synchronization time
    yum install ntpdate -y
    ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
    echo 'Asia/Shanghai' >/etc/timezone
    ntpdate time2.aliyun.com
    
# Add a crontab entry
	*/5 * * * * /usr/sbin/ntpdate time2.aliyun.com
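The entry above can also be installed non-interactively instead of editing root's crontab by hand; a minimal sketch:

	# Append the ntpdate entry to root's crontab without opening an editor
	(crontab -l 2>/dev/null; echo "*/5 * * * * /usr/sbin/ntpdate time2.aliyun.com") | crontab -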

Upgrade kernel

# master,node
# Update the system
	yum update -y --exclude=kernel*
# Upload kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm and kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm to the other two nodes
	for i in k8s-node-1 k8s-node-2; do scp kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm root@$i:/root ;done
# Install the kernel
	yum localinstall -y kernel-ml*
# On all nodes, change the kernel boot order and enable the kernel user namespace
	grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
	grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"
# Reboot all nodes and check that the default kernel is 4.19
	grubby --default-kernel

Modify kernel parameters

# master,node
# Append the following to /etc/security/limits.conf
cat >> /etc/security/limits.conf <<EOF
* soft nofile 65536
* hard nofile 131072
* soft nproc 65535
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF

# master,node
# If you are not used to firewalld, you can install iptables instead
	yum install iptables-services -y
	service iptables stop && systemctl disable iptables
# Install the required packages
	yum install ipvsadm ipset sysstat conntrack libseccomp -y
# If IPVS is not enabled, kube-proxy falls back to iptables for packet forwarding, which is less efficient, so enabling IPVS is recommended by the official documentation
cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
systemctl enable --now systemd-modules-load.service

# master,node
# Enable the k8s kernel parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
sysctl --system
# Reboot and check whether the k8s parameters and modules took effect
	reboot
	lsmod | grep --color=auto -e ip_vs -e nf_conntrack

Install docker CE

# master,node
# Install docker CE
	yum install docker-ce.* -y
	mkdir /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://ornb7jit.mirror.aliyuncs.com"],
  "default-ipc-mode": "shareable"
}
EOF
systemctl daemon-reload && systemctl enable --now docker

Install cfssl

# Installed on the master node only (cfssl and cfssljson are used to issue all certificates)
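The binaries can be taken from the cloudflare/cfssl GitHub releases page; a minimal sketch, assuming version 1.6.1 and the usual release asset names (adjust the version and URLs to whatever is actually downloaded):

	# Download cfssl and cfssljson and make them executable
	wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64 -O /usr/bin/cfssl
	wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64 -O /usr/bin/cfssljson
	chmod +x /usr/bin/cfssl /usr/bin/cfssljson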

CA initialization

Note:

  1. All certificates are generated on the master node and then distributed to the node nodes
  2. etcd, apiserver and the API aggregation layer use three separate CA authorities. Generally, etcd, apiserver and the other components that communicate with apiserver can share one CA authority, while the API aggregation layer uses its own
# Create a folder for the certificates
	mkdir /root/ssl
# Create the CA signing configuration file
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF

Notes:

signing: indicates the certificate can be used to sign other certificates; in the generated ca.pem, CA=TRUE
server auth: the client can use the certificate to verify certificates presented by the server
client auth: the server can use the certificate to verify certificates presented by the client
ca-config.json: multiple profiles can be defined, each with its own expiry, usage scenario and other parameters; a specific profile is selected later when signing a certificate

etcd-ca

# Create the CA request file
cat > etcd-ca-csr.json <<EOF
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "ST": "hunan",
    "L": "changsha",
    "O": "k8s",
    "OU": "system"
  }]
}
EOF

Notes:

hosts: this field is empty, so any host can use etcd-ca.pem
CN: Common Name; kube-apiserver extracts this field from the certificate as the requesting user name; browsers use it to verify whether a website is legitimate, and for SSL certificates it is the domain name being applied for
C: country of the applicant, a two-letter country code only (CN for China)
L: Locality, i.e. the city
ST: State or province
O: Organization; kube-apiserver extracts this field from the certificate as the group/company the requesting user belongs to
OU: organizational unit, i.e. the department name
# Generate the CA certificate
[root@k8s-master-1 ssl]# cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare etcd-ca
2021/10/04 09:32:53 [INFO] generating a new CA key and certificate from CSR
2021/10/04 09:32:53 [INFO] generate received request
2021/10/04 09:32:53 [INFO] received CSR
2021/10/04 09:32:53 [INFO] generating key: rsa-2048
2021/10/04 09:32:53 [INFO] encoded CSR
2021/10/04 09:32:53 [INFO] signed certificate with serial number 465476681475479358683323025732390386015350218727
# View the generated files
[root@k8s-master-1 ssl]# ls etcd*
etcd-ca.csr  etcd-ca-csr.json  etcd-ca-key.pem  etcd-ca.pem

Notes:

  1. etcd-ca-key.pem is the generated private key
  2. etcd-ca.pem is the generated CA certificate; it will be used later to issue certificates
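To confirm the CA came out as expected (CA=TRUE, roughly ten-year validity), the certificate can be inspected with openssl; a quick check, assuming the files are in /root/ssl:

	# Show issuer, expiry and the basicConstraints CA flag
	openssl x509 -in etcd-ca.pem -noout -text | grep -E "Issuer|Not After|CA:"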

kube-apiserver-ca

# Create the CA request file
cat > kube-apiserver-ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "ST": "hunan",
    "L": "changsha",
    "O": "k8s",
    "OU": "system"
  }]
}
EOF
# Generate the CA certificate
	cfssl gencert -initca kube-apiserver-ca-csr.json | cfssljson -bare kube-apiserver-ca

front-proxy-ca

# Create the CA request file
cat > front-proxy-ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "ST": "hunan",
    "L": "changsha",
    "O": "k8s",
    "OU": "system"
  }]
}
EOF
# Generate the CA certificate
	cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare front-proxy-ca

Deploy etcd

Kubernetes uses etcd for data storage, so an etcd database must be prepared first. To avoid a single point of failure, etcd should be deployed as a cluster: a three-member cluster tolerates one failure and a five-member cluster tolerates two. Since this lab is a single node, a single-node etcd is used here.

Create etcd certificate

# Create the etcd certificate request file
cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.0.10"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "ST": "hunan",
    "L": "changsha",
    "O": "k8s",
    "OU": "system"
  }]
}
EOF
# Generate the certificate
	cfssl gencert -ca=etcd-ca.pem -ca-key=etcd-ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
# Note: hosts must contain the IP addresses of the machines running etcd

Create etcd configuration file

# Upload and unpack the etcd package, then move the binaries into place
	mv etcd etcdctl etcdutl /usr/bin
# Create the corresponding folders
	mkdir -p /etc/etcd/ssl
	mkdir -p /var/lib/etcd/default.etcd
# Create the etcd configuration file
cat > /etc/etcd/etcd.conf <<EOF
#[Member]
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.0.10:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.0.10:2379,http://127.0.0.1:2379"
ETCD_NAME="etcd1"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.0.10:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.0.10:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.0.10:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
# Create the startup service file
cat > /usr/lib/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/etc/etcd/etcd.conf
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \
    --cert-file=/etc/etcd/ssl/etcd.pem \
    --key-file=/etc/etcd/ssl/etcd-key.pem \
    --trusted-ca-file=/etc/etcd/ssl/etcd-ca.pem \
    --peer-cert-file=/etc/etcd/ssl/etcd.pem \
    --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
    --peer-trusted-ca-file=/etc/etcd/ssl/etcd-ca.pem \
    --peer-client-cert-auth \
    --client-cert-auth
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
# Move the certificates to the corresponding location
	cp etcd.pem etcd-key.pem etcd-ca.pem /etc/etcd/ssl/
# Start etcd
	systemctl enable etcd --now
# View the etcd cluster status
[root@k8s-master-1 ssl]# etcdctl --write-out=table --cacert=/etc/etcd/ssl/etcd-ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.0.10:2379 endpoint health
+---------------------------+--------+------------+-------+
|         ENDPOINT          | HEALTH |    TOOK    | ERROR |
+---------------------------+--------+------------+-------+
| https://192.168.0.10:2379 |   true | 6.250211ms |       |
+---------------------------+--------+------------+-------+

Notes:

ETCD_DATA_DIR						# Data directory
ETCD_LISTEN_PEER_URLS				# Cluster communication listening address
ETCD_LISTEN_CLIENT_URLS				# Client access listening address
ETCD_NAME							# Node name, unique within the cluster
ETCD_INITIAL_ADVERTISE_PEER_URLS	# Cluster advertise address
ETCD_ADVERTISE_CLIENT_URLS			# Client advertise address
ETCD_INITIAL_CLUSTER				# Cluster node addresses
ETCD_INITIAL_CLUSTER_TOKEN			# Cluster token
ETCD_INITIAL_CLUSTER_STATE			# State when joining the cluster: new for a new cluster, existing to join an existing one

--cert-file: path to the client-server TLS certificate; etcd presents this certificate to the apiserver for authentication
--key-file: path to the client-server TLS key, used to encrypt subsequent communication
--trusted-ca-file: path to the trusted client-server CA certificate; the certificate the apiserver presents when accessing etcd is verified against this CA
--peer-key-file: path to the peer TLS key, used for peer-to-peer traffic between etcd members
--peer-trusted-ca-file: path to the trusted peer CA certificate
--peer-client-cert-auth: enable peer client certificate authentication
--client-cert-auth: enable client certificate authentication
Note: normally etcd should use a server certificate and the apiserver should use an etcd client certificate; here the same certificate is used on both sides for easier maintenance
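Beyond endpoint health, a simple read/write smoke test confirms etcd is actually serving requests; a quick check using the same client certificate (the /test/key key is arbitrary):

	etcdctl --cacert=/etc/etcd/ssl/etcd-ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.0.10:2379 put /test/key "hello"
	etcdctl --cacert=/etc/etcd/ssl/etcd-ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.0.10:2379 get /test/key
	etcdctl --cacert=/etc/etcd/ssl/etcd-ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.0.10:2379 del /test/key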

Deploy apiserver

Upload k8s component

# Upload the kubernetes-server binary package (master)
	cp kube-apiserver kube-controller-manager kube-scheduler kubectl kubelet kube-proxy /usr/bin
	scp kubectl kube-proxy kubelet root@k8s-node-1:/usr/bin
	for i in root@k8s-node-1 root@k8s-node-2; do scp kubelet kube-proxy $i:/usr/bin; done
# Create the related directories (master,node)
	mkdir -p /etc/kubernetes/ssl
	mkdir -p /var/log/kubernetes

Create a token.csv file

# Format: token,user name,UID,user group. The kubelet-bootstrap user will be trusted by the api-server
cat > token.csv <<EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
# The system:kubelet-bootstrap group is built in

Note: token.csv is used later in the kubelet bootstrap process to automatically issue certificates to kubelet

Create apiserver certificate

# Create the apiserver certificate request file
cat > kube-apiserver-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.0.10",
    "10.0.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "hunan",
      "L": "changsha",
      "O": "k8s",
      "OU": "system"
    }
  ]
}
EOF
# hosts
	Fill in the IPs/VIPs of the hosts running the apiserver plus the first IP of the service network; the rest can follow the list above. node nodes use the bootstrap mechanism to have certificates issued automatically, so their IPs do not need to be listed
	Normally the hosts field contains the IPs of all Master/LB/VIP addresses
# Generate the certificate
	cfssl gencert -ca=kube-apiserver-ca.pem -ca-key=kube-apiserver-ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver

Create front-proxy-client certificate

# Create the apiaggregation certificate request file
cat > front-proxy-client-csr.json <<EOF
{
  "CN": "front-proxy-client",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "ST": "hunan",
    "L": "changsha",
    "O": "k8s",
    "OU": "system"
  }]
}
EOF
# Generate the certificate
	cfssl gencert -ca=front-proxy-ca.pem -ca-key=front-proxy-ca-key.pem -config=ca-config.json -profile=kubernetes front-proxy-client-csr.json | cfssljson -bare front-proxy-client

Create service public / private key

# Generate the private key
	openssl genrsa -out ./service.key 2048
# Generate the public key
	openssl rsa -in ./service.key -pubout -out ./service.pub

Note: this public/private key pair is used for signing and verifying service account tokens
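To double-check that service.pub really is the public half of service.key (a mismatch would make service account token verification fail later), the pair can be compared; a quick check:

	# Prints nothing if the two keys match
	diff <(openssl rsa -in service.key -pubout 2>/dev/null) service.pub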

Create apiserver configuration file

# Create the apiserver startup service file
cat > /usr/lib/systemd/system/kube-apiserver.service <<"EOF"
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=etcd.service
Wants=etcd.service

[Service]
ExecStart=/usr/bin/kube-apiserver \
    --enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota \
    --anonymous-auth=false \
    --bind-address=192.168.0.10 \
    --secure-port=6443 \
    --advertise-address=192.168.0.10 \
    --insecure-port=0 \
    --authorization-mode=Node,RBAC \
    --runtime-config=api/all=true \
    --enable-bootstrap-token-auth \
    --token-auth-file=/etc/kubernetes/token.csv \
    --service-cluster-ip-range=10.0.0.0/24 \
    --service-node-port-range=30000-50000 \
    --service-account-key-file=/etc/kubernetes/ssl/service.pub \
    --service-account-signing-key-file=/etc/kubernetes/ssl/service.key \
    --service-account-issuer=https://kubernetes.default.svc.cluster.local \
    --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \
    --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
    --client-ca-file=/etc/kubernetes/ssl/kube-apiserver-ca.pem \
    --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
    --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
    --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
    --etcd-certfile=/etc/etcd/ssl/etcd.pem \
    --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
    --etcd-servers=https://192.168.0.10:2379 \
    --requestheader-client-ca-file=/etc/kubernetes/ssl/front-proxy-ca.pem \
    --requestheader-allowed-names=front-proxy-client \
    --requestheader-extra-headers-prefix=X-Remote-Extra- \
    --requestheader-group-headers=X-Remote-Group \
    --requestheader-username-headers=X-Remote-User \
    --proxy-client-cert-file=/etc/kubernetes/ssl/front-proxy-client.pem \
    --proxy-client-key-file=/etc/kubernetes/ssl/front-proxy-client-key.pem \
    --enable-swagger-ui=true \
    --allow-privileged=true \
    --apiserver-count=1 \
    --audit-log-maxage=30 \
    --audit-log-maxbackup=3 \
    --audit-log-maxsize=100 \
    --audit-log-path=/var/log/kube-apiserver-audit.log \
    --event-ttl=1h \
    --alsologtostderr=true \
    --logtostderr=false \
    --log-dir=/var/log/kubernetes \
    --v=2
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
# Copy the certificates to the corresponding directory
	cp service.pub service.key kube-apiserver.pem kube-apiserver-key.pem kube-apiserver-ca.pem kube-apiserver-ca-key.pem front-proxy-client.pem front-proxy-client-key.pem front-proxy-ca.pem /etc/kubernetes/ssl/
	cp token.csv /etc/kubernetes
# Start the service
	systemctl daemon-reload
	systemctl enable kube-apiserver.service --now
# Check that it is running normally
	systemctl status kube-apiserver
# Access without a certificate
[root@k8s-master-1 ~]# curl -k https://192.168.0.10:6443
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {},
  "status": "Failure",
  "message": "Unauthorized",
  "reason": "Unauthorized",
  "code": 401
}
# The 401 above is normal. Accessing with the apiserver certificate below reports a certificate error; some posts suggest the combination of names fields in the CSR is the cause, but no definite reason has been found so far
	curl -v --cert /etc/kubernetes/ssl/kube-apiserver.pem --key /etc/kubernetes/ssl/kube-apiserver-key.pem --cacert /etc/kubernetes/ssl/kube-apiserver-ca.pem https://192.168.0.10:6443/healthz

Notes:

--enable-admission-plugins: admission plugins to enable in addition to those enabled by default
--anonymous-auth: allow anonymous requests to the apiserver secure port; requests not rejected by other authentication methods are treated as anonymous (user system:anonymous, group system:unauthenticated). Default: true
--bind-address: apiserver listening address
--secure-port: HTTPS port
--advertise-address: apiserver IP address advertised to cluster members; must be reachable by the members of the cluster
--insecure-port: whether HTTP access to the apiserver is allowed; 0 disables it
--authorization-mode: ordered list of authorization plugins for the secure port. Default: AlwaysAllow. Comma-separated list: AlwaysAllow,AlwaysDeny,ABAC,Webhook,RBAC,Node
--runtime-config: api/all=true enables all apiserver APIs
--enable-bootstrap-token-auth: allow secrets of type 'bootstrap.kubernetes.io/token' in the 'kube-system' namespace to be used for TLS bootstrap authentication
--service-cluster-ip-range: CIDR range used to allocate service cluster IPs; must not overlap any IP range assigned to pods. Default: 10.0.0.0/24
--service-node-port-range: port range reserved for NodePort services. Default: 30000-32767
--service-account-key-file: public key file used to verify ServiceAccount tokens; paired with --service-account-private-key-file of kube-controller-manager
--service-account-signing-key-file: path to the current private key of the service account token issuer; issued ID tokens are signed with this key. Requires the 'TokenRequest' feature gate
--service-account-issuer: identifier of the service account token issuer, placed in the "iss" claim of issued tokens; the value is a string or URL
--token-auth-file: if set, this file is used for token authentication on the apiserver secure port
--tls-cert-file: the certificate the apiserver presents, whether acting as server or client, to external components such as kubelet, etcd and proxy
--tls-private-key-file: the matching private key
--client-ca-file: CA used to verify the certificates of requesting clients (kube-controller-manager, kube-scheduler, kubelet, kube-proxy, etc.)
--kubelet-client-certificate: certificate the apiserver uses when accessing kubelet
--kubelet-client-key: private key of the certificate the apiserver uses when accessing kubelet
--etcd-cafile: CA that issued the etcd certificate; the apiserver uses it to verify the certificate presented by etcd
--etcd-certfile: certificate the apiserver sends to etcd for authentication when they communicate
--etcd-keyfile: private key the apiserver uses while negotiating encrypted communication with etcd
--etcd-servers: etcd IP addresses, comma separated if there is more than one
--requestheader-client-ca-file: CA used to verify the certificate presented by the aggregation proxy
--requestheader-allowed-names: list of client common names allowed to access; the client common name must be issued by client-ca-file. An empty value means any client may access
--requestheader-extra-headers-prefix: list of request header prefixes to check; X-Remote-Extra- is recommended
--requestheader-group-headers: group name headers to check in the request
--requestheader-username-headers: user name headers to check in the request
--proxy-client-cert-file: certificate the apiserver uses when accessing the aggregator
--proxy-client-key-file: private key the apiserver uses when accessing the aggregator
--enable-swagger-ui: enable swagger-ui on the apiserver
--allow-privileged: if true, allow privileged containers. Default: false
--apiserver-count: number of apiservers running in the cluster, must be a positive number. Default: 1; used when --endpoint-reconciler-type=master-count is enabled
--audit-log-maxage: maximum number of days to keep audit log files, based on the timestamp encoded in the file name
--audit-log-maxbackup: maximum number of audit log files to keep
--audit-log-maxsize: maximum size (in megabytes) of an audit log file before rotation
--audit-log-path: if set, all requests to the apiserver are recorded in this file; '-' means standard output
--event-ttl: event retention time. Default: 1h0m0s
--alsologtostderr: also write logs to standard error
--logtostderr: write logs to standard error instead of files. Default: true
--log-dir: if not empty, write log files to this directory
--v: log verbosity level
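Since --anonymous-auth=false, unauthenticated requests return 401 as shown above. Any authenticated identity, including the bootstrap token from token.csv, should be able to read /healthz (the built-in system:public-info-viewer role binding covers the system:authenticated group); a hedged quick check:

	# Expected output: ok (assuming the bootstrap token from token.csv is accepted)
	TOKEN=$(cut -d, -f1 /etc/kubernetes/token.csv)
	curl -k -H "Authorization: Bearer ${TOKEN}" https://192.168.0.10:6443/healthz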

Deploy kubectl

Create kubectl certificate

# Create the kubectl certificate request file
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "hunan",
      "L": "changsha",
      "O": "system:masters",
      "OU": "system"
    }
  ]
}
EOF
# Generate the certificate
	cfssl gencert -ca=kube-apiserver-ca.pem -ca-key=kube-apiserver-ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
# Place the certificate in the appropriate location
	cp admin*.pem /etc/kubernetes/ssl/

Notes:

  1. The built-in ClusterRoleBinding cluster-admin binds the Group system:masters to the ClusterRole cluster-admin (the role with maximum permissions), which grants permission to call all kube-apiserver APIs
  2. O specifies that the Group of the certificate is system:masters; it must be system:masters, otherwise later kubectl create clusterrolebinding commands will report an error

Create kubeconfig configuration file for kubectl

# Set cluster parameters
	kubectl config set-cluster kubernetes --certificate-authority=kube-apiserver-ca.pem --embed-certs=true --server=https://192.168.0.10:6443 --kubeconfig=kube.config
# Set client authentication parameters
	kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=kube.config
# Set context parameters
	kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config
# Set the default context
	kubectl config use-context kubernetes --kubeconfig=kube.config
# Copy to the default kubectl directory
	mkdir -p ~/.kube
	cp -i kube.config ~/.kube/config
# View the svc
[root@k8s-master-1 ssl]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP   52m
# Authorize the apiserver user to access kubelet. This user is declared in the CN field of the apiserver certificate; the apiserver needs this binding later to communicate with kubelet
	kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes
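Because the admin certificate carries O=system:masters, this kubeconfig should already have cluster-admin rights; a quick check:

	# Expected output: yes
	kubectl auth can-i '*' '*'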

Deploy Kube Controller Manager

Create Kube controller manager certificate

# Create the kube-controller-manager certificate request file
cat > kube-controller-manager-csr.json <<EOF
{
    "CN": "system:kube-controller-manager",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
      "127.0.0.1",
      "192.168.0.10"
    ],
    "names": [
      {
        "C": "CN",
        "ST": "hunan",
        "L": "changsha",
        "O": "system:kube-controller-manager",
        "OU": "system"
      }
    ]
}
EOF
# Generate the certificate
	cfssl gencert -ca=kube-apiserver-ca.pem -ca-key=kube-apiserver-ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager

Notes:

  1. CN is system:kube-controller-manager; the kubernetes built-in ClusterRoleBinding system:kube-controller-manager grants kube-controller-manager the permissions it needs to work
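The built-in binding can be inspected to confirm that the CN chosen above matches the subject kubernetes expects; a quick check:

	# The USERS column should show system:kube-controller-manager
	kubectl get clusterrolebinding system:kube-controller-manager -o wide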

Create kubeconfig for Kube Controller Manager

# Set cluster parameters
	kubectl config set-cluster kubernetes --certificate-authority=kube-apiserver-ca.pem --embed-certs=true --server=https://192.168.0.10:6443 --kubeconfig=kube-controller-manager.kubeconfig
# Set client authentication parameters
	kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig
# Set context parameters
	kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
# Set the default context
	kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

Create Kube controller manager configuration file

# Create the kube-controller-manager startup service file
cat > /usr/lib/systemd/system/kube-controller-manager.service <<"EOF"
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
ExecStart=/usr/bin/kube-controller-manager \
    --port=10252 \
    --secure-port=10257 \
    --bind-address=127.0.0.1 \
    --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
    --service-cluster-ip-range=10.0.0.0/24 \
    --cluster-name=kubernetes \
    --cluster-signing-cert-file=/etc/kubernetes/ssl/kube-apiserver-ca.pem \
    --cluster-signing-key-file=/etc/kubernetes/ssl/kube-apiserver-ca-key.pem \
    --cluster-signing-duration=87600h \
    --allocate-node-cidrs=true \
    --cluster-cidr=10.70.2.0/24 \
    --root-ca-file=/etc/kubernetes/ssl/kube-apiserver-ca.pem \
    --service-account-private-key-file=/etc/kubernetes/ssl/service.key \
    --use-service-account-credentials=true \
    --leader-elect=true \
    --feature-gates=RotateKubeletServerCertificate=true,RotateKubeletClientCertificate=true \
    --controllers=*,bootstrapsigner,tokencleaner \
    --tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \
    --tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
    --requestheader-client-ca-file=/etc/kubernetes/ssl/front-proxy-ca.pem \
    --requestheader-allowed-names=front-proxy-client \
    --requestheader-extra-headers-prefix=X-Remote-Extra- \
    --requestheader-group-headers=X-Remote-Group \
    --requestheader-username-headers=X-Remote-User \
    --horizontal-pod-autoscaler-use-rest-clients=true \
    --alsologtostderr=true \
    --logtostderr=false \
    --log-dir=/var/log/kubernetes \
    --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
# Copy the files
	cp kube-controller-manager*.pem /etc/kubernetes/ssl/
	cp kube-controller-manager.kubeconfig /etc/kubernetes/
# Start the service
	systemctl daemon-reload
	systemctl enable kube-controller-manager --now
# Check the running status of kube-controller-manager
[root@k8s-master-1 ssl]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                       ERROR
scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused
controller-manager   Healthy     ok
etcd-0               Healthy     {"health":"true","reason":""}

Notes:

--port: used for HTTP communication, default 10252; kubectl get cs queries the kube-controller-manager status over HTTP by default
--secure-port: used for HTTPS communication
--bind-address: since the component only talks to the apiserver, the listening address can be set to 127.0.0.1
--kubeconfig: path to the kubeconfig file; kube-controller-manager uses it to connect to and authenticate against kube-apiserver, and it contains the CA certificate
--service-cluster-ip-range: CIDR range used to allocate service cluster IPs; must not overlap any IP range assigned to pods. Default: 10.0.0.0/24
--cluster-name: prefix of the cluster instance. Default: "kubernetes"
--cluster-signing-cert-file: the component that actually issues bootstrap certificates is kube-controller-manager; for kubelet and the apiserver to trust each other, the same CA as the apiserver must be used, so kube-controller-manager issues kubelet certificates with this CA certificate
--cluster-signing-key-file: the matching private key
--cluster-signing-duration: validity of bootstrap-issued certificates, one year by default
--allocate-node-cidrs: whether pod CIDRs should be allocated and configured on the cloud provider
--cluster-cidr: CIDR range of the pods in the cluster; requires --allocate-node-cidrs to be true
--root-ca-file: if not empty, this root certificate authority is included in service account token secrets; the value must be a valid PEM-encoded CA certificate bundle
--service-account-private-key-file: the private key paired with --service-account-key-file of the apiserver
--use-service-account-credentials: if true, separate service account credentials are used for each controller
--leader-elect: perform leader election before running the main loop; enabling this when running multiple replicas improves availability
--feature-gates: a set of key=value pairs describing alpha/experimental feature gates. With RotateKubeletServerCertificate=true, kubelet automatically sends a CSR to renew its own certificate before it expires; the controller manager must also be started with --feature-gates=RotateKubeletServerCertificate=true and, together with the corresponding ClusterRoleBindings created later, the kubelet client and server certificates are then signed automatically
--controllers: list of controllers to enable. '*' enables all controllers that are enabled by default; 'foo' enables the controller named foo; '-foo' disables it. bootstrapsigner and tokencleaner are disabled by default, so they need to be added
--tls-cert-file / --tls-private-key-file: certificate and key kube-controller-manager uses when accessing the apiserver; must be issued by the same CA as the apiserver
--requestheader-*: see the apiserver notes
--alsologtostderr: also write logs to standard output while writing to files
--log-dir: log file directory
--v=2: logging level

Deploy Kube scheduler

Create Kube scheduler certificate

# Create the certificate request file
cat > kube-scheduler-csr.json <<EOF
{
    "CN": "system:kube-scheduler",
    "hosts": [
      "127.0.0.1",
      "192.168.0.10"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
      {
        "C": "CN",
        "ST": "hunan",
        "L": "changsha",
        "O": "system:kube-scheduler",
        "OU": "system"
      }
    ]
}
EOF
# Note: O is system:kube-scheduler; the kubernetes built-in ClusterRoleBinding system:kube-scheduler grants kube-scheduler the permissions it needs to work
# Generate the certificate
	cfssl gencert -ca=kube-apiserver-ca.pem -ca-key=kube-apiserver-ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler

Create kubeconfig for Kube scheduler

# Set cluster parameters
	kubectl config set-cluster kubernetes --certificate-authority=kube-apiserver-ca.pem --embed-certs=true --server=https://192.168.0.10:6443 --kubeconfig=kube-scheduler.kubeconfig
# Set client authentication parameters
	kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig
# Set context parameters
	kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
# Set the default context
	kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig

Create Kube scheduler configuration file

cat > /usr/lib/systemd/system/kube-scheduler.service <<"EOF"
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
ExecStart=/usr/bin/kube-scheduler \
    --address=127.0.0.1 \
    --port=10251 \
    --secure-port=10259 \
    --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
    --leader-elect=true \
    --alsologtostderr=true \
    --logtostderr=false \
    --log-dir=/var/log/kubernetes \
    --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
# Copy the files
	cp kube-scheduler*.pem /etc/kubernetes/ssl/
	cp kube-scheduler.kubeconfig /etc/kubernetes/
# Start the service
	systemctl daemon-reload
	systemctl enable kube-scheduler.service --now
# View service status
[root@k8s-master-1 ssl]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE                         ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true","reason":""}

Deploy kubelet

Note:

In this guide the master node also runs calico, coredns and other system components (as pods), so kubelet and kube-proxy must be deployed on the master node as well.

# Extract the bootstrap token
	BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /etc/kubernetes/token.csv)

Create kubeconfig for kubelet

# Set cluster parameters
	kubectl config set-cluster kubernetes --certificate-authority=kube-apiserver-ca.pem --embed-certs=true --server=https://192.168.0.10:6443 --kubeconfig=kubelet-bootstrap.kubeconfig
# Set client authentication parameters
	kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig
# Set context parameters
	kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
# Set the default context
	kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig

Create kubelet's configuration file

Note:

  1. "cgroupDriver": "systemd" should be consistent with the docker driver.
  2. Address is replaced by the IP address of the node running kubelet.
# k8s-master-1 node
cat > k8s-master-1-kubelet.json <<EOF
{
  "kind": "KubeletConfiguration",
  "apiVersion": "kubelet.config.k8s.io/v1beta1",
  "authentication": {
    "x509": {
      "clientCAFile": "/etc/kubernetes/ssl/kube-apiserver-ca.pem"
    },
    "webhook": {
      "enabled": true,
      "cacheTTL": "2m0s"
    },
    "anonymous": {
      "enabled": false
    }
  },
  "authorization": {
    "mode": "Webhook",
    "webhook": {
      "cacheAuthorizedTTL": "5m0s",
      "cacheUnauthorizedTTL": "30s"
    }
  },
  "address": "192.168.0.10",
  "port": 10250,
  "readOnlyPort": 10255,
  "cgroupDriver": "systemd",
  "hairpinMode": "promiscuous-bridge",
  "serializeImagePulls": false,
  "featureGates": {
    "RotateKubeletClientCertificate": true,
    "RotateKubeletServerCertificate": true
  },
  "clusterDomain": "cluster.local.",
  "clusterDNS": ["10.0.0.10"]
}
EOF
# k8s-node-1 node
cat > k8s-node-1-kubelet.json <<EOF
{
  "kind": "KubeletConfiguration",
  "apiVersion": "kubelet.config.k8s.io/v1beta1",
  "authentication": {
    "x509": {
      "clientCAFile": "/etc/kubernetes/ssl/kube-apiserver-ca.pem"
    },
    "webhook": {
      "enabled": true,
      "cacheTTL": "2m0s"
    },
    "anonymous": {
      "enabled": false
    }
  },
  "authorization": {
    "mode": "Webhook",
    "webhook": {
      "cacheAuthorizedTTL": "5m0s",
      "cacheUnauthorizedTTL": "30s"
    }
  },
  "address": "192.168.0.11",
  "port": 10250,
  "readOnlyPort": 10255,
  "cgroupDriver": "systemd",
  "hairpinMode": "promiscuous-bridge",
  "serializeImagePulls": false,
  "featureGates": {
    "RotateKubeletClientCertificate": true,
    "RotateKubeletServerCertificate": true
  },
  "clusterDomain": "cluster.local.",
  "clusterDNS": ["10.0.0.10"]
}
EOF
# k8s-node-2 node
cat > k8s-node-2-kubelet.json <<EOF
{
  "kind": "KubeletConfiguration",
  "apiVersion": "kubelet.config.k8s.io/v1beta1",
  "authentication": {
    "x509": {
      "clientCAFile": "/etc/kubernetes/ssl/kube-apiserver-ca.pem"
    },
    "webhook": {
      "enabled": true,
      "cacheTTL": "2m0s"
    },
    "anonymous": {
      "enabled": false
    }
  },
  "authorization": {
    "mode": "Webhook",
    "webhook": {
      "cacheAuthorizedTTL": "5m0s",
      "cacheUnauthorizedTTL": "30s"
    }
  },
  "address": "192.168.0.12",
  "port": 10250,
  "readOnlyPort": 10255,
  "cgroupDriver": "systemd",
  "hairpinMode": "promiscuous-bridge",
  "serializeImagePulls": false,
  "featureGates": {
    "RotateKubeletClientCertificate": true,
    "RotateKubeletServerCertificate": true
  },
  "clusterDomain": "cluster.local.",
  "clusterDNS": ["10.0.0.10"]
}
EOF
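Before starting kubelet it is worth confirming that docker really uses the systemd cgroup driver configured earlier in /etc/docker/daemon.json, since a mismatch with "cgroupDriver": "systemd" prevents kubelet from starting; a quick check on every node:

	# Should print: Cgroup Driver: systemd
	docker info 2>/dev/null | grep -i "cgroup driver"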

Create kubelet startup file

# Create the kubelet startup configuration, master node
cat > kubelet.service <<"EOF"
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/bin/kubelet \
    --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \
    --cert-dir=/etc/kubernetes/ssl \
    --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
    --config=/etc/kubernetes/kubelet.json \
    --network-plugin=cni \
    --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.6 \
    --feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \
    --rotate-certificates=true \
    --alsologtostderr=true \
    --logtostderr=false \
    --log-dir=/var/log/kubernetes \
    --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
# Move the related files
	mkdir -p /var/lib/kubelet
	cp kubelet.service /usr/lib/systemd/system/
	cp k8s-master-1-kubelet.json /etc/kubernetes/kubelet.json
	cp kubelet-bootstrap.kubeconfig /etc/kubernetes/
# Start the service
	systemctl daemon-reload
	systemctl enable kubelet --now

Create RBAC rules to automatically approve CSR

apiserver automatically creates two clusterroles:

  1. system:certificates.k8s.io:certificatesigningrequests:nodeclient
  2. system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
# Add one more ClusterRole
cat <<EOF | kubectl apply -f -
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeserver
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/selfnodeserver"]
  verbs: ["create"]
EOF
# Bind the ClusterRoles to the appropriate user group to complete automatic CSR approval. The group used here must correspond to the group in token.csv
# token.csv format: token,user name,UID,user group
fbecd7fb7d3c75efc7f8bd8c0896addf,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
# Allow users in the system:kubelet-bootstrap group to create CSR requests
	kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:kubelet-bootstrap
# Automatically approve the first certificate CSR request of TLS bootstrapping users in the system:kubelet-bootstrap group. In the clusterrolebindings kubelet-bootstrap and node-client-auto-approve-csr, --group=system:kubelet-bootstrap can be replaced with --user=kubelet-bootstrap to match token.csv
	kubectl create clusterrolebinding node-client-auto-approve-csr --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient --group=system:kubelet-bootstrap
# Automatically approve CSR requests from the system:nodes group to renew the kubelet client certificate used to talk to the apiserver
	kubectl create clusterrolebinding node-client-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient --group=system:nodes
# Automatically approve CSR requests from the system:nodes group to renew the kubelet 10250 API port (serving) certificate
	kubectl create clusterrolebinding node-server-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeserver --group=system:nodes
# View the CSRs; after the master node joins the cluster the certificate is issued automatically
[root@k8s-master-1 ~]# kubectl get csr
NAME                                                   AGE     SIGNERNAME                                    REQUESTOR           CONDITION
node-csr-7yhdBfn1JE3dUOPfvRLVkRzlljdgno9X0C_X0gqipzg   2m16s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued
# View node status
[root@k8s-master-1 ~]# kubectl get nodes
NAME           STATUS     ROLES    AGE    VERSION
k8s-master-1   NotReady   <none>   114s   v1.20.10

Node deployment Kubelet

# Distribute the relevant certificates and files to the node nodes
	for i in root@k8s-node-1 root@k8s-node-2; do scp kube-apiserver-ca.pem $i:/etc/kubernetes/ssl; scp kubelet.service $i:/usr/lib/systemd/system; scp kubelet-bootstrap.kubeconfig $i:/etc/kubernetes; done
	scp k8s-node-1-kubelet.json root@k8s-node-1:/etc/kubernetes/kubelet.json
	scp k8s-node-2-kubelet.json root@k8s-node-2:/etc/kubernetes/kubelet.json
# Start the service on the node nodes
	for i in root@k8s-node-1 root@k8s-node-2; do ssh $i "mkdir -p /etc/kubernetes/ssl; mkdir -p /var/lib/kubelet; mkdir -p /var/log/kubernetes; systemctl daemon-reload; systemctl enable kubelet --now;"; done
# View the nodes
[root@k8s-master-1 ssl]# kubectl get nodes
NAME           STATUS     ROLES    AGE   VERSION
k8s-master-1   NotReady   <none>   46m   v1.20.10
k8s-node-1     NotReady   <none>   13m   v1.20.10
k8s-node-2     NotReady   <none>   13m   v1.20.10

Deploy Kube proxy

Create Kube proxy certificate

# Create the certificate request file
cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
      "C": "CN",
      "ST": "hunan",
      "L": "changsha",
      "O": "system:kube-proxy",
      "OU": "system"
  }]
}
EOF
# Generate the certificate
	cfssl gencert -ca=kube-apiserver-ca.pem -ca-key=kube-apiserver-ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

Notes:

  1. CN: specifies that the User of the certificate is system:kube-proxy
  2. The predefined ClusterRoleBinding system:node-proxier binds the user system:kube-proxy to the ClusterRole system:node-proxier, which grants permission to call the kube-apiserver proxy-related APIs
  3. The certificate is only used by kube-proxy as a client certificate, so the hosts field is empty

Create kubeconfig for Kube proxy

# Set cluster parameters
	kubectl config set-cluster kubernetes --certificate-authority=kube-apiserver-ca.pem --embed-certs=true --server=https://192.168.0.10:6443 --kubeconfig=kube-proxy.kubeconfig
# Set client authentication parameters
	kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
# Set context parameters
	kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig
# Set the default context
	kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

Create Kube proxy configuration file

# Change the IPs to the IP of the node running kube-proxy
# Create the k8s-master-1 configuration file
cat > k8s-master-1-kube-proxy.yaml <<EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 192.168.0.10
clientConnection:
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
clusterCIDR: 192.168.0.0/24
healthzBindAddress: 192.168.0.10:10256
kind: KubeProxyConfiguration
metricsBindAddress: 192.168.0.10:10249
mode: "ipvs"
EOF
# Create the k8s-node-1 configuration file
cat > k8s-node-1-kube-proxy.yaml <<EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 192.168.0.11
clientConnection:
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
clusterCIDR: 192.168.0.0/24
healthzBindAddress: 192.168.0.11:10256
kind: KubeProxyConfiguration
metricsBindAddress: 192.168.0.11:10249
mode: "ipvs"
EOF
# Create the k8s-node-2 configuration file
cat > k8s-node-2-kube-proxy.yaml <<EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 192.168.0.12
clientConnection:
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
clusterCIDR: 192.168.0.0/24
healthzBindAddress: 192.168.0.12:10256
kind: KubeProxyConfiguration
metricsBindAddress: 192.168.0.12:10249
mode: "ipvs"
EOF

Create Kube proxy startup file

# Create the kube-proxy startup file
cat > kube-proxy.service <<"EOF"
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy.yaml \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
# Copy the files and create the relevant folders
	mkdir -p /var/lib/kube-proxy
	cp kube-proxy.service /usr/lib/systemd/system/
	cp k8s-master-1-kube-proxy.yaml /etc/kubernetes/kube-proxy.yaml
	cp kube-proxy.kubeconfig /etc/kubernetes/
# Start the service
	systemctl daemon-reload
	systemctl enable kube-proxy.service --now

Node deployment Kube proxy

	scp k8s-node-1-kube-proxy.yaml root@k8s-node-1:/etc/kubernetes/kube-proxy.yaml
	scp k8s-node-2-kube-proxy.yaml root@k8s-node-2:/etc/kubernetes/kube-proxy.yaml
	for i in root@k8s-node-1 root@k8s-node-2; do scp kube-proxy.service $i:/usr/lib/systemd/system/; scp kube-proxy.kubeconfig $i:/etc/kubernetes; ssh $i "mkdir -p /var/lib/kube-proxy; systemctl daemon-reload; systemctl enable kube-proxy.service --now"; done

Add cluster role

# View the current cluster status. Normally this would still be NotReady; it shows Ready here because calico has already been deployed. The ROLES column is still <none>
[root@k8s-master-1 ssl]# kubectl get nodes
NAME           STATUS   ROLES    AGE   VERSION
k8s-master-1   Ready    <none>   15h   v1.20.10
k8s-node-1     Ready    <none>   15h   v1.20.10
k8s-node-2     Ready    <none>   15h   v1.20.10
# Label k8s-master-1 as a master node
	kubectl label nodes k8s-master-1 node-role.kubernetes.io/master=
# Label k8s-node-* as work nodes
	kubectl label nodes k8s-node-1 node-role.kubernetes.io/node=
	kubectl label nodes k8s-node-2 node-role.kubernetes.io/node=
# A master node generally does not accept scheduling, apart from the necessary components
	kubectl taint nodes k8s-master-1 node-role.kubernetes.io/master=true:NoSchedule
	# Or allow the master node to accept scheduling as well
	kubectl taint nodes k8s-master-1 node-role.kubernetes.io/master-
# View the current status of the cluster
[root@k8s-master-1 ssl]# kubectl get nodes
NAME           STATUS   ROLES    AGE   VERSION
k8s-master-1   Ready    master   15h   v1.20.10
k8s-node-1     Ready    node     15h   v1.20.10
k8s-node-2     Ready    node     15h   v1.20.10
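The labels and taints applied above can be verified per node; a quick check:

	# Show the role labels and any taints on the master
	kubectl get nodes --show-labels | grep node-role
	kubectl describe node k8s-master-1 | grep -i taint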

Deploy calico

# Download the file
	curl -O https://docs.projectcalico.org/manifests/calico.yaml
# Change CALICO_IPV4POOL_CIDR to the pod CIDR and set the autodetection interface
	- name: CALICO_IPV4POOL_CIDR
	  value: "10.70.2.0/24"
	- name: IP_AUTODETECTION_METHOD
	  value: interface=ens33
# Run calico
	kubectl apply -f calico.yaml
[root@k8s-master-1 ssl]# kubectl get pods -A -o wide
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE     IP             NODE           NOMINATED NODE   READINESS GATES
kube-system   calico-kube-controllers-855445d444-jvxt8   1/1     Running   0          3m22s   10.70.2.65     k8s-node-2     <none>           <none>
kube-system   calico-node-44f94                          1/1     Running   0          3m23s   192.168.0.12   k8s-node-2     <none>           <none>
kube-system   calico-node-bvpdd                          1/1     Running   0          3m23s   192.168.0.11   k8s-node-1     <none>           <none>
kube-system   calico-node-g5p8g                          1/1     Running   0          3m23s   192.168.0.10   k8s-master-1   <none>           <none>
# View the master node routes before running calico
[root@k8s-master-1 ssl]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         192.168.0.2     0.0.0.0         UG    100    0        0 ens33
172.17.0.0      0.0.0.0         255.255.0.0     U     0      0        0 docker0
192.168.0.0     0.0.0.0         255.255.255.0   U     100    0        0 ens33
# View the master node ipvs information before running calico
[root@k8s-master-1 ssl]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.0.0.1:443 rr
  -> 192.168.0.10:6443            Masq    1      0          0
# View the master node routes after running calico
[root@k8s-master-1 ssl]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         192.168.0.2     0.0.0.0         UG    100    0        0 ens33
10.70.2.0       192.168.0.11    255.255.255.192 UG    0      0        0 tunl0
10.70.2.64      192.168.0.12    255.255.255.192 UG    0      0        0 tunl0
10.70.2.128     0.0.0.0         255.255.255.192 U     0      0        0 *
172.17.0.0      0.0.0.0         255.255.0.0     U     0      0        0 docker0
192.168.0.0     0.0.0.0         255.255.255.0   U     100    0        0 ens33
# Notes:
	As the output above shows, calico is what lets pods communicate across hosts. The calico component must run on every node so that the corresponding routes are generated on the host; when a pod on another host needs to be reached, the traffic follows these routes
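To confirm that calico really provides cross-host pod networking, two throwaway pods can be created and one pinged from the other; a minimal sketch (the busybox image and pod names are arbitrary, and the scheduler may place both pods on the same node, in which case delete and retry):

	kubectl run net-test-1 --image=busybox:1.28 -- sleep 3600
	kubectl run net-test-2 --image=busybox:1.28 -- sleep 3600
	kubectl get pods -o wide
	# Ping the IP of net-test-2 from net-test-1 (replace the address with the pod IP shown above)
	kubectl exec -it net-test-1 -- ping -c 3 10.70.2.66
	# Clean up
	kubectl delete pod net-test-1 net-test-2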

Deploy coredns

cat > coredns.yaml <<EOFapiVersion: v1kind: ServiceAccountmetadata:  name: coredns  namespace: kube-system---apiVersion: rbac.authorization.k8s.io/v1kind: ClusterRolemetadata:  labels:    kubernetes.io/bootstrapping: rbac-defaults  name: system:corednsrules:  - apiGroups:    - ""    resources:    - endpoints    - services    - pods    - namespaces    verbs:    - list    - watch  - apiGroups:    - discovery.k8s.io    resources:    - endpointslices    verbs:    - list    - watch---apiVersion: rbac.authorization.k8s.io/v1kind: ClusterRoleBindingmetadata:  annotations:    rbac.authorization.kubernetes.io/autoupdate: "true"  labels:    kubernetes.io/bootstrapping: rbac-defaults  name: system:corednsroleRef:  apiGroup: rbac.authorization.k8s.io  kind: ClusterRole  name: system:corednssubjects:- kind: ServiceAccount  name: coredns  namespace: kube-system---apiVersion: v1kind: ConfigMapmetadata:  name: coredns  namespace: kube-systemdata:  Corefile: |    .:53 {        errors        health {          lameduck 5s        }        ready        kubernetes cluster.local in-addr.arpa ip6.arpa {          fallthrough in-addr.arpa ip6.arpa        }        prometheus :9153        forward . /etc/resolv.conf {          max_concurrent 1000        }        cache 30        loop        reload        loadbalance    }---apiVersion: apps/v1kind: Deploymentmetadata:  name: coredns  namespace: kube-system  labels:    k8s-app: kube-dns    kubernetes.io/name: "CoreDNS"spec:  # replicas: not specified here:  # 1. Default is 1.  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.  strategy:    type: RollingUpdate    rollingUpdate:      maxUnavailable: 1  selector:    matchLabels:      k8s-app: kube-dns  template:    metadata:      labels:        k8s-app: kube-dns    spec:      priorityClassName: system-cluster-critical      serviceAccountName: coredns      tolerations:        - key: "CriticalAddonsOnly"          operator: "Exists"      nodeSelector:        kubernetes.io/os: linux      affinity:         podAntiAffinity:           preferredDuringSchedulingIgnoredDuringExecution:           - weight: 100             podAffinityTerm:               labelSelector:                 matchExpressions:                   - key: k8s-app                     operator: In                     values: ["kube-dns"]               topologyKey: kubernetes.io/hostname      containers:      - name: coredns        image: coredns/coredns:1.7.0        imagePullPolicy: IfNotPresent        resources:          limits:            memory: 170Mi          requests:            cpu: 100m            memory: 70Mi        args: [ "-conf", "/etc/coredns/Corefile" ]        volumeMounts:        - name: config-volume          mountPath: /etc/coredns          readOnly: true        ports:        - containerPort: 53          name: dns          protocol: UDP        - containerPort: 53          name: dns-tcp          protocol: TCP        - containerPort: 9153          name: metrics          protocol: TCP        securityContext:          allowPrivilegeEscalation: false          capabilities:            add:            - NET_BIND_SERVICE            drop:            - all          readOnlyRootFilesystem: true        livenessProbe:          httpGet:            path: /health            port: 8080            scheme: HTTP          initialDelaySeconds: 60          timeoutSeconds: 5          successThreshold: 1          failureThreshold: 5        readinessProbe:          httpGet:            path: /ready            port: 8181            scheme: HTTP      
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.0.0.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
EOF

# To run coredns only on the master node, change the pod spec in the Deployment above to:
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
        - key: "node-role.kubernetes.io/master"
          operator: "Exists"
      nodeName: k8s-master-1

# View the pods
[root@k8s-master-1 ~]# kubectl get pods -A -o wide
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE     IP             NODE           NOMINATED NODE   READINESS GATES
kube-system   calico-kube-controllers-855445d444-6glmm   1/1     Running   0          19m     10.70.2.1      k8s-node-1     <none>           <none>
kube-system   calico-node-6pkz6                          1/1     Running   0          19m     192.168.0.10   k8s-master-1   <none>           <none>
kube-system   calico-node-8nz7s                          1/1     Running   0          19m     192.168.0.12   k8s-node-2     <none>           <none>
kube-system   calico-node-z7pwc                          1/1     Running   0          19m     192.168.0.11   k8s-node-1     <none>           <none>
kube-system   coredns-6f4c9cb7c5-hj9bj                   1/1     Running   0          5m48s   10.70.2.66     k8s-master-1   <none>           <none>
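The pod listing above implies the manifest was applied before checking the pods; a minimal sketch of that step, assuming the heredoc above was saved as coredns.yaml on k8s-master-1:

# master
# Apply the CoreDNS manifest and confirm the Deployment and the kube-dns service exist
	kubectl apply -f coredns.yaml
	kubectl get pods -n kube-system -l k8s-app=kube-dns -o wide
	kubectl get svc -n kube-system kube-dns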

Deploy metrics-server

# Create the metrics-server manifest. Here it is pinned to run only on k8s-master-1; adjust this to suit your own cluster.
cat > metrics-server.yaml <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      nodeName: k8s-master-1
      tolerations:
      - key: "node-role.kubernetes.io/master"
        operator: "Exists"
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --metric-resolution=30s
        - --kubelet-insecure-tls
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --requestheader-client-ca-file=/etc/kubernetes/ssl/front-proxy-ca.pem
        - --requestheader-username-headers=X-Remote-User
        - --requestheader-group-headers=X-Remote-Group
        - --requestheader-extra-headers-prefix=X-Remote-Extra-
        image: registry.aliyuncs.com/google_containers/metrics-server:v0.4.1
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 4443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          periodSeconds: 10
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
        - name: ca-ssl
          mountPath: /etc/kubernetes/ssl
      volumes:
      - emptyDir: {}
        name: tmp-dir
      - name: ca-ssl
        hostPath:
          path: /etc/kubernetes/ssl
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100
EOF

# Get node metrics
[root@k8s-master-1 ~]# kubectl top nodes
NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
k8s-master-1   146m         7%     1352Mi          35%
k8s-node-1     84m          4%     726Mi           25%
k8s-node-2     78m          3%     651Mi           22%

# Check that the pods are running
[root@k8s-master-1 ~]# kubectl get pods -A -o wide
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE    IP             NODE           NOMINATED NODE   READINESS GATES
kube-system   calico-kube-controllers-855445d444-6glmm   1/1     Running   0          88m    10.70.2.1      k8s-node-1     <none>           <none>
kube-system   calico-node-6pkz6                          1/1     Running   0          88m    192.168.0.10   k8s-master-1   <none>           <none>
kube-system   calico-node-8nz7s                          1/1     Running   0          88m    192.168.0.12   k8s-node-2     <none>           <none>
kube-system   calico-node-z7pwc                          1/1     Running   0          88m    192.168.0.11   k8s-node-1     <none>           <none>
kube-system   coredns-6f4c9cb7c5-hj9bj                   1/1     Running   0          74m    10.70.2.66     k8s-master-1   <none>           <none>
kube-system   metrics-server-68bdbcc6b-gk6cq             1/1     Running   0          6m5s   10.70.2.68     k8s-master-1   <none>           <none>
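As with CoreDNS, the apply step is implied by the output above; a minimal sketch, assuming the heredoc was saved as metrics-server.yaml, that also checks the aggregated API is registered before running kubectl top:

# master
# Apply the manifest, then confirm the v1beta1.metrics.k8s.io APIService reports Available before querying metrics
	kubectl apply -f metrics-server.yaml
	kubectl get apiservice v1beta1.metrics.k8s.io
	kubectl get pods -n kube-system -l k8s-app=metrics-server
	kubectl top pods -A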

Test cluster network

Note: use busybox 1.28 for these tests; nslookup in the latest busybox images has a known bug that breaks service name resolution.

Create test pods

cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox-1
  namespace: default
spec:
  nodeSelector:
    node-role.kubernetes.io/master: ""
  tolerations:
  - key: node-role.kubernetes.io/master
    operator: Exists
  containers:
  - name: busybox
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command:
    - sleep
    - "86400"
  restartPolicy: OnFailure
---
apiVersion: v1
kind: Pod
metadata:
  name: busybox-2
  namespace: default
spec:
  nodeSelector:
    node-role.kubernetes.io/node: ""
  containers:
  - name: busybox
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command:
    - sleep
    - "86400"
  restartPolicy: OnFailure
EOF

# View the existing services
[root@k8s-master-1 ssl]# kubectl get svc -A
NAMESPACE     NAME             TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
default       kubernetes       ClusterIP   10.0.0.1     <none>        443/TCP                  45h
kube-system   kube-dns         ClusterIP   10.0.0.10    <none>        53/UDP,53/TCP,9153/TCP   9h
kube-system   metrics-server   ClusterIP   10.0.0.168   <none>        443/TCP                  8h

# Check that both test pods are running
[root@k8s-master-1 ssl]# kubectl get pods -A -o wide
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE   IP             NODE           NOMINATED NODE   READINESS GATES
default       busybox-1                                  1/1     Running   0          18s   10.70.2.73     k8s-master-1   <none>           <none>
default       busybox-2                                  1/1     Running   0          18s   10.70.2.130    k8s-node-2     <none>           <none>
kube-system   calico-kube-controllers-855445d444-6glmm   1/1     Running   1          10h   10.70.2.2      k8s-node-1     <none>           <none>
kube-system   calico-node-6pkz6                          1/1     Running   1          10h   192.168.0.10   k8s-master-1   <none>           <none>
kube-system   calico-node-8nz7s                          1/1     Running   1          10h   192.168.0.12   k8s-node-2     <none>           <none>
kube-system   calico-node-z7pwc                          1/1     Running   1          10h   192.168.0.11   k8s-node-1     <none>           <none>
kube-system   coredns-6f4c9cb7c5-hj9bj                   1/1     Running   1          10h   10.70.2.69     k8s-master-1   <none>           <none>
kube-system   metrics-server-68bdbcc6b-gk6cq             1/1     Running   1          9h    10.70.2.70     k8s-master-1   <none>           <none>
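The two pods rely on the node-role.kubernetes.io/master and node-role.kubernetes.io/node labels for scheduling. If those labels were not added earlier in your cluster, the pods will stay Pending; a hedged sketch of adding them (skip if the labels already exist):

# master (only needed if the test pods stay Pending because the role labels are missing)
	kubectl label node k8s-master-1 node-role.kubernetes.io/master= --overwrite
	kubectl label node k8s-node-1 node-role.kubernetes.io/node= --overwrite
	kubectl label node k8s-node-2 node-role.kubernetes.io/node= --overwrite
	kubectl get nodes --show-labels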

Test service resolution from a pod

# Resolve a service in the same namespace
[root@k8s-master-1 ssl]# kubectl exec busybox-1 -- nslookup kubernetes
Server:    10.0.0.10
Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.0.0.1 kubernetes.default.svc.cluster.local

# Resolve a service in another namespace
[root@k8s-master-1 ssl]# kubectl exec busybox-1 -- nslookup kube-dns.kube-system
Server:    10.0.0.10
Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kube-dns.kube-system
Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local
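The Corefile above forwards any name outside cluster.local to the node's /etc/resolv.conf; a hedged extra check that out-of-cluster names also resolve (assuming the node has a working upstream DNS server and using www.aliyun.com only as an example domain):

# master
	kubectl exec busybox-1 -- nslookup www.aliyun.com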

Test node access to the kubernetes svc

# Test from each node (master output shown)
[root@k8s-master-1 ~]# telnet 10.0.0.1 443
Trying 10.0.0.1...
Connected to 10.0.0.1.
Escape character is '^]'.
^C
Connection closed by foreign host.

[root@k8s-master-1 ~]# telnet 10.0.0.10 53
Trying 10.0.0.10...
Connected to 10.0.0.10.
Escape character is '^]'.
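Only the master output is shown above; a sketch of the same checks on the two workers, assuming telnet is installed there (yum install -y telnet if it is not). The output should match the master's:

# node-1, node-2
	telnet 10.0.0.1 443
	telnet 10.0.0.10 53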

Test communication between pods

[root@k8s-master-1 ssl]# kubectl exec busybox-1 -- ping 10.70.2.130
PING 10.70.2.130 (10.70.2.130): 56 data bytes
64 bytes from 10.70.2.130: seq=0 ttl=62 time=0.603 ms
64 bytes from 10.70.2.130: seq=1 ttl=62 time=0.451 ms
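The ping above goes from the pod on the master to the pod on k8s-node-2; a hedged check in the opposite direction, using busybox-1's IP (10.70.2.73) from the pod listing earlier:

# master
	kubectl exec busybox-2 -- ping -c 2 10.70.2.73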

Note: these tests confirm that

  1. Pods can communicate within the same namespace
  2. Pods can communicate across namespaces
  3. Pods can communicate across machines
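Once the checks pass, the busybox test pods are no longer needed; a minimal cleanup sketch:

# master
	kubectl delete pod busybox-1 busybox-2 -n default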
