Build LB load balancing and keepalived

1, Environment optimization
LB1

[root@localhost ~]# hostnamectl set-hostname lb1
[root@localhost ~]# su        //re-login so the new hostname shows in the prompt
[root@lb1 ~]# systemctl stop NetworkManager
//Stop the NetworkManager service
[root@lb1 ~]# systemctl disable NetworkManager
Removed symlink /etc/systemd/system/multi-user.target.wants/NetworkManager.service.
Removed symlink /etc/systemd/system/dbus-org.freedesktop.nm-dispatcher.service.
Removed symlink /etc/systemd/system/network-online.target.wants/NetworkManager-wait-online.service.
[root@lb1 ~]# setenforce 0        //put SELinux into permissive mode for the current session
[root@lb1 ~]# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
[root@lb1 ~]# iptables -F        //flush the firewall rules

LB2

[root@localhost ~]# hostnamectl set-hostname lb2
[root@localhost ~]# su
[root@lb2 ~]# systemctl stop NetworkManager
[root@lb2 ~]# systemctl disable NetworkManager
Removed symlink /etc/systemd/system/multi-user.target.wants/NetworkManager.service.
Removed symlink /etc/systemd/system/dbus-org.freedesktop.nm-dispatcher.service.
Removed symlink /etc/systemd/system/network-online.target.wants/NetworkManager-wait-online.service.
[root@lb2 ~]# setenforce 0
[root@lb2 ~]# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
[root@lb2 ~]# iptables -F
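
To confirm the optimization took effect, a quick check can be run on each LB node (a minimal sketch, not part of the original steps; getenforce reports Permissive until the next reboot, after which the config change keeps SELinux disabled):

[root@lb1 ~]# getenforce                            #should print Permissive (Disabled after a reboot)
[root@lb1 ~]# systemctl is-enabled NetworkManager   #should print disabled
[root@lb1 ~]# iptables -L -n                        #chains should be empty after the flush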

2. Install nginx on both LB servers

[root@lb1 ~]# echo -e '[nginx]\nname=nginx.repo\nbaseurl=http://nginx.org/packages/centos/7/$basearch/\ngpgcheck=0' > /etc/yum.repos.d/nginx.repo
[root@lb1 ~]# yum makecache
[root@lb1 ~]# yum install nginx -y
[root@lb2 ~]# echo -e '[nginx]\nname=nginx.repo\nbaseurl=http://nginx.org/packages/centos/7/$basearch/\ngpgcheck=0' > /etc/yum.repos.d/nginx.repo
[root@lb2 ~]# yum makecache
[root@lb2 ~]# yum install nginx -y
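
A quick way to confirm which nginx build was installed and that it came from the nginx.org repo (an optional check, not in the original):

[root@lb1 ~]# nginx -v                            #prints the installed nginx version
[root@lb1 ~]# yum repolist enabled | grep nginx   #the nginx repo should be listed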

3. Add a layer-4 (stream) forwarding upstream to both LB servers
Take LB1 as an example

[root@lb1 ~]# vim /etc/nginx/nginx.conf 
events {
    worker_connections  1024;
}
stream {
    log_format  main    '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log /var/log/nginx/k8s-access.log main;

    upstream k8s-apiserver {
        server 192.168.191.134:6443;
        server 192.168.191.133:6443;
        #Two master addresses, apiserver port number 6443
    }
    server {
        listen 6443;
        proxy_pass k8s-apiserver;
    }
}

http {    #the existing http block of the default config follows; leave it unchanged

[root@lb1 ~]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful

4. Start nginx service (take LB1 as an example)

[root@lb1 ~]# systemctl start nginx
[root@lb1 ~]# systemctl status nginx
● nginx.service - nginx - high performance web server
   Loaded: loaded (/usr/lib/systemd/system/nginx.service; disabled; vendor preset: disabled)
   Active: active (running) since Sat 2020-05-03 13:02:50 CST; 5s ago
     Docs: http://nginx.org/en/docs/
  Process: 29485 ExecStart=/usr/sbin/nginx -c /etc/nginx/nginx.conf (code=exited, status=0/SUCCESS)
 Main PID: 29488 (nginx)
    Tasks: 2
   CGroup: /system.slice/nginx.service
           ├─29488 nginx: master process /usr/sbin/nginx -c /etc/nginx/nginx.conf
           └─29489 nginx: worker process

May 03 13:02:50 lb1 systemd[1]: Starting nginx - high performance web server...
May 03 13:02:50 lb1 systemd[1]: Started nginx - high performance web server.
[root@lb1 ~]# systemctl enable nginx
Created symlink from /etc/systemd/system/multi-user.target.wants/nginx.service to /usr/lib/systemd/system/nginx.service.

5. Local verification
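
The original leaves this step empty; a minimal local check is to confirm that nginx is listening on port 6443 on both LB nodes, for example:

[root@lb1 ~]# ss -lntp | grep 6443        #nginx should be listening on 0.0.0.0:6443
[root@lb2 ~]# ss -lntp | grep 6443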

2, Deploy keepalived
Take LB1 as an example
1. Install keepalived

[root@lb1 ~]# yum install keepalived -y

2. Modify the keepalived configuration file

[root@lb1 ~]# mkdir /abc
[root@lb1 ~]# mount.cifs //192.168.0.88/linuxs /abc
Password for root@//192.168.0.88/linuxs:  
[root@lb1 ~]# cp /abc/k8s/keepalived.conf /etc/keepalived/keepalived.conf 
cp: overwrite '/etc/keepalived/keepalived.conf'? y
[root@lb1 ~]# vim /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   # Receiving email address
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   # Mailing address
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id NGINX_MASTER
}

vrrp_script check_nginx {
    script "/etc/check_nginx.sh"        #This profile will be edited later
}   

vrrp_instance VI_1 {
    state MASTER
    interface ens32         # Physical network interface to bind to
    virtual_router_id 51    # VRRP router ID of this instance
    priority 100            # Priority; set 90 on the backup server
    advert_int 1            # VRRP heartbeat advertisement interval, 1 second by default
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.191.135/24  #Specify virtual IP
    }
    track_script {          #Monitoring script
        check_nginx
    }
}

On LB2 the virtual_router_id is different, the state is BACKUP and the priority is 90; everything else is the same:

vrrp_instance VI_1 {
    state BACKUP
    interface ens32
    virtual_router_id 52
    priority 90
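
One way to produce the LB2 file is to copy the LB1 configuration over and change only the three values above (a sketch, assuming LB2 is reachable at 192.168.191.132, the address shown for lb2 later in this article):

[root@lb1 ~]# scp /etc/keepalived/keepalived.conf root@192.168.191.132:/etc/keepalived/
[root@lb2 ~]# sed -i 's/state MASTER/state BACKUP/; s/virtual_router_id 51/virtual_router_id 52/; s/priority 100/priority 90/' /etc/keepalived/keepalived.conf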

3. Write the nginx check script

[root@lb1 ~]# vim /etc/nginx/check_nginx.sh

#!/bin/bash
# Count running nginx processes; if nginx is down, stop keepalived so the VIP fails over
count=$(ps -ef |grep nginx |egrep -cv "grep|$$")
if [ "$count" -eq 0 ];then
    /etc/init.d/keepalived stop
fi
[root@lb1 ~]# chmod +x /etc/nginx/check_nginx.sh
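
Two things worth noting here, neither stated in the original: on CentOS 7 the keepalived package normally ships a systemd unit rather than an init script, so systemctl stop keepalived is the equivalent stop command; and LB2 needs the same script. A rough sketch:

[root@lb1 ~]# bash /etc/nginx/check_nginx.sh       #with nginx running this does nothing and exits 0
[root@lb1 ~]# scp /etc/nginx/check_nginx.sh root@192.168.191.132:/etc/nginx/
[root@lb2 ~]# chmod +x /etc/nginx/check_nginx.sh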

4. Start the keepalived service on LB1 and LB2 (LB2 in BACKUP state)

[root@lb1 ~]# systemctl start keepalived.service 
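
The same command is run on LB2, and both nodes can be set to start keepalived at boot (not shown in the original):

[root@lb2 ~]# systemctl start keepalived.service
[root@lb1 ~]# systemctl enable keepalived.service
[root@lb2 ~]# systemctl enable keepalived.service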

5. Use the ip a command to view the virtual IP

[root@lb1 ~]# ip a
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:ef:81:b6 brd ff:ff:ff:ff:ff:ff
    inet 192.168.191.130/24 brd 192.168.191.255 scope global noprefixroute dynamic ens32
       valid_lft 5355394sec preferred_lft 5355394sec
    inet 192.168.191.135/24 scope global secondary ens32

3, Point the apiserver address on the k8s node nodes to the VIP
1. Switch the node nodes from the master address to the VIP

[root@node01 ~]# cd /k8s/cfg/
[root@node01 cfg]# ls
bootstrap.kubeconfig  kubelet.config      kube-proxy
kubelet               kubelet.kubeconfig  kube-proxy.kubeconfig
[root@node01 cfg]# vim bootstrap.kubeconfig 
    server: https://192.168.191.133:6443
[root@node01 cfg]# vim kubelet.kubeconfig 
    server: https://192.168.191.133:6443
[root@node01 cfg]# vim kube-proxy.kubeconfig 
    server: https://192.168.191.133:6443
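
Editing three files by hand is error-prone; the same change can be applied with a single sed command (a sketch, where OLD_APISERVER_IP stands for whatever single-master address the files currently contain):

[root@node01 cfg]# sed -i 's#https://OLD_APISERVER_IP:6443#https://192.168.191.133:6443#' bootstrap.kubeconfig kubelet.kubeconfig kube-proxy.kubeconfig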

2. Restart kubelet and proxy services

[root@node01 cfg]# systemctl restart kubelet.service 
[root@node01 cfg]# systemctl restart kube-proxy.service 
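
A quick way to confirm the node is still healthy after the switch is to check the services locally and the node status from a master (not part of the original transcript):

[root@node01 cfg]# systemctl is-active kubelet.service kube-proxy.service   #both should report active
[root@master1 ~]# kubectl get nodes                                         #the nodes should stay Ready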

3. Self-check that the replacement is complete

[root@node01 cfg]# grep 6443 *
bootstrap.kubeconfig:    server: https://192.168.191.133:6443
kubelet.kubeconfig:    server: https://192.168.191.133:6443
kube-proxy.kubeconfig:    server: https://192.168.191.133:6443

4. Verify that the apiserver VIP fails over
First stop nginx on lb1, then check whether the virtual IP has moved to lb2. When the check script detects that nginx is down, it stops keepalived on lb1 automatically.

[root@lb1 ~]# pkill nginx
[root@lb1 ~]# ps -ef |grep nginx |egrep -cv "grep|$$"
0
[root@lb1 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:ef:81:b6 brd ff:ff:ff:ff:ff:ff
    inet 192.168.191.130/24 brd 192.168.191.255 scope global noprefixroute dynamic ens32
       valid_lft 5354179sec preferred_lft 5354179sec
    inet6 fe80::d8f:d3dc:3ef7:446/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever

At this point the VIP is no longer on LB1. Check LB2 again:

[root@lb2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:df:af:4e brd ff:ff:ff:ff:ff:ff
    inet 192.168.191.132/24 brd 192.168.247.255 scope global noprefixroute dynamic ens32
       valid_lft 5354144sec preferred_lft 5354144sec
    inet 192.168.191.133/24 scope global secondary ens33

5. Restart nginx on LB1 and check that the VIP returns to LB1

[root@lb1 ~]# systemctl restart nginx
[root@lb1 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:ef:81:b6 brd ff:ff:ff:ff:ff:ff
    inet 192.168.191.132/24 brd 192.168.191.255 scope global noprefixroute dynamic ens32
       valid_lft 5354038sec preferred_lft 5354038sec
    inet 192.168.247.100/24 scope global secondary ens33
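
If the VIP does not reappear on LB1 automatically, it is usually because the check script stopped keepalived when nginx went down; starting keepalived again on LB1 restores the MASTER role (a hedged sketch):

[root@lb1 ~]# systemctl start keepalived.service
[root@lb1 ~]# ip addr show dev ens32          #the VIP should be listed as a secondary address again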

4, Create a pod to test the cluster
1. At this point, the docker status on the node nodes is as follows
node1

[root@node01 cfg]# docker ps -a
CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
39f034a2f24e        centos:7            "/bin/bash"         3 days ago          Up 3 days                               beautiful_jennings
[root@node01 cfg]# docker images
REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
centos              7                   5e35e350aded        5 months ago        203MB

node2

[root@node02 cfg]# docker images
REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
centos              7                   5e35e350aded        5 months ago        203MB
[root@node02 cfg]# docker ps -a
CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
fea29d0ff39b        centos:7            "/bin/bash"         3 days ago 

2. Use kubectl to create a pod
Run a specified image in the cluster:

[root@master1 cfg]# kubectl run nginx --image=nginx
kubectl run --generator=deployment/apps.v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl create instead.
deployment.apps/nginx created
[root@master1 cfg]# kubectl get pods
NAME                    READY   STATUS    RESTARTS   AGE
nginx-dbddb74b8-sx4m6   1/1     Running   0          49s

Before the pod reaches the Running state, it passes through a ContainerCreating state.
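
The transition can be watched live with the -w flag (an extra illustration, not from the original run):

[root@master1 cfg]# kubectl get pods -w       #shows ContainerCreating first, then Running once the image is pulled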

For reference, kubectl's built-in help lists every available command:

[root@master1 cfg]# kubectl --help
kubectl controls the Kubernetes cluster manager.

Find more information at: https://kubernetes.io/docs/reference/kubectl/overview/

Basic Commands (Beginner):
  create         Create a resource from a file or from stdin
  expose         Take a replication controller, service, deployment or pod and expose it as a new Kubernetes Service
  run            Run a particular image on the cluster
  set            Set specific features on objects

Basic Commands (Intermediate):
  explain        Documentation of resources
  get            Display one or many resources
  edit           Edit a resource on the server
  delete         Delete resources by filenames, stdin, resources and names, or by resources and label selector

Deploy Commands:
  rollout        Manage the rollout of a resource
  scale          Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job
  autoscale      Auto-scale a Deployment, ReplicaSet, or ReplicationController

Cluster Management Commands:
  certificate    Modify certificate resources
  cluster-info   Display cluster info
  top            Display Resource (CPU/Memory/Storage) usage
  cordon         Mark node as unschedulable
  uncordon       Mark node as schedulable
  drain          Drain node in preparation for maintenance
  taint          Update the taints on one or more nodes

Troubleshooting and Debugging Commands:
  describe       Show details of a specific resource or group of resources
  logs           Print the logs for a container in a pod
  attach         Attach to a running container
  exec           Execute a command in a container
  port-forward   Forward one or more local ports to a pod
  proxy          Run a proxy to the Kubernetes API server
  cp             Copy files and directories to and from containers
  auth           Inspect authorization

Advanced Commands:
  apply          Apply a configuration to a resource by filename or stdin
  patch          Update field(s) of a resource using a strategic merge patch
  replace        Replace a resource by filename or stdin
  wait           Experimental: Wait for a specific condition on one or many resources.
  convert        Convert config files between different API versions

Settings Commands:
  label          Update the labels on a resource
  annotate       Update the annotations on a resource
  completion     Output shell completion code for the specified shell (bash or zsh)

Other Commands:
  alpha          Commands for features in alpha
  api-resources  Print the supported API resources on the server
  api-versions   Print the supported API versions on the server, in the form of "group/version"
  config         Modify kubeconfig files
  plugin         Provides utilities for interacting with plugins
  version        Print the client and server version information

Usage:
  kubectl [flags] [options]

Use "kubectl <command> --help" for more information about a given command.
Use "kubectl options" for a list of global command-line options (applies to all commands).

3. Check the pod's network. The output also shows which node the pod was scheduled to

[root@master1 cfg]# kubectl get pods -o wide
NAME                    READY   STATUS    RESTARTS   AGE   IP            NODE              NOMINATED NODE
nginx-dbddb74b8-sx4m6   1/1     Running   0          16m   172.17.42.3   192.168.191.131   <none>
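
kubectl describe shows the same placement plus the scheduling events (a sketch using the pod name from the output above):

[root@master1 cfg]# kubectl describe pod nginx-dbddb74b8-sx4m6 | grep -E '^Node:|^Status:'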

4. There are now three containers on the node02 node: the nginx container just created, its pause container, and the container used earlier to test flannel

[root@node02 cfg]# docker ps -a
CONTAINER ID        IMAGE                                                                 COMMAND                  CREATED             STATUS              PORTS               NAMES
6eff0af2c578        nginx                                                                 "nginx -g 'daemon of..."   16 minutes ago      Up 16 minutes                           k8s_nginx_nginx-dbddb74b8-sx4m6_default_cd5a2ea4-8c68-11ea-a668-000c29db840b_0
c4ca11690aa1        registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0   "/pause"                 16 minutes ago      Up 16 minutes                           k8s_POD_nginx-dbddb74b8-sx4m6_default_cd5a2ea4-8c68-11ea-a668-000c29db840b_0
fea29d0ff39b        centos:7                                                              "/bin/bash"              3 days ago          Up 3 days                               kind_burnell
[root@node02 cfg]# docker images
REPOSITORY                                                        TAG                 IMAGE ID            CREATED             SIZE
nginx                                                             latest              602e111c06b6        8 days ago          127MB
centos                                                            7                   5e35e350aded        5 months ago        203MB
registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64   3.0                 99e59f495ffa        3 years ago         747kB

nginx can be reached directly from the node02 node:

[root@node02 cfg]# curl 172.17.42.3
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>

Then view the container's logs from the master:

[root@master1 cfg]# kubectl logs nginx-dbddb74b8-sx4m6
172.17.42.1 - - [02/May/2020:11:52:45 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"
