K8S Deployment upgrades application declaratively

Role: perform a smooth, declarative rolling upgrade of Pods through a Deployment

1, Create Deployment file

apiVersion: apps/v1            # apps/v1beta1 is deprecated and was removed in Kubernetes 1.16
kind: Deployment
metadata:
  name: kubia
  namespace: test
spec:
  replicas: 3
  selector:                    # required in apps/v1; must match the Pod template's labels
    matchLabels:
      app: kubia
  template:
    metadata:
      name: kubia
      labels:
        app: kubia
    spec:
      containers:
        - name: nodejs
          image: luksa/kubia:v1       # image at the v1 version; upgraded later via 'kubectl set image'

2, Create a Service to load-balance across the Pods

apiVersion: v1
kind: Service
metadata:
  name: kubia
  namespace: test
spec:
  ports:
    - name: http
      port: 80                # port exposed on the Service's cluster IP
      targetPort: 8080        # container port traffic is forwarded to
  selector:
    app: kubia              # routes to Pods labeled app=kubia (the Deployment's Pods)

3, Create the Deployment and the Service

[root@test-nodes1 k8s-yaml-file]# kubectl create -f kubia-deployment-v1.yaml --record
deployment.apps/kubia created
[root@test-nodes1 k8s-yaml-file]# kubectl get all -n test
NAME                         READY   STATUS    RESTARTS   AGE
pod/kubia-66b4657d7b-77t4k   1/1     Running   0          9m5s
pod/kubia-66b4657d7b-ggdft   1/1     Running   0          9m5s
pod/kubia-66b4657d7b-qtckn   1/1     Running   0          9m5s

NAME                    READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/kubia   3/3     3            3           9m6s

NAME                               DESIRED   CURRENT   READY   AGE
replicaset.apps/kubia-66b4657d7b   3         3         3       9m5s

4, Upgrade pod with deployment (change pod version)

[root@test-nodes1 k8s-yaml-file]# kubectl set image deployment kubia nodejs=luksa/kubia:v2 -n test
deployment.extensions/kubia image updated

[root@test-nodes1 k8s-yaml-file]# kubectl get all -n test
NAME                         READY   STATUS        RESTARTS   AGE
pod/kubia-66b4657d7b-77t4k   0/1     Terminating   0          16m    #termination
pod/kubia-66b4657d7b-ggdft   1/1     Terminating   0          16m     #termination
pod/kubia-66b4657d7b-qtckn   1/1     Terminating   0          16m    #termination
pod/kubia-75974f96b5-b7rhd   1/1     Running       0          27s
pod/kubia-75974f96b5-q4txq   1/1     Running       0          34s
pod/kubia-75974f96b5-tdpbf   1/1     Running       0          43s

NAME            TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
service/kubia   ClusterIP   192.168.253.59   <none>        80/TCP    4m26s

NAME                    READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/kubia   3/3     3            3           16m

NAME                               DESIRED   CURRENT   READY   AGE
replicaset.apps/kubia-66b4657d7b   0         0         0       16m
replicaset.apps/kubia-75974f96b5   3         3         3       43s

[root@test-nodes1 k8s-yaml-file]# kubectl get deployment kubia -o wide -n test
NAME    READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS   IMAGES           SELECTOR
kubia   3/3     3            3           20m   nodejs       luksa/kubia:v2   app=kubia

5, Roll back a Deployment upgrade (when the upgrade fails)

$ kubectl rollout undo deployment kubia -n test

Tip: the undo command can also be run while a rolling upgrade is in progress, stopping it immediately.
The Pods already created during the upgrade are then deleted and replaced by Pods of the old version.

6, View the Deployment's rolling-upgrade history

[root@test-nodes1 k8s-yaml-file]# kubectl rollout history deployment kubia -n test
deployment.extensions/kubia 
REVISION  CHANGE-CAUSE
1         kubectl create --filename=kubia-deployment-v1.yaml --record=true
3         kubectl create --filename=kubia-deployment-v1.yaml --record=true
4         kubectl create --filename=kubia-deployment-v1.yaml --record=true

7, Roll back the Deployment to a specific revision (e.g. revision 1)

[root@test-nodes1 k8s-yaml-file]# kubectl rollout undo deployment kubia --to-revision=1 -n test
deployment.extensions/kubia rolled back
[root@test-nodes1 k8s-yaml-file]# kubectl get deployment kubia -o wide -n test
NAME    READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS   IMAGES           SELECTOR
kubia   3/3     3            3           14m   nodejs       luksa/kubia:v1   app=kubia

Tags: Linux

Posted on Wed, 13 May 2020 11:49:18 -0400 by derekm