
Kubernetes pgsql with Local PV

Taint all of nodes 14-16


Mark all of nodes 14-16 as unschedulable (cordon)

[root@master1 ~]# kubectl cordon node14
node/node14 cordoned
[root@master1 ~]# kubectl cordon node15
node/node15 cordoned
[root@master1 ~]# kubectl cordon node16
node/node16 cordoned
[root@master1 ~]# kubectl get node
NAME      STATUS                     ROLES    AGE    VERSION
master1   Ready                      master   144m   v1.19.8
master2   Ready                      master   143m   v1.19.8
master3   Ready                      master   143m   v1.19.8
node1     Ready                      worker   143m   v1.19.8
node10    Ready                      worker   143m   v1.19.8
node11    Ready                      worker   143m   v1.19.8
node12    Ready                      worker   142m   v1.19.8
node13    Ready                      worker   142m   v1.19.8
node14    Ready,SchedulingDisabled   worker   142m   v1.19.8
node15    Ready,SchedulingDisabled   worker   142m   v1.19.8
node16    Ready,SchedulingDisabled   worker   142m   v1.19.8
node2     Ready                      worker   143m   v1.19.8
node3     Ready                      worker   143m   v1.19.8
node4     Ready                      worker   143m   v1.19.8
node5     Ready                      worker   143m   v1.19.8
node6     Ready                      worker   143m   v1.19.8
node7     Ready                      worker   143m   v1.19.8
node8     Ready                      worker   143m   v1.19.8
node9     Ready                      worker   143m   v1.19.8      
[root@master1 ~]# kubectl drain node15  --delete-local-data  --ignore-daemonsets --force
node/node15 already cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/calico-node-mcdls, kube-system/kube-proxy-wlngt, kube-system/nodelocaldns-p98d2, kubesphere-logging-system/fluent-bit-2k5hg, kubesphere-monitoring-system/node-exporter-c46tx
evicting pod kubesphere-monitoring-system/prometheus-operator-664b66fd6f-62s8r
evicting pod kubesphere-logging-system/ks-events-operator-7c55bbfc6b-9z5gp
evicting pod kubesphere-monitoring-system/prometheus-k8s-0
pod/prometheus-operator-664b66fd6f-62s8r evicted
pod/prometheus-k8s-0 evicted
pod/ks-events-operator-7c55bbfc6b-9z5gp evicted
node/node15 evicted

[root@master1 ~]# kubectl drain node16  --delete-local-data  --ignore-daemonsets --force
node/node16 already cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/calico-node-hr2j2, kube-system/kube-proxy-h97qw, kube-system/nodelocaldns-c575c, kubesphere-logging-system/fluent-bit-hnqg6, kubesphere-monitoring-system/node-exporter-hnl2g
evicting pod kubesphere-system/openpitrix-import-job-plnkp
evicting pod kubesphere-logging-system/elasticsearch-logging-data-2
evicting pod kubesphere-monitoring-system/alertmanager-main-1
pod/openpitrix-import-job-plnkp evicted
pod/alertmanager-main-1 evicted
pod/elasticsearch-logging-data-2 evicted
node/node16 evicted

[root@master1 ~]# kubectl drain node14  --delete-local-data  --ignore-daemonsets --force
node/node14 already cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/calico-node-fshxc, kube-system/kube-proxy-p64rr, kube-system/nodelocaldns-bflft, kubesphere-logging-system/fluent-bit-gzjtr, kubesphere-monitoring-system/node-exporter-xvqbf
evicting pod kubesphere-monitoring-system/thanos-ruler-kubesphere-1
evicting pod istio-system/istio-ingressgateway-6dddcbbfd5-hfpql
evicting pod kubesphere-logging-system/logsidecar-injector-deploy-78cbddd74b-nlkxc
error when evicting pod "istio-ingressgateway-6dddcbbfd5-hfpql" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
evicting pod istio-system/istio-ingressgateway-6dddcbbfd5-hfpql
error when evicting pod "istio-ingressgateway-6dddcbbfd5-hfpql" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
pod/thanos-ruler-kubesphere-1 evicted
evicting pod istio-system/istio-ingressgateway-6dddcbbfd5-hfpql
error when evicting pod "istio-ingressgateway-6dddcbbfd5-hfpql" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
pod/logsidecar-injector-deploy-78cbddd74b-nlkxc evicted
evicting pod istio-system/istio-ingressgateway-6dddcbbfd5-hfpql
error when evicting pod "istio-ingressgateway-6dddcbbfd5-hfpql" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.^C

[root@master1 ~]# kubectl get pod -n istio-system -o wide
NAME                                    READY   STATUS    RESTARTS   AGE    IP             NODE     NOMINATED NODE   READINESS GATES
istio-ingressgateway-6dddcbbfd5-hfpql   1/1     Running   0          145m   10.233.103.3   node14   <none>           <none>

The drain of node14 stalls because evicting istio-ingressgateway would violate its PodDisruptionBudget, so the drain is interrupted (^C above) and all three nodes are uncordoned again:

[root@master1 provisioner]# kubectl uncordon node14
node/node14 uncordoned
[root@master1 provisioner]# kubectl uncordon node15
node/node15 uncordoned
[root@master1 provisioner]# kubectl uncordon node16
node/node16 uncordoned      

Create the StorageClass and Local PVs

[root@master1 offline_deploy]# cat storageclass.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: fast-disks
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer      
[root@master1 offline_deploy]# kubectl apply -f storageclass.yaml 
storageclass.storage.k8s.io/fast-disks created      
[root@master1 offline_deploy]# kubectl get sc
NAME                   PROVISIONER                                       RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
fast-disks             kubernetes.io/no-provisioner                      Delete          WaitForFirstConsumer   false                  5s
nfs-client (default)   cluster.local/nfs-client-nfs-client-provisioner   Delete          Immediate              true                   163m      

Create the mount points. Create a subdirectory under the discovery directory (/pgdata below); this subdirectory is what the provisioner scans and exposes as a Local PV.

[root@node14 ~]# mkdir -p  /pgdata/pv
[root@node15 ~]# mkdir -p /pgdata/pv
[root@node16 ~]# mkdir -p /pgdata/pv      

Format and mount the physical disk attached to each node.

Format the filesystem

mkfs.xfs /dev/mapper/centos-data        

Mount it

mount -t xfs  /dev/mapper/centos-data  /pgdata/pv      

Make the mount persist across reboots by appending it to /etc/fstab

DISK_UUID=$(sudo blkid -s UUID -o value /dev/mapper/centos-data) &&\
      echo UUID=${DISK_UUID} /pgdata/pv xfs defaults 0 0 | sudo tee -a /etc/fstab      
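
To confirm the new /etc/fstab entry without rebooting, the volume can be remounted straight from fstab. This check is not part of the original steps and should only be done before any data lands on the disk:

umount /pgdata/pv
mount /pgdata/pv      # mount(8) resolves the mount point from /etc/fstab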

Check the disk mounts on each node

[root@node14 ~]# lsblk
└─centos-data 253:3    0  500G  0 lvm  /pgdata/pv
[root@node15 ~]# lsblk
└─centos-data 253:3    0  500G  0 lvm  /pgdata/pv
[root@node16 ~]# lsblk
└─centos-data 253:3    0  500G  0 lvm  /pgdata/pv

Install local-static-provisioner

# Download
  git clone https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner.git
  cd sig-storage-local-static-provisioner
  # Edit the following settings (the numbers are line numbers inside values.yaml)
  vi helm/provisioner/values.yaml
   66 classes:
   67 - name: fast-disks
   70   hostDir: /pgdata
   76   volumeMode: Filesystem
   81   fsType: xfs
  116   image: quay.io/external_storage/local-volume-provisioner:v2.4.0      
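
The deployment step itself is not shown above. A minimal sketch with Helm 3, assuming the release name localprovi and the kube-system namespace (both inferred from the pod names below), would be:

  # Deploy the provisioner DaemonSet with Helm 3 (release name and namespace are assumptions)
  helm install localprovi ./helm/provisioner --namespace kube-system

Once the DaemonSet pods are running, each node's /pgdata/pv mount is discovered and published as an Available PV: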
[root@bots-hrx-ksm1 provisioner]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                                                              STORAGECLASS   REASON   AGE
local-pv-913a7ee8                          499Gi      RWO            Delete           Available                                                                      fast-disks              4s
local-pv-f21e5ebe                          499Gi      RWO            Delete           Available                                                                      fast-disks              3s
local-pv-f3b5c31f                          499Gi      RWO            Delete           Available                                                                      fast-disks              5s      
[root@bots-hrx-ksm1 provisioner]# kubectl get pod -n kube-system
kube-scheduler-bots-hrx-ksm1                        1/1     Running   2          32m
kube-scheduler-bots-hrx-ksm2                        1/1     Running   0          31m
kube-scheduler-bots-hrx-ksm3                        1/1     Running   1          31m
localprovi-provisioner-5qtbl                        1/1     Running   0          3m
localprovi-provisioner-8nlxr                        1/1     Running   0          3m1s
localprovi-provisioner-bwgq9                        1/1     Running   0          3m1s
localprovi-provisioner-db6q5                        1/1     Running   0          3m
localprovi-provisioner-drgfl                        1/1     Running   0          3m
localprovi-provisioner-gcmbg                        1/1     Running   0          3m1s
localprovi-provisioner-kv29z                        1/1     Running   0          3m
localprovi-provisioner-mb6ss                        1/1     Running   0          3m1s
localprovi-provisioner-mg8mk                        1/1     Running   0          3m
localprovi-provisioner-mqzws                        1/1     Running   0          3m1s
localprovi-provisioner-q7jnv                        1/1     Running   0          3m1s
localprovi-provisioner-qmrdh                        1/1     Running   0          3m
localprovi-provisioner-t7lfp                        1/1     Running   0          3m
localprovi-provisioner-w8lcq                        1/1     Running   0          3m1s
localprovi-provisioner-zz9dg                        1/1     Running   0          3m      
[root@bots-hrx-ksw14 ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/mapper/centos-data  500G   33M  500G   1% /pgdata/pv      

Set up node affinity: label the nodes first
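
The label and taint commands themselves are not shown; based on the node label and toleration used by the pgo create cluster command further down (and the "taint nodes 14-16" step announced at the top), they would look roughly like this. This is an assumption, not taken from the original:

# Label and taint the three Local PV nodes (node names from the earlier cordon step)
kubectl label node node14 node-role.kubernetes.io/pg=
kubectl label node node15 node-role.kubernetes.io/pg=
kubectl label node node16 node-role.kubernetes.io/pg=
kubectl taint node node14 node-role.kubernetes.io/pg=:NoSchedule
kubectl taint node node15 node-role.kubernetes.io/pg=:NoSchedule
kubectl taint node node16 node-role.kubernetes.io/pg=:NoSchedule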

Deploy the Operator

[root@bots-hrx-ksm1 kubectl]# pwd
/root/offline_deploy/radondb-postgresql-operator/installers/kubectl
[root@bots-hrx-ksm1 kubectl]# vim postgres-operator.yml 

   pgo_client_install: "true"
   pgo_client_container_install: "true"
   pgo_image_prefix: "198.1.229.203/docker.io/radondb"


    - name: pgo-deploy
      image: 198.1.229.203/docker.io/radondb/pgo-deployer:centos8-4.7.1
      imagePullPolicy: Always

  ```yaml
      storage5_name: "storageos"
      storage5_access_mode: "ReadWriteOnce"
      storage5_size: "5Gi"
      storage5_type: "dynamic"
      storage5_class: "fast-disks"
  ```

`kubectl apply -f postgres-operator.yml `
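
Before creating a cluster it is worth waiting for the deploy job to finish and the operator to come up (namespace as used in the pod listing further down):

kubectl get pod -n bots-hrx-pgo -w   # wait until pgo-deploy is Completed and postgres-operator is Running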


Create the PostgreSQL cluster, pinning it to the pg-labeled and tainted nodes and to the storageos storage config:

pgo create cluster pgcluster \
  --toleration=node-role.kubernetes.io/pg:NoSchedule \
  --replica-count=2 \
  --cpu=4 --cpu-limit=4 --memory=16Gi --memory-limit=16Gi \
  --pgbackrest-cpu=4 --pgbackrest-cpu-limit=4 --pgbackrest-memory=1Gi --pgbackrest-memory-limit=1Gi \
  --node-label=node-role.kubernetes.io/pg= --node-affinity-type=required \
  --storage-config=storageos --replica-storage-config=storageos \
  --metrics --pgbouncer --pgbouncer-replicas=2 \
  --ccp-image-tag='centos8-12.7-4.7.1' --ccp-image-prefix='198.1.229.203/docker.io/radondb' \
  -n bots-hrx-pgo
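
After the command returns, cluster creation can be followed with the pgo client; the subcommands below are assumed from the upstream v4.x CLI, not shown in the original:

pgo show cluster pgcluster -n bots-hrx-pgo   # deployments, services and PVCs of the cluster
pgo test pgcluster -n bots-hrx-pgo           # checks that primary and replicas accept connections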


[root@bots-hrx-ksm2 ~]# kubectl get pod -n bots-hrx-pgo  -o wide
NAME                                             READY   STATUS      RESTARTS   AGE    IP               NODE             NOMINATED NODE   READINESS GATES
backrest-backup-pgcluster-8pj9w                  0/1     Completed   0          16m    10.233.84.15     bots-hrx-ksw15   <none>           <none>
pgcluster-679f4b6875-xc8c2                       2/2     Running     0          22m    10.233.82.4      bots-hrx-ksw16   <none>           <none>
pgcluster-backrest-shared-repo-9b9966fcb-87k4t   1/1     Running     0          4m2s   10.233.114.186   bots-hrx-ksw11   <none>           <none>
pgcluster-epek-69b5d77bc4-69twq                  2/2     Running     0          15m    10.233.84.16     bots-hrx-ksw15   <none>           <none>
pgcluster-fbhn-77b4545d8-jdhnw                   2/2     Running     0          15m    10.233.66.13     bots-hrx-ksw14   <none>           <none>
pgcluster-pgbouncer-848f69d9cc-897z7             1/1     Running     0          19m    10.233.124.139   bots-hrx-ksw13   <none>           <none>
pgcluster-pgbouncer-848f69d9cc-hwmqr             1/1     Running     0          19m    10.233.121.209   bots-hrx-ksw5    <none>           <none>
pgo-client-68594654df-bfbmg                      1/1     Running     0          4h6m   10.233.105.6     bots-hrx-ksw10   <none>           <none>
pgo-deploy-29qr7                                 0/1     Completed   0          4h9m   10.233.80.6      bots-hrx-ksw1    <none>           <none>
postgres-operator-84bffd65d5-xmfms               4/4     Running     1          4h8m   10.233.100.9     bots-hrx-ksw7    <none>           <none>


[root@bots-hrx-ksm2 ~]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                                                              STORAGECLASS   REASON   AGE
local-pv-913a7ee8                          499Gi      RWO            Delete           Bound    bots-hrx-pgo/pgcluster-fbhn                                        fast-disks              19h
local-pv-f21e5ebe                          499Gi      RWO            Delete           Bound    bots-hrx-pgo/pgcluster                                             fast-disks              19h
local-pv-f3b5c31f                          499Gi      RWO            Delete           Bound    bots-hrx-pgo/pgcluster-epek                                        fast-disks              19h      
The resulting PostgreSQL pod spec carries the required node affinity and the toleration for the pg taint:

spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: node-role.kubernetes.io/pg
                operator: In
                values:
                  - ''
  tolerations:
    - key: node-role.kubernetes.io/pg
      operator: Exists
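
To double-check these settings on the live cluster, the generated Deployment (name inferred from the pod names above) can be dumped:

kubectl get deployment pgcluster -n bots-hrx-pgo -o yaml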
