Managing OpenKruise Custom Resources with Karmada: Examples

CloneSet In-Place Update Example

cat << EOF  | kubectl apply -f - --kubeconfig karmada-primary
apiVersion: apps.kruise.io/v1alpha1
kind: CloneSet
metadata:
  labels:
    app: nginx
  name: nginx
spec:
  replicas: 5
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
        test: mytest
    spec:
      containers:
      - name: nginx
        image: nginx:alpine
        env:
        - name: test
          valueFrom:
            fieldRef:
              fieldPath: metadata.labels['test']
  updateStrategy:
    type: InPlaceIfPossible
---
apiVersion: policy.karmada.io/v1alpha1
kind: PropagationPolicy
metadata:
  name: nginx
spec:
  resourceSelectors:
    - apiVersion: apps.kruise.io/v1alpha1
      kind: CloneSet
      name: nginx
  placement:
    clusterAffinity:
      clusterNames:
        - primary
        - secondary
    replicaScheduling:
      replicaDivisionPreference: Weighted
      replicaSchedulingType: Divided
EOF
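
Before moving on, you can verify on the Karmada control plane that the PropagationPolicy matched the CloneSet and produced a binding for the two member clusters (a quick sanity check; the exact binding name depends on how Karmada names it for your resource):

kubectl get resourcebinding --kubeconfig karmada-primary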

Modify the CloneSet to trigger an in-place update. The pod-template label flows into the container through the downward-API env var, so Kruise can apply the change in place: in the output below each pod shows RESTARTS 1 while its AGE is unchanged, meaning the containers restarted without the pods being recreated. Note also that the control-plane view aggregates status from both member clusters (TOTAL 10), while DESIRED reflects the template value of 5.

kubectl patch cloneset nginx --type merge -p '{"spec":{"template":{"metadata":{"labels":{"test":"test1"}}}}}' --kubeconfig karmada-primary
# kubectl  get cloneset --kubeconfig karmada-primary
NAME    DESIRED   UPDATED   UPDATED_READY   READY   TOTAL   AGE
nginx   5         10        10              10      10      6m12s
# kubectl get pod --kubeconfig secondary
NAME          READY   STATUS    RESTARTS      AGE
nginx-729jm   1/1     Running   1 (23s ago)   2m
nginx-9wjbd   1/1     Running   1 (5s ago)    2m
nginx-cwxlq   1/1     Running   1 (14s ago)   2m
nginx-drsft   1/1     Running   1 (33s ago)   2m
nginx-g674t   1/1     Running   1 (43s ago)   2m
# kubectl get pod --kubeconfig primary
NAME          READY   STATUS    RESTARTS      AGE
nginx-4fjfq   1/1     Running   1 (16s ago)   2m3s
nginx-82gmf   1/1     Running   1 (7s ago)    2m3s
nginx-qt8kl   1/1     Running   1 (36s ago)   2m3s
nginx-vrbnc   1/1     Running   1 (46s ago)   2m3s
nginx-xx2c4   1/1     Running   1 (26s ago)   2m3s

CloneSet Batched Canary Release

Setting the number of pods retained on the old revision (partition)

Karmada's replica scheduling only computes on the replicas field; all other fields are copied verbatim to every member cluster. So when setting partition for a batched canary, you must take each member cluster's replica count into account, or the canary may not take effect as intended. For example, suppose a CloneSet has 5 replicas globally and the scheduling policy divides them across clusters A and B as 2 and 3. With partition: 2, cluster A updates no pods (the partition is not below its 2 replicas) and cluster B updates only one (3 - 2 = 1); with partition: 3, neither cluster updates any pod, so no canary happens at all. A safe upper bound that still updates at least one pod per cluster is:

partition = (n // m) - 1, where n is the total replica count and m is the number of member clusters hosting the resource.
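
With the numbers above, n = 5 and m = 2 give partition = (5 // 2) - 1 = 1, the largest value that still updates at least one pod in every member cluster.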

cat << EOF  | kubectl apply -f - --kubeconfig karmada-primary
apiVersion: apps.kruise.io/v1alpha1
kind: CloneSet
metadata:
  labels:
    app: nginx
  name: nginx
spec:
  replicas: 5
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
        test: mytest
    spec:
      containers:
      - name: nginx
        image: nginx:mainline
        imagePullPolicy: Always
        env:
        - name: test
          valueFrom:
            fieldRef:
              fieldPath: metadata.labels['test']
  updateStrategy:
    partition: 3
EOF
# kubectl  get cloneset  --kubeconfig primary
NAME    DESIRED   UPDATED   UPDATED_READY   READY   TOTAL   AGE
nginx   5         2         2               5       5       15h
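
Because partition, like every non-replicas field, is copied verbatim to each member cluster, the same batch size should show up on the other cluster too:

kubectl get cloneset nginx --kubeconfig secondary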

Update the remaining pods: partition: 3 kept 3 pods on the old revision (hence UPDATED 2 of 5 above), and setting partition back to 0 rolls out the rest.

kubectl patch cloneset nginx --type merge -p '{"spec":{"updateStrategy":{"partition":0}}}' --kubeconfig karmada-primary
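
After the partition is cleared, each member cluster should report all of its replicas updated:

kubectl get cloneset nginx --kubeconfig primary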

Advanced StatefulSet

cat << EOF  | kubectl apply -f - --kubeconfig karmada-primary
apiVersion: apps.kruise.io/v1beta1
kind: StatefulSet
metadata:
  name: sample
spec:
  replicas: 3
  serviceName: fake-service
  selector:
    matchLabels:
      app: sample
  template:
    metadata:
      labels:
        app: sample
    spec:
      readinessGates:
         # A new condition that ensures the pod remains at NotReady state while the in-place update is happening
      - conditionType: InPlaceUpdateReady
      containers:
      - name: main
        image: nginx:alpine
  podManagementPolicy: Parallel # allow parallel updates, works together with maxUnavailable
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      # Do in-place update if possible, currently only image update is supported for in-place update
      podUpdatePolicy: InPlaceIfPossible
      # Allow parallel updates with max number of unavailable instances equals to 2
      maxUnavailable: 2
---
apiVersion: policy.karmada.io/v1alpha1
kind: PropagationPolicy
metadata:
  name: sample
spec:
  resourceSelectors:
    - apiVersion: apps.kruise.io/v1beta1
      kind: StatefulSet
      name: sample
  placement:
    clusterAffinity:
      clusterNames:
        - primary
        - secondary
    # replicaScheduling:
    #   replicaDivisionPreference: Weighted
    #   replicaSchedulingType: Divided
EOF

Check how the resource was distributed. Because replicaScheduling is commented out, each member cluster runs the full 3 replicas, which is why the aggregated view on the Karmada control plane reports CURRENT 6:

# kubectl  get asts --kubeconfig karmada-primary
NAME     DESIRED   CURRENT   UPDATED   READY   AGE
sample   3         6         6         6       4m36s
# kubectl  get asts --kubeconfig primary
NAME     DESIRED   CURRENT   UPDATED   READY   AGE
sample   3         3         3         3       3m7s
# kubectl  get pod --kubeconfig primary -l app=sample
NAME       READY   STATUS    RESTARTS   AGE
sample-0   1/1     Running   0          3m28s
sample-1   1/1     Running   0          3m28s
sample-2   1/1     Running   0          3m28s
# kubectl  get asts --kubeconfig secondary
NAME     DESIRED   CURRENT   UPDATED   READY   AGE
sample   3         3         3         3       3m38s
# kubectl  get pod --kubeconfig secondary -l app=sample
NAME       READY   STATUS    RESTARTS   AGE
sample-0   1/1     Running   0          3m46s
sample-1   1/1     Running   0          3m46s
sample-2   1/1     Running   0          3m46s
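
The walkthrough above only shows the initial rollout. To exercise the in-place update path, one option (a sketch not taken from the original, using nginx:mainline as an arbitrary new tag) is to patch just the image; note that a JSON merge patch replaces the whole containers list, which is safe here because the single container is fully re-specified:

kubectl patch statefulsets.apps.kruise.io sample --type merge -p '{"spec":{"template":{"spec":{"containers":[{"name":"main","image":"nginx:mainline"}]}}}}' --kubeconfig karmada-primary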

WorkloadSpread

cat << EOF  | kubectl apply -f - --kubeconfig karmada-primary
apiVersion: apps.kruise.io/v1alpha1
kind: WorkloadSpread
metadata:
  name: workloadspread-demo
spec:
  targetRef:
    apiVersion: apps.kruise.io/v1alpha1
    kind: CloneSet
    name: nginx
  subsets:
    - name: subset-a
      requiredNodeSelectorTerm:
        matchExpressions:
          - key: topology.kubernetes.io/zone
            operator: In
            values:
              - zone-a
      preferredNodeSelectorTerms:
        - weight: 1
          preference:
            matchExpressions:
              - key: another-node-label-key
                operator: In
                values:
                  - another-node-label-value
      maxReplicas: 3
      tolerations: [ ]
    - name: subset-b
      requiredNodeSelectorTerm:
        matchExpressions:
          - key: topology.kubernetes.io/zone
            operator: In
            values:
              - zone-b
---
apiVersion: policy.karmada.io/v1alpha1
kind: PropagationPolicy
metadata:
  name: workloadspread-demo
spec:
  resourceSelectors:
    - apiVersion: apps.kruise.io/v1alpha1
      kind: WorkloadSpread
      name: workloadspread-demo
  placement:
    clusterAffinity:
      clusterNames:
        - primary
        - secondary
EOF
# kubectl  get WorkloadSpread --kubeconfig karmada-primary
NAME                  WORKLOADNAME   WORKLOADKIND   ADAPTIVE   AGE
workloadspread-demo   nginx          CloneSet                  9m51s
# kubectl get WorkloadSpread --kubeconfig primary
NAME                  WORKLOADNAME   WORKLOADKIND   ADAPTIVE   AGE
workloadspread-demo   nginx          CloneSet                  2m
# kubectl get WorkloadSpread --kubeconfig secondary
NAME                  WORKLOADNAME   WORKLOADKIND   ADAPTIVE   AGE
workloadspread-demo   nginx          CloneSet                  2m3s
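
Each member cluster's Kruise controller reconciles its local copy of the WorkloadSpread against the pods of the local CloneSet; to see how replicas landed in each subset, inspect the status (subsetStatuses) on a member cluster:

kubectl get workloadspread workloadspread-demo -o yaml --kubeconfig primary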