
蜷缩的蜗牛

Focused on cloud-native operations

Why Is HPA Scale-Up Slow?

Recently, during a business campaign, we hit a burst of traffic: pod resource usage spiked and service availability dropped. There were many contributing causes, but one direct cause was that when the traffic arrived, resource usage shot up while the HPA did not scale out in time. This article digs into that problem from three angles:

  1. How slow is the scale-up, exactly?
  2. Why is scale-up slow?
  3. What solutions are available? (one common mitigation is sketched right after this list)
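As a preview of the third point, one widely used mitigation is tuning the behavior.scaleUp section of an autoscaling/v2 HPA so it reacts faster to bursts. The sketch below is a minimal, hypothetical example (the workload name demo-app and every threshold are placeholders, not values from the incident described above):

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: demo-app                      # hypothetical workload
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: demo-app
  minReplicas: 2
  maxReplicas: 20
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 60
  behavior:
    scaleUp:
      stabilizationWindowSeconds: 0   # react to bursts immediately
      policies:
        - type: Percent
          value: 100                  # allow doubling the replicas per period
          periodSeconds: 15
        - type: Pods
          value: 4                    # or adding up to 4 pods per period
          periodSeconds: 15
      selectPolicy: Max               # take whichever policy allows more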

Karmada Multi-Cluster Deployment

  1. 2023-10-31: quickly set up a Karmada cluster
  2. 2024-03-04: added a manual cluster registration method

Create the kind Clusters

Karmada control plane
export VIP=172.26.145.27
cat << EOF | kind create cluster --name=karmada-controller --kubeconfig=karmada-controller --config=-
kind: Cluster
apiVersion: "kind.x-k8s.io/v1alpha4"
networking:
  apiServerAddress: "${VIP}"
nodes:
  - role: control-plane
    image: registry.cn-hangzhou.aliyuncs.com/seam/node:v1.20.15
    extraPortMappings:
      - containerPort: 5443
        hostPort: 5443
        protocol: TCP
        listenAddress: "${VIP}"
EOF
Create the Karmada member clusters
member1
cat << EOF | kind create cluster --name=member1 --kubeconfig=member1 --config=-
kind: Cluster
apiVersion: "kind.x-k8s.io/v1alpha4"
networking:
  apiServerAddress: "${VIP}"
  podSubnet: "10.10.0.0/16"
  serviceSubnet: "10.11.0.0/16"
nodes:
  - role: control-plane
    image: registry.cn-hangzhou.aliyuncs.com/seam/node:v1.26.0
EOF
member2
cat << EOF | kind create cluster --name=member2 --kubeconfig=member2 --config=-
kind: Cluster
apiVersion: "kind.x-k8s.io/v1alpha4"
networking:
  apiServerAddress: "${VIP}"
  podSubnet: "10.12.0.0/16"
  serviceSubnet: "10.13.0.0/16"
nodes:
  - role: control-plane
    image: registry.cn-hangzhou.aliyuncs.com/seam/node:v1.26.0
EOF
member3
cat << EOF | kind create cluster --name=member3 --kubeconfig=member3 --config=-
kind: Cluster
apiVersion: "kind.x-k8s.io/v1alpha4"
networking:
  apiServerAddress: "${VIP}"
  podSubnet: "10.8.0.0/16"
  serviceSubnet: "10.9.0.0/16"
nodes:
  - role: control-plane
    image: registry.cn-hangzhou.aliyuncs.com/seam/node:v1.23.0
EOF
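With the control plane and three member clusters created, kind itself can confirm they all exist:

kind get clusters
# expected to list: karmada-controller, member1, member2, member3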

Install the Karmada Operator

helm install karmada-operator -n karmada-system  --create-namespace --dependency-update ./charts/karmada-operator  --kubeconfig karmada-controller
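Before creating the Karmada resource, it is worth checking that the operator pod is running on the host cluster:

kubectl get pods -n karmada-system --kubeconfig karmada-controller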

Create the Karmada Instance

kubectl apply -f - <<EOF
apiVersion: operator.karmada.io/v1alpha1
kind: Karmada
metadata:
  name: karmada
  namespace: karmada-system
spec:
  components:
    karmadaAPIServer:
      serviceType: NodePort
      certSANs:
        - "kubernetes.default.svc"
        - "127.0.0.1"
        - "${VIP}"
EOF
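The operator then provisions the Karmada control-plane components as pods in the same namespace. A rough way to watch progress (the exact readiness condition reported on the Karmada object may vary by operator version, so this is only a sketch):

kubectl get karmadas -n karmada-system --kubeconfig karmada-controller
kubectl get pods -n karmada-system --kubeconfig karmada-controller -w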

Install Karmada with Helm

Karmada configuration (values.yaml)
cat << EOF > values.yaml
installMode: "host"
clusterDomain: "cluster.local"
systemNamespace: "karmada-system"
components: []
cfssl:
  image:
    registry: registry.cn-hangzhou.aliyuncs.com
    repository: seam/cfssl
    tag: latest
    pullPolicy: IfNotPresent
kubectl:
  image:
    registry: registry.cn-hangzhou.aliyuncs.com
    repository: seam/kubectl
    tag: latest
    pullPolicy: IfNotPresent
certs:
  mode: auto
  auto:
    expiry: 43800h
    hosts: [
      "kubernetes.default.svc",
      "*.etcd.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}",
      "*.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}",
      "*.{{ .Release.Namespace }}.svc",
      "localhost",
      "127.0.0.1",
      "${VIP}"
    ]
scheduler:
  image:
    registry: registry.cn-hangzhou.aliyuncs.com
    repository: seam/karmada-scheduler
webhook:
  image:
    registry: registry.cn-hangzhou.aliyuncs.com
    repository: seam/karmada-webhook
controllerManager:
  image:
    registry: registry.cn-hangzhou.aliyuncs.com
    repository: seam/karmada-controller-manager
apiServer:
  hostNetwork: true
  image:
    registry: registry.cn-hangzhou.aliyuncs.com
    repository: seam/kube-apiserver
    tag: "v1.25.4"
    pullPolicy: IfNotPresent
aggregatedApiServer:
  image:
    registry: registry.cn-hangzhou.aliyuncs.com
    repository: seam/karmada-aggregated-apiserver
metricsAdapter:
  image:
    registry: registry.cn-hangzhou.aliyuncs.com
    repository: seam/karmada-metrics-adapter
kubeControllerManager:
  image:
    registry: registry.cn-hangzhou.aliyuncs.com
    repository: seam/kube-controller-manager
    tag: "v1.25.4"
etcd:
  internal:
    image:
      registry: registry.cn-hangzhou.aliyuncs.com
      repository: seam/etcd
      tag: "3.5.9-0"
      pullPolicy: IfNotPresent
agent:
  image:
    registry: registry.cn-hangzhou.aliyuncs.com
    repository: seam/karmada-agent
schedulerEstimator:
  image:
    registry: registry.cn-hangzhou.aliyuncs.com
    repository: seam/karmada-scheduler-estimator
descheduler:
  image:
    registry: registry.cn-hangzhou.aliyuncs.com
    repository: seam/karmada-descheduler
search:
  image:
    registry: registry.cn-hangzhou.aliyuncs.com
    repository: seam/karmada-search
EOF
helm repo add karmada-charts https://raw.githubusercontent.com/karmada-io/karmada/master/charts

helm --namespace karmada-system upgrade --install   karmada karmada-charts/karmada -f values.yaml --kubeconfig karmada-controller --create-namespace


helm --namespace karmada-system  template karmada karmada-charts/karmada --version=1.8.0 -f values.yaml
kubectl get secret -n karmada-system  karmada-admin-config -o jsonpath={.data.kubeconfig} | base64 -d |  sed "s/karmada-apiserver.karmada-system.svc.cluster.local/${VIP}/" > karmada-api
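A quick check that the rewritten kubeconfig really reaches the Karmada apiserver (assuming ${VIP} and the exposed port are reachable from where kubectl runs):

kubectl get ns --kubeconfig karmada-api
kubectl api-resources --kubeconfig karmada-api | grep karmada.io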

Register Member Clusters

Install the Karmada kubectl plugin
kubectl krew install karmada
Join a cluster
kubectl karmada join member1 --kubeconfig=<karmada api kubeconfig> --cluster-kubeconfig=<member kubeconfig>
# kind get kubeconfig --name member1 > member1
# kubectl karmada join member1 --kubeconfig=karmada-api --cluster-kubeconfig=member1
cluster(member1) is joined successfully

# kubectl get secret --kubeconfig member1 -n karmada-cluster
NAME                   TYPE                                  DATA   AGE
karmada-impersonator   kubernetes.io/service-account-token   3      7m21s
karmada-member1        kubernetes.io/service-account-token   3      7m19s
# kubectl get sa --kubeconfig member1 -n karmada-cluster
NAME                   SECRETS   AGE
default                0         7m33s
karmada-impersonator   0         7m33s
karmada-member1        0         7m31s
# kubectl get clusterRole --kubeconfig member1|grep karmada
karmada-controller-manager:karmada-member1                             2024-03-01T02:57:24Z
karmada-impersonator                                                   2024-03-01T02:57:27Z
# kubectl get ClusterRoleBinding --kubeconfig member1|grep karmada
karmada-controller-manager:karmada-member1               ClusterRole/karmada-controller-manager:karmada-member1                             8m4s
karmada-impersonator                                     ClusterRole/karmada-impersonator                                                   8m1s
# kubectl get secret --kubeconfig karmada-api -n karmada-cluster
NAME                   TYPE     DATA   AGE
member1                Opaque   2      10m
member1-impersonator   Opaque   1      10m

# kubectl get clusters --kubeconfig karmada-api

NAME      VERSION   MODE   READY   AGE
member1   v1.26.0   Push   True    7s

# kubectl karmada join member2 --kubeconfig=karmada-api --cluster-kubeconfig=kindyaml/member2

cluster(member2) is joined successfully

# kubectl get clusters --kubeconfig karmada-api

NAME      VERSION   MODE   READY   AGE
member1   v1.26.0   Push   True    93s
member2   v1.26.0   Push   True    4s
Manually join a cluster
1. Member cluster configuration
member=member3
# get the kubeconfig
kind get kubeconfig --name ${member} > ${member}

# get the CA certificate
ca_data=$(yq  eval '.clusters[0].cluster.certificate-authority-data' ${member})

# get the member cluster ID
cluster_id=$(kubectl get ns kube-system  --kubeconfig ${member} -o jsonpath={.metadata.uid})

# get the API server address
apiEndpoint=$(yq eval '.clusters[0].cluster.server' ${member})

# create the namespace
kubectl create ns karmada-cluster --kubeconfig ${member}
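Everything below is templated from these variables, so a small sanity check that none of them came back empty saves debugging later (just a sketch):

# all three values must be non-empty before continuing
echo "cluster_id  = ${cluster_id}"
echo "apiEndpoint = ${apiEndpoint}"
echo "ca_data     = ${ca_data:0:20}..."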
2. Create the ClusterRole, ClusterRoleBinding, and ServiceAccount
cat << EOF | kubectl apply -f - --kubeconfig ${member}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: karmada-controller-manager:karmada-${member}
rules:
- apiGroups:
  - '*'
  resources:
  - '*'
  verbs:
  - '*'
- nonResourceURLs:
  - '*'
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    karmada.io/managed: "true"
    work.karmada.io/namespace: karmada-es-${member}
  name: karmada-impersonator
rules:
- apiGroups:
  - ""
  resourceNames:
  - system:admin
  - system:kube-controller-manager
  resources:
  - users
  verbs:
  - impersonate
- apiGroups:
  - ""
  resourceNames:
  - generic-garbage-collector
  - namespace-controller
  - resourcequota-controller
  resources:
  - serviceaccounts
  verbs:
  - impersonate
- apiGroups:
  - ""
  resourceNames:
  - system:masters
  resources:
  - groups
  verbs:
  - impersonate
---


apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: karmada-controller-manager:karmada-${member}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: karmada-controller-manager:karmada-${member}
subjects:
- kind: ServiceAccount
  name: karmada-${member}
  namespace: karmada-cluster
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    karmada.io/managed: "true"
    work.karmada.io/namespace: karmada-es-${member}
  name: karmada-impersonator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: karmada-impersonator
subjects:
- kind: ServiceAccount
  name: karmada-impersonator
  namespace: karmada-cluster
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: karmada-impersonator
  namespace: karmada-cluster
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: karmada-${member}
  namespace: karmada-cluster
EOF
3. Create Secrets in the member cluster
cat << EOF | kubectl apply -f - --kubeconfig ${member}
apiVersion: v1
data:
  ca.crt: ${ca_data}
  namespace: a2FybWFkYS1jbHVzdGVy # base64 of "karmada-cluster"
kind: Secret
metadata:
  annotations:
    kubernetes.io/service-account.name: karmada-${member}
  name: karmada-${member}
  namespace: karmada-cluster
type: kubernetes.io/service-account-token
---
apiVersion: v1
data:
  ca.crt: ${ca_data}
  namespace: a2FybWFkYS1jbHVzdGVy
kind: Secret
metadata:
  annotations:
    kubernetes.io/service-account.name: karmada-impersonator
  name: karmada-impersonator
  namespace: karmada-cluster
type: kubernetes.io/service-account-token
EOF
4. Get the member cluster tokens
member_token="$(kubectl get secret -n karmada-cluster --kubeconfig  ${member} karmada-${member} -o jsonpath="{.data.token}")"
impersonator_token="$(kubectl get secret -n karmada-cluster --kubeconfig ${member} karmada-impersonator -o jsonpath="{.data.token}")"
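The token controller fills in these legacy service-account-token Secrets asynchronously, so the token fields can be empty for a moment after creation; a small check before moving on (sketch):

[ -n "${member_token}" ] && [ -n "${impersonator_token}" ] && echo "tokens ready" || echo "tokens not populated yet, re-run the two commands above"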
5. Create Secrets in the control plane
cat << EOF | kubectl apply -f - --kubeconfig karmada-api
apiVersion: v1
data:
  caBundle: ${ca_data}
  token: ${member_token}
kind: Secret
metadata:
  name: ${member}
  namespace: karmada-cluster
type: Opaque
---
apiVersion: v1
data:
  token: ${impersonator_token}
kind: Secret
metadata:
  name: ${member}-impersonator
  namespace: karmada-cluster
type: Opaque

EOF
6. Create the Cluster object
cat << EOF | kubectl apply -f - --kubeconfig karmada-api
---
apiVersion: cluster.karmada.io/v1alpha1
kind: Cluster
metadata:
  name: ${member}
spec:
  apiEndpoint: ${apiEndpoint}
  id: ${cluster_id}
  secretRef:
    name: ${member}
    namespace: karmada-cluster
  impersonatorSecretRef:
    name: ${member}-impersonator
    namespace: karmada-cluster
  syncMode: Push
  resourceModels:
  - grade: 0
    ranges:
    - max: "1"
      min: "0"
      name: cpu
    - max: 4Gi
      min: "0"
      name: memory
  - grade: 1
    ranges:
    - max: "2"
      min: "1"
      name: cpu
    - max: 16Gi
      min: 4Gi
      name: memory
  - grade: 2
    ranges:
    - max: "4"
      min: "2"
      name: cpu
    - max: 32Gi
      min: 16Gi
      name: memory
  - grade: 3
    ranges:
    - max: "8"
      min: "4"
      name: cpu
    - max: 64Gi
      min: 32Gi
      name: memory
  - grade: 4
    ranges:
    - max: "16"
      min: "8"
      name: cpu
    - max: 128Gi
      min: 64Gi
      name: memory
  - grade: 5
    ranges:
    - max: "32"
      min: "16"
      name: cpu
    - max: 256Gi
      min: 128Gi
      name: memory
  - grade: 6
    ranges:
    - max: "64"
      min: "32"
      name: cpu
    - max: 512Gi
      min: 256Gi
      name: memory
  - grade: 7
    ranges:
    - max: "128"
      min: "64"
      name: cpu
    - max: 1Ti
      min: 512Gi
      name: memory
  - grade: 8
    ranges:
    - max: "9223372036854775807"
      min: "128"
      name: cpu
    - max: "9223372036854775807"
      min: 1Ti
      name: memory

EOF
# kubectl get cluster -n karmada-cluster --kubeconfig karmada-api
NAME      VERSION   MODE   READY   AGE
member1   v1.29.0   Push   True    3d
member2   v1.29.0   Push   True    2d20h
member3   v1.29.0   Push   True    2d18h
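With all three members Ready, a simple smoke test is to propagate a workload from the Karmada control plane to the member clusters. The names below (nginx, nginx-propagation) are only illustrative:

cat << EOF | kubectl apply -f - --kubeconfig karmada-api
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:alpine
---
apiVersion: policy.karmada.io/v1alpha1
kind: PropagationPolicy
metadata:
  name: nginx-propagation
spec:
  resourceSelectors:
    - apiVersion: apps/v1
      kind: Deployment
      name: nginx
  placement:
    clusterAffinity:
      clusterNames:
        - member1
        - member2
        - member3
EOF

# the Deployment should then appear in each member cluster, e.g.:
kubectl get deploy nginx --kubeconfig member1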

Running Android Cloud Phones in Docker

Preface

There are already plenty of cloud phone vendors on the market, such as 多多云, 雷电云, and 河马云手机. But if you have spare servers of your own, you can build a cloud phone yourself.

Istio Multi-Cluster Series: Cross-AZ Locality Load Balancing with a Single Control Plane

For security and disaster-recovery reasons, our production K8S clusters span multiple availability zones. A multi-AZ cluster inevitably involves cross-AZ calls, and since this is an overseas business running on AWS, cross-AZ traffic is not free the way it is with domestic cloud vendors: it is billed, and it is expensive.

To balance cost and resilience, we use Istio's locality load balancing: traffic is preferentially routed within the same AZ and fails over to the other AZ when problems occur.

Here we use kind to create a multi-node cluster locally and simulate multiple AZs with node labels.

Because my work laptop is an M1 and Istio 1.13 does not support M1, this article uses Istio 1.16 with K8S 1.24.
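Locality load balancing is ultimately configured per service through a DestinationRule: outlier detection must be enabled for failover to trigger, and the failover order can be stated explicitly. A minimal sketch for a hypothetical service demo-svc, using the az01/az02 region labels applied to the nodes below:

apiVersion: networking.istio.io/v1beta1
kind: DestinationRule
metadata:
  name: demo-svc-locality
spec:
  host: demo-svc.default.svc.cluster.local
  trafficPolicy:
    loadBalancer:
      localityLbSetting:
        enabled: true
        failover:
          - from: az01            # prefer the local AZ...
            to: az02              # ...fail over to the other one
    outlierDetection:             # required, otherwise locality LB stays inactive
      consecutive5xxErrors: 3
      interval: 10s
      baseEjectionTime: 30s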

Create a multi-node cluster to simulate multiple AZs

kind configuration

mkdir -p multicluster
cd multicluster
cat << EOF > kind-cluster1.yaml
kind: Cluster
apiVersion: "kind.x-k8s.io/v1alpha4"
networking:
  apiServerAddress: "172.26.128.224" # local machine IP; optional
  podSubnet: "10.10.0.0/16"
  serviceSubnet: "10.11.0.0/16"
nodes:
  - role: control-plane
    image: registry.cn-hangzhou.aliyuncs.com/seam/node:v1.24.15
  - role: worker
    image: registry.cn-hangzhou.aliyuncs.com/seam/node:v1.24.15
    kubeadmConfigPatches:
    - |
      kind: JoinConfiguration
      nodeRegistration:
        kubeletExtraArgs:
          node-labels: "topology.kubernetes.io/region=az01,topology.kubernetes.io/zone=az01"
  - role: worker
    image: registry.cn-hangzhou.aliyuncs.com/seam/node:v1.24.15
    kubeadmConfigPatches:
    - |
      kind: JoinConfiguration
      nodeRegistration:
        kubeletExtraArgs:
          node-labels: "topology.kubernetes.io/region=az02,topology.kubernetes.io/zone=az02"
EOF

Istio Multi-Cluster Series: Multi-Control-Plane Deployment on the Same Network

(Diagram: multi-cluster network architecture)

This article is based on Istio 1.16.2 and k8s 1.24.

Create two k8s clusters

mkdir -p multicluster
cd multicluster
cat << EOF > kind-cluster1.yaml
kind: Cluster
apiVersion: "kind.x-k8s.io/v1alpha4"
networking:
  apiServerAddress: "172.26.128.224"
  podSubnet: "10.10.0.0/16"
  serviceSubnet: "10.11.0.0/16"
nodes:
  - role: control-plane
    image: registry.cn-hangzhou.aliyuncs.com/seam/node:v1.24.15
  - role: worker
    image: registry.cn-hangzhou.aliyuncs.com/seam/node:v1.24.15
    kubeadmConfigPatches:
    - |
      kind: JoinConfiguration
      nodeRegistration:
        kubeletExtraArgs:
          node-labels: "topology.kubernetes.io/region=sg,topology.kubernetes.io/zone=az01"
EOF

cat << EOF > kind-cluster2.yaml
kind: Cluster
apiVersion: "kind.x-k8s.io/v1alpha4"
networking:
  apiServerAddress: "172.26.128.224"
  podSubnet: "10.12.0.0/16"
  serviceSubnet: "10.13.0.0/16"
nodes:
  - role: control-plane
    image: registry.cn-hangzhou.aliyuncs.com/seam/node:v1.24.15
  - role: worker
    image: registry.cn-hangzhou.aliyuncs.com/seam/node:v1.24.15
    kubeadmConfigPatches:
    - |
      kind: JoinConfiguration
      nodeRegistration:
        kubeletExtraArgs:
          node-labels: "topology.kubernetes.io/region=sg,topology.kubernetes.io/zone=az02"
EOF
kind create cluster --name cluster1 --kubeconfig=istio-multicluster --config=kind-cluster1.yaml
kind create cluster --name cluster2 --kubeconfig=istio-multicluster --config=kind-cluster2.yaml

kubectl config rename-context kind-cluster1  cluster1 --kubeconfig istio-multicluster
kubectl config rename-context kind-cluster2  cluster2 --kubeconfig istio-multicluster
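Both kind clusters write into the same istio-multicluster kubeconfig, so after renaming the contexts a quick check that both are usable (sketch):

kubectl config get-contexts --kubeconfig istio-multicluster
kubectl get nodes --context cluster1 --kubeconfig istio-multicluster
kubectl get nodes --context cluster2 --kubeconfig istio-multicluster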