环境准备：利用 kind 快速创建 Kubernetes 集群环境
1
| LOCAL_IP=172.25.163.181
|
创建控制面集群
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
| cat << EOF > controler.yaml
kind: Cluster
apiVersion: "kind.x-k8s.io/v1alpha4"
kubeadmConfigPatches:
- |
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
metadata:
name: controler
imageRepository: registry.aliyuncs.com/google_containers
networking:
apiServerAddress: ${LOCAL_IP}
podSubnet: "10.8.0.0/16"
serviceSubnet: "10.9.0.0/16"
nodes:
- role: control-plane
image: kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6
extraPortMappings:
- containerPort: 5443
hostPort: 5443
protocol: TCP
EOF
kind create cluster --name controler --config=controler.yaml
|
创建成员(member1)集群
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
| cat << EOF > member1.yaml
kind: Cluster
apiVersion: "kind.x-k8s.io/v1alpha4"
kubeadmConfigPatches:
- |
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
metadata:
name: member1
imageRepository: registry.aliyuncs.com/google_containers
networking:
apiServerAddress: ${LOCAL_IP}
podSubnet: "10.10.0.0/16"
serviceSubnet: "10.11.0.0/16"
nodes:
- role: control-plane
image: kindest/node:v1.20.15@sha256:a32bf55309294120616886b5338f95dd98a2f7231519c7dedcec32ba29699394
EOF
kind create cluster --name member1 --config=member1.yaml
|
创建成员(member2)集群
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
| cat << EOF > member2.yaml
kind: Cluster
apiVersion: "kind.x-k8s.io/v1alpha4"
kubeadmConfigPatches:
- |
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
metadata:
name: member2
imageRepository: registry.aliyuncs.com/google_containers
networking:
apiServerAddress: ${LOCAL_IP}
podSubnet: "10.12.0.0/16"
serviceSubnet: "10.13.0.0/16"
nodes:
- role: control-plane
image: kindest/node:v1.20.15@sha256:a32bf55309294120616886b5338f95dd98a2f7231519c7dedcec32ba29699394
EOF
kind create cluster --name member2 --config=member2.yaml
|
获取集群kubeconfig
1
2
| kind get kubeconfig --name=member1 > member1.conf
kind get kubeconfig --name=member2 > member2.conf
|
拉取镜像并加载到控制面集群
1
2
3
4
5
6
7
8
9
| docker pull docker.io/karmada/karmada-aggregated-apiserver:v1.6.0
docker pull registry.k8s.io/kube-apiserver:v1.25.4
docker pull docker.io/karmada/karmada-scheduler:v1.6.0
docker pull docker.io/karmada/karmada-webhook:v1.6.0
docker pull registry.k8s.io/kube-controller-manager:v1.25.4
docker pull registry.k8s.io/etcd:3.5.3-0
docker pull docker.io/karmada/karmada-controller-manager:v1.6.0
docker pull docker.io/cfssl/cfssl:latest
docker pull docker.io/bitnami/kubectl:latest
|
1
2
3
4
5
6
7
8
9
| kind load docker-image docker.io/karmada/karmada-aggregated-apiserver:v1.6.0 --name controler
kind load docker-image registry.k8s.io/kube-apiserver:v1.25.4 --name controler
kind load docker-image docker.io/karmada/karmada-scheduler:v1.6.0 --name controler
kind load docker-image docker.io/karmada/karmada-webhook:v1.6.0 --name controler
kind load docker-image registry.k8s.io/kube-controller-manager:v1.25.4 --name controler
kind load docker-image registry.k8s.io/etcd:3.5.3-0 --name controler
kind load docker-image docker.io/karmada/karmada-controller-manager:v1.6.0 --name controler
kind load docker-image cfssl/cfssl:v1.6.4 --name controler
kind load docker-image docker.io/bitnami/kubectl:1.25.4 --name controler
|
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
| docker exec -ti controler-control-plane bash
root@controler-control-plane:/# crictl images
IMAGE TAG IMAGE ID SIZE
docker.io/karmada/karmada-aggregated-apiserver v1.6.0 3582a8dea29f3 91.4MB
docker.io/karmada/karmada-controller-manager v1.6.0 e1df9f3a30f17 92MB
docker.io/karmada/karmada-scheduler v1.6.0 1e3b84078d2d1 70.6MB
docker.io/karmada/karmada-webhook v1.6.0 da3eec9e5aed7 67.5MB
docker.io/kindest/kindnetd v20210326-1e038dc5 f37b7c809e5dc 54.8MB
docker.io/rancher/local-path-provisioner v0.0.14 2b703ea309660 12.3MB
k8s.gcr.io/build-image/debian-base v2.1.0 3cc9c70b44747 22.8MB
k8s.gcr.io/coredns/coredns v1.8.0 1a1f05a2cd7c2 11.6MB
k8s.gcr.io/etcd 3.4.13-0 05b738aa1bc63 135MB
k8s.gcr.io/kube-apiserver v1.21.1 18e61c783b417 118MB
k8s.gcr.io/kube-controller-manager v1.21.1 0c6dccae49de8 113MB
k8s.gcr.io/kube-proxy v1.21.1 4bbef4ca108cd 128MB
k8s.gcr.io/kube-scheduler v1.21.1 8c783dd252088 48.7MB
k8s.gcr.io/pause 3.5 f7ff3c4042631 253kB
registry.k8s.io/etcd 3.5.3-0 a9a710bb96df0 180MB
registry.k8s.io/kube-apiserver v1.25.4 8e49cdf98f4d1 125MB
registry.k8s.io/kube-controller-manager v1.25.4 8296621317758 114MB
|
控制集群安装Karmada
1
2
| helm repo add karmada-charts https://raw.githubusercontent.com/karmada-io/karmada/master/charts
helm --namespace karmada-system upgrade -i karmada karmada-charts/karmada --version=1.6.0 --create-namespace
|
1
2
3
4
5
6
7
8
9
| kubectl get pod -n karmada-system
NAME READY STATUS RESTARTS AGE
etcd-0 1/1 Running 0 72s
karmada-aggregated-apiserver-5dbbcbb459-j95p7 1/1 Running 0 72s
karmada-apiserver-7796648bc4-ksl6j 1/1 Running 0 72s
karmada-controller-manager-57f965864b-2gbjm 1/1 Running 0 30s
karmada-kube-controller-manager-578888599b-hctr9 1/1 Running 2 72s
karmada-scheduler-76b769f497-6x9nn 1/1 Running 0 72s
karmada-webhook-7fd6fd5cb8-dt929 1/1 Running 1 72s
|
获取Karmada集群的kubeconfig
1
2
3
| kubectl get secret -n karmada-system karmada-kubeconfig -o jsonpath={.data.kubeconfig} | base64 -d > karmada-apiserver
kubecm add -f karmada-apiserver
kubecm switch karmada-apiserver
|
安装CLI工具
1
| kubectl krew install karmada
|
1
| curl -s https://raw.githubusercontent.com/karmada-io/karmada/master/hack/install-cli.sh | sudo bash
|
添加成员集群
1
2
| kubectl karmada join member1 --cluster-context=kind-member1
kubectl karmada join member2 --cluster-context=kind-member2
|
1
2
3
4
5
➜ demo git:(v1.6.0) ✗ docker cp member1-control-plane:/etc/kubernetes/admin.conf member1.conf
➜ demo git:(v1.6.0) ✗ docker cp member2-control-plane:/etc/kubernetes/admin.conf member2.conf
➜ demo git:(v1.6.0) ✗ docker cp member1.conf controler-control-plane:/tmp
➜ demo git:(v1.6.0) ✗ docker cp member2.conf controler-control-plane:/tmp
|