-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathHA-3-MASTER-3-WORKERS-PART-2
More file actions
278 lines (252 loc) · 8.96 KB
/
HA-3-MASTER-3-WORKERS-PART-2
File metadata and controls
278 lines (252 loc) · 8.96 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
#!/bin/bash
# Part 2 of an HA Kubernetes bootstrap: 3 control-plane nodes behind a VIP
# plus 3 workers. Run once on each node, passing that node's role.
#
# Usage: ./script.sh <master1|master2|master3|worker1|worker2|worker3>
set -euo pipefail
set -x
# =========================================
# Configuration - EDIT THESE
# =========================================
MASTER_IPS=("192.168.31.101" "192.168.31.102" "192.168.31.103")
WORKER_IPS=("192.168.31.111" "192.168.31.112" "192.168.31.113")
VIP="192.168.31.100"   # load-balanced kube-apiserver address
# Fail fast with a usage message instead of the bare "unbound variable"
# error that `set -u` would otherwise print when the role argument is missing.
NODE_TYPE="${1:?usage: $0 master1|master2|master3|worker1|worker2|worker3}"
# First address from `hostname -I` — assumes the primary NIC is listed
# first; TODO confirm on multi-homed hosts.
NODE_IP=$(hostname -I | awk '{print $1}')
POD_CIDR="10.11.0.0/16"
# NOTE(review): target Kubernetes version; the apt repo below is pinned to
# the matching v1.35 minor — keep the two in sync when upgrading.
K8S_VERSION="1.35.0"
# =========================================
# Pre-requisites - Disable swap, load modules
# =========================================
# kubelet refuses to start with swap enabled. `swapoff -a` only lasts until
# the next reboot, so also comment the swap entry out of /etc/fstab.
sudo swapoff -a
sudo sed -i '/\sswap\s/ s/^/#/' /etc/fstab
# Kernel modules required by containerd (overlayfs) and kube-proxy/CNI
# (bridge netfilter); persist them across reboots via modules-load.d.
sudo modprobe overlay
sudo modprobe br_netfilter
sudo tee /etc/modules-load.d/k8s.conf >/dev/null <<EOF
overlay
br_netfilter
EOF
# Let iptables see bridged traffic and enable IP forwarding for pod routing.
sudo tee /etc/sysctl.d/k8s.conf >/dev/null <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
sudo sysctl --system
# =========================================
# Install Docker / Containerd
# =========================================
sudo apt update
sudo apt install -y ca-certificates curl gnupg lsb-release apt-transport-https
# Docker apt repository (per docs.docker.com). `--batch --yes` keeps gpg from
# prompting (and hanging the script) when the keyring already exists on a
# re-run; a+r lets apt's unprivileged fetcher read the key.
sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --batch --yes --dearmor -o /etc/apt/keyrings/docker.gpg
sudo chmod a+r /etc/apt/keyrings/docker.gpg
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | sudo tee /etc/apt/sources.list.d/docker.list
sudo apt update
sudo apt install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
# Regenerate containerd's default config and switch the runc cgroup driver to
# systemd (required so containerd and kubelet agree on cgroup management).
# Match the key regardless of indentation — anchoring on a single leading
# space is fragile against the generated config's actual indent.
sudo mkdir -p /etc/containerd
sudo containerd config default | sudo tee /etc/containerd/config.toml
sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
sudo systemctl restart containerd
sudo systemctl enable containerd
# =========================================
# Install Kubernetes components
# =========================================
# Derive the apt repo's minor-version path (v1.35) from K8S_VERSION so the
# repo and the configured version cannot silently drift apart.
K8S_MINOR="v${K8S_VERSION%.*}"
curl -fsSL "https://pkgs.k8s.io/core:/stable:/${K8S_MINOR}/deb/Release.key" | sudo gpg --batch --yes --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/${K8S_MINOR}/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt update
sudo apt install -y kubelet kubeadm kubectl
# Hold the packages so routine `apt upgrade` cannot skew cluster versions.
sudo apt-mark hold kubelet kubeadm kubectl
sudo systemctl enable --now kubelet
# =========================================
# HAProxy for API load balancing (only master1)
# =========================================
# NOTE(review): the VIP (.100) is not one of master1's own addresses, so
# haproxy's `bind ${VIP}:6443` fails unless the VIP is already assigned here
# (e.g. by keepalived from part 1) or non-local binds are permitted. Enable
# ip_nonlocal_bind so haproxy starts in either case — confirm against the
# keepalived setup.
if [[ "$NODE_TYPE" == "master1" ]]; then
  sudo apt install -y haproxy
  echo "net.ipv4.ip_nonlocal_bind = 1" | sudo tee /etc/sysctl.d/99-haproxy-nonlocal.conf
  sudo sysctl --system
  # TCP-mode passthrough: TLS terminates at the apiservers, not here.
  sudo tee /etc/haproxy/haproxy.cfg >/dev/null <<EOF
global
    log /dev/log local0
    maxconn 4096
defaults
    log global
    mode tcp
    timeout connect 10s
    timeout client 1m
    timeout server 1m
frontend kubernetes-frontend
    bind ${VIP}:6443
    mode tcp
    default_backend kubernetes-backend
backend kubernetes-backend
    mode tcp
EOF
  # One health-checked backend line per control-plane node.
  for ip in "${MASTER_IPS[@]}"; do
    echo "    server $ip $ip:6443 check" | sudo tee -a /etc/haproxy/haproxy.cfg
  done
  sudo systemctl restart haproxy
  sudo systemctl enable haproxy
fi
# =========================================
# Kubernetes Init / Join
# =========================================
if [[ "$NODE_TYPE" == "master1" ]]; then
  sudo kubeadm init --control-plane-endpoint "${VIP}:6443" --upload-certs --pod-network-cidr="$POD_CIDR" | tee /tmp/kubeadm-init.out
  mkdir -p "$HOME/.kube"
  # Plain cp, not `cp -i`: an interactive overwrite prompt would stall a
  # non-interactive (re-)run.
  sudo cp /etc/kubernetes/admin.conf "$HOME/.kube/config"
  sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
  # Regenerate the join commands instead of scraping /tmp/kubeadm-init.out:
  # kubeadm prints the join command across backslash-continued lines, so a
  # plain `grep "kubeadm join"` drops the --discovery-token-ca-cert-hash
  # continuation, and appending the control-plane flags as a second line
  # would make them a separate (broken) command.
  JOIN_CMD=$(kubeadm token create --print-join-command)
  CERT_KEY=$(sudo kubeadm init phase upload-certs --upload-certs | tail -1)
  echo "${JOIN_CMD} --control-plane --certificate-key ${CERT_KEY}" > /tmp/kubeadm_join_cp.sh
  echo "${JOIN_CMD}" > /tmp/kubeadm_join_worker.sh
  # NOTE(review): these join scripts live on master1's /tmp — copy them to
  # the other nodes before running this script there.
elif [[ "$NODE_TYPE" =~ ^master[23]$ ]]; then
  sudo bash /tmp/kubeadm_join_cp.sh
elif [[ "$NODE_TYPE" =~ ^worker[1-3]$ ]]; then
  sudo bash /tmp/kubeadm_join_worker.sh
fi
# =========================================
# Install Calico CNI (only after all masters joined)
# =========================================
if [[ "$NODE_TYPE" == "master1" ]]; then
  kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.31.3/manifests/tigera-operator.yaml
  curl -O https://raw.githubusercontent.com/projectcalico/calico/v3.31.3/manifests/custom-resources.yaml
  # Replace the manifest's default pod CIDR with ours. Use '|' as the sed
  # delimiter: POD_CIDR contains a '/' (10.11.0.0/16) which would terminate
  # a /-delimited s/// expression and make sed fail.
  sed -i "s|cidr: 192\.168\.0\.0/16|cidr: ${POD_CIDR}|g" custom-resources.yaml
  kubectl create -f custom-resources.yaml
fi
# =========================================
# Metrics Server (master1 only)
# =========================================
# Guarded: worker nodes have no kubeconfig, so an unguarded kubectl here
# would fail and, under `set -e`, abort the whole worker bootstrap.
if [[ "$NODE_TYPE" == "master1" ]]; then
  kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
  # --kubelet-insecure-tls: kubelet serving certs here are self-signed.
  kubectl -n kube-system patch deployment metrics-server \
    --patch '{"spec":{"template":{"spec":{"containers":[{"name":"metrics-server","args":["--kubelet-insecure-tls","--cert-dir=/tmp","--secure-port=10250","--kubelet-preferred-address-types=InternalIP","--kubelet-use-node-status-port","--metric-resolution=15s"]}]}}}}'
  kubectl rollout restart deployment metrics-server -n kube-system
fi
# =========================================
# Nginx Deployment + HPA (master1 only)
# =========================================
# Guarded: worker nodes have no kubeconfig, so an unguarded kubectl here
# would fail and, under `set -e`, abort the whole worker bootstrap.
# `|| true` keeps re-runs idempotent when the objects already exist.
if [[ "$NODE_TYPE" == "master1" ]]; then
  kubectl create deployment nginx --image=nginx --replicas=1 || true
  kubectl expose deployment nginx --port=80 --type=NodePort || true
  # CPU requests are required for the HPA's utilization target to work.
  kubectl patch deployment nginx -p '{"spec":{"template":{"spec":{"containers":[{"name":"nginx","resources":{"requests":{"cpu":"100m","memory":"128Mi"},"limits":{"cpu":"500m","memory":"256Mi"}}}]}}}}'
  kubectl autoscale deployment nginx --cpu-percent=50 --min=1 --max=5 || true
fi
# =========================================
# Kubernetes Dashboard (master1 only)
# =========================================
# Guarded: worker nodes have no kubeconfig, so an unguarded kubectl here
# would fail and, under `set -e`, abort the whole worker bootstrap.
if [[ "$NODE_TYPE" == "master1" ]]; then
  # Self-signed TLS cert for the dashboard's HTTPS endpoint.
  mkdir -p /tmp/dashboard-tls
  openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
    -keyout /tmp/dashboard-tls/dashboard.key \
    -out /tmp/dashboard-tls/dashboard.crt \
    -subj "/CN=dashboard.cluster.local/O=Kubernetes-Dashboard"
  TLS_CERT=$(base64 -w0 /tmp/dashboard-tls/dashboard.crt)
  TLS_KEY=$(base64 -w0 /tmp/dashboard-tls/dashboard.key)
  # All-in-one manifest: namespace, cluster-admin service account (NOTE:
  # cluster-admin is very broad — fine for a lab, not for production),
  # TLS secret, NodePort service, deployment, and an ingress.
  cat <<EOF >/tmp/dashboard-all-in-one.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-admin-sa
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-admin-sa
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: dashboard-admin-sa
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  name: dashboard-tls
  namespace: kubernetes-dashboard
type: kubernetes.io/tls
data:
  tls.crt: $TLS_CERT
  tls.key: $TLS_KEY
---
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
  - port: 443
    targetPort: 8443
    nodePort: 31484
  selector:
    k8s-app: kubernetes-dashboard
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      serviceAccountName: dashboard-admin-sa
      containers:
      - name: kubernetes-dashboard
        image: kubernetesui/dashboard:v2.7.0
        ports:
        - containerPort: 8443
        args:
        - --tls-cert-file=/certs/dashboard.crt
        - --tls-key-file=/certs/dashboard.key
        volumeMounts:
        - name: dashboard-certs
          mountPath: /certs
      volumes:
      - name: dashboard-certs
        secret:
          secretName: dashboard-tls
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kubernetes-dashboard-ingress
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/backend-protocol: HTTPS
spec:
  rules:
  - host: dashboard.cluster.local
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kubernetes-dashboard
            port:
              number: 443
  tls:
  - hosts:
    - dashboard.cluster.local
    secretName: dashboard-tls
EOF
  kubectl apply -f /tmp/dashboard-all-in-one.yaml
fi
# =========================================
# Auto /etc/hosts entry + port-forward for Dashboard (master1 only)
# =========================================
if [[ "$NODE_TYPE" == "master1" ]]; then
  DASHBOARD_HOST="dashboard.cluster.local"
  if ! grep -q "$DASHBOARD_HOST" /etc/hosts; then
    echo "$NODE_IP $DASHBOARD_HOST" | sudo tee -a /etc/hosts
  fi
  # --address 0.0.0.0 is required: /etc/hosts maps the dashboard host to
  # $NODE_IP, but port-forward listens only on 127.0.0.1 by default, so the
  # URL printed below would otherwise be unreachable.
  nohup kubectl -n kubernetes-dashboard port-forward --address 0.0.0.0 service/kubernetes-dashboard 8443:443 >/tmp/dashboard-port-forward.log 2>&1 &
  echo "Dashboard URL: https://$DASHBOARD_HOST:8443/"
  echo "Login token: kubectl -n kubernetes-dashboard create token dashboard-admin-sa"
fi