Merge branch 'sandervanvugt:master' into master
This commit is contained in: commit b0669a2bdc
BIN  Installing Ubuntu Server 22-04.pdf  Normal file
Binary file not shown.
18  README.md
@@ -1 +1,17 @@
 # files for my CKA online course
+This Git repository contains supporting files for my "Certified Kubernetes Administrator (CKA)" video course. See https://sandervanvugt.com for more details. It is also used in the "CKA Crash Course" that I'm teaching at https://learning.oreilly.com.
+In this course you need to have your own lab environment. This lab environment should consist of 3 virtual machines, using Ubuntu LTS server 20.04 or later (22.04 is recommended).
+Make sure the virtual machines meet the following requirements:
+* 2 GB RAM
+* 2 vCPUs
+* 20 GB disk space
+* No swap
+For instructions on how to set up Ubuntu Server 22.04, see the document "Installing Ubuntu 22-04" in this Git repository.
+For information on getting started with VirtualBox, see this video: https://www.youtube.com/watch?v=4qwUHSaIJdY
+Alternatively, check out my video course "Virtualization for Everyone" for an introduction to different virtualization solutions.
+
+To set up the required tools on the cluster nodes, the following scripts are provided (see the usage sketch below):
+* setup-container.sh installs containerd. Run this script first.
+* setup-kubetools.sh installs the latest version of kubelet, kubeadm and kubectl.
+* setup-kubetools-previousversion.sh installs the previous major version of kubelet, kubeadm and kubectl. Use this if you want to practice cluster upgrades.
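
A minimal usage sketch of these scripts, assuming a fresh Ubuntu 22.04 VM and the script names listed above (run this on every node; the kubeadm steps that follow afterwards differ per node):

    # clone the course repository and enter it
    git clone https://github.com/sandervanvugt/cka
    cd cka
    # step 1: install the containerd container runtime
    ./setup-container.sh
    # step 2: install kubelet, kubeadm and kubectl -- pick ONE of these two
    ./setup-kubetools.sh                    # latest version
    ./setup-kubetools-previousversion.sh    # previous version, for upgrade practice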
2  RESOURCES.txt  Normal file
@ -0,0 +1,2 @@
|
|||||||
|
ON DEMAND COURSE
|
||||||
|
* Certified Kubernetes Administrator (CKA) 3rd edition: https://learning.oreilly.com/course/certified-kubernetes-administrator/9780138103804/
|
||||||
291  april-2024-history.txt  Normal file
@ -0,0 +1,291 @@
|
|||||||
|
1 ip a
|
||||||
|
2 sudo apt install git vim -y
|
||||||
|
3 git clone https://github.com/sandervanvugt/cka
|
||||||
|
4 cd cka
|
||||||
|
5 ls *sh
|
||||||
|
6 ./setup-container.sh
|
||||||
|
7 ls
|
||||||
|
8 ls *sh
|
||||||
|
9 ./setup-kubetools-previousversion.sh
|
||||||
|
10 sudo apt install jq -y
|
||||||
|
11 history
|
||||||
|
12 sudo kubeadm init
|
||||||
|
13 history
|
||||||
|
14 mkdir ~/.kube
|
||||||
|
15 sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
|
||||||
|
16 sudo chown $(id -u):$(id -g) $HOME/.kube/config
|
||||||
|
17 kubectl get all
|
||||||
|
18 #kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
|
||||||
|
19 kubectl get pods -n kube-system
|
||||||
|
20 kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
|
||||||
|
21 kubectl get pods -n kube-system
|
||||||
|
22 kubectl get ds -n kube-system
|
||||||
|
23 kubectl get nodes
|
||||||
|
24 history
|
||||||
|
25 kubectl create testapp --image=nginx --replicas=3
|
||||||
|
26 kubectl create deploy testapp --image=nginx --replicas=3
|
||||||
|
27 kubectl get all -o wide
|
||||||
|
28 history
|
||||||
|
29 cd ..
|
||||||
|
30 ls
|
||||||
|
31 tar xvf helm-v3.14.4-linux-amd64.tar.gz
|
||||||
|
32 sudo mv linux-amd64/helm /usr/local/bin
|
||||||
|
33 helm upgrade --install ingress-nginx ingress-nginx --repo https://kubernetes.github.io/ingress-nginx --namespace ingress-nginx --create-namespace
|
||||||
|
34 source <(kubectl completion bash)
|
||||||
|
35 kubectl get pods -n ingress-nginx
|
||||||
|
36 kubectl create deploy nginxsvc --image=nginx --replicas=3
|
||||||
|
37 kubectl get all --selector app=nginxsvc
|
||||||
|
38 kubectl expose deploy nginxsvc
|
||||||
|
39 kubectl expose deploy nginxsvc --port=80
|
||||||
|
40 kubectl get all --selector app=nginxsvc
|
||||||
|
41 kubectl describe svc nginxsvc
|
||||||
|
42 kubectl create ingress nginxsvc --class=nginx --rule=nginxsvc.info/*=nginxsvc:80
|
||||||
|
43 kubectl describe ing nginxsvc
|
||||||
|
44 sudo vim /etc/hosts
|
||||||
|
45 kubectl port-forward -n ingress-nginx svc/ingress-nginx-controller 8080:80
|
||||||
|
46 bg
|
||||||
|
47 curl nginxsvc.info
|
||||||
|
48 curl nginxsvc.info:8080
|
||||||
|
49 history
|
||||||
|
50 curl nginxsvc.info:8080
|
||||||
|
51 kubectl edit svc nginxsvc
|
||||||
|
52 curl nginxsvc.info:8080
|
||||||
|
53 kubectl describe ing nginxsvc
|
||||||
|
54 kubectl describe svc nginxsvc
|
||||||
|
55 kubectl edit svc nginxsvc
|
||||||
|
56 curl nginxsvc.info:8080
|
||||||
|
57 history
|
||||||
|
58 kubectl create ns limited
|
||||||
|
59 kubectl create quota -h | less
|
||||||
|
60 kubectl create quota qtest --hard pods=3,cpu=100m,memory=500Mi --namespace=limited
|
||||||
|
61 kubectl describe quota -n limited
|
||||||
|
62 kubectl describe ns limited
|
||||||
|
63 kubectl create deploy nginx --image=nginx --replicas=3 -n limited
|
||||||
|
64 kubectl get all -n limited
|
||||||
|
65 kubectl describe -n limited rs nginx-7854ff8877
|
||||||
|
66 kubectl describe ns limited
|
||||||
|
67 kubectl set resources -h | less
|
||||||
|
68 kubectl set -n limited resources deploy nginx --requests cpu=100m,memory=5Mi --limits cpu=200m,memory=20Mi
|
||||||
|
69 kubectl get all -n limited
|
||||||
|
70 kubectl describe ns limited
|
||||||
|
71 kubectl edit quota -n limited qtest
|
||||||
|
72 kubectl describe ns limited
|
||||||
|
73 kubectl scale -n limited deployment nginx --replicas=2
|
||||||
|
74 kubectl scale -n limited deployment nginx --replicas=3
|
||||||
|
75 kubectl describe ns limited
|
||||||
|
76 history
|
||||||
|
77 kubectl edit node control
|
||||||
|
78 kubectl cordon worker1
|
||||||
|
79 kubectl edit node worker1
|
||||||
|
80 kubectl get nodes
|
||||||
|
81 kubectl uncordon worker1
|
||||||
|
82 kubectl get pods -n kube-system
|
||||||
|
83 kubectl drain node worker1
|
||||||
|
84 kubectl drain worker1
|
||||||
|
85 kubectl get nodes
|
||||||
|
86 kubectl edit node worker1
|
||||||
|
87 kubectl get nodes
|
||||||
|
88 kubectl get pods -o wide
|
||||||
|
89 kubectl drain worker1
|
||||||
|
90 kubectl get nodes
|
||||||
|
91 kubectl drain worker1 --ignore-daemonsets
|
||||||
|
92 kubectl edit node worker1
|
||||||
|
93 kubectl get pods -o wide
|
||||||
|
94 kubectl get nodes
|
||||||
|
95 kubectl create deploy ready --image=nginx --replicas=3
|
||||||
|
96 kubectl get pods -o wide --selector app=ready
|
||||||
|
97 kubectl get nodes
|
||||||
|
98 kubectl scale deploy ready --replicas=0
|
||||||
|
99 kubectl scale deploy ready --replicas=3
|
||||||
|
100 kubectl get pods -o wide --selector app=ready
|
||||||
|
101 cd cka/
|
||||||
|
102 vim networkpolicy-example.yaml
|
||||||
|
103 git pull
|
||||||
|
104 vim nwpolicy-complete-example.yaml
|
||||||
|
105 kubectl apply -f nwpolicy-complete-example.yaml
|
||||||
|
106 kubectl expose pod nginx --port=80
|
||||||
|
107 kubectl exec -it busybox -- wget --spider --timeout=1 nginx
|
||||||
|
108 kubectl label pod busybox access=true
|
||||||
|
109 kubectl exec -it busybox -- wget --spider --timeout=1 nginx
|
||||||
|
110 kubectl create ns nwp-namespace
|
||||||
|
111 vim nwp-lab9-1.yaml
|
||||||
|
112 kubectl apply -f nwp-lab9-1.yaml
|
||||||
|
113 kubectl expose pod nwp-nginx --port=80
|
||||||
|
114 kubectl exec -n nwp-namespace nwp-busybox -- wget --spider --timeout=1 nwp-nginx
|
||||||
|
115 kubectl exec -n nwp-namespace nwp-busybox -- nslookup nwp-nginx
|
||||||
|
116 kubectl exec -n nwp-namespace nwp-busybox -- wget --spider --timeout=1 nwp-nginx.default.svc.cluster.local
|
||||||
|
117 vim nwp-lab9-2.yaml
|
||||||
|
118 kubectl apply -f nwp-lab9-2.yaml
|
||||||
|
119 kubectl exec -n nwp-namespace nwp-busybox -- wget --spider --timeout=1 nwp-nginx.default.svc.cluster.local
|
||||||
|
120 kubectl create deploy busybox --image=busybox -- sleep 3600
|
||||||
|
121 kubectl exec -it busybox-6fc6c44c5b-x5vrx -- wget --spider --timeput=1 nwp-nginx
|
||||||
|
122 kubectl exec -it busybox-6fc6c44c5b-x5vrx -- wget --spider --timeout=1 nwp-nginx
|
||||||
|
123 kubectl delete -f nwp-lab9-2.yaml
|
||||||
|
124 history
|
||||||
|
125 kubectl top
|
||||||
|
126 kubectl top pod
|
||||||
|
127 cd
|
||||||
|
128 git clone https://github.com/kubernetes-sigs/metrics-server.git
|
||||||
|
129 cd metrics-server/
|
||||||
|
130 ls
|
||||||
|
131 kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
|
||||||
|
132 kubectl -n kube-system get pods
|
||||||
|
133 kubectl logs -n kube-system metrics-server-6d94bc8694-76vzc
|
||||||
|
134 kubectl -n kube-system get deploy
|
||||||
|
135 kubectl -n kube-system edit deployments.apps metrics-server
|
||||||
|
136 kubectl get pods -n kube-system
|
||||||
|
137 kubectl top pods
|
||||||
|
138 cd /etc/kubernetes/manifests/
|
||||||
|
139 ls
|
||||||
|
140 cd
|
||||||
|
141 cd -
|
||||||
|
142 less kube-scheduler.yaml
|
||||||
|
143 sudo less kube-scheduler.yaml
|
||||||
|
144 cd
|
||||||
|
145 kubectl run auto-web --image=nginx --dry-run=client -o yaml > auto-web.yaml
|
||||||
|
146 cat auto-web.yaml
|
||||||
|
147 kubectl get pods
|
||||||
|
148 cd /etc/kubernetes/manifests/
|
||||||
|
149 ls -l
|
||||||
|
150 sudo apt install etcd-client
|
||||||
|
151 sudo etcdctl --help
|
||||||
|
152 sudo ETCDCTL_API=3 etcdctl --help
|
||||||
|
153 ps aux | grep etcd
|
||||||
|
154 cd ../pki/
|
||||||
|
155 ls
|
||||||
|
156 cd etcd/
|
||||||
|
157 ls
|
||||||
|
158 cd
|
||||||
|
159 sudo ETCDCTL_API=3 etcdctl --endpoints localhost:2379 --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/etcd/server.crt --key /etc/kubernetes/pki/etcd/server.key get / --prefix --keys-only
|
||||||
|
160 sudo ETCDCTL_API=3 etcdctl --endpoints localhost:2379 --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/etcd/server.crt --key /etc/kubernetes/pki/etcd/server.key snapshot save /tmp/etcdbackup.db
|
||||||
|
161 ls -l /tmp/etcdbackup.db
|
||||||
|
162 sudo ETCDCTL_API=3 etcdctl --endpoints localhost:2379 --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/etcd/server.crt --key /etc/kubernetes/pki/etcd/server.key snapshot save /tmp/etcdbackup-$(date +%d-%m-%y).db
|
||||||
|
163 cd /etc/kubernetes/manifests/
|
||||||
|
164 ls
|
||||||
|
165 sudo vim etcd.yaml
|
||||||
|
166 cd
|
||||||
|
167 history
|
||||||
|
168 sudo ETCDCTL_API=3 etcdctl --write-out=table snapshot status /tmp/etcdbackup.db
|
||||||
|
169 cp /tmp/etcdbackup.db /tmp/etcdbackup.db.2
|
||||||
|
170 kubectl get deploy
|
||||||
|
171 kubectl delete deploy --all
|
||||||
|
172 cd /etc/kubernetes/manifests/
|
||||||
|
173 ls
|
||||||
|
174 ls ..
|
||||||
|
175 sudo mv * ..
|
||||||
|
176 ls
|
||||||
|
177 sudo crictl ps
|
||||||
|
178 sudo mkdir /var/lib/etcd-backup
|
||||||
|
179 sudo ETCDCTL_API=3 etcdctl snapshot restore /tmp/etcdbackup.db --data-dir /var/lib/etcd-backup
|
||||||
|
180 sudo rmdir /var/lib/etcd-backup/
|
||||||
|
181 sudo ETCDCTL_API=3 etcdctl snapshot restore /tmp/etcdbackup.db --data-dir /var/lib/etcd-backup
|
||||||
|
182 sudo ls -l /var/lib/etcd-backup/
|
||||||
|
183 sudo ls -l /var/lib/etcd-backup/member
|
||||||
|
184 sudo ls -l /var/lib/etcd/member
|
||||||
|
185 sudo vim /etc/kubernetes/etcd.yaml
|
||||||
|
186 sudo mv ../*.yaml .
|
||||||
|
187 ls
|
||||||
|
188 sudo crictl ps
|
||||||
|
189 kubectl get deploy -A
|
||||||
|
190 sudo mv ../*.yaml .
|
||||||
|
191 sudo mv *.yaml ..
|
||||||
|
192 ls
|
||||||
|
193 vim ../etcd.yaml
|
||||||
|
194 sudo vim ../etcd.yaml
|
||||||
|
195 sudo mv /var/lib/etcd /var/lib/etcd.old
|
||||||
|
196 sudo mv /var/lib/etcd-backup /var/lib/etcd
|
||||||
|
197 sudo vim ../etcd.yaml
|
||||||
|
198 mv ../*.yaml .
|
||||||
|
199 sudo mv ../*.yaml .
|
||||||
|
200 sudo crictl ps
|
||||||
|
201 kubectl get deploty
|
||||||
|
202 kubectl get deploy
|
||||||
|
203 cd
|
||||||
|
204 history
|
||||||
|
205 sudo apt update
|
||||||
|
206 sudo apt-cache madison kubeadm
|
||||||
|
207 kubeadm version
|
||||||
|
208 sudo apt update
|
||||||
|
209 sudo apt-cache madison kubeadm
|
||||||
|
210 sudo apt-mark unhold kubeadm
|
||||||
|
211 udo apt-get update && sudo apt-get install -y kubeadm='1.29.3-*'
|
||||||
|
212 sudo apt-get update && sudo apt-get install -y kubeadm='1.29.3-*'
|
||||||
|
213 pager /etc/apt/sources.list.d/kubernetes.list
|
||||||
|
214 vim /etc/apt/sources.list.d/kubernetes.list
|
||||||
|
215 sudo vim /etc/apt/sources.list.d/kubernetes.list
|
||||||
|
216 history
|
||||||
|
217 sudo apt update
|
||||||
|
218 sudo apt-cache madison kubeadm
|
||||||
|
219 sudo apt-get update && sudo apt-get install -y kubeadm='1.29.3-1'
|
||||||
|
220 sudo apt-get update && sudo apt-get install -y kubeadm='1.29.3-1.1'
|
||||||
|
221 sudo apt-mark hold kubeadm
|
||||||
|
222 kubeadm version
|
||||||
|
223 sudo kubeadm upgrade plan
|
||||||
|
224 sudo kubeadm upgrade apply v1.29.3
|
||||||
|
225 sudo apt-mark unhold kubelet kubectl
|
||||||
|
226 sudo apt-get update && sudo apt-get install -y kubelet='1.29.3-1.1' kubectl='1.29.3-1.1'
|
||||||
|
227 sudo apt-mark hold kubelet kubectl
|
||||||
|
228 sudo systemctl daemon-reload
|
||||||
|
229 sudo systemctl restart kubelet
|
||||||
|
230 kubectl get nodes
|
||||||
|
231 kubectl get pods
|
||||||
|
232 history
|
||||||
|
233 kubectl edit node control
|
||||||
|
234 kubectl get pods -A -o wide
|
||||||
|
235 kubectl explain pod.spec.nodeSelector
|
||||||
|
236 cd cka/
|
||||||
|
237 vim selector-pod.yaml
|
||||||
|
238 kubectl apply -f selector-pod.yaml
|
||||||
|
239 kubectl delete pods --all
|
||||||
|
240 kubectl apply -f selector-pod.yaml
|
||||||
|
241 kubectl get pods
|
||||||
|
242 kubectl describe pods nginx
|
||||||
|
243 kubectl label nodes worker2 disktype=ssd
|
||||||
|
244 kubectl get pods
|
||||||
|
245 kubectl taint nodes worker1 example-key=value1:NoSchedule
|
||||||
|
246 kubectl describe nodes worker1
|
||||||
|
247 kubectl cordon worker2
|
||||||
|
248 kubectl create deploy nginx-taint --image=nginx
|
||||||
|
249 kubectl scale deploy nginx-taint --replicas=3
|
||||||
|
250 kubectl get pods -o wide
|
||||||
|
251 vim taint-toleration.yaml
|
||||||
|
252 kubectl apply -f taint-toleration.yaml
|
||||||
|
253 kubectl get pods
|
||||||
|
254 kubectl get pods -o wide
|
||||||
|
255 kubectl edit node worker1
|
||||||
|
256 kubectl edit node worker2
|
||||||
|
257 kubectl get nodes
|
||||||
|
258 kubectl get pods -o wide
|
||||||
|
259 vim pod-with-node-affinity.yaml
|
||||||
|
260 vim pod-with-node-anti-affinity.yaml
|
||||||
|
261 vim pod-with-node-antiaffinity.yaml
|
||||||
|
262 vim pod-with-pod-affinity.yaml
|
||||||
|
263 history
|
||||||
|
264 kubectl create role -h | less
|
||||||
|
265 #kubectl create role pod-reader --verb=get --verb=list --verb=watch --resource=pods
|
||||||
|
266 kubectl create ns roles
|
||||||
|
267 kubectl create role viewers --verb=get --verb=list --verb=watch --resource=pods -n roles
|
||||||
|
268 kubectl run viewpod --image=nginx --dry-run=client -o yaml > viewpod.yaml
|
||||||
|
269 kubectl create sa viewers
|
||||||
|
270 kubectl explain pod.spec.serviceaccount
|
||||||
|
271 kubectl explain pod.spec.serviceAccount
|
||||||
|
272 vim viewpod.yaml
|
||||||
|
273 kubectl get sa -n roles
|
||||||
|
274 kubectl create sa viewers -n roles
|
||||||
|
275 kubectl get sa -n roles
|
||||||
|
276 vim viewpod.yaml
|
||||||
|
277 kubectl explain pod.metadata
|
||||||
|
278 vim viewpod.yaml
|
||||||
|
279 kubectl get roles -n roles
|
||||||
|
280 kubectl create rolebinding -h | less
|
||||||
|
281 #kubectl create rolebinding admin-binding --role=admin --serviceaccount=monitoring:sa-dev
|
||||||
|
282 kubectl -n roles create rolebinding viewers --role=viewers --serviceaccount=roles:viewers
|
||||||
|
283 kubectl get roles,rolebindings,sa -n roles
|
||||||
|
284 history
|
||||||
|
285 kubectl api-resources | less
|
||||||
|
286 kubectl get roles,rolebindings,sa -n roles
|
||||||
|
287 kubectl get pods -n kube-system
|
||||||
|
288 kubectl get -n kube-system pod metrics-server-67fc4df55-9ddb8 -o yaml | grep -i serviceacc
|
||||||
|
289 kubectl config view
|
||||||
|
290 history
|
||||||
|
|
||||||
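
Steps 159-202 above walk through an etcd backup and restore; condensed into one hedged sketch (endpoints, certificate paths, and directories as used in this history; the backup file name is illustrative):

    # back up etcd over its local endpoint, authenticating with the etcd server certificates
    sudo ETCDCTL_API=3 etcdctl --endpoints localhost:2379 \
      --cacert /etc/kubernetes/pki/etcd/ca.crt \
      --cert /etc/kubernetes/pki/etcd/server.crt \
      --key /etc/kubernetes/pki/etcd/server.key \
      snapshot save /tmp/etcdbackup.db
    # stop the control plane static Pods by moving their manifests out of the manifests directory
    cd /etc/kubernetes/manifests/ && sudo mv * ..
    # restore the snapshot into a fresh data directory, then swap it into place
    sudo ETCDCTL_API=3 etcdctl snapshot restore /tmp/etcdbackup.db --data-dir /var/lib/etcd-backup
    sudo mv /var/lib/etcd /var/lib/etcd.old
    sudo mv /var/lib/etcd-backup /var/lib/etcd
    # bring the static Pods back and verify
    sudo mv ../*.yaml . && sudo crictl ps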
74  calico.yaml
@@ -1,8 +1,6 @@
 ---
 # Source: calico/templates/calico-kube-controllers.yaml
 # This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict
-#
-# this is where this comes from: https://github.com/projectcalico/calico/blob/master/manifests/calico.yaml
 
 apiVersion: policy/v1
 kind: PodDisruptionBudget
@@ -31,6 +29,13 @@ metadata:
   name: calico-node
   namespace: kube-system
 ---
+# Source: calico/templates/calico-node.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: calico-cni-plugin
+  namespace: kube-system
+---
 # Source: calico/templates/calico-config.yaml
 # This ConfigMap is used to configure a self-hosted Calico installation.
 kind: ConfigMap
@@ -1015,7 +1020,7 @@ spec:
                       description: 'BPFEnforceRPF enforce strict RPF on all host interfaces
                         with BPF programs regardless of what is the per-interfaces or global
                         setting. Possible values are Disabled, Strict or Loose. [Default:
-                        Strict]'
+                        Loose]'
                       type: string
                     bpfExtToServiceConnmark:
                       description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit
@@ -4356,7 +4361,7 @@ rules:
     resources:
       - serviceaccounts/token
     resourceNames:
-      - calico-node
+      - calico-cni-plugin
     verbs:
       - create
   # The CNI plugin needs to get pods, nodes, and namespaces.
@@ -4373,7 +4378,7 @@ rules:
     resources:
       - endpointslices
     verbs:
       - watch
       - list
   - apiGroups: [""]
     resources:
@@ -4511,6 +4516,41 @@ rules:
     verbs:
       - get
 ---
+# Source: calico/templates/calico-node-rbac.yaml
+# CNI cluster role
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: calico-cni-plugin
+rules:
+  - apiGroups: [""]
+    resources:
+      - pods
+      - nodes
+      - namespaces
+    verbs:
+      - get
+  - apiGroups: [""]
+    resources:
+      - pods/status
+    verbs:
+      - patch
+  - apiGroups: ["crd.projectcalico.org"]
+    resources:
+      - blockaffinities
+      - ipamblocks
+      - ipamhandles
+      - clusterinformations
+      - ippools
+      - ipreservations
+      - ipamconfigs
+    verbs:
+      - get
+      - list
+      - create
+      - update
+      - delete
+---
 # Source: calico/templates/calico-kube-controllers-rbac.yaml
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
@@ -4539,6 +4579,20 @@ subjects:
   name: calico-node
   namespace: kube-system
 ---
+# Source: calico/templates/calico-node-rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: calico-cni-plugin
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico-cni-plugin
+subjects:
+- kind: ServiceAccount
+  name: calico-cni-plugin
+  namespace: kube-system
+---
 # Source: calico/templates/calico-node.yaml
 # This manifest installs the calico-node container, as well
 # as the CNI plugins and network config on
@@ -4585,7 +4639,7 @@ spec:
         # It can be deleted if this is a fresh installation, or if you have already
         # upgraded to use calico-ipam.
         - name: upgrade-ipam
-          image: docker.io/calico/cni:master
+          image: docker.io/calico/cni:v3.26.0
           imagePullPolicy: IfNotPresent
           command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
           envFrom:
@@ -4613,7 +4667,7 @@ spec:
         # This container installs the CNI binaries
         # and CNI network config file on each node.
         - name: install-cni
-          image: docker.io/calico/cni:master
+          image: docker.io/calico/cni:v3.26.0
           imagePullPolicy: IfNotPresent
           command: ["/opt/cni/bin/install"]
           envFrom:
@@ -4656,7 +4710,7 @@ spec:
         # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed
         # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode.
         - name: "mount-bpffs"
-          image: docker.io/calico/node:master
+          image: docker.io/calico/node:v3.26.0
          imagePullPolicy: IfNotPresent
          command: ["calico-node", "-init", "-best-effort"]
          volumeMounts:
@@ -4682,7 +4736,7 @@ spec:
         # container programs network policy and routes on each
         # host.
         - name: calico-node
-          image: docker.io/calico/node:master
+          image: docker.io/calico/node:v3.26.0
           imagePullPolicy: IfNotPresent
           envFrom:
           - configMapRef:
@@ -4899,7 +4953,7 @@ spec:
       priorityClassName: system-cluster-critical
       containers:
         - name: calico-kube-controllers
-          image: docker.io/calico/kube-controllers:master
+          image: docker.io/calico/kube-controllers:v3.26.0
          imagePullPolicy: IfNotPresent
          env:
            # Choose which controllers to run.
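
The changes above pin the Calico images from the floating :master tag to :v3.26.0 and add a dedicated calico-cni-plugin ServiceAccount with its ClusterRole and ClusterRoleBinding. One way to confirm the pinned image after applying the manifest (standard kubectl, not part of this diff):

    # list the images used by the calico-node DaemonSet
    kubectl -n kube-system get ds calico-node -o jsonpath='{.spec.template.spec.containers[*].image}'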
28  cluster-setup.txt  Normal file
@@ -0,0 +1,28 @@
ON ALL NODES
git clone https://github.com/sandervanvugt/cka
cd cka
./setup-container.sh
sudo ./setup-kubetools-specific-version.sh

ON CONTROL NODE
sudo kubeadm init
READ COMMAND OUTPUT!!!!

ON CONTROL NODE
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/calico.yaml

PRINT THE JOIN COMMAND
sudo kubeadm token create --print-join-command

ON ALL WORKER NODES
run the join command that resulted from the previous step

ON CONTROL NODE
kubectl get nodes

MORE INFORMATION
https://learning.oreilly.com/videos/certified-kubernetes-administrator/9780138103804/9780138103804-CKA3_01_02_00/
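
For reference, the join command printed by "kubeadm token create --print-join-command" looks like the one captured in history-14Oct24.txt later in this commit (the IP address, token, and CA cert hash are specific to that lab cluster):

    sudo kubeadm join 192.168.29.220:6443 --token 1lmw4f.ow5iplrq9duz747f \
      --discovery-token-ca-cert-hash sha256:9f4ca9d11687b1ef871fbc306e7fae682b5750de059cea3420fb4e5111a76c39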
11  counter 2.sh  Executable file
@@ -0,0 +1,11 @@
#!/bin/bash

COUNTER=$1
COUNTER=$(( COUNTER * 60 ))

while true
do
    echo $COUNTER seconds remaining
    sleep 1
    COUNTER=$(( COUNTER - 1 ))
done
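
Usage note: the script takes the number of minutes as its only argument, e.g. "./counter.sh 12" as seen in the history files in this commit. Because the loop condition is "while true", the countdown continues past zero into negative values; stop it with Ctrl-C.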
102  exam-grade.sh  Executable file
@@ -0,0 +1,102 @@
#!/bin/bash
# exit if not root
clear
# evaluating tasks
echo -e "\033[1mchecking task 1 results\033[0m"
source labs/exam-task1.sh
echo the score is $SCORE
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 2 results\033[0m"
source labs/exam-task2.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 3 results\033[0m"
source labs/exam-task3.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 4 results\033[0m"
source labs/exam-task4.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 5 results\033[0m"
source labs/exam-task5.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 6 results\033[0m"
source labs/exam-task6.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 7 results\033[0m"
source labs/exam-task7.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 8 results\033[0m"
source labs/exam-task8.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 9 results\033[0m"
source labs/exam-task9.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 10 results\033[0m"
source labs/exam-task10.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 11 results\033[0m"
source labs/exam-task11.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 12 results\033[0m"
source labs/exam-task12.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 13 results\033[0m"
source labs/exam-task13.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 14 results\033[0m"
source labs/exam-task14.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 15 results\033[0m"
source labs/exam-task15.sh
#### print PASS/FAIL
echo -e "\n"
echo your score is $SCORE out of a total of $TOTAL

if [[ $SCORE -ge $(( TOTAL / 10 * 7 )) ]]
then
echo -e "\033[32mCONGRATULATIONS!!\033[0m\t\t You passed this sample exam!"
echo -e "\033[1mResults obtained here don't guarantee anything for the real exam\033[0m"
else
echo -e "\033[31m[FAIL]\033[0m\t\t You did NOT pass this sample exam \033[36m:-(\033[0m"
fi
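
How the scoring works: each labs/exam-taskN.sh is sourced rather than executed, so it increments SCORE and TOTAL in this script's own shell; the TOTALSCORE/TOTALTOTAL assignments above are effectively unused, and the final comparison of $SCORE against 70% of $TOTAL relies on that accumulation. A minimal sketch of the pattern a task script follows (namespace name and point value illustrative; the real checks appear in the labs/exam-task*.sh files below):

    # sourced by exam-grade.sh, so SCORE and TOTAL persist in the caller's shell
    if kubectl get ns example &>/dev/null
    then
        echo -e "\033[32m[OK]\033[0m\t\t namespace example was found"
        SCORE=$(( SCORE + 10 ))
    else
        echo -e "\033[31m[FAIL]\033[0m\t\t namespace example was not found"
    fi
    TOTAL=$(( TOTAL + 10 ))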
146  history-14Oct24.txt  Normal file
@@ -0,0 +1,146 @@
ON CONTROLLER NODE
1 git clone https://github.com/sandervanvugt/cka
2 cd cka
3 ls
4 ./setup-container.sh
5 ls
6 ./setup-kubetools.sh
7 history
8 sudo kubeadm init
9 cd
10 mkdir -p $HOME/.kube
11 kubectl get all
12 kubectl get pods -n kube-system
13 source <(kubectl completion bash)
14 kubectl describe -n kube-system pod coredns-7c65d6cfc9-z5rsc
15 kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
16 kubectl get pods -n kube-system
17 history
18 kubectl get nodes
19 kubectl describe node control
20 history
21 cd cka/
22 ./counter.sh 12
23 kubectl get nodes
24 sudo apt install helm
25 helm
26 cd ..
27 ls
28 tar xvf helm-v3.16.2-linux-arm64.tar.gz
29 sudo cp linux-arm64/helm /usr/local/bin
30 helm upgrade --install ingress-nginx ingress-nginx --repo https://kubernetes.github.io/ingress-nginx --namespace ingress-nginx --create-namespace
31 kubectl get all -n ingress-nginx
32 history
33 kubectl create deploy nginxsvc --image=nginx --port=80
34 kubectl expose deploy nginxsvc
35 kubectl get all --selector app=nginxsvc
36 kubectl create ingress nginxsvc --class=nginx --rule=nginxsvc.info/*=nginxsvc:80
37 kubectl port-forward -n ingress-nginx svc/ingress-nginx-controller 8080:80
38 bg
39 sudo sh -c 'echo "127.0.0.1 nginxsvc.info" >> /etc/hosts'
40 cat /etc/hosts
41 curl nginxsvc.info:8080
42 kubectl create ing -h | less
43 history
44 kubectl get all -n ingress-nginx
45 kubectl edit -n ingress-nginx svc ingress-nginx-controller
46 kubectl get all -n ingress-nginx
47 cd cka
48 ./counter.sh 1
49 history
50 kubectl edit svc nginxsvc
51 kubectl describe ingress nginxsvc
52 kubectl describe svc nginxsvc
53 kubectl edit svc nginxsvc
54 kubectl describe svc nginxsvc
55 kubectl describe node control | less
56 df -h
57 kubectl create ns limited
58 kubectl create quota qtest --hard pods=3,cpu=100m,memory=500Mi
59 kubectl get pods
60 kubectl scale deploy nginxsvc --replicas=5
61 kubectl get all
62 kubectl delete quota qtest
63 kubectl scale deploy nginxsvc --replicas=5 -n limited
64 kubectl create quota qtest --hard pods=3,cpu=100m,memory=500Mi -n limited
65 kubectl describe quota -n limited
66 kubectl create deploy nginx --image=nginx --replicas=3 -n limited
67 kubectl get all -n limited
68 kubectl describe rs nginxsvc-7f8cdcb4db
69 kubectl get all -n limited
70 kubectl -n limited describe rs nginx-676b6c5bbc
71 history
72 kubectl -n limited set resources deploy nginx --requests cpu=100m,memory=5Mi --limits cpu=200m,memory=20m
73 kubectl -n limited set resources deploy nginx --requests cpu=100m,memory=20Mi --limits cpu=200m,memory=40m
74 kubectl -n limited set resources deploy nginx --requests cpu=100m,memory=20Mi --limits cpu=200m,memory=40Mi
75 kubectl get pods -n limited
76 kubectl get all -n limited
77 kubectl scale -n limited deploy nginx --replicas=4
78 kubectl scale -n limited deploy nginx --replicas=3
79 kubectl get all -n limited
80 kubectl describe -n limited quota qtest
81 kubectl edit quota -n limited qtest
82 kubectl get all -n limited
83 kubectl scale -n limited deploy nginx --replicas=3
84 kubectl get all -n limited
85 kubectl describe -n limited quota qtest
86 kubectl scale -n limited deploy nginx --replicas=2
87 kubectl get all -n limited
88 kubectl scale -n limited deploy nginx --replicas=3
89 kubectl get all -n limited
90 history
91 kubectl create ns limited
92 vim limitrange.yaml
93 kubectl apply -f limitrange.yaml -n limited
94 kubectl describe ns limited
95 kubectl run limitpod --image=nginx -n limited
96 kubectl -n limited delete quota
97 kubectl -n limited delete quota qtest
98 kubectl run limitpod --image=nginx -n limited
99 kubectl describe -n limited pod limitpod
100 history
101 kubectl get pods -A -o wide
102 kubectl get pods -o wide
103 kubectl create deploy testdeploy --image=nginx --replicas=6
104 kubectl get pods -o wide
105 kubectl drain worker2
106 kubectl drain worker2 --ignore-daemonsets --force
107 kubectl get pods
108 kubectl get pods -o wide
109 kubectl get nodes
110 kubectl describe node worker2
111 kubectl edit node worker2
112 kubectl uncordon worker2
113 kubectl get pods -o wide
114 kubectl create newweb --image=nginx --replicas=20
115 kubectl create deploy newweb --image=nginx --replicas=20
116 kubectl get pods -o wide
117 kubectl delete deploy newweb
118 history
119 sudo ls -l /etc/kubernetes/manifests
120 kubectl run staticpod --image=nginx --dry-run=client -o yaml
121 kubectl get pods
122 sudo -i
123 history >> /tmp/history-14Oct24.txt

ON WORKER1
1 git clone https://github.com/sandervanvugt/cka
2 cd cka
3 ./setup-container.sh
4 ./setup-kubetools.sh
5 sudo kubeadm join 192.168.29.220:6443 --token 1lmw4f.ow5iplrq9duz747f --discovery-token-ca-cert-hash sha256:9f4ca9d11687b1ef871fbc306e7fae682b5750de059cea3420fb4e5111a76c39
6 history
7 sudo vim /etc/kubernetes/manifests/staticpod.yaml
8 crictl ps
9 sudo crictl ps
10 sudo -i
11 history

ON CLIENT
42 scp helm-v3.16.2-linux-arm64.tar.gz 192.168.29.220:/home/student/Downloads
43 scp helm-v3.16.2-linux-arm64.tar.gz 192.168.29.220:/home/student/
44 curl 10.103.142.248
45 history
46 sudo vim /etc/hosts
47 curl nginxsvc.info:31390
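
Steps 119-121 on the controller (and step 7 on worker1) create a static Pod; a hedged sketch of that technique, combining the dry-run generation shown above with the kubelet's manifest directory (the file name staticpod.yaml matches the history; any Pod manifest placed in that directory is started by the kubelet directly, without going through the API server):

    # generate a Pod manifest and drop it into the kubelet's static-Pod directory
    kubectl run staticpod --image=nginx --dry-run=client -o yaml | \
      sudo tee /etc/kubernetes/manifests/staticpod.yaml
    # the kubelet picks it up automatically; verify with the container runtime
    sudo crictl ps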
126  history-15APR24.txt  Normal file
@@ -0,0 +1,126 @@
student@control:~/cka$ history
1 ip a
2 sudo apt install git vim -y
3 git clone https://github.com/sandervanvugt/cka
4 cd cka
5 ls *sh
6 ./setup-container.sh
7 ls
8 ls *sh
9 ./setup-kubetools-previousversion.sh
10 sudo apt install jq -y
11 history
12 sudo kubeadm init
13 history
14 mkdir ~/.kube
15 sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
16 sudo chown $(id -u):$(id -g) $HOME/.kube/config
17 kubectl get all
18 #kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
19 kubectl get pods -n kube-system
20 kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
21 kubectl get pods -n kube-system
22 kubectl get ds -n kube-system
23 kubectl get nodes
24 history
25 kubectl create testapp --image=nginx --replicas=3
26 kubectl create deploy testapp --image=nginx --replicas=3
27 kubectl get all -o wide
28 history
29 cd ..
30 ls
31 tar xvf helm-v3.14.4-linux-amd64.tar.gz
32 sudo mv linux-amd64/helm /usr/local/bin
33 helm upgrade --install ingress-nginx ingress-nginx --repo https://kubernetes.github.io/ingress-nginx --namespace ingress-nginx --create-namespace
34 source <(kubectl completion bash)
35 kubectl get pods -n ingress-nginx
36 kubectl create deploy nginxsvc --image=nginx --replicas=3
37 kubectl get all --selector app=nginxsvc
38 kubectl expose deploy nginxsvc
39 kubectl expose deploy nginxsvc --port=80
40 kubectl get all --selector app=nginxsvc
41 kubectl describe svc nginxsvc
42 kubectl create ingress nginxsvc --class=nginx --rule=nginxsvc.info/*=nginxsvc:80
43 kubectl describe ing nginxsvc
44 sudo vim /etc/hosts
45 kubectl port-forward -n ingress-nginx svc/ingress-nginx-controller 8080:80
46 bg
47 curl nginxsvc.info
48 curl nginxsvc.info:8080
49 history
50 curl nginxsvc.info:8080
51 kubectl edit svc nginxsvc
52 curl nginxsvc.info:8080
53 kubectl describe ing nginxsvc
54 kubectl describe svc nginxsvc
55 kubectl edit svc nginxsvc
56 curl nginxsvc.info:8080
57 history
58 kubectl create ns limited
59 kubectl create quota -h | less
60 kubectl create quota qtest --hard pods=3,cpu=100m,memory=500Mi --namespace=limited
61 kubectl describe quota -n limited
62 kubectl describe ns limited
63 kubectl create deploy nginx --image=nginx --replicas=3 -n limited
64 kubectl get all -n limited
65 kubectl describe -n limited rs nginx-7854ff8877
66 kubectl describe ns limited
67 kubectl set resources -h | less
68 kubectl set -n limited resources deploy nginx --requests cpu=100m,memory=5Mi --limits cpu=200m,memory=20Mi
69 kubectl get all -n limited
70 kubectl describe ns limited
71 kubectl edit quota -n limited qtest
72 kubectl describe ns limited
73 kubectl scale -n limited deployment nginx --replicas=2
74 kubectl scale -n limited deployment nginx --replicas=3
75 kubectl describe ns limited
76 history
77 kubectl edit node control
78 kubectl cordon worker1
79 kubectl edit node worker1
80 kubectl get nodes
81 kubectl uncordon worker1
82 kubectl get pods -n kube-system
83 kubectl drain node worker1
84 kubectl drain worker1
85 kubectl get nodes
86 kubectl edit node worker1
87 kubectl get nodes
88 kubectl get pods -o wide
89 kubectl drain worker1
90 kubectl get nodes
91 kubectl drain worker1 --ignore-daemonsets
92 kubectl edit node worker1
93 kubectl get pods -o wide
94 kubectl get nodes
95 kubectl create deploy ready --image=nginx --replicas=3
96 kubectl get pods -o wide --selector app=ready
97 kubectl get nodes
98 kubectl scale deploy ready --replicas=0
99 kubectl scale deploy ready --replicas=3
100 kubectl get pods -o wide --selector app=ready
101 cd cka/
102 vim networkpolicy-example.yaml
103 git pull
104 vim nwpolicy-complete-example.yaml
105 kubectl apply -f nwpolicy-complete-example.yaml
106 kubectl expose pod nginx --port=80
107 kubectl exec -it busybox -- wget --spider --timeout=1 nginx
108 kubectl label pod busybox access=true
109 kubectl exec -it busybox -- wget --spider --timeout=1 nginx
110 kubectl create ns nwp-namespace
111 vim nwp-lab9-1.yaml
112 kubectl apply -f nwp-lab9-1.yaml
113 kubectl expose pod nwp-nginx --port=80
114 kubectl exec -n nwp-namespace nwp-busybox -- wget --spider --timeout=1 nwp-nginx
115 kubectl exec -n nwp-namespace nwp-busybox -- nslookup nwp-nginx
116 kubectl exec -n nwp-namespace nwp-busybox -- wget --spider --timeout=1 nwp-nginx.default.svc.cluster.local
117 vim nwp-lab9-2.yaml
118 kubectl apply -f nwp-lab9-2.yaml
119 kubectl exec -n nwp-namespace nwp-busybox -- wget --spider --timeout=1 nwp-nginx.default.svc.cluster.local
120 kubectl create deploy busybox --image=busybox -- sleep 3600
121 kubectl exec -it busybox-6fc6c44c5b-x5vrx -- wget --spider --timeput=1 nwp-nginx
122 kubectl exec -it busybox-6fc6c44c5b-x5vrx -- wget --spider --timeout=1 nwp-nginx
123 kubectl delete -f nwp-lab9-2.yaml
124 history
282  history-15Oct24.txt  Normal file
@@ -0,0 +1,282 @@
|
|||||||
|
1 git clone https://github.com/sandervanvugt/cka
|
||||||
|
2 cd cka
|
||||||
|
3 ls
|
||||||
|
4 ./setup-container.sh
|
||||||
|
5 ls
|
||||||
|
6 ./setup-kubetools.sh
|
||||||
|
7 history
|
||||||
|
8 sudo kubeadm init
|
||||||
|
9 cd
|
||||||
|
10 mkdir -p $HOME/.kube
|
||||||
|
11 kubectl get all
|
||||||
|
12 kubectl get pods -n kube-system
|
||||||
|
13 source <(kubectl completion bash)
|
||||||
|
14 kubectl describe -n kube-system pod coredns-7c65d6cfc9-z5rsc
|
||||||
|
15 kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
|
||||||
|
16 kubectl get pods -n kube-system
|
||||||
|
17 history
|
||||||
|
18 kubectl get nodes
|
||||||
|
19 kubectl describe node control
|
||||||
|
20 history
|
||||||
|
21 cd cka/
|
||||||
|
22 ./counter.sh 12
|
||||||
|
23 kubectl get nodes
|
||||||
|
24 sudo apt install helm
|
||||||
|
25 helm
|
||||||
|
26 cd ..
|
||||||
|
27 ls
|
||||||
|
28 tar xvf helm-v3.16.2-linux-arm64.tar.gz
|
||||||
|
29 sudo cp linux-arm64/helm /usr/local/bin
|
||||||
|
30 helm upgrade --install ingress-nginx ingress-nginx --repo https://kubernetes.github.io/ingress-nginx --namespace ingress-nginx --create-namespace
|
||||||
|
31 kubectl get all -n ingress-nginx
|
||||||
|
32 history
|
||||||
|
33 kubectl create deploy nginxsvc --image=nginx --port=80
|
||||||
|
34 kubectl expose deploy nginxsvc
|
||||||
|
35 kubectl get all --selector app=nginxsvc
|
||||||
|
36 kubectl create ingress nginxsvc --class=nginx --rule=nginxsvc.info/*=nginxsvc:80
|
||||||
|
37 kubectl port-forward -n ingress-nginx svc/ingress-nginx-controller 8080:80
|
||||||
|
38 bg
|
||||||
|
39 sudo sh -c 'echo "127.0.0.1 nginxsvc.info" >> /etc/hosts'
|
||||||
|
40 cat /etc/hosts
|
||||||
|
41 curl nginxsvc.info:8080
|
||||||
|
42 kubectl create ing -h | less
|
||||||
|
43 history
|
||||||
|
44 kubectl get all -n ingress-nginx
|
||||||
|
45 kubectl edit -n ingress-nginx svc ingress-nginx-controller
|
||||||
|
46 kubectl get all -n ingress-nginx
|
||||||
|
47 cd cka
|
||||||
|
48 ./counter.sh 1
|
||||||
|
49 history
|
||||||
|
50 kubectl edit svc nginxsvc
|
||||||
|
51 kubectl describe ingress nginxsvc
|
||||||
|
52 kubectl describe svc nginxsvc
|
||||||
|
53 kubectl edit svc nginxsvc
|
||||||
|
54 kubectl describe svc nginxsvc
|
||||||
|
55 kubectl describe node control | less
|
||||||
|
56 df -h
|
||||||
|
57 kubectl create ns limited
|
||||||
|
58 kubectl create quota qtest --hard pods=3,cpu=100m,memory=500Mi
|
||||||
|
59 kubectl get pods
|
||||||
|
60 kubectl scale deploy nginxsvc --replicas=5
|
||||||
|
61 kubectl get all
|
||||||
|
62 kubectl delete quota qtest
|
||||||
|
63 kubectl scale deploy nginxsvc --replicas=5 -n limited
|
||||||
|
64 kubectl create quota qtest --hard pods=3,cpu=100m,memory=500Mi -n limited
|
||||||
|
65 kubectl describe quota -n limited
|
||||||
|
66 kubectl create deploy nginx --image=nginx --replicas=3 -n limited
|
||||||
|
67 kubectl get all -n limited
|
||||||
|
68 kubectl describe rs nginxsvc-7f8cdcb4db
|
||||||
|
69 kubectl get all -n limited
|
||||||
|
70 kubectl -n limited describe rs nginx-676b6c5bbc
|
||||||
|
71 history
|
||||||
|
72 kubectl -n limited set resources deploy nginx --requests cpu=100m,memory=5Mi --limits cpu=200m,memory=20m
|
||||||
|
73 kubectl -n limited set resources deploy nginx --requests cpu=100m,memory=20Mi --limits cpu=200m,memory=40m
|
||||||
|
74 kubectl -n limited set resources deploy nginx --requests cpu=100m,memory=20Mi --limits cpu=200m,memory=40Mi
|
||||||
|
75 kubectl get pods -n limited
|
||||||
|
76 kubectl get all -n limited
|
||||||
|
77 kubectl scale -n limited deploy nginx --replicas=4
|
||||||
|
78 kubectl scale -n limited deploy nginx --replicas=3
|
||||||
|
79 kubectl get all -n limited
|
||||||
|
80 kubectl describe -n limited quota qtest
|
||||||
|
81 kubectl edit quota -n limited qtest
|
||||||
|
82 kubectl get all -n limited
|
||||||
|
83 kubectl scale -n limited deploy nginx --replicas=3
|
||||||
|
84 kubectl get all -n limited
|
||||||
|
85 kubectl describe -n limited quota qtest
|
||||||
|
86 kubectl scale -n limited deploy nginx --replicas=2
|
||||||
|
87 kubectl get all -n limited
|
||||||
|
88 kubectl scale -n limited deploy nginx --replicas=3
|
||||||
|
89 kubectl get all -n limited
|
||||||
|
90 history
|
||||||
|
91 kubectl create ns limited
|
||||||
|
92 vim limitrange.yaml
|
||||||
|
93 kubectl apply -f limitrange.yaml -n limited
|
||||||
|
94 kubectl describe ns limited
|
||||||
|
95 kubectl run limitpod --image=nginx -n limited
|
||||||
|
96 kubectl -n limited delete quota
|
||||||
|
97 kubectl -n limited delete quota qtest
|
||||||
|
98 kubectl run limitpod --image=nginx -n limited
|
||||||
|
99 kubectl describe -n limited pod limitpod
|
||||||
|
100 history
|
||||||
|
101 kubectl get pods -A -o wide
|
||||||
|
102 kubectl get pods -o wide
|
||||||
|
103 kubectl create deploy testdeploy --image=nginx --replicas=6
|
||||||
|
104 kubectl get pods -o wide
|
||||||
|
105 kubectl drain worker2
|
||||||
|
106 kubectl drain worker2 --ignore-daemonsets --force
|
||||||
|
107 kubectl get pods
|
||||||
|
108 kubectl get pods -o wide
|
||||||
|
109 kubectl get nodes
|
||||||
|
110 kubectl describe node worker2
|
||||||
|
111 kubectl edit node worker2
|
||||||
|
112 kubectl uncordon worker2
|
||||||
|
113 kubectl get pods -o wide
|
||||||
|
114 kubectl create newweb --image=nginx --replicas=20
|
||||||
|
115 kubectl create deploy newweb --image=nginx --replicas=20
|
||||||
|
116 kubectl get pods -o wide
|
||||||
|
117 kubectl delete deploy newweb
|
||||||
|
118 history
|
||||||
|
119 sudo ls -l /etc/kubernetes/manifests
|
||||||
|
120 kubectl run staticpod --image=nginx --dry-run=client -o yaml
|
||||||
|
121 kubectl get pods
|
||||||
|
122 sudo -i
|
||||||
|
123 history >> /tmp/history-14Oct24.txt
|
||||||
|
124 vim /tmp/history-14Oct24.txt
|
||||||
|
125 kubectl config view
|
||||||
|
126 kubectl api-resources | grep -i networkp
|
||||||
|
127 vim nwpolicy-complete-example.yaml
|
||||||
|
128 kubectl apply -f nwpolicy-complete-example.yaml
|
||||||
|
129 kubectl expose pod nginx --port=80
|
||||||
|
130 kubectl get svc
|
||||||
|
131 kubectl exec -it busybox -- wget --spider --timeout=1 nginx
|
||||||
|
132 vim nwpolicy-complete-example.yaml
|
||||||
|
133 kubectl lab pod busybox access=true
|
||||||
|
134 kubectl label pod busybox access=true
|
||||||
|
135 kubectl exec -it busybox -- wget --spider --timeout=1 nginx
|
||||||
|
136 history
|
||||||
|
137 kubectl create ns new-namespace
|
||||||
|
138 kubectl create ns nwp-namespace
|
||||||
|
139 vim nwp-lab9-1.yaml
|
||||||
|
140 kubectl create -f nwp-lab9-1.yaml
|
||||||
|
141 kubectl expose pod nwp-nginx --port=80
|
||||||
|
142 kubectl exec -it nwp-busybox -n nwp-namespace -- wget --spider --timeout=1 nwp-nginx
|
||||||
|
143 kubectl exec -it nwp-busybox -n nwp-namespace -- nslookup nwp-nginx
|
||||||
|
144 kubectl exec -it nwp-busybox -n nwp-namespace -- wget --spider --timeout=1 nwp-nginx.default.svc.cluster.local
|
||||||
|
145 vim nwp-lab9-2.yaml
|
||||||
|
146 kubectl exec -it nwp-busybox -n nwp-namespace -- wget --spider --timeout=1 nwp-nginx.default.svc.cluster.local
|
||||||
|
147 kubectl apply -f nwp-lab9-2.yaml
|
||||||
|
148 kubectl exec -it nwp-busybox -n nwp-namespace -- wget --spider --timeout=1 nwp-nginx.default.svc.cluster.local
|
||||||
|
149 kubectl create deploy busybox --image=busybox --sleep 3600
|
||||||
|
150 kubectl create deploy busybox --image=busybox -- sleep 3600
|
||||||
|
151 kubectl exec -it busybox-75cd85d546-wd6wq -- wget --spider --timeout=1 nwp-nginx
|
||||||
|
152 kubectl get netpol
|
||||||
|
153 kubectl delete netpol access-nginx
|
||||||
|
154 kubectl delete netpol deny-from-other-namespaces
|
||||||
|
155 history
|
||||||
|
156 kubectl top pods
|
||||||
|
157 kubectl top node
|
||||||
|
158 kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
|
||||||
|
159 kubectl -n kube-system get pods
|
||||||
|
160 kubectl edit -n kube-system deployments.apps metrics-server
|
||||||
|
161 kubectl -n kube-system get pods
|
||||||
|
162 kubectl top pods
|
||||||
|
163 history
|
||||||
|
164 kubectl get quota
|
||||||
|
165 kubectl create quota -h | less
|
||||||
|
166 #kubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3,replicationcontrollers=2,resourcequotas=1,secrets=5,persistentvolumeclaims=10
|
||||||
|
167 kubectl create quota one --hard=cpu=1
|
||||||
|
168 kubectl create quota two --hard=memory=4G
|
||||||
|
169 kubectl create quota three --hard=cpu=2
|
||||||
|
170 kubectl describe ns default
|
||||||
|
171 kubectl delete quota one two three
|
||||||
|
172 kubectl top -h | less
|
||||||
|
173 kubectl top pod -h | less
|
||||||
|
174 kubectl top pod -A --sort-by="memory"
|
||||||
|
175 history
|
||||||
|
176 kubectl run testpox --image=nginx --dry-run=client -o yaml
|
||||||
|
177 kubectl get pods
|
||||||
|
178 history
|
||||||
|
179 kubectl get pods
|
||||||
|
180 sudo apt install etcd-client
|
||||||
|
181 sudo etcdctl --help
|
||||||
|
182 sudo ETCDCTL_API=2 etcdctl --help
|
||||||
|
183 sudo ETCDCTL_API=3 etcdctl --help
|
||||||
|
184 ps aux | grep 2379
|
||||||
|
185 sudo etcdctl --endpoints=localhost:2379 --cacert /etc/kubernetes/pki/etcd/ca.rt --cert /etc/kubernetes/pki/etcd/server.crt --key /etc/kubernetes/pki/etcd/server.key get / --prefix --keys-only
|
||||||
|
186 sudo etcdctl --endpoints=localhost:2379 --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/etcd/server.crt --key /etc/kubernetes/pki/etcd/server.key get / --prefix --keys-only
|
||||||
|
187 sudo etcdctl --endpoints=localhost:2379 --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/etcd/server.crt --key /etc/kubernetes/pki/etcd/server.key snapshot save /tmp/etcdbackup.db
|
||||||
|
188 ls -l /tmp/etcdbackup.db
|
||||||
|
189 sudo etcdctl --write-out=table snapshot status /tmp/etcdbackup.db
|
||||||
|
190 cp /tmp/etcdbackup.db /tmp/etcdbackup.db.2
|
||||||
|
191 sudo cp /tmp/etcdbackup.db /tmp/etcdbackup.db.2
|
||||||
|
192 history
|
||||||
|
193 kubectl get deploy
|
||||||
|
194 kubectl delete deploy --all
|
||||||
|
195 kubectl get deploy
|
||||||
|
196 cd /etc/kubernetes/manifests/
|
||||||
|
197 ls
|
||||||
|
198 sudo mv * ..
|
||||||
|
199 sudo dnf install tree
|
||||||
|
200 sudo apt install tree
|
||||||
|
201 sudo tree /var/lib/etcd
|
||||||
|
202 sudo mv /var/lib/etcd /var/lib/etcd-backup
|
||||||
|
203 sudo tree /var/lib/etcd
|
||||||
|
204 sudo etcdctl snapshot restore /tmp/etcdbackup.db --data-dir /var/lib/etcd
|
||||||
|
205 sudo tree /var/lib/etcd
|
||||||
|
206  sudo mv ../*.yaml .
207  sudo crictl ps
208  kubectl get deploy
209  kubectl get pods
210  kubectl delete deploy testdeploy
211  history
212  kubectl get nodes
213*
214  cd
215  vim nodesel.yam;l
216  vim nodesel.yaml
217  kubectl apply -f nodesel.yaml
218  kubectl get pods -o yaml
219  kubectl get pods -o wide
220  vim nodesel.yaml
221  kubectl apply -f nodesel.yaml
222  kubectl get pods
223  kubectl describe pod islectnginxxxxxx
224  kubectl get deploy
225  kubectl get pods
226  kubectl delete pods --force --timeout=0 testdeploy-7cd7d7ddc8-28mcq testdeploy-7cd7d7ddc8-fqh6v testdeploy-7cd7d7ddc8-ftk48 testdeploy-7cd7d7ddc8-pd7sd testdeploy-7cd7d7ddc8-stj67 testdeploy-7cd7d7ddc8-stxsx
227  kubectl get pods
228  history
229  kubectl get node control -o yaml | less
230  kubectl get ds -A
231  kubectl get ds -n kube-system kube-proxy -o yaml | less
232  kubectl get ds -n kube-system calico-node -o yaml | less
233  history
234  kubectl taint nodes worker1 storage=ssd:NoSchedule
235  kubectl describe node worker1
236  kubectl create deploy nginx-taint --image=nginx --replicas=3
237  kubectl get pods -o wide
238  cd cka/
239  vim taint-toleration.yaml
240  kubectl apply -f taint-toleration.yaml
241  kubectl get pods -o wide | grep tole
242  vim taint-toleration2.yaml
243  kubectl apply -f taint-toleration2.yaml
244  kubectl get pods -o wide | grep hdd
245  vim taint-toleration2.yaml
246  kubectl apply -f taint-toleration2.yaml
247  vim taint-toleration2.yaml
248  kubectl apply -f taint-toleration2.yaml
249  kubectl get pods -o wide | grep exists
250  history
251  vim ~/.kube/config
252  kubectl get sa
253  kubectl get sa -n kube-system
254  kubectl create role -h | less
255  kubectl create rolebinding -h | less
256  kubectl run mypod --image=alpine -- sleep 3600
257  kubectl get pods mypod -o yaml | less
258  kubectl exec -it mypod -- sh
259  historyt
260  history
261  kubectl create sa mysa
262  kubectl create role list-pods --resource=pods --verbs=list
263  kubectl create role list-pods --resource=pods --verb=list
264  kubectl describe role list-pods
265  kubectl create rolebinding list-pods --role=list-pods --serviceaccount=default:default
266  vim mysapod.yaml
267  kubectl apply -f mysapod.yaml
268  kubectl exec -it mysapod -- sh
269  ls *role*
270  grep -li 'type=role' *
271  kubectl get rolebindings
272  kubectl create rolebinding list-pods --role=list-pods --serviceaccount=default:mysa
273  kubectl delete rolebindings.rbac.authorization.k8s.io list-pods
274  kubectl create rolebinding list-pods --role=list-pods --serviceaccount=default:mysa
275  kubectl exec -it mysapod -- sh
276  kubectl get pods
277  kubectl delete pod mysapod
278  kubectl delete pod mysapod --force --timeout=1
279  kubectl delete pod mysapod --force --timeout=0
280  kubectl apply -f mysapod.yaml
281  kubectl exec -it mysapod -- sh
282  history > /tmp/history-15Oct24.txt
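Note: commands 261-274 above only work if mysapod.yaml defines a pod that runs with the mysa ServiceAccount. The file itself is not part of this history; a minimal sketch of what it could look like (the alpine image mirrors the mypod example at command 256, everything else is an assumption):

# mysapod.yaml -- hypothetical sketch, not the course's own file
apiVersion: v1
kind: Pod
metadata:
  name: mysapod
spec:
  serviceAccountName: mysa
  containers:
  - name: mysapod
    image: alpine
    command: ['sleep', '3600']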
labs/exam-task1.sh (Normal file, 39 lines)
@@ -0,0 +1,39 @@
if kubectl get ns indiana &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t namespace indiana was found"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t namespace indiana was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if [[ $(echo $(kubectl get -n indiana secret insecret -o yaml | awk '/color/ { print $2 }') | base64 -d) == blue ]] &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t secret insecret with COLOR=blue was found"
  SCORE=$(( SCORE + 10 ))
elif kubectl get -n indiana secret insecret &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t secret insecret was found, but not with the expected variable"
else
  echo -e "\033[31m[FAIL]\033[0m\t\t secret insecret was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if [[ $(echo $(kubectl get pods -n indiana inpod -o jsonpath='{.spec.containers[*].image}')) == nginx:latest ]] &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t found pod inpod that uses the latest version of nginx"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t pod inpod that uses the latest version of the nginx image was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl get pods -n indiana inpod -o yaml | grep insecret &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t pod inpod uses the secret insecret"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t pod inpod doesn't use the secret insecret"
fi
TOTAL=$(( TOTAL + 10 ))
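Note: one possible setup that passes all four checks in this script, as a sketch rather than the official solution (the secret key name color is inferred from the awk pattern above):

kubectl create ns indiana
kubectl create secret generic insecret -n indiana --from-literal=color=blue
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: inpod
  namespace: indiana
spec:
  containers:
  - name: inpod
    image: nginx:latest
    envFrom:
    - secretRef:
        name: insecret
EOF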
labs/exam-task10.sh (Normal file, 8 lines)
@@ -0,0 +1,8 @@
if helm list | grep mysql &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t you have successfully installed the bitnami mysql chart"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t bitnami mysql chart not found"
fi
TOTAL=$(( TOTAL + 10 ))
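Note: a run that satisfies this check could look as follows (assumes internet access to the Bitnami chart repository):

helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
helm install mysql bitnami/mysql
helm list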
labs/exam-task11.sh (Normal file, 26 lines)
@@ -0,0 +1,26 @@
if kubectl get ns nebraska &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t namespace nebraska was found"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t namespace nebraska was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl -n nebraska get deploy | grep snowdeploy &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t Deployment snowdeploy was found in Namespace nebraska"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t Deployment snowdeploy was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl -n nebraska get deploy snowdeploy -o yaml | grep -A1 requests | grep 64Mi &>/dev/null && kubectl -n nebraska get deploy snowdeploy -o yaml | grep -A1 limits | grep 128Mi &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t the requested memory request and limits have been found"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t the requested memory request and limits have not been found"
fi
TOTAL=$(( TOTAL + 10 ))
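Note: a sketch of a matching setup; the kubectl set resources syntax is the same one used elsewhere in this course's history files:

kubectl create ns nebraska
kubectl create deploy snowdeploy --image=nginx -n nebraska
kubectl set resources -n nebraska deploy snowdeploy --requests=memory=64Mi --limits=memory=128Mi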
labs/exam-task12.sh (Normal file, 27 lines)
@@ -0,0 +1,27 @@
if kubectl get ns | grep birds &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t namespace birds was found"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t namespace birds was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if [[ $(kubectl -n birds get pods --show-labels --selector=type=allbirds | grep bird | wc -l) == "5" ]] &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t good, 5 pods with label type=allbirds were found"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t couldn't find 5 pods with the label type=allbirds"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl get -n birds svc allbirds | grep 32323 &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t NodePort Service allbirds listening on nodePort 32323 was found in Namespace birds"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t no NodePort Service allbirds listening on nodePort 32323 was found in Namespace birds"
fi
TOTAL=$(( TOTAL + 10 ))
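Note: one way to produce what this script tests. Using a single deployment is an assumption (any 5 pods whose names contain "bird" and carry the label type=allbirds would pass), and the patch pins the randomly generated nodePort to the requested value:

kubectl create ns birds
kubectl create deploy allbirds -n birds --image=nginx --replicas=5
kubectl label pods -n birds --selector=app=allbirds type=allbirds
kubectl expose deploy allbirds -n birds --type=NodePort --port=80
kubectl patch svc allbirds -n birds -p '{"spec":{"ports":[{"port":80,"nodePort":32323}]}}'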
labs/exam-task13.sh (Normal file, 17 lines)
@@ -0,0 +1,17 @@
if kubectl get pods -o yaml securepod | grep 'runAsGroup: 2000' &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t securepod is running with group ID 2000"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t securepod is not running with group ID 2000"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl get pods -o yaml securepod | grep 'allowPrivilegeEscalation: false' &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t container in pod securepod has privilege escalation disabled"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t container in pod securepod does not have privilege escalation disabled"
fi
TOTAL=$(( TOTAL + 10 ))
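Note: a securepod sketch that passes both greps; both fields are standard securityContext settings, runAsGroup at the pod level and allowPrivilegeEscalation at the container level:

apiVersion: v1
kind: Pod
metadata:
  name: securepod
spec:
  securityContext:
    runAsGroup: 2000
  containers:
  - name: securepod
    image: nginx
    securityContext:
      allowPrivilegeEscalation: false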
labs/exam-task14.sh (Normal file, 18 lines)
@@ -0,0 +1,18 @@
if docker images | grep myapp | grep '1.0' &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t container image myapp:1.0 was found"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t container image myapp:1.0 was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if [ -f /tmp/myapp.tar ]
then
  echo -e "\033[32m[OK]\033[0m\t\t tar archive /tmp/myapp.tar was found"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t tar archive /tmp/myapp.tar was not found"
fi
TOTAL=$(( TOTAL + 10 ))
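Note: commands that would satisfy both checks (assumes a Dockerfile for myapp in the current directory; docker save -o writes an image to a tar archive):

docker build -t myapp:1.0 .
docker save myapp:1.0 -o /tmp/myapp.tar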
labs/exam-task15.sh (Normal file, 8 lines)
@@ -0,0 +1,8 @@
if kubectl get pod securepod -n oklahoma -o yaml | grep 'serviceAccount: secure' &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t pod securepod in namespace oklahoma found and it is using the serviceaccount secure"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t couldn't find the pod securepod in namespace oklahoma that uses the serviceaccount secure"
fi
TOTAL=$(( TOTAL + 10 ))
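Note: a matching setup sketch; the server-side pod yaml shows the deprecated serviceAccount alias next to serviceAccountName, which is what the grep relies on:

kubectl create ns oklahoma
kubectl create sa secure -n oklahoma
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: securepod
  namespace: oklahoma
spec:
  serviceAccountName: secure
  containers:
  - name: securepod
    image: nginx
EOF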
labs/exam-task2.sh (Normal file, 11 lines)
@@ -0,0 +1,11 @@
kubectl get pods -A --selector tier=control-plane | awk 'NR > 1 { print $2 }' > /tmp/task2file.txt

if diff /tmp/task2file.txt /tmp/task2pods
then
  echo -e "\033[32m[OK]\033[0m\t\t all pods with label tier=control-plane were found"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t your result file doesn't show all pods with the label tier=control-plane"
fi
TOTAL=$(( TOTAL + 10 ))
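Note: the student-side counterpart is to write the same list to /tmp/task2pods before this grader runs, for instance:

kubectl get pods -A --selector tier=control-plane | awk 'NR > 1 { print $2 }' > /tmp/task2pods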
labs/exam-task3.sh (Normal file, 17 lines)
@@ -0,0 +1,17 @@
if kubectl get cm task3cm -o yaml | grep index.html &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t a configmap with the name task3cm was found with the right contents"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t configmap with the name task3cm was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl describe pod oregonpod | grep -A1 'ConfigMap' | grep task3cm &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t the pod oregonpod has the configmap task3cm mounted"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t the pod oregonpod doesn't seem to have the configmap task3cm mounted"
fi
TOTAL=$(( TOTAL + 10 ))
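Note: a setup sketch that matches both greps (the index.html content is arbitrary; the mount path assumes the nginx document root):

echo hello > index.html
kubectl create cm task3cm --from-file=index.html
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: oregonpod
spec:
  containers:
  - name: oregonpod
    image: nginx
    volumeMounts:
    - name: webdata
      mountPath: /usr/share/nginx/html
  volumes:
  - name: webdata
    configMap:
      name: task3cm
EOF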
labs/exam-task4.sh (Normal file, 8 lines)
@@ -0,0 +1,8 @@
if kubectl get pods sidepod -o yaml | grep -A 10 initContainers | grep 'restartPolicy: Always' &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t found a pod sidepod that runs a sidecar container"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t didn't find a pod sidepod that runs a sidecar container"
fi
TOTAL=$(( TOTAL + 10 ))
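Note: the grep targets the native sidecar pattern, an initContainer with restartPolicy: Always (available since Kubernetes 1.28). A sketch, with the container names as assumptions:

apiVersion: v1
kind: Pod
metadata:
  name: sidepod
spec:
  initContainers:
  - name: sidecar
    image: busybox
    restartPolicy: Always
    command: ['sh', '-c', 'tail -f /dev/null']
  containers:
  - name: main
    image: nginx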
labs/exam-task5.sh (Normal file, 17 lines)
@@ -0,0 +1,17 @@
if kubectl get ns probes &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t namespace probes was found"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t namespace probes was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl describe pods -n probes probepod | grep Liveness | grep '/healthz' &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t pod probepod was found, as well as its Liveness probe"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t no pod probepod with correct liveness probe was found"
fi
TOTAL=$(( TOTAL + 10 ))
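Note: after kubectl create ns probes, a probepod sketch that produces the expected describe output. Against a stock nginx image this probe will actually fail (nginx serves no /healthz), but the check only inspects the configured probe:

apiVersion: v1
kind: Pod
metadata:
  name: probepod
  namespace: probes
spec:
  containers:
  - name: probepod
    image: nginx
    livenessProbe:
      httpGet:
        path: /healthz
        port: 80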
labs/exam-task6.sh (Normal file, 22 lines)
@@ -0,0 +1,22 @@
# get the revision number of the last update that was found
kubectl rollout history deployment updates > /tmp/task6.txt
LAST=$(tail -2 /tmp/task6.txt | head -1 | awk '{ print $1 }')
BEFORE=$(( LAST - 1 ))

if kubectl rollout history deployment updates --revision=${LAST} | grep 'nginx:1.17' &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t last revision of the updates deploy is set to nginx:1.17"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t last revision of the updates deploy is not set to nginx:1.17"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl rollout history deployment updates --revision=${BEFORE} | grep 'nginx:latest' &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t previous revision of deploy updates was using nginx:latest"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t previous revision of deploy updates not found or not using nginx:latest"
fi
TOTAL=$(( TOTAL + 10 ))
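Note: a sequence that produces the two revisions this script inspects (kubectl create deploy names the container nginx after the image):

kubectl create deploy updates --image=nginx:latest
kubectl set image deployment/updates nginx=nginx:1.17
kubectl rollout history deployment updates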
labs/exam-task7.sh (Normal file, 36 lines)
@@ -0,0 +1,36 @@
if grep $(minikube ip).*myapp.info /etc/hosts &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t name resolution for myapp.info is setup"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t no name resolution for myapp.info was found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl describe svc task7svc | grep app=updates &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t Service task7svc found and exposes Deploy updates"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t No Service task7svc exposing Deploy updates was found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl get pods -n ingress-nginx | grep controller | grep Running &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t found a running ingress controller"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t no running ingress controller was found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl describe ing | grep task7svc:80 &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t ingress rule forwarding traffic to task7svc was found"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t no ingress rule forwarding traffic to task7svc was found"
fi
TOTAL=$(( TOTAL + 10 ))
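Note: a possible setup on minikube; the host name and backend come from the checks, the ingress name task7ing is an assumption, and kubectl create ingress uses the host/path=service:port rule syntax:

minikube addons enable ingress
echo "$(minikube ip) myapp.info" | sudo tee -a /etc/hosts
kubectl expose deploy updates --name=task7svc --port=80
kubectl create ingress task7ing --rule="myapp.info/=task7svc:80"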
labs/exam-task8.sh (Normal file, 17 lines)
@@ -0,0 +1,17 @@
if kubectl describe networkpolicy | grep 'PodSelector:.*type=webapp' &>/dev/null && kubectl describe networkpolicy | grep 'PodSelector:.*type=tester' &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t NetworkPolicy was found with correct configuration"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t No NetworkPolicy with correct configuration was found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl exec -it nevatest -- wget --spider --timeout=1 nevaginx &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t the tester pod can access the nevaginx pod"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t the tester pod cannot access the nevaginx pod"
fi
TOTAL=$(( TOTAL + 10 ))
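Note: a NetworkPolicy sketch containing the two selectors the first check greps for (the policy name is an assumption):

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: task8-policy
spec:
  podSelector:
    matchLabels:
      type: webapp
  ingress:
  - from:
    - podSelector:
        matchLabels:
          type: tester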
labs/exam-task9.sh (Normal file, 17 lines)
@@ -0,0 +1,17 @@
if kubectl exec storepod -- cat /usr/share/nginx/html/index.html &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t file index.html accessible through hostPath storage"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t file index.html not accessible through hostPath storage"
fi
TOTAL=$(( TOTAL + 10 ))

if curl $(minikube ip):32032 | grep welcome &>/dev/null
then
  echo -e "\033[32m[OK]\033[0m\t\t Pod storepod correctly exposed and hostPath volume content accessible"
  SCORE=$(( SCORE + 10 ))
else
  echo -e "\033[31m[FAIL]\033[0m\t\t Pod storepod not correctly exposed"
fi
TOTAL=$(( TOTAL + 10 ))
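Note: a storepod sketch; the hostPath location and the welcome text are assumptions, the nodePort is fixed by the second check:

# on the minikube host: mkdir -p /mydata && echo welcome > /mydata/index.html
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: storepod
spec:
  containers:
  - name: storepod
    image: nginx
    volumeMounts:
    - name: hostdata
      mountPath: /usr/share/nginx/html
  volumes:
  - name: hostdata
    hostPath:
      path: /mydata
EOF
kubectl expose pod storepod --type=NodePort --port=80
kubectl patch svc storepod -p '{"spec":{"ports":[{"port":80,"nodePort":32032}]}}'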
novo-history.txt (Normal file, 462 lines)
@@ -0,0 +1,462 @@
1  ping nu.nl
2  ip a
3  git
4  git clone https://github.com/sandervanvugt/cka
5  cd cka
6  ls *sh
7  vim setup-container-previous-version.sh
8  ./setup-container-previous-version.sh
9  history
10  ls *sh
11  ./setup-kubetools-previousversion.sh
12  vim setup-container.sh
13  touch /tmp/container.txt
14  ./setup-kubetools-previousversion.sh
15  history
16  cat /etc/hosts
17  sudo kubeadm init
18  which kubeadm
19  history
20  mkdir -p $HOME/.kube
21  kubectl get all
22  kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
23  kubectl get pods -n kube-system
24  history
25  kubectl get nodes
26  kubeadm -h | less
27  kubeadm token -h | les
28  kubeadm token -h | less
29  kubeadm token list -h | less
30  sudo kubeadm token create --print-join-command
31  sudo kubeadm token list
32  history
33  kubectl -h | less
34  kubectl completion -h | less
35  source <(kubectl completion bash)
36  kubectl create -h | less
37  kubectl create deploy -h | less
38  kubectl create deployment novoapp --image=nginx --replicas=3
39  kubectl get all
40  kubectl delete pod novoapp-6c544c567c-46wgx
41  kubectl get all
42  history
43  kubectl get all
44  kubectl describe pod novoapp-6c544c567c-46wgx
45  kubectl run -h | less
46  kubectl run nginx --image=nginx
47  kubectl get all
48  kubectl delete pod nginx
49  kubectl create deploy mydb --image=mariadb --replicas=3
50  kubectl get all
51  kubectl describe pod mydb-7d6458794-cwx4j
52  kubectl get all
53  kubectl describe pod mydb-7d6458794-cwx4j
54  kubectl logs mydb-7d6458794-rrtnf
55  kubectl set env -h | less
56  kubectl set env deploy mydb MARIADB_ROOT_PASSWORD=password
57  kubectl get ll
58  kubectl get all
59  sudo systemctl status containerd
60  ps aux | grep nginx
61  sudo journalctl -u containerd
62  journalctl
63  kubectl get pods
64  kubectl delete pod nginx --force --timeout=0
65  kubectl get all
66  kubectl delete pods novoapp-6c544c567c-46wgx --force --timeout=0
67  kubectl get pods
68  kubectl get pods -o wide
69  kubectl create deploy failure --image=nginx --replicas=3
70  kubectl get all
71  kubectl get all --show-labels
72  kubectl get all --selector app=failure
73  kubectl get all --selector app=failure -o wide
74  kubectl delete pod failure-7fdf677784-fnd27
75  kubectl get all --selector app=failure -o wide
76  kubectl delete pod failure-7fdf677784-fnd27 --force --timeout=0
77  kubectl get all --selector app=failure -o wide
78  sudo ln -s /etc/apparmor.d/runc /etc/apparmor.d/disable/
79  sudo apparmor_parser -R /etc/apparmor.d/runc
80  kubectl get all --selector app=failure -o wide
81  curl 172.16.189.77
82  kubectl expose deployment failure --port=80
83  kubectl get all --selector app=failure -o wide
84  curl 10.107.127.87
85  kubectl edit svc failure
86  kubectl get all --selector app=failure -o wide
87  kubectl edit svc failure
88  kubectl describe svc failure
89  kubectl edit svc failure
90  kubectl describe svc failure
91  vim exo1.txt
92  history
93  kubectl create deploy october --image=nginx --replicas=3
94  kubectl expose deploy october --type=NodePort --port=80
95  kubectl get all --selector app=october
96  kubectl describe svc october
97  kubectl get pods -n kube-system
98  kubectl edit svc october
99  kubectl get svc
100  kubectl edit svc october
101  kubectl delete svc failure
102  history
103  exit
104  kubectl run webserver --image=nginx
105  source <(kubectl completion bash)
106  kubectl expose pod webserver --port=80
107  kubectl run testpod --image=busybox -- sleep 3600
108  kubectl get svc
109  kubectl exec -it testpod -- wget webserver
110  kubectl exec -it testpod -- cat /etc/resolv.conf
111  kubectl get ns
112  kubectl get pods
113  kubectl get pods -n kube-system
114  kubectl get pods -A
115  kubectl create ns remote
116  kubectl run interginx --image=nginx
117  kubectl run remotebox --image=busybox -n remote -- sleep 3600
118  kubectl expose pod interginx --port=80
119  kubectl exec -it remotebox -n remote -- cat /etc/resolv.conf
120  kubectl exec -it remotebox -n remote -- nslookup interginx
121  kubectl exec -it remotebox -n remote -- nslookup interginx.default.svc.cluster.local
122  history
123  echo run busybox in the sleepy namespace, run nginx in the awake namespace and expose it. access the nginx application from busybox by using the dns name
124  kubectl create ns sleepy
125  kubectl create ns awake
126  kubectl run awakeging --image=nginx -n awake
127  kubectl -n awake expose pod awakeging
128  kubectl -n awake expose pod awakeging --port=80
129  kubectl run -n sleepy busybox -- sleep 3600
130  kubectl run -n sleepy --image=busybox -- sleep 3600
131  kubectl get all -n sleepy
132  kubectl delete pod sleep -n sleepy
133  kubectl run sleepy --image=busybox -n sleepy -- sleep 3600
134  kubectl get all -n sleepy
135  kubectl -n sleepy exec -it sleepy -- nslookup awakeging.awake.svc.cluster.local
136  kubectl -n sleepy exec -it sleepy -- nslookup awakeging.awake
137  cd cka/
138  kubectl create ns nwp-namespace
139  vim nwp-lab9-1.yaml
140  kubectl apply -f nwp-lab9-1.yaml
141  kubectl expose pod nwp-nginx --port=80
142  kubectl exec -it nwp-busybox -n nwp-namespace -- wget --spider --timeout=1 nwp-nginx
143  kubectl exec -it nwp-busybox -n nwp-namespace -- wget --spider --timeout=1 nwp-nginx.default.svc.cluster.local
144  vim nwp-lab9-2.yaml
145  kubectl apply -f nwp-lab9-2.yaml
146  kubectl exec -it nwp-busybox -n nwp-namespace -- wget --spider --timeout=1 nwp-nginx.default.svc.cluster.local
147  kubectl create deploy busybox --image=busybox -- sleep 3600
148  kubectl exec -it busybox-5b5ddd5fc-nzrz9 -- wget --spider --timeout=1 nwp-nginx
149  etcdctl
150  sudo apt install etcd-client
151  sudo etcdctl --help | less
152  sudo etcdctl snapshot save -h
153  sudo etcdctl --endpoints=localhost:2379 --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/etcd/server.crt --key /etc/kubernetes/pki/etcd/server.key get / --prefix --keys-only
154  sudo etcdctl --endpoints=localhost:2379 --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/etcd/server.crt --key /etc/kubernetes/pki/etcd/server.key snapshot save /tmp/etcdbackup.db
155  ls -l /tmp/etcdbackup.db
156  etcdctl --write-out=table snapshot status /tmp/etcdbackup.db
157  sudo etcdctl --write-out=table snapshot status /tmp/etcdbackup.db
158  history
159  kubectl get deploy
160  kubectl delete deploy mydb
161  cd /etc/kubernetes/manifests/
162  ls
163  sudo mv * ..
164  sudo crictl ps
165  sudo etcdctl snapshot restore /tmp/etcdbackup.db --data-dir /var/lib/etcd-backup
166  sudo ls -l /var/lib/etcd-backup
167  sudo vim /etc/kubernetes/etcd.yaml
168  sudo mv ../*.yaml .
169  sudo crictl ps
170  kubectl get deploy -A
171  sudo mv ..
172  sudo mv * ..
173  sudo mv /var/lib/etcd /var/lib/etcd-old
174  sudo mv /var/lib/etcd-backup /var/lib/etcd
175  sudo vim /etc/kubernetes/etcd.yaml
176  sudo mv ../*.yaml .
177  sudo crictl ps
178  kubectl get deploy -A
179  cd
180  cd cka
181  ls
182  ls p*
183  vim pv.yaml
184  kubectl explain persistenvolume.spec | less
185  kubectl explain persistentvolume.spec | less
186  kubectl apply -f pv.yaml
187  vim pvc.yaml
188  kubectl apply -f pvc.yaml
189  kubectl get pvc,pv
190  vim pv-pod.yaml
191  kubectl apply -f pv-pod.yaml
192  kubectl exec -it pv-pod -- touch /usr/share/nginx/html/helloDK
193  kubectl get pods
194  source <(kubectl completion bash)
195  kubectl describe pv pv-volume
196  kubectl get pods -o wide | grep pv
197  exit
198  source <(kubectl completion bash)
199  cd cka/
200  vim nwpolicy-complete-example.yaml
201  kubectl apply -f nwpolicy-complete-example.yaml
202  vim nwpolicy-complete-example.yaml
203  kubectl expose pod nginx --port=80
204  kubectl exec -it busybox -- wget --spider --timeout=1 nginx
205  kubectl get networkpolicy -o yaml
206  kubectl get pods --show-labels
207  kubectl label pod access=true
208  kubectl label pod busybox access=true
209  kubectl exec -it busybox -- wget --spider --timeout=1 nginx
210  history
211  vim nwpexample.yaml
212  kubectl create ns nondefault
213  vim nwpexample.yaml
214  kubectl apply -f nwpexample.yaml
215  kubectl describe ns nondefault
216  kubectl get networkpolicy -n nondefault
217  kubectl get networkpolicy
218  kubectl run nginx1 --image=nginx -n nondefault
219  kubectl run nginx2 --image=nginx
220  kubectl label pod nginx2 role=web
221  kubectl lable pod nginx1 -n nondefault role=web
222  kubectl label pod nginx1 -n nondefault role=web
223  kubectl expose pod nginx1 -n nondefault --port=80
224  kubectl expose pod nginx2 --port=80
225  kubectl get pods
226  history
227  kubectl exec -it busybox -- wget --spider --timeout=1 nginx1.nondefault.svc.cluster.local
228  kubectl exec -it busybox -- wget --spider --timeout=1 nginx2.default.svc.cluster.local
229  vim nwpexample.yaml
230  kubectl get ns --show-labels
231  vim nwpexample.yaml
232  history
233  kubectl exec -it busybox -- wget --spider --timeout=1 nginx1.nondefault.svc.cluster.local
234  kubectl apply -f nwpexample.yaml
235  kubectl exec -it busybox -- wget --spider --timeout=1 nginx1.nondefault.svc.cluster.local
236  kubectl exec -it busybox -- wget --spider --timeout=1 nginx2.default.svc.cluster.local
237  vim nwpexample.yaml
238  kubectl get pods busybox --show-labels
239  kubectl apply -f nwpexample.yaml
240  kubectl exec -it busybox -- wget --spider --timeout=1 nginx1.nondefault.svc.cluster.local
241  kubectl apply -f nwpexample.yaml
242  kubectl get ns
243  kubectl get pods remote
244  kubectl get pods -n remote
245  kubectl apply -f nwpexample.yaml
246  vim nwpexample.yaml
247  #kubectl label pod -n remote role=frontend
248  kubectl exec -n remote -it remotebox wget --spider --timeout=1 nginx1.nondefault.svc.cluster.local
249  kubectl exec -n remote -it remotebox -- wget --spider --timeout=1 nginx1.nondefault.svc.cluster.local
250  kubectl get networkpolicy -n nondefault
251  kubectl label pod -n remote role=frontend
252  kubectl label pod remotepod -n remote role=frontend
253  kubectl label pod remotebox -n remote role=frontend
254  kubectl exec -n remote -it remotebox -- wget --spider --timeout=1 nginx1.nondefault.svc.cluster.local
255  vim networkpolicy-example.yaml
256  vim nwp-lab9-2.yaml
257  kubectl get netpol
258  kubectl delete netpol deny-from-other-namespaces
259  kubectl describe netpol access-nginx
260  kubectl get pods
261  kubectl get ns
262  kubectl get pods remote
263  kubectl get pods -n remote
264  kubectl -n remote exec -it remotebox -- wget --spider --timeout=1 nginx.default.svc.cluster.local
265  history | grep app
266  kubectl get all
267  kubectl kubectl delete all all
268  kubectl delete all all
269  kubectl delete all --all
270  kubectl api-resources | less
271  kubectl api-resources | grep -i networkp
272  kubectl explain networkpolicies.spec | less
273  kubectl explain networkpolicies.spec.ingress | less
274  kubectl explain networkpolicies.crd.projectcalico.org.spec.ingress | less
275  kubectl completion -h | less
276  kubectl get ds -A
277  vim initex.yaml
278  grep init *
279  vim init-container.yaml
280  kubectl apply -f init-container.yaml
281  kubectl get pods
282  kubectl get pods -n kube-system
283  kubectl -n kube-system get pods calico-node-5xcrf -o yaml | less
284  vim init-container.yaml
285  kubectl delete -f init-container.yaml
286  kubectl get pods
287  kubectl apply -f init-container.yaml
288  kubectl get pods
289  kubectl get pods -w
290  kubectl run examplepod --image=busybox --dry-run=client -o yaml -- sleep 10
291  kubectl run examplepod --image=busybox --dry-run=client -o yaml -- sleep 10 > sleep10.yaml
292  vim sleep10.yaml
293  kubectl apply -f sleep10.yaml
294  kubectl get pods
295  kubectl get pods -o wide
296  kubectl run examplepod --image=busybox --dry-run=client -o yaml -- sh -c "sleep 10 > /tmp/sleep"
297  kubectl create deploy daemon --image=nginx --dry-run=client -o yaml > daemon.yaml
298  vim daemon.yaml
299  kubectl apply -f daemon.yaml
300  vim daemon.yaml
301  kubectl apply -f daemon.yaml
302  kubectl get daemonset
303  kubectl get pods -o wide
304  kubectl edit node control
305  kubectl get ds -A
306  kubectl -n kube-system get ds calico-node -o yaml | less
307  kubectl run busybox date
308  kubectl run testbox --image=busybox date
309  kubectl run testbox --image=busybox -- date
310  kubectl run testbox2 --image=busybox -- date
311  kubectl run testbox3 --image=busybox
312  kubectl get pods
313  vim selector-pod.yaml
314  kubectl apply -f selector-pod.yaml
315  kubectl get pods
316  kubectl describe pod nginx
317  kubectl label node worker2 disktype=ssd
318  kubectl get pods
319  kubectl get pods -o wide
320  kubectl edit node worker1
321  kubectl describe node worker1
322  kubectl taint node worker1 storage=ssd:NoSchedule
323  kubectl describe node worker1
324  kubectl describe node worker1 | less /taint
325  kubectl describe node worker1 | less
326  kubectl create deploy tolerateornot --image=nginx --replicas=6
327  kubectl get pods -o wide
328  vim taint-toleration.yaml
329  kubectl apply -f taint-toleration.yaml
330  kubectl get pods
331  kubectl get pods -o wide
332  kubectl apply -f taint-toleration.yaml
333  vim taint-toleration.yaml
334  vim taint-toleration2.yaml
335  kubectl apply -f taint-toleration2.yaml
336  kubectl get pods -o wide
337  kubectl set -h
338  kubectl set resources -h
339  kubectl set resources -h | less
340  kubectl get deploy
341  kubectl taint node worker1 storage=ssd:NoSchedule-
342  kubectl delete deploy tolerateornot
343  kubectl create deploy whatever --image=nginx --replicas=3
344  kubectl set resources deployment whatever --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi
345  kubectl get deploy whatever -o yaml | less
346  kubectl get pods -o wide
347  kubectl set resources deployment whatever --limits=cpu=200m,memory=512Gi --requests=cpu=100m,memory=256Gi
348  kubectl get pods -o wide
349  kubectl describe pod whatever-694fbf4f4b-hsrfh
350  kubectl set resources deployment whatever --limits=cpu=200m,memory=4Mi --requests=cpu=100m,memory=2Mi
351  kubectl get pods
352  kubectl describe pod whatever-684c54dfc-wjjg8
353  kubectl get pods
354  kubectl describe pod whatever-684c54dfc-wqmsw
355  kubectl delete deployments.apps whatever
356  kubectl create ns notmuch
357  kubectl create quota -h | less
358  kubectl create quota notmany --hard=cpu=1,memory=1G,pods=2,services=3,resourcequotas=1,secrets=5,persistentvolumeclaims=10 -n notmuch
359  kubectl create deploy alot --image=nginx --replicas=3 -n notmuch
360  kubectl get all -n notmuch
361  kubectl -n notmuch describe replicaset alot-8465f68dc6
362  kubectl set resources deployment alot --limits=cpu=200m,memory=128Mi --requests=cpu=100m,memory=2Mi -n notmuch
363  kubectl get all -n notmuch
364  kubectl describe -n notmuch pod alot-7cd6cbc85-zzm9t
365  kubectl -n notmuch delete quota notmany
366  kubectl get all -n notmuch
367  kubectl create deploy mydb --image=mariadb --replicas=3
368  kubectl get all --selector app=mydb
369  kubectl create cm -h | less
370  kubectl create cm mydbvars --from-literal=MARIADB_ROOT_PASSWORD=password
371  kubectl get cm mydbvars -o yaml
372  kubectl set env -h | less
373  #kubectl set env --from=configmap/myconfigmap --prefix=MYSQL_ deployment/myapp
374  kubectl set env --from=configmap/mydbvars deployment/mydb
375  kubec
376  kubectl get all --selector app=mydb
377  kubectl get deploy mydb -o yaml | less
378  kubectl create secret mydbsecretpw --from-literal=ROOT_PASSWORD=password
379  kubectl create secret -h | less
380  kubectl create secret generic -h | less
381  kubectl create secret generic mydbsecretpw --from-literal=ROOT_PASSWORD=password
382  kubectl describe secrets mydbsecretpw
383  kubectl get secrets mydbsecretpw -o yaml
384  echo cGFzc3dvcmQ= | base64 -d
385  vim exo2.txt
386  #kubectl create secret generic mynewdbvars --from-literal=whatever=password
387  kubectl create secret generic mynewdbvars --from-literal=whatever=password
388  kubectl get secrets mynewdbvars -o yaml
389  kubectl create deploy whateverdb --image=mariadb
390  kubectl set env --from secret/mynewdbvars deploy/whateverdb
391  kubectl edit deploy whateverdb
392  kubectl get all --selector app=whateverdb
393  kubectl explain deploy.spec | less
394  kubectl get nodes
395  kubectl drain node worker2
396  kubectl drain worker2
397  kubectl drain worker2 --force
398  kubectl drain worker2 --force --ignore-daemonsets
399  kubectl get nodes
400  kubectl get pods -o wide
401  kubectl edit node worker2
402  kubectl uncordon worker2
403  kubectl get nodes
404  kubectl get pods -o wide
405  vim morevolumes.yaml
406  kubectl apply -f morevolumes.yaml
407  kubectl get pods
408  kubectl delete pods morevol
409  kubectl get pods
410  kubectl apply -f morevolumes.yaml
411  kubectl get pods
412  kubectl exec -it morevol -c centos1 touch /centos1/centfile
413  kubectl exec -it morevol -c centos2 ls -l /centos2/
414  kubectl exec -it morevol -c centos2 -- ls -l /centos2/
415  kubectl top pods
416  kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components
417  kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
418  kubectl get pods -n kube-system
419  kubectl edit deploy -n kube-system metrics-server
420  kubectl get pods -n kube-system
421  kubectl top pods
422  kubectl get pods -n kube-system -o wide
423  sudo -i
424  cd /etc/kubernetes/manifests/
425  ls
426  vim etcd.yaml
427  sudo vim etcd.yaml
428  sudo ls -l /var/lib/etcd/
429  sudo ls -l /var/lib/etcd/member
430  ls -l /etc/pki
431  ls -l /etc/ssl
432  ls -l /etc/ssl/certs
433  sudo ps aux | grep kube-api
434  cd /cka
435  cd /
436  cd home/student/
437  ls
438  cd cka
439  ls
440  vim security-context.yaml
441  kubectl apply -f security-context.yaml
442  kubectl exec -it security-context-demo -- sh
443  kubectl create role -h
444  kubectl create role -h | less
445  kubectl get roles
446  kubectl create role pod-reader --verb=get --verb=list --verb=watch --resource=pods
447  kubectl get roles
448  kubectl describe role pod-reader
449  kubectl get roles -n awake
450  kubectl get pods -o yaml pv-pod | less
451  kubectl get sa -A
452  kubectl create sa myown
453  kubectl get roles
454  kubectl create rolebinding -h | less
455  #kubectl create rolebinding admin-binding --role=admin --serviceaccount=monitoring:sa-dev
456  kubectl create rolebinding pod-reader-binding --role=pod-reader --serviceaccount=default:myown
457  kubectl get deploy
458  kubectl set serviceaccount -h | less
459  kubectl set serviceaccount deploy mydb myown
460  exit
461  history
462  history > /tmp/novo-history.txt
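Note: commands 149-178 in this history demonstrate an etcd backup and restore; condensed, the working sequence is (certificate paths are the kubeadm defaults used above):

sudo etcdctl --endpoints=localhost:2379 \
  --cacert /etc/kubernetes/pki/etcd/ca.crt \
  --cert /etc/kubernetes/pki/etcd/server.crt \
  --key /etc/kubernetes/pki/etcd/server.key \
  snapshot save /tmp/etcdbackup.db
# restore: stop the static pods, swap in the restored data dir, start them again
cd /etc/kubernetes/manifests/ && sudo mv * ..
sudo etcdctl snapshot restore /tmp/etcdbackup.db --data-dir /var/lib/etcd-backup
sudo mv /var/lib/etcd /var/lib/etcd-old
sudo mv /var/lib/etcd-backup /var/lib/etcd
sudo mv ../*.yaml .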
pullratelimitpatch.sh (Normal file, 17 lines)
@@ -0,0 +1,17 @@
#!/bin/bash

# this script creates a secret and sets that as the default for the default service account
# the purpose is to overcome the Docker imagepullratelimit restriction

echo enter your docker username
read -s DOCKERUSER
echo enter your docker password
read -s DOCKERPASS

kubectl create secret docker-registry dockercreds \
  --docker-username=$DOCKERUSER \
  --docker-password=$DOCKERPASS

kubectl patch serviceaccount default \
  -p '{"imagePullSecrets": [{"name": "dockercreds"}]}'
setup-calico.sh (Normal file, 3 lines)
@@ -0,0 +1,3 @@
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/tigera-operator.yaml
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/custom-resources.yaml
kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/calico.yaml
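Note: the first two commands are the operator-based Calico installation and the third is the manifest-based installation; they are normally alternatives, not a combination. To verify whichever method was used (the operator method creates the calico-system namespace):

kubectl get pods -n calico-system
kubectl get pods -n kube-system | grep calico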
setup-container-previous-version 2.sh (Executable file, 69 lines)
@@ -0,0 +1,69 @@
#!/bin/bash
# script that runs
# https://kubernetes.io/docs/setup/production-environment/container-runtime

# changes March 14 2023: introduced $PLATFORM to have this work on amd64 as well as arm64

# setting MYOS variable
MYOS=$(hostnamectl | awk '/Operating/ { print $3 }')
OSVERSION=$(hostnamectl | awk '/Operating/ { print $4 }')
# beta: building in ARM support
[ $(arch) = aarch64 ] && PLATFORM=arm64
[ $(arch) = x86_64 ] && PLATFORM=amd64

if [ $MYOS = "Ubuntu" ]
then
### setting up container runtime prereq
cat <<- EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

sudo modprobe overlay
sudo modprobe br_netfilter

# Setup required sysctl params, these persist across reboots.
cat <<- EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

# Apply sysctl params without reboot
sudo sysctl --system

# (Install containerd)
sudo apt-get update && sudo apt-get install -y containerd
# hopefully temporary bugfix as the containerd version provided in the Ubuntu repo is too old
# added Jan 26th 2023
# this needs to be updated when a recent enough containerd version is in the Ubuntu repos
sudo systemctl stop containerd
# cleanup old files from previous attempt if existing
[ -d bin ] && rm -rf bin
wget https://github.com/containerd/containerd/releases/download/v1.6.15/containerd-1.6.15-linux-${PLATFORM}.tar.gz
tar xvf containerd-1.6.15-linux-${PLATFORM}.tar.gz
sudo mv bin/* /usr/bin/
# Configure containerd
sudo mkdir -p /etc/containerd
cat <<- TOML | sudo tee /etc/containerd/config.toml
version = 2
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
[plugins."io.containerd.grpc.v1.cri".containerd]
discard_unpacked_layers = true
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
TOML

# Restart containerd
sudo systemctl restart containerd
fi

sudo ln -s /etc/apparmor.d/runc /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/runc

touch /tmp/container.txt
setup-container-previous-version.sh (Executable file, 69 lines)
@@ -0,0 +1,69 @@
#!/bin/bash
# script that runs
# https://kubernetes.io/docs/setup/production-environment/container-runtime

# changes March 14 2023: introduced $PLATFORM to have this work on amd64 as well as arm64

# setting MYOS variable
MYOS=$(hostnamectl | awk '/Operating/ { print $3 }')
OSVERSION=$(hostnamectl | awk '/Operating/ { print $4 }')
# beta: building in ARM support
[ $(arch) = aarch64 ] && PLATFORM=arm64
[ $(arch) = x86_64 ] && PLATFORM=amd64

if [ $MYOS = "Ubuntu" ]
then
### setting up container runtime prereq
cat <<- EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

sudo modprobe overlay
sudo modprobe br_netfilter

# Setup required sysctl params, these persist across reboots.
cat <<- EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

# Apply sysctl params without reboot
sudo sysctl --system

# (Install containerd)
sudo apt-get update && sudo apt-get install -y containerd
# hopefully temporary bugfix as the containerd version provided in the Ubuntu repo is too old
# added Jan 26th 2023
# this needs to be updated when a recent enough containerd version is in the Ubuntu repos
sudo systemctl stop containerd
# cleanup old files from previous attempt if existing
[ -d bin ] && rm -rf bin
wget https://github.com/containerd/containerd/releases/download/v1.6.15/containerd-1.6.15-linux-${PLATFORM}.tar.gz
tar xvf containerd-1.6.15-linux-${PLATFORM}.tar.gz
sudo mv bin/* /usr/bin/
# Configure containerd
sudo mkdir -p /etc/containerd
cat <<- TOML | sudo tee /etc/containerd/config.toml
version = 2
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
[plugins."io.containerd.grpc.v1.cri".containerd]
discard_unpacked_layers = true
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
TOML

# Restart containerd
sudo systemctl restart containerd
fi

sudo ln -s /etc/apparmor.d/runc /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/runc

touch /tmp/container.txt
setup-container.sh
@@ -11,42 +11,38 @@ OSVERSION=$(hostnamectl | awk '/Operating/ { print $4 }')
 [ $(arch) = aarch64 ] && PLATFORM=arm64
 [ $(arch) = x86_64 ] && PLATFORM=amd64
 
+sudo apt install -y jq
+
 if [ $MYOS = "Ubuntu" ]
 then
 ### setting up container runtime prereq
 cat <<- EOF | sudo tee /etc/modules-load.d/containerd.conf
 overlay
 br_netfilter
 EOF
 
 sudo modprobe overlay
 sudo modprobe br_netfilter
 
 # Setup required sysctl params, these persist across reboots.
 cat <<- EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
 net.bridge.bridge-nf-call-iptables = 1
 net.ipv4.ip_forward = 1
 net.bridge.bridge-nf-call-ip6tables = 1
 EOF
 
 # Apply sysctl params without reboot
 sudo sysctl --system
 
 # (Install containerd)
-sudo apt-get update && sudo apt-get install -y containerd
-# hopefully temporary bugfix as the containerd version provided in Ubu repo is tool old
-# added Jan 26th 2023
-# this needs to be updated when a recent enough containerd version will be in Ubuntu repos
-sudo systemctl stop containerd
-# cleanup old files from previous attempt if existing
-[ -d bin ] && rm -rf bin
-wget https://github.com/containerd/containerd/releases/download/v1.6.15/containerd-1.6.15-linux-${PLATFORM}.tar.gz
-tar xvf containerd-1.6.15-linux-${PLATFORM}.tar.gz
-sudo mv bin/* /usr/bin/
+# getting rid of hard coded version numbers
+CONTAINERD_VERSION=$(curl -s https://api.github.com/repos/containerd/containerd/releases/latest | jq -r '.tag_name')
+CONTAINERD_VERSION=${CONTAINERD_VERSION#v}
+wget https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/containerd-${CONTAINERD_VERSION}-linux-${PLATFORM}.tar.gz
+sudo tar xvf containerd-${CONTAINERD_VERSION}-linux-${PLATFORM}.tar.gz -C /usr/local
 # Configure containerd
 sudo mkdir -p /etc/containerd
 cat <<- TOML | sudo tee /etc/containerd/config.toml
 version = 2
 [plugins]
 [plugins."io.containerd.grpc.v1.cri"]
@@ -57,9 +53,22 @@ version = 2
 runtime_type = "io.containerd.runc.v2"
 [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
 SystemdCgroup = true
 TOML
 
-# Restart containerd
-sudo systemctl restart containerd
+RUNC_VERSION=$(curl -s https://api.github.com/repos/opencontainers/runc/releases/latest | jq -r '.tag_name')
+
+wget https://github.com/opencontainers/runc/releases/download/${RUNC_VERSION}/runc.${PLATFORM}
+sudo install -m 755 runc.${PLATFORM} /usr/local/sbin/runc
+# Restart containerd
+wget https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
+sudo mv containerd.service /usr/lib/systemd/system/
+sudo systemctl daemon-reload
+sudo systemctl enable --now containerd
 fi
 
+sudo ln -s /etc/apparmor.d/runc /etc/apparmor.d/disable/
+sudo apparmor_parser -R /etc/apparmor.d/runc
+
+touch /tmp/container.txt
+exit
@@ -1,68 +0,0 @@
-#!/bin/bash
-# kubeadm installation instructions as on
-# https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/
-
-# this script supports centos 7 and Ubuntu 20.04 only
-# run this script with sudo
-
-if ! [ $USER = root ]
-then
-echo run this script with sudo
-exit 3
-fi
-
-# setting MYOS variable
-MYOS=$(hostnamectl | awk '/Operating/ { print $3 }')
-OSVERSION=$(hostnamectl | awk '/Operating/ { print $4 }')
-
-##### CentOS 7 config
-if [ $MYOS = "CentOS" ]
-then
-echo RUNNING CENTOS CONFIG
-cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
-[kubernetes]
-name=Kubernetes
-baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
-enabled=1
-gpgcheck=1
-repo_gpgcheck=1
-gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
-exclude=kubelet kubeadm kubectl
-EOF
-
-# Set SELinux in permissive mode (effectively disabling it)
-setenforce 0
-sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
-
-# disable swap (assuming that the name is /dev/centos/swap
-sed -i 's/^\/dev\/mapper\/centos-swap/#\/dev\/mapper\/centos-swap/' /etc/fstab
-swapoff /dev/mapper/centos-swap
-
-yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
-
-systemctl enable --now kubelet
-fi
-
-if [ $MYOS = "Ubuntu" ]
-then
-
-sudo apt-get update
-sudo apt-get install -y apt-transport-https ca-certificates curl
-sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
-echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
-sudo apt-get update
-sudo apt-get install -y kubelet kubeadm kubectl
-sudo apt-mark hold kubelet kubeadm kubectl
-swapoff /swapfile
-
-sed -i 's/swapfile/#swapfile/' /etc/fstab
-
-
-fi
-
-# Set iptables bridging
-cat <<EOF > /etc/sysctl.d/k8s.conf
-net.bridge.bridge-nf-call-ip6tables = 1
-net.bridge.bridge-nf-call-iptables = 1
-EOF
-sysctl --system
setup-kubetools-previousversion.sh (Executable file, 59 lines)
@@ -0,0 +1,59 @@
#!/bin/bash
# kubeadm installation instructions as on
# https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/

# this script supports Ubuntu 20.04 LTS and later only
# run this script with sudo

if ! [ -f /tmp/container.txt ]
then
echo run ./setup-container.sh before running this script
exit 4
fi

# setting MYOS variable
MYOS=$(hostnamectl | awk '/Operating/ { print $3 }')
OSVERSION=$(hostnamectl | awk '/Operating/ { print $4 }')

# detecting latest Kubernetes version
KUBEVERSION=$(curl -s https://api.github.com/repos/kubernetes/kubernetes/releases/latest | jq -r '.tag_name')
KUBEVERSION=${KUBEVERSION%.*}

# setting previous version
VERSION=${KUBEVERSION#*.}
PREVIOUSVERSION=$(( VERSION - 1 ))
PREVIOUSVERSION=v1.${PREVIOUSVERSION}


if [ $MYOS = "Ubuntu" ]
then
echo RUNNING UBUNTU CONFIG
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF

sudo apt-get update && sudo apt-get install -y apt-transport-https curl
curl -fsSL https://pkgs.k8s.io/core:/stable:/${PREVIOUSVERSION}/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/${PREVIOUSVERSION}/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
sleep 2

sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
sudo swapoff -a

sudo sed -i 's/\/swap/#\/swap/' /etc/fstab
fi

# Set iptables bridging
#sudo cat <<EOF > /etc/sysctl.d/k8s.conf
#net.bridge.bridge-nf-call-ip6tables = 1
#net.bridge.bridge-nf-call-iptables = 1
#EOF
#sudo sysctl --system

sudo crictl config --set \
runtime-endpoint=unix:///run/containerd/containerd.sock
echo 'after initializing the control node, follow instructions and use kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml to install the calico plugin (control node only). On the worker nodes, use sudo kubeadm join ... to join'
@@ -28,7 +28,7 @@ EOF
 EOF
 
 sudo apt-get update
-sudo apt-get install -y kubelet=1.24.3-00 kubeadm=1.24.3-00 kubectl=1.24.3-00
+sudo apt-get install -y kubelet=1.27.1-00 kubeadm=1.27.1-00 kubectl=1.27.1-00
 sudo apt-mark hold kubelet kubeadm kubectl
 swapoff -a
 
@@ -42,5 +42,8 @@ net.bridge.bridge-nf-call-iptables = 1
 EOF
 sysctl --system
 
+sudo crictl config --set \
+runtime-endpoint=unix:///run/containerd/containerd.sock
+
 echo 'after initializing the control node, follow instructions and use kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/calico.yaml to install the calico plugin (control node only). On the worker nodes, use sudo kubeadm join ... to join'
 
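# Side note on the pinned versions above: apt-mark hold is what makes later
# `kubeadm upgrade` practice meaningful; before upgrading, release the hold
# with standard apt-mark usage:
#   sudo apt-mark unhold kubelet kubeadm kubectl
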
@@ -5,16 +5,27 @@
 # this script supports Ubuntu 20.04 LTS and later only
 # run this script with sudo
 
-if ! [ $USER = root ]
+#if ! [ $USER = root ]
+#then
+# echo run this script with sudo
+# exit 3
+#fi
+
+if ! [ -f /tmp/container.txt ]
 then
-echo run this script with sudo
-exit 3
+echo run ./setup-container.sh before running this script
+exit 4
 fi
 
 # setting MYOS variable
 MYOS=$(hostnamectl | awk '/Operating/ { print $3 }')
 OSVERSION=$(hostnamectl | awk '/Operating/ { print $4 }')
 
+# detecting latest Kubernetes version
+KUBEVERSION=$(curl -s https://api.github.com/repos/kubernetes/kubernetes/releases/latest | jq -r '.tag_name')
+KUBEVERSION=${KUBEVERSION%.*}
+
+
 if [ $MYOS = "Ubuntu" ]
 then
 echo RUNNING UBUNTU CONFIG
@@ -23,24 +34,25 @@ then
 EOF
 
 sudo apt-get update && sudo apt-get install -y apt-transport-https curl
-curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+curl -fsSL https://pkgs.k8s.io/core:/stable:/${KUBEVERSION}/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
-cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
+echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/${KUBEVERSION}/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
-deb https://apt.kubernetes.io/ kubernetes-xenial main
+sleep 2
-EOF
 
 sudo apt-get update
 sudo apt-get install -y kubelet kubeadm kubectl
 sudo apt-mark hold kubelet kubeadm kubectl
-swapoff -a
+sudo swapoff -a
 
-sed -i 's/\/swap/#\/swap/' /etc/fstab
+sudo sed -i 's/\/swap/#\/swap/' /etc/fstab
 fi
 
 # Set iptables bridging
-cat <<EOF > /etc/sysctl.d/k8s.conf
+#sudo cat <<EOF > /etc/sysctl.d/k8s.conf
-net.bridge.bridge-nf-call-ip6tables = 1
+#net.bridge.bridge-nf-call-ip6tables = 1
-net.bridge.bridge-nf-call-iptables = 1
+#net.bridge.bridge-nf-call-iptables = 1
-EOF
+#EOF
-sysctl --system
+#sudo sysctl --system
 
-echo 'after initializing the control node, follow instructions and use kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/calico.yaml to install the calico plugin (control node only). On the worker nodes, use sudo kubeadm join ... to join'
+sudo crictl config --set \
+runtime-endpoint=unix:///run/containerd/containerd.sock
+echo 'after initializing the control node, follow instructions and use kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml to install the calico plugin (control node only). On the worker nodes, use sudo kubeadm join ... to join'
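# A quick check that the new pkgs.k8s.io repository introduced above is being
# used (real apt command; output format varies):
#   apt-cache policy kubeadm
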
@@ -1,151 +0,0 @@
#!/bin/bash
#
# source https://github.com/sandervanvugt/cka/setup-lb.sh

# script to set up load balancing on cluster nodes
# for use in CKA courses by Sander van Vugt
# version 0.6
# currently only tested on Ubuntu 22.04 LTS Server
# run this AFTER running setup-container.sh and setup-kubetools.sh

if which kubectl
then
echo all good moving on
else
echo please run setup-container.sh and setup-kubetools.sh first and then run this again
exit 6
fi

## establish key based SSH with remote hosts
# obtain node information
if grep control1 /etc/hosts | grep -v 127
then
export CONTROL1_IP=$(awk '/control1/ { print $1 }' /etc/hosts | grep -v 127)
else
echo enter IP address for control1
read CONTROL1_IP
export CONTROL1_IP=$CONTROL1_IP
sudo sh -c "echo $CONTROL1_IP control1 >> /etc/hosts"
fi

if grep control2 /etc/hosts | grep -v 127
then
export CONTROL2_IP=$(awk '/control2/ { print $1 }' /etc/hosts | grep -v 127)
else
echo enter IP address for control2
read CONTROL2_IP
export CONTROL2_IP=$CONTROL2_IP
sudo sh -c "echo $CONTROL2_IP control2 >> /etc/hosts"
fi

if grep control3 /etc/hosts | grep -v 127
then
export CONTROL3_IP=$(awk '/control3/ { print $1 }' /etc/hosts | grep -v 127)
else
echo enter IP address for control3
read CONTROL3_IP
export CONTROL3_IP=$CONTROL3_IP
sudo sh -c "echo $CONTROL3_IP control3 >> /etc/hosts"
fi

echo ##### READ ALL OF THIS BEFORE CONTINUING ######
echo this script requires you to run setup-docker.sh and setup-kubetools.sh first
echo this script is based on the NIC name ens33
echo if your networkcard has a different name, edit keepalived.conf
echo before continuing and change "interface ens33" to match your config
echo .
echo this script will create a keepalived apiserver at 192.168.29.100
echo if this IP address does not match your network configuration,
echo manually change the check_apiserver.sh file before continuing
echo press enter to continue or Ctrl-c to interrupt and apply modifications
read

# performing check on critical files
for i in keepalived.conf check_apiserver.sh haproxy.cfg
do
if [ ! -f $i ]
then
echo $i should exist in the current directory && exit 2
fi
done

cat /etc/hosts
echo this is the main thing that goes wrong: if it does, manually edit /etc/hosts on all the nodes and run the rest of the script
read

# generating and distributing SSH keys
ssh-keygen
ssh-copy-id control1
ssh-copy-id control2
ssh-copy-id control3

# configuring sudo for easier access
sudo sh -c "echo 'Defaults timestamp_type=global,timestamp_timeout=60' >> /etc/sudoers"
sudo scp -p /etc/sudoers student@control2:/tmp/ && ssh -t control2 'sudo -S chown root:root /tmp/sudoers' && ssh -t control2 'sudo -S cp -p /tmp/sudoers /etc/'
sudo scp -p /etc/sudoers student@control3:/tmp/ && ssh -t control3 'sudo -S chown root:root /tmp/sudoers' && ssh -t control3 'sudo -S cp -p /tmp/sudoers /etc/'
#ssh control2 sudo -S sh -c "echo 'Defaults timestamp_type=global,timestamp_timeout=60' >> /etc/sudoers"
#ssh control3 sudo -S sh -c "echo 'Defaults timestamp_type=global,timestamp_timeout=60' >> /etc/sudoers"

echo DEBUG: check if sudo modification worked
read

# install required software
sudo apt install haproxy keepalived -y
ssh control2 "sudo -S apt install haproxy keepalived -y"
ssh control3 "sudo -S apt install haproxy keepalived -y"
echo DEBUG check if haproxy and keepalived are installed
read

scp /etc/hosts control2:/tmp && ssh -t control2 'sudo -S cp /tmp/hosts /etc/'
scp /etc/hosts control3:/tmp && ssh -t control3 'sudo -S cp /tmp/hosts /etc/'

# create keepalived config
# change IP address to anything that works in your environment!
sudo chmod +x check_apiserver.sh
sudo cp check_apiserver.sh /etc/keepalived/

scp check_apiserver.sh control2:/tmp && ssh -t control2 'sudo -S cp /tmp/check_apiserver.sh /etc/keepalived'
scp check_apiserver.sh control3:/tmp && ssh -t control3 'sudo -S cp /tmp/check_apiserver.sh /etc/keepalived'

#### creating site specific keepalived.conf file
sudo cp keepalived.conf keepalived-control2.conf
sudo cp keepalived.conf keepalived-control3.conf

sudo sed -i 's/state MASTER/state SLAVE/' keepalived-control2.conf
sudo sed -i 's/state MASTER/state SLAVE/' keepalived-control3.conf
sudo sed -i 's/priority 255/priority 254/' keepalived-control2.conf
sudo sed -i 's/priority 255/priority 253/' keepalived-control3.conf

sudo cp keepalived.conf /etc/keepalived/
scp keepalived-control2.conf control2:/tmp && ssh -t control2 'sudo -S cp /tmp/keepalived-control2.conf /etc/keepalived/keepalived.conf'
scp keepalived-control3.conf control3:/tmp && ssh -t control3 'sudo -S cp /tmp/keepalived-control3.conf /etc/keepalived/keepalived.conf'
echo DEBUG check if files are copied over successfully
read

### rewriting haproxy.cfg with site specific IP addresses
sudo sed -i s/server\ control1\ 1.1.1.1\:6443\ check/server\ control1\ $CONTROL1_IP\:6443\ check/ haproxy.cfg
sudo sed -i s/server\ control2\ 1.1.1.2\:6443\ check/server\ control2\ $CONTROL2_IP\:6443\ check/ haproxy.cfg
sudo sed -i s/server\ control3\ 1.1.1.3\:6443\ check/server\ control3\ $CONTROL3_IP\:6443\ check/ haproxy.cfg

# copy haproxy.cfg to destinations
sudo cp haproxy.cfg /etc/haproxy/
scp haproxy.cfg control2:/tmp && ssh -t control2 'sudo -S cp /tmp/haproxy.cfg /etc/haproxy/'
scp haproxy.cfg control3:/tmp && ssh -t control3 'sudo -S cp /tmp/haproxy.cfg /etc/haproxy/'
echo DEBUG check if haproxy files are copied over successfully
read

# start and enable services
sudo systemctl enable keepalived --now
sudo systemctl enable haproxy --now
ssh control2 sudo -S systemctl enable keepalived --now
ssh control2 sudo -S systemctl enable haproxy --now
ssh control3 sudo -S systemctl enable keepalived --now
ssh control3 sudo -S systemctl enable haproxy --now

echo setup is now done, please verify
echo the first node that started the services - normally control1 - should run the virtual IP address 192.168.29.100
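# For orientation: the sed rewrites in this script assume a keepalived.conf
# vrrp_instance stanza roughly like the sketch below (reconstructed from the
# sed patterns and echo messages above, not the repo's literal file):
#   vrrp_instance VI_1 {
#       state MASTER
#       interface ens33
#       virtual_router_id 51
#       priority 255
#       virtual_ipaddress {
#           192.168.29.100
#       }
#   }
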
@@ -1,145 +0,0 @@
#!/bin/bash
#
# source https://github.com/sandervanvugt/cka/setup-lb.sh

# script to set up load balancing on cluster nodes
# for use in CKA courses by Sander van Vugt
# version 0.7
# currently only tested on Ubuntu 22.04 LTS Server
# run this AFTER running setup-container.sh and setup-kubetools.sh
#
# TODO: remove the many password prompts

if which kubectl
then
echo all good moving on
else
echo please run setup-container.sh and setup-kubetools.sh first and then run this again
exit 6
fi

## establish key based SSH with remote hosts
# obtain node information
if grep control1 /etc/hosts | grep -v 127
then
export CONTROL1_IP=$(awk '/control1/ { print $1 }' /etc/hosts | grep -v 127)
else
echo enter IP address for control1
read CONTROL1_IP
export CONTROL1_IP=$CONTROL1_IP
sudo sh -c "echo $CONTROL1_IP control1 >> /etc/hosts"
fi

if grep control2 /etc/hosts | grep -v 127
then
export CONTROL2_IP=$(awk '/control2/ { print $1 }' /etc/hosts | grep -v 127)
else
echo enter IP address for control2
read CONTROL2_IP
export CONTROL2_IP=$CONTROL2_IP
sudo sh -c "echo $CONTROL2_IP control2 >> /etc/hosts"
fi

if grep control3 /etc/hosts | grep -v 127
then
export CONTROL3_IP=$(awk '/control3/ { print $1 }' /etc/hosts | grep -v 127)
else
echo enter IP address for control3
read CONTROL3_IP
export CONTROL3_IP=$CONTROL3_IP
sudo sh -c "echo $CONTROL3_IP control3 >> /etc/hosts"
fi

echo ##### READ ALL OF THIS BEFORE CONTINUING ######
echo this script requires you to run setup-container.sh and setup-kubetools.sh first
echo this script is based on the NIC name ens33
echo if your networkcard has a different name, edit keepalived.conf
echo before continuing and change "interface ens33" to match your config
echo .
echo this script will create a keepalived apiserver at 192.168.29.100
echo if this IP address does not match your network configuration,
echo manually change the check_apiserver.sh file before continuing
echo also change the IP address in keepalived.conf
echo .
echo press enter to continue or Ctrl-c to interrupt and apply modifications
read

# performing check on critical files
for i in keepalived.conf check_apiserver.sh haproxy.cfg
do
if [ ! -f $i ]
then
echo $i should exist in the current directory && exit 2
fi
done

# generating and distributing SSH keys
ssh-keygen
ssh-copy-id control1
ssh-copy-id control2
ssh-copy-id control3

# configuring sudo for easier access
sudo sh -c "echo 'Defaults timestamp_type=global,timestamp_timeout=60' >> /etc/sudoers"
sudo scp -p /etc/sudoers student@control2:/tmp/ && ssh -t control2 'sudo -S chown root:root /tmp/sudoers' && ssh -t control2 'sudo -S cp -p /tmp/sudoers /etc/'
sudo scp -p /etc/sudoers student@control3:/tmp/ && ssh -t control3 'sudo -S chown root:root /tmp/sudoers' && ssh -t control3 'sudo -S cp -p /tmp/sudoers /etc/'
#ssh control2 sudo -S sh -c "echo 'Defaults timestamp_type=global,timestamp_timeout=60' >> /etc/sudoers"
#ssh control3 sudo -S sh -c "echo 'Defaults timestamp_type=global,timestamp_timeout=60' >> /etc/sudoers"

# install required software
sudo apt install haproxy keepalived -y
ssh control2 "sudo -S apt install haproxy keepalived -y"
ssh control3 "sudo -S apt install haproxy keepalived -y"

scp /etc/hosts control2:/tmp && ssh -t control2 'sudo -S cp /tmp/hosts /etc/'
scp /etc/hosts control3:/tmp && ssh -t control3 'sudo -S cp /tmp/hosts /etc/'

# create keepalived config
# change IP address to anything that works in your environment!
sudo chmod +x check_apiserver.sh
sudo cp check_apiserver.sh /etc/keepalived/

scp check_apiserver.sh control2:/tmp && ssh -t control2 'sudo -S cp /tmp/check_apiserver.sh /etc/keepalived'
scp check_apiserver.sh control3:/tmp && ssh -t control3 'sudo -S cp /tmp/check_apiserver.sh /etc/keepalived'

#### creating site specific keepalived.conf file
sudo cp keepalived.conf keepalived-control2.conf
sudo cp keepalived.conf keepalived-control3.conf

sudo sed -i 's/state MASTER/state SLAVE/' keepalived-control2.conf
sudo sed -i 's/state MASTER/state SLAVE/' keepalived-control3.conf
sudo sed -i 's/priority 255/priority 254/' keepalived-control2.conf
sudo sed -i 's/priority 255/priority 253/' keepalived-control3.conf

sudo cp keepalived.conf /etc/keepalived/
scp keepalived-control2.conf control2:/tmp && ssh -t control2 'sudo -S cp /tmp/keepalived-control2.conf /etc/keepalived/keepalived.conf'
scp keepalived-control3.conf control3:/tmp && ssh -t control3 'sudo -S cp /tmp/keepalived-control3.conf /etc/keepalived/keepalived.conf'
echo DEBUG check if files are copied over successfully
read

### rewriting haproxy.cfg with site specific IP addresses
sudo sed -i s/server\ control1\ 1.1.1.1\:6443\ check/server\ control1\ $CONTROL1_IP\:6443\ check/ haproxy.cfg
sudo sed -i s/server\ control2\ 1.1.1.2\:6443\ check/server\ control2\ $CONTROL2_IP\:6443\ check/ haproxy.cfg
sudo sed -i s/server\ control3\ 1.1.1.3\:6443\ check/server\ control3\ $CONTROL3_IP\:6443\ check/ haproxy.cfg

# copy haproxy.cfg to destinations
sudo cp haproxy.cfg /etc/haproxy/
scp haproxy.cfg control2:/tmp && ssh -t control2 'sudo -S cp /tmp/haproxy.cfg /etc/haproxy/'
scp haproxy.cfg control3:/tmp && ssh -t control3 'sudo -S cp /tmp/haproxy.cfg /etc/haproxy/'
echo DEBUG check if haproxy files are copied over successfully
read

# start and enable services
sudo systemctl enable keepalived --now
sudo systemctl enable haproxy --now
ssh control2 sudo -S systemctl enable keepalived --now
ssh control2 sudo -S systemctl enable haproxy --now
ssh control3 sudo -S systemctl enable keepalived --now
ssh control3 sudo -S systemctl enable haproxy --now

echo setup is now done, please verify
echo the first node that started the services - normally control1 - should run the virtual IP address 192.168.29.100
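# The interactive ssh-keygen/ssh-copy-id sequence above prompts repeatedly; a
# common non-interactive alternative (standard OpenSSH flags) is:
#   ssh-keygen -t ed25519 -N '' -f ~/.ssh/id_ed25519
#   for node in control1 control2 control3; do ssh-copy-id -i ~/.ssh/id_ed25519.pub $node; done
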
@@ -118,8 +118,6 @@ sudo sed -i 's/priority 255/priority 253/' keepalived-control3.conf
 sudo cp keepalived.conf /etc/keepalived/
 scp keepalived-control2.conf control2:/tmp && ssh -t control2 'sudo -S cp /tmp/keepalived-control2.conf /etc/keepalived/keepalived.conf'
 scp keepalived-control3.conf control3:/tmp && ssh -t control3 'sudo -S cp /tmp/keepalived-control3.conf /etc/keepalived/keepalived.conf'
-echo DEBUG check if files are copied over successfully
-read
 
 ### rewriting haproxy.cfg with site specific IP addresses
 sudo sed -i s/server\ control1\ 1.1.1.1\:6443\ check/server\ control1\ $CONTROL1_IP\:6443\ check/ haproxy.cfg
@@ -130,8 +128,6 @@ sudo sed -i s/server\ control3\ 1.1.1.3\:6443\ check/server\ control3\ $CONTROL3
 sudo cp haproxy.cfg /etc/haproxy/
 scp haproxy.cfg control2:/tmp && ssh -t control2 'sudo -S cp /tmp/haproxy.cfg /etc/haproxy/'
 scp haproxy.cfg control3:/tmp && ssh -t control3 'sudo -S cp /tmp/haproxy.cfg /etc/haproxy/'
-echo DEBUG check if haproxy files are copied over successfully
-read
 
 # start and enable services
 sudo systemctl enable keepalived --now
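# After the services are enabled, a one-line health check on each node
# (standard systemctl usage) is:
#   systemctl is-active keepalived haproxy
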
100
setup-lb.sh
@@ -1,100 +0,0 @@
#!/bin/bash
#
# source https://github.com/sandervanvugt/cka/setup-lb.sh

# script to set up load balancing on cluster nodes
# for use in CKA courses by Sander van Vugt
# version 0.5
# currently only supporting CentOS 7.x
# run this AFTER running setup-docker.sh and setup-kubetools.sh

## establish key based SSH with remote hosts
# obtain node information
echo this script requires three nodes: control1 control2 and control3
echo enter the IP address for control1
read CONTROL1_IP
echo enter the IP address for control2
read CONTROL2_IP
echo enter the IP address for control3
read CONTROL3_IP
echo ##### READ ALL OF THIS BEFORE CONTINUING ######
echo this script requires you to run setup-docker.sh and setup-kubetools.sh first
echo this script is based on the NIC name ens33
echo if your networkcard has a different name, edit keepalived.conf
echo before continuing and change "interface ens33" to match your config
echo .
echo this script will create a keepalived apiserver at 192.168.4.100
echo if this IP address does not match your network configuration,
echo manually change the check_apiserver.sh file before continuing
echo press enter to continue or Ctrl-c to interrupt and apply modifications
read

# performing check on critical files
for i in keepalived.conf check_apiserver.sh haproxy.cfg
do
if [ ! -f $i ]
then
echo $i should exist in the current directory && exit 2
fi
done

# create /etc/hosts for all nodes
echo $CONTROL1_IP control1 >> /etc/hosts
echo $CONTROL2_IP control2 >> /etc/hosts
echo $CONTROL3_IP control3 >> /etc/hosts

# generating and distributing SSH keys
ssh-keygen
ssh-copy-id control1
ssh-copy-id control2
ssh-copy-id control3

# install required software
yum install haproxy keepalived -y
ssh control2 "yum install haproxy keepalived -y"
ssh control3 "yum install haproxy keepalived -y"

# copying /etc/hosts file
scp /etc/hosts control2:/etc/
scp /etc/hosts control3:/etc/

# create keepalived config
# change IP address to anything that works in your environment!
chmod +x check_apiserver.sh
cp check_apiserver.sh /etc/keepalived/
scp check_apiserver.sh control2:/etc/keepalived/
scp check_apiserver.sh control3:/etc/keepalived/

#### creating site specific keepalived.conf file
cp keepalived.conf keepalived-control2.conf
cp keepalived.conf keepalived-control3.conf

sed -i 's/state MASTER/state SLAVE/' keepalived-control2.conf
sed -i 's/state MASTER/state SLAVE/' keepalived-control3.conf
sed -i 's/priority 255/priority 254/' keepalived-control2.conf
sed -i 's/priority 255/priority 253/' keepalived-control3.conf

cp keepalived.conf /etc/keepalived/
scp keepalived-control2.conf control2:/etc/keepalived/keepalived.conf
scp keepalived-control3.conf control3:/etc/keepalived/keepalived.conf

### rewriting haproxy.cfg with site specific IP addresses
sed -i s/server\ control1\ 1.1.1.1\:6443\ check/server\ control1\ $CONTROL1_IP\:6443\ check/ haproxy.cfg
sed -i s/server\ control2\ 1.1.1.2\:6443\ check/server\ control2\ $CONTROL2_IP\:6443\ check/ haproxy.cfg
sed -i s/server\ control3\ 1.1.1.3\:6443\ check/server\ control3\ $CONTROL3_IP\:6443\ check/ haproxy.cfg

# copy haproxy.cfg to destinations
cp haproxy.cfg /etc/haproxy/
scp haproxy.cfg control2:/etc/haproxy/
scp haproxy.cfg control3:/etc/haproxy/

# start and enable services
systemctl enable keepalived --now
systemctl enable haproxy --now
ssh control2 systemctl enable keepalived --now
ssh control2 systemctl enable haproxy --now
ssh control3 systemctl enable keepalived --now
ssh control3 systemctl enable haproxy --now

echo setup is now done, please verify
echo control1 should run the virtual IP address 192.168.4.100
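# For orientation: the haproxy.cfg rewritten above carries one `server` line
# per control node; a sketch of the relevant backend (reconstructed from the
# sed patterns, not the repo's literal file) looks like:
#   backend apiserver
#       balance roundrobin
#       server control1 1.1.1.1:6443 check
#       server control2 1.1.1.2:6443 check
#       server control3 1.1.1.3:6443 check
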
@@ -1,16 +1,17 @@
 apiVersion: v1
 kind: Pod
 metadata:
-  name: nginx-ssd
+  name: tolerator
   labels:
     env: test
 spec:
   containers:
-  - name: nginx-ssd
+  - name: nginx
     image: nginx
     imagePullPolicy: IfNotPresent
   tolerations:
-  - key: "storage"
+  - key: "example-key"
     operator: "Equal"
-    value: "ssd"
+    value: "value1"
     effect: "NoSchedule"
+
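For context: a toleration only takes effect on nodes that carry a matching
taint. A minimal sketch of the taint the renamed pod above would tolerate
(the node name node1 is a placeholder):

    kubectl taint nodes node1 example-key=value1:NoSchedule
    # a trailing dash removes the taint again
    kubectl taint nodes node1 example-key=value1:NoSchedule-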