From 0630eb32e6cd96b9e11a45ae94e14f9d4f1b224e Mon Sep 17 00:00:00 2001
From: sandervanvugt
Date: Tue, 15 Oct 2024 18:55:21 +0200
Subject: [PATCH] message

---
 history-15Oct24.txt                   | 282 ++++++++++++++++++++++++++
 setup-container-previous-version 2.sh |  69 +++++++
 2 files changed, 351 insertions(+)
 create mode 100644 history-15Oct24.txt
 create mode 100755 setup-container-previous-version 2.sh

diff --git a/history-15Oct24.txt b/history-15Oct24.txt
new file mode 100644
index 0000000..47f9e27
--- /dev/null
+++ b/history-15Oct24.txt
@@ -0,0 +1,282 @@
+    1  git clone https://github.com/sandervanvugt/cka
+    2  cd cka
+    3  ls
+    4  ./setup-container.sh
+    5  ls
+    6  ./setup-kubetools.sh
+    7  history
+    8  sudo kubeadm init
+    9  cd
+   10  mkdir -p $HOME/.kube
+   11  kubectl get all
+   12  kubectl get pods -n kube-system
+   13  source <(kubectl completion bash)
+   14  kubectl describe -n kube-system pod coredns-7c65d6cfc9-z5rsc
+   15  kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
+   16  kubectl get pods -n kube-system
+   17  history
+   18  kubectl get nodes
+   19  kubectl describe node control
+   20  history
+   21  cd cka/
+   22  ./counter.sh 12
+   23  kubectl get nodes
+   24  sudo apt install helm
+   25  helm
+   26  cd ..
+   27  ls
+   28  tar xvf helm-v3.16.2-linux-arm64.tar.gz
+   29  sudo cp linux-arm64/helm /usr/local/bin
+   30  helm upgrade --install ingress-nginx ingress-nginx --repo https://kubernetes.github.io/ingress-nginx --namespace ingress-nginx --create-namespace
+   31  kubectl get all -n ingress-nginx
+   32  history
+   33  kubectl create deploy nginxsvc --image=nginx --port=80
+   34  kubectl expose deploy nginxsvc
+   35  kubectl get all --selector app=nginxsvc
+   36  kubectl create ingress nginxsvc --class=nginx --rule=nginxsvc.info/*=nginxsvc:80
+   37  kubectl port-forward -n ingress-nginx svc/ingress-nginx-controller 8080:80
+   38  bg
+   39  sudo sh -c 'echo "127.0.0.1 nginxsvc.info" >> /etc/hosts'
+   40  cat /etc/hosts
+   41  curl nginxsvc.info:8080
+   42  kubectl create ing -h | less
+   43  history
+   44  kubectl get all -n ingress-nginx
+   45  kubectl edit -n ingress-nginx svc ingress-nginx-controller
+   46  kubectl get all -n ingress-nginx
+   47  cd cka
+   48  ./counter.sh 1
+   49  history
+   50  kubectl edit svc nginxsvc
+   51  kubectl describe ingress nginxsvc
+   52  kubectl describe svc nginxsvc
+   53  kubectl edit svc nginxsvc
+   54  kubectl describe svc nginxsvc
+   55  kubectl describe node control | less
+   56  df -h
+   57  kubectl create ns limited
+   58  kubectl create quota qtest --hard pods=3,cpu=100m,memory=500Mi
+   59  kubectl get pods
+   60  kubectl scale deploy nginxsvc --replicas=5
+   61  kubectl get all
+   62  kubectl delete quota qtest
+   63  kubectl scale deploy nginxsvc --replicas=5 -n limited
+   64  kubectl create quota qtest --hard pods=3,cpu=100m,memory=500Mi -n limited
+   65  kubectl describe quota -n limited
+   66  kubectl create deploy nginx --image=nginx --replicas=3 -n limited
+   67  kubectl get all -n limited
+   68  kubectl describe rs nginxsvc-7f8cdcb4db
+   69  kubectl get all -n limited
+   70  kubectl -n limited describe rs nginx-676b6c5bbc
+   71  history
+   72  kubectl -n limited set resources deploy nginx --requests cpu=100m,memory=5Mi --limits cpu=200m,memory=20m
+   73  kubectl -n limited set resources deploy nginx --requests cpu=100m,memory=20Mi --limits cpu=200m,memory=40m
+   74  kubectl -n limited set resources deploy nginx --requests cpu=100m,memory=20Mi --limits cpu=200m,memory=40Mi
+   75  kubectl get pods -n limited
+   76  kubectl get all -n limited
+   77  kubectl scale -n limited deploy nginx --replicas=4
+   78  kubectl scale -n limited deploy nginx --replicas=3
+   79  kubectl get all -n limited
+   80  kubectl describe -n limited quota qtest
+   81  kubectl edit quota -n limited qtest
+   82  kubectl get all -n limited
+   83  kubectl scale -n limited deploy nginx --replicas=3
+   84  kubectl get all -n limited
+   85  kubectl describe -n limited quota qtest
+   86  kubectl scale -n limited deploy nginx --replicas=2
+   87  kubectl get all -n limited
+   88  kubectl scale -n limited deploy nginx --replicas=3
+   89  kubectl get all -n limited
+   90  history
+   91  kubectl create ns limited
+   92  vim limitrange.yaml
+   93  kubectl apply -f limitrange.yaml -n limited
+   94  kubectl describe ns limited
+   95  kubectl run limitpod --image=nginx -n limited
+   96  kubectl -n limited delete quota
+   97  kubectl -n limited delete quota qtest
+   98  kubectl run limitpod --image=nginx -n limited
+   99  kubectl describe -n limited pod limitpod
+  100  history
+  101  kubectl get pods -A -o wide
+  102  kubectl get pods -o wide
+  103  kubectl create deploy testdeploy --image=nginx --replicas=6
+  104  kubectl get pods -o wide
+  105  kubectl drain worker2
+  106  kubectl drain worker2 --ignore-daemonsets --force
+  107  kubectl get pods
+  108  kubectl get pods -o wide
+  109  kubectl get nodes
+  110  kubectl describe node worker2
+  111  kubectl edit node worker2
+  112  kubectl uncordon worker2
+  113  kubectl get pods -o wide
+  114  kubectl create newweb --image=nginx --replicas=20
+  115  kubectl create deploy newweb --image=nginx --replicas=20
+  116  kubectl get pods -o wide
+  117  kubectl delete deploy newweb
+  118  history
+  119  sudo ls -l /etc/kubernetes/manifests
+  120  kubectl run staticpod --image=nginx --dry-run=client -o yaml
+  121  kubectl get pods
+  122  sudo -i
+  123  history >> /tmp/history-14Oct24.txt
+  124  vim /tmp/history-14Oct24.txt
+  125  kubectl config view
+  126  kubectl api-resources | grep -i networkp
+  127  vim nwpolicy-complete-example.yaml
+  128  kubectl apply -f nwpolicy-complete-example.yaml
+  129  kubectl expose pod nginx --port=80
+  130  kubectl get svc
+  131  kubectl exec -it busybox -- wget --spider --timeout=1 nginx
+  132  vim nwpolicy-complete-example.yaml
+  133  kubectl lab pod busybox access=true
+  134  kubectl label pod busybox access=true
+  135  kubectl exec -it busybox -- wget --spider --timeout=1 nginx
+  136  history
+  137  kubectl create ns new-namespace
+  138  kubectl create ns nwp-namespace
+  139  vim nwp-lab9-1.yaml
+  140  kubectl create -f nwp-lab9-1.yaml
+  141  kubectl expose pod nwp-nginx --port=80
+  142  kubectl exec -it nwp-busybox -n nwp-namespace -- wget --spider --timeout=1 nwp-nginx
+  143  kubectl exec -it nwp-busybox -n nwp-namespace -- nslookup nwp-nginx
+  144  kubectl exec -it nwp-busybox -n nwp-namespace -- wget --spider --timeout=1 nwp-nginx.default.svc.cluster.local
+  145  vim nwp-lab9-2.yaml
+  146  kubectl exec -it nwp-busybox -n nwp-namespace -- wget --spider --timeout=1 nwp-nginx.default.svc.cluster.local
+  147  kubectl apply -f nwp-lab9-2.yaml
+  148  kubectl exec -it nwp-busybox -n nwp-namespace -- wget --spider --timeout=1 nwp-nginx.default.svc.cluster.local
+  149  kubectl create deploy busybox --image=busybox --sleep 3600
+  150  kubectl create deploy busybox --image=busybox -- sleep 3600
+  151  kubectl exec -it busybox-75cd85d546-wd6wq -- wget --spider --timeout=1 nwp-nginx
+  152  kubectl get netpol
+  153  kubectl delete netpol access-nginx
+  154  kubectl delete netpol deny-from-other-namespaces
+  155  history
+  156  kubectl top pods
+  157  kubectl top node
+  158  kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
+  159  kubectl -n kube-system get pods
+  160  kubectl edit -n kube-system deployments.apps metrics-server
+  161  kubectl -n kube-system get pods
+  162  kubectl top pods
+  163  history
+  164  kubectl get quota
+  165  kubectl create quota -h | less
+  166  #kubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3,replicationcontrollers=2,resourcequotas=1,secrets=5,persistentvolumeclaims=10
+  167  kubectl create quota one --hard=cpu=1
+  168  kubectl create quota two --hard=memory=4G
+  169  kubectl create quota three --hard=cpu=2
+  170  kubectl describe ns default
+  171  kubectl delete quota one two three
+  172  kubectl top -h | less
+  173  kubectl top pod -h | less
+  174  kubectl top pod -A --sort-by="memory"
+  175  history
+  176  kubectl run testpox --image=nginx --dry-run=client -o yaml
+  177  kubectl get pods
+  178  history
+  179  kubectl get pods
+  180  sudo apt install etcd-client
+  181  sudo etcdctl --help
+  182  sudo ETCDCTL_API=2 etcdctl --help
+  183  sudo ETCDCTL_API=3 etcdctl --help
+  184  ps aux | grep 2379
+  185  sudo etcdctl --endpoints=localhost:2379 --cacert /etc/kubernetes/pki/etcd/ca.rt --cert /etc/kubernetes/pki/etcd/server.crt --key /etc/kubernetes/pki/etcd/server.key get / --prefix --keys-only
+  186  sudo etcdctl --endpoints=localhost:2379 --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/etcd/server.crt --key /etc/kubernetes/pki/etcd/server.key get / --prefix --keys-only
+  187  sudo etcdctl --endpoints=localhost:2379 --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/etcd/server.crt --key /etc/kubernetes/pki/etcd/server.key snapshot save /tmp/etcdbackup.db
+  188  ls -l /tmp/etcdbackup.db
+  189  sudo etcdctl --write-out=table snapshot status /tmp/etcdbackup.db
+  190  cp /tmp/etcdbackup.db /tmp/etcdbackup.db.2
+  191  sudo cp /tmp/etcdbackup.db /tmp/etcdbackup.db.2
+  192  history
+  193  kubectl get deploy
+  194  kubectl delete deploy --all
+  195  kubectl get deploy
+  196  cd /etc/kubernetes/manifests/
+  197  ls
+  198  sudo mv * ..
+  199  sudo dnf install tree
+  200  sudo apt install tree
+  201  sudo tree /var/lib/etcd
+  202  sudo mv /var/lib/etcd /var/lib/etcd-backup
+  203  sudo tree /var/lib/etcd
+  204  sudo etcdctl snapshot restore /tmp/etcdbackup.db --data-dir /var/lib/etcd
+  205  sudo tree /var/lib/etcd
+  206  sudo mv ../*.yaml .
+  207  sudo crictl ps
+  208  kubectl get deploy
+  209  kubectl get pods
+  210  kubectl delete deploy testdeploy
+  211  history
+  212  kubectl get nodes
+  213*
+  214  cd
+  215  vim nodesel.yam;l
+  216  vim nodesel.yaml
+  217  kubectl apply -f nodesel.yaml
+  218  kubectl get pods -o yaml
+  219  kubectl get pods -o wide
+  220  vim nodesel.yaml
+  221  kubectl apply -f nodesel.yaml
+  222  kubectl get pods
+  223  kubectl describe pod islectnginxxxxxx
+  224  kubectl get deploy
+  225  kubectl get pods
+  226  kubectl delete pods --force --timeout=0 testdeploy-7cd7d7ddc8-28mcq testdeploy-7cd7d7ddc8-fqh6v testdeploy-7cd7d7ddc8-ftk48 testdeploy-7cd7d7ddc8-pd7sd testdeploy-7cd7d7ddc8-stj67 testdeploy-7cd7d7ddc8-stxsx
+  227  kubectl get pods
+  228  history
+  229  kubectl get node control -o yaml | less
+  230  kubectl get ds -A
+  231  kubectl get ds -n kube-system kube-proxy -o yaml | less
+  232  kubectl get ds -n kube-system calico-node -o yaml | less
+  233  history
+  234  kubectl taint nodes worker1 storage=ssd:NoSchedule
+  235  kubectl describe node worker1
+  236  kubectl create deploy nginx-taint --image=nginx --replicas=3
+  237  kubectl get pods -o wide
+  238  cd cka/
+  239  vim taint-toleration.yaml
+  240  kubectl apply -f taint-toleration.yaml
+  241  kubectl get pods -o wide | grep tole
+  242  vim taint-toleration2.yaml
+  243  kubectl apply -f taint-toleration2.yaml
+  244  kubectl get pods -o wide | grep hdd
+  245  vim taint-toleration2.yaml
+  246  kubectl apply -f taint-toleration2.yaml
+  247  vim taint-toleration2.yaml
+  248  kubectl apply -f taint-toleration2.yaml
+  249  kubectl get pods -o wide | grep exists
+  250  history
+  251  vim ~/.kube/config
+  252  kubectl get sa
+  253  kubectl get sa -n kube-system
+  254  kubectl create role -h | less
+  255  kubectl create rolebinding -h | less
+  256  kubectl run mypod --image=alpine -- sleep 3600
+  257  kubectl get pods mypod -o yaml | less
+  258  kubectl exec -it mypod -- sh
+  259  historyt
+  260  history
+  261  kubectl create sa mysa
+  262  kubectl create role list-pods --resource=pods --verbs=list
+  263  kubectl create role list-pods --resource=pods --verb=list
+  264  kubectl describe role list-pods
+  265  kubectl create rolebinding list-pods --role=list-pods --serviceaccount=default:default
+  266  vim mysapod.yaml
+  267  kubectl apply -f mysapod.yaml
+  268  kubectl exec -it mysapod -- sh
+  269  ls *role*
+  270  grep -li 'type=role' *
+  271  kubectl get rolebindings
+  272  kubectl create rolebinding list-pods --role=list-pods --serviceaccount=default:mysa
+  273  kubectl delete rolebindings.rbac.authorization.k8s.io list-pods
+  274  kubectl create rolebinding list-pods --role=list-pods --serviceaccount=default:mysa
+  275  kubectl exec -it mysapod -- sh
+  276  kubectl get pods
+  277  kubectl delete pod mysapod
+  278  kubectl delete pod mysapod --force --timeout=1
+  279  kubectl delete pod mysapod --force --timeout=0
+  280  kubectl apply -f mysapod.yaml
+  281  kubectl exec -it mysapod -- sh
+  282  history > /tmp/history-15Oct24.txt
diff --git a/setup-container-previous-version 2.sh b/setup-container-previous-version 2.sh
new file mode 100755
index 0000000..71c5566
--- /dev/null
+++ b/setup-container-previous-version 2.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+# script that sets up the container runtime, following
+# https://kubernetes.io/docs/setup/production-environment/container-runtime
+
+# changes March 14 2023: introduced $PLATFORM to have this work on amd64 as well as arm64
+
+# setting the MYOS and OSVERSION variables
+MYOS=$(hostnamectl | awk '/Operating/ { print $3 }')
+OSVERSION=$(hostnamectl | awk '/Operating/ { print $4 }')
+# beta: building in ARM support
+[ $(arch) = aarch64 ] && PLATFORM=arm64
+[ $(arch) = x86_64 ] && PLATFORM=amd64
+
+if [ $MYOS = "Ubuntu" ]
+then
+  ### setting up container runtime prerequisites
+  cat <<- EOF | sudo tee /etc/modules-load.d/containerd.conf
+overlay
+br_netfilter
+EOF
+
+  sudo modprobe overlay
+  sudo modprobe br_netfilter
+
+  # Set up required sysctl params; these persist across reboots.
+  cat <<- EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
+net.bridge.bridge-nf-call-iptables = 1
+net.ipv4.ip_forward = 1
+net.bridge.bridge-nf-call-ip6tables = 1
+EOF
+
+  # Apply sysctl params without reboot
+  sudo sysctl --system
+
+  # Install containerd
+  sudo apt-get update && sudo apt-get install -y containerd
+  # hopefully temporary bugfix, as the containerd version provided in the Ubuntu repo is too old
+  # added Jan 26th 2023
+  # this needs to be updated once a recent enough containerd version is in the Ubuntu repos
+  sudo systemctl stop containerd
+  # clean up old files from a previous attempt, if any exist
+  [ -d bin ] && rm -rf bin
+  wget https://github.com/containerd/containerd/releases/download/v1.6.15/containerd-1.6.15-linux-${PLATFORM}.tar.gz
+  tar xvf containerd-1.6.15-linux-${PLATFORM}.tar.gz
+  sudo mv bin/* /usr/bin/
+  # Configure containerd to use the systemd cgroup driver
+  sudo mkdir -p /etc/containerd
+  cat <<- TOML | sudo tee /etc/containerd/config.toml
+version = 2
+[plugins]
+  [plugins."io.containerd.grpc.v1.cri"]
+    [plugins."io.containerd.grpc.v1.cri".containerd]
+      discard_unpacked_layers = true
+      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
+        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
+          runtime_type = "io.containerd.runc.v2"
+          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
+            SystemdCgroup = true
+TOML
+
+  # Restart containerd
+  sudo systemctl restart containerd
+fi
+
+# disable the AppArmor profile for runc
+sudo ln -s /etc/apparmor.d/runc /etc/apparmor.d/disable/
+sudo apparmor_parser -R /etc/apparmor.d/runc
+
+# create a marker file to record that this script has run
+touch /tmp/container.txt
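
A quick way to sanity-check the container runtime after the script above has run, and before continuing with "sudo kubeadm init" as in entry 8 of the history: the following is a minimal sketch, assuming containerd 1.6.15 was installed from the GitHub release as in the script, and assuming crictl is available (it is used later in the history, entry 207). All commands are standard systemd/sysctl/containerd/cri-tools commands.

  # confirm the containerd service is active and reports the expected version
  sudo systemctl is-active containerd
  containerd --version
  # confirm the sysctl settings written by the script are in effect
  sudo sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables
  # list containers through the CRI (empty until kubelet has started pods)
  sudo crictl ps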