commit a64c9fdff9
parent 0630eb32e6
11
counter 2.sh
Executable file
@@ -0,0 +1,11 @@
#!/bin/bash
# countdown timer: converts the first argument (minutes) to seconds
# and counts down to zero instead of running forever into negative numbers

COUNTER=$1
COUNTER=$(( COUNTER * 60 ))

while [ $COUNTER -gt 0 ]
do
    echo $COUNTER seconds remaining
    sleep 1
    COUNTER=$(( COUNTER - 1 ))
done
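A usage sketch, assuming the script is saved under the name shown above:

bash "counter 2.sh" 5    # counts down from 300 seconds, printing one line per second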
102
exam-grade.sh
Executable file
@@ -0,0 +1,102 @@
#!/bin/bash
# TODO: exit if not root
clear

# evaluating tasks
# the sourced task scripts increment SCORE and TOTAL cumulatively,
# so after each task these variables hold the running totals
echo -e "\033[1mchecking task 1 results\033[0m"
source labs/exam-task1.sh
echo the running score is $SCORE
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 2 results\033[0m"
source labs/exam-task2.sh
echo the running score is $SCORE
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 3 results\033[0m"
source labs/exam-task3.sh
echo the running score is $SCORE
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 4 results\033[0m"
source labs/exam-task4.sh
echo the running score is $SCORE
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 5 results\033[0m"
source labs/exam-task5.sh
echo the running score is $SCORE
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 6 results\033[0m"
source labs/exam-task6.sh
echo the running score is $SCORE
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 7 results\033[0m"
source labs/exam-task7.sh
echo the running score is $SCORE
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 8 results\033[0m"
source labs/exam-task8.sh
echo the running score is $SCORE
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 9 results\033[0m"
source labs/exam-task9.sh
echo the running score is $SCORE
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 10 results\033[0m"
source labs/exam-task10.sh
echo the running score is $SCORE
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 11 results\033[0m"
source labs/exam-task11.sh
echo the running score is $SCORE
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 12 results\033[0m"
source labs/exam-task12.sh
echo the running score is $SCORE
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 13 results\033[0m"
source labs/exam-task13.sh
echo the running score is $SCORE
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 14 results\033[0m"
source labs/exam-task14.sh
echo the running score is $SCORE
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 15 results\033[0m"
source labs/exam-task15.sh
#### print PASS/FAIL
echo -e "\n"
echo your score is $SCORE out of a total of $TOTAL

# multiply before dividing to avoid integer truncation in the 70% pass mark
if [[ $SCORE -ge $(( TOTAL * 7 / 10 )) ]]
then
    echo -e "\033[32mCONGRATULATIONS!!\033[0m\t\t You passed this sample exam!"
    echo -e "\033[1mResults obtained here don't guarantee anything for the real exam\033[0m"
else
    echo -e "\033[31m[FAIL]\033[0m\t\t You did NOT pass this sample exam \033[36m:-(\033[0m"
fi
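The grader depends on source running every task script in the current shell, so SCORE and TOTAL accumulate across tasks. A minimal sketch of that behavior (the file name is hypothetical):

echo 'SCORE=$(( SCORE + 10 ))' > /tmp/demo-task.sh
SCORE=0
source /tmp/demo-task.sh
source /tmp/demo-task.sh
echo $SCORE    # prints 20: a sourced script shares the caller's variables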
39
labs/exam-task1.sh
Normal file
@@ -0,0 +1,39 @@
if kubectl get ns indiana &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t namespace indiana was found"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t namespace indiana was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if [[ $(echo $(kubectl get -n indiana secret insecret -o yaml | awk '/color/ { print $2 }') | base64 -d) == blue ]] &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t secret insecret with COLOR=blue was found"
    SCORE=$(( SCORE + 10 ))
elif kubectl get -n indiana secret insecret &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t secret insecret was found, but not with the expected variable"
else
    echo -e "\033[31m[FAIL]\033[0m\t\t secret insecret was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if [[ $(echo $(kubectl get pods -n indiana inpod -o jsonpath='{.spec.containers[*].image}')) == nginx:latest ]] &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t found pod inpod that uses the latest version of nginx"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t pod inpod that uses the latest version of the nginx image was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl get pods -n indiana inpod -o yaml | grep insecret &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t pod inpod uses the secret insecret"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t pod inpod doesn't use the secret insecret"
fi
TOTAL=$(( TOTAL + 10 ))
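For reference, a setup sketch that would pass these checks (not the official solution; note the awk pattern 'color' is case-sensitive, so the secret key must contain lowercase "color"):

kubectl create namespace indiana
kubectl create secret generic insecret -n indiana --from-literal=color=blue
kubectl run inpod -n indiana --image=nginx:latest
# the last check also expects the pod spec to reference insecret, e.g. via envFrom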
8
labs/exam-task10.sh
Normal file
@@ -0,0 +1,8 @@
if helm list | grep mysql &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t you have successfully installed the bitnami mysql chart"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t bitnami mysql chart not found"
fi
TOTAL=$(( TOTAL + 10 ))
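A sketch that would pass this check; the release name is an assumption, since the check only greps the helm list output for "mysql":

helm repo add bitnami https://charts.bitnami.com/bitnami
helm install mysql bitnami/mysql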
26
labs/exam-task11.sh
Normal file
@@ -0,0 +1,26 @@
if kubectl get ns nebraska &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t namespace nebraska was found"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t namespace nebraska was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl -n nebraska get deploy | grep snowdeploy &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t Deployment snowdeploy was found in Namespace nebraska"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t Deployment snowdeploy was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl -n nebraska get deploy snowdeploy -o yaml | grep -A1 requests | grep 64Mi &>/dev/null && kubectl -n nebraska get deploy snowdeploy -o yaml | grep -A1 limits | grep 128Mi &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t the requested memory request and limits have been found"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t the requested memory request and limits have not been found"
fi
TOTAL=$(( TOTAL + 10 ))
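A sketch of a matching Deployment; kubectl set resources writes the memory values the greps look for:

kubectl create namespace nebraska
kubectl create deployment snowdeploy -n nebraska --image=nginx
kubectl set resources deployment snowdeploy -n nebraska --requests=memory=64Mi --limits=memory=128Mi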
27
labs/exam-task12.sh
Normal file
@@ -0,0 +1,27 @@
if kubectl get ns | grep birds &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t namespace birds was found"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t namespace birds was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if [[ $(kubectl -n birds get pods --show-labels --selector=type=allbirds | grep bird | wc -l) == "5" ]] &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t good, 5 pods with label type=allbirds were found"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t couldn't find 5 pods with the label type=allbirds"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl get -n birds svc allbirds | grep 32323 &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t NodePort Service allbirds listening on nodePort 32323 was found in Namespace birds"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t no NodePort Service allbirds listening on nodePort 32323 was found in Namespace birds"
fi
TOTAL=$(( TOTAL + 10 ))
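A setup sketch (pod names are an assumption; the check wants five pods whose names match "bird" and that carry the label):

kubectl create namespace birds
for i in 1 2 3 4 5; do kubectl run bird$i -n birds --image=nginx --labels=type=allbirds; done
kubectl create service nodeport allbirds -n birds --tcp=80:80 --node-port=32323
# kubectl create service sets the selector to app=allbirds; change it to type=allbirds if the Service should actually reach the pods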
17
labs/exam-task13.sh
Normal file
@@ -0,0 +1,17 @@
if kubectl get pods -o yaml securepod | grep 'runAsGroup: 2000' &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t securepod is running with group ID 2000"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t securepod is not running with group ID 2000"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl get pods -o yaml securepod | grep 'allowPrivilegeEscalation: false' &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t container in pod securepod has privilege escalation disabled"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t container in pod securepod does not have privilege escalation disabled"
fi
TOTAL=$(( TOTAL + 10 ))
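A pod sketch that satisfies both greps; runAsGroup belongs in the pod-level securityContext, allowPrivilegeEscalation in the container-level one:

kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: securepod
spec:
  securityContext:
    runAsGroup: 2000
  containers:
  - name: nginx
    image: nginx
    securityContext:
      allowPrivilegeEscalation: false
EOF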
18
labs/exam-task14.sh
Normal file
@@ -0,0 +1,18 @@
if docker images | grep myapp | grep '1.0' &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t container image myapp:1.0 was found"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t container image myapp:1.0 was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if [ -f /tmp/myapp.tar ]
then
    echo -e "\033[32m[OK]\033[0m\t\t tar archive /tmp/myapp.tar was found"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t tar archive /tmp/myapp.tar was not found"
fi
TOTAL=$(( TOTAL + 10 ))
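A sketch that would pass; the base image is an assumption, as any image tagged myapp:1.0 satisfies the grep:

docker pull nginx
docker tag nginx myapp:1.0
docker save -o /tmp/myapp.tar myapp:1.0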
8
labs/exam-task15.sh
Normal file
@@ -0,0 +1,8 @@
if kubectl get pod securepod -n oklahoma -o yaml | grep 'serviceAccount: secure' &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t pod securepod in namespace oklahoma found and it is using the serviceaccount secure"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t couldn't find the pod securepod in namespace oklahoma that uses the serviceaccount secure"
fi
TOTAL=$(( TOTAL + 10 ))
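A setup sketch; the --overrides JSON is one way to set the service account from the command line:

kubectl create namespace oklahoma
kubectl create serviceaccount secure -n oklahoma
kubectl run securepod -n oklahoma --image=nginx --overrides='{"apiVersion":"v1","spec":{"serviceAccountName":"secure"}}'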
11
labs/exam-task2.sh
Normal file
@@ -0,0 +1,11 @@
kubectl get pods -A --selector tier=control-plane | awk 'NR > 1 { print $2 }' > /tmp/task2file.txt

if diff /tmp/task2file.txt /tmp/task2pods &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t all pods with label tier=control-plane were found"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t your result file doesn't show all pods with the label tier=control-plane"
fi
TOTAL=$(( TOTAL + 10 ))
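The task presumably asks the student to save the matching pod names to /tmp/task2pods; a command that mirrors the grader's own pipeline would do it:

kubectl get pods -A --selector tier=control-plane | awk 'NR > 1 { print $2 }' > /tmp/task2pods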
17
labs/exam-task3.sh
Normal file
@@ -0,0 +1,17 @@
if kubectl get cm task3cm -o yaml | grep index.html &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t a configmap with the name task3cm was found with the right contents"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t configmap with the name task3cm was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl describe pod oregonpod | grep -A1 'ConfigMap' | grep task3cm &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t the pod oregonpod has the configmap task3cm mounted"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t the pod oregonpod doesn't seem to have the configmap task3cm mounted"
fi
TOTAL=$(( TOTAL + 10 ))
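A setup sketch; the file content is an assumption, since the first grep only needs the index.html key:

echo hello > index.html
kubectl create configmap task3cm --from-file=index.html
# create oregonpod with task3cm mounted as a volume, so it shows up under ConfigMap in kubectl describe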
8
labs/exam-task4.sh
Normal file
@@ -0,0 +1,8 @@
if kubectl get pods sidepod -o yaml | grep -A 10 initContainers | grep 'restartPolicy: Always' &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t found a pod sidepod that runs a sidecar container"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t didn't find a pod sidepod that runs a sidecar container"
fi
TOTAL=$(( TOTAL + 10 ))
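The grep targets the native sidecar pattern: an init container with restartPolicy: Always (Kubernetes 1.28+). A minimal sketch, with image choices as assumptions:

kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: sidepod
spec:
  initContainers:
  - name: sidecar
    image: busybox
    command: ["sh", "-c", "while true; do sleep 3600; done"]
    restartPolicy: Always
  containers:
  - name: main
    image: nginx
EOF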
17
labs/exam-task5.sh
Normal file
@@ -0,0 +1,17 @@
if kubectl get ns probes &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t namespace probes was found"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t namespace probes was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl describe pods -n probes probepod | grep Liveness | grep '/healthz' &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t pod probepod was found, as well as its Liveness probe"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t no pod probepod with correct liveness probe was found"
fi
TOTAL=$(( TOTAL + 10 ))
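A pod sketch modeled on the HTTP liveness example from the Kubernetes docs; image and port are assumptions, since the grader only greps the describe output for a Liveness probe on /healthz:

kubectl create namespace probes
kubectl apply -n probes -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: probepod
spec:
  containers:
  - name: app
    image: registry.k8s.io/e2e-test-images/agnhost:2.40
    args: ["liveness"]
    livenessProbe:
      httpGet:
        path: /healthz
        port: 8080
EOF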
22
labs/exam-task6.sh
Normal file
@@ -0,0 +1,22 @@
# get the revision number of the last update that was found
kubectl rollout history deployment updates > /tmp/task6.txt
LAST=$(tail -2 /tmp/task6.txt | head -1 | awk '{ print $1 }')
BEFORE=$(( LAST - 1 ))

if kubectl rollout history deployment updates --revision=${LAST} | grep 'nginx:1.17' &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t last revision of the deploy updates is set to nginx:1.17"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t last revision of the deploy updates is not set to nginx:1.17"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl rollout history deployment updates --revision=${BEFORE} | grep 'nginx:latest' &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t previous revision of deploy updates was using nginx:latest"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t previous revision of deploy updates not found or not using nginx:latest"
fi
TOTAL=$(( TOTAL + 10 ))
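A sketch that produces the two revisions the checks expect; the container name nginx follows from creating the deployment from the nginx image:

kubectl create deployment updates --image=nginx:latest
kubectl set image deployment/updates nginx=nginx:1.17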
36
labs/exam-task7.sh
Normal file
@@ -0,0 +1,36 @@
if grep "$(minikube ip).*myapp.info" /etc/hosts &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t name resolution for myapp.info is set up"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t no name resolution for myapp.info was found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl describe svc task7svc | grep app=updates &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t Service task7svc found and exposes Deploy updates"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t No Service task7svc exposing Deploy updates was found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl get pods -n ingress-nginx | grep controller | grep Running &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t found a running ingress controller"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t no running ingress controller was found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl describe ing | grep task7svc:80 &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t ingress rule forwarding traffic to task7svc was found"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t no ingress rule forwarding traffic to task7svc was found"
fi
TOTAL=$(( TOTAL + 10 ))
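A sketch that would satisfy all four checks on minikube:

minikube addons enable ingress
echo "$(minikube ip) myapp.info" | sudo tee -a /etc/hosts
kubectl expose deployment updates --name=task7svc --port=80
kubectl create ingress myapp --rule="myapp.info/*=task7svc:80"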
17
labs/exam-task8.sh
Normal file
@@ -0,0 +1,17 @@
if kubectl describe networkpolicy | grep 'PodSelector:.*type=webapp' &>/dev/null && kubectl describe networkpolicy | grep 'PodSelector:.*type=tester' &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t NetworkPolicy was found with correct configuration"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t No NetworkPolicy with correct configuration was found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl exec -it nevatest -- wget --spider --timeout=1 nevaginx &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t the tester pod can access the nevaginx pod"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t the tester pod cannot access the nevaginx pod"
fi
TOTAL=$(( TOTAL + 10 ))
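A NetworkPolicy sketch matching what the describe-based greps look for; the policy name is an assumption:

kubectl apply -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: webapp-policy
spec:
  podSelector:
    matchLabels:
      type: webapp
  ingress:
  - from:
    - podSelector:
        matchLabels:
          type: tester
EOF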
17
labs/exam-task9.sh
Normal file
@@ -0,0 +1,17 @@
if kubectl exec storepod -- cat /usr/share/nginx/html/index.html &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t file index.html accessible through hostPath storage"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t file index.html not accessible through hostPath storage"
fi
TOTAL=$(( TOTAL + 10 ))

if curl -s $(minikube ip):32032 | grep welcome &>/dev/null
then
    echo -e "\033[32m[OK]\033[0m\t\t Pod storepod correctly exposed and hostPath volume content accessible"
    SCORE=$(( SCORE + 10 ))
else
    echo -e "\033[31m[FAIL]\033[0m\t\t Pod storepod not correctly exposed"
fi
TOTAL=$(( TOTAL + 10 ))
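A setup sketch; the hostPath directory and page text are assumptions, but the page must contain the word "welcome" and the Service must use nodePort 32032:

minikube ssh 'sudo mkdir -p /mnt/data && echo welcome | sudo tee /mnt/data/index.html'
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: storepod
spec:
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: web
      mountPath: /usr/share/nginx/html
  volumes:
  - name: web
    hostPath:
      path: /mnt/data
EOF
kubectl expose pod storepod --type=NodePort --port=80
# then edit the Service to pin nodePort: 32032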
@@ -1,151 +0,0 @@
#!/bin/bash
#
# source https://github.com/sandervanvugt/cka/setup-lb.sh

# script to set up load balancing on cluster nodes
# for use in CKA courses by Sander van Vugt
# version 0.6
# currently only tested on Ubuntu 22.04 LTS Server
# run this AFTER running setup-container.sh and setup-kubetools.sh

if which kubectl
then
    echo all good moving on
else
    echo please run setup-container.sh and setup-kubetools.sh first and then run this again
    exit 6
fi

## establish key based SSH with remote hosts
# obtain node information
if grep control1 /etc/hosts | grep -v 127
then
    export CONTROL1_IP=$(awk '/control1/ { print $1 }' /etc/hosts | grep -v 127)
else
    echo enter IP address for control1
    read CONTROL1_IP
    export CONTROL1_IP=$CONTROL1_IP
    sudo sh -c "echo $CONTROL1_IP control1 >> /etc/hosts"
fi

if grep control2 /etc/hosts | grep -v 127
then
    export CONTROL2_IP=$(awk '/control2/ { print $1 }' /etc/hosts | grep -v 127)
else
    echo enter IP address for control2
    read CONTROL2_IP
    export CONTROL2_IP=$CONTROL2_IP
    sudo sh -c "echo $CONTROL2_IP control2 >> /etc/hosts"
fi

if grep control3 /etc/hosts | grep -v 127
then
    export CONTROL3_IP=$(awk '/control3/ { print $1 }' /etc/hosts | grep -v 127)
else
    echo enter IP address for control3
    read CONTROL3_IP
    export CONTROL3_IP=$CONTROL3_IP
    sudo sh -c "echo $CONTROL3_IP control3 >> /etc/hosts"
fi

echo '##### READ ALL OF THIS BEFORE CONTINUING ######'
echo this script requires you to run setup-container.sh and setup-kubetools.sh first
echo this script is based on the NIC name ens33
echo if your network card has a different name, edit keepalived.conf
echo before continuing and change "interface ens33" to match your config
echo .
echo this script will create a keepalived apiserver at 192.168.29.100
echo if this IP address does not match your network configuration,
echo manually change the check_apiserver.sh file before continuing
echo press enter to continue or Ctrl-c to interrupt and apply modifications
read

# performing check on critical files
for i in keepalived.conf check_apiserver.sh haproxy.cfg
do
    if [ ! -f $i ]
    then
        echo $i should exist in the current directory && exit 2
    fi
done

cat /etc/hosts
echo this is the main thing that goes wrong: if it does, manually edit /etc/hosts on all the nodes and run the rest of the script
read

# generating and distributing SSH keys
ssh-keygen
ssh-copy-id control1
ssh-copy-id control2
ssh-copy-id control3

# configuring sudo for easier access
sudo sh -c "echo 'Defaults timestamp_type=global,timestamp_timeout=60' >> /etc/sudoers"
sudo scp -p /etc/sudoers student@control2:/tmp/ && ssh -t control2 'sudo -S chown root:root /tmp/sudoers' && ssh -t control2 'sudo -S cp -p /tmp/sudoers /etc/'
sudo scp -p /etc/sudoers student@control3:/tmp/ && ssh -t control3 'sudo -S chown root:root /tmp/sudoers' && ssh -t control3 'sudo -S cp -p /tmp/sudoers /etc/'
#ssh control2 sudo -S sh -c "echo 'Defaults timestamp_type=global,timestamp_timeout=60' >> /etc/sudoers"
#ssh control3 sudo -S sh -c "echo 'Defaults timestamp_type=global,timestamp_timeout=60' >> /etc/sudoers"

echo DEBUG: check if sudo modification worked
read

# install required software
sudo apt install haproxy keepalived -y
ssh control2 "sudo -S apt install haproxy keepalived -y"
ssh control3 "sudo -S apt install haproxy keepalived -y"
echo DEBUG check if haproxy and keepalived are installed
read

scp /etc/hosts control2:/tmp && ssh -t control2 'sudo -S cp /tmp/hosts /etc/'
scp /etc/hosts control3:/tmp && ssh -t control3 'sudo -S cp /tmp/hosts /etc/'

# create keepalived config
# change IP address to anything that works in your environment!
sudo chmod +x check_apiserver.sh
sudo cp check_apiserver.sh /etc/keepalived/

scp check_apiserver.sh control2:/tmp && ssh -t control2 'sudo -S cp /tmp/check_apiserver.sh /etc/keepalived'
scp check_apiserver.sh control3:/tmp && ssh -t control3 'sudo -S cp /tmp/check_apiserver.sh /etc/keepalived'

#### creating site specific keepalived.conf file
sudo cp keepalived.conf keepalived-control2.conf
sudo cp keepalived.conf keepalived-control3.conf

sudo sed -i 's/state MASTER/state SLAVE/' keepalived-control2.conf
sudo sed -i 's/state MASTER/state SLAVE/' keepalived-control3.conf
sudo sed -i 's/priority 255/priority 254/' keepalived-control2.conf
sudo sed -i 's/priority 255/priority 253/' keepalived-control3.conf

sudo cp keepalived.conf /etc/keepalived/
scp keepalived-control2.conf control2:/tmp && ssh -t control2 'sudo -S cp /tmp/keepalived-control2.conf /etc/keepalived/keepalived.conf'
scp keepalived-control3.conf control3:/tmp && ssh -t control3 'sudo -S cp /tmp/keepalived-control3.conf /etc/keepalived/keepalived.conf'
echo DEBUG check if files are copied over successfully
read

### rewriting haproxy.cfg with site specific IP addresses
sudo sed -i s/server\ control1\ 1.1.1.1\:6443\ check/server\ control1\ $CONTROL1_IP\:6443\ check/ haproxy.cfg
sudo sed -i s/server\ control2\ 1.1.1.2\:6443\ check/server\ control2\ $CONTROL2_IP\:6443\ check/ haproxy.cfg
sudo sed -i s/server\ control3\ 1.1.1.3\:6443\ check/server\ control3\ $CONTROL3_IP\:6443\ check/ haproxy.cfg

# copy haproxy.cfg to destinations
sudo cp haproxy.cfg /etc/haproxy/
scp haproxy.cfg control2:/tmp && ssh -t control2 'sudo -S cp /tmp/haproxy.cfg /etc/haproxy/'
scp haproxy.cfg control3:/tmp && ssh -t control3 'sudo -S cp /tmp/haproxy.cfg /etc/haproxy/'
echo DEBUG check if haproxy files are copied over successfully
read

# start and enable services
sudo systemctl enable keepalived --now
sudo systemctl enable haproxy --now
ssh control2 sudo -S systemctl enable keepalived --now
ssh control2 sudo -S systemctl enable haproxy --now
ssh control3 sudo -S systemctl enable keepalived --now
ssh control3 sudo -S systemctl enable haproxy --now

echo setup is now done, please verify
echo the first node that started the services - normally control1 - should run the virtual IP address 192.168.29.100
@@ -1,145 +0,0 @@
#!/bin/bash
#
# source https://github.com/sandervanvugt/cka/setup-lb.sh

# script to set up load balancing on cluster nodes
# for use in CKA courses by Sander van Vugt
# version 0.7
# currently only tested on Ubuntu 22.04 LTS Server
# run this AFTER running setup-container.sh and setup-kubetools.sh
#
# TODO: remove the many password prompts

if which kubectl
then
    echo all good moving on
else
    echo please run setup-container.sh and setup-kubetools.sh first and then run this again
    exit 6
fi

## establish key based SSH with remote hosts
# obtain node information
if grep control1 /etc/hosts | grep -v 127
then
    export CONTROL1_IP=$(awk '/control1/ { print $1 }' /etc/hosts | grep -v 127)
else
    echo enter IP address for control1
    read CONTROL1_IP
    export CONTROL1_IP=$CONTROL1_IP
    sudo sh -c "echo $CONTROL1_IP control1 >> /etc/hosts"
fi

if grep control2 /etc/hosts | grep -v 127
then
    export CONTROL2_IP=$(awk '/control2/ { print $1 }' /etc/hosts | grep -v 127)
else
    echo enter IP address for control2
    read CONTROL2_IP
    export CONTROL2_IP=$CONTROL2_IP
    sudo sh -c "echo $CONTROL2_IP control2 >> /etc/hosts"
fi

if grep control3 /etc/hosts | grep -v 127
then
    export CONTROL3_IP=$(awk '/control3/ { print $1 }' /etc/hosts | grep -v 127)
else
    echo enter IP address for control3
    read CONTROL3_IP
    export CONTROL3_IP=$CONTROL3_IP
    sudo sh -c "echo $CONTROL3_IP control3 >> /etc/hosts"
fi

echo '##### READ ALL OF THIS BEFORE CONTINUING ######'
echo this script requires you to run setup-container.sh and setup-kubetools.sh first
echo this script is based on the NIC name ens33
echo if your network card has a different name, edit keepalived.conf
echo before continuing and change "interface ens33" to match your config
echo .
echo this script will create a keepalived apiserver at 192.168.29.100
echo if this IP address does not match your network configuration,
echo manually change the check_apiserver.sh file before continuing
echo also change the IP address in keepalived.conf
echo .
echo press enter to continue or Ctrl-c to interrupt and apply modifications
read

# performing check on critical files
for i in keepalived.conf check_apiserver.sh haproxy.cfg
do
    if [ ! -f $i ]
    then
        echo $i should exist in the current directory && exit 2
    fi
done

# generating and distributing SSH keys
ssh-keygen
ssh-copy-id control1
ssh-copy-id control2
ssh-copy-id control3

# configuring sudo for easier access
sudo sh -c "echo 'Defaults timestamp_type=global,timestamp_timeout=60' >> /etc/sudoers"
sudo scp -p /etc/sudoers student@control2:/tmp/ && ssh -t control2 'sudo -S chown root:root /tmp/sudoers' && ssh -t control2 'sudo -S cp -p /tmp/sudoers /etc/'
sudo scp -p /etc/sudoers student@control3:/tmp/ && ssh -t control3 'sudo -S chown root:root /tmp/sudoers' && ssh -t control3 'sudo -S cp -p /tmp/sudoers /etc/'
#ssh control2 sudo -S sh -c "echo 'Defaults timestamp_type=global,timestamp_timeout=60' >> /etc/sudoers"
#ssh control3 sudo -S sh -c "echo 'Defaults timestamp_type=global,timestamp_timeout=60' >> /etc/sudoers"

# install required software
sudo apt install haproxy keepalived -y
ssh control2 "sudo -S apt install haproxy keepalived -y"
ssh control3 "sudo -S apt install haproxy keepalived -y"

scp /etc/hosts control2:/tmp && ssh -t control2 'sudo -S cp /tmp/hosts /etc/'
scp /etc/hosts control3:/tmp && ssh -t control3 'sudo -S cp /tmp/hosts /etc/'

# create keepalived config
# change IP address to anything that works in your environment!
sudo chmod +x check_apiserver.sh
sudo cp check_apiserver.sh /etc/keepalived/

scp check_apiserver.sh control2:/tmp && ssh -t control2 'sudo -S cp /tmp/check_apiserver.sh /etc/keepalived'
scp check_apiserver.sh control3:/tmp && ssh -t control3 'sudo -S cp /tmp/check_apiserver.sh /etc/keepalived'

#### creating site specific keepalived.conf file
sudo cp keepalived.conf keepalived-control2.conf
sudo cp keepalived.conf keepalived-control3.conf

sudo sed -i 's/state MASTER/state SLAVE/' keepalived-control2.conf
sudo sed -i 's/state MASTER/state SLAVE/' keepalived-control3.conf
sudo sed -i 's/priority 255/priority 254/' keepalived-control2.conf
sudo sed -i 's/priority 255/priority 253/' keepalived-control3.conf

sudo cp keepalived.conf /etc/keepalived/
scp keepalived-control2.conf control2:/tmp && ssh -t control2 'sudo -S cp /tmp/keepalived-control2.conf /etc/keepalived/keepalived.conf'
scp keepalived-control3.conf control3:/tmp && ssh -t control3 'sudo -S cp /tmp/keepalived-control3.conf /etc/keepalived/keepalived.conf'
echo DEBUG check if files are copied over successfully
read

### rewriting haproxy.cfg with site specific IP addresses
sudo sed -i s/server\ control1\ 1.1.1.1\:6443\ check/server\ control1\ $CONTROL1_IP\:6443\ check/ haproxy.cfg
sudo sed -i s/server\ control2\ 1.1.1.2\:6443\ check/server\ control2\ $CONTROL2_IP\:6443\ check/ haproxy.cfg
sudo sed -i s/server\ control3\ 1.1.1.3\:6443\ check/server\ control3\ $CONTROL3_IP\:6443\ check/ haproxy.cfg

# copy haproxy.cfg to destinations
sudo cp haproxy.cfg /etc/haproxy/
scp haproxy.cfg control2:/tmp && ssh -t control2 'sudo -S cp /tmp/haproxy.cfg /etc/haproxy/'
scp haproxy.cfg control3:/tmp && ssh -t control3 'sudo -S cp /tmp/haproxy.cfg /etc/haproxy/'
echo DEBUG check if haproxy files are copied over successfully
read

# start and enable services
sudo systemctl enable keepalived --now
sudo systemctl enable haproxy --now
ssh control2 sudo -S systemctl enable keepalived --now
ssh control2 sudo -S systemctl enable haproxy --now
ssh control3 sudo -S systemctl enable keepalived --now
ssh control3 sudo -S systemctl enable haproxy --now

echo setup is now done, please verify
echo the first node that started the services - normally control1 - should run the virtual IP address 192.168.29.100
@@ -118,8 +118,6 @@ sudo sed -i 's/priority 255/priority 253/' keepalived-control3.conf
sudo cp keepalived.conf /etc/keepalived/
scp keepalived-control2.conf control2:/tmp && ssh -t control2 'sudo -S cp /tmp/keepalived-control2.conf /etc/keepalived/keepalived.conf'
scp keepalived-control3.conf control3:/tmp && ssh -t control3 'sudo -S cp /tmp/keepalived-control3.conf /etc/keepalived/keepalived.conf'
echo DEBUG check if files are copied over successfully
read

### rewriting haproxy.cfg with site specific IP addresses
sudo sed -i s/server\ control1\ 1.1.1.1\:6443\ check/server\ control1\ $CONTROL1_IP\:6443\ check/ haproxy.cfg
@@ -130,8 +128,6 @@ sudo sed -i s/server\ control3\ 1.1.1.3\:6443\ check/server\ control3\ $CONTROL3
sudo cp haproxy.cfg /etc/haproxy/
scp haproxy.cfg control2:/tmp && ssh -t control2 'sudo -S cp /tmp/haproxy.cfg /etc/haproxy/'
scp haproxy.cfg control3:/tmp && ssh -t control3 'sudo -S cp /tmp/haproxy.cfg /etc/haproxy/'
echo DEBUG check if haproxy files are copied over successfully
read

# start and enable services
sudo systemctl enable keepalived --now
100
setup-lb.sh
@@ -1,100 +0,0 @@
#!/bin/bash
#
# source https://github.com/sandervanvugt/cka/setup-lb.sh

# script to set up load balancing on cluster nodes
# for use in CKA courses by Sander van Vugt
# version 0.5
# currently only supporting CentOS 7.x
# run this AFTER running setup-docker.sh and setup-kubetools.sh

## establish key based SSH with remote hosts
# obtain node information
echo this script requires three nodes: control1 control2 and control3
echo enter the IP address for control1
read CONTROL1_IP
echo enter the IP address for control2
read CONTROL2_IP
echo enter the IP address for control3
read CONTROL3_IP
echo '##### READ ALL OF THIS BEFORE CONTINUING ######'
echo this script requires you to run setup-docker.sh and setup-kubetools.sh first
echo this script is based on the NIC name ens33
echo if your network card has a different name, edit keepalived.conf
echo before continuing and change "interface ens33" to match your config
echo .
echo this script will create a keepalived apiserver at 192.168.4.100
echo if this IP address does not match your network configuration,
echo manually change the check_apiserver.sh file before continuing
echo press enter to continue or Ctrl-c to interrupt and apply modifications
read

# performing check on critical files
for i in keepalived.conf check_apiserver.sh haproxy.cfg
do
    if [ ! -f $i ]
    then
        echo $i should exist in the current directory && exit 2
    fi
done

# create /etc/hosts for all nodes
echo $CONTROL1_IP control1 >> /etc/hosts
echo $CONTROL2_IP control2 >> /etc/hosts
echo $CONTROL3_IP control3 >> /etc/hosts

# generating and distributing SSH keys
ssh-keygen
ssh-copy-id control1
ssh-copy-id control2
ssh-copy-id control3

# install required software
yum install haproxy keepalived -y
ssh control2 "yum install haproxy keepalived -y"
ssh control3 "yum install haproxy keepalived -y"

# copying /etc/hosts file
scp /etc/hosts control2:/etc/
scp /etc/hosts control3:/etc/

# create keepalived config
# change IP address to anything that works in your environment!
chmod +x check_apiserver.sh
cp check_apiserver.sh /etc/keepalived/
scp check_apiserver.sh control2:/etc/keepalived/
scp check_apiserver.sh control3:/etc/keepalived/

#### creating site specific keepalived.conf file
cp keepalived.conf keepalived-control2.conf
cp keepalived.conf keepalived-control3.conf

sed -i 's/state MASTER/state SLAVE/' keepalived-control2.conf
sed -i 's/state MASTER/state SLAVE/' keepalived-control3.conf
sed -i 's/priority 255/priority 254/' keepalived-control2.conf
sed -i 's/priority 255/priority 253/' keepalived-control3.conf

cp keepalived.conf /etc/keepalived/
scp keepalived-control2.conf control2:/etc/keepalived/keepalived.conf
scp keepalived-control3.conf control3:/etc/keepalived/keepalived.conf

### rewriting haproxy.cfg with site specific IP addresses
sed -i s/server\ control1\ 1.1.1.1\:6443\ check/server\ control1\ $CONTROL1_IP\:6443\ check/ haproxy.cfg
sed -i s/server\ control2\ 1.1.1.2\:6443\ check/server\ control2\ $CONTROL2_IP\:6443\ check/ haproxy.cfg
sed -i s/server\ control3\ 1.1.1.3\:6443\ check/server\ control3\ $CONTROL3_IP\:6443\ check/ haproxy.cfg

# copy haproxy.cfg to destinations
cp haproxy.cfg /etc/haproxy/
scp haproxy.cfg control2:/etc/haproxy/
scp haproxy.cfg control3:/etc/haproxy/

# start and enable services
systemctl enable keepalived --now
systemctl enable haproxy --now
ssh control2 systemctl enable keepalived --now
ssh control2 systemctl enable haproxy --now
ssh control3 systemctl enable keepalived --now
ssh control3 systemctl enable haproxy --now

echo setup is now done, please verify
echo control1 should run the virtual IP address 192.168.4.100