sandervanvugt 2024-11-24 06:03:51 -08:00
parent 6eaf5fd4c7
commit db0687d4e2
33 changed files with 276 additions and 316 deletions

View File

@ -1,11 +0,0 @@
#!/bin/bash
COUNTER=$1
COUNTER=$(( COUNTER * 60 ))
while true
do
echo $COUNTER seconds remaining
sleep 1
COUNTER=$(( COUNTER - 1 ))
done

View File

@ -68,11 +68,6 @@ echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL
echo -e "\033[1mchecking task 12 results\033[0m"
source labs/exam1-task12.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL
#### print PASS/FAIL
echo -e "\n"
echo your score is $SCORE out of a total of $TOTAL

82
exam2-grade.sh Executable file
View File

@ -0,0 +1,82 @@
#!/bin/bash
# exit if not root
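# note: the 'exit if not root' comment above has no matching test; a minimal
# sketch, assuming root really is required, could be:
# if [ "$(id -u)" -ne 0 ]; then echo "run this script as root"; exit 1; fi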
clear
# evaluating tasks
echo -e "\033[1mchecking task 1 results\033[0m"
source labs/exam2-task1.sh
echo the score is $SCORE
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL
echo -e "\033[1mchecking task 2 results\033[0m"
source labs/exam2-task2.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL
echo -e "\033[1mchecking task 3 results\033[0m"
source labs/exam2-task3.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL
echo -e "\033[1mchecking task 4 results\033[0m"
source labs/exam2-task4.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL
echo -e "\033[1mchecking task 5 results\033[0m"
source labs/exam2-task5.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL
echo -e "\033[1mchecking task 6 results\033[0m"
source labs/exam2-task6.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL
echo -e "\033[1mchecking task 7 results\033[0m"
source labs/exam2-task7.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL
echo -e "\033[1mchecking task 8 results\033[0m"
source labs/exam2-task8.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL
echo -e "\033[1mchecking task 9 results\033[0m"
source labs/exam2-task9.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL
echo -e "\033[1mchecking task 10 results\033[0m"
source labs/exam2-task10.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL
echo -e "\033[1mchecking task 11 results\033[0m"
source labs/exam2-task11.sh
echo the score is $SCORE
TOTALSCORE=$(( TOTAL + SCORE ))
TOTALTOTAL=$TOTAL
#### print PASS/FAIL
echo -e "\n"
echo your score is $SCORE out of a total of $TOTAL
if [[ $SCORE -ge $(( TOTAL * 7 / 10 )) ]]
then
echo -e "\033[32mCONGRATULATIONS!!\033[0m\t\t You passed this sample exam!"
echo -e "\033[1mResults obtained here don't guarantee anything for the real exam\033[0m"
else
echo -e "\033[31m[FAIL]\033[0m\t\t You did NOT pass this sample exam \033[36m:-(\033[0m"
fi

View File

@ -1,17 +1,29 @@
if test -x $(which helm) &>/dev/null
# check for the role
if kubectl -n access get roles role1510 -o yaml | grep create &>/dev/null && kubectl -n access get roles role1510 -o yaml | grep deploy &>/dev/null && kubectl -n access get roles role1510 -o yaml | grep daemonset &>/dev/null && kubectl -n access get roles role1510 -o yaml | grep stateful &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t the helm binary has been installed"
echo -e "\033[32m[OK]\033[0m\t\t role role1510 was found with correct verbs and objects"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t the helm binary has not been installed"
echo -e "\033[31m[FAIL]\033[0m\t\t no correct role configuration was found"
fi
TOTAL=$(( TOTAL + 10 ))
if helm list | grep mysql &>/dev/null
# check for the rolebinding to be set correctly
if kubectl get -n access rolebinding -o yaml | grep lab1510role &>/dev/null && kubectl get -n access rolebinding -o yaml | grep lab1510access &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t you have successfully installed the bitnami mysql chart"
echo -e "\033[32m[OK]\033[0m\t\t rolebinding is set up the right way"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t bitnami mysql chart not found"
echo -e "\033[31m[FAIL]\033[0m\t\t no correctly configured rolebinding was found"
fi
TOTAL=$(( TOTAL + 10 ))
# check for pod that uses the ServiceAccount
if kubectl get -n access pod lab1510pod -o yaml | grep 'serviceAccount: lab1510access' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t the pod lab1510pod uses the serviceAccount lab1510access"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t the pod lab1510pod doesn't use the serviceAccount lab1510access"
fi
TOTAL=$(( TOTAL + 10 ))
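A minimal sketch of commands that would satisfy the three checks above; the role, rolebinding, serviceaccount and pod names come from the greps, the nginx image is an assumption:

kubectl create serviceaccount lab1510access -n access
kubectl create role role1510 -n access --verb=create --resource=deployments,daemonsets,statefulsets
kubectl create rolebinding lab1510role -n access --role=role1510 --serviceaccount=access:lab1510access
kubectl run lab1510pod -n access --image=nginx --overrides='{"spec": {"serviceAccountName": "lab1510access"}}'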

View File

@ -1,29 +1,49 @@
# check for the role
if kubectl -n access get roles role1511 -o yaml | grep create &>/dev/null && kubectl -n access get roles role1511 -o yaml | grep deploy &>/dev/null && kubectl -n access get roles role1511 -o yaml | grep daemonset &>/dev/null && kubectl -n access get roles role1511 -o yaml | grep stateful &>/dev/null
# temporary cordon on worker1
kubectl cordon worker1 &>/dev/null
if kubectl get nodes worker2 -o yaml | grep -i -A3 taint | grep -A2 'effect: NoSchedule' | grep -A1 'key:.*type' | grep 'value:.*db' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t role role1511 was found with correct verbs and objects"
echo -e "\033[32m[OK]\033[0m\t\t without a toleration the testpod doesn't shows a state of Pending and that exactly what we needed!"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t no correct role configuration was found"
echo -e "\033[31m[FAIL]\033[0m\t\t the testpod without toleration works, so the taint isn't functional"
fi
TOTAL=$(( TOTAL + 10 ))
# check for the rolebinding to be set correctly
if kubectl get -n access rolebinding -o yaml | grep lab1511role &>/dev/null && kubectl get -n access rolebinding -o yaml | grep lab1511access &>/dev/null
# verifying that the toleration works
if kubectl run lab1511testpod1 --image=nginx --restart=Never --overrides='{
"spec": {
"tolerations": [
{
"key": "type",
"operator": "Equal",
"value": "db",
"effect": "NoSchedule"
}
]
}
}' &>/dev/null && sleep 5
then
echo -e "\033[32m[OK]\033[0m\t\t rolebinding is set up the right way"
echo -e "\033[32m[OK]\033[0m\t\t a pod with the toleration type=db will run on worker2"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t no correctly configured rolebinding was found"
echo -e "\033[31m[FAIL]\033[0m\t\t the toleration type=db doesn't allow the pod to run on worker2"
fi
TOTAL=$(( TOTAL + 10 ))
# check for pod that uses the ServiceAccount
if kubectl get -n access pod lab1511pod -o yaml | grep 'serviceAccount: lab1511access' &>/dev/null
# verifying that a pod without toleration doesn't run
kubectl run lab1511testpod2 --image=nginx &>/dev/null
if kubectl get pods lab1511testpod2 | grep Pending &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t the pod lab1511pod uses the serviceAccount lab1511access"
echo -e "\033[32m[OK]\033[0m\t\t without a toleration the testpod doesn't shows a state of Pending and that exactly what we needed!"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t the pod lab1511pod doesn't use the serviceAccount lab1511access"
echo -e "\033[31m[FAIL]\033[0m\t\t the testpod without toleration works, so the taint isn't functional"
fi
TOTAL=$(( TOTAL + 10 ))
# cleaning up
kubectl delete pod lab1511testpod1 lab1511testpod2 &>/dev/null
kubectl uncordon worker1 &>/dev/null
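For reference, the taint the first check looks for on worker2 (key type, value db, effect NoSchedule, as read from the grep pattern) can be set like this:

kubectl taint nodes worker2 type=db:NoSchedule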

View File

@ -1,17 +0,0 @@
if kubectl get pods -o yaml securepod | grep 'runAsGroup: 2000' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t securepod is running with group ID 2000"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t securepod is not running with group ID 2000"
fi
TOTAL=$(( TOTAL + 10 ))
if kubectl get pods -o yaml securepod | grep 'allowPrivilegeEscalation: false' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t container in pod securepod has privilege escalation disabled"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t container in pod securepod has privilege escalation not disabled"
fi
TOTAL=$(( TOTAL + 10 ))

View File

@ -1,18 +0,0 @@
if docker images | grep myapp | grep '1.0' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t container image myapp:1.0 was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t container image myapp:1.0 was not found"
fi
TOTAL=$(( TOTAL + 10 ))
if [ -f /tmp/myapp.tar ]
then
echo -e "\033[32m[OK]\033[0m\t\t tar archive /tmp/myapp.tar was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t tar archive /tmp/myapp.tar was not found"
fi
TOTAL=$(( TOTAL + 10 ))

View File

@ -1,8 +0,0 @@
if kubectl get pod securepod -n oklahoma -o yaml | grep 'serviceAccount: secure' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t pod securepod in namespace oklahoma found and it is using the serviceaccount secure"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t couldn't find the pod securepod in namespace oklahoma that uses the serviceaccount secure"
fi
TOTAL=$(( TOTAL + 10 ))

View File

@ -1,18 +1,18 @@
if kubectl get pods lab153pod | grep '2/2' &>/dev/null
if kubectl get pods lab152pod | grep '2/2' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t the pod lab153pod was found and it is running 2 containers"
echo -e "\033[32m[OK]\033[0m\t\t the pod lab152pod was found and it is running 2 containers"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t cannot find the pod lab153pod running 2 containers"
echo -e "\033[31m[FAIL]\033[0m\t\t cannot find the pod lab152pod running 2 containers"
fi
TOTAL=$(( TOTAL + 10 ))
if kubectl get pods lab153pod -o yaml | grep -i toleration -A3 | grep -iz noschedule.*control-plane &>/dev/null
if kubectl get pods lab152pod -o yaml | grep -i toleration -A3 | grep -iz noschedule.*control-plane &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t the pod lab153pod has a tolerations that allows it to run on control-plane nodes"
echo -e "\033[32m[OK]\033[0m\t\t the pod lab152pod has a tolerations that allows it to run on control-plane nodes"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t the pod lab153pod does not have a toleration that allows it to run on a control-plane node"
echo -e "\033[31m[FAIL]\033[0m\t\t the pod lab152pod does not have a toleration that allows it to run on a control-plane node"
fi
TOTAL=$(( TOTAL + 10 ))

View File

@ -1,4 +1,4 @@
export LAB3POD=$(kubectl get pods | awk '/lab154/ { print $1 }') &>/dev/null
export LAB3POD=$(kubectl get pods | awk '/lab153/ { print $1 }') &>/dev/null
if kubectl get pods $LAB3POD -o yaml | grep initContainer &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t $LAB3POD is running and it does have an init container"

View File

@ -1,4 +1,4 @@
if kubectl get pv lab155 -o yaml | grep 'path.*/lab155' &>/dev/null
if kubectl get pv lab154 -o yaml | grep 'path.*/lab154' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t a PersistentVolume with the name lab155 was found and it uses the right path"
SCORE=$(( SCORE + 10 ))

View File

@ -1,13 +1,13 @@
if kubectl get svc | grep 32567 | grep lab156deploy &>/dev/null
if kubectl get svc | grep 32567 | grep lab155deploy &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t a NodePort Service exposing lab156deploy was found listening at port 32567"
echo -e "\033[32m[OK]\033[0m\t\t a NodePort Service exposing lab155deploy was found listening at port 32567"
SCORE=$(( SCORE + 7 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t no NodePort Service listening at port 32567 was found"
fi
TOTAL=$(( TOTAL + 7 ))
if kubectl get endpoints | grep lab156deploy | grep -E '^[^:]*(:[^:]*){3}$' &>/dev/null
if kubectl get endpoints | grep lab155deploy | grep -E '^[^:]*(:[^:]*){3}$' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t the NodePort Service is exposing 3 Pods"
SCORE=$(( SCORE + 3 ))

View File

@ -7,7 +7,7 @@ sleep 5
kubectl run labgrade -n access --image=busybox -- sleep 3600 &>/dev/null
sleep 5
if kubectl exec -n access labgrade -- wget --spider --timeout=1 lab157web.restricted.svc.cluster.local &>/dev/null
if kubectl exec -n access labgrade -- wget --spider --timeout=1 lab156web.restricted.svc.cluster.local &>/dev/null
then
echo -e "\033[31m[FAIL]\033[0m\t\t testpod is getting access, which means the networkpolicy is not correct"
else
@ -50,7 +50,7 @@ TOTAL=$(( TOTAL + 8 ))
##setting the label so that the testpod is getting access
kubectl label -n access pod labgrade access="true"
if kubectl exec -n access labgrade -- wget --spider --timeout=1 lab157web.restricted.svc.cluster.local &>/dev/null
if kubectl exec -n access labgrade -- wget --spider --timeout=1 lab156web.restricted.svc.cluster.local &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t previous revision of deploy updated was using nginx:latest"
SCORE=$(( SCORE + 10 ))

View File

@ -1,4 +1,4 @@
if kubectl get quota -n limited | grep 'memory: .*/128M' &>/dev/null
if kubectl get quota -n limited | grep 'memory: .*/1G' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t memory quota was set up correctly"
SCORE=$(( SCORE + 5 ))
@ -16,11 +16,11 @@ else
fi
TOTAL=$(( TOTAL + 5 ))
if kubectl get deploy -n limited lab158deploy -o yaml | grep -A 5 resources | grep 'memory: 32Mi' &>/dev/null
if kubectl get deploy -n limited lab157deploy -o yaml | grep -A 5 resources | grep 'memory: 32Mi' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t lab158deploy resource request is set to 32 Mi"
echo -e "\033[32m[OK]\033[0m\t\t lab157deploy resource request is set to 32 Mi"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t lab158deploy resource request is not set to 32Mi"
echo -e "\033[31m[FAIL]\033[0m\t\t lab157deploy resource request is not set to 32Mi"
fi
TOTAL=$(( TOTAL + 10 ))

View File

@ -1,9 +1,9 @@
if kubectl get pods | grep lab159pod-worker2 &>/dev/null
if kubectl get pods | grep lab158pod-worker2 &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t NetworkPolicy was found with correct configuration"
echo -e "\033[32m[OK]\033[0m\t\t Static Pod was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t No NetworkPolicy with correct configuration was found"
echo -e "\033[31m[FAIL]\033[0m\t\t Static Pod was not found"
fi
TOTAL=$(( TOTAL + 10 ))
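A minimal sketch of a static Pod that would produce the graded name lab158pod-worker2 (the kubelet appends the node name); the nginx image and the default staticPodPath are assumptions:

# run on worker2
sudo tee /etc/kubernetes/manifests/lab158pod.yaml >/dev/null <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: lab158pod
spec:
  containers:
  - name: nginx
    image: nginx
EOF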

View File

@ -1,6 +1,19 @@
if true &>/dev/null
if ssh student@worker2 sudo journalctl -u kubelet | grep 'Deactivated successfully' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t this task cannot be automatically graded so you just get the points"
echo -e "\033[32m[OK]\033[0m\t\t You have stopped the kubelet service using sudo systemctl kubelet stop"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t You have not stopped the kubelet service using sudo systemctl kubelet stop"
fi
TOTAL=$(( TOTAL + 10 ))
if ssh student@worker2 sudo systemctl status kubelet | grep 'Active: active (running)' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t The kubelet service is running again"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t The kubelet service is not running"
fi
TOTAL=$(( TOTAL + 10 ))
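For reference, the commands these two checks grade, run on worker2:

sudo systemctl stop kubelet      # logs 'Deactivated successfully' in the kubelet journal
sudo systemctl start kubelet     # brings the unit back to 'Active: active (running)'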

View File

@ -1,39 +1,8 @@
if kubectl get ns indiana &>/dev/null
if [[ $(echo $(kubectl get nodes | grep -w Ready | wc -l)) == 3 ]] &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t namespace indiana was found"
SCORE=$(( SCORE + 10 ))
echo -e "\033[32m[OK]\033[0m\t\t 3 nodes were found"
SCORE=$(( SCORE + 20 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t namespace indiana was not found"
echo -e "\033[31m[FAIL]\033[0m\t\t 3nodes were not found"
fi
TOTAL=$(( TOTAL + 10 ))
if [[ $(echo $(kubectl get -n indiana secret insecret -o yaml | awk '/color/ { print $2 }')| base64 -d) == blue ]] &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t secret insecret with COLOR=blue was found"
SCORE=$(( SCORE + 10 ))
elif kubectl get -n indiana secret insecret &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t secret insecret was found, but not with the expected variable"
else
echo -e "\033[31m[FAIL]\033[0m\t\t secret insecret was not found"
fi
TOTAL=$(( TOTAL + 10 ))
if [[ $(echo $(kubectl get pods -n indiana inpod -o jsonpath='{.spec.containers[*].image}')) == nginx:latest ]] &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t found pod inpod that uses the latest version of nginx"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t pod inpod that uses the latest version of the nginx image was not found"
fi
TOTAL=$(( TOTAL + 10 ))
if kubectl get pods -n indiana inpod -o yaml | grep insecret &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t pod inpod uses the secret insecret"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t pod inpod doesn't use the secret insecret"
fi
TOTAL=$(( TOTAL + 10 ))
TOTAL=$(( TOTAL + 20 ))

View File

@ -1,8 +1,8 @@
if helm list | grep mysql &>/dev/null
if [[ $(kubectl get hpa | awk '/lab1610deploy/ { print $5 }') == 2 ]] && [[ $(kubectl get hpa | awk '/lab1610deploy/ { print $6 }') == 6 ]]
then
echo -e "\033[32m[OK]\033[0m\t\t you have successfully installed the bitnami mysql chart"
echo -e "\033[32m[OK]\033[0m\t\t found a correctly configured HPA"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t bitnami mysql chart not found"
echo -e "\033[31m[FAIL]\033[0m\t\t didn't find a correctly configured HPA"
fi
TOTAL=$(( TOTAL + 10 ))
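A minimal sketch of creating an HPA that scales lab1610deploy between 2 and 6 replicas; the CPU threshold is an assumption, the check only reads the hpa listing:

kubectl autoscale deployment lab1610deploy --min=2 --max=6 --cpu-percent=50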

View File

@ -1,26 +1,17 @@
if kubectl get ns nebraska &>/dev/null &>/dev/null
if sudo etcdctl --write-out=table snapshot status /tmp/etcdbackup &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t namespace nebraska was found"
echo -e "\033[32m[OK]\033[0m\t\t a valid etcd backup was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t namespace nebraska was not found"
echo -e "\033[31m[FAIL]\033[0m\t\t a valid etcd backup was not found"
fi
TOTAL=$(( TOTAL + 10 ))
if kubectl -n nebraska get deploy | grep snowdeploy &>/dev/null
if kubectl logs -n kube-system etcd-control | grep restore &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t Deployment snowdeploy was found in Namespace nebraska"
echo -e "\033[32m[OK]\033[0m\t\t etcd backup was successfully restored"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t Deployment snowdeploy was not found"
fi
TOTAL=$(( TOTAL + 10 ))
if kubectl -n nebraska get deploy snowdeploy -o yaml | grep -A1 requests | grep 64Mi &>/dev/null && kubectl -n nebraska get deploy snowdeploy -o yaml | grep -A1 limits | grep 128Mi &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t the requested memory request and limits have been found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t the requested memory request and limits have not been found"
echo -e "\033[31m[FAIL]\033[0m\t\t etcd backup was not restored"
fi
TOTAL=$(( TOTAL + 10 ))
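A hedged sketch of creating the snapshot this check validates, assuming the default kubeadm certificate locations on the control node:

sudo ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/server.crt \
  --key=/etc/kubernetes/pki/etcd/server.key \
  snapshot save /tmp/etcdbackup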

View File

@ -1,27 +0,0 @@
if kubectl get ns | grep birds &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t namespace birds was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t namespace birds was not found"
fi
TOTAL=$(( TOTAL + 10 ))
if [[ $(kubectl -n birds get pods --show-labels --selector=type=allbirds | grep bird | wc -l) == "5" ]] &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t good, 5 pods with label type=allbirds were found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t couldn't finf 5 pods with the label type=allbirds"
fi
TOTAL=$(( TOTAL + 10 ))
if kubectl get -n birds svc allbirds | grep 32323 &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t NodePort Service allbirds listening on nodePort 32323 was found in Namespace birds"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t no NodePort Service allbirds listening on nodePort 32323 was found in Namespace birds"
fi
TOTAL=$(( TOTAL + 10 ))

View File

@ -1,17 +0,0 @@
if kubectl get pods -o yaml securepod | grep 'runAsGroup: 2000' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t securepod is running with group ID 2000"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t securepod is not running with group ID 2000"
fi
TOTAL=$(( TOTAL + 10 ))
if kubectl get pods -o yaml securepod | grep 'allowPrivilegeEscalation: false' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t container in pod securepod has privilege escalation disabled"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t container in pod securepod has privilege escalation not disabled"
fi
TOTAL=$(( TOTAL + 10 ))

View File

@ -1,18 +0,0 @@
if docker images | grep myapp | grep '1.0' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t container image myapp:1.0 was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t container image myapp:1.0 was not found"
fi
TOTAL=$(( TOTAL + 10 ))
if [ -f /tmp/myapp.tar ]
then
echo -e "\033[32m[OK]\033[0m\t\t tar archive /tmp/myapp.tar was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t tar archive /tmp/myapp.tar was not found"
fi
TOTAL=$(( TOTAL + 10 ))

View File

@ -1,8 +0,0 @@
if kubectl get pod securepod -n oklahoma -o yaml | grep 'serviceAccount: secure' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t pod securepod in namespace oklahoma found and it is using the serviceaccount secure"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t couldn't find the pod securepod in namespace oklahoma that uses the serviceaccount secure"
fi
TOTAL=$(( TOTAL + 10 ))

View File

@ -1,11 +1,16 @@
kubectl get pods -A --selector tier=control-plane | awk 'NR > 1 { print $2 }' > /tmp/task2file.txt
#!/bin/bash
KUBEVERSION=$(curl -s https://api.github.com/repos/kubernetes/kubernetes/releases/latest | jq -r '.tag_name')
KUBEVERSION=${KUBEVERSION%.*}
if diff /tmp/task2file.txt /tmp/task2pods
CONTROLVERSION=$(kubectl get nodes | awk '/control/ { print $5 }')
CONTROLVERSION=${CONTROLVERSION%.*}
if [[ $KUBEVERSION == $CONTROLVERSION ]]
then
echo -e "\033[32m[OK]\033[0m\t\t all pods with label tier=control-plane were found"
SCORE=$(( SCORE + 10 ))
echo -e "\033[32m[OK]\033[0m\t\t controlnode is running the latest version"
SCORE=$(( SCORE + 20 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t your result file doesn't show all pods with the label tier=control-plane"
echo -e "\033[31m[FAIL]\033[0m\t\t controlnode is not running the latest version"
fi
TOTAL=$(( TOTAL + 10 ))
TOTAL=$(( TOTAL + 20 ))
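As a quick illustration of the ${VAR%.*} expansion used above, which drops everything from the last dot so only major.minor versions are compared:

V=v1.31.2
echo "${V%.*}"    # prints v1.31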

View File

@ -1,17 +1,8 @@
if kubectl get cm task3cm -o yaml |grep index.html &>/dev/null
if kubectl exec exam2-task3 -c nginx -- cat /usr/share/nginx/html/date.log &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t a configmap with the name task3cm was found with the right contents"
echo -e "\033[32m[OK]\033[0m\t\t the sidecar Pod provides access to the main app output"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t configmap with the name task3cm was not found"
fi
TOTAL=$(( TOTAL + 10 ))
if kubectl describe pod oregonpod | grep -A1 'ConfigMap' | grep task3cm &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t the pod oregonpod has the configmap task3cm mounted"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t the pod oregonpod doesn't seem to have the configmap task3cm mounted"
echo -e "\033[31m[FAIL]\033[0m\t\t the sidecar Pod doesn't provide access to the main app output"
fi
TOTAL=$(( TOTAL + 10 ))
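A minimal sketch of a Pod that would pass this check: a main container writes date.log into a shared emptyDir and an nginx sidecar serves that directory. Only the Pod name, the container name nginx, and the file path come from the check; the busybox writer and the write loop are assumptions:

kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: exam2-task3
spec:
  volumes:
  - name: shared
    emptyDir: {}
  containers:
  - name: app
    image: busybox
    command: ["sh", "-c", "while true; do date >> /data/date.log; sleep 5; done"]
    volumeMounts:
    - name: shared
      mountPath: /data
  - name: nginx
    image: nginx
    volumeMounts:
    - name: shared
      mountPath: /usr/share/nginx/html
EOF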

View File

@ -1,8 +1,50 @@
if kubectl get pods sidepod -o yaml | grep -A 10 initContainers | grep 'restartPolicy: Always' &>/dev/null
# checking for PV with persistentVolumeReclaimPolicy set to Delete
if kubectl get pv exam2-task4-pv -o yaml | grep 'persistentVolumeReclaimPolicy: Delete' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t found a pod sidepod that runs a sidecar container"
SCORE=$(( SCORE + 10 ))
echo -e "\033[32m[OK]\033[0m\t\t found the pv exam2-task4-pv with persistentVolumeReclaimPolicy: Delete"
SCORE=$(( SCORE + 4 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t didn't find a pod sidepod that runs a sidecar container"
echo -e "\033[31m[FAIL]\033[0m\t\t didn't find the pv exam2-task4-pv with persistentVolumeReclaimPolicy: Delete"
fi
TOTAL=$(( TOTAL + 10 ))
TOTAL=$(( TOTAL + 4 ))
# checking for StorageClass with allowVolumeExpansion: true
if kubectl get storageclass -o yaml | grep 'allowVolumeExpansion: true' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t found a storageClass with allowVolumeExpansion set to true"
SCORE=$(( SCORE + 4 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t didn't find a storageClass with allowVolumeExpansion set to true"
fi
TOTAL=$(( TOTAL + 4 ))
# checking bound status between PVC and PV
if kubectl get pvc exam2-task4-pvc | grep 'Bound' | grep 'exam2-task4-pv' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t found the pvc correctly bound to the pv"
SCORE=$(( SCORE + 4 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t didn't find a correct binding between the pvc and the pv"
fi
TOTAL=$(( TOTAL + 4 ))
# checking size in the PVC
if kubectl get pvc exam2-task4-pvc -o yaml | grep -A 1 requests | grep 'storage: 200Mi' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t found correct resized size request on the PVC"
SCORE=$(( SCORE + 4 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t didn't find correct resized size request on the PVC"
fi
TOTAL=$(( TOTAL + 4 ))
# checking that the Pod mounts the PVC
if kubectl get pods storage -o yaml | grep 'claimName: exam2-task4-pvc' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t correct PVC mount found in Pod"
SCORE=$(( SCORE + 4 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t correct PV mount not found in Pod"
fi
TOTAL=$(( TOTAL + 4 ))
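A hedged sketch of enabling expansion and growing the claim to the graded 200Mi; the StorageClass name standard is an assumption:

kubectl patch storageclass standard -p '{"allowVolumeExpansion": true}'
kubectl patch pvc exam2-task4-pvc -p '{"spec": {"resources": {"requests": {"storage": "200Mi"}}}}'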

View File

@ -1,17 +1,8 @@
if kubectl get ns probes &>/dev/null
if grep 'ERROR.*uninitialized' /tmp/failingdb.log &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t namespace probes was found"
echo -e "\033[32m[OK]\033[0m\t\t correct error output was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t namespace probes was not found"
fi
TOTAL=$(( TOTAL + 10 ))
if kubectl describe pods -n probes probepod | grep Liveness | grep '/healthz' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t pod probepod was found, as well as its Liveness probe"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t no pod probepod with correct liveness probe was found"
echo -e "\033[31m[FAIL]\033[0m\t\t correct error output was not found"
fi
TOTAL=$(( TOTAL + 10 ))
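A sketch of producing the graded log file; the Pod name failingdb is an assumption, only the path /tmp/failingdb.log comes from the check:

kubectl logs failingdb > /tmp/failingdb.log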

View File

@ -1,22 +1,17 @@
# get the revision number of the last update that was found
kubectl rollout history deployment updates > /tmp/task6.txt
LAST=$(tail -2 /tmp/task6.txt | head -1 | awk '{ print $1 }')
BEFORE=$(( LAST -1 ))
if kubectl rollout history deployment updates --revision=${LAST} | grep 'nginx:1.17' &>/dev/null
if kubectl get pods -n kube-system | grep metrics-server | grep '1/1' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t last revision of the updated deploy is set to nginx:1.17"
echo -e "\033[32m[OK]\033[0m\t\t Good! Metrics server Pod is up and running"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t last revision of the updated deploy is not set to nginx:1.17"
echo -e "\033[31m[FAIL]\033[0m\t\t Metrics server Pod doesn't seem to be running"
fi
TOTAL=$(( TOTAL + 10 ))
if kubectl rollout history deployment updates --revision=${BEFORE} | grep 'nginx:latest' &>/dev/null
if [[ $(kubectl top pods -A --sort-by=cpu | head -2 | tail -1 | awk '{ print $2 }') == $(cat /tmp/load.txt) ]]
then
echo -e "\033[32m[OK]\033[0m\t\t previous revision of deploy updated was using nginx:latest"
echo -e "\033[32m[OK]\033[0m\t\t kubectl top is working"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t previous revision of deploy updated not found or not using nginx:latest"
echo -e "\033[31m[FAIL]\033[0m\t\t kubectl top is not working correctly"
fi
TOTAL=$(( TOTAL + 10 ))
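For reference, a sketch of recording the busiest Pod, mirroring the comparison the second check performs:

kubectl top pods -A --sort-by=cpu | head -2 | tail -1 | awk '{ print $2 }' > /tmp/load.txt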

View File

@ -1,36 +1,17 @@
if grep $(minikube ip).*myapp.info /etc/hosts &>/dev/null
if kubectl get nodes -o yaml | grep 'storage: ssd' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t name resolution for myapp.info is setup"
echo -e "\033[32m[OK]\033[0m\t\t Found at least one node with the label storage=ssd"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t no name resolution for myapp.info was found"
echo -e "\033[31m[FAIL]\033[0m\t\t no nodes with label storage=ssd were found"
fi
TOTAL=$(( TOTAL + 10 ))
if kubectl describe svc task7svc | grep app=updates &>/dev/null
if kubectl get pods lab167pod -o yaml | grep nodeSelector -A1 | grep 'storage: ssd' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t Service task7svc found and exposes Deploy updates"
echo -e "\033[32m[OK]\033[0m\t\t nodeSelector property was found on the Pod lab167pod"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t No Service task7svc exposing Deploy updates was found"
fi
TOTAL=$(( TOTAL + 10 ))
if kubectl get pods -n ingress-nginx | grep controller | grep Running &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t found a running ingress controller"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t no running ingress controller was found"
fi
TOTAL=$(( TOTAL + 10 ))
if kubectl describe ing | grep task7svc:80 &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t ingress rule forwarding traffic to task7svc was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\" no ingress rule forwarding traffic to task7svc was found"
echo -e "\033[31m[FAIL]\033[0m\t\t no nodeSelector propertyn was found on the Pod lab167pod"
fi
TOTAL=$(( TOTAL + 10 ))
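A hedged sketch of satisfying both checks; the label key/value and the Pod name come from the greps above, while worker2 and the nginx image are assumptions:

kubectl label nodes worker2 storage=ssd
kubectl run lab167pod --image=nginx --overrides='{"spec": {"nodeSelector": {"storage": "ssd"}}}'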

View File

@ -1,17 +1,17 @@
if kubectl describe networkpolicy | grep 'PodSelector:.*type=webapp' &>/dev/null && kubectl describe networkpolicy | grep 'PodSelector:.*type=tester' &>/dev/null
if kubectl get svc lab168svc | grep NodePort &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t NetworkPolicy was found with correct configuration"
echo -e "\033[32m[OK]\033[0m\t\t The NodePort Service lab168svc was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t No NetworkPolicy with correct configuration was found"
echo -e "\033[31m[FAIL]\033[0m\t\t The NodePort Service lab168svc was not found"
fi
TOTAL=$(( TOTAL + 10 ))
if kubectl exec -it nevatest -- wget --spider --timeout=1 nevaginx &>/dev/null
if kubectl describe ing lab168pod | grep '/hi.*lab168pod:80 ..*)' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t the tester pod can access the nevaginx pod"
echo -e "\033[32m[OK]\033[0m\t\t an ingress resource was found and it does have pod endpoints"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t the tester pod cannot access the nevaginx pod"
echo -e "\033[31m[FAIL]\033[0m\t\t no ingress resource with pod endpoints was found"
fi
TOTAL=$(( TOTAL + 10 ))

View File

@ -1,17 +1,17 @@
if kubectl exec storepod -- cat /usr/share/nginx/html/index.html &>/dev/null
if kubectl describe node worker2 | grep NodeNotSchedulable &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t file index.html accessible through hostPath storage"
echo -e "\033[32m[OK]\033[0m\t\t Node was successfully drained"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t file index.html not accessible through hostPath storage"
echo -e "\033[31m[FAIL]\033[0m\t\t Cannot find any node drain events in the node logs"
fi
TOTAL=$(( TOTAL + 10 ))
if curl $(minikube ip):32032 | grep welcome &>/dev/null
if kubectl get nodes | grep SchedulingDisabled &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t Pod storepod correctly exposed and hostPath volume content accessible"
SCORE=$(( SCORE + 10 ))
echo -e "\033[31m[FAIL]\033[0m\t\t Node was not reset to normal operational state"
else
echo -e "\033[31m[FAIL]\033[0m\t\t Pod storepod not correctly exposed"
echo -e "\033[32m[OK]\033[0m\2t\t Node worker2 was reset to normal operational state"
SCORE=$(( SCORE + 10 ))
fi
TOTAL=$(( TOTAL + 10 ))
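For reference, the drain-and-restore sequence these checks grade on worker2 (sketch; the flags may need adjusting for Pods with local data):

kubectl drain worker2 --ignore-daemonsets --delete-emptydir-data
kubectl uncordon worker2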

View File

@ -1,3 +0,0 @@
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/tigera-operator.yaml
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/custom-resources.yaml
kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/calico.yaml

View File

@ -84,8 +84,8 @@ ssh-copy-id control3
# configuring sudo for easier access
sudo sh -c "echo 'Defaults timestamp_type=global,timestamp_timeout=60' >> /etc/sudoers"
sudo scp -p /etc/sudoers student@control2:/tmp/ && ssh -t control2 'sudo -S chown root:root /tmp/sudoers' && ssh -t control2 'sudo -S cp -p /tmp/sudoers /etc/'
sudo scp -p /etc/sudoers student@control3:/tmp/ && ssh -t control3 'sudo -S chown root:root /tmp/sudoers' && ssh -t control3 'sudo -S cp -p /tmp/sudoers /etc/'
sudo scp -p /etc/sudoers $USER@control2:/tmp/ && ssh -t control2 'sudo -S chown root:root /tmp/sudoers' && ssh -t control2 'sudo -S cp -p /tmp/sudoers /etc/'
sudo scp -p /etc/sudoers $USER@control3:/tmp/ && ssh -t control3 'sudo -S chown root:root /tmp/sudoers' && ssh -t control3 'sudo -S cp -p /tmp/sudoers /etc/'
#ssh control2 sudo -S sh -c "echo 'Defaults timestamp_type=global,timestamp_timeout=60' >> /etc/sudoers"
#ssh control3 sudo -S sh -c "echo 'Defaults timestamp_type=global,timestamp_timeout=60' >> /etc/sudoers"