message

parent a64c9fdff9
commit 6eaf5fd4c7

87 exam1-grade.sh Executable file
@@ -0,0 +1,87 @@
#!/bin/bash

# exit if not root
if [[ $(id -u) != 0 ]]
then
echo "run this script as root"
exit 1
fi

clear

# running totals, accumulated by the sourced task scripts
SCORE=0
TOTAL=0

# evaluating tasks
echo -e "\033[1mchecking task 1 results\033[0m"
source labs/exam1-task1.sh
echo "the score so far is $SCORE"
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 2 results\033[0m"
source labs/exam1-task2.sh
echo "the score so far is $SCORE"
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 3 results\033[0m"
source labs/exam1-task3.sh
echo "the score so far is $SCORE"
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 4 results\033[0m"
source labs/exam1-task4.sh
echo "the score so far is $SCORE"
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 5 results\033[0m"
source labs/exam1-task5.sh
echo "the score so far is $SCORE"
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 6 results\033[0m"
source labs/exam1-task6.sh
echo "the score so far is $SCORE"
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 7 results\033[0m"
source labs/exam1-task7.sh
echo "the score so far is $SCORE"
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 8 results\033[0m"
source labs/exam1-task8.sh
echo "the score so far is $SCORE"
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 9 results\033[0m"
source labs/exam1-task9.sh
echo "the score so far is $SCORE"
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 10 results\033[0m"
source labs/exam1-task10.sh
echo "the score so far is $SCORE"
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 11 results\033[0m"
source labs/exam1-task11.sh
echo "the score so far is $SCORE"
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

echo -e "\033[1mchecking task 12 results\033[0m"
source labs/exam1-task12.sh
echo "the score so far is $SCORE"
TOTALSCORE=$SCORE
TOTALTOTAL=$TOTAL

#### print PASS/FAIL
echo -e "\n"
echo "your score is $SCORE out of a total of $TOTAL"

# multiply before dividing so the integer math doesn't truncate the 70% threshold
if [[ $SCORE -ge $(( TOTAL * 7 / 10 )) ]]
then
echo -e "\033[32mCONGRATULATIONS!!\033[0m\t\t You passed this sample exam!"
echo -e "\033[1mResults obtained here don't guarantee anything for the real exam\033[0m"
else
echo -e "\033[31m[FAIL]\033[0m\t\t You did NOT pass this sample exam \033[36m:-(\033[0m"
fi
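Each task script is sourced rather than executed, so SCORE and TOTAL keep accumulating in the grader's shell from one task to the next. A typical invocation, assuming the script is started from the repository root (the labs/ paths are relative):

sudo ./exam1-grade.sh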
20 labs/exam1-task1.sh Normal file
@@ -0,0 +1,20 @@
echo evaluating task 1

# grep -c skips the header line because it doesn't contain "control"
if [[ $(kubectl get nodes 2>/dev/null | grep -c control) == 3 ]]
then
echo -e "\033[32m[OK]\033[0m\t\t 3 control nodes were found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t 3 control nodes were not found"
fi
TOTAL=$(( TOTAL + 10 ))

# -w matches the word Ready only, so NotReady nodes are not counted
if [[ $(kubectl get nodes 2>/dev/null | grep -cw Ready) == 5 ]]
then
echo -e "\033[32m[OK]\033[0m\t\t a total of 5 nodes was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t couldn't find a total of 5 nodes"
fi
TOTAL=$(( TOTAL + 10 ))
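Column-scraping kubectl output breaks if the table format changes; reading the node conditions via jsonpath is a possible hardening (a sketch, not part of this commit):

# prints one Ready-condition status per node (True/False/Unknown), then counts the True lines
kubectl get nodes -o jsonpath='{range .items[*]}{.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}' | grep -cw True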
17 labs/exam1-task10.sh Normal file
@@ -0,0 +1,17 @@
# command -v only succeeds if an executable helm is found in the PATH
if command -v helm &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t the helm binary has been installed"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t the helm binary has not been installed"
fi
TOTAL=$(( TOTAL + 10 ))

if helm list | grep mysql &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t you have successfully installed the bitnami mysql chart"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t bitnami mysql chart not found"
fi
TOTAL=$(( TOTAL + 10 ))
29 labs/exam1-task11.sh Normal file
@@ -0,0 +1,29 @@
# check for the role (fetch the YAML once and grep it for each verb and object)
ROLE1511=$(kubectl -n access get roles role1511 -o yaml 2>/dev/null)
if echo "$ROLE1511" | grep -q create && echo "$ROLE1511" | grep -q deploy && echo "$ROLE1511" | grep -q daemonset && echo "$ROLE1511" | grep -q stateful
then
echo -e "\033[32m[OK]\033[0m\t\t role role1511 was found with correct verbs and objects"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t no correct role configuration was found"
fi
TOTAL=$(( TOTAL + 10 ))

# check for the rolebinding to be set correctly
RB1511=$(kubectl get -n access rolebinding -o yaml 2>/dev/null)
if echo "$RB1511" | grep -q lab1511role && echo "$RB1511" | grep -q lab1511access
then
echo -e "\033[32m[OK]\033[0m\t\t rolebinding is set up the right way"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t no correctly configured rolebinding was found"
fi
TOTAL=$(( TOTAL + 10 ))

# check for pod that uses the ServiceAccount
if kubectl get -n access pod lab1511pod -o yaml | grep 'serviceAccount: lab1511access' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t the pod lab1511pod uses the serviceAccount lab1511access"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t the pod lab1511pod doesn't use the serviceAccount lab1511access"
fi
TOTAL=$(( TOTAL + 10 ))
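grep-ing the role YAML only proves the words appear somewhere; impersonation asks the API server whether the permissions actually work. A sketch, assuming the role grants create on deployments to the ServiceAccount lab1511access checked above:

# should print "yes" if the role and rolebinding are wired up correctly
kubectl auth can-i create deployments --as=system:serviceaccount:access:lab1511access -n access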
40 labs/exam1-task12.sh Normal file
@@ -0,0 +1,40 @@
# temporary cordon on worker1
kubectl cordon worker1 &>/dev/null

# verifying that the toleration works
if kubectl run lab1512testpod1 --image=nginx --restart=Never --overrides='{
  "spec": {
    "tolerations": [
      {
        "key": "type",
        "operator": "Equal",
        "value": "db",
        "effect": "NoSchedule"
      }
    ]
  }
}' &>/dev/null && sleep 5
then
echo -e "\033[32m[OK]\033[0m\t\t a pod with the toleration type=db will run on worker2"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t the toleration type=db doesn't allow the pod to run on worker2"
fi
TOTAL=$(( TOTAL + 10 ))

# verifying that a pod without the toleration doesn't run
kubectl run lab1512testpod2 --image=nginx &>/dev/null

if kubectl get pods lab1512testpod2 | grep Pending &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t without a toleration the testpod shows a state of Pending, and that is exactly what we need!"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t the testpod without the toleration runs, so the taint isn't functional"
fi
TOTAL=$(( TOTAL + 10 ))

# cleaning up
kubectl delete pod lab1512testpod1 lab1512testpod2 &>/dev/null
kubectl uncordon worker1 &>/dev/null
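The first check only proves that the kubectl run command was accepted; it doesn't verify where the pod landed. A stricter check could read the scheduled node directly (a sketch, not part of this commit):

# should print worker2 while worker1 is cordoned and the taint is in place
kubectl get pod lab1512testpod1 -o jsonpath='{.spec.nodeName}'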
17 labs/exam1-task13.sh Normal file
@@ -0,0 +1,17 @@
# runAsGroup may be set in the pod or the container securityContext; the grep catches both
if kubectl get pods -o yaml securepod | grep 'runAsGroup: 2000' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t securepod is running with group ID 2000"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t securepod is not running with group ID 2000"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl get pods -o yaml securepod | grep 'allowPrivilegeEscalation: false' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t container in pod securepod has privilege escalation disabled"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t container in pod securepod does not have privilege escalation disabled"
fi
TOTAL=$(( TOTAL + 10 ))
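Instead of grepping the full YAML, the two fields can be read directly with jsonpath (a sketch; assumes runAsGroup is set at the pod level and the setting sits in the first container):

kubectl get pod securepod -o jsonpath='{.spec.securityContext.runAsGroup}'
kubectl get pod securepod -o jsonpath='{.spec.containers[0].securityContext.allowPrivilegeEscalation}'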
18 labs/exam1-task14.sh Normal file
@@ -0,0 +1,18 @@
# escape the dot so the tag 1.0 is matched literally
if docker images | grep myapp | grep '1\.0' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t container image myapp:1.0 was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t container image myapp:1.0 was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if [ -f /tmp/myapp.tar ]
then
echo -e "\033[32m[OK]\033[0m\t\t tar archive /tmp/myapp.tar was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t tar archive /tmp/myapp.tar was not found"
fi
TOTAL=$(( TOTAL + 10 ))
8 labs/exam1-task15.sh Normal file
@@ -0,0 +1,8 @@
if kubectl get pod securepod -n oklahoma -o yaml | grep 'serviceAccount: secure' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t pod securepod in namespace oklahoma found and it is using the serviceaccount secure"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t couldn't find the pod securepod in namespace oklahoma that uses the serviceaccount secure"
fi
TOTAL=$(( TOTAL + 10 ))
18 labs/exam1-task2.sh Normal file
@@ -0,0 +1,18 @@
if kubectl get pods lab153pod | grep '2/2' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t the pod lab153pod was found and it is running 2 containers"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t cannot find the pod lab153pod running 2 containers"
fi
TOTAL=$(( TOTAL + 10 ))

# grep -z treats the input as one record, so the pattern may match across lines
if kubectl get pods lab153pod -o yaml | grep -i toleration -A3 | grep -iz noschedule.*control-plane &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t the pod lab153pod has a toleration that allows it to run on control-plane nodes"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t the pod lab153pod does not have a toleration that allows it to run on a control-plane node"
fi
TOTAL=$(( TOTAL + 10 ))
9 labs/exam1-task3.sh Normal file
@@ -0,0 +1,9 @@
LAB3POD=$(kubectl get pods 2>/dev/null | awk '/lab154/ { print $1 }')
# quoting matters: an empty $LAB3POD must not fall through to listing all pods
if kubectl get pods "$LAB3POD" -o yaml | grep initContainer &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t $LAB3POD is running and it does have an init container"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t cannot find $LAB3POD using an initcontainer"
fi
TOTAL=$(( TOTAL + 10 ))
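A jsonpath query can pinpoint the init containers more precisely than grepping the YAML for the string initContainer (a sketch, not part of this commit):

# prints the names of all init containers in the pod, empty if there are none
kubectl get pod "$LAB3POD" -o jsonpath='{.spec.initContainers[*].name}'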
8 labs/exam1-task4.sh Normal file
@@ -0,0 +1,8 @@
if kubectl get pv lab155 -o yaml | grep 'path.*/lab155' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t a PersistentVolume with the name lab155 was found and it uses the right path"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t PersistentVolume with the name lab155 using the right path was not found"
fi
TOTAL=$(( TOTAL + 10 ))
17 labs/exam1-task5.sh Normal file
@@ -0,0 +1,17 @@
if kubectl get svc | grep 32567 | grep lab156deploy &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t a NodePort Service exposing lab156deploy was found listening at port 32567"
SCORE=$(( SCORE + 7 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t no NodePort Service listening at port 32567 was found"
fi
TOTAL=$(( TOTAL + 7 ))

# three comma-separated "ip:port" endpoints contain exactly three colons
if kubectl get endpoints | grep lab156deploy | grep -E '^[^:]*(:[^:]*){3}$' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t the NodePort Service is exposing 3 Pods"
SCORE=$(( SCORE + 3 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t the NodePort Service is not exposing 3 Pods"
fi
TOTAL=$(( TOTAL + 3 ))
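Counting the endpoint addresses via jsonpath is a less cryptic alternative to the colon-counting regex (a sketch, not part of this commit):

# prints 3 when the Service endpoints contain three pod IPs
kubectl get endpoints lab156deploy -o jsonpath='{.subsets[*].addresses[*].ip}' | wc -w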
60 labs/exam1-task6.sh Normal file
@@ -0,0 +1,60 @@
echo -e "\033[35mhang on...\033[0m\t\t this task evaluation needs about a minute to complete"

## preparing a test Pod without a label, which should not get access
kubectl delete pod labgrade -n access &>/dev/null
sleep 5
kubectl run labgrade -n access --image=busybox -- sleep 3600 &>/dev/null
sleep 5

if kubectl exec -n access labgrade -- wget --spider --timeout=1 lab157web.restricted.svc.cluster.local &>/dev/null
then
echo -e "\033[31m[FAIL]\033[0m\t\t testpod is getting access, which means the networkpolicy is not correct"
else
echo -e "\033[32m[OK]\033[0m\t\t testpod is not getting access, good!"
SCORE=$(( SCORE + 10 ))
fi
TOTAL=$(( TOTAL + 10 ))

### testing if a networkpolicy was found
# do we have a networkpolicy on the namespace restricted?
if kubectl get netpol -n restricted &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t found a NetworkPolicy on the Namespace restricted"
SCORE=$(( SCORE + 4 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t no NetworkPolicy was found on the Namespace restricted"
fi
TOTAL=$(( TOTAL + 4 ))

# do we have a namespaceSelector that allows incoming traffic from the namespace access?
if kubectl get netpol -n restricted -o yaml | grep -A3 namespaceSelector | grep 'kubernetes.io/metadata.name: access' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t namespaceSelector is set correctly in the NetworkPolicy"
SCORE=$(( SCORE + 8 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t namespaceSelector is not set correctly in the NetworkPolicy"
fi
TOTAL=$(( TOTAL + 8 ))

# do we have a podSelector that allows incoming traffic from pods that have the label access=true?
if kubectl get netpol -n restricted -o yaml | grep -A3 podSelector | grep 'access: "true"' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t podSelector is set correctly in the NetworkPolicy"
SCORE=$(( SCORE + 8 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t podSelector is not set correctly in the NetworkPolicy"
fi
TOTAL=$(( TOTAL + 8 ))

## setting the label so that the testpod gets access
kubectl label -n access pod labgrade access="true" &>/dev/null

if kubectl exec -n access labgrade -- wget --spider --timeout=1 lab157web.restricted.svc.cluster.local &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t with the label access=true set, the testpod is getting access"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t even with the label access=true set, the testpod is not getting access"
fi
TOTAL=$(( TOTAL + 10 ))
26 labs/exam1-task7.sh Normal file
@@ -0,0 +1,26 @@
if kubectl get quota -n limited | grep 'memory: .*/128M' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t memory quota was set up correctly"
SCORE=$(( SCORE + 5 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t memory quota was not set up correctly"
fi
TOTAL=$(( TOTAL + 5 ))

if kubectl get quota -n limited | grep 'pods: .*/5' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t Pods quota was set up correctly"
SCORE=$(( SCORE + 5 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t Pods quota was not set up correctly"
fi
TOTAL=$(( TOTAL + 5 ))

if kubectl get deploy -n limited lab158deploy -o yaml | grep -A 5 resources | grep 'memory: 32Mi' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t lab158deploy resource request is set to 32Mi"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t lab158deploy resource request is not set to 32Mi"
fi
TOTAL=$(( TOTAL + 10 ))
9 labs/exam1-task8.sh Normal file
@@ -0,0 +1,9 @@
if kubectl get pods | grep lab159pod-worker2 &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t the pod lab159pod-worker2 was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t the pod lab159pod-worker2 was not found"
fi
TOTAL=$(( TOTAL + 10 ))
6 labs/exam1-task9.sh Normal file
@@ -0,0 +1,6 @@
if true
then
echo -e "\033[32m[OK]\033[0m\t\t this task cannot be automatically graded so you just get the points"
SCORE=$(( SCORE + 10 ))
fi
TOTAL=$(( TOTAL + 10 ))
39 labs/exam2-task1.sh Normal file
@@ -0,0 +1,39 @@
if kubectl get ns indiana &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t namespace indiana was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t namespace indiana was not found"
fi
TOTAL=$(( TOTAL + 10 ))

# the awk match is case-insensitive so both color: and COLOR: data keys are found
if [[ $(kubectl get -n indiana secret insecret -o yaml 2>/dev/null | awk 'tolower($1) ~ /color/ { print $2 }' | base64 -d) == blue ]]
then
echo -e "\033[32m[OK]\033[0m\t\t secret insecret with COLOR=blue was found"
SCORE=$(( SCORE + 10 ))
elif kubectl get -n indiana secret insecret &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t secret insecret was found, but not with the expected variable"
else
echo -e "\033[31m[FAIL]\033[0m\t\t secret insecret was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if [[ $(kubectl get pods -n indiana inpod -o jsonpath='{.spec.containers[*].image}' 2>/dev/null) == nginx:latest ]]
then
echo -e "\033[32m[OK]\033[0m\t\t found pod inpod that uses the latest version of nginx"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t pod inpod that uses the latest version of the nginx image was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl get pods -n indiana inpod -o yaml | grep insecret &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t pod inpod uses the secret insecret"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t pod inpod doesn't use the secret insecret"
fi
TOTAL=$(( TOTAL + 10 ))
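The secret value can also be read without awk, by addressing the data key directly (a sketch, assuming the key is named COLOR as the message above suggests):

kubectl get -n indiana secret insecret -o jsonpath='{.data.COLOR}' | base64 -d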
8 labs/exam2-task10.sh Normal file
@@ -0,0 +1,8 @@
if helm list | grep mysql &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t you have successfully installed the bitnami mysql chart"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t bitnami mysql chart not found"
fi
TOTAL=$(( TOTAL + 10 ))
26 labs/exam2-task11.sh Normal file
@@ -0,0 +1,26 @@
if kubectl get ns nebraska &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t namespace nebraska was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t namespace nebraska was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl -n nebraska get deploy | grep snowdeploy &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t Deployment snowdeploy was found in Namespace nebraska"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t Deployment snowdeploy was not found"
fi
TOTAL=$(( TOTAL + 10 ))

# fetch the YAML once and grep it for both requested values
SNOWYAML=$(kubectl -n nebraska get deploy snowdeploy -o yaml 2>/dev/null)
if echo "$SNOWYAML" | grep -A1 requests | grep -q 64Mi && echo "$SNOWYAML" | grep -A1 limits | grep -q 128Mi
then
echo -e "\033[32m[OK]\033[0m\t\t the requested memory request and limits have been found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t the requested memory request and limits have not been found"
fi
TOTAL=$(( TOTAL + 10 ))
27 labs/exam2-task12.sh Normal file
@@ -0,0 +1,27 @@
if kubectl get ns | grep birds &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t namespace birds was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t namespace birds was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if [[ $(kubectl -n birds get pods --show-labels --selector=type=allbirds 2>/dev/null | grep bird | wc -l) == "5" ]]
then
echo -e "\033[32m[OK]\033[0m\t\t good, 5 pods with label type=allbirds were found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t couldn't find 5 pods with the label type=allbirds"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl get -n birds svc allbirds | grep 32323 &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t NodePort Service allbirds listening on nodePort 32323 was found in Namespace birds"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t no NodePort Service allbirds listening on nodePort 32323 was found in Namespace birds"
fi
TOTAL=$(( TOTAL + 10 ))
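The selector already filters the pods, so a plain line count works without --show-labels and the grep (a sketch, not part of this commit):

kubectl -n birds get pods --selector=type=allbirds --no-headers | wc -l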
17 labs/exam2-task13.sh Normal file
@@ -0,0 +1,17 @@
# runAsGroup may be set in the pod or the container securityContext; the grep catches both
if kubectl get pods -o yaml securepod | grep 'runAsGroup: 2000' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t securepod is running with group ID 2000"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t securepod is not running with group ID 2000"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl get pods -o yaml securepod | grep 'allowPrivilegeEscalation: false' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t container in pod securepod has privilege escalation disabled"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t container in pod securepod does not have privilege escalation disabled"
fi
TOTAL=$(( TOTAL + 10 ))
18 labs/exam2-task14.sh Normal file
@@ -0,0 +1,18 @@
# escape the dot so the tag 1.0 is matched literally
if docker images | grep myapp | grep '1\.0' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t container image myapp:1.0 was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t container image myapp:1.0 was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if [ -f /tmp/myapp.tar ]
then
echo -e "\033[32m[OK]\033[0m\t\t tar archive /tmp/myapp.tar was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t tar archive /tmp/myapp.tar was not found"
fi
TOTAL=$(( TOTAL + 10 ))
8 labs/exam2-task15.sh Normal file
@@ -0,0 +1,8 @@
if kubectl get pod securepod -n oklahoma -o yaml | grep 'serviceAccount: secure' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t pod securepod in namespace oklahoma found and it is using the serviceaccount secure"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t couldn't find the pod securepod in namespace oklahoma that uses the serviceaccount secure"
fi
TOTAL=$(( TOTAL + 10 ))
11 labs/exam2-task2.sh Normal file
@@ -0,0 +1,11 @@
kubectl get pods -A --selector tier=control-plane | awk 'NR > 1 { print $2 }' > /tmp/task2file.txt

if diff /tmp/task2file.txt /tmp/task2pods &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t all pods with label tier=control-plane were found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t your result file doesn't show all pods with the label tier=control-plane"
fi
TOTAL=$(( TOTAL + 10 ))
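diff is order-sensitive, and kubectl's pod ordering isn't guaranteed to match the order in which the student saved the names; sorting both sides makes the comparison order-independent (a sketch, not part of this commit):

diff <(sort /tmp/task2file.txt) <(sort /tmp/task2pods) &>/dev/null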
17 labs/exam2-task3.sh Normal file
@@ -0,0 +1,17 @@
if kubectl get cm task3cm -o yaml | grep index.html &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t a configmap with the name task3cm was found with the right contents"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t configmap with the name task3cm was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl describe pod oregonpod | grep -A1 'ConfigMap' | grep task3cm &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t the pod oregonpod has the configmap task3cm mounted"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t the pod oregonpod doesn't seem to have the configmap task3cm mounted"
fi
TOTAL=$(( TOTAL + 10 ))
8 labs/exam2-task4.sh Normal file
@@ -0,0 +1,8 @@
# a native sidecar is an initContainer with restartPolicy: Always (Kubernetes 1.28+)
if kubectl get pods sidepod -o yaml | grep -A 10 initContainers | grep 'restartPolicy: Always' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t found a pod sidepod that runs a sidecar container"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t didn't find a pod sidepod that runs a sidecar container"
fi
TOTAL=$(( TOTAL + 10 ))
17 labs/exam2-task5.sh Normal file
@@ -0,0 +1,17 @@
if kubectl get ns probes &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t namespace probes was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t namespace probes was not found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl describe pods -n probes probepod | grep Liveness | grep '/healthz' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t pod probepod was found, as well as its Liveness probe"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t no pod probepod with correct liveness probe was found"
fi
TOTAL=$(( TOTAL + 10 ))
22 labs/exam2-task6.sh Normal file
@@ -0,0 +1,22 @@
# get the revision number of the last update that was found
kubectl rollout history deployment updates > /tmp/task6.txt
LAST=$(tail -2 /tmp/task6.txt | head -1 | awk '{ print $1 }')
BEFORE=$(( LAST - 1 ))

if kubectl rollout history deployment updates --revision=${LAST} | grep 'nginx:1.17' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t the last revision of the updates deploy is set to nginx:1.17"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t the last revision of the updates deploy is not set to nginx:1.17"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl rollout history deployment updates --revision=${BEFORE} | grep 'nginx:latest' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t the previous revision of the updates deploy was using nginx:latest"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t the previous revision of the updates deploy was not found or was not using nginx:latest"
fi
TOTAL=$(( TOTAL + 10 ))
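The image of the currently live revision can also be read from the Deployment spec itself, without parsing the rollout history output (a sketch, not part of this commit):

kubectl get deploy updates -o jsonpath='{.spec.template.spec.containers[0].image}'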
36 labs/exam2-task7.sh Normal file
@@ -0,0 +1,36 @@
if grep "$(minikube ip).*myapp.info" /etc/hosts &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t name resolution for myapp.info is set up"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t no name resolution for myapp.info was found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl describe svc task7svc | grep app=updates &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t Service task7svc found and exposes Deploy updates"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t No Service task7svc exposing Deploy updates was found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl get pods -n ingress-nginx | grep controller | grep Running &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t found a running ingress controller"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t no running ingress controller was found"
fi
TOTAL=$(( TOTAL + 10 ))

if kubectl describe ing | grep task7svc:80 &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t ingress rule forwarding traffic to task7svc was found"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t no ingress rule forwarding traffic to task7svc was found"
fi
TOTAL=$(( TOTAL + 10 ))
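An end-to-end probe of the ingress rule is possible without touching /etc/hosts, by pinning the name to the minikube IP for a single request (a sketch, assuming plain HTTP on port 80):

curl -s --resolve "myapp.info:80:$(minikube ip)" http://myapp.info/ | head -1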
17 labs/exam2-task8.sh Normal file
@@ -0,0 +1,17 @@
if kubectl describe networkpolicy | grep 'PodSelector:.*type=webapp' &>/dev/null && kubectl describe networkpolicy | grep 'PodSelector:.*type=tester' &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t NetworkPolicy was found with correct configuration"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t No NetworkPolicy with correct configuration was found"
fi
TOTAL=$(( TOTAL + 10 ))

# no -it: a TTY is neither available nor needed in a non-interactive script
if kubectl exec nevatest -- wget --spider --timeout=1 nevaginx &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t the tester pod can access the nevaginx pod"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t the tester pod cannot access the nevaginx pod"
fi
TOTAL=$(( TOTAL + 10 ))
17 labs/exam2-task9.sh Normal file
@@ -0,0 +1,17 @@
if kubectl exec storepod -- cat /usr/share/nginx/html/index.html &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t file index.html accessible through hostPath storage"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t file index.html not accessible through hostPath storage"
fi
TOTAL=$(( TOTAL + 10 ))

# -s keeps the curl progress meter from cluttering the grader output
if curl -s "$(minikube ip):32032" | grep welcome &>/dev/null
then
echo -e "\033[32m[OK]\033[0m\t\t Pod storepod correctly exposed and hostPath volume content accessible"
SCORE=$(( SCORE + 10 ))
else
echo -e "\033[31m[FAIL]\033[0m\t\t Pod storepod not correctly exposed"
fi
TOTAL=$(( TOTAL + 10 ))