diff --git a/check_apiserver.sh b/check_apiserver.sh
new file mode 100755
index 0000000..44908be
--- /dev/null
+++ b/check_apiserver.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+APISERVER_VIP=192.168.4.100
+APISERVER_DEST_PORT=6443
+
+errorExit() {
+    echo "* * * $*" 1>&2
+    exit 1
+}
+
+curl --silent --max-time 2 --insecure https://localhost:${APISERVER_DEST_PORT}/ -o /dev/null || errorExit "Error GET https://localhost:${APISERVER_DEST_PORT}/"
+if ip addr | grep -q "${APISERVER_VIP}"; then
+    curl --silent --max-time 2 --insecure https://${APISERVER_VIP}:${APISERVER_DEST_PORT}/ -o /dev/null || errorExit "Error GET https://${APISERVER_VIP}:${APISERVER_DEST_PORT}/"
+fi
diff --git a/haproxy.cfg b/haproxy.cfg
new file mode 100644
index 0000000..2ce75da
--- /dev/null
+++ b/haproxy.cfg
@@ -0,0 +1,82 @@
+#---------------------------------------------------------------------
+# Example configuration for a possible web application. See the
+# full configuration options online.
+#
+#   http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
+#
+#---------------------------------------------------------------------
+
+#---------------------------------------------------------------------
+# Global settings
+#---------------------------------------------------------------------
+global
+    # to have these messages end up in /var/log/haproxy.log you will
+    # need to:
+    #
+    # 1) configure syslog to accept network log events. This is done
+    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
+    #    /etc/sysconfig/syslog
+    #
+    # 2) configure local2 events to go to the /var/log/haproxy.log
+    #   file. A line like the following can be added to
+    #   /etc/sysconfig/syslog
+    #
+    #    local2.*                       /var/log/haproxy.log
+    #
+    log         127.0.0.1 local2
+
+    chroot      /var/lib/haproxy
+    pidfile     /var/run/haproxy.pid
+    maxconn     4000
+    user        haproxy
+    group       haproxy
+    daemon
+
+    # turn on stats unix socket
+    stats socket /var/lib/haproxy/stats
+
+#---------------------------------------------------------------------
+# common defaults that all the 'listen' and 'backend' sections will
+# use if not designated in their block
+#---------------------------------------------------------------------
+defaults
+    mode                    http
+    log                     global
+    option                  httplog
+    option                  dontlognull
+    option http-server-close
+    option forwardfor       except 127.0.0.0/8
+    option                  redispatch
+    retries                 3
+    timeout http-request    10s
+    timeout queue           1m
+    timeout connect         10s
+    timeout client          1m
+    timeout server          1m
+    timeout http-keep-alive 10s
+    timeout check           10s
+    maxconn                 3000
+
+#---------------------------------------------------------------------
+# main frontend which proxys to the backends
+#---------------------------------------------------------------------
+#---------------------------------------------------------------------
+# apiserver frontend which proxys to the masters
+#---------------------------------------------------------------------
+frontend apiserver
+    bind *:8443
+    mode tcp
+    option tcplog
+    default_backend apiserver
+#---------------------------------------------------------------------
+# round robin balancing for apiserver
+#---------------------------------------------------------------------
+backend apiserver
+    option httpchk GET /healthz
+    http-check expect status 200
+    mode tcp
+    option ssl-hello-chk
+    balance roundrobin
+        server control1 1.1.1.1:6443 check
+        server control2 1.1.1.2:6443 check
+        server control3 1.1.1.3:6443 check
diff --git a/keepalived.conf b/keepalived.conf
new file mode 100644
index 0000000..99d047e
--- /dev/null
+++ b/keepalived.conf
@@ -0,0 +1,29 @@
+! /etc/keepalived/keepalived.conf
+! Configuration File for keepalived
+global_defs {
+    router_id LVS_DEVEL
+}
+vrrp_script check_apiserver {
+  script "/etc/keepalived/check_apiserver.sh"
+  interval 3
+  weight -2
+  fall 10
+  rise 2
+}
+
+vrrp_instance VI_1 {
+    state MASTER
+    interface ens33
+    virtual_router_id 151
+    priority 255
+    authentication {
+        auth_type PASS
+        auth_pass Password
+    }
+    virtual_ipaddress {
+        192.168.4.100/24
+    }
+    track_script {
+        check_apiserver
+    }
+}
diff --git a/kube-setup.sh b/kube-setup.sh
index 24d5fa8..bf06f3b 100755
--- a/kube-setup.sh
+++ b/kube-setup.sh
@@ -1,8 +1,8 @@
 #!/bin/bash
 #
-# verified on Fedora 31 and Ubuntu LTS 20.04
+# verified on Fedora 31, 33 and Ubuntu LTS 20.04
 
-echo this script works on Fedora 31 and Ubuntu 20.04
+echo this script works on Fedora 31, 33 and Ubuntu 20.04
 echo it does NOT currently work on Fedora 32
 echo it requires the machine where you run it to have 6GB of RAM or more
 echo press Enter to continue
@@ -20,11 +20,11 @@ echo MYOS is set to $MYOS
 #### Fedora config
 if [ $MYOS = "Fedora" ]
 then
-	#if [ $OSVERSION = 32 ]
-	#then
-	#	echo Fedora 32 is not currently supported
-	#	exit 9
-	#fi
+	if [ $OSVERSION = 32 ]
+	then
+		echo Fedora 32 is not currently supported
+		exit 9
+	fi
 	sudo dnf clean all
 	sudo dnf -y upgrade
 
@@ -62,3 +62,5 @@ sudo mv minikube /usr/local/bin
 
 # start minikube
 minikube start --memory 4096 --vm-driver=kvm2
+echo if this script ends with an error, restart the virtual machine
+echo and manually run minikube start --memory 4096 --vm-driver=kvm2
diff --git a/setup-docker.sh b/setup-docker.sh
index 8bda7ee..7f359cc 100755
--- a/setup-docker.sh
+++ b/setup-docker.sh
@@ -25,15 +25,6 @@ cat > /etc/docker/daemon.json <<EOF
 }
 EOF
 
-cat >> /etc/hosts << EOF
-{
-	192.168.4.110 control.example.com control
-	192.168.4.111 worker1.example.com worker1
-	192.168.4.112 worker2.example.com worker2
-	192.168.4.113 worker3.example.com worker3
-}
-EOF
-
 mkdir -p /etc/systemd/system/docker.service.d
 
 systemctl daemon-reload
diff --git a/setup-lb.sh b/setup-lb.sh
index ab692ab..d0b96b2 100644
--- a/setup-lb.sh
+++ b/setup-lb.sh
@@ -1,146 +1,100 @@
 #!/bin/bash
 #
-# echo script to set up load balancing on cluster nodes
+# source https://github.com/sandervanvugt/cka/setup-lb.sh
+
+# script to set up load balancing on cluster nodes
 # for use in CKA courses by Sander van Vugt
-# version 0.1 - may be buggy!
+# version 0.5
 # currently only supporting CentOS 7.x
 # run this AFTER running setup-docker.sh and setup-kubetools.sh
-# read and try to udnerstand before running this!
+
+## establish key based SSH with remote hosts
+# obtain node information
+echo this script requires three nodes: control1 control2 and control3
+echo enter the IP address for control1
+read CONTROL1_IP
+echo enter the IP address for control2
+read CONTROL2_IP
+echo enter the IP address for control3
+read CONTROL3_IP
+echo '##### READ ALL OF THIS BEFORE CONTINUING ######'
+echo this script requires you to run setup-docker.sh and setup-kubetools.sh first
+echo this script is based on the NIC name ens33
+echo if your networkcard has a different name, edit keepalived.conf
+echo before continuing and change "interface ens33" to match your config
+echo .
+echo this script will create a keepalived apiserver at 192.168.4.100
+echo if this IP address does not match your network configuration,
+echo manually change the check_apiserver.sh file before continuing
+echo press enter to continue or Ctrl-c to interrupt and apply modifications
+read
+
+# performing check on critical files
+for i in keepalived.conf check_apiserver.sh haproxy.cfg
+do
+  if [ ! -f $i ]
+  then
+    echo $i should exist in the current directory && exit 2
+  fi
+done
+
+# create /etc/hosts for all nodes
+echo $CONTROL1_IP control1 >> /etc/hosts
+echo $CONTROL2_IP control2 >> /etc/hosts
+echo $CONTROL3_IP control3 >> /etc/hosts
+
+# generating and distributing SSH keys
+ssh-keygen
+ssh-copy-id control1
+ssh-copy-id control2
+ssh-copy-id control3
 
 # install required software
 yum install haproxy keepalived -y
+ssh control2 "yum install haproxy keepalived -y"
+ssh control3 "yum install haproxy keepalived -y"
+
+# copying /etc/hosts file
+scp /etc/hosts control2:/etc/
+scp /etc/hosts control3:/etc/
 
 # create keepalived config
 # change IP address to anything that works in your environment!
-cat << EOF >> /etc/keepalived/check_apiserver.sh
-APISERVER_VIP=192.168.4.100
-APISERVER_DEST_PORT=6443
+chmod +x check_apiserver.sh
+cp check_apiserver.sh /etc/keepalived/
+scp check_apiserver.sh control2:/etc/keepalived/
+scp check_apiserver.sh control3:/etc/keepalived/
 
-errorExit() {
-    echo "* * * $*" 1>&2
-    exit 1
-}
+#### creating site specific keepalived.conf file
+cp keepalived.conf keepalived-control2.conf
+cp keepalived.conf keepalived-control3.conf
 
-curl --silent --max-time 2 --insecure https://localhost:${APISERVER_DEST_PORT}/ -o /dev/null || errorExit "Error GET https://localhost:${APISERVER_DEST_PORT}/"
-if ip addr | grep -q ${APISERVER_VIP}; then
-    curl --silent --max-time 2 --insecure https://${APISERVER_VIP}:${APISERVER_DEST_PORT}/ -o /dev/null || errorExit "Error GET https://${APISERVER_VIP}:${APISERVER_DEST_PORT}/"
-EOF
+sed -i 's/state MASTER/state BACKUP/' keepalived-control2.conf
+sed -i 's/state MASTER/state BACKUP/' keepalived-control3.conf
+sed -i 's/priority 255/priority 254/' keepalived-control2.conf
+sed -i 's/priority 255/priority 253/' keepalived-control3.conf
 
-#### creating second script, make sure to change IP addresses!
+cp keepalived.conf /etc/keepalived/
+scp keepalived-control2.conf control2:/etc/keepalived/keepalived.conf
+scp keepalived-control3.conf control3:/etc/keepalived/keepalived.conf
 
-cat << EOF >> /etc/keepalived/keepalived.conf
-! /etc/keepalived/keepalived.conf
-! Configuration File for keepalived
-global_defs {
-    router_id LVS_DEVEL
-}
-vrrp_script check_apiserver {
-  script "/etc/keepalived/check_apiserver.sh"
-  interval 3
-  weight -2
-  fall 10
-  rise 2
-}
+### rewriting haproxy.cfg with site specific IP addresses
+sed -i s/server\ control1\ 1.1.1.1\:6443\ check/server\ control1\ $CONTROL1_IP\:6443\ check/ haproxy.cfg
+sed -i s/server\ control2\ 1.1.1.2\:6443\ check/server\ control2\ $CONTROL2_IP\:6443\ check/ haproxy.cfg
+sed -i s/server\ control3\ 1.1.1.3\:6443\ check/server\ control3\ $CONTROL3_IP\:6443\ check/ haproxy.cfg
 
-vrrp_instance VI_1 {
-    state MASTER
-    interface ens33
-    virtual_router_id 151
-    priority 255
-    authentication {
-        auth_type PASS
-        auth_pass Password
-    }
-    virtual_ipaddress {
-        192.168.4.100/24
-    }
-    track_script {
-        check_apiserver
-    }
-}
-EOF
-
-chmod +x /etc/keepalived/check_apiserver.sh
-
-### setting up haproxy
-echo > /etc/haproxy/haproxy.cfg
-cat << EOF >> /etc/haproxy/haproxy.cfg
-
-    # /etc/sysconfig/syslog
-    #
-    #    local2.*                       /var/log/haproxy.log
-    #
-    log         127.0.0.1 local2
-
-    chroot      /var/lib/haproxy
-    pidfile     /var/run/haproxy.pid
-    maxconn     4000
-    user        haproxy
-    group       haproxy
-    daemon
-
-    # turn on stats unix socket
-    stats socket /var/lib/haproxy/stats
-
-#---------------------------------------------------------------------
-# common defaults that all the 'listen' and 'backend' sections will
-# use if not designated in their block
-#---------------------------------------------------------------------
-defaults
-    mode                    http
-    log                     global
-    option                  httplog
-    option                  dontlognull
-    option http-server-close
-    option forwardfor       except 127.0.0.0/8
-    option                  redispatch
-    retries                 3
-    timeout http-request    10s
-    timeout queue           1m
-    timeout connect         10s
-    timeout client          1m
-    timeout server          1m
-    timeout http-keep-alive 10s
-    timeout check           10s
-    maxconn                 3000
-
-#---------------------------------------------------------------------
-# main frontend which proxys to the backends
-#---------------------------------------------------------------------
-#---------------------------------------------------------------------
-# apiserver frontend which proxys to the masters
-#---------------------------------------------------------------------
-frontend apiserver
-    bind *:8443
-    mode tcp
-    option tcplog
-    default_backend apiserver
-#---------------------------------------------------------------------
-# round robin balancing for apiserver
-#---------------------------------------------------------------------
-backend apiserver
-    option httpchk GET /healthz
-    http-check expect status 200
-    mode tcp
-    option ssl-hello-chk
-    balance roundrobin
-        server control1 192.168.4.87:6443 check
-        server control2 192.168.4.88:6443 check
-        server control3 192.168.4.89:6443 check
-EOF
-
-echo enter IP address of second HA node
-read SECONDNODE
-
-echo enter IP address of third HA node
-read THIRDNODE
+# copy haproxy.cfg to destinations
+cp haproxy.cfg /etc/haproxy/
+scp haproxy.cfg control2:/etc/haproxy/
+scp haproxy.cfg control3:/etc/haproxy/
 
+# start and enable services
 systemctl enable keepalived --now
 systemctl enable haproxy --now
+ssh control2 systemctl enable keepalived --now
+ssh control2 systemctl enable haproxy --now
+ssh control3 systemctl enable keepalived --now
+ssh control3 systemctl enable haproxy --now
 
-echo now edit the keepalived.conf file on $SECONDNODE and $THIRDNODE
-echo change "state MASTER" to "state SLAVE"
-echo set priority to 254 on $SECONDNODE and 253 on $THIRDNODE
-echo and use systemctl to enable --now keepalived and haproxy services
-echo I will automate this in a later version of this script
-for i in $SECONDNODE $THIRDNODE; do scp /etc/keepalived/check_apiserver.sh /etc/keepalived/keepalived.conf root@$i:/etc/keepalived; scp /etc/haproxy/haproxy.cfg root@$i:/etc/haproxy; done
+echo setup is now done, please verify
+echo control1 should run the virtual IP address 192.168.4.100