Install the repositories, Docker and Kubernetes packages on Ubuntu LTS 16.04, 18.04 and 20.04:
#!/bin/bash
#author: tmade
#version: 1.5
RELEASE=$(lsb_release -cs)
echo "Install requirements"
apt-get install -y \
apt-transport-https \
ca-certificates \
curl \
gnupg-agent \
software-properties-common
echo ""
echo "check if docker is already installed..."
dpkg -l | grep docker-ce
DOCKERINSTALL=$?
echo ""
if [ ${DOCKERINSTALL} -eq "0" ]
then
echo "docker already installed"
else
echo "docker isn´t installed yet - installing..."
curl -sSL https://get.docker.com | sh
fi
echo ""
echo "check k8s installation:"
dpkg -l | grep kube
KUBEINSTALL=$?
echo ""
if [ ${KUBEINSTALL} -eq "0" ]
then
echo "k8s already installed"
else
echo "Installing k8s repository..."
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
#echo "deb http://apt.kubernetes.io/ kubernetes-${RELEASE} main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
fi
echo ""
echo "Setup cgroupdriver as systemd"
echo ""
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2"
}
EOF
# Restart docker.
mkdir -p /etc/systemd/system/docker.service.d
systemctl daemon-reload
systemctl restart docker
apt-get update
apt-mark unhold kubernetes-cni kubeadm kubelet kubectl docker-ce docker-ce-cli
echo ""
echo "Up- or downgrade docker and k8s packages"
echo ""
apt-cache policy docker-ce | head -n 30
echo ""
read -p "Please enter the docker version you want to install (e. g. \"5:19.03.6~3-0~ubuntu-xenial\"): " DVERSION
echo "Version: \"${DVERSION}\" will be installed!"
echo ""
apt-get install -y docker-ce=${DVERSION} docker-ce-cli=${DVERSION} --allow-downgrades
echo ""
apt-cache policy kubeadm | head -n 30
echo ""
read -p "Please enter the k8s version you want to install (e. g. \"1.16.4-00\"): " VERSION
echo "Version: \"${VERSION}\" will be installed!"
echo ""
apt-get install -y kubeadm=${VERSION} kubelet=${VERSION} kubectl=${VERSION} --allow-downgrades
echo ""
apt-mark hold kubelet kubeadm kubectl docker-ce docker-ce-cli
echo ""
echo "k8s packages are installed!"
echo ""
Check also on https://kubernetes.io/docs/setup/independent/install-kubeadm/
Configuring both your container runtime and the kubelet to use systemd as the cgroup driver stabilizes the system. Please note the native.cgroupdriver=systemd setup below:
## Install Docker CE.
apt-get update && apt-get install docker-ce=18.06.2~ce~3-0~ubuntu
# Setup daemon.
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2"
}
EOF
# Restart docker.
mkdir -p /etc/systemd/system/docker.service.d
systemctl daemon-reload
systemctl restart docker
Check:
docker info |grep -i cgroup
Check:
cat /etc/systemd/system/kubelet.service.d/10-kubeadm.conf | grep -i Environment
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env EnvironmentFile=-/etc/default/kubelet
cat /var/lib/kubelet/config.yaml |grep -i cgroupDriver
Change to:
cgroupDriver: systemd
if not already set.
Check also:
/var/lib/kubelet/kubeadm-flags.env
and
/var/lib/kubelet/config.yaml
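A minimal sketch to check and switch the cgroup driver (assuming the default kubeadm/docker file locations mentioned above):
docker info 2>/dev/null | grep -i cgroup
grep -i cgroupDriver /var/lib/kubelet/config.yaml
#switch from "cgroupfs" to "systemd" if necessary
sudo sed -i 's/cgroupDriver: cgroupfs/cgroupDriver: systemd/' /var/lib/kubelet/config.yaml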
Check after modification:
systemctl daemon-reload
systemctl restart kubelet.service
systemctl status kubelet.service
systemctl status kubelet.service | grep "\--cgroup-driver=systemd"
Run only on master:
kubeadm config images pull #pulling images before setting up k8s
kubeadm init --apiserver-advertise-address=192.168.10.5 --pod-network-cidr=192.168.0.0/16 #if there are several NICs you have to choose the management NIC/IP
kubeadm init --pod-network-cidr=192.168.0.0/16 #set pod-network-cidr to use the calico CNI network plugin
Hint: If you are running your system behind a proxy, you have to add a proxy exclude (“/etc/environment”):
no_proxy="localhost,127.0.0.1,IP-Master-Node,IP-Worker-Node,IP_Master-Node-Network,10.96.0.0/12,192.168.0.0,::1"
To start using your cluster, you need to run the following as a regular user (sudo rights required!):
useradd -s /bin/bash -m kubernetes
echo "kubernetes ALL=(ALL:ALL) NOPASSWD: ALL" >> /etc/sudoers
su - kubernetes
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Check:
kubectl get pods -o wide --all-namespaces
kubectl get pods --all-namespaces -o wide -w
kubectl get pods --all-namespaces
The following script works with Ubuntu 20.04 LTS and k8s versions up to “1.23.3-00”.
Setup:
#!/bin/bash
#author: tmade
#version: 1.1
#kubernetes user name
USER="kubernetes"
HOME="/home/${USER}"
KUBEHOME="${HOME}/.kube"
#CIDR="10.0.0.5"
PODNETWORKADDON="192.168.0.0/16"
read -p "Do you want to create the user ${USER}? Please enter \"y\" or \"n\: " MYUSER
echo ""
if [ ${MYUSER,,} == "y" ]
then
useradd -s /bin/bash -m ${USER}
fi
sudo -lU ${USER} > /dev/null 2>&1
RESULT=$?
if [ "${RESULT}" -ne "0" ] ;
then
echo "User "${USER}" does not exist in sudoers and will be added."
echo "${USER} ALL=(ALL:ALL) NOPASSWD: ALL" >> /etc/sudoers
else
echo "User "${USER}" already exists in sudoers - won´t be added again!"
fi
echo ""
echo "Setup -------------k8s--------------"
echo ""
su - ${USER} -c "kubectl version 2> /dev/null"
echo ""
su - ${USER} -c "read -s -n 1 -p \"Press any key to continue . . .\""
apt-get update
apt-cache policy kubeadm | head -n 30
#apt-cache policy docker-ce
echo ""
read -p "Please enter k8s version you want to install (e. g. \"1.23.3-00\"): " VERSION
echo "Version: \"$VERSION\" will be installed!"
apt-mark unhold kubernetes-cni kubeadm kubelet kubectl docker-ce
apt-get install -y kubeadm=${VERSION} kubelet=${VERSION} kubectl=${VERSION}
echo ""
read -p "Please enter your CIDR management ip-adress for your master (e. g. \"10.6.33.10\"): " CIDR
echo ""
echo "ip set to: \"$CIDR\""
echo ""
kubeadm init --apiserver-advertise-address=${CIDR} --pod-network-cidr=${PODNETWORKADDON}
echo ""
read -s -n 1 -p "Press any key to continue . . ."
echo ""
if [ -e ${KUBEHOME} ]
then
echo "\"${KUBEHOME}\" exists"
read -p "Do you want to delete \"${KUBEHOME}\"? Please enter y (proceed) or n (stop): " PROCEED
echo "You´ve entered: \"$PROCEED\""
echo ""
if [ "${PROCEED,,}" == "y" ]
then
rm -r ${KUBEHOME}
echo "\"${KUBEHOME}\" deleted!"
echo ""
read -s -n 1 -p "Press any key to continue . . ."
else
exit 1
fi
fi
echo "Creating folder structure..."
echo ""
su - ${USER} -c "mkdir -p $HOME/.kube"
su - ${USER} -c "sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config"
chown -R ${USER}:${USER} $HOME/.kube
echo ""
echo "home \"$HOME/.kube\" copied!"
echo ""
read -s -n 1 -p "Press any key to continue . . ."
echo ""
echo "Creating calico pod network addon..."
echo ""
su - ${USER} -c "kubectl create -f https://projectcalico.docs.tigera.io/manifests/tigera-operator.yaml"
su - ${USER} -c "kubectl create -f https://projectcalico.docs.tigera.io/manifests/custom-resources.yaml"
echo ""
echo "calico pod network add on has been deployed"
echo ""
read -s -n 1 -p "Press any key to continue . . ."
#install dashboard
su - ${USER} -c "kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.5.0/aio/deploy/recommended.yaml"
echo ""
echo "dashboard has been deployed"
echo ""
read -s -n 1 -p "Press any key to continue . . ."
apt-mark hold kubernetes-cni kubeadm kubelet kubectl docker-ce
echo ""
while [ -z "${SCHEDULE}" ]
do
read -p "Do you want to schedule pods on your master? Please enter \"y\" or \"n\": " SCHEDULE
echo "Please enter \"y\" or \"n\""
done
echo ""
echo "You´ve entered: \"$SCHEDULE\""
echo ""
if [ "${SCHEDULE}" = "y" ]
then
kubectl taint nodes $(hostname) node-role.kubernetes.io/master-
fi
echo ""
echo "Status - please press \"ctrl + c\" when all pods are running"
echo ""
watch kubectl get pods -A -o wide
The following script works with Ubuntu 16.04 LTS!
Setup:
#!/bin/bash
#author: tmade
#version: 1.5
USER="kubernetes"
HOME="/home/${USER}"
CALICO="/home/kubernetes/calico"
#filename such as "calico-v3.11.2.yaml" expected
CALICOVERS="v3.11.2"
KUBEHOME="${HOME}/.kube"
#CIDR="10.0.0.5"
DASBOARD="/home/kubernetes/dashboard"
# filename such as "kubernetes-dashboard-v2.0.0-rc5.yaml" expected
DASHVERS="v2.0.1"
PODNETWORKADDON="192.168.0.0/16"
echo ""
echo "Setup -------------k8s--------------"
echo ""
su - kubernetes -c "kubectl version 2> /dev/null"
echo ""
su - kubernetes -c "read -s -n 1 -p \"Press any key to continue . . .\""
apt-get update
apt-cache policy kubeadm | head -n 30
#apt-cache policy docker-ce
echo ""
read -p "Please enter k8s version you want to install (e. g. \"1.16.4-00\"): " VERSION
echo "Version: \"$VERSION\" will be installed!"
apt-mark unhold kubernetes-cni kubeadm kubelet kubectl docker-ce
#apt-mark unhold kubernetes-cni kubeadm kubelet kubectl docker-ce
apt-get install -y kubeadm=${VERSION} kubelet=${VERSION} kubectl=${VERSION}
echo ""
read -p "Please enter your CIDR management ip-adress for your master (e. g. \"10.6.33.10\"): " CIDR
echo ""
echo "ip set to: \"$CIDR\""
echo ""
kubeadm init --apiserver-advertise-address=${CIDR} --pod-network-cidr=${PODNETWORKADDON}
echo ""
read -s -n 1 -p "Press any key to continue . . ."
echo ""
if [ -e ${KUBEHOME} ]
then
echo "\"${KUBEHOME}\" exists"
read -p "Do you want to delete \"${KUBEHOME}\"? Please enter y (proceed) or n (stop): " PROCEED
echo "You´ve entered: \"$PROCEED\""
echo ""
if [ "${PROCEED}" = "y" ]
then
rm -r ${KUBEHOME}
echo "\"${KUBEHOME}\" deleted!"
echo ""
read -s -n 1 -p "Press any key to continue . . ."
else
exit 1
fi
fi
su - ${USER} -c "mkdir -p $HOME/.kube"
su - ${USER} -c "sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config"
chown -R ${USER}:${USER} $HOME/.kube
echo ""
echo "home \"$HOME/.kube\" copied!"
echo ""
read -s -n 1 -p "Press any key to continue . . ."
#calico pod-network-addon
#su - kubernetes -c "kubectl apply -f /home/kubernetes/calico/${CALICOVERS}/rbac-kdd.yaml"
#su - kubernetes -c "kubectl apply -f /home/kubernetes/calico/${CALICOVERS}/calico.yaml"
su - kubernetes -c "kubectl apply -f ${CALICO}/calico-${CALICOVERS}.yaml"
echo ""
echo "calico pod network add on has been deployed"
echo ""
read -s -n 1 -p "Press any key to continue . . ."
#install dashboard
su - kubernetes -c "kubectl apply -f ${DASBOARD}/kubernetes-dashboard-${DASHVERS}.yaml"
echo ""
echo "dashboard has been deployed"
echo ""
read -s -n 1 -p "Press any key to continue . . ."
apt-mark hold kubernetes-cni kubeadm kubelet kubectl docker-ce
echo ""
while [ -z "${SCHEDULE}" ]
do
read -p "Do you want to schedule pods on your master? Please enter \"y\" or \"n\": " SCHEDULE
echo "Please enter \"y\" or \"n\""
done
echo ""
echo "You´ve entered: \"$SCHEDULE\""
echo ""
if [ "${SCHEDULE}" = "y" ]
then
kubectl taint nodes $(hostname) node-role.kubernetes.io/master-
fi
echo ""
echo "Status - please press \"ctrl + c\" when all pods are running"
echo ""
watch kubectl get pods -A -o wide
https://docs.projectcalico.org/v3.10/reference/node/configuration
You have to deploy a pod network to the cluster. A pod network add-on is required so that your pods can communicate with each other!
kubectl apply -f [podnetwork].yaml
Pod network add-ons:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Example “calico”:
kubectl apply -f https://docs.projectcalico.org/v3.8/manifests/calico.yaml
Check also https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#pod-network
Important:
Replace
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
with
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
- name: IP_AUTODETECTION_METHOD
value: "interface=ens18"
in
calico.yaml
Download:
curl https://docs.projectcalico.org/v3.10/manifests/calico.yaml -O
The interface has to be set explicitly (“ens18” in this example)!
Script to change “calico-v3.8.5.yaml”:
#!/bin/bash
#author: tmade
#version: 1.1
INTERFACE="ens18"
#CALIVERS="calico-v3.8.5.yaml"
echo ""
echo "Overview calico versions:"
echo ""
ls -al /home/kubernetes/calico
echo ""
read -p "Please enter the calico version you want to patch to (e. g. \"calico-v3.8.5.yaml\"): " CALIVERS
echo "Version: \"$CALIVERS\" will be modified!"
echo ""
grep -R 'value: "interface' ${CALIVERS}
#grep -R 'value: "interface' calico-v3.8.5.yaml
IFACESET=$?
if [ ${IFACESET} -eq 0 ]
then
echo "Interface already set - nothing to do"
else
sed -i 's/value: "autodetect"/value: "autodetect"\n - name: IP_AUTODETECTION_METHOD\n value: "interface='${INTERFACE}'"/g' ${CALIVERS}
echo "Interface set to \"${INTERFACE}\""
fi
#!/bin/bash
#author: tmade
#version: 1.2
INTERFACE="eth0"
CALICOURLVERSION="v3.11"
FILE="calico.yaml"
URL="https://docs.projectcalico.org/${CALICOURLVERSION}/manifests/$FILE"
USER="kubernetes"
HOME="/home/${USER}/calico"
if [ -d ${HOME} ]
then
cd ${HOME}
echo ""
echo "Changed path to \"${HOME}\""
echo ""
else
echo ""
echo "Directory \"${HOME}\" doesn´t exist, please create it via su - kubernetes -c \"mkdir ${HOME}\" and rerun the script!\""
echo ""
exit 1
fi
echo ""
echo "Download $URL and set interface"
echo ""
curl $URL | sed 's/value: "autodetect"/value: "autodetect"\n - name: IP_AUTODETECTION_METHOD\n value: "interface='${INTERFACE}'"/g' > ${HOME}/${FILE}
echo ""
echo "calico downloaded"
echo ""
CALICOVERSION=$(grep "calico/node:v" $HOME/$FILE | rev |cut -d ":" -f 1 |rev)
echo "renaming..."
echo ""
mv ${HOME}/${FILE} ${HOME}/calico-${CALICOVERSION}.yaml
chown ${USER}:${USER} *
echo "done"
Install:
kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta5/aio/deploy/recommended.yaml
kubectl apply -f kubernetes-dashboard.yaml
Note: Check also https://github.com/kubernetes/dashboard/releases
Download the dashboard yaml and modify the token TTL (default is just 10 minutes).
#!/bin/bash
#author: tmade
#version: 1.3
DASHBOARDURLVERSION="v2.0.4"
FILE="recommended.yaml"
URL="https://raw.githubusercontent.com/kubernetes/dashboard/${DASHBOARDURLVERSION}/aio/deploy/${FILE}"
USER="kubernetes"
HOME="/home/${USER}/dashboard"
TTL="86400"
if [ -d ${HOME} ]
then
cd ${HOME}
echo ""
echo "Changed path to \"${HOME}\""
echo ""
else
echo ""
echo "Directory \"${HOME}\" doesn´t exist, please create it via su - kubernetes -c \"mkdir ${HOME}\" and rerun the script!"
echo ""
exit 1
fi
echo ""
echo "Download:"
echo ""
echo "$URL"
echo ""
#download and modify, to add ttl - default ttl is just 10 min.
su - ${USER} -c "curl $URL | sed 's/- --auto-generate-certificates/- --auto-generate-certificates\n - --token-ttl='${TTL}'/g' > $HOME/$FILE"
echo ""
echo "Dashboard downloaded"
echo ""
echo "renaming..."
su - ${USER} -c "mv $HOME/$FILE $HOME/kubernetes-dashboard-${DASHBOARDURLVERSION}.yaml"
echo ""
echo "renamed to \"$HOME/kubernetes-dashboard-${DASHBOARDURLVERSION}.yaml\""
echo ""
echo "done"
Delete:
kubectl -n kube-system delete deployment kubernetes-dashboard # < v2.0.0
kubectl -n kubernetes-dashboard delete deployment kubernetes-dashboard # > v2.0.0 as namespace of dashboard has changed
kubectl -n kubernetes-dashboard delete $(kubectl -n kubernetes-dashboard get pod -o name | grep dashboard)
Edit:
kubectl edit deployment kubernetes-dashboard -n kube-system # < v2.0.0
kubectl edit deployment kubernetes-dashboard -n kubernetes-dashboard # > v2.0.0 as namespace of dashboard has changed
Show config:
kubectl describe pods -n kube-system kubernetes-dashboard # < v2.0.0
kubectl describe pods -n kubernetes-dashboard kubernetes-dashboard # > v2.0.0 as namespace of dashboard has changed
To change login “token-ttl”, edit
spec:
containers:
- name: kubernetes-dashboard
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --token-ttl=86400
to the value you prefer (default 900 sec). If “token-ttl” is not available, add the argument.
Check also on https://github.com/kubernetes/dashboard/wiki/Dashboard-arguments
It's not recommended for production use, so use it just for quick access or troubleshooting!
Network access on port 9999 without host restriction. Note: this MUST be run as the kubernetes user (unless you run kubernetes as root)!:
kubectl proxy --port 9999 --address='192.168.10.5' --accept-hosts="^*$"
Access only on localhost on default port 8001:
kubectl proxy
Access-URL:
http://192.168.10.5:9999/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/
Default access-URL:
http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/
A certificate - installed in the client browser - is required for access! Generate it on your kubernetes master and install it on your client.
Certificate (run as kubernetes user):
grep 'client-certificate-data' ~/.kube/config | head -n 1 | awk '{print $2}' | base64 -d >> kubecfg.crt
grep 'client-key-data' ~/.kube/config | head -n 1 | awk '{print $2}' | base64 -d >> kubecfg.key
openssl pkcs12 -export -clcerts -inkey kubecfg.key -in kubecfg.crt -out kubecfg.p12 -name "kubernetes-client"
Furthermore, a “ServiceAccount” inside the namespace “kube-system” with a “ClusterRoleBinding” is required.
Create service account “admin-user”:
cat <<EOF | kubectl create -f - apiVersion: v1 kind: ServiceAccount metadata: name: admin-user namespace: kube-system EOF
Create ClusterRoleBinding:
cat <<EOF | kubectl create -f - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: admin-user roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cluster-admin subjects: - kind: ServiceAccount name: admin-user namespace: kube-system EOF
Get the Bearer Token, which is required for browser login:
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Note: If you run
kubectl get secret -n kube-system $(kubectl get serviceaccount -n kube-system -o yaml |grep admin-user | grep token | awk '{print $3}') -o yaml
you are not getting the bearer token and the token has to be piped to “base64 --decode” to use it for authentication! Example:
echo "38nnbnbfnktopkeknfvvs..lkjkjhrkjhkdknlöxc,x00073" | base64 --decode
With “describe” you get the bearer token directly!
Access URL:
https://<master-ip-or-dns-name>:<apiserver-port>/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy # < v2.0.0 https://<master-ip-or-dns-name>:<apiserver-port>/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/#/login # > v2.0.0 as namespace of dashboard has changed
Example:
https://my-k8s:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy # < v2.0.0
https://my-k8s:6443/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy # > v2.0.0 as namespace of dashboard has changed
Note: Cluster info to get access information:
kubectl cluster-info
Get the configuration file:
kubectl config view --raw
Save content to a file and reference it on login.
Check also on https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/
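A minimal sketch of using such a saved file from a client (the file name is just an example):
kubectl config view --raw > my-cluster.conf
kubectl --kubeconfig=my-cluster.conf get nodes
export KUBECONFIG=$PWD/my-cluster.conf #alternatively export it for the current shell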
To avoid always having to select the same default certificate name (“kubernetes-admin”) when accessing the dashboard - especially if you are running several kubernetes systems - you may create your own certificates (such as “kubecfg-myhostname.crt”) and clusterrolebindings.
Create the *.csr, *.crt, *.p12 and *.key:
openssl req -out kubecfg-myhostname.csr -new -newkey rsa:4096 -nodes -keyout kubecfg-myhostname.key -subj "/C=DE/ST=BW/L=MyCity/O=MyOrganisation/OU=Datacenter/CN=admin-user/emailAddress=tmade@test.com"
sudo openssl x509 -req -in kubecfg-myhostname.csr -CA /etc/kubernetes/pki/ca.crt -CAkey /etc/kubernetes/pki/ca.key -CAcreateserial -out kubecfg-myhostname.crt -days 1000 -sha256
openssl pkcs12 -export -clcerts -inkey kubecfg-myhostname.key -in kubecfg-myhostname.crt -out kubecfg-myhostname.p12 -name "kubernetes-client"
Note: The “common name” (CN) must be the same as the account-name!
Check certificate:
openssl x509 -noout -text -in kubecfg-myhostname.crt
Create a service account (that matches the CN):
cat <<EOF | kubectl create -f - apiVersion: v1 kind: ServiceAccount metadata: name: admin-user namespace: kube-system EOF
Create clusterrolebinding for serviceaccount:
cat <<EOF | kubectl create -f - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: admin-user-binding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cluster-admin subjects: - kind: ServiceAccount name: admin-user namespace: kube-system EOF
Create clusterrolebinding for the user (token):
cat <<EOF | kubectl create -f - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: admin-user-binding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cluster-admin subjects: - apiGroup: rbac.authorization.k8s.io kind: User name: admin-user EOF
Get the bearer token:
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
#!/bin/bash
#author: tmade
#version: 1.5
echo ""
echo "Important: The \"Common Name\" (CN) must be the same as the ServiceAccount name (e. g. MyHostname)!"
C="DE"
ST="BW"
L="MyCity"
O="MyCompany"
OU="MyDepartment"
CN="MyHostname"
MAIL="test@test.com"
CERT="$(echo ~/certs)"
if [ -d ${CERT} ]
then
cd ${CERT}
echo ""
echo "Changed path to \"${CERT}\""
echo ""
else
echo ""
echo "Directory \"${CERT}\" doesn´t exist, please create it via \"mkdir ${CERT}\" and rerun the script!"
echo ""
exit 1
fi
read -p "Please enter your secure certificate password: " PASSWORD
echo ""
echo "Your password set to: \"$PASSWORD\""
echo ""
dd if=/dev/urandom of=~/.rnd bs=256 count=1 2> /dev/null
openssl req -out kubecfg-${CN}.csr -new -newkey rsa:4096 -nodes -keyout kubecfg-${CN}.key -subj "/C=${C}/ST=${ST}/L=${L}/O=${O}/OU=${OU}/CN=${CN}/emailAddress=${MAIL}"
sudo openssl x509 -req -in kubecfg-${CN}.csr -CA /etc/kubernetes/pki/ca.crt -CAkey /etc/kubernetes/pki/ca.key -CAcreateserial -out kubecfg-${CN}.crt -days 1000 -sha256
openssl pkcs12 -export -clcerts -inkey kubecfg-${CN}.key -in kubecfg-${CN}.crt -out kubecfg-${CN}.p12 -name "kubernetes-client" -passin pass:$PASSWORD -passout pass:$PASSWORD
echo ""
echo "Certificates created!"
echo ""
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: ServiceAccount
metadata:
name: ${CN}
namespace: kube-system
EOF
cat <<EOF | kubectl create -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ${CN}-user-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: ${CN}
EOF
cat <<EOF | kubectl create -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ${CN}-sa-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: ${CN}
namespace: kube-system
EOF
echo ""
echo "get the bearer token by running:"
echo ""
echo "kubectl -n kube-system describe secret \$(kubectl -n kube-system get secret | grep ${CN} | awk '{print \$1}')"
echo ""
Remove the taints on the master so that you can schedule pods on it (not allowed by default):
kubectl taint nodes $(hostname) node-role.kubernetes.io/master-
Revert:
kubectl taint nodes $(hostname) node-role.kubernetes.io/master="":NoSchedule
kubectl taint nodes $(hostname) node-role.kubernetes.io/master- #only worker
kubectl taint nodes --all node-role.kubernetes.io/master="":NoSchedule
Check:
kubectl describe nodes $HOSTNAME | grep -i Taints
kubectl describe nodes | grep -i taint
Install “docker-ce, kubelet, kubeadm and kubectl”:
https://www.tmade.de/wiki/doku.php?id=docker:kubernetes#install
https://www.tmade.de/wiki/doku.php?id=docker:docker#install
Note: Set proxy settings for master and worker if running behind a proxy (“/etc/environment”)!!
To join the cluster:
useradd -m kubernetes
Note: sudo rights required!
su - kubernetes
sudo kubeadm join 192.168.10.5:6443 --token abcdefg.vfxyrqvmgmasdfgd --discovery-token-ca-cert-hash sha256:4256123788006008703a33fafc2
sudo kubeadm join --token <token> <control-plane-host>:<control-plane-port> --discovery-token-ca-cert-hash sha256:<hash>
On CRI-O:
kubeadm join 192.168.10.5:6443 --token dbdndgmtzf,ug,idfg --discovery-token-ca-cert-hash sha256:<hash> --cri-socket=unix:///var/run/crio/crio.sock
Check on master:
kubectl get nodes
kubectl get nodes -o wide
Set label (on master):
sudo kubectl label node <NODENAME> node-role.kubernetes.io/worker-1=worker-1 sudo kubectl label node <NODENAME> node-role.kubernetes.io/worker-2=worker-2
sudo kubectl label node knode node-role.kubernetes.io/knode-1=knode-1
Show labels:
kubectl get nodes --show-labels
Delete label (on master):
kubectl label node <NODENAME> node-role.kubernetes.io/worker-1-
Delete node from cluster:
kubectl get nodes -o wide
kubectl drain <node name> --delete-local-data --force --ignore-daemonsets #evacuate pods
kubectl delete nodes NODENAME #on master as the user kubernetes is running as
kubeadm reset -f && iptables -F #on node as root user
iptables -t nat -F && iptables -t mangle -F && iptables -X #on node as root user
kubernetes@kmaster:~$ kubectl get nodes
NAME      STATUS   ROLES    AGE   VERSION
kmaster   Ready    master   48d   v1.13.2
knode     Ready    worker   23m   v1.13.2
Note: You can get the token via:
kubeadm token list
Cluster information:
kubectl cluster-info
If no token is listed, run
kubeadm token create --print-join-command
to create a new token and show the join command.
To delete a token:
kubeadm token delete TOKEN(ID)
kubectl apply -f dashboard-adminuser.yaml
kubectl delete -f dashboard-adminuser.yaml
kubectl create serviceaccount myuser
kubectl create serviceaccount --namespace kube-system test
kubectl get serviceaccounts admin-user --namespace=kube-system -o yaml
kubectl get serviceaccount --all-namespaces
kubectl get serviceaccounts myuser -o yaml
kubectl get secret | grep myuser
kubectl get secret myuser-token-1yvwg -o yaml #the exact name of "myuser-token-abcde" you get via "kubectl get secret | grep myuser"
kubectl delete serviceaccount -n kube-system kubernetes-dashboard #namespace=kube-system, username=kubernetes-dashboard
Create service account “admin-user”:
cat <<EOF | kubectl create -f - apiVersion: v1 kind: ServiceAccount metadata: name: admin-user namespace: kube-system EOF
Create ClusterRoleBinding:
cat <<EOF | kubectl create -f - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: admin-user roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cluster-admin subjects: - kind: ServiceAccount name: admin-user namespace: kube-system EOF
Get the Bearer Token:
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Create a “ClusterRoleBinding” account and log in without authentication (just for testing purposes!!):
cat <<EOF | kubectl create -f -
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
labels:
k8s-app: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
EOF
Note: Just press “skip” on the dashboard to log in!
Install “bash-completion” (if not already installed):
apt-get install bash-completion
Set up autocompletion in bash for the current shell and permanently:
source <(kubectl completion bash) echo "source <(kubectl completion bash)" >> ~/.bashrc source <(kubeadm completion bash) echo "source <(kubeadm completion bash)" >> ~/.bashrc
Note: This has to be done for each user!
Additional aliases (set in “/etc/bash.bashrc”) may not work anymore after adding the completion!
Solution:
cat << EOF >> ~/.bashrc
# Source global definitions
if [ -f /etc/bash.bashrc ]; then
. /etc/bash.bashrc
fi
EOF
kubeadm reset -f
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
Delete node from cluster:
kubectl drain <node-name> --delete-local-data --force --ignore-daemonsets kubectl delete node <node-name>
If you want to reset the whole cluster to the state after a fresh install, just run this on each node:
#!/bin/bash
#author: tmade
#version: 1.2
HOME="/home/kubernetes"
sudo kubeadm reset -f
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
rm -r ${HOME}/.kube 2> /dev/null
sudo kubeadm reset -f
sudo iptables -F && sudo iptables -t nat -F && sudo iptables -t mangle -F && sudo iptables -X
sudo apt-get purge kubeadm kubectl kubelet kubernetes-cni kube*
sudo apt-get autoremove
As the kubernetes service user:
sudo rm -rf ~/.kube
/var/lib/kubelet/kubeadm-flags.env #auto-generated by kubeadm at runtime and should not be edited
You can add additional kubelet flags in
/etc/default/kubelet
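A minimal example of such a file (the flag is only an illustration; “KUBELET_EXTRA_ARGS” is the variable evaluated by the kubeadm packages):
#/etc/default/kubelet
KUBELET_EXTRA_ARGS="--node-ip=192.168.10.5"
Restart the kubelet afterwards: systemctl restart kubelet.service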
Kubeconfig folder:
/etc/kubernetes
Info:
kubectl get persistentvolumes --all-namespaces -o wide
kubectl get persistentvolumeclaims --all-namespaces -o wide
kubectl get storageclasses.storage.k8s.io
kubectl get pv,pvc,sc -A
PersistentVolume:
kind: PersistentVolume
apiVersion: v1
metadata:
name: nfs-test1
labels:
type: nfs # optional
spec:
storageClassName: local-storage
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
mountOptions:
- hard
- nfsvers=4.1
nfs: # type
server: 192.168.10.6 # IP NFS-host
path: /nfs-share # path
PersistentVolumeClaim:
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: nfs-test1-claim1
namespace: default
spec:
storageClassName: local-storage
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
Storage class:
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
Check also on https://kubernetes.io/docs/concepts/storage/storage-classes/
kubectl get storageclass
Namespace:
apiVersion: v1
kind: Namespace
metadata:
  name: <insert-namespace-name-here>
kubectl create namespace NAMESPACE
kubectl create configmap my-config --from-file=./my/file.txt
kubectl create configmap my-config --from-file=./my/dir/path/
kubectl create configmap my-config --from-literal=key1=value1 --from-literal=key2=value2
demo-configmap.yaml:
kind: ConfigMap
apiVersion: v1
metadata:
name: demo-configmap
namespace: mynamespace
labels:
app: demo-configmap
data:
index.html: "<html><body><h1>It works!</h1></body></html>"
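A minimal sketch of a pod that mounts this ConfigMap as a file (names follow the example above):
kind: Pod
apiVersion: v1
metadata:
  name: demo-configmap-pod
  namespace: mynamespace
spec:
  volumes:
  - name: html
    configMap:
      name: demo-configmap
  containers:
  - name: nginx
    image: nginx
    ports:
    - containerPort: 80
    volumeMounts:
    - name: html
      mountPath: "/usr/share/nginx/html"
The key “index.html” shows up as the file “/usr/share/nginx/html/index.html” inside the container.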
Example “nginx”:
kind: Pod
apiVersion: v1
metadata:
name: nginx-pod
labels:
app: nginx
namespace: default
spec:
volumes:
- name: nfs-test1
persistentVolumeClaim:
claimName: nfs-test1-claim1
containers:
- name: nginx-pod
image: nginx
ports:
- containerPort: 80
name: "http-server"
volumeMounts:
- name: nfs-test1
mountPath: "/usr/share/nginx/html"
subPath: site1
kind: Pod
apiVersion: v1
metadata:
name: squid-test
labels:
app: proxy
namespace: default
spec:
volumes:
- name: nfs-data1
persistentVolumeClaim:
claimName: nfs-data1-claim
containers:
- name: squid-test
image: ubuntu-squid:16.04
command: ["/bin/sh","-ce"]
args: ["/usr/local/squid/sbin/squid -z && sleep 10 && /etc/init.d/squid start && echo Squid started || echo Squid could not start, exit && while true; do sleep 30; done"]
ports:
- containerPort: 8080
name: "proxy-server"
volumeMounts:
- mountPath: "/data"
name: nfs-data1
Proxy service as “NodePort”:
kind: Service
apiVersion: v1
metadata:
labels:
app: proxy
namespace: default
name: proxy-nodeport
spec:
externalName: proxy-nodeport
ports:
- name: proxy-port-tcp
port: 8080
targetPort: 8080
nodePort: 30000
protocol: TCP
selector:
app: proxy
type: NodePort
Nginx service as “NodePort”:
apiVersion: v1
kind: Service
metadata:
labels:
app: nginx
namespace: default
name: nginx-nodeport
spec:
externalName: nginx-nodeport
ports:
- name: http-port-tcp
port: 80
targetPort: 80
nodePort: 30000
protocol: TCP
selector:
app: nginx
type: NodePort
Mysql as “ClusterIP” with “externalIPs”:
apiVersion: v1
kind: Service
metadata:
name: mysql-1234-inst-1
spec:
selector:
app: mysql-prod
ports:
- name: mysql
protocol: TCP
port: 3306
targetPort: 3306
externalIPs:
- 1.2.3.4
- 5.6.7.8
kind: Deployment
apiVersion: apps/v1
metadata:
name: squid-proxy-deployment
namespace: default
labels:
run: squid-proxy
spec:
replicas: 1
selector:
matchLabels:
run: squid-proxy
template:
metadata:
labels:
run: squid-proxy
spec:
containers:
- name: squid-proxy
image: 'ubuntu-squid-no-cache:16.04'
command: ["/bin/sh","-ce"]
args: ["/usr/local/squid/sbin/squid -N -f /etc/squid/squid.conf"]
ports:
- containerPort: 8080
protocol: TCP
restartPolicy: Always
terminationGracePeriodSeconds: 0
kubectl create secret tls --cert='/path/to/bundle.crt' --key='/path/to/cert.key' secret-name -n mynamespace kubectl create secret generic secret-name --from-file=tls.crt=mycert.crt --from-file=tls.key=mykey.key --from-file=ca.crt=intermediate.crt -n kubernetes-dashboard
{
"kind": "Scale",
"apiVersion": "autoscaling/v1",
"metadata": {
"name": "mysql",
"namespace": "default",
},
"spec": {
"replicas": 1
},
"status": {
"replicas": 1,
"selector": "app=mysql"
}
}
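This scale object addresses the scale subresource of the “mysql” deployment (see the curl scale examples further below); a plain kubectl equivalent would be:
kubectl scale deployment mysql --replicas=1
kubectl get deployment mysql -o jsonpath='{.spec.replicas}'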
https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#external-ips
https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/
https://kubernetes.io/docs/concepts/services-networking/service/
https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/
https://github.com/nginxinc/kubernetes-ingress/blob/master/docs/nginx-ingress-controllers.md
Check also on https://docs.nginx.com/nginx-ingress-controller/installation/installation-with-manifests/
#!/bin/bash
#author: Thomas Roehm
#version: 1.0
#INGRESSVERSION="v1.12.1" #works with v1.21.0
INGRESSVERSION="v2.0.3" #works with v1.22.3
echo "check also https://docs.nginx.com/nginx-ingress-controller/installation/installation-with-helm/"
rm -rf kubernetes-ingress/
git clone https://github.com/nginxinc/kubernetes-ingress/
echo ""
cd kubernetes-ingress/deployments/helm-chart
git checkout ${INGRESSVERSION}
git status
helm repo add nginx-stable https://helm.nginx.com/stable
helm repo update
helm list
echo ""
sed -i 's/kind: deployment/kind: daemonset/' values.yaml
echo ""
grep "kind: daemonset" values.yaml
echo ""
sleep 2
helm install -f values.yaml nginx-ingress .
helm list -A
kubectl get pods -A -o wide
#!/bin/bash
#author: tmade
#version: 1.3
VERSION="1.6.3"
USER="kubernetes"
HOME="/home/${USER}"
FILE="${HOME}/kubernetes-ingress"
if [ -d ${FILE} ]
then
echo ""
echo "${FILE} exists, renaming..."
echo ""
mv ${FILE} ${FILE}.bak
else
echo ""
echo "Downloading Ingress..."
echo ""
fi
cd ${HOME}
git clone https://github.com/nginxinc/kubernetes-ingress/
cd kubernetes-ingress/deployments
git checkout v${VERSION}
chown -R ${USER}:${USER} ${FILE}
echo ""
echo "Configure RBAC"
echo ""
su - ${USER} -c "kubectl apply -f ~/kubernetes-ingress/deployments/common/ns-and-sa.yaml"
echo ""
echo "create cluster roole"
echo ""
su - ${USER} -c "kubectl apply -f ~/kubernetes-ingress/deployments/rbac/rbac.yaml"
echo ""
echo "create default secret"
echo ""
su - ${USER} -c "kubectl apply -f ~/kubernetes-ingress/deployments/common/default-server-secret.yaml"
echo ""
echo "create config map"
echo ""
su - ${USER} -c "kubectl apply -f ~/kubernetes-ingress/deployments/common/nginx-config.yaml"
echo ""
echo "create custom-resource-definitions"
echo ""
su - ${USER} -c "kubectl apply -f ~/kubernetes-ingress/deployments/common/custom-resource-definitions.yaml"
echo ""
echo "Run the Ingress Controller"
echo ""
#kubectl apply -f deployment/nginx-ingress.yaml
su - ${USER} -c "kubectl apply -f ~/kubernetes-ingress/deployments/daemon-set/nginx-ingress.yaml"
echo ""
echo "Ingress Controller has been installed."
echo ""
echo "Status - please press \"ctrl + c\" when all pods are running"
echo ""
watch kubectl get pods -A -o wide
Note: If you want to define on which nodes the controller will be deployed, choose “deployment”. If you want to install it on all nodes, choose “daemon-set” (configured in the script).
kubectl delete namespace nginx-ingress
kubectl delete clusterrole nginx-ingress
kubectl delete clusterrolebinding nginx-ingress
kubectl patch ds nginx-ingress --patch "$(cat nginx-ingress-controller-patch-8080.yaml)" -n nginx-ingress
spec:
template:
spec:
containers:
- name: nginx-ingress
ports:
- containerPort: 8080
hostPort: 8080
kubectl logs -n nginx-ingress nginx-ingress-ID -f
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: test-ingress
annotations:
# nginx.ingress.kubernetes.io/secure-backends: "true"
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
spec:
tls:
- hosts:
- myside.local
secretName: test
rules:
- host: myside.local
http:
paths:
- backend:
serviceName: test1
servicePort: 80
# - host: blue.nginx.example.com
# http:
# paths:
# - backend:
# serviceName: test2
# servicePort: 80
# - host: green.nginx.example.com
# http:
# paths:
# - backend:
# serviceName: test3
# servicePort: 80
Another example:
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: simple-fanout-example
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
spec:
rules:
- host: foo.bar.com
http:
paths:
- path: /foo
backend:
serviceName: service1
servicePort: 4200
- path: /bar
backend:
serviceName: service2
servicePort: 8080
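Note: “networking.k8s.io/v1beta1” was removed in k8s v1.22. On newer clusters the same fanout example looks roughly like this (a sketch using the v1 schema; “ingressClassName” depends on your controller):
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: simple-fanout-example
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  ingressClassName: nginx
  rules:
  - host: foo.bar.com
    http:
      paths:
      - path: /foo
        pathType: Prefix
        backend:
          service:
            name: service1
            port:
              number: 4200
      - path: /bar
        pathType: Prefix
        backend:
          service:
            name: service2
            port:
              number: 8080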
https://kubernetes.io/de/docs/reference/kubectl/cheatsheet/
kubeadm init --pod-network-cidr 10.244.0.0/16
kubectl get nodes -o wide #show cluster, role and node status
kubectl get namespaces
kubectl describe nodes node1
kubectl delete nodes NODENAME
kubectl delete pods calico-node-w6qz4 -n kube-system
kubectl delete --all pods --namespace=foo #delete all pods in namespace foo
kubectl scale --replicas=0 dc -l version=mylabel
kubectl scale --replicas=1 dc -l version=mylabel
kubectl get pods --all-namespaces --field-selector 'status.phase==Failed' -o json | kubectl delete -f - #delete failed or evicted pods
kubectl get pods -o wide --all-namespaces
kubectl get pods -o wide --all-namespaces --show-labels
kubectl get pods -A -o wide
time kubectl get pods -A
kubectl get pods --all-namespaces -o=jsonpath='{range .items[*]}{"\n"}{.metadata.name}{":\t"}{range .spec.containers[*]}{.image}{", "}{end}{end}' |sort
kubectl get pods --namespace kube-system
kubectl delete pods <pod_name> --grace-period=0 --force -n <namespace>
kubectl delete --all pods --namespace <namespace>
kubectl get pods -n <namespace> | grep "searchstring-or-status" | awk '{print $1}' | xargs kubectl --namespace=<namespace> delete pod --grace-period=0 -o name
kubectl describe pods --namespace kube-system kubernetes-dashboard
kubectl describe pods -n kube-system kubernetes-dashboard
kubectl cluster-info
kubectl cluster-info dump
kubectl cordon nodename #mark the node as unschedulable. This ensures that no new pods will get scheduled while you are preparing it for removal or maintenance.
kubectl uncordon nodename #allow scheduling on the node again
kubectl version
kubectl version | base64 | tr -d '\n'
kubectl get pod -o wide
kubectl get pvc,pv -A
kubectl edit pods --namespace=kube-system kubernetes-dashboard-57df4db6b-4h9pc
kubectl exec -it --namespace=test01 ubuntu -- /bin/bash
kubectl exec -it --namespace=default squid-proxy -- /bin/bash
kubectl exec squid-proxy -- ps -ef #execute command "ps -ef" and output to stdout
kubectl get jobs --all-namespaces
kubectl get cronjobs --all-namespaces
kubectl get deployments --all-namespaces -o wide #pendant "kubectl get deploy --all-namespaces"
kubectl --namespace kube-system delete deployment kubernetes-dashboard
kubectl get services --all-namespaces
kubectl describe pod calico-node-s7ch5 -n kube-system
kubectl describe service --all-namespaces | grep -i nodeport #nodeport
kubectl get node -o=jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}'
kubectl replace -f file.yaml
kubectl replace --force -f file.yaml
kubectl apply -f file.yaml
kubectl delete -f file.yaml
kubectl autoscale deployment foo --min=2 --max=10
kubectl cp file-name pod-name:./destination-path
Loadtest:
kubectl run -i --tty load-generator --rm --image=busybox:1.28 --restart=Never -- /bin/sh -c "while sleep 0.01; do wget -q -O- http://localhost:8080; done"
Logging:
kubectl get events
kubectl get events -n default
kubectl get events -w --all-namespaces
kubectl delete events --all
kubectl logs -n kube-system <pod>
kubectl logs -n kube-system -p calico-node-xxxxx -c calico-node
kubectl logs calico-node-s7ch5 -n kube-system -c calico-node
sudo journalctl -xeu kubelet
sudo journalctl -xeuf kubelet
Default namespace:
kubectl config set-context --current --namespace=default
kubectl config set-context --current --namespace=argocd
cat >> /etc/bash.bashrc <<EOF #personal alias ..='cd ../.' alias ...='cd ../../.' alias kc='kubectl' EOF
Activate:
source /etc/bash.bashrc
kubectl get ep kube-dns -n kube-system -o wide
kubectl get svc -n kube-system -o wide | grep dns
kubectl get svc -n kube-system -o wide
kubectl get configmap -n kube-system coredns -oyaml
https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/
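Following the linked page, a quick check could look like this (the “dnsutils” pod spec comes from the Kubernetes examples repository):
kubectl apply -f https://k8s.io/examples/admin/dns/dnsutils.yaml
kubectl exec -i -t dnsutils -- nslookup kubernetes.default
kubectl exec -i -t dnsutils -- cat /etc/resolv.conf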
Renew all certificates:
sudo kubeadm alpha certs renew all
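To check the current expiration dates first (the command left “alpha” in newer releases):
sudo kubeadm alpha certs check-expiration #older releases
sudo kubeadm certs check-expiration #k8s >= v1.20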
Order:
To patch a cluster, you can run the following scripts (working for k8s >= v1.15.x).
Patch master:
#!/bin/bash
#author: Thomas Roehm
#version: 1.3
echo "You´re running version:"
echo ""
su - kubernetes -c "kubectl version"
echo ""
read -s -n 1 -p \"'Press any key to continue . . .'\"
apt-get update
apt-cache policy kubeadm | head -n 30
echo ""
read -p "Please enter k8s version you want to patch to (e. g. \"1.16.2-00\"): " VERSION
echo "Version: \"$VERSION\" will be updated!"
echo ""
apt-mark unhold kubernetes-cni kubeadm kubelet kubectl docker-ce containerd.io
apt-get update && apt-get install -y kubeadm=${VERSION}
echo ""
#echo "drain node $(hostname -s)"
#su - kubernetes -c "kubectl drain $(hostname -s) --ignore-daemonsets"
echo ""
APPLYVERSION="v$(echo ${VERSION} | cut -d "-" -f1)"
echo ""
echo "version $APPLYVERSION will be applied"
echo ""
read -s -n 1 -p \"'Press any key to continue . . .'\"
kubeadm upgrade plan
echo ""
read -s -n 1 -p \"'Press any key to continue . . .'\"
kubeadm upgrade apply ${APPLYVERSION}
#apt-cache policy docker-ce
#echo ""
#read -p "Please enter docker-ce-version you want to patch to (e. g. \"5:18.09.9~3-0~ubuntu-xenial"): " DVERSION
#echo "Version: \"$iDVERSION\" will be updated!"
#echo ""
#apt-get install -y docker-ce
echo ""
#echo "uncordon node $(hostname -s)"
echo ""
#su - kubernetes -c "kubectl uncordon $(hostname -s)"
echo "patching kublet,kubectl"
echo ""
read -p "Do you want to proceed? Please enter y or n: " PROCEED
echo ""
echo "You´ve entered: \"${PROCEED}\""
echo ""
if [ ${PROCEED} = "y" ]
then
apt-get install -y kubelet=${VERSION} kubectl=${VERSION}
apt-mark hold kubeadm kubernetes-cni kubelet kubectl docker-ce containerd.io
systemctl restart docker.service kubelet.service
systemctl status docker.service kubelet.service | cat
else
exit 1
fi
Hint: Always patch to the latest patch level within one minor version before you upgrade to the next minor version.
Example:
Running version: 1.15.3-00
Update to 1.15.6-00
Update to 1.16.X-00
Prepare/ patch worker:
#!/bin/bash
#author: Thomas Roehm
#version: 1.1
echo "Getting worker:"
echo ""
su - kubernetes -c "kubectl get nodes"
echo ""
read -p "Please enter the name of the worker you want to update: " NODENAME
echo "Worker: \"$NODENAME\" will be patched"
echo ""
su - kubernetes -c "kubectl drain ${NODENAME} --ignore-daemonsets"
#Below version k8s <= v1.15.x run:
#kubeadm upgrade node config --kubelet-version v1.15.x
kubeadm upgrade node
#Proceed or cancel
echo ""
read -p "Do you want to wait until ${NODENAME} has been patched to finish (uncordon) the patch-process? Please enter y (wait) or n: " PROCEED
echo "You´ve entered: \"$PROCEED\""
echo ""
if [ "${PROCEED}" = "y" ]
then
while read -s -p "Please enter \"p\" to proceed: " p && [[ -z "$p" ]] ;
do
echo "Please enter \"p\" to proceed"
done
su - kubernetes -c "kubectl uncordon ${NODENAME}"
echo "Uncordon ${NODENAME}"
su - kubernetes -c "kubectl get nodes -o wide"
else
exit 1
fi
Patch worker:
#!/bin/bash
#author: Thomas Roehm
#version: 1.3
echo "You´re running version:"
echo ""
kubectl version 2> /dev/null
echo ""
read -s -n 1 -p "Press any key to continue . . ."
#su - kubernetes -c "read -s -n 1 -p \"Press any key to continue . . .\""
apt-get update
apt-cache policy kubeadm | head -n 30
echo ""
read -p "Please enter k8s version you want to patch to (e. g. \"1.16.2-00\"): " VERSION
echo "Version: \"$VERSION\" will be updated!"
echo ""
apt-mark unhold kubernetes-cni kubeadm kubelet kubectl docker-ce containerd.io
apt-get update && apt-get install -y kubeadm=${VERSION} kubelet=${VERSION} kubectl=${VERSION}
systemctl restart docker.service kubelet.service
systemctl status docker.service kubelet.service | cat
apt-mark hold kubeadm kubernetes-cni kubelet kubectl docker-ce containerd.io
echo ""
echo "worker updated"
Check releases on:
https://github.com/NetApp/trident/releases
Install script:
#!/bin/bash
#author: Thomas Roehm
#version: 1.3
echo ""
echo "You can get release info on \"https://github.com/NetApp/trident/releases\""
echo ""
VERSION="20.04.0"
HOME="/home/kubernetes/"
FILE="${HOME}trident-installer-${VERSION}.tar.gz"
if [ -e $FILE ]
then
echo "${FILE} exists, please check if trident is already up to date. Wrong version referenced in script!?"
exit 1
else
echo ""
echo "patching trident..."
echo ""
sleep 3
su - kubernetes -c "cat <<EOF | kubectl create -f -
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: trident-ontap-gold #name of the storage class
provisioner: netapp.io/trident
parameters:
backendType: "ontap-nas"
#media: "ssd"
provisioningType: "thin"
allowVolumeExpansion: true
reclaimPolicy: Delete
EOF"
su - kubernetes -c "wget https://github.com/NetApp/trident/releases/download/v${VERSION}/trident-installer-${VERSION}.tar.gz -P ${HOME}"
su - kubernetes -c "mv ~/trident-installer ~/trident-installer.old"
su - kubernetes -c "tar -xzf trident-installer-${VERSION}.tar.gz"
su - kubernetes -c "mkdir ~/trident-installer/setup"
su - kubernetes -c "cp -a ~/trident-installer.old/setup/backend.json ~/trident-installer/setup/."
su - kubernetes -c "~/trident-installer/tridentctl uninstall -n trident"
su - kubernetes -c "~/trident-installer/tridentctl install -n trident"
su - kubernetes -c "~/trident-installer/tridentctl -n trident create backend -f ~/trident-installer/setup/backend.json"
fi
Note: The config file (“backend.json”) has to be created within “~/trident-installer/setup/” beforehand!
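A minimal example of such a “backend.json” for the “ontap-nas” driver (all values are placeholders - check the NetApp Trident documentation for your release):
{
  "version": 1,
  "storageDriverName": "ontap-nas",
  "backendName": "ontap-nas-backend",
  "managementLIF": "10.0.0.1",
  "dataLIF": "10.0.0.2",
  "svm": "svm_nfs",
  "username": "admin",
  "password": "secret"
}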
#!/bin/bash
# https://github.com/helm/helm/releases
VERSION="3.4.2"
wget https://get.helm.sh/helm-v${VERSION}-linux-amd64.tar.gz
tar -xzf helm-v${VERSION}-linux-amd64.tar.gz
cp linux-amd64/helm /usr/bin/.
rm -r helm-v${VERSION}-linux-amd64.tar.gz linux-amd64/
helm help
helm help template
helm template chart-name helm-folder #test if it renders correctly
helm template my-example-app my-example-folder
helm template --output-dir out .
helm template -f values1.json -f values2.json -f values3.yaml my-example-app . --debug > expected.yaml
helm install my-nginx-ingress .
helm repo add stable https://charts.helm.sh/stable
helm repo update
helm repo list
helm repo remove stable
helm list #list installed helm charts
helm upgrade --set scale=9,tag="1.13" helm-chart-name ./helm-chart-dir #overwrite values from the defined "values.yaml"
helm upgrade squid-proxy . --values values.yaml #updates helm chart "squid-proxy" within the squid helm folder
helm upgrade squid-proxy .
helm delete helm-chart-name #"helm-chart-name" you can get via "helm list"
helm rollback helm-chart-name 1 #"1" = revision ID you get from "helm list"
helm create test #creates a helm test template / folder structure (which can be modified)
source <(helm completion bash) #to load completions in your current shell session
helm completion bash > /etc/bash_completion.d/helm #to load completions for every new session (run once as root or with sudoers rights)
helm template -f values1.yaml -f values2.yaml helm-chart-name . --output-dir test --debug
helm install --name my-release \
--set mysqlRootPassword=secretpassword,mysqlUser=my-user,mysqlPassword=my-password,mysqlDatabase=my-database \
stable/mysql
Structure:
templates/deployment.yaml
templates/service.yaml
templates/_route.yaml #"partials", won't be deployed because of "_" in the filename
out/ #output dir of rendered helm files/config
Chart.yaml
values.yaml
Example:
name: My awesome helm demo
version: 1.1.1
description: version control
maintainers:
- name: tmade
email: info@tmade.de
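A minimal sketch of how values from “values.yaml” are referenced inside a template (names and values are just examples):
#values.yaml
image:
  repository: nginx
  tag: "1.21"
#templates/deployment.yaml (excerpt)
      containers:
      - name: {{ .Chart.Name }}
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
Rendering can be tested with “helm template .” as shown above.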
Nginx-Ingress-Controller running as daemonset with helm:
#!/bin/bash
#author: Thomas Roehm
#version: 1.0
INGRESSVERSION="v1.12.1"
echo "check also https://docs.nginx.com/nginx-ingress-controller/installation/installation-with-helm/"
rm -rf kubernetes-ingress/
git clone https://github.com/nginxinc/kubernetes-ingress/
echo ""
cd kubernetes-ingress/deployments/helm-chart
git checkout ${INGRESSVERSION}
git status
helm repo add nginx-stable https://helm.nginx.com/stable
helm repo update
helm list
echo ""
sed -i 's/kind: deployment/kind: daemonset/' values.yaml
echo ""
grep "kind: daemonset" values.yaml
echo ""
sleep 2
helm install -f values.yaml nginx-ingress .
helm list -A
kubectl get pods -A -o wide
Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.
kubectl create namespace monitoring
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
helm install my-prometheus prometheus-community/kube-prometheus-stack -n monitoring
helm uninstall -n monitoring my-prometheus
A very useful editor with handy extensions:
Visual Studio Code (VS Code)
To get extensions while running behind a company proxy, run it like:
code --ignore-certificate-errors
To add it permanently to the favorites and launch it from the desktop (Ubuntu 20.04):
/usr/share/applications/code.desktop
Add
Exec=/usr/share/code/code --ignore-certificate-errors --no-sandbox --unity-launch %F
to the [Desktop Entry] section.
Workspace-file example:
{
"folders": [
{
"path": "../.."
},
{
"path": ".."
},
{
"name": "jenkins",
"path": "../jenkins"
}
],
"settings": {}
To get it running behind a proxy (to download plugins):
sudo apt-get install libnss3-tools
mkdir -p $HOME/.pki/nssdb #as the user that starts code
chmod 700 ~/.pki/nssdb
certutil -d sql:$HOME/.pki/nssdb -A -t "C,," -n RootCALocal -i /usr/local/share/ca-certificates/my_root.crt certutil -d sql:$HOME/.pki/nssdb -A -t "P,," -n SelfSignedLocal -i /usr/local/share/ca-certificates/my_cert.crt certutil -d sql:$HOME/.pki/nssdb -A -t ",," -n CALocal -i /usr/local/share/ca-certificates/my_intermediate.cer
https://www.percona.com/resources/videos/running-mysql-kubernetes
https://medium.com/@oliver_hu/docker-kubernetes-on-raspberry-pi-3-cb787def29d5
http://www.joseluisgomez.com/containers/kubernetes-dashboard/
https://kubernetes.io/de/docs/reference/kubectl/cheatsheet/
https://github.com/dennyzhang/cheatsheet-kubernetes-A4
https://kubernetes.io/docs/tasks/access-application-cluster/ingress-minikube/
https://github.com/kubernetes/dashboard/blob/master/docs/user/certificate-management.md
#clusterinfo
curl -X GET $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure
#deploy pod
curl -X POST -H 'Content-Type: application/yaml' --data "$(cat deploy.yaml)" $APISERVER/apis/apps/v1/namespaces/default/deployments --header "Authorization: Bearer $TOKEN" --insecure
#get pods (namespace kube-system and default)
curl -s $APISERVER/api/v1/namespaces/kube-system/pods --header "Authorization: Bearer $TOKEN" --insecure | jq '.items[].metadata.name' curl -s $APISERVER/api/v1/namespaces/default/pods --header "Authorization: Bearer $TOKEN" --insecure | jq '.items[].metadata.name'
#get deployments
curl -s $APISERVER/apis/apps/v1/namespaces/default/deployments?limit=500 --header "Authorization: Bearer $TOKEN" --insecure | jq '.items[].metadata.name' curl -s $APISERVER/apis/apps/v1/namespaces/kube-system/deployments?limit=500 --header "Authorization: Bearer $TOKEN" --insecure | jq '.items[].metadata.name'
#status:
curl -s $APISERVER/api/v1/namespaces/kube-system/pods/kube-apiserver-kmaster --header "Authorization: Bearer $TOKEN" --insecure | jq '.status.phase'
#delete deployment
curl -X DELETE $APISERVER/apis/apps/v1/namespaces/default/deployments/squid-test --header "Authorization: Bearer $TOKEN" --insecure
#scale
curl $APISERVER/apis/apps/v1/namespaces/default/deployments/mysql/scale --header "Authorization: Bearer $TOKEN" --insecure #show curl $APISERVER/apis/apps/v1/namespaces/default/deployments/mysql/scale --header "Authorization: Bearer $TOKEN" --insecure > scale.json #save and edit curl -X PUT -d@scale.yaml -H 'Content-Type: application/yaml' $APISERVER/apis/apps/v1/namespaces/default/deployments/mysql/scale --header "Authorization: Bearer $TOKEN" --insecure #update from file
Taints and tolerations allow a node to control which pods may (or may not) be scheduled on it.
https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
With “taints” you are able to schedule pods on dedicated nodes.
kubectl describe nodes | grep -E 'Name:|Taint' #show taints
kubectl taint nodes $(hostname) node-role.kubernetes.io/master- #enable scheduling pods on the master (if executed on a master), which is denied by default
kubectl taint nodes $(hostname) node-role.kubernetes.io/master="":NoSchedule #revert - the master won't be able to schedule pods anymore (except managed pods such as e.g. "coredns")
kubectl taint nodes node squid-key:NoExecute #set taint
kubectl taint nodes node key:NoExecute- #unset taint
kubectl taint nodes node key=true:NoSchedule
Show taints(detailed):
kubectl get nodes -o=custom-columns=NodeName:.metadata.name,TaintKey:.spec.taints[*].key,TaintValue:.spec.taints[*].value,TaintEffect:.spec.taints[*].effect
Toleration example:
tolerations:
- key: "key"
operator: "Equal"
value: "true"
effect: "NoSchedule"
#effect: "NoExecute"
#tolerationSeconds: 0
Example hpa:
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
annotations:
#argocd.argoproj.io/compare-options: IgnoreExtraneous
labels:
app.kubernetes.io/instance: squid-proxy
name: squid-proxy
namespace: default
spec:
maxReplicas: 10
minReplicas: 1
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: squid-proxy
targetCPUUtilizationPercentage: 10
https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/
Commands:
kubectl describe hpa
kubectl describe hpa <deployment/pod>