This shows you the differences between two versions of the page.
Both sides previous revisionPrevious revisionNext revision | Previous revisionNext revisionBoth sides next revision | ||
docker:kubernetes [2020/05/20 09:49] – [Ingress] tmade | docker:kubernetes [2022/10/03 15:46] – tmade | ||
---|---|---|---|
Line 1: | Line 1: | ||
- | ===== Kubernetes ===== | + | =====Kubernetes===== |
- | ====Install==== | ||
- | Install repositories, | + | [[Docker:Kubernetes: |
- | <code sh install-packages.sh> | ||
- | #!/bin/bash | ||
- | |||
- | # | ||
- | #version: 1.2 | ||
- | |||
- | echo "" | ||
- | echo "check if docker is already installed..." | ||
- | dpkg -l |grep docker-ce | ||
- | DOCKERINSTALL=$(echo $?) | ||
- | echo "" | ||
- | |||
- | if [ ${DOCKERINSTALL} -eq " | ||
- | then | ||
- | echo " | ||
- | else | ||
- | echo " | ||
- | curl -sSL https:// | ||
- | fi | ||
- | |||
- | echo "" | ||
- | echo "check k8s installation:" | ||
- | dpkg -l |grep kube* | ||
- | KUBEINSTALL=$(echo $?) | ||
- | echo "" | ||
- | |||
- | if [ ${KUBEINSTALL} -eq " | ||
- | then | ||
- | echo "k8s already installed" | ||
- | else | ||
- | echo " | ||
- | curl -s https:// | ||
- | echo "deb http:// | ||
- | fi | ||
- | |||
- | echo "" | ||
- | echo "Setup cgroupdriver as systemd" | ||
- | echo "" | ||
- | |||
- | cat > / | ||
- | { | ||
- | " | ||
- | " | ||
- | " | ||
- | " | ||
- | }, | ||
- | " | ||
- | } | ||
- | EOF | ||
- | |||
- | # Restart docker. | ||
- | |||
- | mkdir -p / | ||
- | systemctl daemon-reload | ||
- | systemctl restart docker | ||
- | |||
- | apt-get update | ||
- | apt-mark unhold kubernetes-cni kubeadm kubelet kubectl docker-ce | ||
- | echo "" | ||
- | echo "Up- or downgrade docker and k8s packages" | ||
- | echo "" | ||
- | |||
- | apt-cache policy docker-ce | ||
- | echo "" | ||
- | read -p " | ||
- | echo " | ||
- | echo "" | ||
- | apt-get install -y docker-ce=${DVERSION} --allow-downgrades | ||
- | echo "" | ||
- | apt-cache policy kubeadm | ||
- | echo "" | ||
- | read -p " | ||
- | echo " | ||
- | echo "" | ||
- | apt-get install -y kubeadm=${VERSION} kubelet=${VERSION} kubectl=${VERSION} --allow-downgrades | ||
- | echo "" | ||
- | apt-mark hold kubelet kubeadm kubectl docker-ce | ||
- | echo "" | ||
- | echo "k8s packages are installed!" | ||
- | echo "" | ||
- | </ | ||
- | |||
- | See also on https:// | ||
- | |||
- | |||
- | ==== cgroup vs. systemd driver ==== | ||
- | |||
- | https:// | ||
- | |||
- | ===docker=== | ||
- | |||
- | Changing the settings such that your container runtime and kubelet **use systemd as the cgroup driver stabilized the system!** Please note the native.cgroupdriver=systemd setup below: | ||
- | |||
- | < | ||
- | ## Install Docker CE. | ||
- | apt-get update && apt-get install docker-ce=18.06.2~ce~3-0~ubuntu | ||
- | |||
- | # Setup daemon. | ||
- | cat > / | ||
- | { | ||
- | " | ||
- | " | ||
- | " | ||
- | " | ||
- | }, | ||
- | " | ||
- | } | ||
- | EOF | ||
- | |||
- | # Restart docker. | ||
- | |||
- | mkdir -p / | ||
- | systemctl daemon-reload | ||
- | systemctl restart docker | ||
- | |||
- | </ | ||
- | |||
- | Check: | ||
- | |||
- | docker info |grep -i cgroup | ||
- | | ||
- | ===k8s=== | ||
- | |||
- | Check: | ||
- | |||
- | cat / | ||
- | |||
- | Environment=" | ||
- | EnvironmentFile=-/ | ||
- | EnvironmentFile=-/ | ||
- | |||
- | cat / | ||
- | |||
- | Change to: | ||
- | |||
- | cgroupDriver: | ||
- | | ||
- | if not already set. | ||
- | |||
- | Check also: | ||
- | |||
- | / | ||
- | |||
- | and | ||
- | |||
- | / | ||
- | | ||
- | Check after modification: | ||
- | |||
- | systemctl daemon-reload | ||
- | systemctl restart kubelet.service | ||
- | systemctl status kubelet.service | ||
- | systemctl status kubelet.service | grep " | ||
- | |||
- | ====Master==== | ||
- | |||
- | ===Manually=== | ||
- | |||
- | Run **only on master**: | ||
- | |||
- | kubeadm config images pull #pulling images before setup k8s | ||
- | kubeadm init --apiserver-advertise-address=192.168.10.5 --pod-network-cidr=192.168.0.0/ | ||
- | kubeadm init --pod-network-cidr=192.168.0.0/ | ||
- | | ||
- | **Hint: If you are running your system behind a proxy, you have to make an proxy-exclude ("/ | ||
- | |||
- | no_proxy=" | ||
- | |||
- | To start using your cluster, you need to run the following **as a regular user** with **sudo** rights: | ||
- | |||
- | useradd -s /bin/bash -m kubernetes | ||
- | su - kubernetes | ||
- | #rm -r $HOME/.kube | ||
- | mkdir -p $HOME/.kube | ||
- | sudo cp -i / | ||
- | sudo chown $(id -u):$(id -g) $HOME/ | ||
- | |||
- | Check: | ||
- | |||
- | kubectl get pods -o wide --all-namespaces | ||
- | kubectl get pods --all-namespaces -o wide -w | ||
- | kubectl get pods --all-namespaces | ||
- | |||
- | ===Setup-Script=== | ||
- | |||
- | Setup k8s - packages have to be installed previously! | ||
- | |||
- | Check out https:// | ||
- | |||
- | Download calico.yaml and dashboard.yaml and create required folder structure (check variables). | ||
- | |||
- | Setup: | ||
- | |||
- | <code sh setup-k8s.sh> | ||
- | #!/bin/bash | ||
- | |||
- | # | ||
- | #version: 1.4 | ||
- | |||
- | USER=" | ||
- | HOME="/ | ||
- | CALICO="/ | ||
- | #filename such as " | ||
- | CALICOVERS=" | ||
- | KUBEHOME=" | ||
- | # | ||
- | DASBOARD="/ | ||
- | # filename such as " | ||
- | DASHVERS=" | ||
- | PODNETWORKADDON=" | ||
- | |||
- | echo "" | ||
- | echo "Setup -------------k8s--------------" | ||
- | echo "" | ||
- | su - kubernetes -c " | ||
- | echo "" | ||
- | su - kubernetes -c "read -s -n 1 -p \" | ||
- | |||
- | apt-get update | ||
- | apt-cache policy kubeadm | ||
- | #apt-cache policy docker-ce | ||
- | |||
- | echo "" | ||
- | read -p " | ||
- | echo " | ||
- | apt-mark unhold kubernetes-cni kubeadm kubelet kubectl docker-ce | ||
- | #apt-mark unhold kubernetes-cni kubeadm kubelet kubectl docker-ce | ||
- | apt-get install -y kubeadm=${VERSION} kubelet=${VERSION} kubectl=${VERSION} | ||
- | echo "" | ||
- | read -p " | ||
- | echo "" | ||
- | echo "ip set to: \" | ||
- | echo "" | ||
- | kubeadm init --apiserver-advertise-address=${CIDR} --pod-network-cidr=${PODNETWORKADDON} | ||
- | echo "" | ||
- | read -s -n 1 -p "Press any key to continue . . ." | ||
- | echo "" | ||
- | if [ -e ${KUBEHOME} ] | ||
- | then | ||
- | echo " | ||
- | read -p "Do you want to delete \" | ||
- | echo " | ||
- | echo "" | ||
- | if [ $PROCEED = y ] | ||
- | then | ||
- | rm -r ${KUBEHOME} | ||
- | echo " | ||
- | echo "" | ||
- | read -s -n 1 -p "Press any key to continue . . ." | ||
- | else | ||
- | exit 1 | ||
- | fi | ||
- | fi | ||
- | su - ${USER} -c "mkdir -p $HOME/ | ||
- | su - ${USER} -c "sudo cp -i / | ||
- | chown -R ${USER}: | ||
- | echo "" | ||
- | echo "home \" | ||
- | echo "" | ||
- | read -s -n 1 -p "Press any key to continue . . ." | ||
- | #calico pod-network-addon | ||
- | #su - kubernetes -c " | ||
- | #su - kubernetes -c " | ||
- | su - kubernetes -c " | ||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | read -s -n 1 -p "Press any key to continue . . ." | ||
- | #install dashboard | ||
- | su - kubernetes -c " | ||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | read -s -n 1 -p "Press any key to continue . . ." | ||
- | |||
- | apt-mark hold kubernetes-cni kubeadm kubelet kubectl docker-ce | ||
- | |||
- | echo "" | ||
- | read -p "Do you want to schedule pods on your master? Please enter \" | ||
- | echo " | ||
- | echo "" | ||
- | if [ $SCHEDULE = y ] | ||
- | then | ||
- | kubectl taint nodes $(hostname) node-role.kubernetes.io/ | ||
- | fi | ||
- | |||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | |||
- | watch kubectl get pods -A -o wide | ||
- | </ | ||
- | | ||
- | ====Calico==== | ||
- | |||
- | https:// | ||
- | | ||
- | You have to deploy a pod network to the cluster. A pod network add-on **is required that your pods can communicate with each other**! | ||
- | |||
- | kubectl apply -f [podnetwork].yaml | ||
- | | ||
- | Pod network add-on´s: | ||
- | |||
- | https:// | ||
- | |||
- | Examples " | ||
- | |||
- | kubectl apply -f https:// | ||
- | | ||
- | Check also https:// | ||
- | |||
- | **Important: | ||
- | |||
- | Replace | ||
- | |||
- | < | ||
- | # Auto-detect the BGP IP address. | ||
- | - name: IP | ||
- | value: " | ||
- | </ | ||
- | |||
- | to | ||
- | |||
- | < | ||
- | # Auto-detect the BGP IP address. | ||
- | - name: IP | ||
- | value: " | ||
- | - name: IP_AUTODETECTION_METHOD | ||
- | value: " | ||
- | </ | ||
- | |||
- | in | ||
- | |||
- | calico.yaml | ||
- | | ||
- | Download: | ||
- | |||
- | curl https:// | ||
- | |||
- | Interface has to be set to (**" | ||
- | |||
- | Script to change " | ||
- | |||
- | <code sh set-interface.sh> | ||
- | #!/bin/bash | ||
- | |||
- | # | ||
- | #version: 1.1 | ||
- | |||
- | INTERFACE=" | ||
- | # | ||
- | |||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | ls -al / | ||
- | |||
- | echo "" | ||
- | read -p " | ||
- | echo " | ||
- | echo "" | ||
- | |||
- | grep -R ' | ||
- | #grep -R ' | ||
- | IFACESET=$(echo $?) | ||
- | |||
- | if [ | ||
- | then | ||
- | echo " | ||
- | else | ||
- | sed -i ' | ||
- | echo " | ||
- | fi | ||
- | </ | ||
- | |||
- | <code sh download-calico-and-set-interface.sh> | ||
- | #!/bin/bash | ||
- | |||
- | # | ||
- | #version: 1.2 | ||
- | |||
- | INTERFACE=" | ||
- | CALICOURLVERSION=" | ||
- | FILE=" | ||
- | URL=" | ||
- | USER=" | ||
- | HOME="/ | ||
- | |||
- | if [ -d ${HOME} ] | ||
- | then | ||
- | cd ${HOME} | ||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | else | ||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | exit 1 | ||
- | fi | ||
- | |||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | |||
- | curl $URL | sed ' | ||
- | |||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | CALICOVERSION=$(grep " | ||
- | echo " | ||
- | echo "" | ||
- | mv ${HOME}/ | ||
- | chown ${USER}: | ||
- | echo " | ||
- | </ | ||
- | |||
- | ====Dashboard==== | ||
- | |||
- | Install: | ||
- | |||
- | kubectl create -f https:// | ||
- | kubectl apply -f https:// | ||
- | kubectl apply -f https:// | ||
- | kubectl apply -f kubernetes-dashboard.yaml | ||
- | | ||
- | **Note: Check also https:// | ||
- | |||
- | ===Download=== | ||
- | |||
- | Download dashboard yaml and modify ttl (default is just 10 minutes). | ||
- | |||
- | <code sh download-and-modify-dashboard.sh> | ||
- | #!/bin/bash | ||
- | |||
- | # | ||
- | #version: 1.2 | ||
- | |||
- | DASHBOARDURLVERSION=" | ||
- | FILE=" | ||
- | URL=" | ||
- | USER=" | ||
- | HOME="/ | ||
- | TTL=" | ||
- | |||
- | if [ -d ${HOME} ] | ||
- | then | ||
- | cd ${HOME} | ||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | else | ||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | exit 1 | ||
- | fi | ||
- | |||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | |||
- | #download and modify, to add ttl - default ttl is just 10 min. | ||
- | su - ${USER} -c "curl $URL | sed 's/- --auto-generate-certificates/ | ||
- | |||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | echo " | ||
- | |||
- | su - ${USER} -c "mv $HOME/$FILE $HOME/ | ||
- | |||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | echo " | ||
- | </ | ||
- | |||
- | Delete: | ||
- | |||
- | kubectl -n kube-system delete deployment kubernetes-dashboard | ||
- | kubectl -n kubernetes-dashboard delete deployment kubernetes-dashboard | ||
- | kubectl -n kubernetes-dashboard delete $(kubectl -n kubernetes-dashboard get pod -o name | grep dashboard) | ||
- | | ||
- | Edit: | ||
- | |||
- | kubectl edit deployment kubernetes-dashboard -n kube-system | ||
- | kubectl edit deployment kubernetes-dashboard -n kubernetes-dashboard | ||
- | | ||
- | Show config: | ||
- | |||
- | kubectl describe pods -n kube-system kubernetes-dashboard | ||
- | kubectl describe pods -n kubernetes-dashboard kubernetes-dashboard | ||
- | | ||
- | To change login " | ||
- | |||
- | < | ||
- | spec: | ||
- | containers: | ||
- | - name: kubernetes-dashboard | ||
- | image: k8s.gcr.io/ | ||
- | ports: | ||
- | - containerPort: | ||
- | protocol: TCP | ||
- | args: | ||
- | - --auto-generate-certificates | ||
- | - --token-ttl=86400 | ||
- | </ | ||
- | |||
- | to the value you prefer (default 900 sec). If " | ||
- | |||
- | Check also on https:// | ||
- | |||
- | ===Proxy Access=== | ||
- | |||
- | It´s **not recommended** for productive use, so usage just for quick access or troubleshooting! | ||
- | |||
- | Network access on port 9999 without host restriction. **Note: MUST run as kubernetes user** (unless you run kubernetes as root)!!: | ||
- | |||
- | kubectl proxy --port 9999 --address=' | ||
- | | ||
- | Access only on localhost on default port 8001: | ||
- | |||
- | kubectl proxy | ||
- | |||
- | Access-URL: | ||
- | |||
- | http:// | ||
- | |||
- | Default access-URL: | ||
- | |||
- | http:// | ||
- | |||
- | ===HTTPS Access=== | ||
- | |||
- | A certificate - installed on the client browser - is required to access! Generate it on your kubernetes master and install it on your client. | ||
- | |||
- | Certificate (run as kubernetes user): | ||
- | |||
- | grep ' | ||
- | grep ' | ||
- | openssl pkcs12 -export -clcerts -inkey kubecfg.key -in kubecfg.crt -out kubecfg.p12 -name " | ||
- | |||
- | Further an **" | ||
- | |||
- | Create service account " | ||
- | |||
- | < | ||
- | cat <<EOF | kubectl create -f - | ||
- | apiVersion: v1 | ||
- | kind: ServiceAccount | ||
- | metadata: | ||
- | name: admin-user | ||
- | namespace: kube-system | ||
- | EOF | ||
- | </ | ||
- | |||
- | Create ClusterRoleBinding: | ||
- | |||
- | < | ||
- | cat <<EOF | kubectl create -f - | ||
- | apiVersion: rbac.authorization.k8s.io/ | ||
- | kind: ClusterRoleBinding | ||
- | metadata: | ||
- | name: admin-user | ||
- | roleRef: | ||
- | apiGroup: rbac.authorization.k8s.io | ||
- | kind: ClusterRole | ||
- | name: cluster-admin | ||
- | subjects: | ||
- | - kind: ServiceAccount | ||
- | name: admin-user | ||
- | namespace: kube-system | ||
- | EOF | ||
- | </ | ||
- | |||
- | Get the **Bearer Token**, which is required for browser login: | ||
- | |||
- | kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk ' | ||
- | |||
- | **Note:** If you run | ||
- | |||
- | kubectl get secret -n kube-system $(kubectl get serviceaccount -n kube-system -o yaml |grep admin-user | grep token | awk ' | ||
- | | ||
- | you are **not getting the bearer token** and the token has to be piped to " | ||
- | |||
- | echo " | ||
- | | ||
- | With " | ||
- | |||
- | Access URL: | ||
- | |||
- | https://< | ||
- | https://< | ||
- | |||
- | Example: | ||
- | |||
- | https:// | ||
- | https:// | ||
- | | ||
- | **Note**: Cluster info to get access information: | ||
- | |||
- | kubectl cluster-info | ||
- | |||
- | === Login with Kubeconfig === | ||
- | |||
- | Get the configuration file: | ||
- | |||
- | kubectl config view --raw | ||
- | |||
- | Save content to a file and reference it on login. | ||
- | |||
- | Check also on https:// | ||
- | |||
- | === Own Certificate === | ||
- | |||
- | To avoid having always the same default certificate name (" | ||
- | |||
- | Create the *.csr, *.crt, *.p12 and *.key: | ||
- | |||
- | openssl req -out kubecfg-myhostname.csr -new -newkey rsa:4096 -nodes -keyout kubecfg-myhostname.key -subj "/ | ||
- | sudo openssl x509 -req -in kubecfg-myhostname.csr -CA / | ||
- | openssl pkcs12 -export -clcerts -inkey kubecfg-myhostname.key -in kubecfg-myhostname.crt -out kubecfg-myhostname.p12 -name " | ||
- | | ||
- | **Note:** The " | ||
- | |||
- | Check certificate: | ||
- | |||
- | openssl x509 -noout -text -in kubecfg-myhostname.crt | ||
- | |||
- | Create a service account (who matches the CN): | ||
- | |||
- | < | ||
- | cat <<EOF | kubectl create -f - | ||
- | apiVersion: v1 | ||
- | kind: ServiceAccount | ||
- | metadata: | ||
- | name: admin-user | ||
- | namespace: kube-system | ||
- | EOF | ||
- | </ | ||
- | |||
- | Create clusterrolebinding for serviceaccount: | ||
- | |||
- | < | ||
- | cat <<EOF | kubectl create -f - | ||
- | apiVersion: rbac.authorization.k8s.io/ | ||
- | kind: ClusterRoleBinding | ||
- | metadata: | ||
- | name: admin-user-binding | ||
- | roleRef: | ||
- | apiGroup: rbac.authorization.k8s.io | ||
- | kind: ClusterRole | ||
- | name: cluster-admin | ||
- | subjects: | ||
- | - kind: ServiceAccount | ||
- | name: admin-user | ||
- | namespace: kube-system | ||
- | EOF | ||
- | </ | ||
- | |||
- | Create clusterrolebinding for the user (token): | ||
- | |||
- | < | ||
- | cat <<EOF | kubectl create -f - | ||
- | apiVersion: rbac.authorization.k8s.io/ | ||
- | kind: ClusterRoleBinding | ||
- | metadata: | ||
- | name: admin-user-binding | ||
- | roleRef: | ||
- | apiGroup: rbac.authorization.k8s.io | ||
- | kind: ClusterRole | ||
- | name: cluster-admin | ||
- | subjects: | ||
- | - apiGroup: rbac.authorization.k8s.io | ||
- | kind: User | ||
- | name: admin-user | ||
- | EOF | ||
- | </ | ||
- | |||
- | Get the bearer token: | ||
- | |||
- | kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk ' | ||
- | | ||
- | ====Script to add dashboard user==== | ||
- | |||
- | <code sh add_dashboard_user.sh> | ||
- | #/bin/bash | ||
- | |||
- | echo " | ||
- | |||
- | # | ||
- | #version: 1.3 | ||
- | |||
- | C=" | ||
- | ST=" | ||
- | L=" | ||
- | O=" | ||
- | OU=" | ||
- | CN=" | ||
- | MAIL=" | ||
- | CERT=" | ||
- | |||
- | if [ -d ${CERT} ] | ||
- | then | ||
- | cd ${CERT} | ||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | else | ||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | exit 1 | ||
- | fi | ||
- | |||
- | openssl req -out kubecfg-${CN}.csr -new -newkey rsa:4096 -nodes -keyout kubecfg-${CN}.key -subj "/ | ||
- | sudo openssl x509 -req -in kubecfg-${CN}.csr -CA / | ||
- | openssl pkcs12 -export -clcerts -inkey kubecfg-${CN}.key -in kubecfg-${CN}.crt -out kubecfg-${CN}.p12 -name " | ||
- | |||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | |||
- | cat <<EOF | kubectl create -f - | ||
- | apiVersion: v1 | ||
- | kind: ServiceAccount | ||
- | metadata: | ||
- | name: ${CN} | ||
- | namespace: kube-system | ||
- | EOF | ||
- | |||
- | cat <<EOF | kubectl create -f - | ||
- | apiVersion: rbac.authorization.k8s.io/ | ||
- | kind: ClusterRoleBinding | ||
- | metadata: | ||
- | name: ${CN}-user-binding | ||
- | roleRef: | ||
- | apiGroup: rbac.authorization.k8s.io | ||
- | kind: ClusterRole | ||
- | name: cluster-admin | ||
- | subjects: | ||
- | - apiGroup: rbac.authorization.k8s.io | ||
- | kind: User | ||
- | name: ${CN} | ||
- | EOF | ||
- | |||
- | cat <<EOF | kubectl create -f - | ||
- | apiVersion: rbac.authorization.k8s.io/ | ||
- | kind: ClusterRoleBinding | ||
- | metadata: | ||
- | name: ${CN}-sa-binding | ||
- | roleRef: | ||
- | apiGroup: rbac.authorization.k8s.io | ||
- | kind: ClusterRole | ||
- | name: cluster-admin | ||
- | subjects: | ||
- | - kind: ServiceAccount | ||
- | name: ${CN} | ||
- | namespace: kube-system | ||
- | EOF | ||
- | </ | ||
- | ====Minicube - Pods on Master==== | ||
- | |||
- | Remove the taints on the master so that you can schedule pods on it (doesn´t work by default): | ||
- | |||
- | kubectl taint nodes $(hostname) node-role.kubernetes.io/ | ||
- | |||
- | Revert: | ||
- | |||
- | kubectl taint nodes $(hostname) node-role.kubernetes.io/ | ||
- | kubectl taint nodes $(hostname) node-role.kubernetes.io/ | ||
- | kubectl taint nodes --all node-role.kubernetes.io/ | ||
- | | ||
- | Check: | ||
- | |||
- | kubectl describe nodes $HOSTNAME |grep -i Taints | ||
- | kubectl describe nodes |grep -i taint | ||
- | |||
- | ====Worker-Node==== | ||
- | |||
- | Install " | ||
- | |||
- | https:// | ||
- | |||
- | https:// | ||
- | |||
- | **Note: Set proxy settings for master and worker if running behind a proxy ("/ | ||
- | |||
- | To join the cluster: | ||
- | |||
- | useradd -m kubernetes | ||
- | |||
- | **Note: sudo rights required!** | ||
- | |||
- | su - kubernetes | ||
- | sudo kubeadm join 192.168.10.5: | ||
- | sudo kubeadm join --token < | ||
- | |||
- | Check on master: | ||
- | |||
- | kubectl get nodes | ||
- | kubectl get nodes -o wide | ||
- | kubectl delete node NODENAME | ||
- | |||
- | Set label (on master): | ||
- | |||
- | sudo kubectl label node NODENAME node-role.kubernetes.io/ | ||
- | sudo kubectl label node NODENAME node-role.kubernetes.io/ | ||
- | |||
- | sudo kubectl label node knode node-role.kubernetes.io/ | ||
- | |||
- | Delete label (on master): | ||
- | |||
- | kubectl label node NODENAME node-role.kubernetes.io/ | ||
- | |||
- | Delete node from cluster: | ||
- | |||
- | kubectl get nodes -o wide | ||
- | kubectl drain <node name> --delete-local-data --force --ignore-daemonsets | ||
- | kubectl delete nodes NODENAME | ||
- | kubeadm reset -f && iptables -F #on node as root user | ||
- | iptables -t nat -F && iptables -t mangle -F && iptables -X #on node as root user | ||
- | |||
- | < | ||
- | kubernetes@kmaster: | ||
- | NAME STATUS | ||
- | kmaster | ||
- | knode | ||
- | </ | ||
- | |||
- | **Note**: The token you can get via: | ||
- | |||
- | kubeadm token list | ||
- | |||
- | Cluster information: | ||
- | |||
- | kubectl cluster-info | ||
- | | ||
- | If no token is listed, run | ||
- | |||
- | kubeadm token create --print-join-command | ||
- | | ||
- | to create a new token and show join command. | ||
- | |||
- | To delete a token: | ||
- | |||
- | kubeadm token delete TOKEN(ID) | ||
- | ====Service Accounts==== | ||
- | |||
- | kubectl apply -f dashboard-adminuser.yaml | ||
- | kubectl delete -f dashboard-adminuser.yaml | ||
- | kubectl create serviceaccount myuser | ||
- | kubectl create serviceaccount --namespace kube-system test | ||
- | kubectl get serviceaccounts admin-user --namespace=kube-system -o yaml | ||
- | kubectl get serviceaccount --all-namespaces | ||
- | kubectl get serviceaccounts myuser -o yaml | ||
- | kubectl get secret | grep myuser | ||
- | kubectl get secret myuser-token-1yvwg -o yaml #the exact name from " | ||
- | kubectl delete serviceaccount -n kube-system kubernetes-dashboard | ||
- | |||
- | Create service account " | ||
- | |||
- | < | ||
- | cat <<EOF | kubectl create -f - | ||
- | apiVersion: v1 | ||
- | kind: ServiceAccount | ||
- | metadata: | ||
- | name: admin-user | ||
- | namespace: kube-system | ||
- | EOF | ||
- | </ | ||
- | |||
- | Create ClusterRoleBinding: | ||
- | |||
- | < | ||
- | cat <<EOF | kubectl create -f - | ||
- | apiVersion: rbac.authorization.k8s.io/ | ||
- | kind: ClusterRoleBinding | ||
- | metadata: | ||
- | name: admin-user | ||
- | roleRef: | ||
- | apiGroup: rbac.authorization.k8s.io | ||
- | kind: ClusterRole | ||
- | name: cluster-admin | ||
- | subjects: | ||
- | - kind: ServiceAccount | ||
- | name: admin-user | ||
- | namespace: kube-system | ||
- | EOF | ||
- | </ | ||
- | |||
- | Get the Bearer Token: | ||
- | |||
- | kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk ' | ||
- | | ||
- | Create an " | ||
- | |||
- | < | ||
- | cat <<EOF | kubectl create -f - | ||
- | apiVersion: rbac.authorization.k8s.io/ | ||
- | kind: ClusterRoleBinding | ||
- | metadata: | ||
- | name: kubernetes-dashboard | ||
- | labels: | ||
- | k8s-app: kubernetes-dashboard | ||
- | roleRef: | ||
- | apiGroup: rbac.authorization.k8s.io | ||
- | kind: ClusterRole | ||
- | name: cluster-admin | ||
- | subjects: | ||
- | - kind: ServiceAccount | ||
- | name: kubernetes-dashboard | ||
- | namespace: kube-system | ||
- | EOF | ||
- | </ | ||
- | |||
- | **Note**: Just push " | ||
- | |||
- | ====bash-completion==== | ||
- | |||
- | Install " | ||
- | |||
- | apt-get install bash-completion | ||
- | |||
- | Setup autocomplete in bash into the current shell and permanently: | ||
- | |||
- | source < | ||
- | echo " | ||
- | source < | ||
- | echo " | ||
- | | ||
- | **Note**: This has to be done for each user! | ||
- | |||
- | Additional aliases (set in "/ | ||
- | |||
- | Solution: | ||
- | |||
- | < | ||
- | cat << EOF >> ~/.bashrc | ||
- | # Source global definitions | ||
- | if [ -f / | ||
- | . / | ||
- | fi | ||
- | EOF | ||
- | </ | ||
- | |||
- | ====Reset Cluster ==== | ||
- | |||
- | If you want to **reset the whole cluster** to the state after a fresh install, just run this on each node: | ||
- | |||
- | sudo kubeadm reset -f | ||
- | iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X | ||
- | |||
- | Delete: | ||
- | |||
- | kubectl drain < | ||
- | kubectl delete node < | ||
- | |||
- | ====Single-Node-Cluster==== | ||
- | |||
- | https:// | ||
- | |||
- | |||
- | ====Uninstall==== | ||
- | |||
- | sudo kubeadm reset -f | ||
- | sudo iptables -F && sudo iptables -t nat -F && sudo iptables -t mangle -F && sudo iptables -X | ||
- | sudo apt-get purge kubeadm kubectl kubelet kubernetes-cni kube* | ||
- | sudo apt-get autoremove | ||
- | |||
- | Inside kubernetes-service user: | ||
- | | ||
- | sudo rm -rf ~/.kube | ||
- | |||
- | | ||
- | |||
- | ====Config==== | ||
- | |||
- | / | ||
- | you can add flags in | ||
- | / | ||
- | Kubeconfig folder: | ||
- | / | ||
- | | ||
- | ====Persistent Volume==== | ||
- | |||
- | Info: | ||
- | |||
- | kubectl get persistentvolumes --all-namespaces -o wide | ||
- | kubectl get persistentvolumeclaims --all-namespaces -o wide | ||
- | kubectl get storageclasses.storage.k8s.io | ||
- | kubectl get pv,pvc,sc -A | ||
- | |||
- | PersistentVolume: | ||
- | |||
- | < | ||
- | kind: PersistentVolume | ||
- | apiVersion: v1 | ||
- | metadata: | ||
- | name: nfs-test1 | ||
- | labels: | ||
- | type: nfs # optional | ||
- | spec: | ||
- | storageClassName: | ||
- | capacity: | ||
- | storage: 1Gi | ||
- | accessModes: | ||
- | - ReadWriteMany | ||
- | mountOptions: | ||
- | - hard | ||
- | - nfsvers=4.1 | ||
- | nfs: # type | ||
- | server: 192.168.10.6 | ||
- | path: / | ||
- | </ | ||
- | |||
- | PersistentVolumeClaim: | ||
- | |||
- | < | ||
- | kind: PersistentVolumeClaim | ||
- | apiVersion: v1 | ||
- | metadata: | ||
- | name: nfs-test1-claim1 | ||
- | namespace: default | ||
- | spec: | ||
- | storageClassName: | ||
- | accessModes: | ||
- | - ReadWriteMany | ||
- | resources: | ||
- | requests: | ||
- | storage: 1Gi | ||
- | </ | ||
- | |||
- | Storage class: | ||
- | |||
- | < | ||
- | kind: StorageClass | ||
- | apiVersion: storage.k8s.io/ | ||
- | metadata: | ||
- | name: local-storage | ||
- | provisioner: | ||
- | volumeBindingMode: | ||
- | </ | ||
- | |||
- | Check also on https:// | ||
- | |||
- | kubectl get storageclass | ||
- | |||
- | Namespace: | ||
- | |||
- | < | ||
- | apiVersion: v1 | ||
- | kind: Namespace | ||
- | metadata: | ||
- | name: < | ||
- | </ | ||
- | |||
- | kubectl create namespace NAMESPACE | ||
- | |||
- | ====POD==== | ||
- | |||
- | ===nginx=== | ||
- | |||
- | Example " | ||
- | |||
- | < | ||
- | kind: Pod | ||
- | apiVersion: v1 | ||
- | metadata: | ||
- | name: nginx-pod | ||
- | labels: | ||
- | app: nginx | ||
- | namespace: default | ||
- | spec: | ||
- | volumes: | ||
- | - name: nfs-test1 | ||
- | persistentVolumeClaim: | ||
- | | ||
- | containers: | ||
- | - name: nginx-pod | ||
- | image: nginx | ||
- | ports: | ||
- | - containerPort: | ||
- | name: " | ||
- | volumeMounts: | ||
- | - mountPath: "/ | ||
- | name: nfs-test1 | ||
- | </ | ||
- | |||
- | ===squid=== | ||
- | |||
- | < | ||
- | kind: Pod | ||
- | apiVersion: v1 | ||
- | metadata: | ||
- | name: squid-test | ||
- | labels: | ||
- | app: proxy | ||
- | namespace: default | ||
- | spec: | ||
- | volumes: | ||
- | - name: nfs-data1 | ||
- | persistentVolumeClaim: | ||
- | | ||
- | containers: | ||
- | - name: squid-test | ||
- | image: ubuntu-squid: | ||
- | command: ["/ | ||
- | #args: ["/ | ||
- | args: ["/ | ||
- | ports: | ||
- | - containerPort: | ||
- | name: " | ||
- | volumeMounts: | ||
- | - mountPath: "/ | ||
- | name: nfs-data1 | ||
- | </ | ||
- | |||
- | ====Service==== | ||
- | |||
- | Proxy service as " | ||
- | |||
- | < | ||
- | kind: Service | ||
- | apiVersion: v1 | ||
- | metadata: | ||
- | labels: | ||
- | app: proxy | ||
- | namespace: default | ||
- | name: proxy-nodeport | ||
- | spec: | ||
- | externalName: | ||
- | ports: | ||
- | - name: proxy-port-tcp | ||
- | port: 8080 | ||
- | targetPort: 8080 | ||
- | nodePort: 30000 | ||
- | protocol: TCP | ||
- | selector: | ||
- | app: proxy | ||
- | type: NodePort | ||
- | </ | ||
- | |||
- | Nginx service as " | ||
- | |||
- | < | ||
- | apiVersion: v1 | ||
- | kind: Service | ||
- | metadata: | ||
- | labels: | ||
- | app: nginx | ||
- | namespace: default | ||
- | name: nginx-nodeport | ||
- | spec: | ||
- | externalName: | ||
- | ports: | ||
- | - name: http-port-tcp | ||
- | port: 80 | ||
- | targetPort: 80 | ||
- | nodePort: 30000 | ||
- | protocol: TCP | ||
- | selector: | ||
- | app: nginx | ||
- | type: NodePort | ||
- | </ | ||
- | |||
- | Mysql as " | ||
- | |||
- | < | ||
- | apiVersion: v1 | ||
- | kind: Service | ||
- | metadata: | ||
- | name: mysql-1234-inst-1 | ||
- | spec: | ||
- | selector: | ||
- | app: mysql-prod | ||
- | ports: | ||
- | - name: mysql | ||
- | protocol: TCP | ||
- | port: 3306 | ||
- | targetPort: 3306 | ||
- | externalIPs: | ||
- | - 1.2.3.4 | ||
- | </ | ||
- | |||
- | ====Deployment==== | ||
- | |||
- | < | ||
- | kind: Deployment | ||
- | apiVersion: apps/v1 | ||
- | metadata: | ||
- | name: squid-proxy-deployment | ||
- | namespace: default | ||
- | labels: | ||
- | run: squid-proxy | ||
- | namespace: default | ||
- | spec: | ||
- | replicas: 1 | ||
- | selector: | ||
- | matchLabels: | ||
- | run: squid-proxy | ||
- | template: | ||
- | metadata: | ||
- | labels: | ||
- | run: squid-proxy | ||
- | spec: | ||
- | containers: | ||
- | - name: squid-proxy | ||
- | image: ' | ||
- | command: ["/ | ||
- | args: ["/ | ||
- | ports: | ||
- | - containerPort: | ||
- | protocol: TCP | ||
- | restartPolicy: | ||
- | terminationGracePeriodSeconds: | ||
- | |||
- | </ | ||
- | |||
- | ====Secret==== | ||
- | |||
- | kubectl create secret tls --cert='/ | ||
- | kubectl create secret generic secret-name --from-file=tls.crt=mycert.crt --from-file=tls.key=mykey.key --from-file=ca.crt=intermediate.crt -n kubernetes-dashboard | ||
- | |||
- | |||
- | ====Ingress==== | ||
- | |||
- | https:// | ||
- | |||
- | https:// | ||
- | |||
- | https:// | ||
- | |||
- | https:// | ||
- | |||
- | https:// | ||
- | |||
- | https:// | ||
- | |||
- | === Install nginx ingress-controller === | ||
- | |||
- | <code sh install-nginx-ingress-controller.sh> | ||
- | #!/bin/bash | ||
- | |||
- | # | ||
- | #version: 1.1 | ||
- | |||
- | VERSION=" | ||
- | HOME="/ | ||
- | FILE=" | ||
- | |||
- | if [ -f $FILE ] | ||
- | then | ||
- | echo " | ||
- | else | ||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | |||
- | mv ${FILE} ${FILE}.bak | ||
- | fi | ||
- | |||
- | git clone https:// | ||
- | cd kubernetes-ingress/ | ||
- | git checkout v${VERSION} | ||
- | |||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | |||
- | kubectl apply -f common/ | ||
- | |||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | |||
- | kubectl apply -f rbac/ | ||
- | |||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | |||
- | kubectl apply -f common/ | ||
- | |||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | |||
- | kubectl apply -f common/ | ||
- | |||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | |||
- | kubectl apply -f common/ | ||
- | |||
- | echo "" | ||
- | echo "Run the Ingress Controller" | ||
- | echo "" | ||
- | |||
- | #kubectl apply -f deployment/ | ||
- | kubectl apply -f daemon-set/ | ||
- | |||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | |||
- | kubectl get pods -A -o wide | ||
- | </ | ||
- | |||
- | **Note:** If you want to define on which nodes the controller will be deployed choose " | ||
- | |||
- | === Uninstall nginx ingress-controller === | ||
- | |||
- | kubectl delete namespace nginx-ingress | ||
- | kubectl delete clusterrole nginx-ingress | ||
- | kubectl delete clusterrolebinding nginx-ingress | ||
- | |||
- | === Patch Ingress Controler === | ||
- | |||
- | kubectl patch ds nginx-ingress --patch "$(cat nginx-ingress-controller-patch-8080.yaml)" | ||
- | |||
- | <code sh nginx-ingress-controller-patch-8080.yaml> | ||
- | spec: | ||
- | template: | ||
- | spec: | ||
- | containers: | ||
- | - name: nginx-ingress | ||
- | ports: | ||
- | - containerPort: | ||
- | | ||
- | </ | ||
- | |||
- | === Example ingress resource === | ||
- | |||
- | < | ||
- | apiVersion: networking.k8s.io/ | ||
- | kind: Ingress | ||
- | metadata: | ||
- | name: test-ingress | ||
- | annotations: | ||
- | # nginx.ingress.kubernetes.io/ | ||
- | # nginx.ingress.kubernetes.io/ | ||
- | kubernetes.io/ | ||
- | nginx.ingress.kubernetes.io/ | ||
- | nginx.ingress.kubernetes.io/ | ||
- | spec: | ||
- | tls: | ||
- | - hosts: | ||
- | - myside.local | ||
- | secretName: test | ||
- | rules: | ||
- | - host: myside.local | ||
- | http: | ||
- | paths: | ||
- | - backend: | ||
- | serviceName: | ||
- | servicePort: | ||
- | # - host: blue.nginx.example.com | ||
- | # http: | ||
- | # paths: | ||
- | # - backend: | ||
- | # serviceName: | ||
- | # servicePort: | ||
- | # - host: green.nginx.example.com | ||
- | # http: | ||
- | # paths: | ||
- | # - backend: | ||
- | # serviceName: | ||
- | # servicePort: | ||
- | |||
- | </ | ||
- | |||
- | ====Commands==== | ||
- | |||
- | kubeadm init --pod-network-cidr 10.244.0.0/ | ||
- | kubectl get nodes -o wide #show cluster, role and node status | ||
- | kubectl get namespaces | ||
- | kubectl describe nodes node1 | ||
- | kubectl delete nodes NODENAME | ||
- | kubectl delete pods calico-node-w6qz4 -n kube-system | ||
- | kubectl get pods --all-namespaces --field-selector ' | ||
- | kubectl get pods -o wide --all-namespaces | ||
- | kubectl get pods -o wide --all-namespaces --show-labels | ||
- | kubectl get pods -A -o wide | ||
- | time kubectl get pods -A | ||
- | kubectl get pods --all-namespaces -o=jsonpath=' | ||
- | kubectl get pods --namespace kube-system | ||
- | kubectl delete pods < | ||
- | kubectl delete --all pods --namespace < | ||
- | kubectl get pods -n < | ||
- | kubectl describe pods --namespace kube-system kubernetes-dashboard | ||
- | kubectl describe pods -n kube-system kubernetes-dashboard | ||
- | kubectl cluster-info | ||
- | kubectl cluster-info dump | ||
- | kubectl cordon nodename | ||
- | kubectl uncordon nodename | ||
- | kubectl version | ||
- | kubectl version | base64 | tr -d ' | ||
- | kubectl get pod -o wide | ||
- | kubectl get pvc,pv -A | ||
- | kubectl edit pods --namespace=kube-system kubernetes-dashboard-57df4db6b-4h9pc | ||
- | kubectl exec -it --namespace=test01 ubuntu -- /bin/bash | ||
- | kubectl exec -it --namespace=default squid-proxy -- /bin/bash | ||
- | kubectl exec squid-proxy -- ps -ef #execute command "ps -ef" and output to stdout | ||
- | kubectl get jobs --all-namespaces | ||
- | kubectl get cronjobs --all-namespaces | ||
- | kubectl get deployments --all-namespaces -o wide #pendant " | ||
- | kubectl --namespace kube-system delete deployment kubernetes-dashboard | ||
- | kubectl get services --all-namespaces | ||
- | kubectl describe pod calico-node-s7ch5 -n kube-system | ||
- | kubectl describe service --all-namespaces | grep -i nodeport | ||
- | kubectl get node -o=jsonpath=' | ||
- | kubectl replace -f file.yaml | ||
- | kubectl replace --force -f file.yaml | ||
- | kubectl apply-f file.yaml | ||
- | kubectl delete -f file.yaml | ||
- | kubectl autoscale deployment foo --min=2 --max=10 | ||
- | |||
- | Logging: | ||
- | |||
- | kubectl get events | ||
- | kubectl get events -n default | ||
- | kubectl delete events --all | ||
- | kubectl logs -n kube-system -p calico-node-xxxxx -c calico-node | ||
- | kubectl logs calico-node-s7ch5 -n kube-system -c calico-node | ||
- | sudo journalctl -xeu kubelet | ||
- | sudo journalctl -xeuf kubelet | ||
- | |||
- | ====Alias==== | ||
- | |||
- | echo "alias kg=' | ||
- | |||
- | |||
- | ====DNS==== | ||
- | |||
- | kubectl get ep kube-dns -n kube-system -o wide | ||
- | kubectl get svc -n kube-system -o wide | grep dns | ||
- | kubectl get svc -n kube-system -o wide | ||
- | kubectl get configmap -n kube-system coredns -oyaml | ||
- | | ||
- | https:// | ||
- | |||
- | ====Certificate==== | ||
- | |||
- | Renew all certificates: | ||
- | |||
- | sudo kubeadm alpha certs renew all | ||
- | | ||
- | ====Patching==== | ||
- | |||
- | Order: | ||
- | |||
- | *Patch master (patch-k8s-master.sh on master) | ||
- | *Prepare patching worker (prepare-k8s-worker.sh on master) | ||
- | *Patch worker (patch-k8s-worker.sh on worker) | ||
- | |||
- | To patch a cluster, you can run the following scripts (working for k8s >= v1.15.x). | ||
- | |||
- | Patch master: | ||
- | |||
- | <code sh patch-k8s-master.sh> | ||
- | #!/bin/bash | ||
- | |||
- | # | ||
- | #version: 1.1 | ||
- | |||
- | echo " | ||
- | echo "" | ||
- | su - kubernetes -c " | ||
- | echo "" | ||
- | read -s -n 1 -p \"' | ||
- | |||
- | apt-get update | ||
- | apt-cache policy kubeadm | ||
- | |||
- | echo "" | ||
- | read -p " | ||
- | echo " | ||
- | echo "" | ||
- | |||
- | apt-mark unhold kubernetes-cni kubeadm kubelet kubectl docker-ce | ||
- | apt-get update && apt-get install -y kubeadm=${VERSION} | ||
- | |||
- | echo "" | ||
- | #echo "drain node $(hostname -s)" | ||
- | #su - kubernetes -c " | ||
- | echo "" | ||
- | |||
- | APPLYVERSION=" | ||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | read -s -n 1 -p \"' | ||
- | kubeadm upgrade plan | ||
- | echo "" | ||
- | read -s -n 1 -p \"' | ||
- | kubeadm upgrade apply ${APPLYVERSION} | ||
- | |||
- | #apt-cache policy docker-ce | ||
- | #echo "" | ||
- | #read -p " | ||
- | #echo " | ||
- | #echo "" | ||
- | #apt-get install -y docker-ce | ||
- | echo "" | ||
- | #echo " | ||
- | echo "" | ||
- | #su - kubernetes -c " | ||
- | echo " | ||
- | echo "" | ||
- | read -p "Do you want to proceed? Please enter y or n: " PROCEED | ||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | if [ ${PROCEED} = " | ||
- | then | ||
- | apt-get install -y kubelet=${VERSION} kubectl=${VERSION} | ||
- | apt-mark hold kubeadm kubernetes-cni kubelet kubectl docker-ce | ||
- | systemctl restart docker.service kubelet.service | ||
- | systemctl status docker.service kubelet.service | cat | ||
- | else | ||
- | exit 1 | ||
- | fi | ||
- | |||
- | </ | ||
- | |||
- | **Hint:** Please patch always within one version to the latest patchlevel, before you upgrade to the new version. | ||
- | |||
- | Example: | ||
- | |||
- | Running version: 1.15.3-00 | ||
- | Update to 1.15.6-00 | ||
- | Update to 1.16.X-00 | ||
- | |||
- | Prepare/ patch worker: | ||
- | |||
- | <code sh prepare-k8s-worker.sh> | ||
- | #!/bin/bash | ||
- | |||
- | # | ||
- | #version: 1.1 | ||
- | |||
- | echo " | ||
- | echo "" | ||
- | su - kubernetes -c " | ||
- | echo "" | ||
- | read -p " | ||
- | echo " | ||
- | echo "" | ||
- | su - kubernetes -c " | ||
- | #Below version k8s <= v1.15.x run: | ||
- | #kubeadm upgrade node config --kubelet-version v1.15.x | ||
- | kubeadm upgrade node | ||
- | |||
- | #Proceed or cancel | ||
- | echo "" | ||
- | read -p "Do you want to wait until ${NODENAME} has been patched to finish (uncordon) the patch-process? | ||
- | echo " | ||
- | echo "" | ||
- | |||
- | if [ $PROCEED = y ] | ||
- | then | ||
- | while read -s -p " | ||
- | do | ||
- | echo " | ||
- | done | ||
- | su - kubernetes -c " | ||
- | echo " | ||
- | su - kubernetes -c " | ||
- | else | ||
- | exit 1 | ||
- | fi | ||
- | </ | ||
- | |||
- | Patch worker: | ||
- | |||
- | <code sh patch-k8s-worker.sh> | ||
- | #!/bin/bash | ||
- | |||
- | # | ||
- | #version: 1.1 | ||
- | |||
- | echo " | ||
- | echo "" | ||
- | su - kubernetes -c " | ||
- | echo "" | ||
- | read -s -n 1 -p "Press any key to continue . . ." | ||
- | #su - kubernetes -c "read -s -n 1 -p \" | ||
- | apt-get update | ||
- | apt-cache policy kubeadm | ||
- | |||
- | echo "" | ||
- | read -p " | ||
- | echo " | ||
- | echo "" | ||
- | |||
- | apt-mark unhold kubernetes-cni kubeadm kubelet kubectl docker-ce | ||
- | apt-get update && apt-get install -y kubeadm=${VERSION} kubelet=${VERSION} kubectl=${VERSION} | ||
- | systemctl restart docker.service kubelet.service | ||
- | systemctl status docker.service kubelet.service | cat | ||
- | apt-mark hold kubeadm kubernetes-cni kubelet kubectl docker-ce | ||
- | echo "" | ||
- | echo " | ||
- | </ | ||
- | |||
- | ====Trident==== | ||
- | |||
- | Check releases on: | ||
- | |||
- | https:// | ||
- | |||
- | Install script: | ||
- | |||
- | <code sh trident-install-or-update.sh> | ||
- | #!/bin/bash | ||
- | |||
- | # | ||
- | #version: 1.2 | ||
- | |||
- | echo "" | ||
- | echo "You can get release info on \" | ||
- | echo "" | ||
- | |||
- | VERSION=" | ||
- | HOME="/ | ||
- | FILE=" | ||
- | |||
- | if [ -e $FILE ] | ||
- | then | ||
- | echo " | ||
- | exit 1 | ||
- | else | ||
- | |||
- | echo "" | ||
- | echo " | ||
- | echo "" | ||
- | sleep 3 | ||
- | su - kubernetes -c "wget https:// | ||
- | su - kubernetes -c "mv ~/ | ||
- | su - kubernetes -c "tar -xzf trident-installer-${VERSION}.tar.gz" | ||
- | su - kubernetes -c "mkdir ~/ | ||
- | su - kubernetes -c "cp -a ~/ | ||
- | su - kubernetes -c " | ||
- | su - kubernetes -c " | ||
- | su - kubernetes -c " | ||
- | fi | ||
- | </ | ||
- | |||
- | **Note:** Configfile has to be previously created within " | ||
- | |||
- | |||
- | ====Reset k8s==== | ||
- | |||
- | <code sh reset-k8s.sh> | ||
- | #!/bin/bash | ||
- | |||
- | # | ||
- | #version: 1.1 | ||
- | |||
- | HOME="/ | ||
- | |||
- | sudo kubeadm reset -f | ||
- | iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X | ||
- | rm -r ${HOME}/ | ||
- | </ | ||
- | ====helm==== | ||
- | |||
- | |||
- | https:// | ||
- | |||
- | |||
- | ====Kubernetes Links==== | ||
- | |||
- | https:// | ||
- | |||
- | https:// | ||
- | |||
- | http:// | ||
- | |||
- | https:// | ||
- | |||
- | https:// | ||
- | |||
- | https:// | ||
- | |||
- | https:// | ||
+ | [[Docker: | ||
+ | [[Linux: |