How to install Kubernetes (kubeadm) with containerd on Linux
# Install kubeadm, kubelet and kubectl.
# NOTE: the old apt.kubernetes.io "kubernetes-xenial" repo was deprecated and
# frozen in 2023 — use the community-owned pkgs.k8s.io repos instead
# (one repo per minor version; bump the version below as needed).

sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl gpg

# /etc/apt/keyrings may not exist on older releases — create it first.
sudo mkdir -p -m 755 /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.28/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg

echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.28/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list

sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl
# Hold the packages so routine apt upgrades cannot skew cluster versions.
sudo apt-mark hold kubelet kubeadm kubectl



# Disable swap (kubelet refuses to start with swap enabled by default).

# Turn swap off for the running system:
sudo swapoff -a

# ...and persist across reboots by commenting out swap entries in fstab.
# (Equivalent to opening /etc/fstab in nano and commenting the swap line,
# but idempotent and scriptable; a .bak backup is kept.)
sudo sed -ri.bak '/\sswap\s/ s/^#?/#/' /etc/fstab



# How to install containerd on Ubuntu (official release binaries).

CONTAINERD_VERSION=1.6.23
wget "https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/containerd-${CONTAINERD_VERSION}-linux-amd64.tar.gz"
sudo tar Cxzvf /usr/local "containerd-${CONTAINERD_VERSION}-linux-amd64.tar.gz"
rm "containerd-${CONTAINERD_VERSION}-linux-amd64.tar.gz"

wget https://raw.githubusercontent.com/containerd/containerd/main/containerd.service

# Put the unit where the containerd docs expect locally-installed units
# (/usr/local/lib/...); /usr/lib/systemd/system is for distro-managed
# packages. The directory may not exist yet, so create it.
sudo mkdir -p /usr/local/lib/systemd/system
sudo mv containerd.service /usr/local/lib/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable --now containerd



# Install runc (the low-level OCI runtime containerd shells out to).
RUNC_VERSION=1.1.1
wget "https://github.com/opencontainers/runc/releases/download/v${RUNC_VERSION}/runc.amd64"
sudo install -m 755 runc.amd64 /usr/local/sbin/runc
rm runc.amd64

# Generate a default containerd config...
sudo mkdir -p /etc/containerd/
containerd config default | sudo tee /etc/containerd/config.toml

# ...and switch the runc cgroup driver to systemd, which is what kubelet
# expects on systemd-based distros. ('=' needs no escaping in sed.)
sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml

sudo systemctl restart containerd
# --no-pager: avoid the interactive pager. (The stray "q" keypress that was
# recorded here was just quitting that pager — it is not a command.)
sudo systemctl status containerd --no-pager




# On the control plane: (re)generate the cluster certificates.
# NOTE(review): "certs all" by itself does NOT add extra IPs to the API server
# cert — to make the API server reachable on additional addresses, pass them
# explicitly, e.g.:
#   sudo kubeadm init phase certs all --apiserver-cert-extra-sans=192.168.56.1
sudo kubeadm init phase certs all



# Useful: https://kubernetes.io/docs/setup/production-environment/container-runtimes/#install-and-configure-prerequisites

# Kernel modules needed by container runtimes: overlay for image layers,
# br_netfilter so bridged pod traffic is visible to iptables.
sudo tee /etc/modules-load.d/k8s.conf <<'EOF'
overlay
br_netfilter
EOF

# Load them immediately, without waiting for a reboot.
sudo modprobe overlay
sudo modprobe br_netfilter

# sysctl params required by setup; files under /etc/sysctl.d persist across reboots.
sudo tee /etc/sysctl.d/k8s.conf <<'EOF'
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF

# Apply sysctl params without reboot.
sudo sysctl --system

# Verify both modules are actually loaded.
lsmod | grep br_netfilter
lsmod | grep overlay



# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The INIT, only on the control plane:
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

# -f skips the interactive "[y/N]" confirmation (the stray "Y" that used to
# sit on its own line here was answering that prompt by hand).
sudo kubeadm reset -f
sudo kubeadm init phase certs all

# In VirtualBox (host-only network 192.168.56.0/24):
# NOTE: the pod CIDR must NOT overlap the node network. The old value
# 192.168.56.0/16 both overlapped the host-only subnet and was not a valid
# /16 base address — use an unused private range instead.
sudo kubeadm init --pod-network-cidr=10.244.0.0/16 --control-plane-endpoint 192.168.56.1:56443

# Other system:
sudo kubeadm init --pod-network-cidr=10.244.0.0/16 --control-plane-endpoint 10.1.2.1:6443


# Save the join command (kubeadm init prints it at the end) to a text file,
# then run it on each worker node. Tokens expire after 24h — regenerate with
# "kubeadm token create --print-join-command" if needed.

# On the worker nodes (example values; use the ones YOUR init printed):
sudo kubeadm join 192.168.56.1:56443 --token e6wsuh.kx5pfpdocfneop45 \
        --discovery-token-ca-cert-hash sha256:ff864123efb391xxxxxxxxxxxxxxxxx8b8520f73bf6b --v=5



# On the control plane node: give the regular user a kubeconfig so kubectl
# works without sudo. (Expansions quoted — the unquoted originals break on
# paths containing whitespace.)
mkdir -p "$HOME/.kube"
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
# chown to the invoking user so the file is readable without root.
sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"


# On the control plane node, also set up kubectl autocompletion.
# NOTE: requires the bash-completion package to be installed and sourced.
# Source the completion script BEFORE registering it for the "k" alias,
# and use "complete -o default" as the kubectl docs recommend.
echo "source <(kubectl completion bash)" >> ~/.bashrc
echo "alias k=kubectl" >> ~/.bashrc
echo "complete -o default -F __start_kubectl k" >> ~/.bashrc

# Activate everything in the current shell too.
source <(kubectl completion bash)
alias k=kubectl
complete -o default -F __start_kubectl k




# Check the install:
kubectl get all

# Expected output (example — these lines must stay commented, otherwise the
# shell would try to execute them as commands):
#   NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
#   service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   73s





# Install the container network interface (CNI) provider. Only on the control
# plane node, never on the worker nodes.

# Do not install: flannel
# bad: kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# bad: kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
# Do this instead (Calico):
# NOTE(review): Calico's manifest defaults its IP pool to 192.168.0.0/16; if
# the cluster was init'ed with a different --pod-network-cidr, edit
# CALICO_IPV4POOL_CIDR in the manifest to match — confirm before applying.
kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/calico.yaml

# wait for it to get ready
clear && kubectl get all --all-namespaces



# Test it out.

# Create the deployment manifest on the control-plane node. A heredoc replaces
# the interactive nano edit: it is reproducible, and the raw YAML lines no
# longer sit loose in the file where a shell would try to execute them.
cat <<'EOF' > nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 20
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
EOF

# run it
kubectl apply -f nginx-deployment.yaml

# check that it's deployed
kubectl get po -o wide

# Scale it down.
# NOTE: scale the Deployment, not its ReplicaSet — the Deployment controller
# immediately reverts a hand-scaled ReplicaSet, and the ReplicaSet name
# carries a generated hash suffix anyway (which is why the original command
# needed a literal <TAB> completion to work at all).
kubectl scale --replicas=5 deployment/nginx-deployment
kubectl get po -o wide

# delete the deployment
kubectl delete deployment nginx-deployment