Incus
Jump to navigation
Jump to search
Install
# qemu-kvm is required for Incus on Ubuntu: at startup Incus requires /dev/kvm.
# On Debian you can install just incus-base to have containers only and no VMs.
apt-get install qemu-kvm incus
systemctl enable incus.socket
systemctl enable incus.service
systemctl start incus.socket
systemctl start incus.service
Configure
# Directory that will back the "default" storage pool created later
# with `incus storage create default dir ...`.
mkdir -p /var/local/incus-storage/
# Interactive setup. The lines below are the ANSWERS typed at each
# `incus admin init` prompt, in order — a transcript, not a script.
# NOTE(review): prompt order below matches current `incus admin init`;
# verify against your Incus version.
incus admin init
# Use clustering?
no # yes in the future, configure the loadbalancer and dns
# Configure a new storage pool?
no # we will configure it manually
# Create a new local network bridge?
yes
# Name of the new bridge
incusbr0
# IPv4 address/subnet for the bridge
10.10.10.1/24 # bigger range if needed
# NAT IPv4 traffic on the bridge?
yes
# IPv6 address ("none" disables IPv6 on the bridge)
none
# Make the server available over the network?
no
# Update stale cached images automatically?
yes
# Print a YAML "init" preseed? (output shown below)
yes
config: {}
networks:
- config:
    ipv4.address: 10.10.10.1/24
    ipv4.nat: "true"
    ipv6.address: none
  description: ""
  name: incusbr0
  type: ""
  project: default
storage_pools: []
profiles:
- config: {}
  description: ""
  devices:
    eth0:
      name: eth0
      network: incusbr0
      type: nic
  name: default
projects: []
cluster: null
Kubernetes under Incus
- see https://discuss.linuxcontainers.org/t/not-able-to-run-kubernetes-inside-docker-inside-lxd/4643/2
- and https://ubuntu.com/kubernetes/charmed-k8s/docs/install-local
- and https://github.com/schu/kubedee/blob/master/lib.bash#L1159
- and https://radar231.com/posts/k3s-nodes-in-lxd-containers/
- and https://discuss.kubernetes.io/t/microk8s-in-lxd/11520/2
# Storage pool backed by the directory created earlier, and a root disk for
# the default profile.
incus storage create default dir source=/var/local/incus-storage/
incus profile device add default "root" disk pool=default path=/
# kubeadm requires the kmsg device
incus profile device add default "host_kmsg" unix-char source="/dev/kmsg" path="/dev/kmsg"
# kubeadm requires the kernel config file under /boot
incus profile device add default "host_boot" disk source=/boot path=/boot readonly=true
# kubeadm requires the kernel modules from /lib/modules
incus profile device add default "host_modules" disk source=/lib/modules path=/host/lib/modules readonly=true
# Kernel modules Kubernetes networking needs, loaded for the container
incus profile set default "linux.kernel_modules" ip_tables,netlink_diag,nf_nat,overlay
# Privileged + nesting so containerd/kubelet can run inside the container
incus profile set default "security.privileged" true
incus profile set default "security.nesting" true
# Raw LXC config: disable AppArmor confinement, mount proc/sys/cgroup
# read-write, and drop no capabilities ("-" makes incus read the value
# from stdin). Quoted 'EOF' keeps the heredoc literal (no expansion).
cat << 'EOF' | incus profile set default "raw.lxc" -
lxc.apparmor.profile=unconfined
lxc.mount.auto=proc:rw sys:rw cgroup:rw
lxc.cap.drop=
lxc.cgroup.devices.allow=a
EOF
- Now, the profile configuration shown by `incus profile show default` should be:
config:
  linux.kernel_modules: ip_tables,netlink_diag,nf_nat,overlay
  raw.lxc: |
    lxc.apparmor.profile=unconfined
    lxc.mount.auto=proc:rw sys:rw cgroup:rw
    lxc.cap.drop=
    lxc.cgroup.devices.allow=a
  security.nesting: "true"
  security.privileged: "true"
description: Default Incus profile
devices:
  eth0:
    name: eth0
    network: incusbr0
    type: nic
  host_boot:
    path: /boot
    readonly: "true"
    source: /boot
    type: disk
  host_kmsg:
    path: /dev/kmsg
    source: /dev/kmsg
    type: unix-char
  host_modules:
    path: /host/lib/modules
    readonly: "true"
    source: /lib/modules
    type: disk
  root:
    path: /
    pool: default
    type: disk
name: default
used_by: []
Now, follow Kubernetes Install section
Then, to test a cluster:
## Temporarily not HA. For an HA control plane run this variant instead
## (endpoint should point at your loadbalancer/VIP):
## kubeadm init --ignore-preflight-errors=NumCPU,Mem --cri-socket unix:/run/containerd/containerd.sock --pod-network-cidr=192.168.0.0/16 --control-plane-endpoint "10.1.100.233:6443" --upload-certs
# Single control-plane init. NumCPU/Mem preflight checks are skipped because
# they fail inside a container; the pod CIDR must match the flannel
# deployment configured below.
kubeadm init --ignore-preflight-errors=NumCPU,Mem --cri-socket unix:/run/containerd/containerd.sock --pod-network-cidr=192.168.0.0/16
# Use the admin kubeconfig generated by kubeadm
export KUBECONFIG=/etc/kubernetes/admin.conf
kubectl get all -A
## avoid error on write on /proc by kube-proxy (change the setting and remove the proxy pod to restart it)
# Setting conntrack maxPerCore/min to 0 stops kube-proxy from trying to
# write nf_conntrack sysctls under /proc, which fails in this container.
kubectl -n kube-system get configmap kube-proxy -o yaml | sed 's/maxPerCore: null/maxPerCore: 0/' | sed 's/min: null/min: 0/' | kubectl apply -f -
# Delete the pods so the DaemonSet recreates them with the new config
kubectl -n kube-system delete pods -l k8s-app=kube-proxy
kubectl get all -A
# add network
# Deploy the flannel CNI; the sed rewrites flannel's default pod CIDR
# (10.244.0.0/16) to the --pod-network-cidr passed to `kubeadm init` above —
# the two must match.
VERSION="0.27.0"
curl -fsSL "https://github.com/flannel-io/flannel/releases/download/v${VERSION}/kube-flannel.yml" | sed "s:10.244.0.0/16:192.168.0.0/16:g" | kubectl create -f -
# wait ready state
kubectl get nodes
# untaint the manager nodes to be used also as worker node
kubectl taint nodes --all node-role.kubernetes.io/control-plane-
export KUBECONFIG=/etc/kubernetes/admin.conf
kubectl get all -A