This article uses Ubuntu 16.04 (xenial) as an example; other distributions can be set up similarly.
$ cat /etc/lsb-release
DISTRIB_ID=Ubuntu
DISTRIB_RELEASE=16.04
DISTRIB_CODENAME=xenial
DISTRIB_DESCRIPTION="Ubuntu 16.04.1 LTS"
First, install Kata Containers from its official package repository:
$ ARCH=$(arch)
$ sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/katacontainers:/releases:/${ARCH}:/master/xUbuntu_$(lsb_release -rs)/ /' > /etc/apt/sources.list.d/kata-containers.list"
$ curl -sL http://download.opensuse.org/repositories/home:/katacontainers:/releases:/${ARCH}:/master/xUbuntu_$(lsb_release -rs)/Release.key | sudo apt-key add -
$ sudo apt update
$ sudo apt install -y kata-runtime kata-proxy kata-shim
Other installation methods can be found in the official documentation.
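With the packages installed, kata-runtime's built-in checker (shipped with the 1.x releases used here) verifies that the host can actually run Kata Containers; on a capable host it reports something like:
$ sudo kata-runtime kata-check
System is capable of running Kata Containers
System can currently create Kata Containers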
Create the /etc/docker/daemon.json file if it does not exist, and make sure the following configuration is in it:
{
  "registry-mirrors": ["https://registry.docker-cn.com"],
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "default-runtime": "runc",
  "runtimes": {
    "kata-runtime": {
      "path": "kata-runtime"
    }
  }
}
Restart Docker to apply the configuration:
$ sudo systemctl restart docker
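After the restart, you can confirm that Docker has registered the new runtime; the exact docker info layout varies by Docker version, but kata-runtime should appear in the runtime list:
$ docker info | grep -i runtime
Runtimes: kata-runtime runc
Default Runtime: runc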
Test Kata Containers with Docker:
$ uname -r
4.4.0-57-generic
$ docker run --rm -it --runtime kata-runtime busybox uname -r
4.14.67-16.container
The kernel version inside the container differs from the host's, which shows the workload is running inside a Kata VM rather than on the host kernel.
Next, install containerd with its CRI plugin and bundled CNI plugins:
$ wget https://storage.googleapis.com/cri-containerd-release/cri-containerd-cni-1.2.2.linux-amd64.tar.gz
$ sudo tar --no-overwrite-dir -C / -xzf cri-containerd-cni-1.2.2.linux-amd64.tar.gz
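The tarball unpacks containerd, ctr and crictl under /usr/local/bin and the standard CNI plugins under /opt/cni/bin. Note that the io.containerd.kata.v2 runtime type configured below needs the containerd-shim-kata-v2 binary, shipped with recent kata-runtime packages, to be resolvable by containerd; a quick check:
$ containerd --version
$ command -v containerd-shim-kata-v2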
$ sudo mkdir -p /etc/cni/net.d
$ sudo bash -c 'cat > /etc/cni/net.d/10-containerd-net.conflist <<EOF
{
  "cniVersion": "0.3.1",
  "name": "containerd-net",
  "plugins": [
    {
      "type": "bridge",
      "bridge": "cni0",
      "isGateway": true,
      "ipMasq": true,
      "promiscMode": true,
      "ipam": {
        "type": "host-local",
        "subnet": "10.88.0.0/16",
        "routes": [
          { "dst": "0.0.0.0/0" }
        ]
      }
    },
    {
      "type": "portmap",
      "capabilities": {"portMappings": true}
    }
  ]
}
EOF'
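This config only references the bridge, host-local and portmap plugin types, all of which ship in the tarball's /opt/cni/bin directory; confirm they are present:
$ ls /opt/cni/bin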
Create the /etc/containerd/config.toml file with the following contents:
root = "/var/lib/containerd"
state = "/run/containerd"
oom_score = 0

[grpc]
  address = "/run/containerd/containerd.sock"
  uid = 0
  gid = 0
  max_recv_message_size = 16777216
  max_send_message_size = 16777216

[debug]
  address = ""
  uid = 0
  gid = 0
  level = "debug"

[metrics]
  address = ""
  grpc_histogram = false

[cgroup]
  path = ""

[plugins]
  [plugins.cgroups]
    no_prometheus = false
  [plugins.cri]
    stream_server_address = "127.0.0.1"
    stream_server_port = "0"
    enable_selinux = false
    sandbox_image = "k8s.gcr.io/pause:3.1"
    stats_collect_period = 10
    systemd_cgroup = false
    enable_tls_streaming = false
    max_container_log_line_size = 16384
    [plugins.cri.containerd]
      snapshotter = "overlayfs"
      no_pivot = false
      [plugins.cri.containerd.runtimes]
        [plugins.cri.containerd.runtimes.runc]
          runtime_type = "io.containerd.runc.v1"
          [plugins.cri.containerd.runtimes.runc.options]
            NoPivotRoot = false
            NoNewKeyring = false
            ShimCgroup = ""
            IoUid = 0
            IoGid = 0
            BinaryName = "/usr/local/sbin/runc"
            Root = ""
            CriuPath = ""
            SystemdCgroup = false
        [plugins.cri.containerd.runtimes.kata]
          runtime_type = "io.containerd.kata.v2"
          [plugins.cri.containerd.runtimes.kata.options]
    [plugins.cri.cni]
      bin_dir = "/opt/cni/bin"
      conf_dir = "/etc/cni/net.d"
      conf_template = ""
    [plugins.cri.x509_key_pair_streaming]
      tls_cert_file = ""
      tls_key_file = ""
  [plugins.diff-service]
    default = ["walking"]
  [plugins.linux]
    shim = "containerd-shim"
    runtime = "runc"
    runtime_root = ""
    no_shim = false
    shim_debug = false
  [plugins.opt]
    path = "/opt/containerd"
  [plugins.restart]
    interval = "10s"
  [plugins.scheduler]
    pause_threshold = 0.02
    deletion_threshold = 0
    mutation_threshold = 100
    schedule_delay = "0s"
    startup_delay = "100ms"
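A malformed config.toml will keep containerd from starting, so after restarting containerd below it is worth glancing at the journal:
$ sudo journalctl -u containerd -n 20 --no-pager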
If the host is behind a proxy, modify /lib/systemd/system/containerd.service to add an Environment option under the [Service] section, like:
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target
[Service]
ExecStartPre=/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999
LimitNOFILE=1048576
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
Environment="HTTP_PROXY=192.168.80.1:12343"
[Install]
WantedBy=multi-user.target
Reload systemd and restart containerd:
$ sudo systemctl daemon-reload
$ sudo systemctl restart containerd
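With containerd restarted, the kata handler can be smoke-tested directly with ctr before moving on to crictl. A minimal sketch, assuming the busybox image and an arbitrary task name:
$ sudo ctr image pull docker.io/library/busybox:latest
$ sudo ctr run --runtime io.containerd.kata.v2 --rm -t docker.io/library/busybox:latest kata-smoke uname -r
Also make sure crictl talks to containerd rather than its default endpoint; if the tarball did not already install /etc/crictl.yaml, create it:
$ sudo bash -c 'cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
EOF'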
Create two test YAML files:
$ cat > pod.yaml << EOF
metadata:
  attempt: 1
  name: busybox-sandbox
  namespace: default
  uid: hdishd83djaidwnduwk28bcsb
log_directory: /tmp
linux:
  namespaces:
    options: {}
EOF
$ cat > container.yaml << EOF
metadata:
  name: busybox
image:
  image: busybox:latest
command:
- top
log_path: busybox.0.log
EOF
Test containerd with crictl:
$ sudo crictl pull busybox
$ sudo crictl pull k8s.gcr.io/pause:3.1
$ sudo crictl runp -r kata pod.yaml
63f3f0d050745f1c48cfac24045de4cf01c801c48e5b850b73048f9330f533d2
$ sudo crictl pods
POD ID CREATED STATE NAME NAMESPACE ATTEMPT
63f3f0d050745 2 minutes ago Ready busybox-sandbox default 1
$ sudo crictl create 63f3f0d050745 container.yaml pod.yaml
d6d4169f06c2cdce479f734fa5d6db9fedb95a7c47b202dc1cc0376f06cfbfe1
$ sudo crictl ps -a
CONTAINER ID IMAGE CREATED STATE NAME ATTEMPT POD ID
d6d4169f06c2c busybox:latest 28 seconds ago Created busybox 0 63f3f0d050745
$ sudo crictl start d6d4169f06c2c
d6d4169f06c2c
$ sudo crictl exec -it d6d4169f06c2c uname -r
4.14.67-16.container
$ uname -r
4.4.0-57-generic
Again, the kernel version inside the container differs from the host's.
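When done, the test container and pod can be cleaned up with the matching crictl commands, using the IDs returned above:
$ sudo crictl stop d6d4169f06c2c
$ sudo crictl rm d6d4169f06c2c
$ sudo crictl stopp 63f3f0d050745
$ sudo crictl rmp 63f3f0d050745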
On all Kubernetes nodes, repeat the steps above to install and configure Docker, containerd, and Kata Containers.
Follow the official Kubernetes documentation to install kubeadm, kubelet and kubectl on all nodes.
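On Ubuntu, at the time of writing this meant adding the official apt repository (versions move on, so pin the packages to match the kubernetesVersion used below if needed):
$ curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
$ echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
$ sudo apt update
$ sudo apt install -y kubelet kubeadm kubectl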
On the Kubernetes master node, create a kubeadm config file:
$ cat > config.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta1
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.121.15
  bindPort: 6443
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  name: ubuntu1604.ubuntu1604
  kubeletExtraArgs:
    "feature-gates": "RuntimeClass=true"
    "fail-swap-on": "false"
  taints: []
---
apiServer:
  timeoutForControlPlane: 4m0s
  extraArgs:
    "feature-gates": "RuntimeClass=true"
apiVersion: kubeadm.k8s.io/v1beta1
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: ""
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.13.1
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
EOF
NOTE: modify advertiseAddress in the above config.yaml file to specify the master node's IP.
$ sudo kubeadm init --config=config.yaml --ignore-preflight-errors=ALL
Your Kubernetes master has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of machines by running the following on each node
as root:
kubeadm join 192.168.121.86:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:e9909ea05b0045ea99c017e06a6a19d8d4da0a2dbbb11784e9546d5cc061ab70
$ mkdir -p $HOME/.kube
$ sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
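At this point kubectl can reach the API server, but the master will typically report NotReady until a pod network is deployed:
$ kubectl get nodes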
This article uses flannel as an example; other CNI plugins can be installed per the Kubernetes documentation:
$ kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml
Wait a few minutes and check that all control plane pods are up and running:
$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-86c58d9df4-qbgs2 1/1 Running 0 4m29s
kube-system coredns-86c58d9df4-rkgvd 1/1 Running 0 4m29s
kube-system etcd-ubuntu1604.ubuntu1604 1/1 Running 0 3m39s
kube-system kube-apiserver-ubuntu1604.ubuntu1604 1/1 Running 0 3m31s
kube-system kube-controller-manager-ubuntu1604.ubuntu1604 1/1 Running 0 3m25s
kube-system kube-flannel-ds-amd64-ccv2g 1/1 Running 0 93s
kube-system kube-proxy-nkp8t 1/1 Running 0 4m29s
kube-system kube-scheduler-ubuntu1604.ubuntu1604 1/1 Running 0 3m50s
Bind the system:nodes group to the system:node cluster role so that kubelets joining later are authorized:
$ kubectl set subject clusterrolebinding system:node --group=system:nodes
Based on the config file generated by kubeadm config print join-defaults, create a customised config file for the worker nodes, changing apiServerEndpoint to the IP of the Kubernetes master:
$ cat > cluster.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta1
caCertPath: /etc/kubernetes/pki/ca.crt
discovery:
  bootstrapToken:
    apiServerEndpoint: 192.168.121.86:6443
    token: abcdef.0123456789abcdef
    unsafeSkipCAVerification: true
  timeout: 5m0s
  tlsBootstrapToken: abcdef.0123456789abcdef
kind: JoinConfiguration
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  name: k8s-kata
  kubeletExtraArgs:
    "feature-gates": "RuntimeClass=true"
    "fail-swap-on": "false"
EOF
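If the 24-hour token from the init step has expired by the time a node joins, generate a fresh one on the master; the --print-join-command flag also prints the matching discovery hash:
$ sudo kubeadm token create --print-join-command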
Then, to join a node to the Kubernetes cluster, run the following command:
$ sudo kubeadm join --config cluster.yaml --ignore-preflight-errors=ALL
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the master to see this node join the cluster.
Wait a moment and check that the new node is in Ready status:
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-kata Ready <none> 19m v1.13.2
ubuntu1604.ubuntu1604 Ready master 108m v1.13.2
Now create a RuntimeClass resource that maps to the kata handler configured in containerd earlier:
$ cat > kata_resource.yaml << EOF
apiVersion: node.k8s.io/v1beta1  # RuntimeClass is defined in the node.k8s.io API group
kind: RuntimeClass
metadata:
  name: kataclass  # The name the RuntimeClass will be referenced by
  # RuntimeClass is a non-namespaced resource
handler: kata
EOF
$ kubectl apply -f kata_resource.yaml
runtimeclass.node.k8s.io/kataclass created
$ kubectl get runtimeclasses
NAME RUNTIME-HANDLER AGE
kataclass kata 49s
In a pod spec, set runtimeClassName to kataclass to ask Kubernetes to use Kata Containers:
$ cat > pod-kata.yaml << EOF
apiVersion: v1
kind: Pod
metadata:
  name: foobar-kata
spec:
  runtimeClassName: kataclass
  containers:
  - name: nginx
    image: nginx
EOF
$ kubectl apply -f pod-kata.yaml
pod/foobar-kata created
Wait a bit and verify that the pod is successfully created:
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
foobar-kata 0/1 ContainerCreating 0 5s
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
foobar-kata 1/1 Running 0 7s
Check that the pod's kernel version differs from the host's:
$ kubectl exec -it foobar-kata bash
root@foobar-kata:/# uname -r
4.14.67-4.container
root@foobar-kata:/# exit
exit
$ uname -r
4.4.0-57-generic
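You can also confirm from the API side that the pod was admitted with the expected RuntimeClass:
$ kubectl get pod foobar-kata -o jsonpath='{.spec.runtimeClassName}'
kataclass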
Repeat the join step for the remaining nodes in your cluster. Then voilà, congrats! A Kubernetes cluster is up and running with containerd and Kata Containers.
Original article: "Install Kata Containers for docker and Kubernetes", https://www.cnblogs.com/dream397/p/13750210.html