[root@localhost kubevirtci]# /usr/local/bin/runner.sh /bin/bash -c "make cluster-up"
Docker in Docker enabled, initializing...
================================================================================
================================================================================
Done setting up docker in docker.
+ /bin/bash -c 'make cluster-up'
./cluster-up/check.sh
[ OK ] found /dev/kvm
[ OK ] intel nested virtualization enabled
./cluster-up/up.sh
selecting docker as container runtime
Download the image quay.io/kubevirtci/k8s-1.20:podman-test19
time="2021-11-17T15:44:21Z" level=info msg="Using local image quay.io/kubevirtci/k8s-1.20:podman-test19"
time="2021-11-17T15:44:25Z" level=info msg="Using local image quay.io/libpod/registry:2.7"
time="2021-11-17T15:44:30Z" level=info msg="waiting for node to come up"
2021/11/17 15:44:30 Waiting for host: 192.168.66.101:22
2021/11/17 15:44:33 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2021/11/17 15:44:41 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2021/11/17 15:44:46 Connected to tcp://192.168.66.101:22
VM is up
2021/11/17 15:44:47 Waiting for host: 192.168.66.101:22
2021/11/17 15:44:47 Connected to tcp://192.168.66.101:22
+ '[' --vendor '!=' --vendor ']'
+ vendor=8086:2668
+ pci_address=(`lspci -D -d ${vendor}`)
++ lspci -D -d 8086:2668
+ pci_address=0000:00:02.0
+ dev_sysfs_path=/sys/bus/pci/devices/0000:00:02.0
+ [[ ! -d /sys/bus/pci/devices/0000:00:02.0 ]]
+ [[ ! -d /sys/bus/pci/devices/0000:00:02.0/iommu/ ]]
+ driver_path=/sys/bus/pci/devices/0000:00:02.0/driver
+ driver_override=/sys/bus/pci/devices/0000:00:02.0/driver_override
+ modprobe -i vfio-pci
++ get_device_driver
+++ readlink /sys/bus/pci/devices/0000:00:02.0/driver
++ local dev_driver=../../../bus/pci/drivers/snd_hda_intel
++ echo snd_hda_intel
+ driver=snd_hda_intel
+ [[ snd_hda_intel != \v\f\i\o\-\p\c\i ]]
+ echo 0000:00:02.0
+ echo vfio-pci
+ echo 0000:00:02.0
++ get_device_driver
+++ readlink /sys/bus/pci/devices/0000:00:02.0/driver
++ local dev_driver=../../../bus/pci/drivers/vfio-pci
++ echo vfio-pci
+ new_driver=vfio-pci
+ [[ vfio-pci != \v\f\i\o\-\p\c\i ]]
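Note (not part of the captured log): the xtrace above follows the usual sysfs pattern for handing a PCI device over to vfio-pci. A minimal sketch of that pattern, assuming the same device address; the actual kubevirtci script may differ in file names and error handling:

    addr=0000:00:02.0                                            # found via: lspci -D -d 8086:2668
    modprobe -i vfio-pci                                         # make sure the vfio-pci driver is loaded
    echo "$addr" > /sys/bus/pci/devices/$addr/driver/unbind      # detach from the current driver (snd_hda_intel here)
    echo vfio-pci > /sys/bus/pci/devices/$addr/driver_override   # make the next probe prefer vfio-pci
    echo "$addr" > /sys/bus/pci/drivers_probe                    # re-probe so vfio-pci claims the device
    readlink /sys/bus/pci/devices/$addr/driver                   # should now resolve to .../drivers/vfio-pci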
2021/11/17 15:44:47 Waiting for host: 192.168.66.101:22
2021/11/17 15:44:47 Connected to tcp://192.168.66.101:22
+ timeout=30
+ interval=5
+ hostnamectl
+ grep Transient
Transient hostname: node01
+ '[' -f /sys/fs/cgroup/cgroup.controllers ']'
++ kubectl version --short --client
++ cut -d: -f2
++ sed 's/ //g'
++ cut -c2-
+ version=1.20.12
+ cni_manifest=/provision/cni.yaml
++ systemctl status crio
++ grep active
++ wc -l
+ [[ 1 -eq 0 ]]
+ kubeadm init --config /etc/kubernetes/kubeadm.conf --experimental-patches /provision/kubeadm-patches/
[init] Using Kubernetes version: v1.20.12
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local node01] and IPs [10.96.0.1 192.168.66.101]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost node01] and IPs [192.168.66.101 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost node01] and IPs [192.168.66.101 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[patches] Reading patches from path "/provision/kubeadm-patches/"
[patches] Ignoring file "add-security-context-deployment-patch.yaml": unknown target, must be one of [etcd kube-apiserver kube-controller-manager kube-scheduler]
[patches] Found the following patch files: [etcd.yaml kube-apiserver.yaml kube-controller-manager.yaml kube-scheduler.yaml]
[patches] Ignored the following files: [add-security-context-deployment-patch.yaml]
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "etcd"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 19.504070 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.20" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node node01 as control-plane by adding the labels "node-role.kubernetes.io/master=''" and "node-role.kubernetes.io/control-plane='' (deprecated)"
[mark-control-plane] Marking the node node01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.1234567890123456
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
  export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.66.101:6443 --token abcdef.1234567890123456 \
    --discovery-token-ca-cert-hash sha256:e5bbdbf8eade603053c32d7f15d4e278e6bf3e4698d11fd3dea2b2fa073395a3
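Note (not part of the captured log): at this point the control plane should be reachable with the admin kubeconfig that kubeadm just wrote; a minimal, hedged way to confirm that would be:

    export KUBECONFIG=/etc/kubernetes/admin.conf
    kubectl get nodes -o wide
    kubectl get pods -n kube-system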
++ cat /provision/kubeadm-patches/add-security-context-deployment-patch.yaml
+ kubectl --kubeconfig=/etc/kubernetes/admin.conf patch deployment coredns -n kube-system -p 'spec:
  template:
    spec:
      securityContext:
        seLinuxOptions:
          type: spc_t'
deployment.apps/coredns patched
+ kubectl --kubeconfig=/etc/kubernetes/admin.conf create -f /provision/cni.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
poddisruptionbudget.policy/calico-kube-controllers created
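Note (not part of the captured log): a hedged follow-up here would be to watch the Calico CNI pods come up, since the node will not report Ready until the CNI is running:

    kubectl --kubeconfig=/etc/kubernetes/admin.conf -n kube-system get pods -w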
+ kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes node01 node-role.kubernetes.io/master:NoSchedule-
node/node01 untainted
+ kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes --no-headers
node01 NotReady control-plane,master 4s v1.20.12
+ kubectl_rc=0
+ retry_counter=0
+ [[ 0 -lt 20 ]]
+ [[ 0 -ne 0 ]]
+ local_volume_manifest=/provision/local-volume.yaml
+ kubectl --kubeconfig=/etc/kubernetes/admin.conf create -f /provision/local-volume.yaml
storageclass.storage.k8s.io/local created
configmap/local-storage-config created
clusterrolebinding.rbac.authorization.k8s.io/local-storage-provisioner-pv-binding created
clusterrole.rbac.authorization.k8s.io/local-storage-provisioner-node-clusterrole created
clusterrolebinding.rbac.authorization.k8s.io/local-storage-provisioner-node-binding created
role.rbac.authorization.k8s.io/local-storage-provisioner-jobs-role created
rolebinding.rbac.authorization.k8s.io/local-storage-provisioner-jobs-rolebinding created
serviceaccount/local-storage-admin created
daemonset.apps/local-volume-provisioner created
+ mkdir -p /var/lib/rook
+ chcon -t container_file_t /var/lib/rook
Cluster "kubernetes" set.
Cluster "kubernetes" set.
2021/11/17 15:45:27 Waiting for host: 192.168.66.101:22
2021/11/17 15:45:27 Connected to tcp://192.168.66.101:22
node/node01 labeled
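Note (not part of the captured log): the node is still NotReady a few seconds after the taint is removed; a hedged way to block until it becomes schedulable would be:

    kubectl --kubeconfig=/etc/kubernetes/admin.conf wait --for=condition=Ready node/node01 --timeout=300s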
selecting docker as container runtime
+ EXIT_VALUE=0
+ set +o xtrace
[root@localhost kubevirtci]# docker ps
CONTAINER ID   IMAGE                                       COMMAND                  CREATED              STATUS              PORTS                                                                                                                                                                                                                       NAMES
fe10fc3d5db6   quay.io/kubevirtci/k8s-1.20:podman-test19   "/bin/bash -c '/vm.s…"   About a minute ago   Up About a minute                                                                                                                                                                                                                               k8s-1.20-node01
18085dd653ed   quay.io/libpod/registry:2.7                 "/entrypoint.sh /etc…"   About a minute ago   Up About a minute                                                                                                                                                                                                                               k8s-1.20-registry
340f5ab13b5a   quay.io/kubevirtci/k8s-1.20:podman-test19   "/bin/bash -c /dnsma…"   About a minute ago   Up About a minute   127.0.0.1:8443->8443/tcp, 0.0.0.0:32776->80/tcp, 0.0.0.0:32775->443/tcp, 0.0.0.0:32774->2201/tcp, 0.0.0.0:32773->5000/tcp, 0.0.0.0:32772->5901/tcp, 0.0.0.0:32771->6443/tcp, 0.0.0.0:32770->30007/tcp, 0.0.0.0:32769->30008/tcp, 0.0.0.0:32768->31001/tcp   k8s-1.20-dnsmasq
[root@localhost kubevirtci]# docker exec -ti k8s-1.20-registry /bin/sh
/ # cd /var/lib/registry/
/var/lib/registry # touch tt
/var/lib/registry # ls -al
total 0
drwxr-xr-x    1 root     root             4 Nov 17 15:46 .
drwxr-xr-x    1 root     root            42 Nov 17 15:44 ..
-rw-r--r--    1 root     root             0 Nov 17 15:46 tt
/var/lib/registry # exit
[root@localhost kubevirtci]# docker inspect k8s-1.20-registry
[
    {
        "Id": "18085dd653ed8f58cd052ddebc8fdab7cfd5c4385e7ec9d7c844f71750a521e3",
        "Created": "2021-11-17T15:44:25.84068696Z",
        "Path": "/entrypoint.sh",
        "Args": [
            "/etc/docker/registry/config.yml"
        ],
        "State": {
            "Status": "running",
            "Running": true,
            "Paused": false,
            "Restarting": false,
            "OOMKilled": false,
            "Dead": false,
            "Pid": 3940,
            "ExitCode": 0,
            "Error": "",
            "StartedAt": "2021-11-17T15:44:26.766142011Z",
            "FinishedAt": "0001-01-01T00:00:00Z"
        },
        "Image": "sha256:2d4f4b5309b1e41b4f83ae59b44df6d673ef44433c734b14c1c103ebca82c116",
        "ResolvConfPath": "/docker-graph/containers/340f5ab13b5aedf9b25defd3738c5b4acb24b86d615db2cd28bc6d48b0597c4c/resolv.conf",
        "HostnamePath": "/docker-graph/containers/340f5ab13b5aedf9b25defd3738c5b4acb24b86d615db2cd28bc6d48b0597c4c/hostname",
        "HostsPath": "/docker-graph/containers/340f5ab13b5aedf9b25defd3738c5b4acb24b86d615db2cd28bc6d48b0597c4c/hosts",
        "LogPath": "/docker-graph/containers/18085dd653ed8f58cd052ddebc8fdab7cfd5c4385e7ec9d7c844f71750a521e3/18085dd653ed8f58cd052ddebc8fdab7cfd5c4385e7ec9d7c844f71750a521e3-json.log",
        "Name": "/k8s-1.20-registry",
        "RestartCount": 0,
        "Driver": "vfs",
        "Platform": "linux",
        "MountLabel": "",
        "ProcessLabel": "",
        "AppArmorProfile": "",
        "ExecIDs": null,
        "HostConfig": {
            "Binds": null,
            "ContainerIDFile": "",
            "LogConfig": {
                "Type": "json-file",
                "Config": {}
            },
            "NetworkMode": "container:340f5ab13b5aedf9b25defd3738c5b4acb24b86d615db2cd28bc6d48b0597c4c",
            "PortBindings": null,
            "RestartPolicy": {
                "Name": "",
                "MaximumRetryCount": 0
            },
            "AutoRemove": false,
            "VolumeDriver": "",
            "VolumesFrom": null,
            "CapAdd": null,
            "CapDrop": null,
            "Capabilities": null,
            "Dns": null,
            "DnsOptions": null,
            "DnsSearch": null,
            "ExtraHosts": null,
            "GroupAdd": null,
            "IpcMode": "shareable",
            "Cgroup": "",
            "Links": null,
            "OomScoreAdj": 0,
            "PidMode": "",
            "Privileged": true,
            "PublishAllPorts": false,
            "ReadonlyRootfs": false,
            "SecurityOpt": [
                "label=disable"
            ],
            "UTSMode": "",
            "UsernsMode": "",
            "ShmSize": 67108864,
            "Runtime": "runc",
            "ConsoleSize": [
                0,
                0
            ],
            "Isolation": "",
            "CpuShares": 0,
            "Memory": 0,
            "NanoCpus": 0,
            "CgroupParent": "",
            "BlkioWeight": 0,
            "BlkioWeightDevice": null,
            "BlkioDeviceReadBps": null,
            "BlkioDeviceWriteBps": null,
            "BlkioDeviceReadIOps": null,
            "BlkioDeviceWriteIOps": null,
            "CpuPeriod": 0,
            "CpuQuota": 0,
            "CpuRealtimePeriod": 0,
            "CpuRealtimeRuntime": 0,
            "CpusetCpus": "",
            "CpusetMems": "",
            "Devices": null,
            "DeviceCgroupRules": null,
            "DeviceRequests": null,
            "KernelMemory": 0,
            "KernelMemoryTCP": 0,
            "MemoryReservation": 0,
            "MemorySwap": 0,
            "MemorySwappiness": null,
            "OomKillDisable": false,
            "PidsLimit": null,
            "Ulimits": null,
            "CpuCount": 0,
            "CpuPercent": 0,
            "IOMaximumIOps": 0,
            "IOMaximumBandwidth": 0,
            "MaskedPaths": null,
            "ReadonlyPaths": null
        },
        "GraphDriver": {
            "Data": null,
            "Name": "vfs"
        },
        "Mounts": [
            {
                "Type": "volume",
                "Name": "7af1f0ad6aa1629f86532c478a28eabab1617a342efbb065bf525d05de999fd0",
                "Source": "/docker-graph/volumes/7af1f0ad6aa1629f86532c478a28eabab1617a342efbb065bf525d05de999fd0/_data",
                "Destination": "/var/lib/registry",
                "Driver": "local",
                "Mode": "",
                "RW": true,
                "Propagation": ""
            }
        ],
        "Config": {
            "Hostname": "340f5ab13b5a",
            "Domainname": "",
            "User": "",
            "AttachStdin": false,
            "AttachStdout": false,
            "AttachStderr": false,
            "ExposedPorts": {
                "5000/tcp": {}
            },
            "Tty": false,
            "OpenStdin": false,
            "StdinOnce": false,
            "Env": [
                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
            ],
            "Cmd": [
                "/etc/docker/registry/config.yml"
            ],
            "Image": "quay.io/libpod/registry:2.7",
            "Volumes": {
                "/var/lib/registry": {}
            },
            "WorkingDir": "",
            "Entrypoint": [
                "/entrypoint.sh"
            ],
            "OnBuild": null,
            "Labels": {}
        },
        "NetworkSettings": {
            "Bridge": "",
            "SandboxID": "",
            "HairpinMode": false,
            "LinkLocalIPv6Address": "",
            "LinkLocalIPv6PrefixLen": 0,
            "Ports": {},
            "SandboxKey": "",
            "SecondaryIPAddresses": null,
            "SecondaryIPv6Addresses": null,
            "EndpointID": "",
            "Gateway": "",
            "GlobalIPv6Address": "",
            "GlobalIPv6PrefixLen": 0,
            "IPAddress": "",
            "IPPrefixLen": 0,
            "IPv6Gateway": "",
            "MacAddress": "",
            "Networks": {}
        }
    }
]
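Note (not part of the captured log): the two details that matter here are that the registry's /var/lib/registry is backed by a local Docker volume and that the container shares the dnsmasq container's network namespace. A hedged way to pull just those fields out with Go templates, using the same container name as above:

    docker inspect -f '{{ .HostConfig.NetworkMode }}' k8s-1.20-registry
    docker inspect -f '{{ range .Mounts }}{{ .Source }} -> {{ .Destination }}{{ "\n" }}{{ end }}' k8s-1.20-registry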